Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-17 02:48:01 -05:00)
Compare commits
578 Commits
@@ -3,21 +3,23 @@
!invokeai
!ldm
!pyproject.toml
!README.md

# Guard against pulling in any models that might exist in the directory tree
**/*.pt*
**/*.ckpt

# ignore frontend but whitelist dist
invokeai/frontend/**
!invokeai/frontend/dist
invokeai/frontend/
!invokeai/frontend/dist/

# ignore invokeai/assets but whitelist invokeai/assets/web
invokeai/assets
!invokeai/assets/web
invokeai/assets/
!invokeai/assets/web/

# ignore python cache
**/__pycache__
# Byte-compiled / optimized / DLL files
**/__pycache__/
**/*.py[cod]
**/*.egg-info

# Distribution / packaging
*.egg-info/
*.egg
@@ -1,5 +1,8 @@
root = true

# All files
[*]
max_line_length = 80
charset = utf-8
end_of_line = lf
indent_size = 2
@@ -10,3 +13,18 @@ trim_trailing_whitespace = true
# Python
[*.py]
indent_size = 4
max_line_length = 120

# css
[*.css]
indent_size = 4

# flake8
[.flake8]
indent_size = 4

# Markdown MkDocs
[docs/**/*.md]
max_line_length = 80
indent_size = 4
indent_style = unset
.flake8 (new file, 37 lines)
@@ -0,0 +1,37 @@
[flake8]
max-line-length = 120
extend-ignore =
# See https://github.com/PyCQA/pycodestyle/issues/373
E203,
# use Bugbear's B950 instead
E501,
# from black repo https://github.com/psf/black/blob/main/.flake8
E266, W503, B907
extend-select =
# Bugbear line length
B950
extend-exclude =
scripts/orig_scripts/*
ldm/models/*
ldm/modules/*
ldm/data/*
ldm/generate.py
ldm/util.py
ldm/simplet2i.py
per-file-ignores =
# B950 line too long
# W605 invalid escape sequence
# F841 assigned to but never used
# F401 imported but unused
tests/test_prompt_parser.py: B950, W605, F401
tests/test_textual_inversion.py: F841, B950
# B023 Function definition does not bind loop variable
scripts/legacy_api.py: F401, B950, B023, F841
ldm/invoke/__init__.py: F401
# B010 Do not call setattr with a constant attribute value
ldm/invoke/server_legacy.py: B010
# =====================
# flake-quote settings:
# =====================
# Set this to match black style:
inline-quotes = double
.github/CODEOWNERS (vendored, 59 lines changed)
@@ -1,18 +1,18 @@
# continuous integration
/.github/workflows/ @mauwii
/.github/workflows/ @mauwii @lstein @blessedcoolant

# documentation
/docs/ @lstein @mauwii @tildebyte
mkdocs.yml @lstein @mauwii
/docs/ @lstein @mauwii @blessedcoolant
mkdocs.yml @mauwii @lstein

# installation and configuration
/pyproject.toml @mauwii @lstein @ebr
/docker/ @mauwii
/scripts/ @ebr @lstein
/installer/ @ebr @lstein @tildebyte
/scripts/ @ebr @lstein @blessedcoolant
/installer/ @ebr @lstein
ldm/invoke/config @lstein @ebr
invokeai/assets @lstein @ebr
invokeai/configs @lstein @ebr
invokeai/assets @lstein @blessedcoolant
invokeai/configs @lstein @ebr @blessedcoolant
/ldm/invoke/_version.py @lstein @blessedcoolant

# web ui
@@ -20,31 +20,42 @@ invokeai/configs @lstein @ebr
/invokeai/backend @blessedcoolant @psychedelicious

# generation and model management
/ldm/*.py @lstein
/ldm/*.py @lstein @blessedcoolant
/ldm/generate.py @lstein @keturn
/ldm/invoke/args.py @lstein @blessedcoolant
/ldm/invoke/ckpt* @lstein
/ldm/invoke/ckpt_generator @lstein
/ldm/invoke/CLI.py @lstein
/ldm/invoke/config @lstein @ebr @mauwii
/ldm/invoke/ckpt* @lstein @blessedcoolant
/ldm/invoke/ckpt_generator @lstein @blessedcoolant
/ldm/invoke/CLI.py @lstein @blessedcoolant
/ldm/invoke/config @lstein @ebr @mauwii @blessedcoolant
/ldm/invoke/generator @keturn @damian0815
/ldm/invoke/globals.py @lstein @blessedcoolant
/ldm/invoke/merge_diffusers.py @lstein
/ldm/invoke/globals.py @lstein @blessedcoolant
/ldm/invoke/merge_diffusers.py @lstein @blessedcoolant
/ldm/invoke/model_manager.py @lstein @blessedcoolant
/ldm/invoke/txt2mask.py @lstein
/ldm/invoke/patchmatch.py @Kyle0654
/ldm/invoke/txt2mask.py @lstein @blessedcoolant
/ldm/invoke/patchmatch.py @Kyle0654 @lstein
/ldm/invoke/restoration @lstein @blessedcoolant

# attention, textual inversion, model configuration
/ldm/models @damian0815 @keturn
/ldm/modules @damian0815 @keturn
/ldm/models @damian0815 @keturn @blessedcoolant
/ldm/modules/textual_inversion_manager.py @lstein @blessedcoolant
/ldm/modules/attention.py @damian0815 @keturn
/ldm/modules/diffusionmodules @damian0815 @keturn
/ldm/modules/distributions @damian0815 @keturn
/ldm/modules/ema.py @damian0815 @keturn
/ldm/modules/embedding_manager.py @lstein
/ldm/modules/encoders @damian0815 @keturn
/ldm/modules/image_degradation @damian0815 @keturn
/ldm/modules/losses @damian0815 @keturn
/ldm/modules/x_transformer.py @damian0815 @keturn

# Nodes
apps/ @Kyle0654
apps/ @Kyle0654 @jpphoto

# legacy REST API
# is CapableWeb still engaged?
/ldm/invoke/pngwriter.py @CapableWeb
/ldm/invoke/server_legacy.py @CapableWeb
/scripts/legacy_api.py @CapableWeb
/tests/legacy_tests.sh @CapableWeb
# these are dead code
#/ldm/invoke/pngwriter.py @CapableWeb
#/ldm/invoke/server_legacy.py @CapableWeb
#/scripts/legacy_api.py @CapableWeb
#/tests/legacy_tests.sh @CapableWeb
.github/workflows/build-container.yml (vendored, 65 lines changed)
@@ -3,9 +3,19 @@ on:
push:
branches:
- 'main'
- 'update/ci/*'
- 'update/ci/docker/*'
- 'update/docker/*'
paths:
- 'pyproject.toml'
- 'ldm/**'
- 'invokeai/backend/**'
- 'invokeai/configs/**'
- 'invokeai/frontend/dist/**'
- 'docker/Dockerfile'
tags:
- 'v*.*.*'
workflow_dispatch:

jobs:
docker:
@@ -20,18 +30,15 @@ jobs:
include:
- flavor: amd
pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
dockerfile: docker/Dockerfile
platforms: linux/amd64,linux/arm64
- flavor: cuda
pip-extra-index-url: ''
dockerfile: docker/Dockerfile
platforms: linux/amd64,linux/arm64
- flavor: cpu
pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
dockerfile: docker/Dockerfile
platforms: linux/amd64,linux/arm64
runs-on: ubuntu-latest
name: ${{ matrix.flavor }}
env:
PLATFORMS: 'linux/amd64,linux/arm64'
DOCKERFILE: 'docker/Dockerfile'
steps:
- name: Checkout
uses: actions/checkout@v3
@@ -41,7 +48,9 @@ jobs:
uses: docker/metadata-action@v4
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
images: ghcr.io/${{ github.repository }}
images: |
ghcr.io/${{ github.repository }}
${{ vars.DOCKERHUB_REPOSITORY }}
tags: |
type=ref,event=branch
type=ref,event=tag
@@ -52,13 +61,14 @@ jobs:
flavor: |
latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
suffix=-${{ matrix.flavor }},onlatest=false

- name: Set up QEMU
uses: docker/setup-qemu-action@v2

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
platforms: ${{ matrix.platforms }}
platforms: ${{ env.PLATFORMS }}

- name: Login to GitHub Container Registry
if: github.event_name != 'pull_request'
@@ -68,25 +78,34 @@ jobs:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Login to Docker Hub
if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Build container
id: docker_build
uses: docker/build-push-action@v4
with:
context: .
file: ${{ matrix.dockerfile }}
platforms: ${{ matrix.platforms }}
push: ${{ github.event_name != 'pull_request' }}
file: ${{ env.DOCKERFILE }}
platforms: ${{ env.PLATFORMS }}
push: ${{ github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
cache-from: type=gha
cache-to: type=gha,mode=max
cache-from: |
type=gha,scope=${{ github.ref_name }}-${{ matrix.flavor }}
type=gha,scope=main-${{ matrix.flavor }}
cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.flavor }}

- name: Output image, digest and metadata to summary
run: |
{
echo imageid: "${{ steps.docker_build.outputs.imageid }}"
echo digest: "${{ steps.docker_build.outputs.digest }}"
echo labels: "${{ steps.meta.outputs.labels }}"
echo tags: "${{ steps.meta.outputs.tags }}"
echo version: "${{ steps.meta.outputs.version }}"
} >> "$GITHUB_STEP_SUMMARY"
- name: Docker Hub Description
if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
uses: peter-evans/dockerhub-description@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: ${{ vars.DOCKERHUB_REPOSITORY }}
short-description: ${{ github.event.repository.description }}
.github/workflows/mkdocs-material.yml (vendored, 10 lines changed)
@@ -9,6 +9,10 @@ jobs:
mkdocs-material:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
env:
REPO_URL: '${{ github.server_url }}/${{ github.repository }}'
REPO_NAME: '${{ github.repository }}'
SITE_URL: 'https://${{ github.repository_owner }}.github.io/InvokeAI'
steps:
- name: checkout sources
uses: actions/checkout@v3
@@ -19,11 +23,15 @@ jobs:
uses: actions/setup-python@v4
with:
python-version: '3.10'
cache: pip
cache-dependency-path: pyproject.toml

- name: install requirements
env:
PIP_USE_PEP517: 1
run: |
python -m \
pip install -r docs/requirements-mkdocs.txt
pip install ".[docs]"

- name: confirm buildability
run: |
.github/workflows/pypi-release.yml (vendored, 2 lines changed)
@@ -28,7 +28,7 @@ jobs:
run: twine check dist/*

- name: check PyPI versions
if: github.ref == 'refs/heads/main'
if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/v2.3'
run: |
pip install --upgrade requests
python -c "\
.github/workflows/test-invoke-pip-skip.yml (vendored, new file, 67 lines)
@@ -0,0 +1,67 @@
name: Test invoke.py pip
on:
pull_request:
paths-ignore:
- 'pyproject.toml'
- 'ldm/**'
- 'invokeai/backend/**'
- 'invokeai/configs/**'
- 'invokeai/frontend/dist/**'
merge_group:
workflow_dispatch:

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true

jobs:
matrix:
if: github.event.pull_request.draft == false
strategy:
matrix:
python-version:
# - '3.9'
- '3.10'
pytorch:
# - linux-cuda-11_6
- linux-cuda-11_7
- linux-rocm-5_2
- linux-cpu
- macos-default
- windows-cpu
# - windows-cuda-11_6
# - windows-cuda-11_7
include:
# - pytorch: linux-cuda-11_6
# os: ubuntu-22.04
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
# github-env: $GITHUB_ENV
- pytorch: linux-cuda-11_7
os: ubuntu-22.04
github-env: $GITHUB_ENV
- pytorch: linux-rocm-5_2
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
github-env: $GITHUB_ENV
- pytorch: linux-cpu
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/cpu'
github-env: $GITHUB_ENV
- pytorch: macos-default
os: macOS-12
github-env: $GITHUB_ENV
- pytorch: windows-cpu
os: windows-2022
github-env: $env:GITHUB_ENV
# - pytorch: windows-cuda-11_6
# os: windows-2022
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
# github-env: $env:GITHUB_ENV
# - pytorch: windows-cuda-11_7
# os: windows-2022
# extra-index-url: 'https://download.pytorch.org/whl/cu117'
# github-env: $env:GITHUB_ENV
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
runs-on: ${{ matrix.os }}
steps:
- run: 'echo "No build required"'
.github/workflows/test-invoke-pip.yml (vendored, 13 lines changed)
@@ -3,11 +3,24 @@ on:
push:
branches:
- 'main'
paths:
- 'pyproject.toml'
- 'ldm/**'
- 'invokeai/backend/**'
- 'invokeai/configs/**'
- 'invokeai/frontend/dist/**'
pull_request:
paths:
- 'pyproject.toml'
- 'ldm/**'
- 'invokeai/backend/**'
- 'invokeai/configs/**'
- 'invokeai/frontend/dist/**'
types:
- 'ready_for_review'
- 'opened'
- 'synchronize'
merge_group:
workflow_dispatch:

concurrency:
.gitignore (vendored, 3 lines changed)
@@ -1,4 +1,5 @@
# ignore default image save location and model symbolic link
.idea/
embeddings/
outputs/
models/ldm/stable-diffusion-v1/model.ckpt
@@ -232,4 +233,4 @@ installer/update.bat
installer/update.sh

# no longer stored in source directory
models
models
.pre-commit-config.yaml (new file, 41 lines)
@@ -0,0 +1,41 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/psf/black
rev: 23.1.0
hooks:
- id: black

- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort

- repo: https://github.com/PyCQA/flake8
rev: 6.0.0
hooks:
- id: flake8
additional_dependencies:
- flake8-black
- flake8-bugbear
- flake8-comprehensions
- flake8-simplify

- repo: https://github.com/pre-commit/mirrors-prettier
rev: 'v3.0.0-alpha.4'
hooks:
- id: prettier

- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-added-large-files
- id: check-executables-have-shebangs
- id: check-shebang-scripts-are-executable
- id: check-merge-conflict
- id: check-symlinks
- id: check-toml
- id: end-of-file-fixer
- id: no-commit-to-branch
args: ['--branch', 'main']
- id: trailing-whitespace
.prettierignore (new file, 14 lines)
@@ -0,0 +1,14 @@
invokeai/frontend/.husky
invokeai/frontend/patches

# Ignore artifacts:
build
coverage
static
invokeai/frontend/dist

# Ignore all HTML files:
*.html

# Ignore deprecated docs
docs/installation/deprecated_documentation
@@ -1,9 +1,9 @@
endOfLine: lf
tabWidth: 2
useTabs: false
singleQuote: true
quoteProps: as-needed
embeddedLanguageFormatting: auto
endOfLine: lf
singleQuote: true
semi: true
trailingComma: es5
useTabs: false
overrides:
- files: '*.md'
options:
@@ -11,3 +11,9 @@ overrides:
printWidth: 80
parser: markdown
cursorOffset: -1
- files: docs/**/*.md
options:
tabWidth: 4
- files: 'invokeai/frontend/public/locales/*.json'
options:
tabWidth: 4
README.md (158 lines changed)
@@ -1,6 +1,6 @@
<div align="center">




# InvokeAI: A Stable Diffusion Toolkit

@@ -10,10 +10,10 @@

[![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link]

[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link]
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]

[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
[CI checks on main link]:https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
[discord link]: https://discord.gg/ZmtBAhwWhy
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
@@ -28,12 +28,14 @@
[latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
[translation status badge]: https://hosted.weblate.org/widgets/invokeai/-/svg-badge.svg
[translation status link]: https://hosted.weblate.org/engage/invokeai/

</div>

InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products.

**Quick links**: [[How to Install](#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
**Quick links**: [[How to Install](https://invoke-ai.github.io/InvokeAI/#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]

_Note: InvokeAI is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
@@ -41,38 +43,136 @@ requests. Be sure to use the provided templates. They will help us diagnose issu

<div align="center">




</div>

# Getting Started with InvokeAI
## Table of Contents

1. [Quick Start](#getting-started-with-invokeai)
2. [Installation](#detailed-installation-instructions)
3. [Hardware Requirements](#hardware-requirements)
4. [Features](#features)
5. [Latest Changes](#latest-changes)
6. [Troubleshooting](#troubleshooting)
7. [Contributing](#contributing)
8. [Contributors](#contributors)
9. [Support](#support)
10. [Further Reading](#further-reading)

## Getting Started with InvokeAI

For full installation and upgrade instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)

### Automatic Installer (suggested for 1st time users)

1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)

2. Download the .zip file for your OS (Windows/macOS/Linux).

3. Unzip the file.
4. If you are on Windows, double-click on the `install.bat` script. On macOS, open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press return. On Linux, run `install.sh`.
5. Wait a while, until it is done.
6. The folder where you ran the installer from will now be filled with lots of files. If you are on Windows, double-click on the `invoke.bat` file. On macOS, open a Terminal window, drag `invoke.sh` from the folder into the Terminal, and press return. On Linux, run `invoke.sh`
7. Press 2 to open the "browser-based UI", press enter/return, wait a minute or two for Stable Diffusion to start up, then open your browser and go to http://localhost:9090.
8. Type `banana sushi` in the box on the top left and click `Invoke`

4. If you are on Windows, double-click on the `install.bat` script. On
macOS, open a Terminal window, drag the file `install.sh` from Finder
into the Terminal, and press return. On Linux, run `install.sh`.

## Table of Contents
5. You'll be asked to confirm the location of the folder in which
to install InvokeAI and its image generation model files. Pick a
location with at least 15 GB of free memory. More if you plan on
installing lots of models.

1. [Installation](#installation)
2. [Hardware Requirements](#hardware-requirements)
3. [Features](#features)
4. [Latest Changes](#latest-changes)
5. [Troubleshooting](#troubleshooting)
6. [Contributing](#contributing)
7. [Contributors](#contributors)
8. [Support](#support)
9. [Further Reading](#further-reading)
6. Wait while the installer does its thing. After installing the software,
the installer will launch a script that lets you configure InvokeAI and
select a set of starting image generaiton models.

## Installation
7. Find the folder that InvokeAI was installed into (it is not the
same as the unpacked zip file directory!) The default location of this
folder (if you didn't change it in step 5) is `~/invokeai` on
Linux/Mac systems, and `C:\Users\YourName\invokeai` on Windows. This directory will contain launcher scripts named `invoke.sh` and `invoke.bat`.

8. On Windows systems, double-click on the `invoke.bat` file. On
macOS, open a Terminal window, drag `invoke.sh` from the folder into
the Terminal, and press return. On Linux, run `invoke.sh`

9. Press 2 to open the "browser-based UI", press enter/return, wait a
minute or two for Stable Diffusion to start up, then open your browser
and go to http://localhost:9090.

10. Type `banana sushi` in the box on the top left and click `Invoke`

### Command-Line Installation (for users familiar with Terminals)

You must have Python 3.9 or 3.10 installed on your machine. Earlier or later versions are
not supported.

1. Open a command-line window on your machine. The PowerShell is recommended for Windows.
2. Create a directory to install InvokeAI into. You'll need at least 15 GB of free space:

```terminal
mkdir invokeai
````

3. Create a virtual environment named `.venv` inside this directory and activate it:

```terminal
cd invokeai
python -m venv .venv --prompt InvokeAI
```

4. Activate the virtual environment (do it every time you run InvokeAI)

_For Linux/Mac users:_

```sh
source .venv/bin/activate
```

_For Windows users:_

```ps
.venv\Scripts\activate
```

5. Install the InvokeAI module and its dependencies. Choose the command suited for your platform & GPU.

_For Windows/Linux with an NVIDIA GPU:_

```terminal
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
```

_For Linux with an AMD GPU:_

```sh
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
```

_For Macintoshes, either Intel or M1/M2:_

```sh
pip install InvokeAI --use-pep517
```

6. Configure InvokeAI and install a starting set of image generation models (you only need to do this once):

```terminal
invokeai-configure
```

7. Launch the web server (do it every time you run InvokeAI):

```terminal
invokeai --web
```

8. Point your browser to http://localhost:9090 to bring up the web interface.
9. Type `banana sushi` in the box on the top left and click `Invoke`.

Be sure to activate the virtual environment each time before re-launching InvokeAI,
using `source .venv/bin/activate` or `.venv\Scripts\activate`.

### Detailed Installation Instructions

This fork is supported across Linux, Windows and Macintosh. Linux
users can use either an Nvidia-based card (with CUDA support) or an
@@ -80,13 +180,13 @@ AMD card (using the ROCm driver). For full installation and upgrade
instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)

### Hardware Requirements
## Hardware Requirements

InvokeAI is supported across Linux, Windows and macOS. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver).

#### System
### System

You will need one of the following:

@@ -98,11 +198,11 @@ We do not recommend the GTX 1650 or 1660 series video cards. They are
unable to run in half-precision mode and do not have sufficient VRAM
to render 512x512 images.

#### Memory
### Memory

- At least 12 GB Main Memory RAM.

#### Disk
### Disk

- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.

@@ -152,13 +252,15 @@ Notes](https://github.com/invoke-ai/InvokeAI/releases) and the
Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
problems and other issues.

# Contributing
## Contributing

Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
cleanup, testing, or code reviews, is very much encouraged to do so.

To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.

If you'd like to help with translation, please see our [translation guide](docs/other/TRANSLATION.md).

If you are unfamiliar with how
to contribute to GitHub projects, here is a
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.
@@ -175,6 +277,8 @@ This fork is a combined effort of various people from across the world.
[Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for
their time, hard work and effort.

Thanks to [Weblate](https://weblate.org/) for generously providing translation services to this project.

### Support

For support, please use this repository's GitHub Issues tracking service, or join the Discord.
@@ -147,7 +147,7 @@ echo ***** Installed invoke launcher script ******
rd /s /q binary_installer installer_files

@rem preload the models
call .venv\Scripts\python scripts\configure_invokeai.py
call .venv\Scripts\python ldm\invoke\config\invokeai_configure.py
set err_msg=----- model download clone failed -----
if %errorlevel% neq 0 goto err_exit
deactivate
@@ -1,57 +1,63 @@
# syntax=docker/dockerfile:1

ARG PYTHON_VERSION=3.9
##################
## base image ##
##################
FROM python:${PYTHON_VERSION}-slim AS python-base

# prepare for buildkit cache
RUN rm -f /etc/apt/apt.conf.d/docker-clean
LABEL org.opencontainers.image.authors="mauwii@outlook.de"

# Install necesarry packages
# prepare for buildkit cache
RUN rm -f /etc/apt/apt.conf.d/docker-clean \
&& echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache

# Install necessary packages
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get install \
-yqq \
&& apt-get install -y \
--no-install-recommends \
libgl1-mesa-glx=20.3.* \
libglib2.0-0=2.66.* \
libopencv-dev=4.5.* \
&& rm -rf /var/lib/apt/lists/*
libopencv-dev=4.5.*

# set working directory and path
# set working directory and env
ARG APPDIR=/usr/src
ARG APPNAME=InvokeAI
WORKDIR ${APPDIR}
ENV PATH=${APPDIR}/${APPNAME}/bin:$PATH
ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
# Keeps Python from generating .pyc files in the container
ENV PYTHONDONTWRITEBYTECODE 1
# Turns off buffering for easier container logging
ENV PYTHONUNBUFFERED 1
# don't fall back to legacy build system
ENV PIP_USE_PEP517=1

#######################
## build pyproject ##
#######################
FROM python-base AS pyproject-builder
ENV PIP_USE_PEP517=1

# prepare for buildkit cache
# Install dependencies
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get install -y \
--no-install-recommends \
build-essential=12.9 \
gcc=4:10.2.* \
python3-dev=3.9.*

# prepare pip for buildkit cache
ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
RUN mkdir -p ${PIP_CACHE_DIR}

# Install dependencies
RUN \
--mount=type=cache,target=${PIP_CACHE_DIR} \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
apt-get update \
&& apt-get install \
-yqq \
--no-install-recommends \
build-essential=12.9 \
gcc=4:10.2.* \
python3-dev=3.9.* \
&& rm -rf /var/lib/apt/lists/*

# create virtual environment
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
python3 -m venv "${APPNAME}" \
--upgrade-deps

@@ -61,9 +67,8 @@ COPY --link . .
# install pyproject.toml
ARG PIP_EXTRA_INDEX_URL
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
ARG PIP_PACKAGE=.
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
"${APPDIR}/${APPNAME}/bin/pip" install ${PIP_PACKAGE}
RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
"${APPNAME}/bin/pip" install .

# build patchmatch
RUN python3 -c "from patchmatch import patch_match"
@@ -73,14 +78,26 @@ RUN python3 -c "from patchmatch import patch_match"
#####################
FROM python-base AS runtime

# setup environment
COPY --from=pyproject-builder --link ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
ENV INVOKEAI_ROOT=/data
ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
# Create a new user
ARG UNAME=appuser
RUN useradd \
--no-log-init \
-m \
-U \
"${UNAME}"

# set Entrypoint and default CMD
# create volume directory
ARG VOLUME_DIR=/data
RUN mkdir -p "${VOLUME_DIR}" \
&& chown -R "${UNAME}" "${VOLUME_DIR}"

# setup runtime environment
USER ${UNAME}
COPY --chown=${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
ENV INVOKEAI_ROOT ${VOLUME_DIR}
ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
EXPOSE 9090
ENTRYPOINT [ "invokeai" ]
CMD [ "--web", "--host=0.0.0.0" ]
VOLUME [ "/data" ]

LABEL org.opencontainers.image.authors="mauwii@outlook.de"
CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
VOLUME [ "${VOLUME_DIR}" ]
@@ -1,19 +1,24 @@
#!/usr/bin/env bash
set -e

# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
# Some possible pip extra-index urls (cuda 11.7 is available without extra url):
# CUDA 11.6: https://download.pytorch.org/whl/cu116
# ROCm 5.2: https://download.pytorch.org/whl/rocm5.2
# CPU: https://download.pytorch.org/whl/cpu
# as found on https://pytorch.org/get-started/locally/
# If you want to build a specific flavor, set the CONTAINER_FLAVOR environment variable
# e.g. CONTAINER_FLAVOR=cpu ./build.sh
# Possible Values are:
# - cpu
# - cuda
# - rocm
# Don't forget to also set it when executing run.sh
# if it is not set, the script will try to detect the flavor by itself.
#
# Doc can be found here:
# https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/

SCRIPTDIR=$(dirname "$0")
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
cd "$SCRIPTDIR" || exit 1

source ./env.sh

DOCKERFILE=${INVOKE_DOCKERFILE:-Dockerfile}
DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile}

# print the settings
echo -e "You are using these values:\n"
@@ -21,23 +26,25 @@ echo -e "Dockerfile:\t\t${DOCKERFILE}"
echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename:\t\t${VOLUMENAME}"
echo -e "Platform:\t\t${PLATFORM}"
echo -e "Registry:\t\t${CONTAINER_REGISTRY}"
echo -e "Repository:\t\t${CONTAINER_REPOSITORY}"
echo -e "Container Registry:\t${CONTAINER_REGISTRY}"
echo -e "Container Repository:\t${CONTAINER_REPOSITORY}"
echo -e "Container Tag:\t\t${CONTAINER_TAG}"
echo -e "Container Flavor:\t${CONTAINER_FLAVOR}"
echo -e "Container Image:\t${CONTAINER_IMAGE}\n"

# Create docker volume
if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
echo -e "Volume already exists\n"
else
echo -n "createing docker volume "
echo -n "creating docker volume "
docker volume create "${VOLUMENAME}"
fi

# Build Container
DOCKER_BUILDKIT=1 docker build \
--platform="${PLATFORM}" \
--tag="${CONTAINER_IMAGE}" \
--platform="${PLATFORM:-linux/amd64}" \
--tag="${CONTAINER_IMAGE:-invokeai}" \
${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
--file="${DOCKERFILE}" \
@@ -1,19 +1,31 @@
#!/usr/bin/env bash

# This file is used to set environment variables for the build.sh and run.sh scripts.

# Try to detect the container flavor if no PIP_EXTRA_INDEX_URL got specified
if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then

# Activate virtual environment if not already activated and exists
if [[ -z $VIRTUAL_ENV ]]; then
[[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \
&& source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" \
&& echo "Activated virtual environment: $VIRTUAL_ENV"
fi

# Decide which container flavor to build if not specified
if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
# Check for CUDA and ROCm
CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
if [[ "$(uname -s)" != "Darwin" && "${CUDA_AVAILABLE}" == "True" ]]; then
if [[ "${CUDA_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR="cuda"
elif [[ "$(uname -s)" != "Darwin" && "${ROCM_AVAILABLE}" == "True" ]]; then
elif [[ "${ROCM_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR="rocm"
else
CONTAINER_FLAVOR="cpu"
fi
fi

# Set PIP_EXTRA_INDEX_URL based on container flavor
if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
@@ -26,9 +38,10 @@ fi

# Variables shared by build.sh and run.sh
REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME,,}_data"}"
REPOSITORY_NAME="${REPOSITORY_NAME,,}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}"
ARCH="${ARCH-$(uname -m)}"
PLATFORM="${PLATFORM-Linux/${ARCH}}"
PLATFORM="${PLATFORM-linux/${ARCH}}"
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
@@ -1,14 +1,16 @@
#!/usr/bin/env bash
set -e

# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
# How to use: https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/

SCRIPTDIR=$(dirname "$0")
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
cd "$SCRIPTDIR" || exit 1

source ./env.sh

# Create outputs directory if it does not exist
[[ -d ./outputs ]] || mkdir ./outputs

echo -e "You are using these values:\n"
echo -e "Volumename:\t${VOLUMENAME}"
echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
@@ -22,10 +24,18 @@ docker run \
--name="${REPOSITORY_NAME,,}" \
--hostname="${REPOSITORY_NAME,,}" \
--mount=source="${VOLUMENAME}",target=/data \
${MODELSPATH:+-u "$(id -u):$(id -g)"} \
--mount type=bind,source="$(pwd)"/outputs,target=/data/outputs \
${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
--publish=9090:9090 \
--cap-add=sys_nice \
${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
"${CONTAINER_IMAGE}" ${1:+$@}
"${CONTAINER_IMAGE}" ${@:+$@}

# Remove Trash folder
for f in outputs/.Trash*; do
if [ -e "$f" ]; then
rm -Rf "$f"
break
fi
done
docs/.markdownlint.jsonc (new file, 5 lines)
@@ -0,0 +1,5 @@
{
"MD046": false,
"MD007": false,
"MD030": false
}
Binary image file changed (not shown): 20 KiB before, 84 KiB after.
BIN docs/assets/installer-walkthrough/installing-models.png (new file, 128 KiB; binary file not shown)
BIN docs/assets/installer-walkthrough/settings-form.png (new file, 114 KiB; binary file not shown)
@@ -214,6 +214,8 @@ Here are the invoke> command that apply to txt2img:
| `--variation <float>` | `-v<float>` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S<seed>` and `-n<int>` to generate a series a riffs on a starting image. See [Variations](./VARIATIONS.md). |
| `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for now to use this. |
| `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory |
| `--h_symmetry_time_pct <float>` | | `None` | Create symmetry along the X axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |
| `--v_symmetry_time_pct <float>` | | `None` | Create symmetry along the Y axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |

!!! note
@@ -40,7 +40,7 @@ for adj in adjectives:
print(f'a {adj} day -A{samp} -C{cg}')
```

It's output looks like this (abbreviated):
Its output looks like this (abbreviated):

```bash
a sunny day -Aklms -C7.5
@@ -250,6 +250,24 @@ invokeai-ti \
--only_save_embeds
```

## Using Embeddings

After training completes, the resultant embeddings will be saved into your `$INVOKEAI_ROOT/embeddings/<trigger word>/learned_embeds.bin`.

These will be automatically loaded when you start InvokeAI.

Add the trigger word, surrounded by angle brackets, to use that embedding. For example, if your trigger word was `terence`, use `<terence>` in prompts. This is the same syntax used by the HuggingFace concepts library.

**Note:** `.pt` embeddings do not require the angle brackets.

## Troubleshooting

### `Cannot load embedding for <trigger>. It was trained on a model with token dimension 1024, but the current model has token dimension 768`

Messages like this indicate you trained the embedding on a different base model than the currently selected one.

For example, in the error above, the training was done on SD2.1 (768x768) but it was used on SD1.5 (512x512).

## Reading

For more information on textual inversion, please see the following
@@ -2,62 +2,82 @@
title: Overview
---

Here you can find the documentation for InvokeAI's various features.
- The Basics

## The Basics
### * The [Web User Interface](WEB.md)
Guide to the Web interface. Also see the [WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)
- The [Web User Interface](WEB.md)

### * The [Unified Canvas](UNIFIED_CANVAS.md)
Build complex scenes by combine and modifying multiple images in a stepwise
fashion. This feature combines img2img, inpainting and outpainting in
a single convenient digital artist-optimized user interface.
Guide to the Web interface. Also see the
[WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)

### * The [Command Line Interface (CLI)](CLI.md)
Scriptable access to InvokeAI's features.
- The [Unified Canvas](UNIFIED_CANVAS.md)

## Image Generation
### * [Prompt Engineering](PROMPTS.md)
Get the images you want with the InvokeAI prompt engineering language.
Build complex scenes by combine and modifying multiple images in a
stepwise fashion. This feature combines img2img, inpainting and
outpainting in a single convenient digital artist-optimized user
interface.

## * [Post-Processing](POSTPROCESS.md)
Restore mangled faces and make images larger with upscaling. Also see the [Embiggen Upscaling Guide](EMBIGGEN.md).
- The [Command Line Interface (CLI)](CLI.md)

## * The [Concepts Library](CONCEPTS.md)
Add custom subjects and styles using HuggingFace's repository of embeddings.
Scriptable access to InvokeAI's features.

### * [Image-to-Image Guide for the CLI](IMG2IMG.md)
Use a seed image to build new creations in the CLI.
- Image Generation

### * [Inpainting Guide for the CLI](INPAINTING.md)
Selectively erase and replace portions of an existing image in the CLI.
- [Prompt Engineering](PROMPTS.md)

### * [Outpainting Guide for the CLI](OUTPAINTING.md)
Extend the borders of the image with an "outcrop" function within the CLI.
Get the images you want with the InvokeAI prompt engineering language.

### * [Generating Variations](VARIATIONS.md)
Have an image you like and want to generate many more like it? Variations
are the ticket.
- [Post-Processing](POSTPROCESS.md)

## Model Management
Restore mangled faces and make images larger with upscaling. Also see
the [Embiggen Upscaling Guide](EMBIGGEN.md).

## * [Model Installation](../installation/050_INSTALLING_MODELS.md)
Learn how to import third-party models and switch among them. This
guide also covers optimizing models to load quickly.
- The [Concepts Library](CONCEPTS.md)

## * [Merging Models](MODEL_MERGING.md)
Teach an old model new tricks. Merge 2-3 models together to create a
new model that combines characteristics of the originals.
Add custom subjects and styles using HuggingFace's repository of
embeddings.

## * [Textual Inversion](TEXTUAL_INVERSION.md)
Personalize models by adding your own style or subjects.
- [Image-to-Image Guide for the CLI](IMG2IMG.md)

# Other Features
Use a seed image to build new creations in the CLI.

## * [The NSFW Checker](NSFW.md)
Prevent InvokeAI from displaying unwanted racy images.
- [Inpainting Guide for the CLI](INPAINTING.md)

## * [Miscellaneous](OTHER.md)
Run InvokeAI on Google Colab, generate images with repeating patterns,
batch process a file of prompts, increase the "creativity" of image
generation by adding initial noise, and more!
Selectively erase and replace portions of an existing image in the CLI.
|
||||
|
||||
- [Outpainting Guide for the CLI](OUTPAINTING.md)
|
||||
|
||||
Extend the borders of the image with an "outcrop" function within the
|
||||
CLI.
|
||||
|
||||
- [Generating Variations](VARIATIONS.md)
|
||||
|
||||
Have an image you like and want to generate many more like it?
|
||||
Variations are the ticket.
|
||||
|
||||
- Model Management
|
||||
|
||||
- [Model Installation](../installation/050_INSTALLING_MODELS.md)
|
||||
|
||||
Learn how to import third-party models and switch among them. This guide
|
||||
also covers optimizing models to load quickly.
|
||||
|
||||
- [Merging Models](MODEL_MERGING.md)
|
||||
|
||||
Teach an old model new tricks. Merge 2-3 models together to create a new
|
||||
model that combines characteristics of the originals.
|
||||
|
||||
- [Textual Inversion](TEXTUAL_INVERSION.md)
|
||||
|
||||
Personalize models by adding your own style or subjects.
|
||||
|
||||
- Other Features
|
||||
|
||||
- [The NSFW Checker](NSFW.md)
|
||||
|
||||
Prevent InvokeAI from displaying unwanted racy images.
|
||||
|
||||
- [Miscellaneous](OTHER.md)
|
||||
|
||||
Run InvokeAI on Google Colab, generate images with repeating patterns,
|
||||
batch process a file of prompts, increase the "creativity" of image
|
||||
generation by adding initial noise, and more!
|
||||
|
||||
docs/help/IDE-Settings/index.md (new file, 4 lines)
@@ -0,0 +1,4 @@

# :octicons-file-code-16: IDE-Settings

Here we will share settings for the IDEs used by our developers; maybe you can find
something interesting which will help to boost your development efficiency 🔥
docs/help/IDE-Settings/vs-code.md (new file, 250 lines)
@@ -0,0 +1,250 @@

---
title: Visual Studio Code
---

# :material-microsoft-visual-studio-code:Visual Studio Code

The Workspace Settings are stored in the project (repository) root and take
higher priority than your user settings.

This lets you keep different settings for different projects, while the user
settings are used as defaults when no workspace settings are provided.

## tasks.json

First we will create a task configuration which creates a virtual
environment and upgrades the packaging tools (pip, setuptools and wheel).

Into this venv we will then install the pyproject.toml in editable mode with
the dev, docs and test dependencies.

```json title=".vscode/tasks.json"
|
||||
{
|
||||
// See https://go.microsoft.com/fwlink/?LinkId=733558
|
||||
// for the documentation about the tasks.json format
|
||||
"version": "2.0.0",
|
||||
"tasks": [
|
||||
{
|
||||
"label": "Create virtual environment",
|
||||
"detail": "Create .venv and upgrade pip, setuptools and wheel",
|
||||
"command": "python3",
|
||||
"args": [
|
||||
"-m",
|
||||
"venv",
|
||||
".venv",
|
||||
"--prompt",
|
||||
"InvokeAI",
|
||||
"--upgrade-deps"
|
||||
],
|
||||
"runOptions": {
|
||||
"instanceLimit": 1,
|
||||
"reevaluateOnRerun": true
|
||||
},
|
||||
"group": {
|
||||
"kind": "build"
|
||||
},
|
||||
"presentation": {
|
||||
"echo": true,
|
||||
"reveal": "always",
|
||||
"focus": false,
|
||||
"panel": "shared",
|
||||
"showReuseMessage": true,
|
||||
"clear": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"label": "build InvokeAI",
|
||||
"detail": "Build pyproject.toml with extras dev, docs and test",
|
||||
"command": "${workspaceFolder}/.venv/bin/python3",
|
||||
"args": [
|
||||
"-m",
|
||||
"pip",
|
||||
"install",
|
||||
"--use-pep517",
|
||||
"--editable",
|
||||
".[dev,docs,test]"
|
||||
],
|
||||
"dependsOn": "Create virtual environment",
|
||||
"dependsOrder": "sequence",
|
||||
"group": {
|
||||
"kind": "build",
|
||||
"isDefault": true
|
||||
},
|
||||
"presentation": {
|
||||
"echo": true,
|
||||
"reveal": "always",
|
||||
"focus": false,
|
||||
"panel": "shared",
|
||||
"showReuseMessage": true,
|
||||
"clear": false
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The fastest way to build InvokeAI now is ++cmd+shift+b++
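If you prefer to run the same two build steps outside of VS Code, they correspond roughly to the following terminal commands, run from the repository root (this is only a sketch of what the tasks above do):

```bash
# create the virtual environment and upgrade pip, setuptools and wheel
python3 -m venv .venv --prompt InvokeAI --upgrade-deps

# install the project in editable mode with the dev, docs and test extras
.venv/bin/python3 -m pip install --use-pep517 --editable ".[dev,docs,test]"
```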
## launch.json

This file defines debugger configurations, so that you can launch and monitor
the application with one click, set breakpoints to inspect specific states, and
so on.

```json title=".vscode/launch.json"
|
||||
{
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "invokeai web",
|
||||
"type": "python",
|
||||
"request": "launch",
|
||||
"program": ".venv/bin/invokeai",
|
||||
"justMyCode": true
|
||||
},
|
||||
{
|
||||
"name": "invokeai cli",
|
||||
"type": "python",
|
||||
"request": "launch",
|
||||
"program": ".venv/bin/invokeai",
|
||||
"justMyCode": true
|
||||
},
|
||||
{
|
||||
"name": "mkdocs serve",
|
||||
"type": "python",
|
||||
"request": "launch",
|
||||
"program": ".venv/bin/mkdocs",
|
||||
"args": ["serve"],
|
||||
"justMyCode": true
|
||||
}
|
||||
]
|
||||
}
|
||||
```
Then you only need to hit ++f5++ and the fun begins :nerd: (It is assumed that
you have created a virtual environment via the [tasks](#tasksjson) from the
previous step.)

## extensions.json

A list of recommended VS Code extensions to make your life easier:

```json title=".vscode/extensions.json"
|
||||
{
|
||||
"recommendations": [
|
||||
"editorconfig.editorconfig",
|
||||
"github.vscode-pull-request-github",
|
||||
"ms-python.black-formatter",
|
||||
"ms-python.flake8",
|
||||
"ms-python.isort",
|
||||
"ms-python.python",
|
||||
"ms-python.vscode-pylance",
|
||||
"redhat.vscode-yaml",
|
||||
"tamasfe.even-better-toml",
|
||||
"eamodio.gitlens",
|
||||
"foxundermoon.shell-format",
|
||||
"timonwong.shellcheck",
|
||||
"esbenp.prettier-vscode",
|
||||
"davidanson.vscode-markdownlint",
|
||||
"yzhang.markdown-all-in-one",
|
||||
"bierner.github-markdown-preview",
|
||||
"ms-azuretools.vscode-docker",
|
||||
"mads-hartmann.bash-ide-vscode"
|
||||
]
|
||||
}
|
||||
```
## settings.json

With the settings below, your files are already formatted when you save them
(only your modifications, if available), which will help you to not run into
trouble with the pre-commit hooks. If the hooks fail, they will prevent you
from committing, but most hooks directly add a fixed version, so that you just
need to stage and commit them:

```json title=".vscode/settings.json"
|
||||
{
|
||||
"[json]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode",
|
||||
"editor.quickSuggestions": {
|
||||
"comments": false,
|
||||
"strings": true,
|
||||
"other": true
|
||||
},
|
||||
"editor.suggest.insertMode": "replace",
|
||||
"gitlens.codeLens.scopes": ["document"]
|
||||
},
|
||||
"[jsonc]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode",
|
||||
"editor.formatOnSave": true,
|
||||
"editor.formatOnSaveMode": "modificationsIfAvailable"
|
||||
},
|
||||
"[python]": {
|
||||
"editor.defaultFormatter": "ms-python.black-formatter",
|
||||
"editor.formatOnSave": true,
|
||||
"editor.formatOnSaveMode": "file"
|
||||
},
|
||||
"[toml]": {
|
||||
"editor.defaultFormatter": "tamasfe.even-better-toml",
|
||||
"editor.formatOnSave": true,
|
||||
"editor.formatOnSaveMode": "modificationsIfAvailable"
|
||||
},
|
||||
"[yaml]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode",
|
||||
"editor.formatOnSave": true,
|
||||
"editor.formatOnSaveMode": "modificationsIfAvailable"
|
||||
},
|
||||
"[markdown]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode",
|
||||
"editor.rulers": [80],
|
||||
"editor.unicodeHighlight.ambiguousCharacters": false,
|
||||
"editor.unicodeHighlight.invisibleCharacters": false,
|
||||
"diffEditor.ignoreTrimWhitespace": false,
|
||||
"editor.wordWrap": "on",
|
||||
"editor.quickSuggestions": {
|
||||
"comments": "off",
|
||||
"strings": "off",
|
||||
"other": "off"
|
||||
},
|
||||
"editor.formatOnSave": true,
|
||||
"editor.formatOnSaveMode": "modificationsIfAvailable"
|
||||
},
|
||||
"[shellscript]": {
|
||||
"editor.defaultFormatter": "foxundermoon.shell-format"
|
||||
},
|
||||
"[ignore]": {
|
||||
"editor.defaultFormatter": "foxundermoon.shell-format"
|
||||
},
|
||||
"editor.rulers": [88],
|
||||
"evenBetterToml.formatter.alignEntries": false,
|
||||
"evenBetterToml.formatter.allowedBlankLines": 1,
|
||||
"evenBetterToml.formatter.arrayAutoExpand": true,
|
||||
"evenBetterToml.formatter.arrayTrailingComma": true,
|
||||
"evenBetterToml.formatter.arrayAutoCollapse": true,
|
||||
"evenBetterToml.formatter.columnWidth": 88,
|
||||
"evenBetterToml.formatter.compactArrays": true,
|
||||
"evenBetterToml.formatter.compactInlineTables": true,
|
||||
"evenBetterToml.formatter.indentEntries": false,
|
||||
"evenBetterToml.formatter.inlineTableExpand": true,
|
||||
"evenBetterToml.formatter.reorderArrays": true,
|
||||
"evenBetterToml.formatter.reorderKeys": true,
|
||||
"evenBetterToml.formatter.compactEntries": false,
|
||||
"evenBetterToml.schema.enabled": true,
|
||||
"python.analysis.typeCheckingMode": "basic",
|
||||
"python.formatting.provider": "black",
|
||||
"python.languageServer": "Pylance",
|
||||
"python.linting.enabled": true,
|
||||
"python.linting.flake8Enabled": true,
|
||||
"python.testing.unittestEnabled": false,
|
||||
"python.testing.pytestEnabled": true,
|
||||
"python.testing.pytestArgs": [
|
||||
"tests",
|
||||
"--cov=ldm",
|
||||
"--cov-branch",
|
||||
"--cov-report=term:skip-covered"
|
||||
],
|
||||
"yaml.schemas": {
|
||||
"https://json.schemastore.org/prettierrc.json": "${workspaceFolder}/.prettierrc.yaml"
|
||||
}
|
||||
}
|
||||
```
|
||||
docs/help/contributing/010_PULL_REQUEST.md (new file, 135 lines)
@@ -0,0 +1,135 @@

---
title: Pull-Request
---

# :octicons-git-pull-request-16: Pull-Request

## Pre-requirements

To follow the steps in this tutorial you will need:

- A [GitHub](https://github.com) account
- [git](https://git-scm.com/downloads) source control
- A text / code editor (personally I prefer
  [Visual Studio Code](https://code.visualstudio.com/Download))
- A terminal:
    - If you are on Linux/macOS you can use bash or zsh
    - For Windows users the commands are written for PowerShell

## Fork Repository

The first step, if you want to contribute to InvokeAI, is to fork the
repository.

Since you are already reading this doc, the easiest way to do so is by clicking
[here](https://github.com/invoke-ai/InvokeAI/fork). You could also open
[InvokeAI](https://github.com/invoke-ai/InvokeAI) and click on the "Fork" button
in the top right.
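If you already use the GitHub CLI (this guide uses it later for `gh pr create`), forking and cloning can also be done in a single step; this is an optional shortcut that combines this section and the next one:

```bash
# fork invoke-ai/InvokeAI under your own account and clone the fork locally
gh repo fork invoke-ai/InvokeAI --clone
```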
## Clone your fork
|
||||
|
||||
After you forked the Repository, you should clone it to your dev machine:
|
||||
|
||||
=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
|
||||
|
||||
``` sh
|
||||
git clone https://github.com/<github username>/InvokeAI \
|
||||
&& cd InvokeAI
|
||||
```
|
||||
|
||||
=== ":fontawesome-brands-windows:Windows"
|
||||
|
||||
``` powershell
|
||||
git clone https://github.com/<github username>/InvokeAI `
|
||||
&& cd InvokeAI
|
||||
```
|
||||
|
||||
## Install in Editable Mode

To install InvokeAI in editable mode, we recommend (as always) to create and
activate a venv first. Afterwards you can install the InvokeAI package,
including the dev and docs extras, in editable mode, followed by the
installation of the pre-commit hook:

=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"

    ``` sh
    python -m venv .venv \
        --prompt InvokeAI \
        --upgrade-deps \
        && source .venv/bin/activate \
        && pip install \
            --upgrade \
            --use-pep517 \
            --editable=".[dev,docs]" \
        && pre-commit install
    ```

=== ":fontawesome-brands-windows:Windows"

    ``` powershell
    python -m venv .venv `
        --prompt InvokeAI `
        --upgrade-deps `
        && .venv/scripts/activate.ps1 `
        && pip install `
            --upgrade `
            --use-pep517 `
            --editable=".[dev,docs]" `
        && pre-commit install
    ```
|
||||
|
||||
## Create a branch
|
||||
|
||||
Make sure you are on main branch, from there create your feature branch:
|
||||
|
||||
=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
|
||||
|
||||
``` sh
|
||||
git checkout main \
|
||||
&& git pull \
|
||||
&& git checkout -B <branch name>
|
||||
```
|
||||
|
||||
=== ":fontawesome-brands-windows:Windows"
|
||||
|
||||
``` powershell
|
||||
git checkout main `
|
||||
&& git pull `
|
||||
&& git checkout -B <branch name>
|
||||
```
|
||||
|
||||
## Commit your changes

When you are done adding / updating content, you need to commit those
changes to your repository before you can actually open a PR:

```{ .sh .annotate }
git add <files you have changed> # (1)!
git commit -m "A commit message which describes your change"
git push
```

1. Replace this with a space-separated list of the files you changed, like:
   `README.md foo.sh bar.json baz`
## Create a Pull Request

After pushing your changes, you are ready to create a Pull Request. Just head
over to your fork on [GitHub](https://github.com), which should already show you
a message that there have been recent changes on your feature branch and a green
button which you can use to create the PR.

The default target for your PRs is the main branch of
[invoke-ai/InvokeAI](https://github.com/invoke-ai/InvokeAI).

Another way is to create it in VS-Code or via the GitHub CLI (or even via
the GitHub CLI in a VS-Code terminal window 🤭):

```sh
gh pr create
```

The CLI will inform you if there are still unpushed commits on your branch. It
will also prompt you for things like the Title and the Body (Description) if
you did not already pass them as arguments.
docs/help/contributing/020_ISSUES.md (new file, 26 lines)
@@ -0,0 +1,26 @@

---
title: Issues
---

# :octicons-issue-opened-16: Issues

## :fontawesome-solid-bug: Report a bug

If you stumbled over a bug while using InvokeAI, we would appreciate it a lot if
you
[open an issue](https://github.com/invoke-ai/InvokeAI/issues/new?assignees=&labels=bug&template=BUG_REPORT.yml&title=%5Bbug%5D%3A+)
to inform us about the details so that our developers can look into it.

If you also know how to fix the bug, take a look [here](010_PULL_REQUEST.md) to
find out how to create a Pull Request.

## Request a feature

If you have an idea for a new feature which you would like to see in
InvokeAI, there is a
[feature request](https://github.com/invoke-ai/InvokeAI/issues/new?assignees=&labels=bug&template=BUG_REPORT.yml&title=%5Bbug%5D%3A+)
available in the issues section of the repository.

If you are just curious which features have already been requested, you can find
the overview of open requests
[here](https://github.com/invoke-ai/InvokeAI/labels/enhancement)
docs/help/contributing/030_DOCS.md (new file, 32 lines)
@@ -0,0 +1,32 @@

---
title: docs
---

# :simple-readthedocs: MkDocs-Material

If you want to contribute to the docs, there is an easy way to verify the results
of your changes before committing them.

Just follow the steps in the [Pull-Requests](010_PULL_REQUEST.md) docs, where we
already
[create a venv and install the docs extras](010_PULL_REQUEST.md#install-in-editable-mode).
Once that is installed, it's as simple as:

```sh
mkdocs serve
```

This will build the docs locally and serve them on your local host. Auto-refresh
is included, so you can just update a doc, save it and tab over to the browser,
without needing to restart `mkdocs serve`.
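If the default port is already in use on your machine, `mkdocs serve` also accepts a custom dev address; for example:

```bash
# serve the docs on a different port (the default is 127.0.0.1:8000)
mkdocs serve --dev-addr 127.0.0.1:8001
```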
More information about the "mkdocs flavored markdown syntax" can be found
[here](https://squidfunk.github.io/mkdocs-material/reference/).

## :material-microsoft-visual-studio-code:VS-Code

We also provide a
[launch configuration for VS-Code](../IDE-Settings/vs-code.md#launchjson) which
includes a `mkdocs serve` entrypoint as well. You also don't have to worry about
the formatting, since this is automated via prettier, but this is of course not
limited to VS-Code.
docs/help/contributing/090_NODE_TRANSFORMATION.md (new file, 76 lines)
@@ -0,0 +1,76 @@

# Transformation to nodes
|
||||
|
||||
## Current state
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
web[WebUI];
|
||||
cli[CLI];
|
||||
web --> |img2img| generate(generate);
|
||||
web --> |txt2img| generate(generate);
|
||||
cli --> |txt2img| generate(generate);
|
||||
cli --> |img2img| generate(generate);
|
||||
generate --> model_manager;
|
||||
generate --> generators;
|
||||
generate --> ti_manager[TI Manager];
|
||||
generate --> etc;
|
||||
```
|
||||
|
||||
## Transitional Architecture
|
||||
|
||||
### first step
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
web[WebUI];
|
||||
cli[CLI];
|
||||
web --> |img2img| img2img_node(Img2img node);
|
||||
web --> |txt2img| generate(generate);
|
||||
img2img_node --> model_manager;
|
||||
img2img_node --> generators;
|
||||
cli --> |txt2img| generate;
|
||||
cli --> |img2img| generate;
|
||||
generate --> model_manager;
|
||||
generate --> generators;
|
||||
generate --> ti_manager[TI Manager];
|
||||
generate --> etc;
|
||||
```
|
||||
|
||||
### second step
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
web[WebUI];
|
||||
cli[CLI];
|
||||
web --> |img2img| img2img_node(img2img node);
|
||||
img2img_node --> model_manager;
|
||||
img2img_node --> generators;
|
||||
web --> |txt2img| txt2img_node(txt2img node);
|
||||
cli --> |txt2img| txt2img_node;
|
||||
cli --> |img2img| generate(generate);
|
||||
generate --> model_manager;
|
||||
generate --> generators;
|
||||
generate --> ti_manager[TI Manager];
|
||||
generate --> etc;
|
||||
txt2img_node --> model_manager;
|
||||
txt2img_node --> generators;
|
||||
txt2img_node --> ti_manager[TI Manager];
|
||||
```
|
||||
|
||||
## Final Architecture
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
web[WebUI];
|
||||
cli[CLI];
|
||||
web --> |img2img|img2img_node(img2img node);
|
||||
cli --> |img2img|img2img_node;
|
||||
web --> |txt2img|txt2img_node(txt2img node);
|
||||
cli --> |txt2img|txt2img_node;
|
||||
img2img_node --> model_manager;
|
||||
txt2img_node --> model_manager;
|
||||
img2img_node --> generators;
|
||||
txt2img_node --> generators;
|
||||
img2img_node --> ti_manager[TI Manager];
|
||||
txt2img_node --> ti_manager[TI Manager];
|
||||
```
|
||||
docs/help/contributing/index.md (new file, 16 lines)
@@ -0,0 +1,16 @@

---
title: Contributing
---

# :fontawesome-solid-code-commit: Contributing

There are different ways you can contribute to
[InvokeAI](https://github.com/invoke-ai/InvokeAI), such as translations, or
opening issues for bugs or ideas for improvement.

This section of the docs explains some of those ways, to make contributing
easier for newcomers as well as advanced users :nerd:

If you want to contribute code, but you do not have an exact idea yet, take a
look at the currently open
[:fontawesome-solid-bug: Bug Reports](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
docs/help/index.md (new file, 12 lines)
@@ -0,0 +1,12 @@

# :material-help:Help

If you are looking for help with the installation of InvokeAI, please take a
look at the [Installation](../installation/index.md) section of the docs.

Here you will find help with topics like

- how to contribute
- configuration recommendations for IDEs

If you have an idea about what's missing and aren't scared of contributing,
just take a look at [DOCS](./contributing/030_DOCS.md) to find out how to do so.
@@ -1,19 +0,0 @@
|
||||
<!-- HTML for static distribution bundle build -->
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<title>Swagger UI</title>
|
||||
<link rel="stylesheet" type="text/css" href="swagger-ui/swagger-ui.css" />
|
||||
<link rel="stylesheet" type="text/css" href="swagger-ui/index.css" />
|
||||
<link rel="icon" type="image/png" href="swagger-ui/favicon-32x32.png" sizes="32x32" />
|
||||
<link rel="icon" type="image/png" href="swagger-ui/favicon-16x16.png" sizes="16x16" />
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div id="swagger-ui"></div>
|
||||
<script src="swagger-ui/swagger-ui-bundle.js" charset="UTF-8"> </script>
|
||||
<script src="swagger-ui/swagger-ui-standalone-preset.js" charset="UTF-8"> </script>
|
||||
<script src="swagger-ui/swagger-initializer.js" charset="UTF-8"> </script>
|
||||
</body>
|
||||
</html>
|
||||
docs/index.md (295 lines changed)
@@ -2,6 +2,8 @@
|
||||
title: Home
|
||||
---
|
||||
|
||||
# :octicons-home-16: Home
|
||||
|
||||
<!--
|
||||
The Docs you find here (/docs/*) are built and deployed via mkdocs. If you want to run a local version to verify your changes, it's as simple as::
|
||||
|
||||
@@ -29,36 +31,36 @@ title: Home
|
||||
[![github open prs badge]][github open prs link]
|
||||
|
||||
[ci checks on dev badge]:
|
||||
https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github
|
||||
https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github
|
||||
[ci checks on dev link]:
|
||||
https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
|
||||
https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
|
||||
[ci checks on main badge]:
|
||||
https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
|
||||
https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
|
||||
[ci checks on main link]:
|
||||
https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
|
||||
https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
|
||||
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
|
||||
[discord link]: https://discord.gg/ZmtBAhwWhy
|
||||
[github forks badge]:
|
||||
https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
|
||||
https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
|
||||
[github forks link]:
|
||||
https://useful-forks.github.io/?repo=lstein%2Fstable-diffusion
|
||||
https://useful-forks.github.io/?repo=lstein%2Fstable-diffusion
|
||||
[github open issues badge]:
|
||||
https://flat.badgen.net/github/open-issues/invoke-ai/InvokeAI?icon=github
|
||||
https://flat.badgen.net/github/open-issues/invoke-ai/InvokeAI?icon=github
|
||||
[github open issues link]:
|
||||
https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen
|
||||
https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen
|
||||
[github open prs badge]:
|
||||
https://flat.badgen.net/github/open-prs/invoke-ai/InvokeAI?icon=github
|
||||
https://flat.badgen.net/github/open-prs/invoke-ai/InvokeAI?icon=github
|
||||
[github open prs link]:
|
||||
https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen
|
||||
https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen
|
||||
[github stars badge]:
|
||||
https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
|
||||
https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
|
||||
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
|
||||
[latest commit to dev badge]:
|
||||
https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
|
||||
https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
|
||||
[latest commit to dev link]:
|
||||
https://github.com/invoke-ai/InvokeAI/commits/development
|
||||
https://github.com/invoke-ai/InvokeAI/commits/development
|
||||
[latest release badge]:
|
||||
https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
|
||||
https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
|
||||
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
|
||||
|
||||
</div>
|
||||
@@ -87,24 +89,24 @@ Q&A</a>]
|
||||
|
||||
You will need one of the following:
|
||||
|
||||
- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
|
||||
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
|
||||
only)
|
||||
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
|
||||
- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
|
||||
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
|
||||
only)
|
||||
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
|
||||
|
||||
We do **not recommend** the following video cards due to issues with their
|
||||
running in half-precision mode and having insufficient VRAM to render 512x512
|
||||
images in full-precision mode:
|
||||
|
||||
- NVIDIA 10xx series cards such as the 1080ti
|
||||
- GTX 1650 series cards
|
||||
- GTX 1660 series cards
|
||||
- NVIDIA 10xx series cards such as the 1080ti
|
||||
- GTX 1650 series cards
|
||||
- GTX 1660 series cards
|
||||
|
||||
### :fontawesome-solid-memory: Memory and Disk
|
||||
|
||||
- At least 12 GB Main Memory RAM.
|
||||
- At least 18 GB of free disk space for the machine learning model, Python, and
|
||||
all its dependencies.
|
||||
- At least 12 GB Main Memory RAM.
|
||||
- At least 18 GB of free disk space for the machine learning model, Python,
|
||||
and all its dependencies.
|
||||
|
||||
## :octicons-package-dependencies-24: Installation
|
||||
|
||||
@@ -113,48 +115,65 @@ either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
|
||||
driver).
|
||||
|
||||
### [Installation Getting Started Guide](installation)
|
||||
|
||||
#### [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
|
||||
|
||||
This method is recommended for 1st time users
|
||||
|
||||
#### [Manual Installation](installation/020_INSTALL_MANUAL.md)
|
||||
|
||||
This method is recommended for experienced users and developers
|
||||
|
||||
#### [Docker Installation](installation/040_INSTALL_DOCKER.md)
|
||||
|
||||
This method is recommended for those familiar with running Docker containers
|
||||
|
||||
### Other Installation Guides
|
||||
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
|
||||
- [XFormers](installation/070_INSTALL_XFORMERS.md)
|
||||
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
|
||||
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
|
||||
|
||||
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
|
||||
- [XFormers](installation/070_INSTALL_XFORMERS.md)
|
||||
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
|
||||
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
|
||||
|
||||
## :octicons-gift-24: InvokeAI Features
|
||||
|
||||
### The InvokeAI Web Interface
|
||||
- [WebUI overview](features/WEB.md)
|
||||
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
|
||||
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
|
||||
|
||||
- [WebUI overview](features/WEB.md)
|
||||
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
|
||||
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
|
||||
<!-- separator -->
|
||||
|
||||
### The InvokeAI Command Line Interface
|
||||
- [Command Line Interace Reference Guide](features/CLI.md)
|
||||
|
||||
- [Command Line Interace Reference Guide](features/CLI.md)
|
||||
<!-- separator -->
|
||||
|
||||
### Image Management
|
||||
- [Image2Image](features/IMG2IMG.md)
|
||||
- [Inpainting](features/INPAINTING.md)
|
||||
- [Outpainting](features/OUTPAINTING.md)
|
||||
- [Adding custom styles and subjects](features/CONCEPTS.md)
|
||||
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
|
||||
- [Embiggen upscaling](features/EMBIGGEN.md)
|
||||
- [Other Features](features/OTHER.md)
|
||||
|
||||
- [Image2Image](features/IMG2IMG.md)
|
||||
- [Inpainting](features/INPAINTING.md)
|
||||
- [Outpainting](features/OUTPAINTING.md)
|
||||
- [Adding custom styles and subjects](features/CONCEPTS.md)
|
||||
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
|
||||
- [Embiggen upscaling](features/EMBIGGEN.md)
|
||||
- [Other Features](features/OTHER.md)
|
||||
|
||||
<!-- separator -->
|
||||
|
||||
### Model Management
|
||||
- [Installing](installation/050_INSTALLING_MODELS.md)
|
||||
- [Model Merging](features/MODEL_MERGING.md)
|
||||
- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
|
||||
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
|
||||
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
|
||||
|
||||
- [Installing](installation/050_INSTALLING_MODELS.md)
|
||||
- [Model Merging](features/MODEL_MERGING.md)
|
||||
- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
|
||||
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
|
||||
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
|
||||
<!-- seperator -->
|
||||
|
||||
### Prompt Engineering
|
||||
- [Prompt Syntax](features/PROMPTS.md)
|
||||
- [Generating Variations](features/VARIATIONS.md)
|
||||
|
||||
- [Prompt Syntax](features/PROMPTS.md)
|
||||
- [Generating Variations](features/VARIATIONS.md)
|
||||
|
||||
## :octicons-log-16: Latest Changes
|
||||
|
||||
@@ -162,84 +181,188 @@ This method is recommended for those familiar with running Docker containers
|
||||
|
||||
#### Migration to Stable Diffusion `diffusers` models
|
||||
|
||||
Previous versions of InvokeAI supported the original model file format introduced with Stable Diffusion 1.4. In the original format, known variously as "checkpoint", or "legacy" format, there is a single large weights file ending with `.ckpt` or `.safetensors`. Though this format has served the community well, it has a number of disadvantages, including file size, slow loading times, and a variety of non-standard variants that require special-case code to handle. In addition, because checkpoint files are actually a bundle of multiple machine learning sub-models, it is hard to swap different sub-models in and out, or to share common sub-models. A new format, introduced by the StabilityAI company in collaboration with HuggingFace, is called `diffusers` and consists of a directory of individual models. The most immediate benefit of `diffusers` is that they load from disk very quickly. A longer term benefit is that in the near future `diffusers` models will be able to share common sub-models, dramatically reducing disk space when you have multiple fine-tune models derived from the same base.
|
||||
Previous versions of InvokeAI supported the original model file format
|
||||
introduced with Stable Diffusion 1.4. In the original format, known variously as
|
||||
"checkpoint", or "legacy" format, there is a single large weights file ending
|
||||
with `.ckpt` or `.safetensors`. Though this format has served the community
|
||||
well, it has a number of disadvantages, including file size, slow loading times,
|
||||
and a variety of non-standard variants that require special-case code to handle.
|
||||
In addition, because checkpoint files are actually a bundle of multiple machine
|
||||
learning sub-models, it is hard to swap different sub-models in and out, or to
|
||||
share common sub-models. A new format, introduced by the StabilityAI company in
|
||||
collaboration with HuggingFace, is called `diffusers` and consists of a
|
||||
directory of individual models. The most immediate benefit of `diffusers` is
|
||||
that they load from disk very quickly. A longer term benefit is that in the near
|
||||
future `diffusers` models will be able to share common sub-models, dramatically
|
||||
reducing disk space when you have multiple fine-tune models derived from the
|
||||
same base.
|
||||
|
||||
When you perform a new install of version 2.3.0, you will be offered the option to install the `diffusers` versions of a number of popular SD models, including Stable Diffusion versions 1.5 and 2.1 (including the 768x768 pixel version of 2.1). These will act and work just like the checkpoint versions. Do not be concerned if you already have a lot of ".ckpt" or ".safetensors" models on disk! InvokeAI 2.3.0 can still load these and generate images from them without any extra intervention on your part.
|
||||
When you perform a new install of version 2.3.0, you will be offered the option
|
||||
to install the `diffusers` versions of a number of popular SD models, including
|
||||
Stable Diffusion versions 1.5 and 2.1 (including the 768x768 pixel version of
|
||||
2.1). These will act and work just like the checkpoint versions. Do not be
|
||||
concerned if you already have a lot of ".ckpt" or ".safetensors" models on disk!
|
||||
InvokeAI 2.3.0 can still load these and generate images from them without any
|
||||
extra intervention on your part.
|
||||
|
||||
To take advantage of the optimized loading times of `diffusers` models, InvokeAI offers options to convert legacy checkpoint models into optimized `diffusers` models. If you use the `invokeai` command line interface, the relevant commands are:
|
||||
To take advantage of the optimized loading times of `diffusers` models, InvokeAI
|
||||
offers options to convert legacy checkpoint models into optimized `diffusers`
|
||||
models. If you use the `invokeai` command line interface, the relevant commands
|
||||
are:
|
||||
|
||||
* `!convert_model` -- Take the path to a local checkpoint file or a URL that is pointing to one, convert it into a `diffusers` model, and import it into InvokeAI's models registry file.
|
||||
* `!optimize_model` -- If you already have a checkpoint model in your InvokeAI models file, this command will accept its short name and convert it into a like-named `diffusers` model, optionally deleting the original checkpoint file.
|
||||
* `!import_model` -- Take the local path of either a checkpoint file or a `diffusers` model directory and import it into InvokeAI's registry file. You may also provide the ID of any diffusers model that has been published on the [HuggingFace models repository](https://huggingface.co/models?pipeline_tag=text-to-image&sort=downloads) and it will be downloaded and installed automatically.
|
||||
- `!convert_model` -- Take the path to a local checkpoint file or a URL that
|
||||
is pointing to one, convert it into a `diffusers` model, and import it into
|
||||
InvokeAI's models registry file.
|
||||
- `!optimize_model` -- If you already have a checkpoint model in your InvokeAI
|
||||
models file, this command will accept its short name and convert it into a
|
||||
like-named `diffusers` model, optionally deleting the original checkpoint
|
||||
file.
|
||||
- `!import_model` -- Take the local path of either a checkpoint file or a
|
||||
`diffusers` model directory and import it into InvokeAI's registry file. You
|
||||
may also provide the ID of any diffusers model that has been published on
|
||||
the
|
||||
[HuggingFace models repository](https://huggingface.co/models?pipeline_tag=text-to-image&sort=downloads)
|
||||
and it will be downloaded and installed automatically.
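As a sketch, inside the `invokeai` command-line client these commands look roughly like this (the paths and model names are placeholders):

```bash
invokeai
invoke> !convert_model /path/to/some-model.ckpt
invoke> !optimize_model my-old-checkpoint
invoke> !import_model runwayml/stable-diffusion-v1-5
```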
|
||||
|
||||
The WebGUI offers similar functionality for model management.
|
||||
|
||||
For advanced users, new command-line options provide additional functionality. Launching `invokeai` with the argument `--autoconvert <path to directory>` takes the path to a directory of checkpoint files, automatically converts them into `diffusers` models and imports them. Each time the script is launched, the directory will be scanned for new checkpoint files to be loaded. Alternatively, the `--ckpt_convert` argument will cause any checkpoint or safetensors model that is already registered with InvokeAI to be converted into a `diffusers` model on the fly, allowing you to take advantage of future diffusers-only features without explicitly converting the model and saving it to disk.
|
||||
For advanced users, new command-line options provide additional functionality.
|
||||
Launching `invokeai` with the argument `--autoconvert <path to directory>` takes
|
||||
the path to a directory of checkpoint files, automatically converts them into
|
||||
`diffusers` models and imports them. Each time the script is launched, the
|
||||
directory will be scanned for new checkpoint files to be loaded. Alternatively,
|
||||
the `--ckpt_convert` argument will cause any checkpoint or safetensors model
|
||||
that is already registered with InvokeAI to be converted into a `diffusers`
|
||||
model on the fly, allowing you to take advantage of future diffusers-only
|
||||
features without explicitly converting the model and saving it to disk.
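For example (the directory path below is a placeholder):

```bash
# scan a directory of checkpoint files and convert them to diffusers models at launch
invokeai --autoconvert /path/to/my/checkpoints

# or convert already-registered checkpoint/safetensors models on the fly
invokeai --ckpt_convert
```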
|
||||
|
||||
Please see [INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/) for more information on model management in both the command-line and Web interfaces.
|
||||
Please see
|
||||
[INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/)
|
||||
for more information on model management in both the command-line and Web
|
||||
interfaces.
|
||||
|
||||
#### Support for the `XFormers` Memory-Efficient Crossattention Package
|
||||
|
||||
On CUDA (Nvidia) systems, version 2.3.0 supports the `XFormers` library. Once installed, the`xformers` package dramatically reduces the memory footprint of loaded Stable Diffusion models files and modestly increases image generation speed. `xformers` will be installed and activated automatically if you specify a CUDA system at install time.
|
||||
On CUDA (Nvidia) systems, version 2.3.0 supports the `XFormers` library. Once
|
||||
installed, the`xformers` package dramatically reduces the memory footprint of
|
||||
loaded Stable Diffusion models files and modestly increases image generation
|
||||
speed. `xformers` will be installed and activated automatically if you specify a
|
||||
CUDA system at install time.
|
||||
|
||||
The caveat with using `xformers` is that it introduces slightly non-deterministic behavior, and images generated using the same seed and other settings will be subtly different between invocations. Generally the changes are unnoticeable unless you rapidly shift back and forth between images, but to disable `xformers` and restore fully deterministic behavior, you may launch InvokeAI using the `--no-xformers` option. This is most conveniently done by opening the file `invokeai/invokeai.init` with a text editor, and adding the line `--no-xformers` at the bottom.
|
||||
The caveat with using `xformers` is that it introduces slightly
|
||||
non-deterministic behavior, and images generated using the same seed and other
|
||||
settings will be subtly different between invocations. Generally the changes are
|
||||
unnoticeable unless you rapidly shift back and forth between images, but to
|
||||
disable `xformers` and restore fully deterministic behavior, you may launch
|
||||
InvokeAI using the `--no-xformers` option. This is most conveniently done by
|
||||
opening the file `invokeai/invokeai.init` with a text editor, and adding the
|
||||
line `--no-xformers` at the bottom.
|
||||
|
||||
#### A Negative Prompt Box in the WebUI
|
||||
|
||||
There is now a separate text input box for negative prompts in the WebUI. This is convenient for stashing frequently-used negative prompts ("mangled limbs, bad anatomy"). The `[negative prompt]` syntax continues to work in the main prompt box as well.
|
||||
There is now a separate text input box for negative prompts in the WebUI. This
|
||||
is convenient for stashing frequently-used negative prompts ("mangled limbs, bad
|
||||
anatomy"). The `[negative prompt]` syntax continues to work in the main prompt
|
||||
box as well.
|
||||
|
||||
To see exactly how your prompts are being parsed, launch `invokeai` with the `--log_tokenization` option. The console window will then display the tokenization process for both positive and negative prompts.
|
||||
To see exactly how your prompts are being parsed, launch `invokeai` with the
|
||||
`--log_tokenization` option. The console window will then display the
|
||||
tokenization process for both positive and negative prompts.
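For example, this is all it takes to turn the feature on for a session:

```bash
# show how the positive and [negative] prompts are tokenized in the console
invokeai --log_tokenization
```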
|
||||
|
||||
#### Model Merging
|
||||
|
||||
Version 2.3.0 offers an intuitive user interface for merging up to three Stable Diffusion models using an intuitive user interface. Model merging allows you to mix the behavior of models to achieve very interesting effects. To use this, each of the models must already be imported into InvokeAI and saved in `diffusers` format, then launch the merger using a new menu item in the InvokeAI launcher script (`invoke.sh`, `invoke.bat`) or directly from the command line with `invokeai-merge --gui`. You will be prompted to select the models to merge, the proportions in which to mix them, and the mixing algorithm. The script will create a new merged `diffusers` model and import it into InvokeAI for your use.
|
||||
Version 2.3.0 offers an intuitive user interface for merging up to three Stable
|
||||
Diffusion models using an intuitive user interface. Model merging allows you to
|
||||
mix the behavior of models to achieve very interesting effects. To use this,
|
||||
each of the models must already be imported into InvokeAI and saved in
|
||||
`diffusers` format, then launch the merger using a new menu item in the InvokeAI
|
||||
launcher script (`invoke.sh`, `invoke.bat`) or directly from the command line
|
||||
with `invokeai-merge --gui`. You will be prompted to select the models to merge,
|
||||
the proportions in which to mix them, and the mixing algorithm. The script will
|
||||
create a new merged `diffusers` model and import it into InvokeAI for your use.
|
||||
|
||||
See [MODEL MERGING](https://invoke-ai.github.io/InvokeAI/features/MODEL_MERGING/) for more details.
|
||||
See
|
||||
[MODEL MERGING](https://invoke-ai.github.io/InvokeAI/features/MODEL_MERGING/)
|
||||
for more details.
|
||||
|
||||
#### Textual Inversion Training
|
||||
|
||||
Textual Inversion (TI) is a technique for training a Stable Diffusion model to emit a particular subject or style when triggered by a keyword phrase. You can perform TI training by placing a small number of images of the subject or style in a directory, and choosing a distinctive trigger phrase, such as "pointillist-style". After successful training, The subject or style will be activated by including `<pointillist-style>` in your prompt.
|
||||
Textual Inversion (TI) is a technique for training a Stable Diffusion model to
|
||||
emit a particular subject or style when triggered by a keyword phrase. You can
|
||||
perform TI training by placing a small number of images of the subject or style
|
||||
in a directory, and choosing a distinctive trigger phrase, such as
|
||||
"pointillist-style". After successful training, The subject or style will be
|
||||
activated by including `<pointillist-style>` in your prompt.
|
||||
|
||||
Previous versions of InvokeAI were able to perform TI, but it required using a command-line script with dozens of obscure command-line arguments. Version 2.3.0 features an intuitive TI frontend that will build a TI model on top of any `diffusers` model. To access training you can launch from a new item in the launcher script or from the command line using `invokeai-ti --gui`.
|
||||
Previous versions of InvokeAI were able to perform TI, but it required using a
|
||||
command-line script with dozens of obscure command-line arguments. Version 2.3.0
|
||||
features an intuitive TI frontend that will build a TI model on top of any
|
||||
`diffusers` model. To access training you can launch from a new item in the
|
||||
launcher script or from the command line using `invokeai-ti --gui`.
|
||||
|
||||
See [TEXTUAL INVERSION](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/) for further details.
|
||||
See
|
||||
[TEXTUAL INVERSION](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/)
|
||||
for further details.
|
||||
|
||||
#### A New Installer Experience
|
||||
|
||||
The InvokeAI installer has been upgraded in order to provide a smoother and hopefully more glitch-free experience. In addition, InvokeAI is now packaged as a PyPi project, allowing developers and power-users to install InvokeAI with the command `pip install InvokeAI --use-pep517`. Please see [Installation](#installation) for details.
|
||||
The InvokeAI installer has been upgraded in order to provide a smoother and
|
||||
hopefully more glitch-free experience. In addition, InvokeAI is now packaged as
|
||||
a PyPi project, allowing developers and power-users to install InvokeAI with the
|
||||
command `pip install InvokeAI --use-pep517`. Please see
|
||||
[Installation](#installation) for details.
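A minimal sketch of that installation path; the virtual environment creation is an assumption on my part rather than something the release notes prescribe:

```bash
# create and activate an isolated environment, then install from PyPI
python -m venv invokeai-venv --prompt InvokeAI
source invokeai-venv/bin/activate
pip install InvokeAI --use-pep517
```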
|
||||
|
||||
Developers should be aware that the `pip` installation procedure has been simplified and that the `conda` method is no longer supported at all. Accordingly, the `environments_and_requirements` directory has been deleted from the repository.
|
||||
Developers should be aware that the `pip` installation procedure has been
|
||||
simplified and that the `conda` method is no longer supported at all.
|
||||
Accordingly, the `environments_and_requirements` directory has been deleted from
|
||||
the repository.
|
||||
|
||||
#### Command-line name changes
|
||||
|
||||
All of InvokeAI's functionality, including the WebUI, command-line interface, textual inversion training and model merging, can all be accessed from the `invoke.sh` and `invoke.bat` launcher scripts. The menu of options has been expanded to add the new functionality. For the convenience of developers and power users, we have normalized the names of the InvokeAI command-line scripts:
|
||||
All of InvokeAI's functionality, including the WebUI, command-line interface,
|
||||
textual inversion training and model merging, can all be accessed from the
|
||||
`invoke.sh` and `invoke.bat` launcher scripts. The menu of options has been
|
||||
expanded to add the new functionality. For the convenience of developers and
|
||||
power users, we have normalized the names of the InvokeAI command-line scripts:
|
||||
|
||||
* `invokeai` -- Command-line client
|
||||
* `invokeai --web` -- Web GUI
|
||||
* `invokeai-merge --gui` -- Model merging script with graphical front end
|
||||
* `invokeai-ti --gui` -- Textual inversion script with graphical front end
|
||||
* `invokeai-configure` -- Configuration tool for initializing the `invokeai` directory and selecting popular starter models.
|
||||
- `invokeai` -- Command-line client
|
||||
- `invokeai --web` -- Web GUI
|
||||
- `invokeai-merge --gui` -- Model merging script with graphical front end
|
||||
- `invokeai-ti --gui` -- Textual inversion script with graphical front end
|
||||
- `invokeai-configure` -- Configuration tool for initializing the `invokeai`
|
||||
directory and selecting popular starter models.
|
||||
|
||||
For backward compatibility, the old command names are also recognized, including `invoke.py` and `configure-invokeai.py`. However, these are deprecated and will eventually be removed.
|
||||
For backward compatibility, the old command names are also recognized, including
|
||||
`invoke.py` and `configure-invokeai.py`. However, these are deprecated and will
|
||||
eventually be removed.
|
||||
|
||||
Developers should be aware that the locations of the script's source code has been moved. The new locations are:
|
||||
* `invokeai` => `ldm/invoke/CLI.py`
|
||||
* `invokeai-configure` => `ldm/invoke/config/configure_invokeai.py`
|
||||
* `invokeai-ti`=> `ldm/invoke/training/textual_inversion.py`
|
||||
* `invokeai-merge` => `ldm/invoke/merge_diffusers`
|
||||
Developers should be aware that the locations of the script's source code has
|
||||
been moved. The new locations are:
|
||||
|
||||
Developers are strongly encouraged to perform an "editable" install of InvokeAI using `pip install -e . --use-pep517` in the Git repository, and then to call the scripts using their 2.3.0 names, rather than executing the scripts directly. Developers should also be aware that the several important data files have been relocated into a new directory named `invokeai`. This includes the WebGUI's `frontend` and `backend` directories, and the `INITIAL_MODELS.yaml` files used by the installer to select starter models. Eventually all InvokeAI modules will be in subdirectories of `invokeai`.
|
||||
- `invokeai` => `ldm/invoke/CLI.py`
|
||||
- `invokeai-configure` => `ldm/invoke/config/configure_invokeai.py`
|
||||
- `invokeai-ti`=> `ldm/invoke/training/textual_inversion.py`
|
||||
- `invokeai-merge` => `ldm/invoke/merge_diffusers`
|
||||
|
||||
Please see [2.3.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.3.0) for further details.
|
||||
For older changelogs, please visit the
|
||||
Developers are strongly encouraged to perform an "editable" install of InvokeAI
|
||||
using `pip install -e . --use-pep517` in the Git repository, and then to call
|
||||
the scripts using their 2.3.0 names, rather than executing the scripts directly.
|
||||
Developers should also be aware that the several important data files have been
|
||||
relocated into a new directory named `invokeai`. This includes the WebGUI's
|
||||
`frontend` and `backend` directories, and the `INITIAL_MODELS.yaml` files used
|
||||
by the installer to select starter models. Eventually all InvokeAI modules will
|
||||
be in subdirectories of `invokeai`.
|
||||
|
||||
Please see
|
||||
[2.3.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.3.0)
|
||||
for further details. For older changelogs, please visit the
|
||||
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.
|
||||
|
||||
## :material-target: Troubleshooting
|
||||
|
||||
Please check out our **[:material-frequently-asked-questions:
|
||||
Troubleshooting
|
||||
Guide](installation/010_INSTALL_AUTOMATED.md#troubleshooting)** to
|
||||
get solutions for common installation problems and other issues.
|
||||
Please check out our
|
||||
**[:material-frequently-asked-questions: Troubleshooting Guide](installation/010_INSTALL_AUTOMATED.md#troubleshooting)**
|
||||
to get solutions for common installation problems and other issues.
|
||||
|
||||
## :octicons-repo-push-24: Contributing
|
||||
|
||||
@@ -265,8 +388,8 @@ thank them for their time, hard work and effort.
|
||||
For support, please use this repository's GitHub Issues tracking service. Feel
|
||||
free to send me an email if you use and like the script.
|
||||
|
||||
Original portions of the software are Copyright (c) 2022-23
|
||||
by [The InvokeAI Team](https://github.com/invoke-ai).
|
||||
Original portions of the software are Copyright (c) 2022-23 by
|
||||
[The InvokeAI Team](https://github.com/invoke-ai).
|
||||
|
||||
## :octicons-book-24: Further Reading
|
||||
|
||||
|
||||
@@ -40,9 +40,10 @@ experimental versions later.
|
||||
this, open up a command-line window ("Terminal" on Linux and
|
||||
Macintosh, "Command" or "Powershell" on Windows) and type `python
|
||||
--version`. If Python is installed, it will print out the version
|
||||
number. If it is version `3.9.1` or `3.10.x`, you meet
|
||||
requirements.
|
||||
|
||||
number. If it is version `3.9.*` or `3.10.*`, you meet
|
||||
requirements. We do not recommend using Python 3.11 or higher,
|
||||
as not all the libraries that InvokeAI depends on work properly
|
||||
with this version.
|
||||
|
||||
!!! warning "What to do if you have an unsupported version"
|
||||
|
||||
@@ -50,8 +51,7 @@ experimental versions later.
|
||||
and download the appropriate installer package for your
|
||||
platform. We recommend [Version
|
||||
3.10.9](https://www.python.org/downloads/release/python-3109/),
|
||||
which has been extensively tested with InvokeAI. At this time
|
||||
we do not recommend Python 3.11.
|
||||
which has been extensively tested with InvokeAI.
|
||||
|
||||
_Please select your platform in the section below for platform-specific
|
||||
setup requirements._
|
||||
@@ -150,7 +150,7 @@ experimental versions later.
|
||||
|
||||
```cmd
|
||||
C:\Documents\Linco> cd InvokeAI-Installer
|
||||
C:\Documents\Linco\invokeAI> install.bat
|
||||
C:\Documents\Linco\invokeAI> .\install.bat
|
||||
```
|
||||
|
||||
7. **Select the location to install InvokeAI**: The script will ask you to choose where to install InvokeAI. Select a
|
||||
@@ -167,6 +167,11 @@ experimental versions later.
|
||||
`/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
|
||||
on Macintoshes, where "YourName" is your login name.
|
||||
|
||||
- If you have previously installed InvokeAI, you will be asked to
|
||||
confirm whether you want to reinstall into this directory. You
|
||||
may choose to reinstall, in which case your version will be upgraded,
|
||||
or choose a different directory.
|
||||
|
||||
- The script uses tab autocompletion to suggest directory path completions.
|
||||
Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
|
||||
to suggest completions.
|
||||
@@ -181,11 +186,6 @@ experimental versions later.
|
||||
are unsure what GPU you are using, you can ask the installer to
|
||||
guess.
|
||||
|
||||
<figure markdown>
|
||||

|
||||
</figure>
|
||||
|
||||
|
||||
9. **Watch it go!**: Sit back and let the install script work. It will install the third-party
|
||||
libraries needed by InvokeAI and the application itself.
|
||||
|
||||
@@ -197,25 +197,141 @@ experimental versions later.
|
||||
minutes and nothing is happening, you can interrupt the script with ^C. You
|
||||
may restart it and it will pick up where it left off.
|
||||
|
||||
10. **Post-install Configuration**: After installation completes, the installer will launch the
|
||||
configuration script, which will guide you through the first-time
|
||||
process of selecting one or more Stable Diffusion model weights
|
||||
files, downloading and configuring them. We provide a list of
|
||||
popular models that InvokeAI performs well with. However, you can
|
||||
add more weight files later on using the command-line client or
|
||||
the Web UI. See [Installing Models](050_INSTALLING_MODELS.md) for
|
||||
details.
|
||||
|
||||
<figure markdown>
|
||||

|
||||

|
||||
</figure>
|
||||
|
||||
If you have already downloaded the weights file(s) for another Stable
|
||||
Diffusion distribution, you may skip this step (by selecting "skip" when
|
||||
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
||||
process for this is described in [Installing Models](050_INSTALLING_MODELS.md).
|
||||
10. **Post-install Configuration**: After installation completes, the
|
||||
installer will launch the configuration form, which will guide you
|
||||
through the first-time process of adjusting some of InvokeAI's
|
||||
startup settings. To move around this form use ctrl-N for
|
||||
<N>ext and ctrl-P for <P>revious, or use <tab>
|
||||
and shift-<tab> to move forward and back. Once you are in a
|
||||
multi-checkbox field use the up and down cursor keys to select the
|
||||
item you want, and <space> to toggle it on and off. Within
|
||||
a directory field, pressing <tab> will provide autocomplete
|
||||
options.
|
||||
|
||||
11. **Running InvokeAI for the first time**: The script will now exit and you'll be ready to generate some images. Look
|
||||
Generally the defaults are fine, and you can come back to this screen at
|
||||
any time to tweak your system. Here are the options you can adjust:
|
||||
|
||||
- ***Output directory for images***
|
||||
This is the path to a directory in which InvokeAI will store all its
|
||||
generated images.
|
||||
|
||||
- ***NSFW checker***
|
||||
If checked, InvokeAI will test images for potential sexual content
|
||||
and blur them out if found. Note that the NSFW checker consumes
|
||||
an additional 0.6 GB of VRAM on top of the 2-3 GB of VRAM used
|
||||
by most image models. If you have a low VRAM GPU (4-6 GB), you
|
||||
can reduce out of memory errors by disabling the checker.
|
||||
|
||||
- ***HuggingFace Access Token***
|
||||
InvokeAI has the ability to download embedded styles and subjects
|
||||
from the HuggingFace Concept Library on-demand. However, some of
|
||||
the concept library files are password protected. To make download
|
||||
smoother, you can set up an account at huggingface.co, obtain an
|
||||
access token, and paste it into this field. Note that you paste
|
||||
to this screen using ctrl-shift-V
|
||||
|
||||
- ***Free GPU memory after each generation***
|
||||
This is useful for low-memory machines and helps minimize the
|
||||
amount of GPU VRAM used by InvokeAI.
|
||||
|
||||
- ***Enable xformers support if available***
|
||||
If the xformers library was successfully installed, this will activate
|
||||
it to reduce memory consumption and increase rendering speed noticeably.
|
||||
Note that xformers has the side effect of generating slightly different
|
||||
images even when presented with the same seed and other settings.
|
||||
|
||||
- ***Force CPU to be used on GPU systems***
|
||||
This will use the (slow) CPU rather than the accelerated GPU. This
|
||||
can be used to generate images on systems that don't have a compatible
|
||||
GPU.
|
||||
|
||||
- ***Precision***
|
||||
This controls whether to use float32 or float16 arithmetic.
|
||||
float16 uses less memory but is also slightly less accurate.
|
||||
Ordinarily the right arithmetic is picked automatically ("auto"),
|
||||
but you may have to use float32 to get images on certain systems
|
||||
and graphics cards. The "autocast" option is deprecated and
|
||||
shouldn't be used unless you are asked to by a member of the team.
|
||||
|
||||
- ***Number of models to cache in CPU memory***
|
||||
This allows you to keep models in memory and switch rapidly among
|
||||
them rather than having them load from disk each time. This slider
|
||||
controls how many models to keep loaded at once. Each
|
||||
model will use 2-4 GB of RAM, so use this cautiously
|
||||
|
||||
- ***Directory containing embedding/textual inversion files***
|
||||
This is the directory in which you can place custom embedding
|
||||
files (.pt or .bin). During startup, this directory will be
|
||||
scanned and InvokeAI will print out the text terms that
|
||||
are available to trigger the embeddings.
|
||||
|
||||
At the bottom of the screen you will see a checkbox for accepting
|
||||
the CreativeML Responsible AI License. You need to accept the license
|
||||
in order to download Stable Diffusion models from the next screen.
|
||||
|
||||
_You can come back to the startup options form_ as many times as you like.
|
||||
From the `invoke.sh` or `invoke.bat` launcher, select option (6) to relaunch
|
||||
this script. On the command line, it is named `invokeai-configure`.
|
||||
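For example, a sketch of relaunching just the settings form from a terminal, using the flags described in the Troubleshooting section later in this guide:

```bash
# Re-open the startup options form without re-downloading weights or support models
invokeai-configure --skip-sd-weights --skip-support-models
```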
|
||||
11. **Downloading Models**: After you press `[NEXT]` on the screen, you will be taken
|
||||
to another screen that prompts you to download a series of starter models. The ones
|
||||
we recommend are preselected for you, but you are encouraged to use the checkboxes to
|
||||
pick and choose.
|
||||
You will probably wish to download `autoencoder-840000` for use with models that
|
||||
were trained with an older version of the Stability VAE.
|
||||
|
||||
<figure markdown>
|
||||

|
||||
</figure>
|
||||
|
||||
Below the preselected list of starter models is a large text field which you can use
|
||||
to specify a series of models to import. You can specify models in a variety of formats,
|
||||
each separated by a space or newline. The formats accepted are:
|
||||
|
||||
- The path to a .ckpt or .safetensors file. On most systems, you can drag a file from
|
||||
the file browser to the textfield to automatically paste the path. Be sure to remove
|
||||
extraneous quotation marks and other things that come along for the ride.
|
||||
|
||||
- The path to a directory containing a combination of `.ckpt` and `.safetensors` files.
|
||||
The directory will be scanned from top to bottom (including subfolders) and any
|
||||
file that can be imported will be.
|
||||
|
||||
- A URL pointing to a `.ckpt` or `.safetensors` file. You can cut
|
||||
and paste directly from a web page, or simply drag the link from the web page
|
||||
or navigation bar. (You can also use ctrl-shift-V to paste into this field)
|
||||
The file will be downloaded and installed.
|
||||
|
||||
- The HuggingFace repository ID (repo_id) for a `diffusers` model. These IDs have
|
||||
the format _author_name/model_name_, as in `andite/anything-v4.0`
|
||||
|
||||
- The path to a local directory containing a `diffusers`
|
||||
model. These directories always have the file `model_index.json`
|
||||
at their top level.
|
||||
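As an illustration, a local `diffusers` model directory looks something like the listing below; the path is hypothetical and the exact subfolders vary by model, but `model_index.json` at the top level is the tell-tale file mentioned above:

```bash
ls ~/models/anything-v4.0_diffusers
# model_index.json  scheduler/  text_encoder/  tokenizer/  unet/  vae/
```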
|
||||
_Select a directory for models to import_ You may select a local
|
||||
directory for autoimporting at startup time. If you select this
|
||||
option, the directory you choose will be scanned for new
|
||||
.ckpt/.safetensors files each time InvokeAI starts up, and any new
|
||||
files will be automatically imported and made available for your
|
||||
use.
|
||||
|
||||
_Convert imported models into diffusers_ When legacy checkpoint
|
||||
files are imported, you may select to use them unmodified (the
|
||||
default) or to convert them into `diffusers` models. The latter
|
||||
load much faster and have slightly better rendering performance,
|
||||
but not all checkpoint files can be converted. Note that Stable Diffusion
|
||||
Version 2.X files are **only** supported in `diffusers` format and will
|
||||
be converted regardless.
|
||||
|
||||
_You can come back to the model install form_ as many times as you like.
|
||||
From the `invoke.sh` or `invoke.bat` launcher, select option (5) to relaunch
|
||||
this script. On the command line, it is named `invokeai-model-install`.
|
||||
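A sketch of running the model installer directly; the `--root` value is illustrative and should point at your InvokeAI runtime directory:

```bash
invokeai-model-install --root ~/invokeai
```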
|
||||
12. **Running InvokeAI for the first time**: The script will now exit and you'll be ready to generate some images. Look
|
||||
for the directory `invokeai` installed in the location you chose at the
|
||||
beginning of the install session. Look for a shell script named `invoke.sh`
|
||||
(Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
|
||||
@@ -327,6 +443,52 @@ the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
|
||||
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
|
||||
assistance.
|
||||
|
||||
### Out of Memory Issues
|
||||
|
||||
The models are large, VRAM is expensive, and you may find yourself
|
||||
faced with Out of Memory errors when generating images. Here are some
|
||||
tips to reduce the problem:
|
||||
|
||||
* **4 GB of VRAM**
|
||||
|
||||
This should be adequate for 512x512 pixel images using Stable Diffusion 1.5
|
||||
and derived models, provided that you **disable** the NSFW checker. To
|
||||
disable the filter, do one of the following:
|
||||
|
||||
* Select option (6) "_change InvokeAI startup options_" from the
|
||||
launcher. This will bring up the console-based startup settings
|
||||
dialogue and allow you to unselect the "NSFW Checker" option.
|
||||
* Start the startup settings dialogue directly by running
|
||||
`invokeai-configure --skip-sd-weights --skip-support-models`
|
||||
from the command line.
|
||||
* Find the `invokeai.init` initialization file in the InvokeAI root
|
||||
directory, open it in a text editor, and change `--nsfw_checker`
|
||||
to `--no-nsfw_checker`
|
||||
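For example, the third option above amounts to a one-line edit of `invokeai.init`; a sketch assuming the default root location (on macOS, `sed -i` needs an empty suffix argument):

```bash
# Flip the NSFW checker flag in the init file
sed -i 's/--nsfw_checker/--no-nsfw_checker/' ~/invokeai/invokeai.init
```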
|
||||
If you are on a CUDA system, you can realize significant memory
|
||||
savings by activating the `xformers` library as described above. The
|
||||
downside is `xformers` introduces non-deterministic behavior, such
|
||||
that images generated with exactly the same prompt and settings will
|
||||
be slightly different from each other. See above for more information.
|
||||
|
||||
* **6 GB of VRAM**
|
||||
|
||||
This is a border case. Using the SD 1.5 series you should be able to
|
||||
generate images up to 640x640 with the NSFW checker enabled, and up to
|
||||
1024x1024 with it disabled and `xformers` activated.
|
||||
|
||||
If you run into persistent memory issues there are a series of
|
||||
environment variables that you can set before launching InvokeAI that
|
||||
alter how the PyTorch machine learning library manages memory. See
|
||||
https://pytorch.org/docs/stable/notes/cuda.html#memory-management for
|
||||
a list of these tweaks.
|
||||
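One commonly cited tweak from that page is the allocator's maximum split size; a sketch of setting it before launch (the value is illustrative and worth experimenting with):

```bash
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512
./invoke.sh
```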
|
||||
* **12 GB of VRAM**
|
||||
|
||||
This should be sufficient to generate larger images up to about
|
||||
1280x1280. If you wish to push further, consider activating
|
||||
`xformers`.
|
||||
|
||||
### Other Problems
|
||||
|
||||
If you run into problems during or after installation, the InvokeAI team is
|
||||
@@ -348,25 +510,11 @@ version (recommended), follow these steps:
|
||||
1. Start the `invoke.sh`/`invoke.bat` launch script from within the
|
||||
`invokeai` root directory.
|
||||
|
||||
2. Choose menu item (6) "Developer's Console". This will launch a new
|
||||
command line.
|
||||
|
||||
3. Type the following command:
|
||||
|
||||
```bash
|
||||
pip install InvokeAI --upgrade
|
||||
```
|
||||
4. Watch the installation run. Once it is complete, you may exit the
|
||||
command line by typing `exit`, and then start InvokeAI from the
|
||||
launch script as per usual.
|
||||
|
||||
|
||||
Alternatively, if you wish to get the most recent unreleased
|
||||
development version, perform the same steps to enter the developer's
|
||||
console, and then type:
|
||||
|
||||
```bash
|
||||
pip install https://github.com/invoke-ai/InvokeAI/archive/refs/heads/main.zip
|
||||
```
|
||||
2. Choose menu item (10) "Update InvokeAI".
|
||||
|
||||
3. This will launch a menu that gives you the option of:
|
||||
|
||||
1. Updating to the latest official release;
|
||||
2. Updating to the bleeding-edge development version; or
|
||||
3. Manually entering the tag or branch name of a version of
|
||||
InvokeAI you wish to try out.
|
||||
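The update menu item runs the `invokeai-update` console script (as seen in the `invoke.sh`/`invoke.bat` changes elsewhere in this changeset), so it can also be launched directly from the Developer's Console:

```bash
invokeai-update
```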
|
||||
@@ -30,25 +30,35 @@ Installation](010_INSTALL_AUTOMATED.md), and in many cases will
|
||||
already be installed (if, for example, you have used your system for
|
||||
gaming):
|
||||
|
||||
* **Python** version 3.9 or 3.10 (3.11 is not recommended).
|
||||
* **Python**
|
||||
|
||||
* **CUDA Tools** For those with _NVidia GPUs_, you will need to
|
||||
install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).
|
||||
version 3.9 or 3.10 (3.11 is not recommended).
|
||||
|
||||
* **ROCm Tools** For _Linux users with AMD GPUs_, you will need
|
||||
to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
|
||||
InvokeAI does not support AMD GPUs on Windows systems due to
|
||||
lack of a Windows ROCm library.
|
||||
* **CUDA Tools**
|
||||
|
||||
* **Visual C++ Libraries** _Windows users_ must install the free
|
||||
[Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)
|
||||
For those with _NVidia GPUs_, you will need to
|
||||
install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).
|
||||
|
||||
* **The Xcode command line tools** for _Macintosh users_. Instructions are
|
||||
available at [Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
|
||||
* **ROCm Tools**
|
||||
|
||||
* _Macintosh users_ may also need to run the `Install Certificates` command
|
||||
if model downloads give lots of certificate errors. Run:
|
||||
`/Applications/Python\ 3.10/Install\ Certificates.command`
|
||||
For _Linux users with AMD GPUs_, you will need
|
||||
to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
|
||||
InvokeAI does not support AMD GPUs on Windows systems due to
|
||||
lack of a Windows ROCm library.
|
||||
|
||||
* **Visual C++ Libraries**
|
||||
|
||||
_Windows users_ must install the free
|
||||
[Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)
|
||||
|
||||
* **The Xcode command line tools**
|
||||
|
||||
for _Macintosh users_. Instructions are available at
|
||||
[Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
|
||||
|
||||
* _Macintosh users_ may also need to run the `Install Certificates` command
|
||||
if model downloads give lots of certificate errors. Run:
|
||||
`/Applications/Python\ 3.10/Install\ Certificates.command`
|
||||
|
||||
### Installation Walkthrough
|
||||
|
||||
@@ -75,7 +85,7 @@ manager, please follow these steps:
|
||||
=== "Linux/Mac"
|
||||
|
||||
```bash
|
||||
export INVOKEAI_ROOT="~/invokeai"
|
||||
export INVOKEAI_ROOT=~/invokeai
|
||||
mkdir $INVOKEAI_ROOT
|
||||
```
|
||||
|
||||
@@ -99,35 +109,30 @@ manager, please follow these steps:
|
||||
Windows environment variable using the Advanced System Settings dialogue.
|
||||
Refer to your operating system documentation for details.
|
||||
|
||||
|
||||
=== "Linux/Mac"
|
||||
```bash
|
||||
cd $INVOKEAI_ROOT
|
||||
python -m venv create .venv
|
||||
```
|
||||
|
||||
=== "Windows"
|
||||
```bash
|
||||
cd $INVOKEAI_ROOT
|
||||
python -m venv create .venv
|
||||
```
|
||||
```terminal
|
||||
cd $INVOKEAI_ROOT
|
||||
python -m venv .venv --prompt InvokeAI
|
||||
```
|
||||
|
||||
4. Activate the new environment:
|
||||
|
||||
=== "Linux/Mac"
|
||||
```bash
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
```
|
||||
```
|
||||
|
||||
=== "Windows"
|
||||
```bash
|
||||
.venv\script\activate
|
||||
```
|
||||
If you get a permissions error at this point, run the command
|
||||
`Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope CurrentUser`
|
||||
and try `activate` again.
|
||||
|
||||
The command-line prompt should change to show `(.venv)` at the
|
||||
```ps
|
||||
.venv\Scripts\activate
|
||||
```
|
||||
|
||||
If you get a permissions error at this point, run this command and try again
|
||||
|
||||
`Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`
|
||||
|
||||
The command-line prompt should change to show `(InvokeAI)` at the
|
||||
beginning of the prompt. Note that all the following steps should be
|
||||
run while inside the INVOKEAI_ROOT directory.
|
||||
|
||||
@@ -137,40 +142,47 @@ manager, please follow these steps:
|
||||
python -m pip install --upgrade pip
|
||||
```
|
||||
|
||||
6. Install the InvokeAI Package. The `--extra-index-url` option is used to select among CUDA, ROCm and CPU/MPS drivers as shown below:
|
||||
6. Install the InvokeAI Package. The `--extra-index-url` option is used to select among
|
||||
CUDA, ROCm and CPU/MPS drivers as shown below:
|
||||
|
||||
=== "CUDA (NVidia)"
|
||||
|
||||
```bash
|
||||
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
||||
```
|
||||
|
||||
=== "ROCm (AMD)"
|
||||
|
||||
```bash
|
||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
|
||||
```
|
||||
|
||||
=== "CPU (Intel Macs & non-GPU systems)"
|
||||
|
||||
```bash
|
||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
```
|
||||
|
||||
=== "MPS (M1 and M2 Macs)"
|
||||
|
||||
```bash
|
||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
pip install InvokeAI --use-pep517
|
||||
```
|
||||
|
||||
7. Deactivate and reactivate your runtime directory so that the invokeai-specific commands
|
||||
become available in the environment
|
||||
|
||||
=== "Linux/Macintosh"
|
||||
|
||||
```bash
|
||||
deactivate && source .venv/bin/activate
|
||||
```
|
||||
|
||||
=== "Windows"
|
||||
```bash
|
||||
|
||||
```ps
|
||||
deactivate
|
||||
.venv\Scripts\activate
|
||||
.venv\Scripts\activate
|
||||
```
|
||||
|
||||
8. Set up the runtime directory
|
||||
@@ -179,7 +191,7 @@ manager, please follow these steps:
|
||||
models, model config files, directory for textual inversion embeddings, and
|
||||
your outputs.
|
||||
|
||||
```bash
|
||||
```terminal
|
||||
invokeai-configure
|
||||
```
|
||||
|
||||
@@ -283,13 +295,12 @@ on your system, please see the [Git Installation
|
||||
Guide](https://github.com/git-guides/install-git)
|
||||
|
||||
1. From the command line, run this command:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/invoke-ai/InvokeAI.git
|
||||
```
|
||||
|
||||
This will create a directory named `InvokeAI` and populate it with the
|
||||
full source code from the InvokeAI repository.
|
||||
This will create a directory named `InvokeAI` and populate it with the
|
||||
full source code from the InvokeAI repository.
|
||||
|
||||
2. Activate the InvokeAI virtual environment as per step (4) of the manual
|
||||
installation protocol (important!)
|
||||
@@ -314,7 +325,7 @@ installation protocol (important!)
|
||||
|
||||
=== "MPS (M1 and M2 Macs)"
|
||||
```bash
|
||||
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
pip install -e . --use-pep517
|
||||
```
|
||||
|
||||
Be sure to pass `-e` (for an editable install) and don't forget the
|
||||
@@ -330,5 +341,29 @@ installation protocol (important!)
|
||||
repository. You can then use GitHub functions to create and submit
|
||||
pull requests to contribute improvements to the project.
|
||||
|
||||
Please see [Contributing](/index.md#Contributing) for hints
|
||||
Please see [Contributing](../index.md#contributing) for hints
|
||||
on getting started.
|
||||
|
||||
### Unsupported Conda Install
|
||||
|
||||
Congratulations, you found the "secret" Conda installation
|
||||
instructions. If you really **really** want to use Conda with InvokeAI
|
||||
you can do so using this unsupported recipe:
|
||||
|
||||
```
|
||||
mkdir ~/invokeai
|
||||
conda create -n invokeai python=3.10
|
||||
conda activate invokeai
|
||||
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
||||
invokeai-configure --root ~/invokeai
|
||||
invokeai --root ~/invokeai --web
|
||||
```
|
||||
|
||||
The `pip install` command shown in this recipe is for Linux/Windows
|
||||
systems with an NVIDIA GPU. See step (6) above for the command to use
|
||||
with other platforms/GPU combinations. If you don't wish to pass the
|
||||
`--root` argument to `invokeai` with each launch, you may set the
|
||||
environment variable INVOKEAI_ROOT to point to the installation directory.
|
||||
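For example, a sketch using the same root directory as the recipe above:

```bash
export INVOKEAI_ROOT=~/invokeai
invokeai --web    # no --root argument needed once the variable is set
```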
|
||||
Note that if you run into problems with the Conda installation, the InvokeAI
|
||||
staff will **not** be able to help you out. Caveat Emptor!
|
||||
|
||||
@@ -43,25 +43,31 @@ InvokeAI comes with support for a good set of starter models. You'll
|
||||
find them listed in the master models file
|
||||
`configs/INITIAL_MODELS.yaml` in the InvokeAI root directory. The
|
||||
subset that are currently installed are found in
|
||||
`configs/models.yaml`. The current list is:
|
||||
`configs/models.yaml`. As of v2.3.1, the list of starter models is:
|
||||
|
||||
| Model | HuggingFace Repo ID | Description | URL |
|
||||
| -------------------- | --------------------------------- | ---------------------------------------------------------- | -------------------------------------------------------------- |
|
||||
| stable-diffusion-1.5 | runwayml/stable-diffusion-v1-5 | Most recent version of base Stable Diffusion model | https://huggingface.co/runwayml/stable-diffusion-v1-5 |
|
||||
| stable-diffusion-1.4 | runwayml/stable-diffusion-v1-4 | Previous version of base Stable Diffusion model | https://huggingface.co/runwayml/stable-diffusion-v1-4 |
|
||||
| inpainting-1.5 | runwayml/stable-diffusion-inpainting | Stable diffusion 1.5 optimized for inpainting | https://huggingface.co/runwayml/stable-diffusion-inpainting |
|
||||
| stable-diffusion-2.1-base |stabilityai/stable-diffusion-2-1-base | Stable Diffusion version 2.1 trained on 512 pixel images | https://huggingface.co/stabilityai/stable-diffusion-2-1-base |
|
||||
| stable-diffusion-2.1-768 |stabilityai/stable-diffusion-2-1 | Stable Diffusion version 2.1 trained on 768 pixel images | https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|
||||
| dreamlike-diffusion-1.0 | dreamlike-art/dreamlike-diffusion-1.0 | An SD 1.5 model finetuned on high quality art | https://huggingface.co/dreamlike-art/dreamlike-diffusion-1.0 |
|
||||
| dreamlike-photoreal-2.0 | dreamlike-art/dreamlike-photoreal-2.0 | A photorealistic model trained on 768 pixel images| https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0 |
|
||||
| openjourney-4.0 | prompthero/openjourney | An SD 1.5 model finetuned on Midjourney images prompt with "mdjrny-v4 style" | https://huggingface.co/prompthero/openjourney |
|
||||
| nitro-diffusion-1.0 | nitrosocke/Nitro-Diffusion | An SD 1.5 model finetuned on three styles, prompt with "archer style", "arcane style" or "modern disney style" | https://huggingface.co/nitrosocke/Nitro-Diffusion|
|
||||
| trinart-2.0 | naclbit/trinart_stable_diffusion_v2 | An SD 1.5 model finetuned with ~40,000 assorted high resolution manga/anime-style pictures | https://huggingface.co/naclbit/trinart_stable_diffusion_v2|
|
||||
| trinart-characters-2_0 | naclbit/trinart_derrida_characters_v2_stable_diffusion | An SD 1.5 model finetuned with 19.2M manga/anime-style pictures | https://huggingface.co/naclbit/trinart_derrida_characters_v2_stable_diffusion|
|
||||
|Model Name | HuggingFace Repo ID | Description | URL |
|
||||
|---------- | ---------- | ----------- | --- |
|
||||
|stable-diffusion-1.5|runwayml/stable-diffusion-v1-5|Stable Diffusion version 1.5 diffusers model (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-v1-5 |
|
||||
|sd-inpainting-1.5|runwayml/stable-diffusion-inpainting|RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-inpainting |
|
||||
|stable-diffusion-2.1|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|
||||
|sd-inpainting-2.0|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.0 inpainting model (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|
||||
|analog-diffusion-1.0|wavymulder/Analog-Diffusion|An SD-1.5 model trained on diverse analog photographs (2.13 GB)|https://huggingface.co/wavymulder/Analog-Diffusion |
|
||||
|deliberate-1.0|XpucT/Deliberate|Versatile model that produces detailed images up to 768px (4.27 GB)|https://huggingface.co/XpucT/Deliberate |
|
||||
|d&d-diffusion-1.0|0xJustin/Dungeons-and-Diffusion|Dungeons & Dragons characters (2.13 GB)|https://huggingface.co/0xJustin/Dungeons-and-Diffusion |
|
||||
|dreamlike-photoreal-2.0|dreamlike-art/dreamlike-photoreal-2.0|A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)|https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0 |
|
||||
|inkpunk-1.0|Envvi/Inkpunk-Diffusion|Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)|https://huggingface.co/Envvi/Inkpunk-Diffusion |
|
||||
|openjourney-4.0|prompthero/openjourney|An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)|https://huggingface.co/prompthero/openjourney |
|
||||
|portrait-plus-1.0|wavymulder/portraitplus|An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)|https://huggingface.co/wavymulder/portraitplus |
|
||||
|seek-art-mega-1.0|coreco/seek.art_MEGA|A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)|https://huggingface.co/coreco/seek.art_MEGA |
|
||||
|trinart-2.0|naclbit/trinart_stable_diffusion_v2|An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)|https://huggingface.co/naclbit/trinart_stable_diffusion_v2 |
|
||||
|waifu-diffusion-1.4|hakurei/waifu-diffusion|An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)|https://huggingface.co/hakurei/waifu-diffusion |
|
||||
|
||||
Note that these files are covered by an "Ethical AI" license which forbids
|
||||
certain uses. When you initially download them, you are asked to
|
||||
accept the license terms.
|
||||
Note that these files are covered by an "Ethical AI" license which
|
||||
forbids certain uses. When you initially download them, you are asked
|
||||
to accept the license terms. In addition, some of these models carry
|
||||
additional license terms that limit their use in commercial
|
||||
applications or on public servers. Be sure to familiarize yourself
|
||||
with the model terms by visiting the URLs in the table above.
|
||||
|
||||
## Community-Contributed Models
|
||||
|
||||
@@ -80,6 +86,13 @@ only `.safetensors` and `.ckpt` models, but they can be easily loaded
|
||||
into InvokeAI and/or converted into optimized `diffusers` models. Be
|
||||
aware that CIVITAI hosts many models that generate NSFW content.
|
||||
|
||||
!!! note
|
||||
|
||||
InvokeAI 2.3.x does not support directly importing and
|
||||
running Stable Diffusion version 2 checkpoint models. You may instead
|
||||
convert them into `diffusers` models using the conversion methods
|
||||
described below.
|
||||
|
||||
## Installation
|
||||
|
||||
There are multiple ways to install and manage models:
|
||||
@@ -90,7 +103,7 @@ There are multiple ways to install and manage models:
|
||||
models files.
|
||||
|
||||
3. The web interface (WebUI) has a GUI for importing and managing
|
||||
models.
|
||||
models.
|
||||
|
||||
### Installation via `invokeai-configure`
|
||||
|
||||
@@ -106,7 +119,7 @@ confirm that the files are complete.
|
||||
You can install a new model, including any of the community-supported ones, via
|
||||
the command-line client's `!import_model` command.
|
||||
|
||||
#### Installing `.ckpt` and `.safetensors` models
|
||||
#### Installing individual `.ckpt` and `.safetensors` models
|
||||
|
||||
If the model is already downloaded to your local disk, use
|
||||
`!import_model /path/to/file.ckpt` to load it. For example:
|
||||
@@ -131,15 +144,40 @@ invoke> !import_model https://example.org/sd_models/martians.safetensors
|
||||
For this to work, the URL must not be password-protected. Otherwise
|
||||
you will receive a 404 error.
|
||||
|
||||
When you import a legacy model, the CLI will ask you a few questions
|
||||
about the model, including what size image it was trained on (usually
|
||||
512x512), what name and description you wish to use for it, what
|
||||
configuration file to use for it (usually the default
|
||||
`v1-inference.yaml`), whether you'd like to make this model the
|
||||
default at startup time, and whether you would like to install a
|
||||
custom VAE (variable autoencoder) file for the model. For recent
|
||||
models, the answer to the VAE question is usually "no," but it won't
|
||||
hurt to answer "yes".
|
||||
When you import a legacy model, the CLI will first ask you what type
|
||||
of model this is. You can indicate whether it is a model based on
|
||||
Stable Diffusion 1.x (1.4 or 1.5), one based on Stable Diffusion 2.x,
|
||||
or a 1.x inpainting model. Be careful to indicate the correct model
|
||||
type, or it will not load correctly. You can correct the model type
|
||||
after the fact using the `!edit_model` command.
|
||||
|
||||
The system will then ask you a few other questions about the model,
|
||||
including what size image it was trained on (usually 512x512), what
|
||||
name and description you wish to use for it, and whether you would
|
||||
like to install a custom VAE (variable autoencoder) file for the
|
||||
model. For recent models, the answer to the VAE question is usually
|
||||
"no," but it won't hurt to answer "yes".
|
||||
|
||||
After importing, the model will load. If this is successful, you will
|
||||
be asked if you want to keep the model loaded in memory to start
|
||||
generating immediately. You'll also be asked if you wish to make this
|
||||
the default model on startup. You can change this later using
|
||||
`!edit_model`.
|
||||
|
||||
#### Importing a batch of `.ckpt` and `.safetensors` models from a directory
|
||||
|
||||
You may also point `!import_model` to a directory containing a set of
|
||||
`.ckpt` or `.safetensors` files. They will be imported _en masse_.
|
||||
|
||||
!!! example
|
||||
|
||||
```console
|
||||
invoke> !import_model C:/Users/fred/Downloads/civitai_models/
|
||||
```
|
||||
|
||||
You will be given the option to import all models found in the
|
||||
directory, or select which ones to import. If there are subfolders
|
||||
within the directory, they will be searched for models to import.
|
||||
|
||||
#### Installing `diffusers` models
|
||||
|
||||
@@ -279,19 +317,23 @@ After you save the modified `models.yaml` file relaunch
|
||||
### Installation via the WebUI
|
||||
|
||||
To access the WebUI Model Manager, click on the button that looks like
|
||||
a cute in the upper right side of the browser screen. This will bring
|
||||
a cube in the upper right side of the browser screen. This will bring
|
||||
up a dialogue that lists the models you have already installed, and
|
||||
allows you to load, delete or edit them:
|
||||
|
||||
<figure markdown>
|
||||
|
||||

|
||||
|
||||
</figure>
|
||||
|
||||
To add a new model, click on **+ Add New** and select to either a
|
||||
checkpoint/safetensors model, or a diffusers model:
|
||||
|
||||
<figure markdown>
|
||||
|
||||

|
||||
|
||||
</figure>
|
||||
|
||||
In this example, we chose **Add Diffusers**. As shown in the figure
|
||||
@@ -302,7 +344,9 @@ choose to enter a path to disk, the system will autocomplete for you
|
||||
as you type:
|
||||
|
||||
<figure markdown>
|
||||
|
||||

|
||||
|
||||
</figure>
|
||||
|
||||
Press **Add Model** at the bottom of the dialogue (scrolled out of
|
||||
@@ -317,7 +361,9 @@ directory and press the "Search" icon. This will display the
|
||||
subfolders, and allow you to choose which ones to import:
|
||||
|
||||
<figure markdown>
|
||||
|
||||

|
||||
|
||||
</figure>
|
||||
|
||||
## Model Management Startup Options
|
||||
@@ -342,9 +388,8 @@ invoke.sh --autoconvert /home/fred/stable-diffusion-checkpoints
|
||||
|
||||
And here is what the same argument looks like in `invokeai.init`:
|
||||
|
||||
```
|
||||
```bash
|
||||
--outdir="/home/fred/invokeai/outputs"
|
||||
--no-nsfw_checker
|
||||
--autoconvert /home/fred/stable-diffusion-checkpoints
|
||||
```
|
||||
|
||||
|
||||
@@ -1,73 +0,0 @@
|
||||
openapi: 3.0.3
|
||||
info:
|
||||
title: Stable Diffusion
|
||||
description: |-
|
||||
TODO: Description Here
|
||||
|
||||
Some useful links:
|
||||
- [Stable Diffusion Dream Server](https://github.com/lstein/stable-diffusion)
|
||||
|
||||
license:
|
||||
name: MIT License
|
||||
url: https://github.com/lstein/stable-diffusion/blob/main/LICENSE
|
||||
version: 1.0.0
|
||||
servers:
|
||||
- url: http://localhost:9090/api
|
||||
tags:
|
||||
- name: images
|
||||
description: Retrieve and manage generated images
|
||||
paths:
|
||||
/images/{imageId}:
|
||||
get:
|
||||
tags:
|
||||
- images
|
||||
summary: Get image by ID
|
||||
description: Returns a single image
|
||||
operationId: getImageById
|
||||
parameters:
|
||||
- name: imageId
|
||||
in: path
|
||||
description: ID of image to return
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
description: successful operation
|
||||
content:
|
||||
image/png:
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
'404':
|
||||
description: Image not found
|
||||
/intermediates/{intermediateId}/{step}:
|
||||
get:
|
||||
tags:
|
||||
- images
|
||||
summary: Get intermediate image by ID
|
||||
description: Returns a single intermediate image
|
||||
operationId: getIntermediateById
|
||||
parameters:
|
||||
- name: intermediateId
|
||||
in: path
|
||||
description: ID of intermediate to return
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: step
|
||||
in: path
|
||||
description: The generation step of the intermediate
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
description: successful operation
|
||||
content:
|
||||
image/png:
|
||||
schema:
|
||||
type: string
|
||||
format: binary
|
||||
'404':
|
||||
description: Intermediate not found
|
||||
19
docs/other/TRANSLATION.md
Normal file
@@ -0,0 +1,19 @@
|
||||
# Translation
|
||||
|
||||
InvokeAI uses [Weblate](https://weblate.org) for translation. Weblate is a FOSS project providing a scalable translation service. Weblate automates the tedious parts of managing translation of a growing project, and the service is generously provided at no cost to FOSS projects like InvokeAI.
|
||||
|
||||
## Contributing
|
||||
|
||||
If you'd like to contribute by adding or updating a translation, please visit our [Weblate project](https://hosted.weblate.org/engage/invokeai/). You'll need to sign in with your GitHub account (a number of other accounts are supported, including Google).
|
||||
|
||||
Once signed in, select a language and then the Web UI component. From here you can Browse and Translate strings from English to your chosen language. Zen mode offers a simpler translation experience.
|
||||
|
||||
Your changes will be attributed to you in the automated PR process; you don't need to do anything else.
|
||||
|
||||
## Help & Questions
|
||||
|
||||
Please check Weblate's [documentation](https://docs.weblate.org/en/latest/index.html) or ping @psychedelicious or @blessedcoolant on Discord if you have any questions.
|
||||
|
||||
## Thanks
|
||||
|
||||
Thanks to the InvokeAI community for their efforts to translate the project!
|
||||
@@ -1,5 +0,0 @@
|
||||
mkdocs
|
||||
mkdocs-material>=8, <9
|
||||
mkdocs-git-revision-date-localized-plugin
|
||||
mkdocs-redirects==1.2.0
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 665 B |
Binary file not shown.
|
Before Width: | Height: | Size: 628 B |
@@ -1,16 +0,0 @@
|
||||
html {
|
||||
box-sizing: border-box;
|
||||
overflow: -moz-scrollbars-vertical;
|
||||
overflow-y: scroll;
|
||||
}
|
||||
|
||||
*,
|
||||
*:before,
|
||||
*:after {
|
||||
box-sizing: inherit;
|
||||
}
|
||||
|
||||
body {
|
||||
margin: 0;
|
||||
background: #fafafa;
|
||||
}
|
||||
@@ -1,79 +0,0 @@
|
||||
<!doctype html>
|
||||
<html lang="en-US">
|
||||
<head>
|
||||
<title>Swagger UI: OAuth2 Redirect</title>
|
||||
</head>
|
||||
<body>
|
||||
<script>
|
||||
'use strict';
|
||||
function run () {
|
||||
var oauth2 = window.opener.swaggerUIRedirectOauth2;
|
||||
var sentState = oauth2.state;
|
||||
var redirectUrl = oauth2.redirectUrl;
|
||||
var isValid, qp, arr;
|
||||
|
||||
if (/code|token|error/.test(window.location.hash)) {
|
||||
qp = window.location.hash.substring(1).replace('?', '&');
|
||||
} else {
|
||||
qp = location.search.substring(1);
|
||||
}
|
||||
|
||||
arr = qp.split("&");
|
||||
arr.forEach(function (v,i,_arr) { _arr[i] = '"' + v.replace('=', '":"') + '"';});
|
||||
qp = qp ? JSON.parse('{' + arr.join() + '}',
|
||||
function (key, value) {
|
||||
return key === "" ? value : decodeURIComponent(value);
|
||||
}
|
||||
) : {};
|
||||
|
||||
isValid = qp.state === sentState;
|
||||
|
||||
if ((
|
||||
oauth2.auth.schema.get("flow") === "accessCode" ||
|
||||
oauth2.auth.schema.get("flow") === "authorizationCode" ||
|
||||
oauth2.auth.schema.get("flow") === "authorization_code"
|
||||
) && !oauth2.auth.code) {
|
||||
if (!isValid) {
|
||||
oauth2.errCb({
|
||||
authId: oauth2.auth.name,
|
||||
source: "auth",
|
||||
level: "warning",
|
||||
message: "Authorization may be unsafe, passed state was changed in server. The passed state wasn't returned from auth server."
|
||||
});
|
||||
}
|
||||
|
||||
if (qp.code) {
|
||||
delete oauth2.state;
|
||||
oauth2.auth.code = qp.code;
|
||||
oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl});
|
||||
} else {
|
||||
let oauthErrorMsg;
|
||||
if (qp.error) {
|
||||
oauthErrorMsg = "["+qp.error+"]: " +
|
||||
(qp.error_description ? qp.error_description+ ". " : "no accessCode received from the server. ") +
|
||||
(qp.error_uri ? "More info: "+qp.error_uri : "");
|
||||
}
|
||||
|
||||
oauth2.errCb({
|
||||
authId: oauth2.auth.name,
|
||||
source: "auth",
|
||||
level: "error",
|
||||
message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server."
|
||||
});
|
||||
}
|
||||
} else {
|
||||
oauth2.callback({auth: oauth2.auth, token: qp, isValid: isValid, redirectUrl: redirectUrl});
|
||||
}
|
||||
window.close();
|
||||
}
|
||||
|
||||
if (document.readyState !== 'loading') {
|
||||
run();
|
||||
} else {
|
||||
document.addEventListener('DOMContentLoaded', function () {
|
||||
run();
|
||||
});
|
||||
}
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,20 +0,0 @@
|
||||
window.onload = function() {
|
||||
//<editor-fold desc="Changeable Configuration Block">
|
||||
|
||||
// the following lines will be replaced by docker/configurator, when it runs in a docker-container
|
||||
window.ui = SwaggerUIBundle({
|
||||
url: "openapi3_0.yaml",
|
||||
dom_id: '#swagger-ui',
|
||||
deepLinking: true,
|
||||
presets: [
|
||||
SwaggerUIBundle.presets.apis,
|
||||
SwaggerUIStandalonePreset
|
||||
],
|
||||
plugins: [
|
||||
SwaggerUIBundle.plugins.DownloadUrl
|
||||
],
|
||||
layout: "StandaloneLayout"
|
||||
});
|
||||
|
||||
//</editor-fold>
|
||||
};
|
||||
File diff suppressed because one or more lines are too long (12 files)
@@ -20,10 +20,9 @@ echo Building installer for version $VERSION
|
||||
echo "Be certain that you're in the 'installer' directory before continuing."
|
||||
read -p "Press any key to continue, or CTRL-C to exit..."
|
||||
|
||||
read -e -p "Commit and tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
|
||||
read -e -p "Tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
|
||||
RESPONSE=${input:='n'}
|
||||
if [ "$RESPONSE" == 'y' ]; then
|
||||
git commit -a
|
||||
|
||||
if ! git tag $VERSION ; then
|
||||
echo "Existing/invalid tag"
|
||||
@@ -32,6 +31,8 @@ if [ "$RESPONSE" == 'y' ]; then
|
||||
|
||||
git push origin :refs/tags/$LATEST_TAG
|
||||
git tag -fa $LATEST_TAG
|
||||
|
||||
echo "remember to push --tags!"
|
||||
fi
|
||||
|
||||
# ----------------------
|
||||
|
||||
@@ -67,6 +67,8 @@ del /q .tmp1 .tmp2
|
||||
@rem -------------- Install and Configure ---------------
|
||||
|
||||
call python .\lib\main.py
|
||||
pause
|
||||
exit /b
|
||||
|
||||
@rem ------------------------ Subroutines ---------------
|
||||
@rem routine to do comparison of semantic version numbers
|
||||
|
||||
@@ -9,13 +9,16 @@ cd $scriptdir
|
||||
function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }
|
||||
|
||||
MINIMUM_PYTHON_VERSION=3.9.0
|
||||
MAXIMUM_PYTHON_VERSION=3.11.0
|
||||
PYTHON=""
|
||||
for candidate in python3.10 python3.9 python3 python python3.11 ; do
|
||||
for candidate in python3.10 python3.9 python3 python ; do
|
||||
if ppath=`which $candidate`; then
|
||||
python_version=$($ppath -V | awk '{ print $2 }')
|
||||
if [ $(version $python_version) -ge $(version "$MINIMUM_PYTHON_VERSION") ]; then
|
||||
PYTHON=$ppath
|
||||
break
|
||||
if [ $(version $python_version) -lt $(version "$MAXIMUM_PYTHON_VERSION") ]; then
|
||||
PYTHON=$ppath
|
||||
break
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
done
|
||||
@@ -28,3 +31,4 @@ if [ -z "$PYTHON" ]; then
|
||||
fi
|
||||
|
||||
exec $PYTHON ./lib/main.py ${@}
|
||||
read -p "Press any key to exit"
|
||||
|
||||
@@ -336,7 +336,8 @@ class InvokeAiInstance:
|
||||
elif el in ['-y','--yes','--yes-to-all']:
|
||||
new_argv.append(el)
|
||||
sys.argv = new_argv
|
||||
|
||||
|
||||
import requests # to catch download exceptions
|
||||
from messages import introduction
|
||||
|
||||
introduction()
|
||||
@@ -346,7 +347,21 @@ class InvokeAiInstance:
|
||||
# NOTE: currently the config script does its own arg parsing! this means the command-line switches
|
||||
# from the installer will also automatically propagate down to the config script.
|
||||
# this may change in the future with config refactoring!
|
||||
invokeai_configure.main()
|
||||
succeeded = False
|
||||
try:
|
||||
invokeai_configure.main()
|
||||
succeeded = True
|
||||
except requests.exceptions.ConnectionError as e:
|
||||
print(f'\nA network error was encountered during configuration and download: {str(e)}')
|
||||
except OSError as e:
|
||||
print(f'\nAn OS error was encountered during configuration and download: {str(e)}')
|
||||
except Exception as e:
|
||||
print(f'\nA problem was encountered during the configuration and download steps: {str(e)}')
|
||||
finally:
|
||||
if not succeeded:
|
||||
print('To try again, find the "invokeai" directory, run the script "invoke.sh" or "invoke.bat"')
|
||||
print('and choose option 7 to fix a broken install, optionally followed by option 5 to install models.')
|
||||
print('Alternatively you can relaunch the installer.')
|
||||
|
||||
def install_user_scripts(self):
|
||||
"""
|
||||
|
||||
@@ -6,15 +6,20 @@ setlocal
|
||||
call .venv\Scripts\activate.bat
|
||||
set INVOKEAI_ROOT=.
|
||||
|
||||
:start
|
||||
echo Do you want to generate images using the
|
||||
echo 1. command-line
|
||||
echo 1. command-line interface
|
||||
echo 2. browser-based UI
|
||||
echo 3. run textual inversion training
|
||||
echo 4. merge models (diffusers type only)
|
||||
echo 5. re-run the configure script to download new models
|
||||
echo 6. open the developer console
|
||||
echo 7. command-line help
|
||||
set /P restore="Please enter 1, 2, 3, 4, 5, 6 or 7: [2] "
|
||||
echo 5. download and install models
|
||||
echo 6. change InvokeAI startup options
|
||||
echo 7. re-run the configure script to fix a broken install
|
||||
echo 8. open the developer console
|
||||
echo 9. update InvokeAI
|
||||
echo 10. command-line help
|
||||
echo Q - quit
|
||||
set /P restore="Please enter 1-10, Q: [2] "
|
||||
if not defined restore set restore=2
|
||||
IF /I "%restore%" == "1" (
|
||||
echo Starting the InvokeAI command-line..
|
||||
@@ -24,14 +29,20 @@ IF /I "%restore%" == "1" (
|
||||
python .venv\Scripts\invokeai.exe --web %*
|
||||
) ELSE IF /I "%restore%" == "3" (
|
||||
echo Starting textual inversion training..
|
||||
python .venv\Scripts\invokeai-ti.exe --gui %*
|
||||
python .venv\Scripts\invokeai-ti.exe --gui
|
||||
) ELSE IF /I "%restore%" == "4" (
|
||||
echo Starting model merging script..
|
||||
python .venv\Scripts\invokeai-merge.exe --gui %*
|
||||
python .venv\Scripts\invokeai-merge.exe --gui
|
||||
) ELSE IF /I "%restore%" == "5" (
|
||||
echo Running invokeai-configure...
|
||||
python .venv\Scripts\invokeai-configure.exe %*
|
||||
echo Running invokeai-model-install...
|
||||
python .venv\Scripts\invokeai-model-install.exe
|
||||
) ELSE IF /I "%restore%" == "6" (
|
||||
echo Running invokeai-configure...
|
||||
python .venv\Scripts\invokeai-configure.exe --skip-sd-weight --skip-support-models
|
||||
) ELSE IF /I "%restore%" == "7" (
|
||||
echo Running invokeai-configure...
|
||||
python .venv\Scripts\invokeai-configure.exe --yes --default_only
|
||||
) ELSE IF /I "%restore%" == "8" (
|
||||
echo Developer Console
|
||||
echo Python command is:
|
||||
where python
|
||||
@@ -43,14 +54,27 @@ IF /I "%restore%" == "1" (
|
||||
echo *************************
|
||||
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
|
||||
call cmd /k
|
||||
) ELSE IF /I "%restore%" == "7" (
|
||||
) ELSE IF /I "%restore%" == "9" (
|
||||
echo Running invokeai-update...
|
||||
python .venv\Scripts\invokeai-update.exe %*
|
||||
) ELSE IF /I "%restore%" == "10" (
|
||||
echo Displaying command line help...
|
||||
python .venv\Scripts\invokeai.exe --help %*
|
||||
pause
|
||||
exit /b
|
||||
) ELSE IF /I "%restore%" == "q" (
|
||||
echo Goodbye!
|
||||
goto ending
|
||||
) ELSE (
|
||||
echo Invalid selection
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
goto start
|
||||
|
||||
endlocal
|
||||
pause
|
||||
|
||||
:ending
|
||||
exit /b
|
||||
|
||||
|
||||
@@ -24,45 +24,64 @@ if [ "$(uname -s)" == "Darwin" ]; then
|
||||
export PYTORCH_ENABLE_MPS_FALLBACK=1
|
||||
fi
|
||||
|
||||
while true
|
||||
do
|
||||
if [ "$0" != "bash" ]; then
|
||||
echo "Do you want to generate images using the"
|
||||
echo "1. command-line"
|
||||
echo "1. command-line interface"
|
||||
echo "2. browser-based UI"
|
||||
echo "3. run textual inversion training"
|
||||
echo "4. merge models (diffusers type only)"
|
||||
echo "5. open the developer console"
|
||||
echo "6. re-run the configure script to download new models"
|
||||
echo "7. command-line help "
|
||||
echo "5. download and install models"
|
||||
echo "6. change InvokeAI startup options"
|
||||
echo "7. re-run the configure script to fix a broken install"
|
||||
echo "8. open the developer console"
|
||||
echo "9. update InvokeAI"
|
||||
echo "10. command-line help"
|
||||
echo "Q - Quit"
|
||||
echo ""
|
||||
read -p "Please enter 1, 2, 3, 4, 5, 6 or 7: [2] " yn
|
||||
read -p "Please enter 1-10, Q: [2] " yn
|
||||
choice=${yn:='2'}
|
||||
case $choice in
|
||||
1)
|
||||
echo "Starting the InvokeAI command-line..."
|
||||
exec invokeai $@
|
||||
invokeai $@
|
||||
;;
|
||||
2)
|
||||
echo "Starting the InvokeAI browser-based UI..."
|
||||
exec invokeai --web $@
|
||||
invokeai --web $@
|
||||
;;
|
||||
3)
|
||||
echo "Starting Textual Inversion:"
|
||||
exec invokeai-ti --gui $@
|
||||
invokeai-ti --gui $@
|
||||
;;
|
||||
4)
|
||||
echo "Merging Models:"
|
||||
exec invokeai-merge --gui $@
|
||||
invokeai-merge --gui $@
|
||||
;;
|
||||
5)
|
||||
echo "Developer Console:"
|
||||
invokeai-model-install --root ${INVOKEAI_ROOT}
|
||||
;;
|
||||
6)
|
||||
invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
|
||||
;;
|
||||
7)
|
||||
invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
|
||||
;;
|
||||
8)
|
||||
echo "Developer Console:"
|
||||
file_name=$(basename "${BASH_SOURCE[0]}")
|
||||
bash --init-file "$file_name"
|
||||
;;
|
||||
6)
|
||||
exec invokeai-configure --root ${INVOKEAI_ROOT}
|
||||
9)
|
||||
echo "Update:"
|
||||
invokeai-update
|
||||
;;
|
||||
7)
|
||||
exec invokeai --help
|
||||
10)
|
||||
invokeai --help
|
||||
;;
|
||||
[qQ])
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
echo "Invalid selection"
|
||||
@@ -73,3 +92,4 @@ else # in developer console
|
||||
echo "Press ^D to exit"
|
||||
export PS1="(InvokeAI) \u@\h \w> "
|
||||
fi
|
||||
done
|
||||
|
||||
@@ -25,12 +25,15 @@ from invokeai.backend.modules.parameters import parameters_to_command
|
||||
import invokeai.frontend.dist as frontend
|
||||
from ldm.generate import Generate
|
||||
from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
|
||||
from ldm.invoke.conditioning import get_tokens_for_prompt, get_prompt_structure
|
||||
from ldm.invoke.conditioning import get_tokens_for_prompt_object, get_prompt_structure, split_weighted_subprompts, \
|
||||
get_tokenizer
|
||||
from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState
|
||||
from ldm.invoke.generator.inpaint import infill_methods
|
||||
from ldm.invoke.globals import Globals
|
||||
from ldm.invoke.globals import Globals, global_converted_ckpts_dir
|
||||
from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
|
||||
from ldm.invoke.prompt_parser import split_weighted_subprompts, Blend
|
||||
from compel.prompt_parser import Blend
|
||||
from ldm.invoke.globals import global_models_dir
|
||||
from ldm.invoke.merge_diffusers import merge_diffusion_models
|
||||
|
||||
# Loading Arguments
|
||||
opt = Args()
|
||||
@@ -43,7 +46,8 @@ if not os.path.isabs(args.outdir):
|
||||
|
||||
# normalize the config directory relative to root
|
||||
if not os.path.isabs(opt.conf):
|
||||
opt.conf = os.path.normpath(os.path.join(Globals.root,opt.conf))
|
||||
opt.conf = os.path.normpath(os.path.join(Globals.root, opt.conf))
|
||||
|
||||
|
||||
class InvokeAIWebServer:
|
||||
def __init__(self, generate: Generate, gfpgan, codeformer, esrgan) -> None:
|
||||
@@ -189,7 +193,8 @@ class InvokeAIWebServer:
|
||||
(width, height) = pil_image.size
|
||||
|
||||
thumbnail_path = save_thumbnail(
|
||||
pil_image, os.path.basename(file_path), self.thumbnail_image_path
|
||||
pil_image, os.path.basename(
|
||||
file_path), self.thumbnail_image_path
|
||||
)
|
||||
|
||||
response = {
|
||||
@@ -203,11 +208,7 @@ class InvokeAIWebServer:
|
||||
return make_response(response, 200)
|
||||
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
return make_response("Error uploading file", 500)
|
||||
|
||||
self.load_socketio_listeners(self.socketio)
|
||||
@@ -264,14 +265,16 @@ class InvokeAIWebServer:
|
||||
# location for "finished" images
|
||||
self.result_path = args.outdir
|
||||
# temporary path for intermediates
|
||||
self.intermediate_path = os.path.join(self.result_path, "intermediates/")
|
||||
self.intermediate_path = os.path.join(
|
||||
self.result_path, "intermediates/")
|
||||
# path for user-uploaded init images and masks
|
||||
self.init_image_path = os.path.join(self.result_path, "init-images/")
|
||||
self.mask_image_path = os.path.join(self.result_path, "mask-images/")
|
||||
# path for temp images e.g. gallery generations which are not committed
|
||||
self.temp_image_path = os.path.join(self.result_path, "temp-images/")
|
||||
# path for thumbnail images
|
||||
self.thumbnail_image_path = os.path.join(self.result_path, "thumbnails/")
|
||||
self.thumbnail_image_path = os.path.join(
|
||||
self.result_path, "thumbnails/")
|
||||
# txt log
|
||||
self.log_path = os.path.join(self.result_path, "invoke_log.txt")
|
||||
# make all output paths
|
||||
@@ -290,7 +293,7 @@ class InvokeAIWebServer:
|
||||
def load_socketio_listeners(self, socketio):
|
||||
@socketio.on("requestSystemConfig")
|
||||
def handle_request_capabilities():
|
||||
print(f">> System config requested")
|
||||
print(">> System config requested")
|
||||
config = self.get_system_config()
|
||||
config["model_list"] = self.generate.model_manager.list_models()
|
||||
config["infill_methods"] = infill_methods()
|
||||
@@ -301,20 +304,19 @@ class InvokeAIWebServer:
|
||||
try:
|
||||
if not search_folder:
|
||||
socketio.emit(
|
||||
"foundModels",
|
||||
{'search_folder': None, 'found_models': None},
|
||||
)
|
||||
"foundModels",
|
||||
{'search_folder': None, 'found_models': None},
|
||||
)
|
||||
else:
|
||||
search_folder, found_models = self.generate.model_manager.search_models(search_folder)
|
||||
search_folder, found_models = self.generate.model_manager.search_models(
|
||||
search_folder)
|
||||
socketio.emit(
|
||||
"foundModels",
|
||||
{'search_folder': search_folder, 'found_models': found_models},
|
||||
{'search_folder': search_folder,
|
||||
'found_models': found_models},
|
||||
)
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
self.handle_exceptions(e)
|
||||
print("\n")
|
||||
|
||||
@socketio.on("addNewModel")
|
||||
@@ -344,11 +346,7 @@ class InvokeAIWebServer:
|
||||
)
|
||||
print(f">> New Model Added: {model_name}")
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
@socketio.on("deleteModel")
|
||||
def handle_delete_model(model_name: str):
|
||||
@@ -364,11 +362,7 @@ class InvokeAIWebServer:
|
||||
)
|
||||
print(f">> Model Deleted: {model_name}")
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
@socketio.on("requestModelChange")
|
||||
def handle_set_model(model_name: str):
|
||||
@@ -387,11 +381,110 @@ class InvokeAIWebServer:
{"model_name": model_name, "model_list": model_list},
)
except Exception as e:
self.socketio.emit("error", {"message": (str(e))})
print("\n")
self.handle_exceptions(e)

traceback.print_exc()
print("\n")
@socketio.on('convertToDiffusers')
def convert_to_diffusers(model_to_convert: dict):
try:
if (model_info := self.generate.model_manager.model_info(model_name=model_to_convert['model_name'])):
if 'weights' in model_info:
ckpt_path = Path(model_info['weights'])
original_config_file = Path(model_info['config'])
model_name = model_to_convert['model_name']
model_description = model_info['description']
else:
self.socketio.emit(
"error", {"message": "Model is not a valid checkpoint file"})
else:
self.socketio.emit(
"error", {"message": "Could not retrieve model info."})

if not ckpt_path.is_absolute():
ckpt_path = Path(Globals.root, ckpt_path)

if original_config_file and not original_config_file.is_absolute():
original_config_file = Path(
Globals.root, original_config_file)

diffusers_path = Path(
ckpt_path.parent.absolute(),
f'{model_name}_diffusers'
)

if model_to_convert['save_location'] == 'root':
diffusers_path = Path(
global_converted_ckpts_dir(), f'{model_name}_diffusers')

if model_to_convert['save_location'] == 'custom' and model_to_convert['custom_location'] is not None:
diffusers_path = Path(
model_to_convert['custom_location'], f'{model_name}_diffusers')

if diffusers_path.exists():
shutil.rmtree(diffusers_path)

self.generate.model_manager.convert_and_import(
ckpt_path,
diffusers_path,
model_name=model_name,
model_description=model_description,
vae=None,
original_config_file=original_config_file,
commit_to_conf=opt.conf,
)

new_model_list = self.generate.model_manager.list_models()
socketio.emit(
"modelConverted",
{"new_model_name": model_name,
"model_list": new_model_list, 'update': True},
)
print(f">> Model Converted: {model_name}")
except Exception as e:
self.handle_exceptions(e)

@socketio.on('mergeDiffusersModels')
def merge_diffusers_models(model_merge_info: dict):
try:
models_to_merge = model_merge_info['models_to_merge']
model_ids_or_paths = [
self.generate.model_manager.model_name_or_path(x) for x in models_to_merge]
merged_pipe = merge_diffusion_models(
model_ids_or_paths, model_merge_info['alpha'], model_merge_info['interp'], model_merge_info['force'])

dump_path = global_models_dir() / 'merged_models'
if model_merge_info['model_merge_save_path'] is not None:
dump_path = Path(model_merge_info['model_merge_save_path'])

os.makedirs(dump_path, exist_ok=True)
dump_path = dump_path / model_merge_info['merged_model_name']
merged_pipe.save_pretrained(dump_path, safe_serialization=1)

merged_model_config = dict(
model_name=model_merge_info['merged_model_name'],
description=f'Merge of models {", ".join(models_to_merge)}',
commit_to_conf=opt.conf
)

if vae := self.generate.model_manager.config[models_to_merge[0]].get("vae", None):
print(
f">> Using configured VAE assigned to {models_to_merge[0]}")
merged_model_config.update(vae=vae)

self.generate.model_manager.import_diffuser_model(
dump_path, **merged_model_config)
new_model_list = self.generate.model_manager.list_models()

socketio.emit(
"modelsMerged",
{"merged_models": models_to_merge,
"merged_model_name": model_merge_info['merged_model_name'],
"model_list": new_model_list, 'update': True},
)
print(f">> Models Merged: {models_to_merge}")
print(
f">> New Model Added: {model_merge_info['merged_model_name']}")
except Exception as e:
self.handle_exceptions(e)

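The two handlers added above are driven entirely over Socket.IO. A minimal sketch of a client that triggers them, assuming the python-socketio client package and the default local web address (both assumptions, not part of this change); the event names and payload keys are copied from the handler code above:

import socketio  # pip install "python-socketio[client]"; illustrative client only

sio = socketio.Client()
sio.connect("http://localhost:9090")  # assumed default InvokeAI web address

# Convert a legacy checkpoint to diffusers format (keys match convert_to_diffusers above)
sio.emit("convertToDiffusers", {
    "model_name": "my-ckpt-model",   # hypothetical model name
    "save_location": "root",         # 'root', 'custom', or anything else to use the ckpt's folder
    "custom_location": None,
})

# Merge diffusers models (keys match merge_diffusers_models above)
sio.emit("mergeDiffusersModels", {
    "models_to_merge": ["modelA", "modelB"],  # hypothetical model names
    "alpha": 0.5,
    "interp": "sigmoid",
    "force": False,
    "merged_model_name": "modelA-plus-B",
    "model_merge_save_path": None,
})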
@socketio.on("requestEmptyTempFolder")
|
||||
def empty_temp_folder():
|
||||
@@ -406,22 +499,20 @@ class InvokeAIWebServer:
|
||||
)
|
||||
os.remove(thumbnail_path)
|
||||
except Exception as e:
|
||||
socketio.emit("error", {"message": f"Unable to delete {f}: {str(e)}"})
|
||||
socketio.emit(
|
||||
"error", {"message": f"Unable to delete {f}: {str(e)}"})
|
||||
pass
|
||||
|
||||
socketio.emit("tempFolderEmptied")
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
@socketio.on("requestSaveStagingAreaImageToGallery")
|
||||
def save_temp_image_to_gallery(url):
|
||||
try:
|
||||
image_path = self.get_image_path_from_url(url)
|
||||
new_path = os.path.join(self.result_path, os.path.basename(image_path))
|
||||
new_path = os.path.join(
|
||||
self.result_path, os.path.basename(image_path))
|
||||
shutil.copy2(image_path, new_path)
|
||||
|
||||
if os.path.splitext(new_path)[1] == ".png":
|
||||
@@ -434,7 +525,8 @@ class InvokeAIWebServer:
|
||||
(width, height) = pil_image.size
|
||||
|
||||
thumbnail_path = save_thumbnail(
|
||||
pil_image, os.path.basename(new_path), self.thumbnail_image_path
|
||||
pil_image, os.path.basename(
|
||||
new_path), self.thumbnail_image_path
|
||||
)
|
||||
|
||||
image_array = [
|
||||
@@ -455,11 +547,7 @@ class InvokeAIWebServer:
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
@socketio.on("requestLatestImages")
|
||||
def handle_request_latest_images(category, latest_mtime):
|
||||
@@ -497,7 +585,8 @@ class InvokeAIWebServer:
|
||||
(width, height) = pil_image.size
|
||||
|
||||
thumbnail_path = save_thumbnail(
|
||||
pil_image, os.path.basename(path), self.thumbnail_image_path
|
||||
pil_image, os.path.basename(
|
||||
path), self.thumbnail_image_path
|
||||
)
|
||||
|
||||
image_array.append(
|
||||
@@ -515,7 +604,8 @@ class InvokeAIWebServer:
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
socketio.emit("error", {"message": f"Unable to load {path}: {str(e)}"})
|
||||
socketio.emit(
|
||||
"error", {"message": f"Unable to load {path}: {str(e)}"})
|
||||
pass
|
||||
|
||||
socketio.emit(
|
||||
@@ -523,11 +613,7 @@ class InvokeAIWebServer:
|
||||
{"images": image_array, "category": category},
|
||||
)
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
@socketio.on("requestImages")
|
||||
def handle_request_images(category, earliest_mtime=None):
|
||||
@@ -569,7 +655,8 @@ class InvokeAIWebServer:
|
||||
(width, height) = pil_image.size
|
||||
|
||||
thumbnail_path = save_thumbnail(
|
||||
pil_image, os.path.basename(path), self.thumbnail_image_path
|
||||
pil_image, os.path.basename(
|
||||
path), self.thumbnail_image_path
|
||||
)
|
||||
|
||||
image_array.append(
|
||||
@@ -588,7 +675,8 @@ class InvokeAIWebServer:
|
||||
)
|
||||
except Exception as e:
|
||||
print(f">> Unable to load {path}")
|
||||
socketio.emit("error", {"message": f"Unable to load {path}: {str(e)}"})
|
||||
socketio.emit(
|
||||
"error", {"message": f"Unable to load {path}: {str(e)}"})
|
||||
pass
|
||||
|
||||
socketio.emit(
|
||||
@@ -600,11 +688,7 @@ class InvokeAIWebServer:
|
||||
},
|
||||
)
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
@socketio.on("generateImage")
|
||||
def handle_generate_image_event(
|
||||
@@ -626,7 +710,8 @@ class InvokeAIWebServer:
|
||||
printable_parameters["init_mask"][:64] + "..."
|
||||
)
|
||||
|
||||
print(f'\n>> Image Generation Parameters:\n\n{printable_parameters}\n')
|
||||
print(
|
||||
f'\n>> Image Generation Parameters:\n\n{printable_parameters}\n')
|
||||
print(f'>> ESRGAN Parameters: {esrgan_parameters}')
|
||||
print(f'>> Facetool Parameters: {facetool_parameters}')
|
||||
|
||||
@@ -636,11 +721,7 @@ class InvokeAIWebServer:
|
||||
facetool_parameters,
|
||||
)
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
@socketio.on("runPostprocessing")
|
||||
def handle_run_postprocessing(original_image, postprocessing_parameters):
|
||||
@@ -662,16 +743,18 @@ class InvokeAIWebServer:
|
||||
|
||||
try:
|
||||
seed = original_image["metadata"]["image"]["seed"]
|
||||
except (KeyError) as e:
|
||||
except KeyError:
|
||||
seed = "unknown_seed"
|
||||
pass
|
||||
|
||||
if postprocessing_parameters["type"] == "esrgan":
|
||||
progress.set_current_status("common:statusUpscalingESRGAN")
|
||||
progress.set_current_status("common.statusUpscalingESRGAN")
|
||||
elif postprocessing_parameters["type"] == "gfpgan":
|
||||
progress.set_current_status("common:statusRestoringFacesGFPGAN")
|
||||
progress.set_current_status(
|
||||
"common.statusRestoringFacesGFPGAN")
|
||||
elif postprocessing_parameters["type"] == "codeformer":
|
||||
progress.set_current_status("common:statusRestoringFacesCodeFormer")
|
||||
progress.set_current_status(
|
||||
"common.statusRestoringFacesCodeFormer")
|
||||
|
||||
socketio.emit("progressUpdate", progress.to_formatted_dict())
|
||||
eventlet.sleep(0)
|
||||
@@ -680,7 +763,8 @@ class InvokeAIWebServer:
|
||||
image = self.esrgan.process(
|
||||
image=image,
|
||||
upsampler_scale=postprocessing_parameters["upscale"][0],
|
||||
strength=postprocessing_parameters["upscale"][1],
|
||||
denoise_str=postprocessing_parameters["upscale"][1],
|
||||
strength=postprocessing_parameters["upscale"][2],
|
||||
seed=seed,
|
||||
)
|
||||
elif postprocessing_parameters["type"] == "gfpgan":
|
||||
@@ -704,7 +788,7 @@ class InvokeAIWebServer:
|
||||
f'{postprocessing_parameters["type"]} is not a valid postprocessing type'
|
||||
)
|
||||
|
||||
progress.set_current_status("common:statusSavingImage")
|
||||
progress.set_current_status("common.statusSavingImage")
|
||||
socketio.emit("progressUpdate", progress.to_formatted_dict())
|
||||
eventlet.sleep(0)
|
||||
|
||||
@@ -751,15 +835,11 @@ class InvokeAIWebServer:
|
||||
},
|
||||
)
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
@socketio.on("cancel")
|
||||
def handle_cancel():
|
||||
print(f">> Cancel processing requested")
|
||||
print(">> Cancel processing requested")
|
||||
self.canceled.set()
|
||||
|
||||
# TODO: I think this needs a safety mechanism.
|
||||
@@ -780,11 +860,7 @@ class InvokeAIWebServer:
|
||||
{"url": url, "uuid": uuid, "category": category},
|
||||
)
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
# App Functions
|
||||
def get_system_config(self):
|
||||
@@ -841,12 +917,10 @@ class InvokeAIWebServer:
|
||||
So we need to convert each into a PIL Image.
|
||||
"""
|
||||
|
||||
truncated_outpaint_image_b64 = generation_parameters["init_img"][:64]
|
||||
truncated_outpaint_mask_b64 = generation_parameters["init_mask"][:64]
|
||||
|
||||
init_img_url = generation_parameters["init_img"]
|
||||
|
||||
original_bounding_box = generation_parameters["bounding_box"].copy()
|
||||
original_bounding_box = generation_parameters["bounding_box"].copy(
|
||||
)
|
||||
|
||||
initial_image = dataURL_to_image(
|
||||
generation_parameters["init_img"]
|
||||
@@ -923,7 +997,8 @@ class InvokeAIWebServer:
|
||||
elif generation_parameters["generation_mode"] == "img2img":
|
||||
init_img_url = generation_parameters["init_img"]
|
||||
init_img_path = self.get_image_path_from_url(init_img_url)
|
||||
generation_parameters["init_img"] = Image.open(init_img_path).convert('RGB')
|
||||
generation_parameters["init_img"] = Image.open(
|
||||
init_img_path).convert('RGB')
|
||||
|
||||
def image_progress(sample, step):
|
||||
if self.canceled.is_set():
|
||||
@@ -934,10 +1009,10 @@ class InvokeAIWebServer:
|
||||
nonlocal progress
|
||||
|
||||
generation_messages = {
|
||||
"txt2img": "common:statusGeneratingTextToImage",
|
||||
"img2img": "common:statusGeneratingImageToImage",
|
||||
"inpainting": "common:statusGeneratingInpainting",
|
||||
"outpainting": "common:statusGeneratingOutpainting",
|
||||
"txt2img": "common.statusGeneratingTextToImage",
|
||||
"img2img": "common.statusGeneratingImageToImage",
|
||||
"inpainting": "common.statusGeneratingInpainting",
|
||||
"outpainting": "common.statusGeneratingOutpainting",
|
||||
}
|
||||
|
||||
progress.set_current_step(step + 1)
|
||||
@@ -982,9 +1057,9 @@ class InvokeAIWebServer:
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
if generation_parameters["progress_latents"]:
|
||||
image = self.generate.sample_to_lowres_estimated_image(sample)
|
||||
image = self.generate.sample_to_lowres_estimated_image(
|
||||
sample)
|
||||
(width, height) = image.size
|
||||
width *= 8
|
||||
height *= 8
|
||||
@@ -1003,7 +1078,8 @@ class InvokeAIWebServer:
|
||||
},
|
||||
)
|
||||
|
||||
self.socketio.emit("progressUpdate", progress.to_formatted_dict())
|
||||
self.socketio.emit(
|
||||
"progressUpdate", progress.to_formatted_dict())
|
||||
eventlet.sleep(0)
|
||||
|
||||
def image_done(image, seed, first_seed, attention_maps_image=None):
|
||||
@@ -1015,7 +1091,6 @@ class InvokeAIWebServer:
|
||||
nonlocal facetool_parameters
|
||||
nonlocal progress
|
||||
|
||||
step_index = 1
|
||||
nonlocal prior_variations
|
||||
|
||||
"""
|
||||
@@ -1029,9 +1104,10 @@ class InvokeAIWebServer:
|
||||
**generation_parameters["bounding_box"],
|
||||
)
|
||||
|
||||
progress.set_current_status("common:statusGenerationComplete")
|
||||
progress.set_current_status("common.statusGenerationComplete")
|
||||
|
||||
self.socketio.emit("progressUpdate", progress.to_formatted_dict())
|
||||
self.socketio.emit(
|
||||
"progressUpdate", progress.to_formatted_dict())
|
||||
eventlet.sleep(0)
|
||||
|
||||
all_parameters = generation_parameters
|
||||
@@ -1042,7 +1118,8 @@ class InvokeAIWebServer:
|
||||
and all_parameters["variation_amount"] > 0
|
||||
):
|
||||
first_seed = first_seed or seed
|
||||
this_variation = [[seed, all_parameters["variation_amount"]]]
|
||||
this_variation = [
|
||||
[seed, all_parameters["variation_amount"]]]
|
||||
all_parameters["with_variations"] = (
|
||||
prior_variations + this_variation
|
||||
)
|
||||
@@ -1056,14 +1133,16 @@ class InvokeAIWebServer:
|
||||
raise CanceledException
|
||||
|
||||
if esrgan_parameters:
|
||||
progress.set_current_status("common:statusUpscaling")
|
||||
progress.set_current_status("common.statusUpscaling")
|
||||
progress.set_current_status_has_steps(False)
|
||||
self.socketio.emit("progressUpdate", progress.to_formatted_dict())
|
||||
self.socketio.emit(
|
||||
"progressUpdate", progress.to_formatted_dict())
|
||||
eventlet.sleep(0)
|
||||
|
||||
image = self.esrgan.process(
|
||||
image=image,
|
||||
upsampler_scale=esrgan_parameters["level"],
|
||||
denoise_str=esrgan_parameters['denoise_str'],
|
||||
strength=esrgan_parameters["strength"],
|
||||
seed=seed,
|
||||
)
|
||||
@@ -1071,6 +1150,7 @@ class InvokeAIWebServer:
|
||||
postprocessing = True
|
||||
all_parameters["upscale"] = [
|
||||
esrgan_parameters["level"],
|
||||
esrgan_parameters['denoise_str'],
|
||||
esrgan_parameters["strength"],
|
||||
]
|
||||
|
||||
@@ -1079,12 +1159,15 @@ class InvokeAIWebServer:
|
||||
|
||||
if facetool_parameters:
|
||||
if facetool_parameters["type"] == "gfpgan":
|
||||
progress.set_current_status("common:statusRestoringFacesGFPGAN")
|
||||
progress.set_current_status(
|
||||
"common.statusRestoringFacesGFPGAN")
|
||||
elif facetool_parameters["type"] == "codeformer":
|
||||
progress.set_current_status("common:statusRestoringFacesCodeFormer")
|
||||
progress.set_current_status(
|
||||
"common.statusRestoringFacesCodeFormer")
|
||||
|
||||
progress.set_current_status_has_steps(False)
|
||||
self.socketio.emit("progressUpdate", progress.to_formatted_dict())
|
||||
self.socketio.emit(
|
||||
"progressUpdate", progress.to_formatted_dict())
|
||||
eventlet.sleep(0)
|
||||
|
||||
if facetool_parameters["type"] == "gfpgan":
|
||||
@@ -1113,8 +1196,9 @@ class InvokeAIWebServer:
|
||||
]
|
||||
all_parameters["facetool_type"] = facetool_parameters["type"]
|
||||
|
||||
progress.set_current_status("common:statusSavingImage")
|
||||
self.socketio.emit("progressUpdate", progress.to_formatted_dict())
|
||||
progress.set_current_status("common.statusSavingImage")
|
||||
self.socketio.emit(
|
||||
"progressUpdate", progress.to_formatted_dict())
|
||||
eventlet.sleep(0)
|
||||
|
||||
# restore the stashed URLS and discard the paths, we are about to send the result to client
|
||||
@@ -1125,12 +1209,14 @@ class InvokeAIWebServer:
|
||||
)
|
||||
|
||||
if "init_mask" in all_parameters:
|
||||
all_parameters["init_mask"] = "" # TODO: store the mask in metadata
|
||||
# TODO: store the mask in metadata
|
||||
all_parameters["init_mask"] = ""
|
||||
|
||||
if generation_parameters["generation_mode"] == "unifiedCanvas":
|
||||
all_parameters["bounding_box"] = original_bounding_box
|
||||
|
||||
metadata = self.parameters_to_generated_image_metadata(all_parameters)
|
||||
metadata = self.parameters_to_generated_image_metadata(
|
||||
all_parameters)
|
||||
|
||||
command = parameters_to_command(all_parameters)
|
||||
|
||||
@@ -1160,17 +1246,20 @@ class InvokeAIWebServer:
|
||||
|
||||
if progress.total_iterations > progress.current_iteration:
|
||||
progress.set_current_step(1)
|
||||
progress.set_current_status("common:statusIterationComplete")
|
||||
progress.set_current_status(
|
||||
"common.statusIterationComplete")
|
||||
progress.set_current_status_has_steps(False)
|
||||
else:
|
||||
progress.mark_complete()
|
||||
|
||||
self.socketio.emit("progressUpdate", progress.to_formatted_dict())
|
||||
self.socketio.emit(
|
||||
"progressUpdate", progress.to_formatted_dict())
|
||||
eventlet.sleep(0)
|
||||
|
||||
parsed_prompt, _ = get_prompt_structure(generation_parameters["prompt"])
|
||||
parsed_prompt, _ = get_prompt_structure(
|
||||
generation_parameters["prompt"])
|
||||
tokens = None if type(parsed_prompt) is Blend else \
|
||||
get_tokens_for_prompt(self.generate.model, parsed_prompt)
|
||||
get_tokens_for_prompt_object(get_tokenizer(self.generate.model), parsed_prompt)
|
||||
attention_maps_image_base64_url = None if attention_maps_image is None \
|
||||
else image_to_dataURL(attention_maps_image)
|
||||
|
||||
@@ -1221,11 +1310,7 @@ class InvokeAIWebServer:
|
||||
# Clear the CUDA cache on an exception
|
||||
self.empty_cuda_cache()
|
||||
print(e)
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
def empty_cuda_cache(self):
|
||||
if self.generate.device.type == "cuda":
|
||||
@@ -1287,7 +1372,8 @@ class InvokeAIWebServer:
|
||||
{
|
||||
"type": "esrgan",
|
||||
"scale": int(parameters["upscale"][0]),
|
||||
"strength": float(parameters["upscale"][1]),
|
||||
"denoise_str": int(parameters["upscale"][1]),
|
||||
"strength": float(parameters["upscale"][2]),
|
||||
}
|
||||
)
|
||||
|
||||
@@ -1298,13 +1384,6 @@ class InvokeAIWebServer:
|
||||
# semantic drift
|
||||
rfc_dict["sampler"] = parameters["sampler_name"]
|
||||
|
||||
# display weighted subprompts (liable to change)
|
||||
subprompts = split_weighted_subprompts(
|
||||
parameters["prompt"], skip_normalize=True
|
||||
)
|
||||
subprompts = [{"prompt": x[0], "weight": x[1]} for x in subprompts]
|
||||
rfc_dict["prompt"] = subprompts
|
||||
|
||||
# 'variations' should always exist and be an array, empty or consisting of {'seed': seed, 'weight': weight} pairs
|
||||
variations = []
|
||||
|
||||
@@ -1331,17 +1410,14 @@ class InvokeAIWebServer:
|
||||
return metadata
|
||||
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
def parameters_to_post_processed_image_metadata(
|
||||
self, parameters, original_image_path
|
||||
):
|
||||
try:
|
||||
current_metadata = retrieve_metadata(original_image_path)["sd-metadata"]
|
||||
current_metadata = retrieve_metadata(
|
||||
original_image_path)["sd-metadata"]
|
||||
postprocessing_metadata = {}
|
||||
|
||||
"""
|
||||
@@ -1361,7 +1437,8 @@ class InvokeAIWebServer:
|
||||
if parameters["type"] == "esrgan":
|
||||
postprocessing_metadata["type"] = "esrgan"
|
||||
postprocessing_metadata["scale"] = parameters["upscale"][0]
|
||||
postprocessing_metadata["strength"] = parameters["upscale"][1]
|
||||
postprocessing_metadata["denoise_str"] = parameters["upscale"][1]
|
||||
postprocessing_metadata["strength"] = parameters["upscale"][2]
|
||||
elif parameters["type"] == "gfpgan":
|
||||
postprocessing_metadata["type"] = "gfpgan"
|
||||
postprocessing_metadata["strength"] = parameters["facetool_strength"]
|
||||
@@ -1380,16 +1457,13 @@ class InvokeAIWebServer:
|
||||
postprocessing_metadata
|
||||
)
|
||||
else:
|
||||
current_metadata["image"]["postprocessing"] = [postprocessing_metadata]
|
||||
current_metadata["image"]["postprocessing"] = [
|
||||
postprocessing_metadata]
|
||||
|
||||
return current_metadata
|
||||
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
def save_result_image(
|
||||
self,
|
||||
@@ -1419,7 +1493,7 @@ class InvokeAIWebServer:
|
||||
if step_index:
|
||||
filename += f".{step_index}"
|
||||
if postprocessing:
|
||||
filename += f".postprocessed"
|
||||
filename += ".postprocessed"
|
||||
|
||||
filename += ".png"
|
||||
|
||||
@@ -1433,11 +1507,7 @@ class InvokeAIWebServer:
|
||||
return os.path.abspath(path)
|
||||
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
def make_unique_init_image_filename(self, name):
|
||||
try:
|
||||
@@ -1446,11 +1516,7 @@ class InvokeAIWebServer:
|
||||
name = f"{split[0]}.{uuid}{split[1]}"
|
||||
return name
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
def calculate_real_steps(self, steps, strength, has_init_image):
|
||||
import math
|
||||
@@ -1465,11 +1531,7 @@ class InvokeAIWebServer:
|
||||
file.writelines(message)
|
||||
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
def get_image_path_from_url(self, url):
|
||||
"""Given a url to an image used by the client, returns the absolute file path to that image"""
|
||||
@@ -1492,18 +1554,15 @@ class InvokeAIWebServer:
|
||||
)
|
||||
elif "thumbnails" in url:
|
||||
return os.path.abspath(
|
||||
os.path.join(self.thumbnail_image_path, os.path.basename(url))
|
||||
os.path.join(self.thumbnail_image_path,
|
||||
os.path.basename(url))
|
||||
)
|
||||
else:
|
||||
return os.path.abspath(
|
||||
os.path.join(self.result_path, os.path.basename(url))
|
||||
)
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
def get_url_from_image_path(self, path):
|
||||
"""Given an absolute file path to an image, returns the URL that the client can use to load the image"""
|
||||
@@ -1521,11 +1580,7 @@ class InvokeAIWebServer:
|
||||
else:
|
||||
return os.path.join(self.result_url, os.path.basename(path))
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
def save_file_unique_uuid_name(self, bytes, name, path):
try:
@@ -1544,11 +1599,13 @@ class InvokeAIWebServer:

return file_path
except Exception as e:
self.socketio.emit("error", {"message": (str(e))})
print("\n")
self.handle_exceptions(e)

traceback.print_exc()
print("\n")
def handle_exceptions(self, exception, emit_key: str = 'error'):
self.socketio.emit(emit_key, {"message": (str(exception))})
print("\n")
traceback.print_exc()
print("\n")

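The handle_exceptions method added above centralizes the error reporting that was previously copy-pasted into every except block of this file (emit an "error" event to the client, then print the traceback to the server console). A minimal standalone sketch of the same pattern, assuming only a Socket.IO-like object with an emit method:

import traceback

def handle_exceptions(socketio, exception, emit_key: str = "error"):
    # Sketch of the consolidated helper: report the error to the web client,
    # then dump the traceback on the server side.
    socketio.emit(emit_key, {"message": str(exception)})
    print("\n")
    traceback.print_exc()
    print("\n")

# Every handler in the diff now reduces its except block to a single call:
#     except Exception as e:
#         self.handle_exceptions(e)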
class Progress:
|
||||
@@ -1569,7 +1626,7 @@ class Progress:
|
||||
self.total_iterations = (
|
||||
generation_parameters["iterations"] if generation_parameters else 1
|
||||
)
|
||||
self.current_status = "common:statusPreparing"
|
||||
self.current_status = "common.statusPreparing"
|
||||
self.is_processing = True
|
||||
self.current_status_has_steps = False
|
||||
self.has_error = False
|
||||
@@ -1599,7 +1656,7 @@ class Progress:
|
||||
self.has_error = has_error
|
||||
|
||||
def mark_complete(self):
|
||||
self.current_status = "common:statusProcessingComplete"
|
||||
self.current_status = "common.statusProcessingComplete"
|
||||
self.current_step = 0
|
||||
self.total_steps = 0
|
||||
self.current_iteration = 0
|
||||
@@ -1661,10 +1718,12 @@ def dataURL_to_image(dataURL: str) -> ImageType:
|
||||
)
|
||||
return image
|
||||
|
||||
|
||||
"""
|
||||
Converts an image into a base64 image dataURL.
|
||||
"""
|
||||
|
||||
|
||||
def image_to_dataURL(image: ImageType) -> str:
|
||||
buffered = io.BytesIO()
|
||||
image.save(buffered, format="PNG")
|
||||
@@ -1674,7 +1733,6 @@ def image_to_dataURL(image: ImageType) -> str:
|
||||
return image_base64
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Converts a base64 image dataURL into bytes.
|
||||
The dataURL is split on the first commma.
|
||||
|
||||
@@ -6,83 +6,83 @@ stable-diffusion-1.5:
|
||||
repo_id: stabilityai/sd-vae-ft-mse
|
||||
recommended: True
|
||||
default: True
|
||||
inpainting-1.5:
|
||||
sd-inpainting-1.5:
|
||||
description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
|
||||
repo_id: runwayml/stable-diffusion-inpainting
|
||||
format: diffusers
|
||||
vae:
|
||||
repo_id: stabilityai/sd-vae-ft-mse
|
||||
recommended: True
|
||||
dreamlike-diffusion-1.0:
|
||||
description: An SD 1.5 model fine tuned on high quality art by dreamlike.art, diffusers version (2.13 GB)
|
||||
format: diffusers
|
||||
repo_id: dreamlike-art/dreamlike-diffusion-1.0
|
||||
vae:
|
||||
repo_id: stabilityai/sd-vae-ft-mse
|
||||
recommended: True
|
||||
dreamlike-photoreal-2.0:
|
||||
description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)
|
||||
format: diffusers
|
||||
repo_id: dreamlike-art/dreamlike-photoreal-2.0
|
||||
recommended: False
|
||||
stable-diffusion-2.1-768:
|
||||
description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)
|
||||
repo_id: stabilityai/stable-diffusion-2-1
|
||||
format: diffusers
|
||||
recommended: True
|
||||
stable-diffusion-2.1-base:
|
||||
description: Stable Diffusion version 2.1 diffusers base model, trained on 512 pixel images (5.21 GB)
|
||||
description: Stable Diffusion version 2.1 diffusers model, trained on 512 pixel images (5.21 GB)
|
||||
repo_id: stabilityai/stable-diffusion-2-1-base
|
||||
format: diffusers
|
||||
recommended: False
|
||||
sd-inpainting-2.0:
|
||||
description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
|
||||
repo_id: stabilityai/stable-diffusion-2-inpainting
|
||||
format: diffusers
|
||||
recommended: False
|
||||
analog-diffusion-1.0:
|
||||
description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
|
||||
repo_id: wavymulder/Analog-Diffusion
|
||||
format: diffusers
|
||||
recommended: false
|
||||
deliberate-1.0:
|
||||
description: Versatile model that produces detailed images up to 768px (4.27 GB)
|
||||
format: diffusers
|
||||
repo_id: XpucT/Deliberate
|
||||
recommended: False
|
||||
d&d-diffusion-1.0:
|
||||
description: Dungeons & Dragons characters (2.13 GB)
|
||||
format: diffusers
|
||||
repo_id: 0xJustin/Dungeons-and-Diffusion
|
||||
recommended: False
|
||||
dreamlike-photoreal-2.0:
|
||||
description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)
|
||||
format: diffusers
|
||||
repo_id: dreamlike-art/dreamlike-photoreal-2.0
|
||||
recommended: False
|
||||
inkpunk-1.0:
|
||||
description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)
|
||||
format: diffusers
|
||||
repo_id: Envvi/Inkpunk-Diffusion
|
||||
recommended: False
|
||||
openjourney-4.0:
|
||||
description: An SD 1.5 model fine tuned on Midjourney images by PromptHero - include "mdjrny-v4 style" in your prompts (2.13 GB)
|
||||
format: diffusers
|
||||
repo_id: prompthero/openjourney
|
||||
vae:
|
||||
description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)
|
||||
format: diffusers
|
||||
repo_id: prompthero/openjourney
|
||||
vae:
|
||||
repo_id: stabilityai/sd-vae-ft-mse
|
||||
recommended: False
|
||||
nitro-diffusion-1.0:
|
||||
description: A SD 1.5 model trained on three artstyles - prompt with "archer style", "arcane style" and/or "modern disney style" (2.13 GB)
|
||||
repo_id: nitrosocke/Nitro-Diffusion
|
||||
recommended: False
|
||||
portrait-plus-1.0:
|
||||
description: An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)
|
||||
format: diffusers
|
||||
repo_id: wavymulder/portraitplus
|
||||
recommended: False
|
||||
seek-art-mega-1.0:
|
||||
description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)
|
||||
repo_id: coreco/seek.art_MEGA
|
||||
format: diffusers
|
||||
vae:
|
||||
repo_id: stabilityai/sd-vae-ft-mse
|
||||
recommended: False
|
||||
trinart-2.0:
|
||||
description: An SD model finetuned with ~40,000 assorted high resolution manga/anime-style pictures, diffusers version (2.13 GB)
|
||||
description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)
|
||||
repo_id: naclbit/trinart_stable_diffusion_v2
|
||||
format: diffusers
|
||||
vae:
|
||||
repo_id: stabilityai/sd-vae-ft-mse
|
||||
recommended: False
|
||||
trinart-characters-2_0:
|
||||
description: An SD model finetuned with 19.2M anime/manga style images (ckpt version) (4.27 GB)
|
||||
repo_id: naclbit/trinart_derrida_characters_v2_stable_diffusion
|
||||
config: v1-inference.yaml
|
||||
file: derrida_final.ckpt
|
||||
format: ckpt
|
||||
waifu-diffusion-1.4:
|
||||
description: An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)
|
||||
repo_id: hakurei/waifu-diffusion
|
||||
format: diffusers
|
||||
vae:
|
||||
repo_id: naclbit/trinart_derrida_characters_v2_stable_diffusion
|
||||
file: autoencoder_fix_kl-f8-trinart_characters.ckpt
|
||||
width: 512
|
||||
height: 512
|
||||
recommended: False
|
||||
ft-mse-improved-autoencoder-840000:
|
||||
description: StabilityAI improved autoencoder fine-tuned for human faces. Improves legacy .ckpt models (335 MB)
|
||||
repo_id: stabilityai/sd-vae-ft-mse-original
|
||||
format: ckpt
|
||||
config: VAE/default
|
||||
file: vae-ft-mse-840000-ema-pruned.ckpt
|
||||
width: 512
|
||||
height: 512
|
||||
recommended: True
|
||||
trinart_vae:
|
||||
description: Custom autoencoder for trinart_characters for legacy .ckpt models only (335 MB)
|
||||
repo_id: naclbit/trinart_characters_19.2m_stable_diffusion_v1
|
||||
config: VAE/trinart
|
||||
format: ckpt
|
||||
file: autoencoder_fix_kl-f8-trinart_characters.ckpt
|
||||
width: 512
|
||||
height: 512
|
||||
repo_id: stabilityai/sd-vae-ft-mse
|
||||
recommended: False
|
||||
|
||||
67 invokeai/configs/stable-diffusion/v2-inference.yaml Normal file
@@ -0,0 +1,67 @@
model:
  base_learning_rate: 1.0e-4
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False # we set this to false because this is an inference only config

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        use_checkpoint: True
        use_fp16: True
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_head_channels: 64 # need to fix for flash-attn
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: 1
        context_dim: 1024
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          #attn_type: "vanilla-xformers"
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
          lossconfig:
            target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
      params:
        freeze: True
        layer: "penultimate"
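This new file is the stock Stability AI inference config for Stable Diffusion 2.x models (OpenCLIP text encoder, context_dim 1024), presumably used when loading and converting v2 checkpoints. A minimal sketch of inspecting it, assuming the omegaconf package that the ldm stack already depends on:

from omegaconf import OmegaConf

# Illustrative only: load the new config and read a few fields from it.
cfg = OmegaConf.load("invokeai/configs/stable-diffusion/v2-inference.yaml")
print(cfg.model.target)                                 # ldm.models.diffusion.ddpm.LatentDiffusion
print(cfg.model.params.cond_stage_config.target)        # FrozenOpenCLIPEmbedder (SD 2.x text encoder)
print(cfg.model.params.unet_config.params.context_dim)  # 1024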
@@ -2,4 +2,4 @@ dist/
.husky/
node_modules/
patches/
public/
stats.html

11 invokeai/frontend/.gitignore vendored
@@ -25,4 +25,13 @@ dist-ssr
*.sw?

# build stats
stats.html
stats.html

# Yarn - https://yarnpkg.com/getting-started/qa#which-files-should-be-gitignored
.pnp.*
.yarn/*
!.yarn/patches
!.yarn/plugins
!.yarn/releases
!.yarn/sdks
!.yarn/versions
@@ -1,4 +1,4 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"

cd invokeai/frontend/ && npx run lint
cd invokeai/frontend/ && npm run lint-staged

@@ -2,4 +2,4 @@ dist/
.husky/
node_modules/
patches/
public/
stats.html

@@ -1,6 +1,15 @@
module.exports = {
trailingComma: 'es5',
tabWidth: 2,
endOfLine: 'auto',
semi: true,
singleQuote: true,
overrides: [
{
files: ['public/locales/*.json'],
options: {
tabWidth: 4,
},
},
],
};

193957 invokeai/frontend/.yarn/releases/yarn-1.22.19.cjs vendored Normal file
File diff suppressed because one or more lines are too long
5 invokeai/frontend/.yarnrc Normal file
@@ -0,0 +1,5 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1


yarn-path ".yarn/releases/yarn-1.22.19.cjs"
1 invokeai/frontend/.yarnrc.yml Normal file
@@ -0,0 +1 @@
yarnPath: .yarn/releases/yarn-1.22.19.cjs
@@ -7,7 +7,7 @@ The UI is in `invokeai/frontend`.
Install [node](https://nodejs.org/en/download/) (includes npm) and
[yarn](https://yarnpkg.com/getting-started/install).

From `invokeai/frontend/` run `yarn install` to get everything set up.
From `invokeai/frontend/` run `yarn install --immutable` to get everything set up.

## Dev

1 invokeai/frontend/dist/assets/index-14cb2922.css vendored Normal file
File diff suppressed because one or more lines are too long
638 invokeai/frontend/dist/assets/index-252612ad.js vendored
File diff suppressed because one or more lines are too long
624 invokeai/frontend/dist/assets/index-c09cf9ca.js vendored Normal file
File diff suppressed because one or more lines are too long
4 invokeai/frontend/dist/index.html vendored
@@ -5,8 +5,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
<script type="module" crossorigin src="./assets/index-252612ad.js"></script>
<link rel="stylesheet" href="./assets/index-b0bf79f4.css">
<script type="module" crossorigin src="./assets/index-c09cf9ca.js"></script>
<link rel="stylesheet" href="./assets/index-14cb2922.css">
</head>

<body>

520 invokeai/frontend/dist/locales/ar.json vendored Normal file
@@ -0,0 +1,520 @@
|
||||
{
|
||||
"common": {
|
||||
"hotkeysLabel": "مفاتيح الأختصار",
|
||||
"themeLabel": "الموضوع",
|
||||
"languagePickerLabel": "منتقي اللغة",
|
||||
"reportBugLabel": "بلغ عن خطأ",
|
||||
"settingsLabel": "إعدادات",
|
||||
"darkTheme": "داكن",
|
||||
"lightTheme": "فاتح",
|
||||
"greenTheme": "أخضر",
|
||||
"text2img": "نص إلى صورة",
|
||||
"img2img": "صورة إلى صورة",
|
||||
"unifiedCanvas": "لوحة موحدة",
|
||||
"nodes": "عقد",
|
||||
"langArabic": "العربية",
|
||||
"nodesDesc": "نظام مبني على العقد لإنتاج الصور قيد التطوير حاليًا. تبقى على اتصال مع تحديثات حول هذه الميزة المذهلة.",
|
||||
"postProcessing": "معالجة بعد الإصدار",
|
||||
"postProcessDesc1": "Invoke AI توفر مجموعة واسعة من ميزات المعالجة بعد الإصدار. تحسين الصور واستعادة الوجوه متاحين بالفعل في واجهة الويب. يمكنك الوصول إليهم من الخيارات المتقدمة في قائمة الخيارات في علامة التبويب Text To Image و Image To Image. يمكن أيضًا معالجة الصور مباشرةً باستخدام أزرار الإجراء على الصورة فوق عرض الصورة الحالي أو في العارض.",
|
||||
"postProcessDesc2": "سيتم إصدار واجهة رسومية مخصصة قريبًا لتسهيل عمليات المعالجة بعد الإصدار المتقدمة.",
|
||||
"postProcessDesc3": "واجهة سطر الأوامر Invoke AI توفر ميزات أخرى عديدة بما في ذلك Embiggen.",
|
||||
"training": "تدريب",
|
||||
"trainingDesc1": "تدفق خاص مخصص لتدريب تضميناتك الخاصة ونقاط التحقق باستخدام العكس النصي و دريم بوث من واجهة الويب.",
|
||||
"trainingDesc2": " استحضر الذكاء الصناعي يدعم بالفعل تدريب تضمينات مخصصة باستخدام العكس النصي باستخدام السكريبت الرئيسي.",
|
||||
"upload": "رفع",
|
||||
"close": "إغلاق",
|
||||
"load": "تحميل",
|
||||
"back": "الى الخلف",
|
||||
"statusConnected": "متصل",
|
||||
"statusDisconnected": "غير متصل",
|
||||
"statusError": "خطأ",
|
||||
"statusPreparing": "جاري التحضير",
|
||||
"statusProcessingCanceled": "تم إلغاء المعالجة",
|
||||
"statusProcessingComplete": "اكتمال المعالجة",
|
||||
"statusGenerating": "جاري التوليد",
|
||||
"statusGeneratingTextToImage": "جاري توليد النص إلى الصورة",
|
||||
"statusGeneratingImageToImage": "جاري توليد الصورة إلى الصورة",
|
||||
"statusGeneratingInpainting": "جاري توليد Inpainting",
|
||||
"statusGeneratingOutpainting": "جاري توليد Outpainting",
|
||||
"statusGenerationComplete": "اكتمال التوليد",
|
||||
"statusIterationComplete": "اكتمال التكرار",
|
||||
"statusSavingImage": "جاري حفظ الصورة",
|
||||
"statusRestoringFaces": "جاري استعادة الوجوه",
|
||||
"statusRestoringFacesGFPGAN": "تحسيت الوجوه (جي إف بي جان)",
|
||||
"statusRestoringFacesCodeFormer": "تحسين الوجوه (كود فورمر)",
|
||||
"statusUpscaling": "تحسين الحجم",
|
||||
"statusUpscalingESRGAN": "تحسين الحجم (إي إس آر جان)",
|
||||
"statusLoadingModel": "تحميل النموذج",
|
||||
"statusModelChanged": "تغير النموذج"
|
||||
},
|
||||
"gallery": {
|
||||
"generations": "الأجيال",
|
||||
"showGenerations": "عرض الأجيال",
|
||||
"uploads": "التحميلات",
|
||||
"showUploads": "عرض التحميلات",
|
||||
"galleryImageSize": "حجم الصورة",
|
||||
"galleryImageResetSize": "إعادة ضبط الحجم",
|
||||
"gallerySettings": "إعدادات المعرض",
|
||||
"maintainAspectRatio": "الحفاظ على نسبة الأبعاد",
|
||||
"autoSwitchNewImages": "التبديل التلقائي إلى الصور الجديدة",
|
||||
"singleColumnLayout": "تخطيط عمود واحد",
|
||||
"pinGallery": "تثبيت المعرض",
|
||||
"allImagesLoaded": "تم تحميل جميع الصور",
|
||||
"loadMore": "تحميل المزيد",
|
||||
"noImagesInGallery": "لا توجد صور في المعرض"
|
||||
},
|
||||
"hotkeys": {
|
||||
"keyboardShortcuts": "مفاتيح الأزرار المختصرة",
|
||||
"appHotkeys": "مفاتيح التطبيق",
|
||||
"generalHotkeys": "مفاتيح عامة",
|
||||
"galleryHotkeys": "مفاتيح المعرض",
|
||||
"unifiedCanvasHotkeys": "مفاتيح اللوحةالموحدة ",
|
||||
"invoke": {
|
||||
"title": "أدعو",
|
||||
"desc": "إنشاء صورة"
|
||||
},
|
||||
"cancel": {
|
||||
"title": "إلغاء",
|
||||
"desc": "إلغاء إنشاء الصورة"
|
||||
},
|
||||
"focusPrompt": {
|
||||
"title": "تركيز الإشعار",
|
||||
"desc": "تركيز منطقة الإدخال الإشعار"
|
||||
},
|
||||
"toggleOptions": {
|
||||
"title": "تبديل الخيارات",
|
||||
"desc": "فتح وإغلاق لوحة الخيارات"
|
||||
},
|
||||
"pinOptions": {
|
||||
"title": "خيارات التثبيت",
|
||||
"desc": "ثبت لوحة الخيارات"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "تبديل العارض",
|
||||
"desc": "فتح وإغلاق مشاهد الصور"
|
||||
},
|
||||
"toggleGallery": {
|
||||
"title": "تبديل المعرض",
|
||||
"desc": "فتح وإغلاق درابزين المعرض"
|
||||
},
|
||||
"maximizeWorkSpace": {
|
||||
"title": "تكبير مساحة العمل",
|
||||
"desc": "إغلاق اللوحات وتكبير مساحة العمل"
|
||||
},
|
||||
"changeTabs": {
|
||||
"title": "تغيير الألسنة",
|
||||
"desc": "التبديل إلى مساحة عمل أخرى"
|
||||
},
|
||||
"consoleToggle": {
|
||||
"title": "تبديل الطرفية",
|
||||
"desc": "فتح وإغلاق الطرفية"
|
||||
},
|
||||
"setPrompt": {
|
||||
"title": "ضبط التشعب",
|
||||
"desc": "استخدم تشعب الصورة الحالية"
|
||||
},
|
||||
"setSeed": {
|
||||
"title": "ضبط البذور",
|
||||
"desc": "استخدم بذور الصورة الحالية"
|
||||
},
|
||||
"setParameters": {
|
||||
"title": "ضبط المعلمات",
|
||||
"desc": "استخدم جميع المعلمات الخاصة بالصورة الحالية"
|
||||
},
|
||||
"restoreFaces": {
|
||||
"title": "استعادة الوجوه",
|
||||
"desc": "استعادة الصورة الحالية"
|
||||
},
|
||||
"upscale": {
|
||||
"title": "تحسين الحجم",
|
||||
"desc": "تحسين حجم الصورة الحالية"
|
||||
},
|
||||
"showInfo": {
|
||||
"title": "عرض المعلومات",
|
||||
"desc": "عرض معلومات البيانات الخاصة بالصورة الحالية"
|
||||
},
|
||||
"sendToImageToImage": {
|
||||
"title": "أرسل إلى صورة إلى صورة",
|
||||
"desc": "أرسل الصورة الحالية إلى صورة إلى صورة"
|
||||
},
|
||||
"deleteImage": {
|
||||
"title": "حذف الصورة",
|
||||
"desc": "حذف الصورة الحالية"
|
||||
},
|
||||
"closePanels": {
|
||||
"title": "أغلق اللوحات",
|
||||
"desc": "يغلق اللوحات المفتوحة"
|
||||
},
|
||||
"previousImage": {
|
||||
"title": "الصورة السابقة",
|
||||
"desc": "عرض الصورة السابقة في الصالة"
|
||||
},
|
||||
"nextImage": {
|
||||
"title": "الصورة التالية",
|
||||
"desc": "عرض الصورة التالية في الصالة"
|
||||
},
|
||||
"toggleGalleryPin": {
|
||||
"title": "تبديل تثبيت الصالة",
|
||||
"desc": "يثبت ويفتح تثبيت الصالة على الواجهة الرسومية"
|
||||
},
|
||||
"increaseGalleryThumbSize": {
|
||||
"title": "زيادة حجم صورة الصالة",
|
||||
"desc": "يزيد حجم الصور المصغرة في الصالة"
|
||||
},
|
||||
"decreaseGalleryThumbSize": {
|
||||
"title": "انقاص حجم صورة الصالة",
|
||||
"desc": "ينقص حجم الصور المصغرة في الصالة"
|
||||
},
|
||||
"selectBrush": {
|
||||
"title": "تحديد الفرشاة",
|
||||
"desc": "يحدد الفرشاة على اللوحة"
|
||||
},
|
||||
"selectEraser": {
|
||||
"title": "تحديد الممحاة",
|
||||
"desc": "يحدد الممحاة على اللوحة"
|
||||
},
|
||||
"decreaseBrushSize": {
|
||||
"title": "تصغير حجم الفرشاة",
|
||||
"desc": "يصغر حجم الفرشاة/الممحاة على اللوحة"
|
||||
},
|
||||
"increaseBrushSize": {
|
||||
"title": "زيادة حجم الفرشاة",
|
||||
"desc": "يزيد حجم فرشة اللوحة / الممحاة"
|
||||
},
|
||||
"decreaseBrushOpacity": {
|
||||
"title": "تخفيض شفافية الفرشاة",
|
||||
"desc": "يخفض شفافية فرشة اللوحة"
|
||||
},
|
||||
"increaseBrushOpacity": {
|
||||
"title": "زيادة شفافية الفرشاة",
|
||||
"desc": "يزيد شفافية فرشة اللوحة"
|
||||
},
|
||||
"moveTool": {
|
||||
"title": "أداة التحريك",
|
||||
"desc": "يتيح التحرك في اللوحة"
|
||||
},
|
||||
"fillBoundingBox": {
|
||||
"title": "ملء الصندوق المحدد",
|
||||
"desc": "يملأ الصندوق المحدد بلون الفرشاة"
|
||||
},
|
||||
"eraseBoundingBox": {
|
||||
"title": "محو الصندوق المحدد",
|
||||
"desc": "يمحو منطقة الصندوق المحدد"
|
||||
},
|
||||
"colorPicker": {
|
||||
"title": "اختيار منتقي اللون",
|
||||
"desc": "يختار منتقي اللون الخاص باللوحة"
|
||||
},
|
||||
"toggleSnap": {
|
||||
"title": "تبديل التأكيد",
|
||||
"desc": "يبديل تأكيد الشبكة"
|
||||
},
|
||||
"quickToggleMove": {
|
||||
"title": "تبديل سريع للتحريك",
|
||||
"desc": "يبديل مؤقتا وضع التحريك"
|
||||
},
|
||||
"toggleLayer": {
|
||||
"title": "تبديل الطبقة",
|
||||
"desc": "يبديل إختيار الطبقة القناع / الأساسية"
|
||||
},
|
||||
"clearMask": {
|
||||
"title": "مسح القناع",
|
||||
"desc": "مسح القناع بأكمله"
|
||||
},
|
||||
"hideMask": {
|
||||
"title": "إخفاء الكمامة",
|
||||
"desc": "إخفاء وإظهار الكمامة"
|
||||
},
|
||||
"showHideBoundingBox": {
|
||||
"title": "إظهار / إخفاء علبة التحديد",
|
||||
"desc": "تبديل ظهور علبة التحديد"
|
||||
},
|
||||
"mergeVisible": {
|
||||
"title": "دمج الطبقات الظاهرة",
|
||||
"desc": "دمج جميع الطبقات الظاهرة في اللوحة"
|
||||
},
|
||||
"saveToGallery": {
|
||||
"title": "حفظ إلى صالة الأزياء",
|
||||
"desc": "حفظ اللوحة الحالية إلى صالة الأزياء"
|
||||
},
|
||||
"copyToClipboard": {
|
||||
"title": "نسخ إلى الحافظة",
|
||||
"desc": "نسخ اللوحة الحالية إلى الحافظة"
|
||||
},
|
||||
"downloadImage": {
|
||||
"title": "تنزيل الصورة",
|
||||
"desc": "تنزيل اللوحة الحالية"
|
||||
},
|
||||
"undoStroke": {
|
||||
"title": "تراجع عن الخط",
|
||||
"desc": "تراجع عن خط الفرشاة"
|
||||
},
|
||||
"redoStroke": {
|
||||
"title": "إعادة الخط",
|
||||
"desc": "إعادة خط الفرشاة"
|
||||
},
|
||||
"resetView": {
|
||||
"title": "إعادة تعيين العرض",
|
||||
"desc": "إعادة تعيين عرض اللوحة"
|
||||
},
|
||||
"previousStagingImage": {
|
||||
"title": "الصورة السابقة في المرحلة التجريبية",
|
||||
"desc": "الصورة السابقة في منطقة المرحلة التجريبية"
|
||||
},
|
||||
"nextStagingImage": {
|
||||
"title": "الصورة التالية في المرحلة التجريبية",
|
||||
"desc": "الصورة التالية في منطقة المرحلة التجريبية"
|
||||
},
|
||||
"acceptStagingImage": {
|
||||
"title": "قبول الصورة في المرحلة التجريبية",
|
||||
"desc": "قبول الصورة الحالية في منطقة المرحلة التجريبية"
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
"modelManager": "مدير النموذج",
|
||||
"model": "نموذج",
|
||||
"allModels": "جميع النماذج",
|
||||
"checkpointModels": "نقاط التحقق",
|
||||
"diffusersModels": "المصادر المتعددة",
|
||||
"safetensorModels": "التنسورات الآمنة",
|
||||
"modelAdded": "تمت إضافة النموذج",
|
||||
"modelUpdated": "تم تحديث النموذج",
|
||||
"modelEntryDeleted": "تم حذف مدخل النموذج",
|
||||
"cannotUseSpaces": "لا يمكن استخدام المساحات",
|
||||
"addNew": "إضافة جديد",
|
||||
"addNewModel": "إضافة نموذج جديد",
|
||||
"addCheckpointModel": "إضافة نقطة تحقق / نموذج التنسور الآمن",
|
||||
"addDiffuserModel": "إضافة مصادر متعددة",
|
||||
"addManually": "إضافة يدويًا",
|
||||
"manual": "يدوي",
|
||||
"name": "الاسم",
|
||||
"nameValidationMsg": "أدخل اسما لنموذجك",
|
||||
"description": "الوصف",
|
||||
"descriptionValidationMsg": "أضف وصفا لنموذجك",
|
||||
"config": "تكوين",
|
||||
"configValidationMsg": "مسار الملف الإعدادي لنموذجك.",
|
||||
"modelLocation": "موقع النموذج",
|
||||
"modelLocationValidationMsg": "موقع النموذج على الجهاز الخاص بك.",
|
||||
"repo_id": "معرف المستودع",
|
||||
"repoIDValidationMsg": "المستودع الإلكتروني لنموذجك",
|
||||
"vaeLocation": "موقع فاي إي",
|
||||
"vaeLocationValidationMsg": "موقع فاي إي على الجهاز الخاص بك.",
|
||||
"vaeRepoID": "معرف مستودع فاي إي",
|
||||
"vaeRepoIDValidationMsg": "المستودع الإلكتروني فاي إي",
|
||||
"width": "عرض",
|
||||
"widthValidationMsg": "عرض افتراضي لنموذجك.",
|
||||
"height": "ارتفاع",
|
||||
"heightValidationMsg": "ارتفاع افتراضي لنموذجك.",
|
||||
"addModel": "أضف نموذج",
|
||||
"updateModel": "تحديث النموذج",
|
||||
"availableModels": "النماذج المتاحة",
|
||||
"search": "بحث",
|
||||
"load": "تحميل",
|
||||
"active": "نشط",
|
||||
"notLoaded": "غير محمل",
|
||||
"cached": "مخبأ",
|
||||
"checkpointFolder": "مجلد التدقيق",
|
||||
"clearCheckpointFolder": "مسح مجلد التدقيق",
|
||||
"findModels": "إيجاد النماذج",
|
||||
"scanAgain": "فحص مرة أخرى",
|
||||
"modelsFound": "النماذج الموجودة",
|
||||
"selectFolder": "حدد المجلد",
|
||||
"selected": "تم التحديد",
|
||||
"selectAll": "حدد الكل",
|
||||
"deselectAll": "إلغاء تحديد الكل",
|
||||
"showExisting": "إظهار الموجود",
|
||||
"addSelected": "أضف المحدد",
|
||||
"modelExists": "النموذج موجود",
|
||||
"selectAndAdd": "حدد وأضف النماذج المدرجة أدناه",
|
||||
"noModelsFound": "لم يتم العثور على نماذج",
|
||||
"delete": "حذف",
|
||||
"deleteModel": "حذف النموذج",
|
||||
"deleteConfig": "حذف التكوين",
|
||||
"deleteMsg1": "هل أنت متأكد من رغبتك في حذف إدخال النموذج هذا من استحضر الذكاء الصناعي",
|
||||
"deleteMsg2": "هذا لن يحذف ملف نقطة التحكم للنموذج من القرص الخاص بك. يمكنك إعادة إضافتهم إذا كنت ترغب في ذلك.",
|
||||
"formMessageDiffusersModelLocation": "موقع النموذج للمصعد",
|
||||
"formMessageDiffusersModelLocationDesc": "يرجى إدخال واحد على الأقل.",
|
||||
"formMessageDiffusersVAELocation": "موقع فاي إي",
|
||||
"formMessageDiffusersVAELocationDesc": "إذا لم يتم توفيره، سيبحث استحضر الذكاء الصناعي عن ملف فاي إي داخل موقع النموذج المعطى أعلاه."
|
||||
},
|
||||
"parameters": {
|
||||
"images": "الصور",
|
||||
"steps": "الخطوات",
|
||||
"cfgScale": "مقياس الإعداد الذاتي للجملة",
|
||||
"width": "عرض",
|
||||
"height": "ارتفاع",
|
||||
"sampler": "مزج",
|
||||
"seed": "بذرة",
|
||||
"randomizeSeed": "تبديل بذرة",
|
||||
"shuffle": "تشغيل",
|
||||
"noiseThreshold": "عتبة الضوضاء",
|
||||
"perlinNoise": "ضجيج برلين",
|
||||
"variations": "تباينات",
|
||||
"variationAmount": "كمية التباين",
|
||||
"seedWeights": "أوزان البذور",
|
||||
"faceRestoration": "استعادة الوجه",
|
||||
"restoreFaces": "استعادة الوجوه",
|
||||
"type": "نوع",
|
||||
"strength": "قوة",
|
||||
"upscaling": "تصغير",
|
||||
"upscale": "تصغير",
|
||||
"upscaleImage": "تصغير الصورة",
|
||||
"scale": "مقياس",
|
||||
"otherOptions": "خيارات أخرى",
|
||||
"seamlessTiling": "تجهيز بلاستيكي بدون تشققات",
|
||||
"hiresOptim": "تحسين الدقة العالية",
|
||||
"imageFit": "ملائمة الصورة الأولية لحجم الخرج",
|
||||
"codeformerFidelity": "الوثوقية",
|
||||
"seamSize": "حجم التشقق",
|
||||
"seamBlur": "ضباب التشقق",
|
||||
"seamStrength": "قوة التشقق",
|
||||
"seamSteps": "خطوات التشقق",
|
||||
"scaleBeforeProcessing": "تحجيم قبل المعالجة",
|
||||
"scaledWidth": "العرض المحجوب",
|
||||
"scaledHeight": "الارتفاع المحجوب",
|
||||
"infillMethod": "طريقة التعبئة",
|
||||
"tileSize": "حجم البلاطة",
|
||||
"boundingBoxHeader": "صندوق التحديد",
|
||||
"seamCorrectionHeader": "تصحيح التشقق",
|
||||
"infillScalingHeader": "التعبئة والتحجيم",
|
||||
"img2imgStrength": "قوة صورة إلى صورة",
|
||||
"toggleLoopback": "تبديل الإعادة",
|
||||
"invoke": "إطلاق",
|
||||
"promptPlaceholder": "اكتب المحث هنا. [العلامات السلبية], (زيادة الوزن) ++, (نقص الوزن)--, التبديل و الخلط متاحة (انظر الوثائق)",
|
||||
"sendTo": "أرسل إلى",
|
||||
"sendToImg2Img": "أرسل إلى صورة إلى صورة",
|
||||
"sendToUnifiedCanvas": "أرسل إلى الخطوط الموحدة",
|
||||
"copyImage": "نسخ الصورة",
|
||||
"copyImageToLink": "نسخ الصورة إلى الرابط",
|
||||
"downloadImage": "تحميل الصورة",
|
||||
"openInViewer": "فتح في العارض",
|
||||
"closeViewer": "إغلاق العارض",
|
||||
"usePrompt": "استخدم المحث",
|
||||
"useSeed": "استخدام البذور",
|
||||
"useAll": "استخدام الكل",
|
||||
"useInitImg": "استخدام الصورة الأولية",
|
||||
"info": "معلومات",
|
||||
"deleteImage": "حذف الصورة",
|
||||
"initialImage": "الصورة الأولية",
|
||||
"showOptionsPanel": "إظهار لوحة الخيارات"
|
||||
},
|
||||
"settings": {
|
||||
"models": "موديلات",
|
||||
"displayInProgress": "عرض الصور المؤرشفة",
|
||||
"saveSteps": "حفظ الصور كل n خطوات",
|
||||
"confirmOnDelete": "تأكيد عند الحذف",
|
||||
"displayHelpIcons": "عرض أيقونات المساعدة",
|
||||
"useCanvasBeta": "استخدام مخطط الأزرار بيتا",
|
||||
"enableImageDebugging": "تمكين التصحيح عند التصوير",
|
||||
"resetWebUI": "إعادة تعيين واجهة الويب",
|
||||
"resetWebUIDesc1": "إعادة تعيين واجهة الويب يعيد فقط ذاكرة التخزين المؤقت للمتصفح لصورك وإعداداتك المذكورة. لا يحذف أي صور من القرص.",
|
||||
"resetWebUIDesc2": "إذا لم تظهر الصور في الصالة أو إذا كان شيء آخر غير ناجح، يرجى المحاولة إعادة تعيين قبل تقديم مشكلة على جيت هب.",
|
||||
"resetComplete": "تم إعادة تعيين واجهة الويب. تحديث الصفحة لإعادة التحميل."
|
||||
},
|
||||
"toast": {
|
||||
"tempFoldersEmptied": "تم تفريغ مجلد المؤقت",
|
||||
"uploadFailed": "فشل التحميل",
|
||||
"uploadFailedMultipleImagesDesc": "تم الصق صور متعددة، قد يتم تحميل صورة واحدة فقط في الوقت الحالي",
|
||||
"uploadFailedUnableToLoadDesc": "تعذر تحميل الملف",
|
||||
"downloadImageStarted": "بدأ تنزيل الصورة",
|
||||
"imageCopied": "تم نسخ الصورة",
|
||||
"imageLinkCopied": "تم نسخ رابط الصورة",
|
||||
"imageNotLoaded": "لم يتم تحميل أي صورة",
|
||||
"imageNotLoadedDesc": "لم يتم العثور على صورة لإرسالها إلى وحدة الصورة",
|
||||
"imageSavedToGallery": "تم حفظ الصورة في المعرض",
|
||||
"canvasMerged": "تم دمج الخط",
|
||||
"sentToImageToImage": "تم إرسال إلى صورة إلى صورة",
|
||||
"sentToUnifiedCanvas": "تم إرسال إلى لوحة موحدة",
|
||||
"parametersSet": "تم تعيين المعلمات",
|
||||
"parametersNotSet": "لم يتم تعيين المعلمات",
|
||||
"parametersNotSetDesc": "لم يتم العثور على معلمات بيانية لهذه الصورة.",
|
||||
"parametersFailed": "حدث مشكلة في تحميل المعلمات",
|
||||
"parametersFailedDesc": "تعذر تحميل صورة البدء.",
|
||||
"seedSet": "تم تعيين البذرة",
|
||||
"seedNotSet": "لم يتم تعيين البذرة",
|
||||
"seedNotSetDesc": "تعذر العثور على البذرة لهذه الصورة.",
|
||||
"promptSet": "تم تعيين الإشعار",
|
||||
"promptNotSet": "Prompt Not Set",
|
||||
"promptNotSetDesc": "تعذر العثور على الإشعار لهذه الصورة.",
|
||||
"upscalingFailed": "فشل التحسين",
|
||||
"faceRestoreFailed": "فشل استعادة الوجه",
|
||||
"metadataLoadFailed": "فشل تحميل البيانات الوصفية",
|
||||
"initialImageSet": "تم تعيين الصورة الأولية",
|
||||
"initialImageNotSet": "لم يتم تعيين الصورة الأولية",
|
||||
"initialImageNotSetDesc": "تعذر تحميل الصورة الأولية"
|
||||
},
|
||||
"tooltip": {
|
||||
"feature": {
|
||||
"prompt": "هذا هو حقل التحذير. يشمل التحذير عناصر الإنتاج والمصطلحات الأسلوبية. يمكنك إضافة الأوزان (أهمية الرمز) في التحذير أيضًا، ولكن أوامر CLI والمعلمات لن تعمل.",
|
||||
"gallery": "تعرض Gallery منتجات من مجلد الإخراج عندما يتم إنشاؤها. تخزن الإعدادات داخل الملفات ويتم الوصول إليها عن طريق قائمة السياق.",
|
||||
"other": "ستمكن هذه الخيارات من وضع عمليات معالجة بديلة لـاستحضر الذكاء الصناعي. سيؤدي 'الزخرفة بلا جدران' إلى إنشاء أنماط تكرارية في الإخراج. 'دقة عالية' هي الإنتاج خلال خطوتين عبر صورة إلى صورة: استخدم هذا الإعداد عندما ترغب في توليد صورة أكبر وأكثر تجانبًا دون العيوب. ستستغرق الأشياء وقتًا أطول من نص إلى صورة المعتاد.",
|
||||
"seed": "يؤثر قيمة البذور على الضوضاء الأولي الذي يتم تكوين الصورة منه. يمكنك استخدام البذور الخاصة بالصور السابقة. 'عتبة الضوضاء' يتم استخدامها لتخفيف العناصر الخللية في قيم CFG العالية (جرب مدى 0-10), و Perlin لإضافة ضوضاء Perlin أثناء الإنتاج: كلا منهما يعملان على إضافة التنوع إلى النتائج الخاصة بك.",
|
||||
"variations": "جرب التغيير مع قيمة بين 0.1 و 1.0 لتغيير النتائج لبذور معينة. التغييرات المثيرة للاهتمام للبذور تكون بين 0.1 و 0.3.",
|
||||
"upscale": "استخدم إي إس آر جان لتكبير الصورة على الفور بعد الإنتاج.",
|
||||
"faceCorrection": "تصحيح الوجه باستخدام جي إف بي جان أو كود فورمر: يكتشف الخوارزمية الوجوه في الصورة وتصحح أي عيوب. قيمة عالية ستغير الصورة أكثر، مما يؤدي إلى وجوه أكثر جمالا. كود فورمر بدقة أعلى يحتفظ بالصورة الأصلية على حساب تصحيح وجه أكثر قوة.",
|
||||
"imageToImage": "تحميل صورة إلى صورة أي صورة كأولية، والتي يتم استخدامها لإنشاء صورة جديدة مع التشعيب. كلما كانت القيمة أعلى، كلما تغيرت نتيجة الصورة. من الممكن أن تكون القيم بين 0.0 و 1.0، وتوصي النطاق الموصى به هو .25-.75",
|
||||
"boundingBox": "مربع الحدود هو نفس الإعدادات العرض والارتفاع لنص إلى صورة أو صورة إلى صورة. فقط المنطقة في المربع سيتم معالجتها.",
|
||||
"seamCorrection": "يتحكم بالتعامل مع الخطوط المرئية التي تحدث بين الصور المولدة في سطح اللوحة.",
|
||||
"infillAndScaling": "إدارة أساليب التعبئة (المستخدمة على المناطق المخفية أو الممحوة في سطح اللوحة) والزيادة في الحجم (مفيدة لحجوزات الإطارات الصغيرة)."
|
||||
}
|
||||
},
|
||||
"unifiedCanvas": {
|
||||
"layer": "طبقة",
|
||||
"base": "قاعدة",
|
||||
"mask": "قناع",
|
||||
"maskingOptions": "خيارات القناع",
|
||||
"enableMask": "مكن القناع",
|
||||
"preserveMaskedArea": "الحفاظ على المنطقة المقنعة",
|
||||
"clearMask": "مسح القناع",
|
||||
"brush": "فرشاة",
|
||||
"eraser": "ممحاة",
|
||||
"fillBoundingBox": "ملئ إطار الحدود",
|
||||
"eraseBoundingBox": "مسح إطار الحدود",
|
||||
"colorPicker": "اختيار اللون",
|
||||
"brushOptions": "خيارات الفرشاة",
|
||||
"brushSize": "الحجم",
|
||||
"move": "تحريك",
|
||||
"resetView": "إعادة تعيين العرض",
|
||||
"mergeVisible": "دمج الظاهر",
|
||||
"saveToGallery": "حفظ إلى المعرض",
|
||||
"copyToClipboard": "نسخ إلى الحافظة",
|
||||
"downloadAsImage": "تنزيل على شكل صورة",
|
||||
"undo": "تراجع",
|
||||
"redo": "إعادة",
|
||||
"clearCanvas": "مسح سبيكة الكاملة",
|
||||
"canvasSettings": "إعدادات سبيكة الكاملة",
|
||||
"showIntermediates": "إظهار الوسطاء",
|
||||
"showGrid": "إظهار الشبكة",
|
||||
"snapToGrid": "الالتفاف إلى الشبكة",
|
||||
"darkenOutsideSelection": "تعمية خارج التحديد",
|
||||
"autoSaveToGallery": "حفظ تلقائي إلى المعرض",
|
||||
"saveBoxRegionOnly": "حفظ منطقة الصندوق فقط",
|
||||
"limitStrokesToBox": "تحديد عدد الخطوط إلى الصندوق",
|
||||
"showCanvasDebugInfo": "إظهار معلومات تصحيح سبيكة الكاملة",
|
||||
"clearCanvasHistory": "مسح تاريخ سبيكة الكاملة",
|
||||
"clearHistory": "مسح التاريخ",
|
||||
"clearCanvasHistoryMessage": "مسح تاريخ اللوحة تترك اللوحة الحالية عائمة، ولكن تمسح بشكل غير قابل للتراجع تاريخ التراجع والإعادة.",
|
||||
"clearCanvasHistoryConfirm": "هل أنت متأكد من رغبتك في مسح تاريخ اللوحة؟",
|
||||
"emptyTempImageFolder": "إفراغ مجلد الصور المؤقتة",
|
||||
"emptyFolder": "إفراغ المجلد",
|
||||
"emptyTempImagesFolderMessage": "إفراغ مجلد الصور المؤقتة يؤدي أيضًا إلى إعادة تعيين اللوحة الموحدة بشكل كامل. وهذا يشمل كل تاريخ التراجع / الإعادة والصور في منطقة التخزين وطبقة الأساس لللوحة.",
|
||||
"emptyTempImagesFolderConfirm": "هل أنت متأكد من رغبتك في إفراغ مجلد الصور المؤقتة؟",
|
||||
"activeLayer": "الطبقة النشطة",
|
||||
"canvasScale": "مقياس اللوحة",
|
||||
"boundingBox": "صندوق الحدود",
|
||||
"scaledBoundingBox": "صندوق الحدود المكبر",
|
||||
"boundingBoxPosition": "موضع صندوق الحدود",
|
||||
"canvasDimensions": "أبعاد اللوحة",
|
||||
"canvasPosition": "موضع اللوحة",
|
||||
"cursorPosition": "موضع المؤشر",
|
||||
"previous": "السابق",
|
||||
"next": "التالي",
|
||||
"accept": "قبول",
|
||||
"showHide": "إظهار/إخفاء",
|
||||
"discardAll": "تجاهل الكل",
|
||||
"betaClear": "مسح",
|
||||
"betaDarkenOutside": "ظل الخارج",
|
||||
"betaLimitToBox": "تحديد إلى الصندوق",
|
||||
"betaPreserveMasked": "المحافظة على المخفية"
|
||||
}
|
||||
}
|
||||
invokeai/frontend/dist/locales/common/de.json (vendored, 55 lines)
@@ -1,55 +0,0 @@
|
||||
{
|
||||
"hotkeysLabel": "Hotkeys",
|
||||
"themeLabel": "Thema",
|
||||
"languagePickerLabel": "Sprachauswahl",
|
||||
"reportBugLabel": "Fehler melden",
|
||||
"githubLabel": "Github",
|
||||
"discordLabel": "Discord",
|
||||
"settingsLabel": "Einstellungen",
|
||||
"darkTheme": "Dunkel",
|
||||
"lightTheme": "Hell",
|
||||
"greenTheme": "Grün",
|
||||
"langEnglish": "Englisch",
|
||||
"langRussian": "Russisch",
|
||||
"langItalian": "Italienisch",
|
||||
"langPortuguese": "Portugiesisch",
|
||||
"langFrench": "Französich",
|
||||
"langGerman": "Deutsch",
|
||||
"langSpanish": "Spanisch",
|
||||
"text2img": "Text zu Bild",
|
||||
"img2img": "Bild zu Bild",
|
||||
"unifiedCanvas": "Unified Canvas",
|
||||
"nodes": "Knoten",
|
||||
"nodesDesc": "Ein knotenbasiertes System, für die Erzeugung von Bildern, ist derzeit in der Entwicklung. Bleiben Sie gespannt auf Updates zu dieser fantastischen Funktion.",
|
||||
"postProcessing": "Nachbearbeitung",
|
||||
"postProcessDesc1": "InvokeAI bietet eine breite Palette von Nachbearbeitungsfunktionen. Bildhochskalierung und Gesichtsrekonstruktion sind bereits in der WebUI verfügbar. Sie können sie über das Menü Erweiterte Optionen der Reiter Text in Bild und Bild in Bild aufrufen. Sie können Bilder auch direkt bearbeiten, indem Sie die Schaltflächen für Bildaktionen oberhalb der aktuellen Bildanzeige oder im Viewer verwenden.",
|
||||
"postProcessDesc2": "Eine spezielle Benutzeroberfläche wird in Kürze veröffentlicht, um erweiterte Nachbearbeitungs-Workflows zu erleichtern.",
|
||||
"postProcessDesc3": "Die InvokeAI Kommandozeilen-Schnittstelle bietet verschiedene andere Funktionen, darunter Embiggen.",
|
||||
"training": "Training",
|
||||
"trainingDesc1": "Ein spezieller Arbeitsablauf zum Trainieren Ihrer eigenen Embeddings und Checkpoints mit Textual Inversion und Dreambooth über die Weboberfläche.",
|
||||
"trainingDesc2": "InvokeAI unterstützt bereits das Training von benutzerdefinierten Embeddings mit Textual Inversion unter Verwendung des Hauptskripts.",
|
||||
"upload": "Upload",
|
||||
"close": "Schließen",
|
||||
"load": "Laden",
|
||||
"statusConnected": "Verbunden",
|
||||
"statusDisconnected": "Getrennt",
|
||||
"statusError": "Fehler",
|
||||
"statusPreparing": "Vorbereiten",
|
||||
"statusProcessingCanceled": "Verarbeitung abgebrochen",
|
||||
"statusProcessingComplete": "Verarbeitung komplett",
|
||||
"statusGenerating": "Generieren",
|
||||
"statusGeneratingTextToImage": "Erzeugen von Text zu Bild",
|
||||
"statusGeneratingImageToImage": "Erzeugen von Bild zu Bild",
|
||||
"statusGeneratingInpainting": "Erzeuge Inpainting",
|
||||
"statusGeneratingOutpainting": "Erzeuge Outpainting",
|
||||
"statusGenerationComplete": "Generierung abgeschlossen",
|
||||
"statusIterationComplete": "Iteration abgeschlossen",
|
||||
"statusSavingImage": "Speichere Bild",
|
||||
"statusRestoringFaces": "Gesichter restaurieren",
|
||||
"statusRestoringFacesGFPGAN": "Gesichter restaurieren (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Gesichter restaurieren (CodeFormer)",
|
||||
"statusUpscaling": "Hochskalierung",
|
||||
"statusUpscalingESRGAN": "Hochskalierung (ESRGAN)",
|
||||
"statusLoadingModel": "Laden des Modells",
|
||||
"statusModelChanged": "Modell Geändert"
|
||||
}
|
||||
invokeai/frontend/dist/locales/common/en-US.json (vendored, 62 lines)
@@ -1,62 +0,0 @@
{
"hotkeysLabel": "Hotkeys",
"themeLabel": "Theme",
"languagePickerLabel": "Language Picker",
"reportBugLabel": "Report Bug",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Settings",
"darkTheme": "Dark",
"lightTheme": "Light",
"greenTheme": "Green",
"langEnglish": "English",
"langRussian": "Russian",
"langItalian": "Italian",
"langBrPortuguese": "Portuguese (Brazilian)",
"langGerman": "German",
"langPortuguese": "Portuguese",
"langFrench": "French",
"langPolish": "Polish",
"langSimplifiedChinese": "Simplified Chinese",
"langSpanish": "Spanish",
"langJapanese": "Japanese",
"langDutch": "Dutch",
"langUkranian": "Ukranian",
"text2img": "Text To Image",
"img2img": "Image To Image",
"unifiedCanvas": "Unified Canvas",
"nodes": "Nodes",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
"postProcessing": "Post Processing",
"postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.",
"postProcessDesc2": "A dedicated UI will be released soon to facilitate more advanced post processing workflows.",
"postProcessDesc3": "The Invoke AI Command Line Interface offers various other features including Embiggen.",
"training": "Training",
"trainingDesc1": "A dedicated workflow for training your own embeddings and checkpoints using Textual Inversion and Dreambooth from the web interface.",
"trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.",
"upload": "Upload",
"close": "Close",
"load": "Load",
"back": "Back",
"statusConnected": "Connected",
"statusDisconnected": "Disconnected",
"statusError": "Error",
"statusPreparing": "Preparing",
"statusProcessingCanceled": "Processing Canceled",
"statusProcessingComplete": "Processing Complete",
"statusGenerating": "Generating",
"statusGeneratingTextToImage": "Generating Text To Image",
"statusGeneratingImageToImage": "Generating Image To Image",
"statusGeneratingInpainting": "Generating Inpainting",
"statusGeneratingOutpainting": "Generating Outpainting",
"statusGenerationComplete": "Generation Complete",
"statusIterationComplete": "Iteration Complete",
"statusSavingImage": "Saving Image",
"statusRestoringFaces": "Restoring Faces",
"statusRestoringFacesGFPGAN": "Restoring Faces (GFPGAN)",
"statusRestoringFacesCodeFormer": "Restoring Faces (CodeFormer)",
"statusUpscaling": "Upscaling",
"statusUpscalingESRGAN": "Upscaling (ESRGAN)",
"statusLoadingModel": "Loading Model",
"statusModelChanged": "Model Changed"
}
invokeai/frontend/dist/locales/common/en.json (vendored, 62 lines)
@@ -1,62 +0,0 @@
{
"hotkeysLabel": "Hotkeys",
"themeLabel": "Theme",
"languagePickerLabel": "Language Picker",
"reportBugLabel": "Report Bug",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Settings",
"darkTheme": "Dark",
"lightTheme": "Light",
"greenTheme": "Green",
"langEnglish": "English",
"langRussian": "Russian",
"langItalian": "Italian",
"langBrPortuguese": "Portuguese (Brazilian)",
"langGerman": "German",
"langPortuguese": "Portuguese",
"langFrench": "French",
"langPolish": "Polish",
"langSimplifiedChinese": "Simplified Chinese",
"langSpanish": "Spanish",
"langJapanese": "Japanese",
"langDutch": "Dutch",
"langUkranian": "Ukranian",
"text2img": "Text To Image",
"img2img": "Image To Image",
"unifiedCanvas": "Unified Canvas",
"nodes": "Nodes",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
"postProcessing": "Post Processing",
"postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.",
"postProcessDesc2": "A dedicated UI will be released soon to facilitate more advanced post processing workflows.",
"postProcessDesc3": "The Invoke AI Command Line Interface offers various other features including Embiggen.",
"training": "Training",
"trainingDesc1": "A dedicated workflow for training your own embeddings and checkpoints using Textual Inversion and Dreambooth from the web interface.",
"trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.",
"upload": "Upload",
"close": "Close",
"load": "Load",
"back": "Back",
"statusConnected": "Connected",
"statusDisconnected": "Disconnected",
"statusError": "Error",
"statusPreparing": "Preparing",
"statusProcessingCanceled": "Processing Canceled",
"statusProcessingComplete": "Processing Complete",
"statusGenerating": "Generating",
"statusGeneratingTextToImage": "Generating Text To Image",
"statusGeneratingImageToImage": "Generating Image To Image",
"statusGeneratingInpainting": "Generating Inpainting",
"statusGeneratingOutpainting": "Generating Outpainting",
"statusGenerationComplete": "Generation Complete",
"statusIterationComplete": "Iteration Complete",
"statusSavingImage": "Saving Image",
"statusRestoringFaces": "Restoring Faces",
"statusRestoringFacesGFPGAN": "Restoring Faces (GFPGAN)",
"statusRestoringFacesCodeFormer": "Restoring Faces (CodeFormer)",
"statusUpscaling": "Upscaling",
"statusUpscalingESRGAN": "Upscaling (ESRGAN)",
"statusLoadingModel": "Loading Model",
"statusModelChanged": "Model Changed"
}
invokeai/frontend/dist/locales/common/es.json (vendored, 58 lines)
@@ -1,58 +0,0 @@
|
||||
{
|
||||
"hotkeysLabel": "Atajos de teclado",
|
||||
"themeLabel": "Tema",
|
||||
"languagePickerLabel": "Selector de idioma",
|
||||
"reportBugLabel": "Reportar errores",
|
||||
"githubLabel": "GitHub",
|
||||
"discordLabel": "Discord",
|
||||
"settingsLabel": "Ajustes",
|
||||
"darkTheme": "Oscuro",
|
||||
"lightTheme": "Claro",
|
||||
"greenTheme": "Verde",
|
||||
"langEnglish": "Inglés",
|
||||
"langRussian": "Ruso",
|
||||
"langItalian": "Italiano",
|
||||
"langBrPortuguese": "Portugués (Brasil)",
|
||||
"langGerman": "Alemán",
|
||||
"langPortuguese": "Portugués",
|
||||
"langFrench": "French",
|
||||
"langPolish": "Polish",
|
||||
"langSpanish": "Español",
|
||||
"text2img": "Texto a Imagen",
|
||||
"img2img": "Imagen a Imagen",
|
||||
"unifiedCanvas": "Lienzo Unificado",
|
||||
"nodes": "Nodos",
|
||||
"nodesDesc": "Un sistema de generación de imágenes basado en nodos, actualmente se encuentra en desarrollo. Mantente pendiente a nuestras actualizaciones acerca de esta fabulosa funcionalidad.",
|
||||
"postProcessing": "Post-procesamiento",
|
||||
"postProcessDesc1": "Invoke AI ofrece una gran variedad de funciones de post-procesamiento, El aumento de tamaño y Restauración de Rostros ya se encuentran disponibles en la interfaz web, puedes acceder desde el menú de Opciones Avanzadas en las pestañas de Texto a Imagen y de Imagen a Imagen. También puedes acceder a estas funciones directamente mediante el botón de acciones en el menú superior de la imagen actual o en el visualizador",
|
||||
"postProcessDesc2": "Una interfaz de usuario dedicada se lanzará pronto para facilitar flujos de trabajo de postprocesamiento más avanzado.",
|
||||
"postProcessDesc3": "La Interfaz de Línea de Comandos de Invoke AI ofrece muchas otras características, incluyendo -Embiggen-.",
|
||||
"training": "Entrenamiento",
|
||||
"trainingDesc1": "Un flujo de trabajo dedicado para el entrenamiento de sus propios -embeddings- y puntos de control utilizando Inversión Textual y Dreambooth desde la interfaz web.",
|
||||
"trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.",
|
||||
"trainingDesc2": "InvokeAI ya soporta el entrenamiento de -embeddings- personalizados utilizando la Inversión Textual mediante el script principal.",
|
||||
"upload": "Subir imagen",
|
||||
"close": "Cerrar",
|
||||
"load": "Cargar",
|
||||
"statusConnected": "Conectado",
|
||||
"statusDisconnected": "Desconectado",
|
||||
"statusError": "Error",
|
||||
"statusPreparing": "Preparando",
|
||||
"statusProcessingCanceled": "Procesamiento Cancelado",
|
||||
"statusProcessingComplete": "Procesamiento Completo",
|
||||
"statusGenerating": "Generando",
|
||||
"statusGeneratingTextToImage": "Generando Texto a Imagen",
|
||||
"statusGeneratingImageToImage": "Generando Imagen a Imagen",
|
||||
"statusGeneratingInpainting": "Generando pintura interior",
|
||||
"statusGeneratingOutpainting": "Generando pintura exterior",
|
||||
"statusGenerationComplete": "Generación Completa",
|
||||
"statusIterationComplete": "Iteración Completa",
|
||||
"statusSavingImage": "Guardando Imagen",
|
||||
"statusRestoringFaces": "Restaurando Rostros",
|
||||
"statusRestoringFacesGFPGAN": "Restaurando Rostros (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Restaurando Rostros (CodeFormer)",
|
||||
"statusUpscaling": "Aumentando Tamaño",
|
||||
"statusUpscalingESRGAN": "Restaurando Rostros(ESRGAN)",
|
||||
"statusLoadingModel": "Cargando Modelo",
|
||||
"statusModelChanged": "Modelo cambiado"
|
||||
}
|
||||
invokeai/frontend/dist/locales/common/fr.json (vendored, 62 lines)
@@ -1,62 +0,0 @@
|
||||
{
|
||||
"hotkeysLabel": "Raccourcis clavier",
|
||||
"themeLabel": "Thème",
|
||||
"languagePickerLabel": "Sélecteur de langue",
|
||||
"reportBugLabel": "Signaler un bug",
|
||||
"githubLabel": "Github",
|
||||
"discordLabel": "Discord",
|
||||
"settingsLabel": "Paramètres",
|
||||
"darkTheme": "Sombre",
|
||||
"lightTheme": "Clair",
|
||||
"greenTheme": "Vert",
|
||||
"langEnglish": "Anglais",
|
||||
"langRussian": "Russe",
|
||||
"langItalian": "Italien",
|
||||
"langBrPortuguese": "Portugais (Brésilien)",
|
||||
"langGerman": "Allemand",
|
||||
"langPortuguese": "Portugais",
|
||||
"langFrench": "Français",
|
||||
"langPolish": "Polonais",
|
||||
"langSimplifiedChinese": "Chinois simplifié",
|
||||
"langSpanish": "Espagnol",
|
||||
"langJapanese": "Japonais",
|
||||
"langDutch": "Néerlandais",
|
||||
"text2img": "Texte en image",
|
||||
"img2img": "Image en image",
|
||||
"unifiedCanvas": "Canvas unifié",
|
||||
"nodes": "Nœuds",
|
||||
"nodesDesc": "Un système basé sur les nœuds pour la génération d'images est actuellement en développement. Restez à l'écoute pour des mises à jour à ce sujet.",
|
||||
"postProcessing": "Post-traitement",
|
||||
"postProcessDesc1": "Invoke AI offre une grande variété de fonctionnalités de post-traitement. Le redimensionnement d'images et la restauration de visages sont déjà disponibles dans la WebUI. Vous pouvez y accéder à partir du menu Options avancées des onglets Texte en image et Image en image. Vous pouvez également traiter les images directement en utilisant les boutons d'action d'image ci-dessus l'affichage d'image actuel ou dans le visualiseur.",
|
||||
"postProcessDesc2": "Une interface utilisateur dédiée sera bientôt disponible pour faciliter les workflows de post-traitement plus avancés.",
|
||||
"postProcessDesc3": "L'interface en ligne de commande d'Invoke AI offre diverses autres fonctionnalités, notamment Embiggen.",
|
||||
"training": "Formation",
|
||||
"trainingDesc1": "Un workflow dédié pour former vos propres embeddings et checkpoints en utilisant Textual Inversion et Dreambooth depuis l'interface web.",
|
||||
"trainingDesc2": "InvokeAI prend déjà en charge la formation d'embeddings personnalisés en utilisant Textual Inversion en utilisant le script principal.",
|
||||
"upload": "Télécharger",
|
||||
"close": "Fermer",
|
||||
"load": "Charger",
|
||||
"back": "Retour",
|
||||
"statusConnected": "Connecté",
|
||||
"statusDisconnected": "Déconnecté",
|
||||
"statusError": "Erreur",
|
||||
"statusPreparing": "Préparation",
|
||||
"statusProcessingCanceled": "Traitement Annulé",
|
||||
"statusProcessingComplete": "Traitement Terminé",
|
||||
"statusGenerating": "Génération",
|
||||
"statusGeneratingTextToImage": "Génération Texte vers Image",
|
||||
"statusGeneratingImageToImage": "Génération Image vers Image",
|
||||
"statusGeneratingInpainting": "Génération de Réparation",
|
||||
"statusGeneratingOutpainting": "Génération de Completion",
|
||||
"statusGenerationComplete": "Génération Terminée",
|
||||
"statusIterationComplete": "Itération Terminée",
|
||||
"statusSavingImage": "Sauvegarde de l'Image",
|
||||
"statusRestoringFaces": "Restauration des Visages",
|
||||
"statusRestoringFacesGFPGAN": "Restauration des Visages (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Restauration des Visages (CodeFormer)",
|
||||
"statusUpscaling": "Mise à Échelle",
|
||||
"statusUpscalingESRGAN": "Mise à Échelle (ESRGAN)",
|
||||
"statusLoadingModel": "Chargement du Modèle",
|
||||
"statusModelChanged": "Modèle Changé"
|
||||
|
||||
}
|
||||
invokeai/frontend/dist/locales/common/it.json (vendored, 61 lines)
@@ -1,61 +0,0 @@
|
||||
{
|
||||
"hotkeysLabel": "Tasti di scelta rapida",
|
||||
"themeLabel": "Tema",
|
||||
"languagePickerLabel": "Seleziona lingua",
|
||||
"reportBugLabel": "Segnala un errore",
|
||||
"githubLabel": "Github",
|
||||
"discordLabel": "Discord",
|
||||
"settingsLabel": "Impostazioni",
|
||||
"darkTheme": "Scuro",
|
||||
"lightTheme": "Chiaro",
|
||||
"greenTheme": "Verde",
|
||||
"langEnglish": "Inglese",
|
||||
"langRussian": "Russo",
|
||||
"langItalian": "Italiano",
|
||||
"langBrPortuguese": "Portoghese (Brasiliano)",
|
||||
"langGerman": "Tedesco",
|
||||
"langPortuguese": "Portoghese",
|
||||
"langFrench": "Francese",
|
||||
"langPolish": "Polacco",
|
||||
"langSimplifiedChinese": "Cinese semplificato",
|
||||
"langSpanish": "Spagnolo",
|
||||
"langJapanese": "Giapponese",
|
||||
"langDutch": "Olandese",
|
||||
"text2img": "Testo a Immagine",
|
||||
"img2img": "Immagine a Immagine",
|
||||
"unifiedCanvas": "Tela unificata",
|
||||
"nodes": "Nodi",
|
||||
"nodesDesc": "Attualmente è in fase di sviluppo un sistema basato su nodi per la generazione di immagini. Resta sintonizzato per gli aggiornamenti su questa fantastica funzionalità.",
|
||||
"postProcessing": "Post-elaborazione",
|
||||
"postProcessDesc1": "Invoke AI offre un'ampia varietà di funzionalità di post-elaborazione. Ampiamento Immagine e Restaura i Volti sono già disponibili nell'interfaccia Web. È possibile accedervi dal menu 'Opzioni avanzate' delle schede 'Testo a Immagine' e 'Immagine a Immagine'. È inoltre possibile elaborare le immagini direttamente, utilizzando i pulsanti di azione dell'immagine sopra la visualizzazione dell'immagine corrente o nel visualizzatore.",
|
||||
"postProcessDesc2": "Presto verrà rilasciata un'interfaccia utente dedicata per facilitare flussi di lavoro di post-elaborazione più avanzati.",
|
||||
"postProcessDesc3": "L'interfaccia da riga di comando di 'Invoke AI' offre varie altre funzionalità tra cui Embiggen.",
|
||||
"training": "Addestramento",
|
||||
"trainingDesc1": "Un flusso di lavoro dedicato per addestrare i tuoi incorporamenti e checkpoint utilizzando Inversione Testuale e Dreambooth dall'interfaccia web.",
|
||||
"trainingDesc2": "InvokeAI supporta già l'addestramento di incorporamenti personalizzati utilizzando l'inversione testuale utilizzando lo script principale.",
|
||||
"upload": "Caricamento",
|
||||
"close": "Chiudi",
|
||||
"load": "Carica",
|
||||
"back": "Indietro",
|
||||
"statusConnected": "Collegato",
|
||||
"statusDisconnected": "Disconnesso",
|
||||
"statusError": "Errore",
|
||||
"statusPreparing": "Preparazione",
|
||||
"statusProcessingCanceled": "Elaborazione annullata",
|
||||
"statusProcessingComplete": "Elaborazione completata",
|
||||
"statusGenerating": "Generazione in corso",
|
||||
"statusGeneratingTextToImage": "Generazione da Testo a Immagine",
|
||||
"statusGeneratingImageToImage": "Generazione da Immagine a Immagine",
|
||||
"statusGeneratingInpainting": "Generazione Inpainting",
|
||||
"statusGeneratingOutpainting": "Generazione Outpainting",
|
||||
"statusGenerationComplete": "Generazione completata",
|
||||
"statusIterationComplete": "Iterazione completata",
|
||||
"statusSavingImage": "Salvataggio dell'immagine",
|
||||
"statusRestoringFaces": "Restaura i volti",
|
||||
"statusRestoringFacesGFPGAN": "Restaura volti (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Restaura volti (CodeFormer)",
|
||||
"statusUpscaling": "Ampliamento",
|
||||
"statusUpscalingESRGAN": "Ampliamento (ESRGAN)",
|
||||
"statusLoadingModel": "Caricamento del modello",
|
||||
"statusModelChanged": "Modello cambiato"
|
||||
}
|
||||
invokeai/frontend/dist/locales/common/ja.json (vendored, 60 lines)
@@ -1,60 +0,0 @@
|
||||
{
|
||||
"hotkeysLabel": "Hotkeys",
|
||||
"themeLabel": "テーマ",
|
||||
"languagePickerLabel": "言語選択",
|
||||
"reportBugLabel": "バグ報告",
|
||||
"githubLabel": "Github",
|
||||
"discordLabel": "Discord",
|
||||
"settingsLabel": "設定",
|
||||
"darkTheme": "ダーク",
|
||||
"lightTheme": "ライト",
|
||||
"greenTheme": "緑",
|
||||
"langEnglish": "English",
|
||||
"langRussian": "Russian",
|
||||
"langItalian": "Italian",
|
||||
"langBrPortuguese": "Portuguese (Brazilian)",
|
||||
"langGerman": "German",
|
||||
"langPortuguese": "Portuguese",
|
||||
"langFrench": "French",
|
||||
"langPolish": "Polish",
|
||||
"langSimplifiedChinese": "Simplified Chinese",
|
||||
"langSpanish": "Spanish",
|
||||
"text2img": "Text To Image",
|
||||
"img2img": "Image To Image",
|
||||
"unifiedCanvas": "Unified Canvas",
|
||||
"nodes": "Nodes",
|
||||
"nodesDesc": "現在、画像生成のためのノードベースシステムを開発中です。機能についてのアップデートにご期待ください。",
|
||||
"postProcessing": "後処理",
|
||||
"postProcessDesc1": "Invoke AIは、多彩な後処理の機能を備えています。アップスケーリングと顔修復は、すでにWebUI上で利用可能です。これらは、[Text To Image]および[Image To Image]タブの[詳細オプション]メニューからアクセスできます。また、現在の画像表示の上やビューア内の画像アクションボタンを使って、画像を直接処理することもできます。",
|
||||
"postProcessDesc2": "より高度な後処理の機能を実現するための専用UIを近日中にリリース予定です。",
|
||||
"postProcessDesc3": "Invoke AI CLIでは、この他にもEmbiggenをはじめとする様々な機能を利用することができます。",
|
||||
"training": "追加学習",
|
||||
"trainingDesc1": "Textual InversionとDreamboothを使って、WebUIから独自のEmbeddingとチェックポイントを追加学習するための専用ワークフローです。",
|
||||
"trainingDesc2": "InvokeAIは、すでにメインスクリプトを使ったTextual Inversionによるカスタム埋め込み追加学習にも対応しています。",
|
||||
"upload": "アップロード",
|
||||
"close": "閉じる",
|
||||
"load": "ロード",
|
||||
"back": "戻る",
|
||||
"statusConnected": "接続済",
|
||||
"statusDisconnected": "切断済",
|
||||
"statusError": "エラー",
|
||||
"statusPreparing": "準備中",
|
||||
"statusProcessingCanceled": "処理をキャンセル",
|
||||
"statusProcessingComplete": "処理完了",
|
||||
"statusGenerating": "生成中",
|
||||
"statusGeneratingTextToImage": "Text To Imageで生成中",
|
||||
"statusGeneratingImageToImage": "Image To Imageで生成中",
|
||||
"statusGeneratingInpainting": "Generating Inpainting",
|
||||
"statusGeneratingOutpainting": "Generating Outpainting",
|
||||
"statusGenerationComplete": "生成完了",
|
||||
"statusIterationComplete": "Iteration Complete",
|
||||
"statusSavingImage": "画像を保存",
|
||||
"statusRestoringFaces": "顔の修復",
|
||||
"statusRestoringFacesGFPGAN": "顔の修復 (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "顔の修復 (CodeFormer)",
|
||||
"statusUpscaling": "アップスケーリング",
|
||||
"statusUpscalingESRGAN": "アップスケーリング (ESRGAN)",
|
||||
"statusLoadingModel": "モデルを読み込む",
|
||||
"statusModelChanged": "モデルを変更"
|
||||
}
|
||||
|
||||
invokeai/frontend/dist/locales/common/nl.json (vendored, 59 lines)
@@ -1,59 +0,0 @@
|
||||
{
|
||||
"hotkeysLabel": "Sneltoetsen",
|
||||
"themeLabel": "Thema",
|
||||
"languagePickerLabel": "Taalkeuze",
|
||||
"reportBugLabel": "Meld bug",
|
||||
"githubLabel": "Github",
|
||||
"discordLabel": "Discord",
|
||||
"settingsLabel": "Instellingen",
|
||||
"darkTheme": "Donker",
|
||||
"lightTheme": "Licht",
|
||||
"greenTheme": "Groen",
|
||||
"langEnglish": "Engels",
|
||||
"langRussian": "Russisch",
|
||||
"langItalian": "Italiaans",
|
||||
"langBrPortuguese": "Portugees (Braziliaans)",
|
||||
"langGerman": "Duits",
|
||||
"langPortuguese": "Portugees",
|
||||
"langFrench": "Frans",
|
||||
"langPolish": "Pools",
|
||||
"langSimplifiedChinese": "Vereenvoudigd Chinees",
|
||||
"langSpanish": "Spaans",
|
||||
"langDutch": "Nederlands",
|
||||
"text2img": "Tekst naar afbeelding",
|
||||
"img2img": "Afbeelding naar afbeelding",
|
||||
"unifiedCanvas": "Centraal canvas",
|
||||
"nodes": "Knooppunten",
|
||||
"nodesDesc": "Een op knooppunten gebaseerd systeem voor het genereren van afbeeldingen is momenteel in ontwikkeling. Blijf op de hoogte voor nieuws over deze verbluffende functie.",
|
||||
"postProcessing": "Naverwerking",
|
||||
"postProcessDesc1": "Invoke AI biedt een breed scala aan naverwerkingsfuncties. Afbeeldingsopschaling en Gezichtsherstel zijn al beschikbaar in de web-UI. Je kunt ze openen via het menu Uitgebreide opties in de tabbladen Tekst naar afbeelding en Afbeelding naar afbeelding. Je kunt een afbeelding ook direct verwerken via de afbeeldingsactieknoppen boven de weergave van de huidigde afbeelding of in de Viewer.",
|
||||
"postProcessDesc2": "Een individuele gebruikersinterface voor uitgebreidere naverwerkingsworkflows.",
|
||||
"postProcessDesc3": "De opdrachtregelinterface van InvokeAI biedt diverse andere functies, waaronder Embiggen.",
|
||||
"training": "Training",
|
||||
"trainingDesc1": "Een individuele workflow in de webinterface voor het trainen van je eigen embeddings en checkpoints via Textual Inversion en Dreambooth.",
|
||||
"trainingDesc2": "InvokeAI ondersteunt al het trainen van eigen embeddings via Textual Inversion via het hoofdscript.",
|
||||
"upload": "Upload",
|
||||
"close": "Sluit",
|
||||
"load": "Laad",
|
||||
"statusConnected": "Verbonden",
|
||||
"statusDisconnected": "Niet verbonden",
|
||||
"statusError": "Fout",
|
||||
"statusPreparing": "Voorbereiden",
|
||||
"statusProcessingCanceled": "Verwerking geannuleerd",
|
||||
"statusProcessingComplete": "Verwerking voltooid",
|
||||
"statusGenerating": "Genereren",
|
||||
"statusGeneratingTextToImage": "Genereren van tekst naar afbeelding",
|
||||
"statusGeneratingImageToImage": "Genereren van afbeelding naar afbeelding",
|
||||
"statusGeneratingInpainting": "Genereren van Inpainting",
|
||||
"statusGeneratingOutpainting": "Genereren van Outpainting",
|
||||
"statusGenerationComplete": "Genereren voltooid",
|
||||
"statusIterationComplete": "Iteratie voltooid",
|
||||
"statusSavingImage": "Afbeelding bewaren",
|
||||
"statusRestoringFaces": "Gezichten herstellen",
|
||||
"statusRestoringFacesGFPGAN": "Gezichten herstellen (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Gezichten herstellen (CodeFormer)",
|
||||
"statusUpscaling": "Opschaling",
|
||||
"statusUpscalingESRGAN": "Opschaling (ESRGAN)",
|
||||
"statusLoadingModel": "Laden van model",
|
||||
"statusModelChanged": "Model gewijzigd"
|
||||
}
|
||||
invokeai/frontend/dist/locales/common/pl.json (vendored, 55 lines)
@@ -1,55 +0,0 @@
|
||||
{
|
||||
"hotkeysLabel": "Skróty klawiszowe",
|
||||
"themeLabel": "Motyw",
|
||||
"languagePickerLabel": "Wybór języka",
|
||||
"reportBugLabel": "Zgłoś błąd",
|
||||
"githubLabel": "Github",
|
||||
"discordLabel": "Discord",
|
||||
"settingsLabel": "Ustawienia",
|
||||
"darkTheme": "Ciemny",
|
||||
"lightTheme": "Jasny",
|
||||
"greenTheme": "Zielony",
|
||||
"langEnglish": "Angielski",
|
||||
"langRussian": "Rosyjski",
|
||||
"langItalian": "Włoski",
|
||||
"langPortuguese": "Portugalski",
|
||||
"langFrench": "Francuski",
|
||||
"langPolish": "Polski",
|
||||
"langSpanish": "Hiszpański",
|
||||
"text2img": "Tekst na obraz",
|
||||
"img2img": "Obraz na obraz",
|
||||
"unifiedCanvas": "Tryb uniwersalny",
|
||||
"nodes": "Węzły",
|
||||
"nodesDesc": "W tym miejscu powstanie graficzny system generowania obrazów oparty na węzłach. Jest na co czekać!",
|
||||
"postProcessing": "Przetwarzanie końcowe",
|
||||
"postProcessDesc1": "Invoke AI oferuje wiele opcji przetwarzania końcowego. Z poziomu przeglądarki dostępne jest już zwiększanie rozdzielczości oraz poprawianie twarzy. Znajdziesz je wśród ustawień w trybach \"Tekst na obraz\" oraz \"Obraz na obraz\". Są również obecne w pasku menu wyświetlanym nad podglądem wygenerowanego obrazu.",
|
||||
"postProcessDesc2": "Niedługo zostanie udostępniony specjalny interfejs, który będzie oferował jeszcze więcej możliwości.",
|
||||
"postProcessDesc3": "Z poziomu linii poleceń już teraz dostępne są inne opcje, takie jak skalowanie obrazu metodą Embiggen.",
|
||||
"training": "Trenowanie",
|
||||
"trainingDesc1": "W tym miejscu dostępny będzie system przeznaczony do tworzenia własnych zanurzeń (ang. embeddings) i punktów kontrolnych przy użyciu metod w rodzaju inwersji tekstowej lub Dreambooth.",
|
||||
"trainingDesc2": "Obecnie jest możliwe tworzenie własnych zanurzeń przy użyciu skryptów wywoływanych z linii poleceń.",
|
||||
"upload": "Prześlij",
|
||||
"close": "Zamknij",
|
||||
"load": "Załaduj",
|
||||
"statusConnected": "Połączono z serwerem",
|
||||
"statusDisconnected": "Odłączono od serwera",
|
||||
"statusError": "Błąd",
|
||||
"statusPreparing": "Przygotowywanie",
|
||||
"statusProcessingCanceled": "Anulowano przetwarzanie",
|
||||
"statusProcessingComplete": "Zakończono przetwarzanie",
|
||||
"statusGenerating": "Przetwarzanie",
|
||||
"statusGeneratingTextToImage": "Przetwarzanie tekstu na obraz",
|
||||
"statusGeneratingImageToImage": "Przetwarzanie obrazu na obraz",
|
||||
"statusGeneratingInpainting": "Przemalowywanie",
|
||||
"statusGeneratingOutpainting": "Domalowywanie",
|
||||
"statusGenerationComplete": "Zakończono generowanie",
|
||||
"statusIterationComplete": "Zakończono iterację",
|
||||
"statusSavingImage": "Zapisywanie obrazu",
|
||||
"statusRestoringFaces": "Poprawianie twarzy",
|
||||
"statusRestoringFacesGFPGAN": "Poprawianie twarzy (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Poprawianie twarzy (CodeFormer)",
|
||||
"statusUpscaling": "Powiększanie obrazu",
|
||||
"statusUpscalingESRGAN": "Powiększanie (ESRGAN)",
|
||||
"statusLoadingModel": "Wczytywanie modelu",
|
||||
"statusModelChanged": "Zmieniono model"
|
||||
}
|
||||
invokeai/frontend/dist/locales/common/pt_br.json (vendored, 55 lines)
@@ -1,55 +0,0 @@
|
||||
{
|
||||
"hotkeysLabel": "Teclas de atalho",
|
||||
"themeLabel": "Tema",
|
||||
"languagePickerLabel": "Seletor de Idioma",
|
||||
"reportBugLabel": "Relatar Bug",
|
||||
"githubLabel": "Github",
|
||||
"discordLabel": "Discord",
|
||||
"settingsLabel": "Configurações",
|
||||
"darkTheme": "Noite",
|
||||
"lightTheme": "Dia",
|
||||
"greenTheme": "Verde",
|
||||
"langEnglish": "English",
|
||||
"langRussian": "Russian",
|
||||
"langItalian": "Italian",
|
||||
"langBrPortuguese": "Português do Brasil",
|
||||
"langPortuguese": "Portuguese",
|
||||
"langFrench": "French",
|
||||
"langSpanish": "Spanish",
|
||||
"text2img": "Texto Para Imagem",
|
||||
"img2img": "Imagem Para Imagem",
|
||||
"unifiedCanvas": "Tela Unificada",
|
||||
"nodes": "Nódulos",
|
||||
"nodesDesc": "Um sistema baseado em nódulos para geração de imagens está em contrução. Fique ligado para atualizações sobre essa funcionalidade incrível.",
|
||||
"postProcessing": "Pós-processamento",
|
||||
"postProcessDesc1": "Invoke AI oferece uma variedade e funcionalidades de pós-processamento. Redimensionador de Imagem e Restauração Facial já estão disponíveis na interface. Você pode acessar elas no menu de Opções Avançadas na aba de Texto para Imagem e Imagem para Imagem. Você também pode processar imagens diretamente, usando os botões de ação de imagem acima da atual tela de imagens ou visualizador.",
|
||||
"postProcessDesc2": "Uma interface dedicada será lançada em breve para facilitar fluxos de trabalho com opções mais avançadas de pós-processamento.",
|
||||
"postProcessDesc3": "A interface do comando de linha da Invoke oferece várias funcionalidades incluindo Ampliação.",
|
||||
"training": "Treinando",
|
||||
"trainingDesc1": "Um fluxo de trabalho dedicado para treinar suas próprias incorporações e chockpoints usando Inversão Textual e Dreambooth na interface web.",
|
||||
"trainingDesc2": "InvokeAI já suporta treinar incorporações personalizadas usando Inversão Textual com o script principal.",
|
||||
"upload": "Enviar",
|
||||
"close": "Fechar",
|
||||
"load": "Carregar",
|
||||
"statusConnected": "Conectado",
|
||||
"statusDisconnected": "Disconectado",
|
||||
"statusError": "Erro",
|
||||
"statusPreparing": "Preparando",
|
||||
"statusProcessingCanceled": "Processamento Canceledo",
|
||||
"statusProcessingComplete": "Processamento Completo",
|
||||
"statusGenerating": "Gerando",
|
||||
"statusGeneratingTextToImage": "Gerando Texto Para Imagem",
|
||||
"statusGeneratingImageToImage": "Gerando Imagem Para Imagem",
|
||||
"statusGeneratingInpainting": "Gerando Inpainting",
|
||||
"statusGeneratingOutpainting": "Gerando Outpainting",
|
||||
"statusGenerationComplete": "Geração Completa",
|
||||
"statusIterationComplete": "Iteração Completa",
|
||||
"statusSavingImage": "Salvando Imagem",
|
||||
"statusRestoringFaces": "Restaurando Rostos",
|
||||
"statusRestoringFacesGFPGAN": "Restaurando Rostos (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Restaurando Rostos (CodeFormer)",
|
||||
"statusUpscaling": "Redimensinando",
|
||||
"statusUpscalingESRGAN": "Redimensinando (ESRGAN)",
|
||||
"statusLoadingModel": "Carregando Modelo",
|
||||
"statusModelChanged": "Modelo Alterado"
|
||||
}
|
||||
invokeai/frontend/dist/locales/common/ru.json (vendored, 54 lines)
@@ -1,54 +0,0 @@
|
||||
{
|
||||
"hotkeysLabel": "Горячие клавиши",
|
||||
"themeLabel": "Тема",
|
||||
"languagePickerLabel": "Язык",
|
||||
"reportBugLabel": "Сообщить об ошибке",
|
||||
"githubLabel": "Github",
|
||||
"discordLabel": "Discord",
|
||||
"settingsLabel": "Настройка",
|
||||
"darkTheme": "Темная",
|
||||
"lightTheme": "Светлая",
|
||||
"greenTheme": "Зеленая",
|
||||
"langEnglish": "English",
|
||||
"langRussian": "Русский",
|
||||
"langItalian": "Italian",
|
||||
"langPortuguese": "Portuguese",
|
||||
"langFrench": "French",
|
||||
"langSpanish": "Spanish",
|
||||
"text2img": "Изображение из текста (text2img)",
|
||||
"img2img": "Изображение в изображение (img2img)",
|
||||
"unifiedCanvas": "Универсальный холст",
|
||||
"nodes": "Ноды",
|
||||
"nodesDesc": "Cистема генерации изображений на основе нодов (узлов) уже разрабатывается. Следите за новостями об этой замечательной функции.",
|
||||
"postProcessing": "Постобработка",
|
||||
"postProcessDesc1": "Invoke AI предлагает широкий спектр функций постобработки. Увеличение изображения (upscale) и восстановление лиц уже доступны в интерфейсе. Получите доступ к ним из меню 'Дополнительные параметры' на вкладках 'Текст в изображение' и 'Изображение в изображение'. Обрабатывайте изображения напрямую, используя кнопки действий с изображениями над текущим изображением или в режиме просмотра.",
|
||||
"postProcessDesc2": "В ближайшее время будет выпущен специальный интерфейс для более продвинутых процессов постобработки.",
|
||||
"postProcessDesc3": "Интерфейс командной строки Invoke AI предлагает различные другие функции, включая увеличение Embiggen",
|
||||
"training": "Обучение",
|
||||
"trainingDesc1": "Специальный интерфейс для обучения собственных моделей с использованием Textual Inversion и Dreambooth",
|
||||
"trainingDesc2": "InvokeAI уже поддерживает обучение моделей с помощью TI, через интерфейс командной строки.",
|
||||
"upload": "Загрузить",
|
||||
"close": "Закрыть",
|
||||
"load": "Загрузить",
|
||||
"statusConnected": "Подключен",
|
||||
"statusDisconnected": "Отключен",
|
||||
"statusError": "Ошибка",
|
||||
"statusPreparing": "Подготовка",
|
||||
"statusProcessingCanceled": "Обработка прервана",
|
||||
"statusProcessingComplete": "Обработка завершена",
|
||||
"statusGenerating": "Генерация",
|
||||
"statusGeneratingTextToImage": "Создаем изображение из текста",
|
||||
"statusGeneratingImageToImage": "Создаем изображение из изображения",
|
||||
"statusGeneratingInpainting": "Дополняем внутри",
|
||||
"statusGeneratingOutpainting": "Дорисовываем снаружи",
|
||||
"statusGenerationComplete": "Генерация завершена",
|
||||
"statusIterationComplete": "Итерация завершена",
|
||||
"statusSavingImage": "Сохранение изображения",
|
||||
"statusRestoringFaces": "Восстановление лиц",
|
||||
"statusRestoringFacesGFPGAN": "Восстановление лиц (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Восстановление лиц (CodeFormer)",
|
||||
"statusUpscaling": "Увеличение",
|
||||
"statusUpscalingESRGAN": "Увеличение (ESRGAN)",
|
||||
"statusLoadingModel": "Загрузка модели",
|
||||
"statusModelChanged": "Модель изменена"
|
||||
}
|
||||
invokeai/frontend/dist/locales/common/ua.json (vendored, 53 lines)
@@ -1,53 +0,0 @@
|
||||
{
|
||||
"hotkeysLabel": "Гарячi клавіші",
|
||||
"themeLabel": "Тема",
|
||||
"languagePickerLabel": "Мова",
|
||||
"reportBugLabel": "Повідомити про помилку",
|
||||
"githubLabel": "Github",
|
||||
"discordLabel": "Discord",
|
||||
"settingsLabel": "Налаштування",
|
||||
"darkTheme": "Темна",
|
||||
"lightTheme": "Світла",
|
||||
"greenTheme": "Зелена",
|
||||
"langEnglish": "Англійська",
|
||||
"langRussian": "Російська",
|
||||
"langItalian": "Iталійська",
|
||||
"langPortuguese": "Португальська",
|
||||
"langFrench": "Французька",
|
||||
"text2img": "Зображення із тексту (text2img)",
|
||||
"img2img": "Зображення із зображення (img2img)",
|
||||
"unifiedCanvas": "Універсальне полотно",
|
||||
"nodes": "Вузли",
|
||||
"nodesDesc": "Система генерації зображень на основі нодів (вузлів) вже розробляється. Слідкуйте за новинами про цю чудову функцію.",
|
||||
"postProcessing": "Постобробка",
|
||||
"postProcessDesc1": "Invoke AI пропонує широкий спектр функцій постобробки. Збільшення зображення (upscale) та відновлення облич вже доступні в інтерфейсі. Отримайте доступ до них з меню 'Додаткові параметри' на вкладках 'Зображення із тексту' та 'Зображення із зображення'. Обробляйте зображення безпосередньо, використовуючи кнопки дій із зображеннями над поточним зображенням або в режимі перегляду.",
|
||||
"postProcessDesc2": "Найближчим часом буде випущено спеціальний інтерфейс для більш сучасних процесів постобробки.",
|
||||
"postProcessDesc3": "Інтерфейс командного рядка Invoke AI пропонує різні інші функції, включаючи збільшення Embiggen",
|
||||
"training": "Навчання",
|
||||
"trainingDesc1": "Спеціальний інтерфейс для навчання власних моделей з використанням Textual Inversion та Dreambooth",
|
||||
"trainingDesc2": "InvokeAI вже підтримує навчання моделей за допомогою TI, через інтерфейс командного рядка.",
|
||||
"upload": "Завантажити",
|
||||
"close": "Закрити",
|
||||
"load": "Завантажити",
|
||||
"statusConnected": "Підключено",
|
||||
"statusDisconnected": "Відключено",
|
||||
"statusError": "Помилка",
|
||||
"statusPreparing": "Підготування",
|
||||
"statusProcessingCanceled": "Обробка перервана",
|
||||
"statusProcessingComplete": "Обробка завершена",
|
||||
"statusGenerating": "Генерація",
|
||||
"statusGeneratingTextToImage": "Генерація зображення із тексту",
|
||||
"statusGeneratingImageToImage": "Генерація зображення із зображення",
|
||||
"statusGeneratingInpainting": "Домальовка всередині",
|
||||
"statusGeneratingOutpainting": "Домальовка зовні",
|
||||
"statusGenerationComplete": "Генерація завершена",
|
||||
"statusIterationComplete": "Iтерація завершена",
|
||||
"statusSavingImage": "Збереження зображення",
|
||||
"statusRestoringFaces": "Відновлення облич",
|
||||
"statusRestoringFacesGFPGAN": "Відновлення облич (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Відновлення облич (CodeFormer)",
|
||||
"statusUpscaling": "Збільшення",
|
||||
"statusUpscalingESRGAN": "Збільшення (ESRGAN)",
|
||||
"statusLoadingModel": "Завантаження моделі",
|
||||
"statusModelChanged": "Модель змінено"
|
||||
}
|
||||
invokeai/frontend/dist/locales/common/zh_cn.json (vendored, 54 lines)
@@ -1,54 +0,0 @@
|
||||
{
|
||||
"hotkeysLabel": "快捷键",
|
||||
"themeLabel": "主题",
|
||||
"languagePickerLabel": "语言",
|
||||
"reportBugLabel": "提交错误报告",
|
||||
"githubLabel": "GitHub",
|
||||
"discordLabel": "Discord",
|
||||
"settingsLabel": "设置",
|
||||
"darkTheme": "暗色",
|
||||
"lightTheme": "亮色",
|
||||
"greenTheme": "绿色",
|
||||
"langEnglish": "英语",
|
||||
"langRussian": "俄语",
|
||||
"langItalian": "意大利语",
|
||||
"langPortuguese": "葡萄牙语",
|
||||
"langFrench": "法语",
|
||||
"langChineseSimplified": "简体中文",
|
||||
"text2img": "文字到图像",
|
||||
"img2img": "图像到图像",
|
||||
"unifiedCanvas": "统一画布",
|
||||
"nodes": "节点",
|
||||
"nodesDesc": "一个基于节点的图像生成系统目前正在开发中。请持续关注关于这一功能的更新。",
|
||||
"postProcessing": "后期处理",
|
||||
"postProcessDesc1": "Invoke AI 提供各种各样的后期处理功能。图像放大和面部修复在网页界面中已经可用。你可以从文本到图像和图像到图像页面的高级选项菜单中访问它们。你也可以直接使用图像显示上方或查看器中的图像操作按钮处理图像。",
|
||||
"postProcessDesc2": "一个专门的界面将很快发布,新的界面能够处理更复杂的后期处理流程。",
|
||||
"postProcessDesc3": "Invoke AI 命令行界面提供例如Embiggen的各种其他功能。",
|
||||
"training": "训练",
|
||||
"trainingDesc1": "一个专门用于从网络UI使用Textual Inversion和Dreambooth训练自己的嵌入模型和检查点的工作流程。",
|
||||
"trainingDesc2": "InvokeAI已经支持使用主脚本中的Textual Inversion来训练自定义的嵌入模型。",
|
||||
"upload": "上传",
|
||||
"close": "关闭",
|
||||
"load": "加载",
|
||||
"statusConnected": "已连接",
|
||||
"statusDisconnected": "未连接",
|
||||
"statusError": "错误",
|
||||
"statusPreparing": "准备中",
|
||||
"statusProcessingCanceled": "处理取消",
|
||||
"statusProcessingComplete": "处理完成",
|
||||
"statusGenerating": "生成中",
|
||||
"statusGeneratingTextToImage": "文字到图像生成中",
|
||||
"statusGeneratingImageToImage": "图像到图像生成中",
|
||||
"statusGeneratingInpainting": "生成内画中",
|
||||
"statusGeneratingOutpainting": "生成外画中",
|
||||
"statusGenerationComplete": "生成完成",
|
||||
"statusIterationComplete": "迭代完成",
|
||||
"statusSavingImage": "图像保存中",
|
||||
"statusRestoringFaces": "脸部修复中",
|
||||
"statusRestoringFacesGFPGAN": "脸部修复中 (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "脸部修复中 (CodeFormer)",
|
||||
"statusUpscaling": "放大中",
|
||||
"statusUpscalingESRGAN": "放大中 (ESRGAN)",
|
||||
"statusLoadingModel": "模型加载中",
|
||||
"statusModelChanged": "模型已切换"
|
||||
}
|
||||
invokeai/frontend/dist/locales/de.json (vendored, new file, 494 lines)
@@ -0,0 +1,494 @@
|
||||
{
|
||||
"common": {
|
||||
"themeLabel": "Thema",
|
||||
"languagePickerLabel": "Sprachauswahl",
|
||||
"reportBugLabel": "Fehler melden",
|
||||
"settingsLabel": "Einstellungen",
|
||||
"darkTheme": "Dunkel",
|
||||
"lightTheme": "Hell",
|
||||
"greenTheme": "Grün",
|
||||
"text2img": "Text zu Bild",
|
||||
"img2img": "Bild zu Bild",
|
||||
"nodes": "Knoten",
|
||||
"langGerman": "Deutsch",
|
||||
"nodesDesc": "Ein knotenbasiertes System, für die Erzeugung von Bildern, ist derzeit in der Entwicklung. Bleiben Sie gespannt auf Updates zu dieser fantastischen Funktion.",
|
||||
"postProcessing": "Nachbearbeitung",
|
||||
"postProcessDesc1": "InvokeAI bietet eine breite Palette von Nachbearbeitungsfunktionen. Bildhochskalierung und Gesichtsrekonstruktion sind bereits in der WebUI verfügbar. Sie können sie über das Menü Erweiterte Optionen der Reiter Text in Bild und Bild in Bild aufrufen. Sie können Bilder auch direkt bearbeiten, indem Sie die Schaltflächen für Bildaktionen oberhalb der aktuellen Bildanzeige oder im Viewer verwenden.",
|
||||
"postProcessDesc2": "Eine spezielle Benutzeroberfläche wird in Kürze veröffentlicht, um erweiterte Nachbearbeitungs-Workflows zu erleichtern.",
|
||||
"postProcessDesc3": "Die InvokeAI Kommandozeilen-Schnittstelle bietet verschiedene andere Funktionen, darunter Embiggen.",
|
||||
"training": "Training",
|
||||
"trainingDesc1": "Ein spezieller Arbeitsablauf zum Trainieren Ihrer eigenen Embeddings und Checkpoints mit Textual Inversion und Dreambooth über die Weboberfläche.",
|
||||
"trainingDesc2": "InvokeAI unterstützt bereits das Training von benutzerdefinierten Embeddings mit Textual Inversion unter Verwendung des Hauptskripts.",
|
||||
"upload": "Upload",
|
||||
"close": "Schließen",
|
||||
"load": "Laden",
|
||||
"statusConnected": "Verbunden",
|
||||
"statusDisconnected": "Getrennt",
|
||||
"statusError": "Fehler",
|
||||
"statusPreparing": "Vorbereiten",
|
||||
"statusProcessingCanceled": "Verarbeitung abgebrochen",
|
||||
"statusProcessingComplete": "Verarbeitung komplett",
|
||||
"statusGenerating": "Generieren",
|
||||
"statusGeneratingTextToImage": "Erzeugen von Text zu Bild",
|
||||
"statusGeneratingImageToImage": "Erzeugen von Bild zu Bild",
|
||||
"statusGeneratingInpainting": "Erzeuge Inpainting",
|
||||
"statusGeneratingOutpainting": "Erzeuge Outpainting",
|
||||
"statusGenerationComplete": "Generierung abgeschlossen",
|
||||
"statusIterationComplete": "Iteration abgeschlossen",
|
||||
"statusSavingImage": "Speichere Bild",
|
||||
"statusRestoringFaces": "Gesichter restaurieren",
|
||||
"statusRestoringFacesGFPGAN": "Gesichter restaurieren (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Gesichter restaurieren (CodeFormer)",
|
||||
"statusUpscaling": "Hochskalierung",
|
||||
"statusUpscalingESRGAN": "Hochskalierung (ESRGAN)",
|
||||
"statusLoadingModel": "Laden des Modells",
|
||||
"statusModelChanged": "Modell Geändert"
|
||||
},
|
||||
"gallery": {
|
||||
"generations": "Erzeugungen",
|
||||
"showGenerations": "Zeige Erzeugnisse",
|
||||
"uploads": "Uploads",
|
||||
"showUploads": "Zeige Uploads",
|
||||
"galleryImageSize": "Bildgröße",
|
||||
"galleryImageResetSize": "Größe zurücksetzen",
|
||||
"gallerySettings": "Galerie-Einstellungen",
|
||||
"maintainAspectRatio": "Seitenverhältnis beibehalten",
|
||||
"autoSwitchNewImages": "Automatisch zu neuen Bildern wechseln",
|
||||
"singleColumnLayout": "Einspaltiges Layout",
|
||||
"pinGallery": "Galerie anpinnen",
|
||||
"allImagesLoaded": "Alle Bilder geladen",
|
||||
"loadMore": "Mehr laden",
|
||||
"noImagesInGallery": "Keine Bilder in der Galerie"
|
||||
},
|
||||
"hotkeys": {
|
||||
"keyboardShortcuts": "Tastenkürzel",
|
||||
"appHotkeys": "App-Tastenkombinationen",
|
||||
"generalHotkeys": "Allgemeine Tastenkürzel",
|
||||
"galleryHotkeys": "Galerie Tastenkürzel",
|
||||
"unifiedCanvasHotkeys": "Unified Canvas Tastenkürzel",
|
||||
"invoke": {
|
||||
"desc": "Ein Bild erzeugen"
|
||||
},
|
||||
"cancel": {
|
||||
"title": "Abbrechen",
|
||||
"desc": "Bilderzeugung abbrechen"
|
||||
},
|
||||
"focusPrompt": {
|
||||
"title": "Fokussiere Prompt",
|
||||
"desc": "Fokussieren des Eingabefeldes für den Prompt"
|
||||
},
|
||||
"toggleOptions": {
|
||||
"title": "Optionen umschalten",
|
||||
"desc": "Öffnen und Schließen des Optionsfeldes"
|
||||
},
|
||||
"pinOptions": {
|
||||
"title": "Optionen anheften",
|
||||
"desc": "Anheften des Optionsfeldes"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "Bildbetrachter umschalten",
|
||||
"desc": "Bildbetrachter öffnen und schließen"
|
||||
},
|
||||
"toggleGallery": {
|
||||
"title": "Galerie umschalten",
|
||||
"desc": "Öffnen und Schließen des Galerie-Schubfachs"
|
||||
},
|
||||
"maximizeWorkSpace": {
|
||||
"title": "Arbeitsbereich maximieren",
|
||||
"desc": "Schließen Sie die Panels und maximieren Sie den Arbeitsbereich"
|
||||
},
|
||||
"changeTabs": {
|
||||
"title": "Tabs wechseln",
|
||||
"desc": "Zu einem anderen Arbeitsbereich wechseln"
|
||||
},
|
||||
"consoleToggle": {
|
||||
"title": "Konsole Umschalten",
|
||||
"desc": "Konsole öffnen und schließen"
|
||||
},
|
||||
"setPrompt": {
|
||||
"title": "Prompt setzen",
|
||||
"desc": "Verwende den Prompt des aktuellen Bildes"
|
||||
},
|
||||
"setSeed": {
|
||||
"title": "Seed setzen",
|
||||
"desc": "Verwende den Seed des aktuellen Bildes"
|
||||
},
|
||||
"setParameters": {
|
||||
"title": "Parameter setzen",
|
||||
"desc": "Alle Parameter des aktuellen Bildes verwenden"
|
||||
},
|
||||
"restoreFaces": {
|
||||
"title": "Gesicht restaurieren",
|
||||
"desc": "Das aktuelle Bild restaurieren"
|
||||
},
|
||||
"upscale": {
|
||||
"title": "Hochskalieren",
|
||||
"desc": "Das aktuelle Bild hochskalieren"
|
||||
},
|
||||
"showInfo": {
|
||||
"title": "Info anzeigen",
|
||||
"desc": "Metadaten des aktuellen Bildes anzeigen"
|
||||
},
|
||||
"sendToImageToImage": {
|
||||
"title": "An Bild zu Bild senden",
|
||||
"desc": "Aktuelles Bild an Bild zu Bild senden"
|
||||
},
|
||||
"deleteImage": {
|
||||
"title": "Bild löschen",
|
||||
"desc": "Aktuelles Bild löschen"
|
||||
},
|
||||
"closePanels": {
|
||||
"title": "Panels schließen",
|
||||
"desc": "Schließt offene Panels"
|
||||
},
|
||||
"previousImage": {
|
||||
"title": "Vorheriges Bild",
|
||||
"desc": "Vorheriges Bild in der Galerie anzeigen"
|
||||
},
|
||||
"nextImage": {
|
||||
"title": "Nächstes Bild",
|
||||
"desc": "Nächstes Bild in Galerie anzeigen"
|
||||
},
|
||||
"toggleGalleryPin": {
|
||||
"title": "Galerie anheften umschalten",
|
||||
"desc": "Heftet die Galerie an die Benutzeroberfläche bzw. löst die sie."
|
||||
},
|
||||
"increaseGalleryThumbSize": {
|
||||
"title": "Größe der Galeriebilder erhöhen",
|
||||
"desc": "Vergrößert die Galerie-Miniaturansichten"
|
||||
},
|
||||
"decreaseGalleryThumbSize": {
|
||||
"title": "Größe der Galeriebilder verringern",
|
||||
"desc": "Verringert die Größe der Galerie-Miniaturansichten"
|
||||
},
|
||||
"selectBrush": {
|
||||
"title": "Pinsel auswählen",
|
||||
"desc": "Wählt den Leinwandpinsel aus"
|
||||
},
|
||||
"selectEraser": {
|
||||
"title": "Radiergummi auswählen",
|
||||
"desc": "Wählt den Radiergummi für die Leinwand aus"
|
||||
},
|
||||
"decreaseBrushSize": {
|
||||
"title": "Pinselgröße verkleinern",
|
||||
"desc": "Verringert die Größe des Pinsels/Radiergummis"
|
||||
},
|
||||
"increaseBrushSize": {
|
||||
"title": "Pinselgröße erhöhen",
|
||||
"desc": "Erhöht die Größe des Pinsels/Radiergummis"
|
||||
},
|
||||
"decreaseBrushOpacity": {
|
||||
"title": "Deckkraft des Pinsels vermindern",
|
||||
"desc": "Verringert die Deckkraft des Pinsels"
|
||||
},
|
||||
"increaseBrushOpacity": {
|
||||
"title": "Deckkraft des Pinsels erhöhen",
|
||||
"desc": "Erhöht die Deckkraft des Pinsels"
|
||||
},
|
||||
"moveTool": {
|
||||
"title": "Verschieben Werkzeug",
|
||||
"desc": "Ermöglicht die Navigation auf der Leinwand"
|
||||
},
|
||||
"fillBoundingBox": {
|
||||
"title": "Begrenzungsrahmen füllen",
|
||||
"desc": "Füllt den Begrenzungsrahmen mit Pinselfarbe"
|
||||
},
|
||||
"eraseBoundingBox": {
|
||||
"title": "Begrenzungsrahmen löschen",
|
||||
"desc": "Löscht den Bereich des Begrenzungsrahmens"
|
||||
},
|
||||
"colorPicker": {
|
||||
"title": "Farbpipette",
|
||||
"desc": "Farben aus dem Bild aufnehmen"
|
||||
},
|
||||
"toggleSnap": {
|
||||
"title": "Einrasten umschalten",
|
||||
"desc": "Schaltet Einrasten am Raster ein und aus"
|
||||
},
|
||||
"quickToggleMove": {
|
||||
"title": "Schnell Verschiebemodus",
|
||||
"desc": "Schaltet vorübergehend den Verschiebemodus um"
|
||||
},
|
||||
"toggleLayer": {
|
||||
"title": "Ebene umschalten",
|
||||
"desc": "Schaltet die Auswahl von Maske/Basisebene um"
|
||||
},
|
||||
"clearMask": {
|
||||
"title": "Lösche Maske",
|
||||
"desc": "Die gesamte Maske löschen"
|
||||
},
|
||||
"hideMask": {
|
||||
"title": "Maske ausblenden",
|
||||
"desc": "Maske aus- und einblenden"
|
||||
},
|
||||
"showHideBoundingBox": {
|
||||
"title": "Begrenzungsrahmen ein-/ausblenden",
|
||||
"desc": "Sichtbarkeit des Begrenzungsrahmens ein- und ausschalten"
|
||||
},
|
||||
"mergeVisible": {
|
||||
"title": "Sichtbares Zusammenführen",
|
||||
"desc": "Alle sichtbaren Ebenen der Leinwand zusammenführen"
|
||||
},
|
||||
"saveToGallery": {
|
||||
"title": "In Galerie speichern",
|
||||
"desc": "Aktuelle Leinwand in Galerie speichern"
|
||||
},
|
||||
"copyToClipboard": {
|
||||
"title": "In die Zwischenablage kopieren",
|
||||
"desc": "Aktuelle Leinwand in die Zwischenablage kopieren"
|
||||
},
|
||||
"downloadImage": {
|
||||
"title": "Bild herunterladen",
|
||||
"desc": "Aktuelle Leinwand herunterladen"
|
||||
},
|
||||
"undoStroke": {
|
||||
"title": "Pinselstrich rückgängig machen",
|
||||
"desc": "Einen Pinselstrich rückgängig machen"
|
||||
},
|
||||
"redoStroke": {
|
||||
"title": "Pinselstrich wiederherstellen",
|
||||
"desc": "Einen Pinselstrich wiederherstellen"
|
||||
},
|
||||
"resetView": {
|
||||
"title": "Ansicht zurücksetzen",
|
||||
"desc": "Leinwandansicht zurücksetzen"
|
||||
},
|
||||
"previousStagingImage": {
|
||||
"title": "Vorheriges Staging-Bild",
|
||||
"desc": "Bild des vorherigen Staging-Bereichs"
|
||||
},
|
||||
"nextStagingImage": {
|
||||
"title": "Nächstes Staging-Bild",
|
||||
"desc": "Bild des nächsten Staging-Bereichs"
|
||||
},
|
||||
"acceptStagingImage": {
|
||||
"title": "Staging-Bild akzeptieren",
|
||||
"desc": "Akzeptieren Sie das aktuelle Bild des Staging-Bereichs"
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
"modelAdded": "Model hinzugefügt",
|
||||
"modelUpdated": "Model aktualisiert",
|
||||
"modelEntryDeleted": "Modelleintrag gelöscht",
|
||||
"cannotUseSpaces": "Leerzeichen können nicht verwendet werden",
|
||||
"addNew": "Neue hinzufügen",
|
||||
"addNewModel": "Neues Model hinzufügen",
|
||||
"addManually": "Manuell hinzufügen",
|
||||
"nameValidationMsg": "Geben Sie einen Namen für Ihr Model ein",
|
||||
"description": "Beschreibung",
|
||||
"descriptionValidationMsg": "Fügen Sie eine Beschreibung für Ihr Model hinzu",
|
||||
"config": "Konfiguration",
|
||||
"configValidationMsg": "Pfad zur Konfigurationsdatei Ihres Models.",
|
||||
"modelLocation": "Ort des Models",
|
||||
"modelLocationValidationMsg": "Pfad zum Speicherort Ihres Models.",
|
||||
"vaeLocation": "VAE Ort",
|
||||
"vaeLocationValidationMsg": "Pfad zum Speicherort Ihres VAE.",
|
||||
"width": "Breite",
|
||||
"widthValidationMsg": "Standardbreite Ihres Models.",
|
||||
"height": "Höhe",
|
||||
"heightValidationMsg": "Standardbhöhe Ihres Models.",
|
||||
"addModel": "Model hinzufügen",
|
||||
"updateModel": "Model aktualisieren",
|
||||
"availableModels": "Verfügbare Models",
|
||||
"search": "Suche",
|
||||
"load": "Laden",
|
||||
"active": "Aktiv",
|
||||
"notLoaded": "nicht geladen",
|
||||
"cached": "zwischengespeichert",
|
||||
"checkpointFolder": "Checkpoint-Ordner",
|
||||
"clearCheckpointFolder": "Checkpoint-Ordner löschen",
|
||||
"findModels": "Models finden",
|
||||
"scanAgain": "Erneut scannen",
|
||||
"modelsFound": "Models gefunden",
|
||||
"selectFolder": "Ordner auswählen",
|
||||
"selected": "Ausgewählt",
|
||||
"selectAll": "Alles auswählen",
|
||||
"deselectAll": "Alle abwählen",
|
||||
"showExisting": "Vorhandene anzeigen",
|
||||
"addSelected": "Auswahl hinzufügen",
|
||||
"modelExists": "Model existiert",
|
||||
"selectAndAdd": "Unten aufgeführte Models auswählen und hinzufügen",
|
||||
"noModelsFound": "Keine Models gefunden",
|
||||
"delete": "Löschen",
|
||||
"deleteModel": "Model löschen",
|
||||
"deleteConfig": "Konfiguration löschen",
|
||||
"deleteMsg1": "Möchten Sie diesen Model-Eintrag wirklich aus InvokeAI löschen?",
|
||||
"deleteMsg2": "Dadurch wird die Modellprüfpunktdatei nicht von Ihrer Festplatte gelöscht. Sie können sie bei Bedarf erneut hinzufügen."
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Bilder",
|
||||
"steps": "Schritte",
|
||||
"cfgScale": "CFG-Skala",
|
||||
"width": "Breite",
|
||||
"height": "Höhe",
|
||||
"sampler": "Sampler",
|
||||
"randomizeSeed": "Zufälliger Seed",
|
||||
"shuffle": "Mischen",
|
||||
"noiseThreshold": "Rausch-Schwellenwert",
|
||||
"perlinNoise": "Perlin-Rauschen",
|
||||
"variations": "Variationen",
|
||||
"variationAmount": "Höhe der Abweichung",
|
||||
"seedWeights": "Seed-Gewichte",
|
||||
"faceRestoration": "Gesichtsrestaurierung",
|
||||
"restoreFaces": "Gesichter wiederherstellen",
|
||||
"type": "Art",
|
||||
"strength": "Stärke",
|
||||
"upscaling": "Hochskalierung",
|
||||
"upscale": "Hochskalieren",
|
||||
"upscaleImage": "Bild hochskalieren",
|
||||
"scale": "Maßstab",
|
||||
"otherOptions": "Andere Optionen",
|
||||
"seamlessTiling": "Nahtlose Kacheln",
|
||||
"hiresOptim": "High-Res-Optimierung",
|
||||
"imageFit": "Ausgangsbild an Ausgabegröße anpassen",
|
||||
"codeformerFidelity": "Glaubwürdigkeit",
|
||||
"seamSize": "Nahtgröße",
|
||||
"seamBlur": "Nahtunschärfe",
|
||||
"seamStrength": "Stärke der Naht",
|
||||
"seamSteps": "Nahtstufen",
|
||||
"scaleBeforeProcessing": "Skalieren vor der Verarbeitung",
|
||||
"scaledWidth": "Skaliert W",
|
||||
"scaledHeight": "Skaliert H",
|
||||
"infillMethod": "Infill-Methode",
|
||||
"tileSize": "Kachelgröße",
|
||||
"boundingBoxHeader": "Begrenzungsrahmen",
|
||||
"seamCorrectionHeader": "Nahtkorrektur",
|
||||
"infillScalingHeader": "Infill und Skalierung",
|
||||
"img2imgStrength": "Bild-zu-Bild-Stärke",
|
||||
"toggleLoopback": "Toggle Loopback",
|
||||
"invoke": "Invoke",
|
||||
"promptPlaceholder": "Prompt hier eingeben. [negative Token], (mehr Gewicht)++, (geringeres Gewicht)--, Tausch und Überblendung sind verfügbar (siehe Dokumente)",
|
||||
"sendTo": "Senden an",
|
||||
"sendToImg2Img": "Senden an Bild zu Bild",
|
||||
"sendToUnifiedCanvas": "Senden an Unified Canvas",
|
||||
"copyImageToLink": "Bild-Link kopieren",
|
||||
"downloadImage": "Bild herunterladen",
|
||||
"openInViewer": "Im Viewer öffnen",
|
||||
"closeViewer": "Viewer schließen",
|
||||
"usePrompt": "Prompt verwenden",
|
||||
"useSeed": "Seed verwenden",
|
||||
"useAll": "Alle verwenden",
|
||||
"useInitImg": "Ausgangsbild verwenden",
|
||||
"deleteImage": "Bild löschen",
|
||||
"initialImage": "Ursprüngliches Bild",
|
||||
"showOptionsPanel": "Optionsleiste zeigen"
|
||||
},
|
||||
"settings": {
|
||||
"displayInProgress": "Bilder in Bearbeitung anzeigen",
|
||||
"saveSteps": "Speichern der Bilder alle n Schritte",
|
||||
"confirmOnDelete": "Bestätigen beim Löschen",
|
||||
"displayHelpIcons": "Hilfesymbole anzeigen",
|
||||
"useCanvasBeta": "Canvas Beta Layout verwenden",
|
||||
"enableImageDebugging": "Bild-Debugging aktivieren",
|
||||
"resetWebUI": "Web-Oberfläche zurücksetzen",
|
||||
"resetWebUIDesc1": "Das Zurücksetzen der Web-Oberfläche setzt nur den lokalen Cache des Browsers mit Ihren Bildern und gespeicherten Einstellungen zurück. Es werden keine Bilder von der Festplatte gelöscht.",
|
||||
"resetWebUIDesc2": "Wenn die Bilder nicht in der Galerie angezeigt werden oder etwas anderes nicht funktioniert, versuchen Sie bitte, die Einstellungen zurückzusetzen, bevor Sie einen Fehler auf GitHub melden.",
|
||||
"resetComplete": "Die Web-Oberfläche wurde zurückgesetzt. Aktualisieren Sie die Seite, um sie neu zu laden."
|
||||
},
|
||||
"toast": {
|
||||
"tempFoldersEmptied": "Temp-Ordner geleert",
|
||||
"uploadFailed": "Hochladen fehlgeschlagen",
|
||||
"uploadFailedMultipleImagesDesc": "Mehrere Bilder eingefügt, es kann nur ein Bild auf einmal hochgeladen werden",
|
||||
"uploadFailedUnableToLoadDesc": "Datei kann nicht geladen werden",
|
||||
"downloadImageStarted": "Bild wird heruntergeladen",
|
||||
"imageCopied": "Bild kopiert",
|
||||
"imageLinkCopied": "Bildlink kopiert",
|
||||
"imageNotLoaded": "Kein Bild geladen",
|
||||
"imageNotLoadedDesc": "Kein Bild gefunden, das an das Bild zu Bild-Modul gesendet werden kann",
|
||||
"imageSavedToGallery": "Bild in die Galerie gespeichert",
|
||||
"canvasMerged": "Leinwand zusammengeführt",
|
||||
"sentToImageToImage": "Gesendet an Bild zu Bild",
|
||||
"sentToUnifiedCanvas": "Gesendet an Unified Canvas",
|
||||
"parametersSet": "Parameter festlegen",
|
||||
"parametersNotSet": "Parameter nicht festgelegt",
|
||||
"parametersNotSetDesc": "Keine Metadaten für dieses Bild gefunden.",
|
||||
"parametersFailed": "Problem beim Laden der Parameter",
|
||||
"parametersFailedDesc": "Ausgangsbild kann nicht geladen werden.",
|
||||
"seedSet": "Seed festlegen",
|
||||
"seedNotSet": "Saatgut nicht festgelegt",
|
||||
"seedNotSetDesc": "Für dieses Bild wurde kein Seed gefunden.",
|
||||
"promptSet": "Prompt festgelegt",
|
||||
"promptNotSet": "Prompt nicht festgelegt",
|
||||
"promptNotSetDesc": "Für dieses Bild wurde kein Prompt gefunden.",
|
||||
"upscalingFailed": "Hochskalierung fehlgeschlagen",
|
||||
"faceRestoreFailed": "Gesichtswiederherstellung fehlgeschlagen",
|
||||
"metadataLoadFailed": "Metadaten konnten nicht geladen werden",
|
||||
"initialImageSet": "Ausgangsbild festgelegt",
|
||||
"initialImageNotSet": "Ausgangsbild nicht festgelegt",
|
||||
"initialImageNotSetDesc": "Ausgangsbild konnte nicht geladen werden"
|
||||
},
|
||||
"tooltip": {
|
||||
"feature": {
|
||||
"prompt": "Dies ist das Prompt-Feld. Ein Prompt enthält Generierungsobjekte und stilistische Begriffe. Sie können auch Gewichtungen (Token-Bedeutung) dem Prompt hinzufügen, aber CLI-Befehle und Parameter funktionieren nicht.",
|
||||
"gallery": "Die Galerie zeigt erzeugte Bilder aus dem Ausgabeordner an, sobald sie erstellt wurden. Die Einstellungen werden in den Dateien gespeichert und können über das Kontextmenü aufgerufen werden.",
|
||||
"other": "Mit diesen Optionen werden alternative Verarbeitungsmodi für InvokeAI aktiviert. 'Nahtlose Kachelung' erzeugt sich wiederholende Muster in der Ausgabe. 'Hohe Auflösungen' werden in zwei Schritten mit img2img erzeugt: Verwenden Sie diese Einstellung, wenn Sie ein größeres und kohärenteres Bild ohne Artefakte wünschen. Es dauert länger als das normale txt2img.",
|
||||
"seed": "Der Seed-Wert beeinflusst das Ausgangsrauschen, aus dem das Bild erstellt wird. Sie können die bereits vorhandenen Seeds von früheren Bildern verwenden. 'Der Rauschschwellenwert' wird verwendet, um Artefakte bei hohen CFG-Werten abzuschwächen (versuchen Sie es im Bereich 0-10), und Perlin, um während der Erzeugung Perlin-Rauschen hinzuzufügen: Beide dienen dazu, Ihre Ergebnisse zu variieren.",
|
||||
"variations": "Versuchen Sie eine Variation mit einem Wert zwischen 0,1 und 1,0, um das Ergebnis für ein bestimmtes Seed zu ändern. Interessante Variationen des Seeds liegen zwischen 0,1 und 0,3.",
|
||||
"upscale": "Verwenden Sie ESRGAN, um das Bild unmittelbar nach der Erzeugung zu vergrößern.",
|
||||
"faceCorrection": "Gesichtskorrektur mit GFPGAN oder Codeformer: Der Algorithmus erkennt Gesichter im Bild und korrigiert alle Fehler. Ein hoher Wert verändert das Bild stärker, was zu attraktiveren Gesichtern führt. Codeformer mit einer höheren Genauigkeit bewahrt das Originalbild auf Kosten einer stärkeren Gesichtskorrektur.",
|
||||
"imageToImage": "Bild zu Bild lädt ein beliebiges Bild als Ausgangsbild, aus dem dann zusammen mit dem Prompt ein neues Bild erzeugt wird. Je höher der Wert ist, desto stärker wird das Ergebnisbild verändert. Werte von 0,0 bis 1,0 sind möglich, der empfohlene Bereich ist .25-.75",
|
||||
"boundingBox": "Der Begrenzungsrahmen ist derselbe wie die Einstellungen für Breite und Höhe bei Text zu Bild oder Bild zu Bild. Es wird nur der Bereich innerhalb des Rahmens verarbeitet.",
|
||||
"seamCorrection": "Steuert die Behandlung von sichtbaren Übergängen, die zwischen den erzeugten Bildern auf der Leinwand auftreten.",
|
||||
"infillAndScaling": "Verwalten Sie Infill-Methoden (für maskierte oder gelöschte Bereiche der Leinwand) und Skalierung (nützlich für kleine Begrenzungsrahmengrößen)."
|
||||
}
|
||||
},
|
||||
"unifiedCanvas": {
|
||||
"layer": "Ebene",
|
||||
"base": "Basis",
|
||||
"mask": "Maske",
|
||||
"maskingOptions": "Maskierungsoptionen",
|
||||
"enableMask": "Maske aktivieren",
|
||||
"preserveMaskedArea": "Maskierten Bereich bewahren",
|
||||
"clearMask": "Maske löschen",
|
||||
"brush": "Pinsel",
|
||||
"eraser": "Radierer",
|
||||
"fillBoundingBox": "Begrenzungsrahmen füllen",
|
||||
"eraseBoundingBox": "Begrenzungsrahmen löschen",
|
||||
"colorPicker": "Farbpipette",
|
||||
"brushOptions": "Pinseloptionen",
|
||||
"brushSize": "Größe",
|
||||
"move": "Bewegen",
|
||||
"resetView": "Ansicht zurücksetzen",
|
||||
"mergeVisible": "Sichtbare Zusammenführen",
|
||||
"saveToGallery": "In Galerie speichern",
|
||||
"copyToClipboard": "In Zwischenablage kopieren",
|
||||
"downloadAsImage": "Als Bild herunterladen",
|
||||
"undo": "Rückgängig",
|
||||
"redo": "Wiederherstellen",
|
||||
"clearCanvas": "Leinwand löschen",
|
||||
"canvasSettings": "Leinwand-Einstellungen",
|
||||
"showIntermediates": "Zwischenprodukte anzeigen",
|
||||
"showGrid": "Gitternetz anzeigen",
|
||||
"snapToGrid": "Am Gitternetz einrasten",
|
||||
"darkenOutsideSelection": "Außerhalb der Auswahl verdunkeln",
|
||||
"autoSaveToGallery": "Automatisch in Galerie speichern",
|
||||
"saveBoxRegionOnly": "Nur Auswahlbox speichern",
|
||||
"limitStrokesToBox": "Striche auf Box beschränken",
|
||||
"showCanvasDebugInfo": "Leinwand-Debug-Infos anzeigen",
|
||||
"clearCanvasHistory": "Leinwand-Verlauf löschen",
|
||||
"clearHistory": "Verlauf löschen",
|
||||
"clearCanvasHistoryMessage": "Wenn Sie den Verlauf der Leinwand löschen, bleibt die aktuelle Leinwand intakt, aber der Verlauf der Rückgängig- und Wiederherstellung wird unwiderruflich gelöscht.",
|
||||
"clearCanvasHistoryConfirm": "Sind Sie sicher, dass Sie den Verlauf der Leinwand löschen möchten?",
|
||||
"emptyTempImageFolder": "Temp-Image Ordner leeren",
|
||||
"emptyFolder": "Leerer Ordner",
|
||||
"emptyTempImagesFolderMessage": "Wenn Sie den Ordner für temporäre Bilder leeren, wird auch der Unified Canvas vollständig zurückgesetzt. Dies umfasst den gesamten Verlauf der Rückgängig-/Wiederherstellungsvorgänge, die Bilder im Bereitstellungsbereich und die Leinwand-Basisebene.",
|
||||
"emptyTempImagesFolderConfirm": "Sind Sie sicher, dass Sie den temporären Ordner leeren wollen?",
|
||||
"activeLayer": "Aktive Ebene",
|
||||
"canvasScale": "Leinwand Maßstab",
|
||||
"boundingBox": "Begrenzungsrahmen",
|
||||
"scaledBoundingBox": "Skalierter Begrenzungsrahmen",
|
||||
"boundingBoxPosition": "Begrenzungsrahmen Position",
|
||||
"canvasDimensions": "Maße der Leinwand",
|
||||
"canvasPosition": "Leinwandposition",
|
||||
"cursorPosition": "Position des Cursors",
|
||||
"previous": "Vorherige",
|
||||
"next": "Nächste",
|
||||
"accept": "Akzeptieren",
|
||||
"showHide": "Einblenden/Ausblenden",
|
||||
"discardAll": "Alles verwerfen",
|
||||
"betaClear": "Löschen",
|
||||
"betaDarkenOutside": "Außen abdunkeln",
|
||||
"betaLimitToBox": "Begrenzung auf das Feld",
|
||||
"betaPreserveMasked": "Maskiertes bewahren"
|
||||
}
|
||||
}
|
||||
597
invokeai/frontend/dist/locales/en.json
vendored
Normal file
597
invokeai/frontend/dist/locales/en.json
vendored
Normal file
@@ -0,0 +1,597 @@
|
||||
{
|
||||
"common": {
|
||||
"hotkeysLabel": "Hotkeys",
|
||||
"themeLabel": "Theme",
|
||||
"languagePickerLabel": "Language Picker",
|
||||
"reportBugLabel": "Report Bug",
|
||||
"githubLabel": "Github",
|
||||
"discordLabel": "Discord",
|
||||
"settingsLabel": "Settings",
|
||||
"darkTheme": "Dark",
|
||||
"lightTheme": "Light",
|
||||
"greenTheme": "Green",
|
||||
"langArabic": "العربية",
|
||||
"langEnglish": "English",
|
||||
"langDutch": "Nederlands",
|
||||
"langFrench": "Français",
|
||||
"langGerman": "Deutsch",
|
||||
"langItalian": "Italiano",
|
||||
"langJapanese": "日本語",
|
||||
"langPolish": "Polski",
|
||||
"langBrPortuguese": "Português do Brasil",
|
||||
"langRussian": "Русский",
|
||||
"langSimplifiedChinese": "简体中文",
|
||||
"langUkranian": "Украї́нська",
|
||||
"langSpanish": "Español",
|
||||
"text2img": "Text To Image",
|
||||
"img2img": "Image To Image",
|
||||
"unifiedCanvas": "Unified Canvas",
|
||||
"nodes": "Nodes",
|
||||
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
|
||||
"postProcessing": "Post Processing",
|
||||
"postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.",
|
||||
"postProcessDesc2": "A dedicated UI will be released soon to facilitate more advanced post processing workflows.",
|
||||
"postProcessDesc3": "The Invoke AI Command Line Interface offers various other features including Embiggen.",
|
||||
"training": "Training",
|
||||
"trainingDesc1": "A dedicated workflow for training your own embeddings and checkpoints using Textual Inversion and Dreambooth from the web interface.",
|
||||
"trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.",
|
||||
"upload": "Upload",
|
||||
"close": "Close",
|
||||
"load": "Load",
|
||||
"back": "Back",
|
||||
"statusConnected": "Connected",
|
||||
"statusDisconnected": "Disconnected",
|
||||
"statusError": "Error",
|
||||
"statusPreparing": "Preparing",
|
||||
"statusProcessingCanceled": "Processing Canceled",
|
||||
"statusProcessingComplete": "Processing Complete",
|
||||
"statusGenerating": "Generating",
|
||||
"statusGeneratingTextToImage": "Generating Text To Image",
|
||||
"statusGeneratingImageToImage": "Generating Image To Image",
|
||||
"statusGeneratingInpainting": "Generating Inpainting",
|
||||
"statusGeneratingOutpainting": "Generating Outpainting",
|
||||
"statusGenerationComplete": "Generation Complete",
|
||||
"statusIterationComplete": "Iteration Complete",
|
||||
"statusSavingImage": "Saving Image",
|
||||
"statusRestoringFaces": "Restoring Faces",
|
||||
"statusRestoringFacesGFPGAN": "Restoring Faces (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Restoring Faces (CodeFormer)",
|
||||
"statusUpscaling": "Upscaling",
|
||||
"statusUpscalingESRGAN": "Upscaling (ESRGAN)",
|
||||
"statusLoadingModel": "Loading Model",
|
||||
"statusModelChanged": "Model Changed",
|
||||
"statusConvertingModel": "Converting Model",
|
||||
"statusModelConverted": "Model Converted",
|
||||
"statusMergingModels": "Merging Models",
|
||||
"statusMergedModels": "Models Merged",
|
||||
"pinOptionsPanel": "Pin Options Panel"
|
||||
},
|
||||
"gallery": {
|
||||
"generations": "Generations",
|
||||
"showGenerations": "Show Generations",
|
||||
"uploads": "Uploads",
|
||||
"showUploads": "Show Uploads",
|
||||
"galleryImageSize": "Image Size",
|
||||
"galleryImageResetSize": "Reset Size",
|
||||
"gallerySettings": "Gallery Settings",
|
||||
"maintainAspectRatio": "Maintain Aspect Ratio",
|
||||
"autoSwitchNewImages": "Auto-Switch to New Images",
|
||||
"singleColumnLayout": "Single Column Layout",
|
||||
"pinGallery": "Pin Gallery",
|
||||
"allImagesLoaded": "All Images Loaded",
|
||||
"loadMore": "Load More",
|
||||
"noImagesInGallery": "No Images In Gallery"
|
||||
},
|
||||
"hotkeys": {
|
||||
"keyboardShortcuts": "Keyboard Shorcuts",
|
||||
"appHotkeys": "App Hotkeys",
|
||||
"generalHotkeys": "General Hotkeys",
|
||||
"galleryHotkeys": "Gallery Hotkeys",
|
||||
"unifiedCanvasHotkeys": "Unified Canvas Hotkeys",
|
||||
"invoke": {
|
||||
"title": "Invoke",
|
||||
"desc": "Generate an image"
|
||||
},
|
||||
"cancel": {
|
||||
"title": "Cancel",
|
||||
"desc": "Cancel image generation"
|
||||
},
|
||||
"focusPrompt": {
|
||||
"title": "Focus Prompt",
|
||||
"desc": "Focus the prompt input area"
|
||||
},
|
||||
"toggleOptions": {
|
||||
"title": "Toggle Options",
|
||||
"desc": "Open and close the options panel"
|
||||
},
|
||||
"pinOptions": {
|
||||
"title": "Pin Options",
|
||||
"desc": "Pin the options panel"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "Toggle Viewer",
|
||||
"desc": "Open and close Image Viewer"
|
||||
},
|
||||
"toggleGallery": {
|
||||
"title": "Toggle Gallery",
|
||||
"desc": "Open and close the gallery drawer"
|
||||
},
|
||||
"maximizeWorkSpace": {
|
||||
"title": "Maximize Workspace",
|
||||
"desc": "Close panels and maximize work area"
|
||||
},
|
||||
"changeTabs": {
|
||||
"title": "Change Tabs",
|
||||
"desc": "Switch to another workspace"
|
||||
},
|
||||
"consoleToggle": {
|
||||
"title": "Console Toggle",
|
||||
"desc": "Open and close console"
|
||||
},
|
||||
"setPrompt": {
|
||||
"title": "Set Prompt",
|
||||
"desc": "Use the prompt of the current image"
|
||||
},
|
||||
"setSeed": {
|
||||
"title": "Set Seed",
|
||||
"desc": "Use the seed of the current image"
|
||||
},
|
||||
"setParameters": {
|
||||
"title": "Set Parameters",
|
||||
"desc": "Use all parameters of the current image"
|
||||
},
|
||||
"restoreFaces": {
|
||||
"title": "Restore Faces",
|
||||
"desc": "Restore the current image"
|
||||
},
|
||||
"upscale": {
|
||||
"title": "Upscale",
|
||||
"desc": "Upscale the current image"
|
||||
},
|
||||
"showInfo": {
|
||||
"title": "Show Info",
|
||||
"desc": "Show metadata info of the current image"
|
||||
},
|
||||
"sendToImageToImage": {
|
||||
"title": "Send To Image To Image",
|
||||
"desc": "Send current image to Image to Image"
|
||||
},
|
||||
"deleteImage": {
|
||||
"title": "Delete Image",
|
||||
"desc": "Delete the current image"
|
||||
},
|
||||
"closePanels": {
|
||||
"title": "Close Panels",
|
||||
"desc": "Closes open panels"
|
||||
},
|
||||
"previousImage": {
|
||||
"title": "Previous Image",
|
||||
"desc": "Display the previous image in gallery"
|
||||
},
|
||||
"nextImage": {
|
||||
"title": "Next Image",
|
||||
"desc": "Display the next image in gallery"
|
||||
},
|
||||
"toggleGalleryPin": {
|
||||
"title": "Toggle Gallery Pin",
|
||||
"desc": "Pins and unpins the gallery to the UI"
|
||||
},
|
||||
"increaseGalleryThumbSize": {
|
||||
"title": "Increase Gallery Image Size",
|
||||
"desc": "Increases gallery thumbnails size"
|
||||
},
|
||||
"decreaseGalleryThumbSize": {
|
||||
"title": "Decrease Gallery Image Size",
|
||||
"desc": "Decreases gallery thumbnails size"
|
||||
},
|
||||
"selectBrush": {
|
||||
"title": "Select Brush",
|
||||
"desc": "Selects the canvas brush"
|
||||
},
|
||||
"selectEraser": {
|
||||
"title": "Select Eraser",
|
||||
"desc": "Selects the canvas eraser"
|
||||
},
|
||||
"decreaseBrushSize": {
|
||||
"title": "Decrease Brush Size",
|
||||
"desc": "Decreases the size of the canvas brush/eraser"
|
||||
},
|
||||
"increaseBrushSize": {
|
||||
"title": "Increase Brush Size",
|
||||
"desc": "Increases the size of the canvas brush/eraser"
|
||||
},
|
||||
"decreaseBrushOpacity": {
|
||||
"title": "Decrease Brush Opacity",
|
||||
"desc": "Decreases the opacity of the canvas brush"
|
||||
},
|
||||
"increaseBrushOpacity": {
|
||||
"title": "Increase Brush Opacity",
|
||||
"desc": "Increases the opacity of the canvas brush"
|
||||
},
|
||||
"moveTool": {
|
||||
"title": "Move Tool",
|
||||
"desc": "Allows canvas navigation"
|
||||
},
|
||||
"fillBoundingBox": {
|
||||
"title": "Fill Bounding Box",
|
||||
"desc": "Fills the bounding box with brush color"
|
||||
},
|
||||
"eraseBoundingBox": {
|
||||
"title": "Erase Bounding Box",
|
||||
"desc": "Erases the bounding box area"
|
||||
},
|
||||
"colorPicker": {
|
||||
"title": "Select Color Picker",
|
||||
"desc": "Selects the canvas color picker"
|
||||
},
|
||||
"toggleSnap": {
|
||||
"title": "Toggle Snap",
|
||||
"desc": "Toggles Snap to Grid"
|
||||
},
|
||||
"quickToggleMove": {
|
||||
"title": "Quick Toggle Move",
|
||||
"desc": "Temporarily toggles Move mode"
|
||||
},
|
||||
"toggleLayer": {
|
||||
"title": "Toggle Layer",
|
||||
"desc": "Toggles mask/base layer selection"
|
||||
},
|
||||
"clearMask": {
|
||||
"title": "Clear Mask",
|
||||
"desc": "Clear the entire mask"
|
||||
},
|
||||
"hideMask": {
|
||||
"title": "Hide Mask",
|
||||
"desc": "Hide and unhide mask"
|
||||
},
|
||||
"showHideBoundingBox": {
|
||||
"title": "Show/Hide Bounding Box",
|
||||
"desc": "Toggle visibility of bounding box"
|
||||
},
|
||||
"mergeVisible": {
|
||||
"title": "Merge Visible",
|
||||
"desc": "Merge all visible layers of canvas"
|
||||
},
|
||||
"saveToGallery": {
|
||||
"title": "Save To Gallery",
|
||||
"desc": "Save current canvas to gallery"
|
||||
},
|
||||
"copyToClipboard": {
|
||||
"title": "Copy to Clipboard",
|
||||
"desc": "Copy current canvas to clipboard"
|
||||
},
|
||||
"downloadImage": {
|
||||
"title": "Download Image",
|
||||
"desc": "Download current canvas"
|
||||
},
|
||||
"undoStroke": {
|
||||
"title": "Undo Stroke",
|
||||
"desc": "Undo a brush stroke"
|
||||
},
|
||||
"redoStroke": {
|
||||
"title": "Redo Stroke",
|
||||
"desc": "Redo a brush stroke"
|
||||
},
|
||||
"resetView": {
|
||||
"title": "Reset View",
|
||||
"desc": "Reset Canvas View"
|
||||
},
|
||||
"previousStagingImage": {
|
||||
"title": "Previous Staging Image",
|
||||
"desc": "Previous Staging Area Image"
|
||||
},
|
||||
"nextStagingImage": {
|
||||
"title": "Next Staging Image",
|
||||
"desc": "Next Staging Area Image"
|
||||
},
|
||||
"acceptStagingImage": {
|
||||
"title": "Accept Staging Image",
|
||||
"desc": "Accept Current Staging Area Image"
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
"modelManager": "Model Manager",
|
||||
"model": "Model",
|
||||
"allModels": "All Models",
|
||||
"checkpointModels": "Checkpoints",
|
||||
"diffusersModels": "Diffusers",
|
||||
"safetensorModels": "SafeTensors",
|
||||
"modelAdded": "Model Added",
|
||||
"modelUpdated": "Model Updated",
|
||||
"modelEntryDeleted": "Model Entry Deleted",
|
||||
"cannotUseSpaces": "Cannot Use Spaces",
|
||||
"addNew": "Add New",
|
||||
"addNewModel": "Add New Model",
|
||||
"addCheckpointModel": "Add Checkpoint / Safetensor Model",
|
||||
"addDiffuserModel": "Add Diffusers",
|
||||
"addManually": "Add Manually",
|
||||
"manual": "Manual",
|
||||
"name": "Name",
|
||||
"nameValidationMsg": "Enter a name for your model",
|
||||
"description": "Description",
|
||||
"descriptionValidationMsg": "Add a description for your model",
|
||||
"config": "Config",
|
||||
"configValidationMsg": "Path to the config file of your model.",
|
||||
"modelLocation": "Model Location",
|
||||
"modelLocationValidationMsg": "Path to where your model is located locally.",
|
||||
"repo_id": "Repo ID",
|
||||
"repoIDValidationMsg": "Online repository of your model",
|
||||
"vaeLocation": "VAE Location",
|
||||
"vaeLocationValidationMsg": "Path to where your VAE is located.",
|
||||
"vaeRepoID": "VAE Repo ID",
|
||||
"vaeRepoIDValidationMsg": "Online repository of your VAE",
|
||||
"width": "Width",
|
||||
"widthValidationMsg": "Default width of your model.",
|
||||
"height": "Height",
|
||||
"heightValidationMsg": "Default height of your model.",
|
||||
"addModel": "Add Model",
|
||||
"updateModel": "Update Model",
|
||||
"availableModels": "Available Models",
|
||||
"search": "Search",
|
||||
"load": "Load",
|
||||
"active": "active",
|
||||
"notLoaded": "not loaded",
|
||||
"cached": "cached",
|
||||
"checkpointFolder": "Checkpoint Folder",
|
||||
"clearCheckpointFolder": "Clear Checkpoint Folder",
|
||||
"findModels": "Find Models",
|
||||
"scanAgain": "Scan Again",
|
||||
"modelsFound": "Models Found",
|
||||
"selectFolder": "Select Folder",
|
||||
"selected": "Selected",
|
||||
"selectAll": "Select All",
|
||||
"deselectAll": "Deselect All",
|
||||
"showExisting": "Show Existing",
|
||||
"addSelected": "Add Selected",
|
||||
"modelExists": "Model Exists",
|
||||
"selectAndAdd": "Select and Add Models Listed Below",
|
||||
"noModelsFound": "No Models Found",
|
||||
"delete": "Delete",
|
||||
"deleteModel": "Delete Model",
|
||||
"deleteConfig": "Delete Config",
|
||||
"deleteMsg1": "Are you sure you want to delete this model entry from InvokeAI?",
|
||||
"deleteMsg2": "This will not delete the model checkpoint file from your disk. You can readd them if you wish to.",
|
||||
"formMessageDiffusersModelLocation": "Diffusers Model Location",
|
||||
"formMessageDiffusersModelLocationDesc": "Please enter at least one.",
|
||||
"formMessageDiffusersVAELocation": "VAE Location",
|
||||
"formMessageDiffusersVAELocationDesc": "If not provided, InvokeAI will look for the VAE file inside the model location given above.",
|
||||
"convert": "Convert",
|
||||
"convertToDiffusers": "Convert To Diffusers",
|
||||
"convertToDiffusersHelpText1": "This model will be converted to the 🧨 Diffusers format.",
|
||||
"convertToDiffusersHelpText2": "This process will replace your Model Manager entry with the Diffusers version of the same model.",
|
||||
"convertToDiffusersHelpText3": "Your checkpoint file on the disk will NOT be deleted or modified in anyway. You can add your checkpoint to the Model Manager again if you want to.",
|
||||
"convertToDiffusersHelpText4": "This is a one time process only. It might take around 30s-60s depending on the specifications of your computer.",
|
||||
"convertToDiffusersHelpText5": "Please make sure you have enough disk space. Models generally vary between 4GB-7GB in size.",
|
||||
"convertToDiffusersHelpText6": "Do you wish to convert this model?",
|
||||
"convertToDiffusersSaveLocation": "Save Location",
|
||||
"v1": "v1",
|
||||
"v2_base": "v2 (512px)",
|
||||
"v2_768": "v2 (768px)",
|
||||
"inpainting": "v1 Inpainting",
|
||||
"customConfig": "Custom Config",
|
||||
"pathToCustomConfig": "Path To Custom Config",
|
||||
"statusConverting": "Converting",
|
||||
"modelConverted": "Model Converted",
|
||||
"sameFolder": "Same folder",
|
||||
"invokeRoot": "InvokeAI folder",
|
||||
"custom": "Custom",
|
||||
"customSaveLocation": "Custom Save Location",
|
||||
"merge": "Merge",
|
||||
"modelsMerged": "Models Merged",
|
||||
"mergeModels": "Merge Models",
|
||||
"modelOne": "Model 1",
|
||||
"modelTwo": "Model 2",
|
||||
"modelThree": "Model 3",
|
||||
"mergedModelName": "Merged Model Name",
|
||||
"alpha": "Alpha",
|
||||
"interpolationType": "Interpolation Type",
|
||||
"mergedModelSaveLocation": "Save Location",
|
||||
"mergedModelCustomSaveLocation": "Custom Path",
|
||||
"invokeAIFolder": "Invoke AI Folder",
|
||||
"ignoreMismatch": "Ignore Mismatches Between Selected Models",
|
||||
"modelMergeHeaderHelp1": "You can merge upto three different models to create a blend that suits your needs.",
|
||||
"modelMergeHeaderHelp2": "Only Diffusers are available for merging. If you want to merge a checkpoint model, please convert it to Diffusers first.",
|
||||
"modelMergeAlphaHelp": "Alpha controls blend strength for the models. Lower alpha values lead to lower influence of the second model.",
|
||||
"modelMergeInterpAddDifferenceHelp": "In this mode, Model 3 is first subtracted from Model 2. The resulting version is blended with Model 1 with the alpha rate set above.",
|
||||
"inverseSigmoid": "Inverse Sigmoid",
|
||||
"sigmoid": "Sigmoid",
|
||||
"weightedSum": "Weighted Sum",
|
||||
"none": "none",
|
||||
"addDifference": "Add Difference"
|
||||
},
|
||||
"parameters": {
|
||||
"general": "General",
|
||||
"images": "Images",
|
||||
"steps": "Steps",
|
||||
"cfgScale": "CFG Scale",
|
||||
"width": "Width",
|
||||
"height": "Height",
|
||||
"sampler": "Sampler",
|
||||
"seed": "Seed",
|
||||
"imageToImage": "Image to Image",
|
||||
"randomizeSeed": "Randomize Seed",
|
||||
"shuffle": "Shuffle",
|
||||
"noiseThreshold": "Noise Threshold",
|
||||
"perlinNoise": "Perlin Noise",
|
||||
"variations": "Variations",
|
||||
"variationAmount": "Variation Amount",
|
||||
"seedWeights": "Seed Weights",
|
||||
"faceRestoration": "Face Restoration",
|
||||
"restoreFaces": "Restore Faces",
|
||||
"type": "Type",
|
||||
"strength": "Strength",
|
||||
"upscaling": "Upscaling",
|
||||
"upscale": "Upscale",
|
||||
"upscaleImage": "Upscale Image",
|
||||
"denoisingStrength": "Denoising Strength",
|
||||
"scale": "Scale",
|
||||
"otherOptions": "Other Options",
|
||||
"seamlessTiling": "Seamless Tiling",
|
||||
"hiresOptim": "High Res Optimization",
|
||||
"hiresStrength": "High Res Strength",
|
||||
"imageFit": "Fit Initial Image To Output Size",
|
||||
"codeformerFidelity": "Fidelity",
|
||||
"seamSize": "Seam Size",
|
||||
"seamBlur": "Seam Blur",
|
||||
"seamStrength": "Seam Strength",
|
||||
"seamSteps": "Seam Steps",
|
||||
"scaleBeforeProcessing": "Scale Before Processing",
|
||||
"scaledWidth": "Scaled W",
|
||||
"scaledHeight": "Scaled H",
|
||||
"infillMethod": "Infill Method",
|
||||
"tileSize": "Tile Size",
|
||||
"boundingBoxHeader": "Bounding Box",
|
||||
"seamCorrectionHeader": "Seam Correction",
|
||||
"infillScalingHeader": "Infill and Scaling",
|
||||
"img2imgStrength": "Image To Image Strength",
|
||||
"toggleLoopback": "Toggle Loopback",
|
||||
"symmetry": "Symmetry",
|
||||
"hSymmetryStep": "H Symmetry Step",
|
||||
"vSymmetryStep": "V Symmetry Step",
|
||||
"invoke": "Invoke",
|
||||
"cancel": {
|
||||
"immediate": "Cancel immediately",
|
||||
"schedule": "Cancel after current iteration",
|
||||
"isScheduled": "Canceling",
|
||||
"setType": "Set cancel type"
|
||||
},
|
||||
"promptPlaceholder": "Type prompt here. [negative tokens], (upweight)++, (downweight)--, swap and blend are available (see docs)",
|
||||
"negativePrompts": "Negative Prompts",
|
||||
"sendTo": "Send to",
|
||||
"sendToImg2Img": "Send to Image to Image",
|
||||
"sendToUnifiedCanvas": "Send To Unified Canvas",
|
||||
"copyImage": "Copy Image",
|
||||
"copyImageToLink": "Copy Image To Link",
|
||||
"downloadImage": "Download Image",
|
||||
"openInViewer": "Open In Viewer",
|
||||
"closeViewer": "Close Viewer",
|
||||
"usePrompt": "Use Prompt",
|
||||
"useSeed": "Use Seed",
|
||||
"useAll": "Use All",
|
||||
"useInitImg": "Use Initial Image",
|
||||
"info": "Info",
|
||||
"deleteImage": "Delete Image",
|
||||
"initialImage": "Initial Image",
|
||||
"showOptionsPanel": "Show Options Panel"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Models",
|
||||
"displayInProgress": "Display In-Progress Images",
|
||||
"saveSteps": "Save images every n steps",
|
||||
"confirmOnDelete": "Confirm On Delete",
|
||||
"displayHelpIcons": "Display Help Icons",
|
||||
"useCanvasBeta": "Use Canvas Beta Layout",
|
||||
"enableImageDebugging": "Enable Image Debugging",
|
||||
"useSlidersForAll": "Use Sliders For All Options",
|
||||
"resetWebUI": "Reset Web UI",
|
||||
"resetWebUIDesc1": "Resetting the web UI only resets the browser's local cache of your images and remembered settings. It does not delete any images from disk.",
|
||||
"resetWebUIDesc2": "If images aren't showing up in the gallery or something else isn't working, please try resetting before submitting an issue on GitHub.",
|
||||
"resetComplete": "Web UI has been reset. Refresh the page to reload."
|
||||
},
|
||||
"toast": {
|
||||
"tempFoldersEmptied": "Temp Folder Emptied",
|
||||
"uploadFailed": "Upload failed",
|
||||
"uploadFailedMultipleImagesDesc": "Multiple images pasted, may only upload one image at a time",
|
||||
"uploadFailedUnableToLoadDesc": "Unable to load file",
|
||||
"downloadImageStarted": "Image Download Started",
|
||||
"imageCopied": "Image Copied",
|
||||
"imageLinkCopied": "Image Link Copied",
|
||||
"imageNotLoaded": "No Image Loaded",
|
||||
"imageNotLoadedDesc": "No image found to send to image to image module",
|
||||
"imageSavedToGallery": "Image Saved to Gallery",
|
||||
"canvasMerged": "Canvas Merged",
|
||||
"sentToImageToImage": "Sent To Image To Image",
|
||||
"sentToUnifiedCanvas": "Sent to Unified Canvas",
|
||||
"parametersSet": "Parameters Set",
|
||||
"parametersNotSet": "Parameters Not Set",
|
||||
"parametersNotSetDesc": "No metadata found for this image.",
|
||||
"parametersFailed": "Problem loading parameters",
|
||||
"parametersFailedDesc": "Unable to load init image.",
|
||||
"seedSet": "Seed Set",
|
||||
"seedNotSet": "Seed Not Set",
|
||||
"seedNotSetDesc": "Could not find seed for this image.",
|
||||
"promptSet": "Prompt Set",
|
||||
"promptNotSet": "Prompt Not Set",
|
||||
"promptNotSetDesc": "Could not find prompt for this image.",
|
||||
"upscalingFailed": "Upscaling Failed",
|
||||
"faceRestoreFailed": "Face Restoration Failed",
|
||||
"metadataLoadFailed": "Failed to load metadata",
|
||||
"initialImageSet": "Initial Image Set",
|
||||
"initialImageNotSet": "Initial Image Not Set",
|
||||
"initialImageNotSetDesc": "Could not load initial image"
|
||||
},
|
||||
"tooltip": {
|
||||
"feature": {
|
||||
"prompt": "This is the prompt field. Prompt includes generation objects and stylistic terms. You can add weight (token importance) in the prompt as well, but CLI commands and parameters will not work.",
|
||||
"gallery": "Gallery displays generations from the outputs folder as they're created. Settings are stored within files and accesed by context menu.",
|
||||
"other": "These options will enable alternative processing modes for Invoke. 'Seamless tiling' will create repeating patterns in the output. 'High resolution' is generation in two steps with img2img: use this setting when you want a larger and more coherent image without artifacts. It will take longer than usual txt2img.",
|
||||
"seed": "Seed value affects the initial noise from which the image is formed. You can use the already existing seeds from previous images. 'Noise Threshold' is used to mitigate artifacts at high CFG values (try the 0-10 range), and Perlin to add Perlin noise during generation: both serve to add variation to your outputs.",
|
||||
"variations": "Try a variation with a value between 0.1 and 1.0 to change the result for a given seed. Interesting variations of the seed are between 0.1 and 0.3.",
|
||||
"upscale": "Use ESRGAN to enlarge the image immediately after generation.",
|
||||
"faceCorrection": "Face correction with GFPGAN or Codeformer: the algorithm detects faces in the image and corrects any defects. High value will change the image more, resulting in more attractive faces. Codeformer with a higher fidelity preserves the original image at the expense of stronger face correction.",
|
||||
"imageToImage": "Image to Image loads any image as initial, which is then used to generate a new one along with the prompt. The higher the value, the more the result image will change. Values from 0.0 to 1.0 are possible, the recommended range is .25-.75",
|
||||
"boundingBox": "The bounding box is the same as the Width and Height settings for Text to Image or Image to Image. Only the area in the box will be processed.",
|
||||
"seamCorrection": "Controls the handling of visible seams that occur between generated images on the canvas.",
|
||||
"infillAndScaling": "Manage infill methods (used on masked or erased areas of the canvas) and scaling (useful for small bounding box sizes)."
|
||||
}
|
||||
},
|
||||
"unifiedCanvas": {
|
||||
"layer": "Layer",
|
||||
"base": "Base",
|
||||
"mask": "Mask",
|
||||
"maskingOptions": "Masking Options",
|
||||
"enableMask": "Enable Mask",
|
||||
"preserveMaskedArea": "Preserve Masked Area",
|
||||
"clearMask": "Clear Mask",
|
||||
"brush": "Brush",
|
||||
"eraser": "Eraser",
|
||||
"fillBoundingBox": "Fill Bounding Box",
|
||||
"eraseBoundingBox": "Erase Bounding Box",
|
||||
"colorPicker": "Color Picker",
|
||||
"brushOptions": "Brush Options",
|
||||
"brushSize": "Size",
|
||||
"move": "Move",
|
||||
"resetView": "Reset View",
|
||||
"mergeVisible": "Merge Visible",
|
||||
"saveToGallery": "Save To Gallery",
|
||||
"copyToClipboard": "Copy to Clipboard",
|
||||
"downloadAsImage": "Download As Image",
|
||||
"undo": "Undo",
|
||||
"redo": "Redo",
|
||||
"clearCanvas": "Clear Canvas",
|
||||
"canvasSettings": "Canvas Settings",
|
||||
"showIntermediates": "Show Intermediates",
|
||||
"showGrid": "Show Grid",
|
||||
"snapToGrid": "Snap to Grid",
|
||||
"darkenOutsideSelection": "Darken Outside Selection",
|
||||
"autoSaveToGallery": "Auto Save to Gallery",
|
||||
"saveBoxRegionOnly": "Save Box Region Only",
|
||||
"limitStrokesToBox": "Limit Strokes to Box",
|
||||
"showCanvasDebugInfo": "Show Canvas Debug Info",
|
||||
"clearCanvasHistory": "Clear Canvas History",
|
||||
"clearHistory": "Clear History",
|
||||
"clearCanvasHistoryMessage": "Clearing the canvas history leaves your current canvas intact, but irreversibly clears the undo and redo history.",
|
||||
"clearCanvasHistoryConfirm": "Are you sure you want to clear the canvas history?",
|
||||
"emptyTempImageFolder": "Empty Temp Image Folder",
|
||||
"emptyFolder": "Empty Folder",
|
||||
"emptyTempImagesFolderMessage": "Emptying the temp image folder also fully resets the Unified Canvas. This includes all undo/redo history, images in the staging area, and the canvas base layer.",
|
||||
"emptyTempImagesFolderConfirm": "Are you sure you want to empty the temp folder?",
|
||||
"activeLayer": "Active Layer",
|
||||
"canvasScale": "Canvas Scale",
|
||||
"boundingBox": "Bounding Box",
|
||||
"scaledBoundingBox": "Scaled Bounding Box",
|
||||
"boundingBoxPosition": "Bounding Box Position",
|
||||
"canvasDimensions": "Canvas Dimensions",
|
||||
"canvasPosition": "Canvas Position",
|
||||
"cursorPosition": "Cursor Position",
|
||||
"previous": "Previous",
|
||||
"next": "Next",
|
||||
"accept": "Accept",
|
||||
"showHide": "Show/Hide",
|
||||
"discardAll": "Discard All",
|
||||
"betaClear": "Clear",
|
||||
"betaDarkenOutside": "Darken Outside",
|
||||
"betaLimitToBox": "Limit To Box",
|
||||
"betaPreserveMasked": "Preserve Masked"
|
||||
}
|
||||
}
|
||||
593
invokeai/frontend/dist/locales/es.json
vendored
Normal file
593
invokeai/frontend/dist/locales/es.json
vendored
Normal file
@@ -0,0 +1,593 @@
|
||||
{
|
||||
"common": {
|
||||
"hotkeysLabel": "Atajos de teclado",
|
||||
"themeLabel": "Tema",
|
||||
"languagePickerLabel": "Selector de idioma",
|
||||
"reportBugLabel": "Reportar errores",
|
||||
"settingsLabel": "Ajustes",
|
||||
"darkTheme": "Oscuro",
|
||||
"lightTheme": "Claro",
|
||||
"greenTheme": "Verde",
|
||||
"text2img": "Texto a Imagen",
|
||||
"img2img": "Imagen a Imagen",
|
||||
"unifiedCanvas": "Lienzo Unificado",
|
||||
"nodes": "Nodos",
|
||||
"langSpanish": "Español",
|
||||
"nodesDesc": "Un sistema de generación de imágenes basado en nodos, actualmente se encuentra en desarrollo. Mantente pendiente a nuestras actualizaciones acerca de esta fabulosa funcionalidad.",
|
||||
"postProcessing": "Post-procesamiento",
|
||||
"postProcessDesc1": "Invoke AI ofrece una gran variedad de funciones de post-procesamiento, El aumento de tamaño y Restauración de Rostros ya se encuentran disponibles en la interfaz web, puedes acceder desde el menú de Opciones Avanzadas en las pestañas de Texto a Imagen y de Imagen a Imagen. También puedes acceder a estas funciones directamente mediante el botón de acciones en el menú superior de la imagen actual o en el visualizador.",
|
||||
"postProcessDesc2": "Una interfaz de usuario dedicada se lanzará pronto para facilitar flujos de trabajo de postprocesamiento más avanzado.",
|
||||
"postProcessDesc3": "La Interfaz de Línea de Comandos de Invoke AI ofrece muchas otras características, incluyendo -Embiggen-.",
|
||||
"training": "Entrenamiento",
|
||||
"trainingDesc1": "Un flujo de trabajo dedicado para el entrenamiento de sus propios -embeddings- y puntos de control utilizando Inversión Textual y Dreambooth desde la interfaz web.",
|
||||
"trainingDesc2": "InvokeAI ya soporta el entrenamiento de -embeddings- personalizados utilizando la Inversión Textual mediante el script principal.",
|
||||
"upload": "Subir imagen",
|
||||
"close": "Cerrar",
|
||||
"load": "Cargar",
|
||||
"statusConnected": "Conectado",
|
||||
"statusDisconnected": "Desconectado",
|
||||
"statusError": "Error",
|
||||
"statusPreparing": "Preparando",
|
||||
"statusProcessingCanceled": "Procesamiento Cancelado",
|
||||
"statusProcessingComplete": "Procesamiento Completo",
|
||||
"statusGenerating": "Generando",
|
||||
"statusGeneratingTextToImage": "Generando Texto a Imagen",
|
||||
"statusGeneratingImageToImage": "Generando Imagen a Imagen",
|
||||
"statusGeneratingInpainting": "Generando pintura interior",
|
||||
"statusGeneratingOutpainting": "Generando pintura exterior",
|
||||
"statusGenerationComplete": "Generación Completa",
|
||||
"statusIterationComplete": "Iteración Completa",
|
||||
"statusSavingImage": "Guardando Imagen",
|
||||
"statusRestoringFaces": "Restaurando Rostros",
|
||||
"statusRestoringFacesGFPGAN": "Restaurando Rostros (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Restaurando Rostros (CodeFormer)",
|
||||
"statusUpscaling": "Aumentando Tamaño",
|
||||
"statusUpscalingESRGAN": "Restaurando Rostros(ESRGAN)",
|
||||
"statusLoadingModel": "Cargando Modelo",
|
||||
"statusModelChanged": "Modelo cambiado",
|
||||
"statusMergedModels": "Modelos combinados",
|
||||
"githubLabel": "Github",
|
||||
"discordLabel": "Discord",
|
||||
"langEnglish": "Inglés",
|
||||
"langDutch": "Holandés",
|
||||
"langFrench": "Francés",
|
||||
"langGerman": "Alemán",
|
||||
"langItalian": "Italiano",
|
||||
"langArabic": "Árabe",
|
||||
"langJapanese": "Japones",
|
||||
"langPolish": "Polaco",
|
||||
"langBrPortuguese": "Portugués brasileño",
|
||||
"langRussian": "Ruso",
|
||||
"langSimplifiedChinese": "Chino simplificado",
|
||||
"langUkranian": "Ucraniano",
|
||||
"back": "Atrás",
|
||||
"statusConvertingModel": "Convertir el modelo",
|
||||
"statusModelConverted": "Modelo adaptado",
|
||||
"statusMergingModels": "Fusionar modelos"
|
||||
},
|
||||
"gallery": {
|
||||
"generations": "Generaciones",
|
||||
"showGenerations": "Mostrar Generaciones",
|
||||
"uploads": "Subidas de archivos",
|
||||
"showUploads": "Mostar Subidas",
|
||||
"galleryImageSize": "Tamaño de la imagen",
|
||||
"galleryImageResetSize": "Restablecer tamaño de la imagen",
|
||||
"gallerySettings": "Ajustes de la galería",
|
||||
"maintainAspectRatio": "Mantener relación de aspecto",
|
||||
"autoSwitchNewImages": "Auto seleccionar Imágenes nuevas",
|
||||
"singleColumnLayout": "Diseño de una columna",
|
||||
"pinGallery": "Fijar galería",
|
||||
"allImagesLoaded": "Todas las imágenes cargadas",
|
||||
"loadMore": "Cargar más",
|
||||
"noImagesInGallery": "Sin imágenes en la galería"
|
||||
},
|
||||
"hotkeys": {
|
||||
"keyboardShortcuts": "Atajos de teclado",
|
||||
"appHotkeys": "Atajos de applicación",
|
||||
"generalHotkeys": "Atajos generales",
|
||||
"galleryHotkeys": "Atajos de galería",
|
||||
"unifiedCanvasHotkeys": "Atajos de lienzo unificado",
|
||||
"invoke": {
|
||||
"title": "Invocar",
|
||||
"desc": "Generar una imagen"
|
||||
},
|
||||
"cancel": {
|
||||
"title": "Cancelar",
|
||||
"desc": "Cancelar el proceso de generación de imagen"
|
||||
},
|
||||
"focusPrompt": {
|
||||
"title": "Mover foco a Entrada de texto",
|
||||
"desc": "Mover foco hacia el campo de texto de la Entrada"
|
||||
},
|
||||
"toggleOptions": {
|
||||
"title": "Alternar opciones",
|
||||
"desc": "Mostar y ocultar el panel de opciones"
|
||||
},
|
||||
"pinOptions": {
|
||||
"title": "Fijar opciones",
|
||||
"desc": "Fijar el panel de opciones"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "Alternar visor",
|
||||
"desc": "Mostar y ocultar el visor de imágenes"
|
||||
},
|
||||
"toggleGallery": {
|
||||
"title": "Alternar galería",
|
||||
"desc": "Mostar y ocultar la galería de imágenes"
|
||||
},
|
||||
"maximizeWorkSpace": {
|
||||
"title": "Maximizar espacio de trabajo",
|
||||
"desc": "Cerrar otros páneles y maximizar el espacio de trabajo"
|
||||
},
|
||||
"changeTabs": {
|
||||
"title": "Cambiar",
|
||||
"desc": "Cambiar entre áreas de trabajo"
|
||||
},
|
||||
"consoleToggle": {
|
||||
"title": "Alternar consola",
|
||||
"desc": "Mostar y ocultar la consola"
|
||||
},
|
||||
"setPrompt": {
|
||||
"title": "Establecer Entrada",
|
||||
"desc": "Usar el texto de entrada de la imagen actual"
|
||||
},
|
||||
"setSeed": {
|
||||
"title": "Establecer semilla",
|
||||
"desc": "Usar la semilla de la imagen actual"
|
||||
},
|
||||
"setParameters": {
|
||||
"title": "Establecer parámetros",
|
||||
"desc": "Usar todos los parámetros de la imagen actual"
|
||||
},
|
||||
"restoreFaces": {
|
||||
"title": "Restaurar rostros",
|
||||
"desc": "Restaurar rostros en la imagen actual"
|
||||
},
|
||||
"upscale": {
|
||||
"title": "Aumentar resolución",
|
||||
"desc": "Aumentar la resolución de la imagen actual"
|
||||
},
|
||||
"showInfo": {
|
||||
"title": "Mostrar información",
|
||||
"desc": "Mostar metadatos de la imagen actual"
|
||||
},
|
||||
"sendToImageToImage": {
|
||||
"title": "Enviar hacia Imagen a Imagen",
|
||||
"desc": "Enviar imagen actual hacia Imagen a Imagen"
|
||||
},
|
||||
"deleteImage": {
|
||||
"title": "Eliminar imagen",
|
||||
"desc": "Eliminar imagen actual"
|
||||
},
|
||||
"closePanels": {
|
||||
"title": "Cerrar páneles",
|
||||
"desc": "Cerrar los páneles abiertos"
|
||||
},
|
||||
"previousImage": {
|
||||
"title": "Imagen anterior",
|
||||
"desc": "Muetra la imagen anterior en la galería"
|
||||
},
|
||||
"nextImage": {
|
||||
"title": "Imagen siguiente",
|
||||
"desc": "Muetra la imagen siguiente en la galería"
|
||||
},
|
||||
"toggleGalleryPin": {
|
||||
"title": "Alternar fijado de galería",
|
||||
"desc": "Fijar o desfijar la galería en la interfaz"
|
||||
},
|
||||
"increaseGalleryThumbSize": {
|
||||
"title": "Aumentar imagen en galería",
|
||||
"desc": "Aumenta el tamaño de las miniaturas de la galería"
|
||||
},
|
||||
"decreaseGalleryThumbSize": {
|
||||
"title": "Reducir imagen en galería",
|
||||
"desc": "Reduce el tamaño de las miniaturas de la galería"
|
||||
},
|
||||
"selectBrush": {
|
||||
"title": "Seleccionar pincel",
|
||||
"desc": "Selecciona el pincel en el lienzo"
|
||||
},
|
||||
"selectEraser": {
|
||||
"title": "Seleccionar borrador",
|
||||
"desc": "Selecciona el borrador en el lienzo"
|
||||
},
|
||||
"decreaseBrushSize": {
|
||||
"title": "Disminuir tamaño de herramienta",
|
||||
"desc": "Disminuye el tamaño del pincel/borrador en el lienzo"
|
||||
},
|
||||
"increaseBrushSize": {
|
||||
"title": "Aumentar tamaño del pincel",
|
||||
"desc": "Aumenta el tamaño del pincel en el lienzo"
|
||||
},
|
||||
"decreaseBrushOpacity": {
|
||||
"title": "Disminuir opacidad del pincel",
|
||||
"desc": "Disminuye la opacidad del pincel en el lienzo"
|
||||
},
|
||||
"increaseBrushOpacity": {
|
||||
"title": "Aumentar opacidad del pincel",
|
||||
"desc": "Aumenta la opacidad del pincel en el lienzo"
|
||||
},
|
||||
"moveTool": {
|
||||
"title": "Herramienta de movimiento",
|
||||
"desc": "Permite navegar por el lienzo"
|
||||
},
|
||||
"fillBoundingBox": {
|
||||
"title": "Rellenar Caja contenedora",
|
||||
"desc": "Rellena la caja contenedora con el color seleccionado"
|
||||
},
|
||||
"eraseBoundingBox": {
|
||||
"title": "Borrar Caja contenedora",
|
||||
"desc": "Borra el contenido dentro de la caja contenedora"
|
||||
},
|
||||
"colorPicker": {
|
||||
"title": "Selector de color",
|
||||
"desc": "Selecciona un color del lienzo"
|
||||
},
|
||||
"toggleSnap": {
|
||||
"title": "Alternar ajuste de cuadrícula",
|
||||
"desc": "Activa o desactiva el ajuste automático a la cuadrícula"
|
||||
},
|
||||
"quickToggleMove": {
|
||||
"title": "Alternar movimiento rápido",
|
||||
"desc": "Activa momentáneamente la herramienta de movimiento"
|
||||
},
|
||||
"toggleLayer": {
|
||||
"title": "Alternar capa",
|
||||
"desc": "Alterna entre las capas de máscara y base"
|
||||
},
|
||||
"clearMask": {
|
||||
"title": "Limpiar máscara",
|
||||
"desc": "Limpia toda la máscara actual"
|
||||
},
|
||||
"hideMask": {
|
||||
"title": "Ocultar máscara",
|
||||
"desc": "Oculta o muetre la máscara actual"
|
||||
},
|
||||
"showHideBoundingBox": {
|
||||
"title": "Alternar caja contenedora",
|
||||
"desc": "Muestra u oculta la caja contenedora"
|
||||
},
|
||||
"mergeVisible": {
|
||||
"title": "Consolida capas visibles",
|
||||
"desc": "Consolida todas las capas visibles en una sola"
|
||||
},
|
||||
"saveToGallery": {
|
||||
"title": "Guardar en galería",
|
||||
"desc": "Guardar la imagen actual del lienzo en la galería"
|
||||
},
|
||||
"copyToClipboard": {
|
||||
"title": "Copiar al portapapeles",
|
||||
"desc": "Copiar el lienzo actual al portapapeles"
|
||||
},
|
||||
"downloadImage": {
|
||||
"title": "Descargar imagen",
|
||||
"desc": "Descargar la imagen actual del lienzo"
|
||||
},
|
||||
"undoStroke": {
|
||||
"title": "Deshar trazo",
|
||||
"desc": "Desahacer el último trazo del pincel"
|
||||
},
|
||||
"redoStroke": {
|
||||
"title": "Rehacer trazo",
|
||||
"desc": "Rehacer el último trazo del pincel"
|
||||
},
|
||||
"resetView": {
|
||||
"title": "Restablecer vista",
|
||||
"desc": "Restablecer la vista del lienzo"
|
||||
},
|
||||
"previousStagingImage": {
|
||||
"title": "Imagen anterior",
|
||||
"desc": "Imagen anterior en el área de preparación"
|
||||
},
|
||||
"nextStagingImage": {
|
||||
"title": "Imagen siguiente",
|
||||
"desc": "Siguiente imagen en el área de preparación"
|
||||
},
|
||||
"acceptStagingImage": {
|
||||
"title": "Aceptar imagen",
|
||||
"desc": "Aceptar la imagen actual en el área de preparación"
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
"modelManager": "Gestor de Modelos",
|
||||
"model": "Modelo",
|
||||
"modelAdded": "Modelo añadido",
|
||||
"modelUpdated": "Modelo actualizado",
|
||||
"modelEntryDeleted": "Endrada de Modelo eliminada",
|
||||
"cannotUseSpaces": "No se pueden usar Spaces",
|
||||
"addNew": "Añadir nuevo",
|
||||
"addNewModel": "Añadir nuevo modelo",
|
||||
"addManually": "Añadir manualmente",
|
||||
"manual": "Manual",
|
||||
"name": "Nombre",
|
||||
"nameValidationMsg": "Introduce un nombre para tu modelo",
|
||||
"description": "Descripción",
|
||||
"descriptionValidationMsg": "Introduce una descripción para tu modelo",
|
||||
"config": "Configurar",
|
||||
"configValidationMsg": "Ruta del archivo de configuración del modelo.",
|
||||
"modelLocation": "Ubicación del Modelo",
|
||||
"modelLocationValidationMsg": "Ruta del archivo de modelo.",
|
||||
"vaeLocation": "Ubicación VAE",
|
||||
"vaeLocationValidationMsg": "Ruta del archivo VAE.",
|
||||
"width": "Ancho",
|
||||
"widthValidationMsg": "Ancho predeterminado de tu modelo.",
|
||||
"height": "Alto",
|
||||
"heightValidationMsg": "Alto predeterminado de tu modelo.",
|
||||
"addModel": "Añadir Modelo",
|
||||
"updateModel": "Actualizar Modelo",
|
||||
"availableModels": "Modelos disponibles",
|
||||
"search": "Búsqueda",
|
||||
"load": "Cargar",
|
||||
"active": "activo",
|
||||
"notLoaded": "no cargado",
|
||||
"cached": "en caché",
|
||||
"checkpointFolder": "Directorio de Checkpoint",
|
||||
"clearCheckpointFolder": "Limpiar directorio de checkpoint",
|
||||
"findModels": "Buscar modelos",
|
||||
"scanAgain": "Escanear de nuevo",
|
||||
"modelsFound": "Modelos encontrados",
|
||||
"selectFolder": "Selecciona un directorio",
|
||||
"selected": "Seleccionado",
|
||||
"selectAll": "Seleccionar todo",
|
||||
"deselectAll": "Deseleccionar todo",
|
||||
"showExisting": "Mostrar existentes",
|
||||
"addSelected": "Añadir seleccionados",
|
||||
"modelExists": "Modelo existente",
|
||||
"selectAndAdd": "Selecciona de la lista un modelo para añadir",
|
||||
"noModelsFound": "No se encontró ningún modelo",
|
||||
"delete": "Eliminar",
|
||||
"deleteModel": "Eliminar Modelo",
|
||||
"deleteConfig": "Eliminar Configuración",
|
||||
"deleteMsg1": "¿Estás seguro de querer eliminar esta entrada de modelo de InvokeAI?",
|
||||
"deleteMsg2": "El checkpoint del modelo no se eliminará de tu disco. Puedes volver a añadirlo si lo deseas.",
|
||||
"safetensorModels": "SafeTensors",
|
||||
"addDiffuserModel": "Añadir difusores",
|
||||
"inpainting": "v1 Repintado",
|
||||
"repoIDValidationMsg": "Repositorio en línea de tu modelo",
|
||||
"checkpointModels": "Puntos de control",
|
||||
"convertToDiffusersHelpText4": "Este proceso se realiza una sola vez. Puede tardar entre 30 y 60 segundos dependiendo de las especificaciones de tu ordenador.",
|
||||
"diffusersModels": "Difusores",
|
||||
"addCheckpointModel": "Agregar modelo de punto de control/Modelo Safetensor",
|
||||
"vaeRepoID": "Identificador del repositorio de VAE",
|
||||
"vaeRepoIDValidationMsg": "Repositorio en línea de tú VAE",
|
||||
"formMessageDiffusersModelLocation": "Difusores Modelo Ubicación",
|
||||
"formMessageDiffusersModelLocationDesc": "Por favor, introduzca al menos uno.",
|
||||
"formMessageDiffusersVAELocation": "Ubicación VAE",
|
||||
"formMessageDiffusersVAELocationDesc": "Si no se proporciona, InvokeAI buscará el archivo VAE dentro de la ubicación del modelo indicada anteriormente.",
|
||||
"convert": "Convertir",
|
||||
"convertToDiffusers": "Convertir en difusores",
|
||||
"convertToDiffusersHelpText1": "Este modelo se convertirá al formato 🧨 Difusores.",
|
||||
"convertToDiffusersHelpText2": "Este proceso sustituirá su entrada del Gestor de Modelos por la versión de Difusores del mismo modelo.",
|
||||
"convertToDiffusersHelpText3": "Su archivo de puntos de control en el disco NO será borrado ni modificado de ninguna manera. Puede volver a añadir su punto de control al Gestor de Modelos si lo desea.",
|
||||
"convertToDiffusersHelpText5": "Asegúrese de que dispone de suficiente espacio en disco. Los modelos suelen variar entre 4 GB y 7 GB de tamaño.",
|
||||
"convertToDiffusersHelpText6": "¿Desea transformar este modelo?",
|
||||
"convertToDiffusersSaveLocation": "Guardar ubicación",
|
||||
"v1": "v1",
|
||||
"v2": "v2",
|
||||
"statusConverting": "Adaptar",
|
||||
"modelConverted": "Modelo adaptado",
|
||||
"sameFolder": "La misma carpeta",
|
||||
"invokeRoot": "Carpeta InvokeAI",
|
||||
"custom": "Personalizado",
|
||||
"customSaveLocation": "Ubicación personalizada para guardar",
|
||||
"merge": "Fusión",
|
||||
"modelsMerged": "Modelos fusionados",
|
||||
"mergeModels": "Combinar modelos",
|
||||
"modelOne": "Modelo 1",
|
||||
"modelTwo": "Modelo 2",
|
||||
"modelThree": "Modelo 3",
|
||||
"mergedModelName": "Nombre del modelo combinado",
|
||||
"alpha": "Alfa",
|
||||
"interpolationType": "Tipo de interpolación",
|
||||
"mergedModelSaveLocation": "Guardar ubicación",
|
||||
"mergedModelCustomSaveLocation": "Ruta personalizada",
|
||||
"invokeAIFolder": "Invocar carpeta de la inteligencia artificial",
|
||||
"modelMergeHeaderHelp2": "Sólo se pueden fusionar difusores. Si desea fusionar un modelo de punto de control, conviértalo primero en difusores.",
|
||||
"modelMergeAlphaHelp": "Alfa controla la fuerza de mezcla de los modelos. Los valores alfa más bajos reducen la influencia del segundo modelo.",
|
||||
"modelMergeInterpAddDifferenceHelp": "En este modo, el Modelo 3 se sustrae primero del Modelo 2. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente.",
|
||||
"ignoreMismatch": "Ignorar discrepancias entre modelos seleccionados",
|
||||
"modelMergeHeaderHelp1": "Puede combinar hasta tres modelos diferentes para crear una mezcla que se adapte a sus necesidades.",
|
||||
"inverseSigmoid": "Sigmoideo inverso",
|
||||
"weightedSum": "Modelo de suma ponderada",
|
||||
"sigmoid": "Función sigmoide",
|
||||
"allModels": "Todos los modelos",
|
||||
"repo_id": "Identificador del repositorio",
|
||||
"pathToCustomConfig": "Ruta a la configuración personalizada",
|
||||
"customConfig": "Configuración personalizada"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Imágenes",
|
||||
"steps": "Pasos",
|
||||
"cfgScale": "Escala CFG",
|
||||
"width": "Ancho",
|
||||
"height": "Alto",
|
||||
"sampler": "Muestreo",
|
||||
"seed": "Semilla",
|
||||
"randomizeSeed": "Semilla aleatoria",
|
||||
"shuffle": "Aleatorizar",
|
||||
"noiseThreshold": "Umbral de Ruido",
|
||||
"perlinNoise": "Ruido Perlin",
|
||||
"variations": "Variaciones",
|
||||
"variationAmount": "Cantidad de Variación",
|
||||
"seedWeights": "Peso de las semillas",
|
||||
"faceRestoration": "Restauración de Rostros",
|
||||
"restoreFaces": "Restaurar rostros",
|
||||
"type": "Tipo",
|
||||
"strength": "Fuerza",
|
||||
"upscaling": "Aumento de resolución",
|
||||
"upscale": "Aumentar resolución",
|
||||
"upscaleImage": "Aumentar la resolución de la imagen",
|
||||
"scale": "Escala",
|
||||
"otherOptions": "Otras opciones",
|
||||
"seamlessTiling": "Mosaicos sin parches",
|
||||
"hiresOptim": "Optimización de Alta Resolución",
|
||||
"imageFit": "Ajuste tamaño de imagen inicial al tamaño objetivo",
|
||||
"codeformerFidelity": "Fidelidad",
|
||||
"seamSize": "Tamaño del parche",
|
||||
"seamBlur": "Desenfoque del parche",
|
||||
"seamStrength": "Fuerza del parche",
|
||||
"seamSteps": "Pasos del parche",
|
||||
"scaleBeforeProcessing": "Redimensionar antes de procesar",
|
||||
"scaledWidth": "Ancho escalado",
|
||||
"scaledHeight": "Alto escalado",
|
||||
"infillMethod": "Método de relleno",
|
||||
"tileSize": "Tamaño del mosaico",
|
||||
"boundingBoxHeader": "Caja contenedora",
|
||||
"seamCorrectionHeader": "Corrección de parches",
|
||||
"infillScalingHeader": "Remplazo y escalado",
|
||||
"img2imgStrength": "Peso de Imagen a Imagen",
|
||||
"toggleLoopback": "Alternar Retroalimentación",
|
||||
"invoke": "Invocar",
|
||||
"promptPlaceholder": "Ingrese la entrada aquí. [símbolos negativos], (subir peso)++, (bajar peso)--, también disponible alternado y mezclado (ver documentación)",
|
||||
"sendTo": "Enviar a",
|
||||
"sendToImg2Img": "Enviar a Imagen a Imagen",
|
||||
"sendToUnifiedCanvas": "Enviar a Lienzo Unificado",
|
||||
"copyImageToLink": "Copiar imagen a enlace",
|
||||
"downloadImage": "Descargar imagen",
|
||||
"openInViewer": "Abrir en Visor",
|
||||
"closeViewer": "Cerrar Visor",
|
||||
"usePrompt": "Usar Entrada",
|
||||
"useSeed": "Usar Semilla",
|
||||
"useAll": "Usar Todo",
|
||||
"useInitImg": "Usar Imagen Inicial",
|
||||
"info": "Información",
|
||||
"deleteImage": "Eliminar Imagen",
|
||||
"initialImage": "Imagen Inicial",
|
||||
"showOptionsPanel": "Mostrar panel de opciones",
|
||||
"symmetry": "Simetría",
|
||||
"vSymmetryStep": "Paso de simetría V",
|
||||
"hSymmetryStep": "Paso de simetría H",
|
||||
"cancel": {
|
||||
"immediate": "Cancelar inmediatamente",
|
||||
"schedule": "Cancelar tras la iteración actual",
|
||||
"isScheduled": "Cancelando",
|
||||
"setType": "Tipo de cancelación"
|
||||
},
|
||||
"copyImage": "Copiar la imagen",
|
||||
"general": "General",
|
||||
"negativePrompts": "Preguntas negativas",
|
||||
"imageToImage": "Imagen a imagen",
|
||||
"denoisingStrength": "Intensidad de la eliminación del ruido",
|
||||
"hiresStrength": "Alta resistencia"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Modelos",
|
||||
"displayInProgress": "Mostrar imágenes en progreso",
|
||||
"saveSteps": "Guardar imágenes cada n pasos",
|
||||
"confirmOnDelete": "Confirmar antes de eliminar",
|
||||
"displayHelpIcons": "Mostrar iconos de ayuda",
|
||||
"useCanvasBeta": "Usar versión beta del Lienzo",
|
||||
"enableImageDebugging": "Habilitar depuración de imágenes",
|
||||
"resetWebUI": "Restablecer interfaz web",
|
||||
"resetWebUIDesc1": "Al restablecer la interfaz web, solo se restablece la caché local del navegador de sus imágenes y la configuración guardada. No se elimina ninguna imagen de su disco duro.",
|
||||
"resetWebUIDesc2": "Si las imágenes no se muestran en la galería o algo más no funciona, intente restablecer antes de reportar un incidente en GitHub.",
|
||||
"resetComplete": "La interfaz web se ha restablecido. Actualice la página para recargarla.",
|
||||
"useSlidersForAll": "Utilice controles deslizantes para todas las opciones"
|
||||
},
|
||||
"toast": {
|
||||
"tempFoldersEmptied": "Directorio temporal vaciado",
|
||||
"uploadFailed": "Error al subir archivo",
|
||||
"uploadFailedMultipleImagesDesc": "Únicamente se puede subir una imágen a la vez",
|
||||
"uploadFailedUnableToLoadDesc": "No se pudo cargar la imágen",
|
||||
"downloadImageStarted": "Descargando imágen",
|
||||
"imageCopied": "Imágen copiada",
|
||||
"imageLinkCopied": "Enlace de imágen copiado",
|
||||
"imageNotLoaded": "No se cargó la imágen",
|
||||
"imageNotLoadedDesc": "No se encontró imagen para enviar al módulo Imagen a Imagen",
|
||||
"imageSavedToGallery": "Imágen guardada en la galería",
|
||||
"canvasMerged": "Lienzo consolidado",
|
||||
"sentToImageToImage": "Enviar hacia Imagen a Imagen",
|
||||
"sentToUnifiedCanvas": "Enviar hacia Lienzo Consolidado",
|
||||
"parametersSet": "Parámetros establecidos",
|
||||
"parametersNotSet": "Parámetros no establecidos",
|
||||
"parametersNotSetDesc": "No se encontraron metadatos para esta imágen.",
|
||||
"parametersFailed": "Error cargando parámetros",
|
||||
"parametersFailedDesc": "No fue posible cargar la imagen inicial.",
|
||||
"seedSet": "Semilla establecida",
|
||||
"seedNotSet": "Semilla no establecida",
|
||||
"seedNotSetDesc": "No se encontró una semilla para esta imágen.",
|
||||
"promptSet": "Entrada establecida",
|
||||
"promptNotSet": "Entrada no establecida",
|
||||
"promptNotSetDesc": "No se encontró una entrada para esta imágen.",
|
||||
"upscalingFailed": "Error al aumentar tamaño de imagn",
|
||||
"faceRestoreFailed": "Restauración de rostro fallida",
|
||||
"metadataLoadFailed": "Error al cargar metadatos",
|
||||
"initialImageSet": "Imágen inicial establecida",
|
||||
"initialImageNotSet": "Imagen inicial no establecida",
|
||||
"initialImageNotSetDesc": "Error al establecer la imágen inicial"
|
||||
},
|
||||
"tooltip": {
|
||||
"feature": {
|
||||
"prompt": "Este campo tomará todo el texto de entrada, incluidos tanto los términos de contenido como los estilísticos. Si bien se pueden incluir pesos en la solicitud, los comandos/parámetros estándar de línea de comandos no funcionarán.",
|
||||
"gallery": "Conforme se generan nuevas invocaciones, los archivos del directorio de salida se mostrarán aquí. Las generaciones tienen opciones adicionales para configurar nuevas generaciones.",
|
||||
"other": "Estas opciones habilitarán modos de procesamiento alternativos para Invoke. 'Seamless mosaico' creará patrones repetitivos en la salida. 'Alta resolución' es la generación en dos pasos con img2img: use esta configuración cuando desee una imagen más grande y más coherente sin artefactos. tomar más tiempo de lo habitual txt2img.",
|
||||
"seed": "Los valores de semilla proporcionan un conjunto inicial de ruido que guían el proceso de eliminación de ruido y se pueden aleatorizar o rellenar con una semilla de una invocación anterior. La función Umbral se puede usar para mitigar resultados indeseables a valores CFG más altos (intente entre 0-10), y Perlin se puede usar para agregar ruido Perlin al proceso de eliminación de ruido. Ambos sirven para agregar variación a sus salidas.",
|
||||
"variations": "Pruebe una variación con una cantidad entre 0 y 1 para cambiar la imagen de salida para la semilla establecida. Se encuentran variaciones interesantes en la semilla entre 0.1 y 0.3.",
|
||||
"upscale": "Usando ESRGAN, puede aumentar la resolución de salida sin requerir un ancho/alto más alto en la generación inicial.",
|
||||
"faceCorrection": "Usando GFPGAN o Codeformer, la corrección de rostros intentará identificar rostros en las salidas y corregir cualquier defecto/anormalidad. Los valores de fuerza más altos aplicarán una presión correctiva más fuerte en las salidas, lo que resultará en rostros más atractivos. Con Codeformer, una mayor fidelidad intentará preservar la imagen original, a expensas de la fuerza de corrección de rostros.",
|
||||
"imageToImage": "Imagen a Imagen permite cargar una imagen inicial, que InvokeAI usará para guiar el proceso de generación, junto con una solicitud. Un valor más bajo para esta configuración se parecerá más a la imagen original. Se aceptan valores entre 0-1, y se recomienda un rango de .25-.75",
|
||||
"boundingBox": "La caja delimitadora es análoga a las configuraciones de Ancho y Alto para Texto a Imagen o Imagen a Imagen. Solo se procesará el área en la caja.",
|
||||
"seamCorrection": "Controla el manejo de parches visibles que pueden ocurrir cuando se pega una imagen generada de nuevo en el lienzo.",
|
||||
"infillAndScaling": "Administra los métodos de relleno (utilizados en áreas enmascaradas o borradas del lienzo) y la escala (útil para tamaños de caja delimitadora pequeños)."
|
||||
}
|
||||
},
|
||||
"unifiedCanvas": {
|
||||
"layer": "Capa",
|
||||
"base": "Base",
|
||||
"mask": "Máscara",
|
||||
"maskingOptions": "Opciones de máscara",
|
||||
"enableMask": "Habilitar Máscara",
|
||||
"preserveMaskedArea": "Preservar área enmascarada",
|
||||
"clearMask": "Limpiar máscara",
|
||||
"brush": "Pincel",
|
||||
"eraser": "Borrador",
|
||||
"fillBoundingBox": "Rellenar Caja Contenedora",
|
||||
"eraseBoundingBox": "Eliminar Caja Contenedora",
|
||||
"colorPicker": "Selector de color",
|
||||
"brushOptions": "Opciones de pincel",
|
||||
"brushSize": "Tamaño",
|
||||
"move": "Mover",
|
||||
"resetView": "Restablecer vista",
|
||||
"mergeVisible": "Consolidar vista",
|
||||
"saveToGallery": "Guardar en galería",
|
||||
"copyToClipboard": "Copiar al portapapeles",
|
||||
"downloadAsImage": "Descargar como imagen",
|
||||
"undo": "Deshacer",
|
||||
"redo": "Rehacer",
|
||||
"clearCanvas": "Limpiar lienzo",
|
||||
"canvasSettings": "Ajustes de lienzo",
|
||||
"showIntermediates": "Mostrar intermedios",
|
||||
"showGrid": "Mostrar cuadrícula",
|
||||
"snapToGrid": "Ajustar a cuadrícula",
|
||||
"darkenOutsideSelection": "Oscurecer fuera de la selección",
|
||||
"autoSaveToGallery": "Guardar automáticamente en galería",
|
||||
"saveBoxRegionOnly": "Guardar solo región dentro de la caja",
|
||||
"limitStrokesToBox": "Limitar trazos a la caja",
|
||||
"showCanvasDebugInfo": "Mostrar información de depuración de lienzo",
|
||||
"clearCanvasHistory": "Limpiar historial de lienzo",
|
||||
"clearHistory": "Limpiar historial",
|
||||
"clearCanvasHistoryMessage": "Limpiar el historial de lienzo también restablece completamente el lienzo unificado. Esto incluye todo el historial de deshacer/rehacer, las imágenes en el área de preparación y la capa base del lienzo.",
|
||||
"clearCanvasHistoryConfirm": "¿Está seguro de que desea limpiar el historial del lienzo?",
|
||||
"emptyTempImageFolder": "Vaciar directorio de imágenes temporales",
|
||||
"emptyFolder": "Vaciar directorio",
|
||||
"emptyTempImagesFolderMessage": "Vaciar el directorio de imágenes temporales también restablece completamente el lienzo unificado. Esto incluye todo el historial de deshacer/rehacer, las imágenes en el área de preparación y la capa base del lienzo.",
|
||||
"emptyTempImagesFolderConfirm": "¿Está seguro de que desea vaciar el directorio temporal?",
|
||||
"activeLayer": "Capa activa",
|
||||
"canvasScale": "Escala de lienzo",
|
||||
"boundingBox": "Caja contenedora",
|
||||
"scaledBoundingBox": "Caja contenedora escalada",
|
||||
"boundingBoxPosition": "Posición de caja contenedora",
|
||||
"canvasDimensions": "Dimensiones de lienzo",
|
||||
"canvasPosition": "Posición de lienzo",
|
||||
"cursorPosition": "Posición del cursor",
|
||||
"previous": "Anterior",
|
||||
"next": "Siguiente",
|
||||
"accept": "Aceptar",
|
||||
"showHide": "Mostrar/Ocultar",
|
||||
"discardAll": "Descartar todo",
|
||||
"betaClear": "Limpiar",
|
||||
"betaDarkenOutside": "Oscurecer fuera",
|
||||
"betaLimitToBox": "Limitar a caja",
|
||||
"betaPreserveMasked": "Preservar área enmascarada"
|
||||
}
|
||||
}
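For orientation, the sketch below shows how a nested locale file like this one is typically consumed from a TypeScript frontend via i18next; the import path, language wiring, and specific key lookups are illustrative assumptions, not the project's actual setup.

// Minimal sketch: loading the Spanish locale above with i18next (assumed setup;
// the import path './es.json' is hypothetical).
import i18next from 'i18next';
import es from './es.json';

i18next
  .init({
    lng: 'es',
    fallbackLng: 'en',
    resources: {
      es: { translation: es }, // nested sections become dot-separated keys
    },
  })
  .then((t) => {
    // Section names and keys map directly onto the JSON structure shown above.
    console.log(t('parameters.seed'));           // "Semilla"
    console.log(t('unifiedCanvas.clearCanvas')); // "Limpiar lienzo"
    console.log(t('settings.resetWebUI'));       // "Restablecer interfaz web"
  });

Keeping the sections nested this way means the UI code can reference a stable key such as 'parameters.seed' while translators edit only the per-language JSON files.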