Compare commits
1087 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e481bfac61 | ||
|
|
5040747c67 | ||
|
|
d1ab65a431 | ||
|
|
af4ee7feb8 | ||
|
|
764fb29ade | ||
|
|
1014d3ba44 | ||
|
|
40a48aca88 | ||
|
|
92abc00f16 | ||
|
|
a5719aabf8 | ||
|
|
44a18511fa | ||
|
|
b850dbadaf | ||
|
|
9ef8b944d5 | ||
|
|
efc5a98488 | ||
|
|
1417c87928 | ||
|
|
2dd6fc2b93 | ||
|
|
22213612a0 | ||
|
|
71ee44a827 | ||
|
|
b17ca0a5e7 | ||
|
|
71bbfe4a1a | ||
|
|
5702271991 | ||
|
|
10781e7dc4 | ||
|
|
099d1157c5 | ||
|
|
ab825bf7ee | ||
|
|
10cfeb5ada | ||
|
|
e97515d045 | ||
|
|
0f04bc5789 | ||
|
|
3f74aabecd | ||
|
|
b1a99a51b7 | ||
|
|
8004f8a6d9 | ||
|
|
ff8ff2212a | ||
|
|
8e5363cd83 | ||
|
|
1450779146 | ||
|
|
8cd5d95b8a | ||
|
|
abd6407394 | ||
|
|
734dacfbe9 | ||
|
|
636620b1d5 | ||
|
|
1fe41146f0 | ||
|
|
2ad6ef355a | ||
|
|
865502ee4f | ||
|
|
c7984f3299 | ||
|
|
7f150ed833 | ||
|
|
badf4e256c | ||
|
|
e64c60bbb3 | ||
|
|
1780618543 | ||
|
|
f91fd27624 | ||
|
|
09e41e8f76 | ||
|
|
6eeb2107b3 | ||
|
|
8b47c82992 | ||
|
|
eab435da27 | ||
|
|
17053ad8b7 | ||
|
|
fefb4dc1f8 | ||
|
|
d05b1b3544 | ||
|
|
82d4904c07 | ||
|
|
1cdcf33cfa | ||
|
|
6616fa835a | ||
|
|
cbc029c6f9 | ||
|
|
7b9a4564b1 | ||
|
|
d318968abe | ||
|
|
fcdefa0620 | ||
|
|
e71655237a | ||
|
|
ef8b3ce639 | ||
|
|
36870a8f53 | ||
|
|
b70420951d | ||
|
|
1f0c5b4cf1 | ||
|
|
8648da8111 | ||
|
|
45b4593563 | ||
|
|
41b04316cf | ||
|
|
e97c6db2a3 | ||
|
|
896820a349 | ||
|
|
06c8f468bf | ||
|
|
61920e2701 | ||
|
|
f34ba7ca70 | ||
|
|
c30ef0895d | ||
|
|
aa3a774f73 | ||
|
|
2c30555b84 | ||
|
|
743f605773 | ||
|
|
6b89adfa7e | ||
|
|
8aa4a258f4 | ||
|
|
519c661abb | ||
|
|
174a9b78b0 | ||
|
|
22c956c75f | ||
|
|
13696adc3a | ||
|
|
0196571a12 | ||
|
|
9666f466ab | ||
|
|
240e5486c8 | ||
|
|
aa247e68be | ||
|
|
895c47fd11 | ||
|
|
0c32d7b507 | ||
|
|
09625eae66 | ||
|
|
76249b3d4e | ||
|
|
d85cd99f17 | ||
|
|
f4576dcc2d | ||
|
|
62fe308f84 | ||
|
|
9b984e0d1e | ||
|
|
5502b29340 | ||
|
|
15fa246ccf | ||
|
|
4929ae6c1d | ||
|
|
16a52a607d | ||
|
|
7c68eff99f | ||
|
|
2048a47b85 | ||
|
|
8164b6b9cf | ||
|
|
f73d5a647d | ||
|
|
365e2dde1b | ||
|
|
4fc82d554f | ||
|
|
96b34c0f85 | ||
|
|
dd5a88dcee | ||
|
|
95ed56bf82 | ||
|
|
1ae80f5ab9 | ||
|
|
1f0bd3ca6c | ||
|
|
a1971f6830 | ||
|
|
c6118e8898 | ||
|
|
7ba958cf7f | ||
|
|
383905d5d2 | ||
|
|
6173e3e9ca | ||
|
|
3feb7d8922 | ||
|
|
1d9edbd0dd | ||
|
|
d439abdb89 | ||
|
|
ee47ea0c89 | ||
|
|
300bb2e627 | ||
|
|
ccf8593501 | ||
|
|
0fda612f3f | ||
|
|
5afff65b71 | ||
|
|
7e55bdefce | ||
|
|
620cf84d3d | ||
|
|
cfe567c62a | ||
|
|
cefe12f1df | ||
|
|
1e51c39928 | ||
|
|
42a02bbb80 | ||
|
|
f1ae6dae4c | ||
|
|
6195579910 | ||
|
|
16c8b23b34 | ||
|
|
07ae626b22 | ||
|
|
8d171bb044 | ||
|
|
6e33ca7e9e | ||
|
|
db46e12f2b | ||
|
|
868e4b2db8 | ||
|
|
2e562742c1 | ||
|
|
68e6958009 | ||
|
|
ea6e3a7949 | ||
|
|
b2879ca99f | ||
|
|
4e911566c3 | ||
|
|
9bafda6a15 | ||
|
|
871a8a5375 | ||
|
|
0eef74bc00 | ||
|
|
423ae32097 | ||
|
|
8282e5d045 | ||
|
|
19305cdbdf | ||
|
|
eb9028ab30 | ||
|
|
21483f5d07 | ||
|
|
82dcbac28f | ||
|
|
d43bd4625d | ||
|
|
ea891324a2 | ||
|
|
8fd9ea2193 | ||
|
|
fb02666856 | ||
|
|
f6f5c2731b | ||
|
|
b4e3f771e0 | ||
|
|
99bb9491ac | ||
|
|
a48e021c0b | ||
|
|
825fa6977d | ||
|
|
e332529fbd | ||
|
|
0f6aa7fe19 | ||
|
|
b8870d8290 | ||
|
|
ffa91be3f1 | ||
|
|
2d5294bca1 | ||
|
|
0453f21127 | ||
|
|
9fc09aa4bd | ||
|
|
2468a28e66 | ||
|
|
e3ed748191 | ||
|
|
3f5bf7ac44 | ||
|
|
00378e1ea6 | ||
|
|
5e87062cf8 | ||
|
|
3e7a459990 | ||
|
|
bbf4c03e50 | ||
|
|
b45e632f23 | ||
|
|
611a3a9753 | ||
|
|
1611f0d181 | ||
|
|
08835115e4 | ||
|
|
2d84e28d32 | ||
|
|
57be9ae6c3 | ||
|
|
ef17aae8ab | ||
|
|
0cc39f01a3 | ||
|
|
688d7258f1 | ||
|
|
4513320bf1 | ||
|
|
6c9a2761f5 | ||
|
|
533fd04ef0 | ||
|
|
2bdd738f03 | ||
|
|
7782760541 | ||
|
|
dff5681cf0 | ||
|
|
5a2790a69b | ||
|
|
7c5305ccba | ||
|
|
4013e8ad6f | ||
|
|
d1dfd257f9 | ||
|
|
5322d735ee | ||
|
|
cdb107dcda | ||
|
|
be1393a41c | ||
|
|
e554c2607f | ||
|
|
de2686d323 | ||
|
|
0b72a4a35e | ||
|
|
6215592b12 | ||
|
|
349cc25433 | ||
|
|
214d276379 | ||
|
|
ef24d76adc | ||
|
|
ab2b5a691d | ||
|
|
942a202945 | ||
|
|
1379642fc6 | ||
|
|
408cf5e092 | ||
|
|
ce298d32b5 | ||
|
|
d7107d931a | ||
|
|
147dcc2961 | ||
|
|
efd7f42414 | ||
|
|
4e1b619ad7 | ||
|
|
c7de2b2801 | ||
|
|
e8075658ac | ||
|
|
4202dabee1 | ||
|
|
d67db2bcf1 | ||
|
|
f26199d377 | ||
|
|
7159ec885f | ||
|
|
90cd791e76 | ||
|
|
b5cf734ba9 | ||
|
|
5a95ce5625 | ||
|
|
f7dc8eafee | ||
|
|
89da42ad79 | ||
|
|
e8aba99c92 | ||
|
|
ced9c83e96 | ||
|
|
247816db9a | ||
|
|
80f2cfe3e3 | ||
|
|
9a15a89e20 | ||
|
|
c73a61b785 | ||
|
|
88203d8db2 | ||
|
|
881c69e905 | ||
|
|
c40278dae7 | ||
|
|
7b329b7c91 | ||
|
|
c19b02ab21 | ||
|
|
6ebddf09c2 | ||
|
|
5841e1b5be | ||
|
|
5f09ffa276 | ||
|
|
9e70c216f6 | ||
|
|
cbe8a9550c | ||
|
|
259ecb7b71 | ||
|
|
002791ef68 | ||
|
|
21e491f878 | ||
|
|
12c4c715aa | ||
|
|
fe700d27df | ||
|
|
7a4ceb0f7c | ||
|
|
bb5d77a9fb | ||
|
|
3c55baf06b | ||
|
|
ca882ad5ff | ||
|
|
6a7b4ef63f | ||
|
|
f60d22b29b | ||
|
|
6a6fbe24a3 | ||
|
|
5efd2ed7a8 | ||
|
|
62c346850c | ||
|
|
f6fafe3eb3 | ||
|
|
6547c320a9 | ||
|
|
2d32cf4eeb | ||
|
|
7a4e358d53 | ||
|
|
ac1469bbd3 | ||
|
|
c0c32d9daa | ||
|
|
52e74fef7c | ||
|
|
e431d296c0 | ||
|
|
1e7a5fda24 | ||
|
|
050d72478e | ||
|
|
d3a09f1284 | ||
|
|
e096eef049 | ||
|
|
62c97dd7e6 | ||
|
|
e58b7a7ef9 | ||
|
|
dc556cb1a7 | ||
|
|
0c8f0e3386 | ||
|
|
98f03053ba | ||
|
|
59ef2471e1 | ||
|
|
ce7651944d | ||
|
|
a3e0b285d8 | ||
|
|
3cdfedc649 | ||
|
|
531f596bd1 | ||
|
|
8683426041 | ||
|
|
631592ec99 | ||
|
|
4cd29420ef | ||
|
|
582fee6c3a | ||
|
|
2b39d1677c | ||
|
|
47342277dd | ||
|
|
f7ce6fae9a | ||
|
|
8566490e51 | ||
|
|
6151968cd3 | ||
|
|
ba4691dae8 | ||
|
|
7d16af3aa7 | ||
|
|
61ff90d1fd | ||
|
|
303a2495c7 | ||
|
|
23d54ee69e | ||
|
|
330b417a7b | ||
|
|
f70af7afb9 | ||
|
|
e7368d7231 | ||
|
|
07c3c57cde | ||
|
|
b774c8afc3 | ||
|
|
231dfe01f4 | ||
|
|
5319796e58 | ||
|
|
39daa5aea7 | ||
|
|
a7517ce0de | ||
|
|
fbfffe028f | ||
|
|
19b6c671a6 | ||
|
|
c2fab45a6e | ||
|
|
0596ebd5a9 | ||
|
|
338efa5a7a | ||
|
|
5d4d8f54df | ||
|
|
3d4a9c2deb | ||
|
|
74fad5f6ed | ||
|
|
9c264b42c3 | ||
|
|
09ee1b1877 | ||
|
|
4b27d8821d | ||
|
|
c49d9c2611 | ||
|
|
4134e2e9da | ||
|
|
e4a212dfca | ||
|
|
19bb185fd9 | ||
|
|
1eaa58c970 | ||
|
|
4245c9e0cd | ||
|
|
2b078c0d6e | ||
|
|
0f4413da7d | ||
|
|
91b491b7e7 | ||
|
|
61e8916141 | ||
|
|
da5de6a240 | ||
|
|
fdf9b1c40c | ||
|
|
bc7bfed0d3 | ||
|
|
b532e6dd17 | ||
|
|
b46921c22d | ||
|
|
13f26a99b8 | ||
|
|
3d265e28ff | ||
|
|
29d9ce03ab | ||
|
|
3caa95ced9 | ||
|
|
94cf660848 | ||
|
|
e1cb5b8251 | ||
|
|
101fe9efa9 | ||
|
|
2e9463089d | ||
|
|
8127f0691e | ||
|
|
b55dcf5943 | ||
|
|
bb5fe98e94 | ||
|
|
0290cd6814 | ||
|
|
fc4d07f198 | ||
|
|
e7aeaa310c | ||
|
|
85b5fcd5e1 | ||
|
|
e5d0c9c224 | ||
|
|
162e420e9c | ||
|
|
bfbae09a9c | ||
|
|
d2e8ecbd4b | ||
|
|
a701e4f90b | ||
|
|
f22f81b4ff | ||
|
|
63202e2467 | ||
|
|
ef68a419f1 | ||
|
|
9fc6ee0c4c | ||
|
|
ea65650883 | ||
|
|
5d76c57ce2 | ||
|
|
2c250a515e | ||
|
|
4204740cb2 | ||
|
|
bd3ba596c2 | ||
|
|
0a89d350d9 | ||
|
|
b7fcf6dc04 | ||
|
|
accb1779cb | ||
|
|
387f39407a | ||
|
|
6a32adb7ed | ||
|
|
3ab3a7d37a | ||
|
|
da5fd10bb9 | ||
|
|
9291fde960 | ||
|
|
31ef15210d | ||
|
|
aa01657678 | ||
|
|
6fb6bc6d7f | ||
|
|
da33e038ca | ||
|
|
78f7094a0b | ||
|
|
0b046c95ef | ||
|
|
c13d7aea56 | ||
|
|
f7a47c1b67 | ||
|
|
6c34b89cfb | ||
|
|
7138faf5d3 | ||
|
|
0d3a931e88 | ||
|
|
861e825ebf | ||
|
|
1ca1ab594c | ||
|
|
9425389240 | ||
|
|
9f16ff1774 | ||
|
|
2ac3c9e8fd | ||
|
|
4a9209c5e8 | ||
|
|
b78d718357 | ||
|
|
104466f5c0 | ||
|
|
2ecdfca52f | ||
|
|
e81df1a701 | ||
|
|
61013e8eee | ||
|
|
48d4fccd61 | ||
|
|
2859af386c | ||
|
|
8dee3387fd | ||
|
|
63eeac49f8 | ||
|
|
d5fdee72d3 | ||
|
|
765092eb12 | ||
|
|
2c9747fd41 | ||
|
|
62898b0f8f | ||
|
|
ac7ee9d0a5 | ||
|
|
0adb7d4676 | ||
|
|
27a7980dad | ||
|
|
a5915ccd2c | ||
|
|
d6815f61ee | ||
|
|
d71f11f55c | ||
|
|
ed45dca7c1 | ||
|
|
dd71066391 | ||
|
|
6f51b2078e | ||
|
|
d035e0e811 | ||
|
|
55a8da0f02 | ||
|
|
43de16cae4 | ||
|
|
320cbdd62d | ||
|
|
f8dce07486 | ||
|
|
37382042c1 | ||
|
|
2af8139029 | ||
|
|
a5c77ff926 | ||
|
|
15df6c148a | ||
|
|
e6226b45de | ||
|
|
ab1e207765 | ||
|
|
d2ed8883f7 | ||
|
|
3ddf1f6c3e | ||
|
|
5395707280 | ||
|
|
710e465054 | ||
|
|
30bd79ffa1 | ||
|
|
20c83d7568 | ||
|
|
67e0e97eda | ||
|
|
6bebc679c4 | ||
|
|
9406b95518 | ||
|
|
8d8f93fd00 | ||
|
|
20a3875f32 | ||
|
|
8ab428e588 | ||
|
|
e5dcae5fff | ||
|
|
329cd8a38b | ||
|
|
39f0995d78 | ||
|
|
0855ab4173 | ||
|
|
fe7ab6e480 | ||
|
|
f8dd2df953 | ||
|
|
3795bec037 | ||
|
|
35face48da | ||
|
|
864d080502 | ||
|
|
3a7b495167 | ||
|
|
9d1594cbcc | ||
|
|
c48a1092f7 | ||
|
|
35dba1381c | ||
|
|
631dce3aca | ||
|
|
ea6e998094 | ||
|
|
d551de6e06 | ||
|
|
7ce1cf6f3e | ||
|
|
2e89997d29 | ||
|
|
a7e2a7037a | ||
|
|
75d8fc77c2 | ||
|
|
4ea954fd66 | ||
|
|
8b8c1068d9 | ||
|
|
7793dbb4b4 | ||
|
|
77b93ad0c2 | ||
|
|
f99671b764 | ||
|
|
a8a30065a4 | ||
|
|
05b8de5300 | ||
|
|
387f796ebe | ||
|
|
27ba91e74d | ||
|
|
3033331f65 | ||
|
|
362b234cd1 | ||
|
|
bbe53841e4 | ||
|
|
a825210bd3 | ||
|
|
88fb2a6b46 | ||
|
|
042d3e866f | ||
|
|
0ea711e520 | ||
|
|
ef5f9600e6 | ||
|
|
acdffb1503 | ||
|
|
6679e5be69 | ||
|
|
89ad2e55d9 | ||
|
|
f8dff5b6c2 | ||
|
|
104b0ef0ba | ||
|
|
07cdf6e9cb | ||
|
|
4cf9c965d4 | ||
|
|
4039e9e368 | ||
|
|
38fd0668ba | ||
|
|
5cae8206f9 | ||
|
|
3ce60161d2 | ||
|
|
00b5466f0d | ||
|
|
6eeef7c17e | ||
|
|
219da47576 | ||
|
|
47106eeeea | ||
|
|
07e21acab5 | ||
|
|
65acdfb09b | ||
|
|
9e2ce00f7b | ||
|
|
44599a239f | ||
|
|
7b46d5f823 | ||
|
|
2115874587 | ||
|
|
cd5141f3d1 | ||
|
|
b815aa2130 | ||
|
|
19a6e904ec | ||
|
|
1200fbd3bd | ||
|
|
343ae8b7af | ||
|
|
442f584afa | ||
|
|
55482d7ce3 | ||
|
|
0c3de595df | ||
|
|
38ff75c7ea | ||
|
|
963e0f8a53 | ||
|
|
12f40cbbeb | ||
|
|
e524fb2086 | ||
|
|
eb7ccc356f | ||
|
|
4635836ebc | ||
|
|
d25bf7a55a | ||
|
|
3539f0a1da | ||
|
|
737a7f779b | ||
|
|
71dcc17fa0 | ||
|
|
a90ce61b1b | ||
|
|
d43167ac0b | ||
|
|
245cf606a3 | ||
|
|
943616044a | ||
|
|
943808b925 | ||
|
|
30745f163d | ||
|
|
e20108878c | ||
|
|
f73d349dfe | ||
|
|
dc86fc92ce | ||
|
|
aa785c3ef1 | ||
|
|
fb4feb380b | ||
|
|
9b15b228b8 | ||
|
|
99eb7e6ef2 | ||
|
|
bf50a68eb5 | ||
|
|
67a7d46a29 | ||
|
|
3e2cf8a259 | ||
|
|
624fe4794b | ||
|
|
44731f8a37 | ||
|
|
b2a3c5cbe8 | ||
|
|
e9f690bf9d | ||
|
|
0eb07b7488 | ||
|
|
16e7cbdb38 | ||
|
|
135c62f1a4 | ||
|
|
582e19056a | ||
|
|
52de5c8b33 | ||
|
|
799dc6d0df | ||
|
|
79689e87ce | ||
|
|
0d0481ce75 | ||
|
|
869d9e22c7 | ||
|
|
3f77b68a9d | ||
|
|
2daf187bdb | ||
|
|
e73a2d68b5 | ||
|
|
2dd5c0696d | ||
|
|
f25ad03011 | ||
|
|
c00da1702f | ||
|
|
83f20c23aa | ||
|
|
0050176d57 | ||
|
|
f7bb90234d | ||
|
|
1d3c43b67f | ||
|
|
ef505d2bc5 | ||
|
|
a9a59a3046 | ||
|
|
da012e1bfd | ||
|
|
90c8aa716d | ||
|
|
94cd20de05 | ||
|
|
14725f9d59 | ||
|
|
c6c146f54f | ||
|
|
90d9d6ea00 | ||
|
|
1f62517636 | ||
|
|
29eea93592 | ||
|
|
7179cc7f25 | ||
|
|
b12c8a28d7 | ||
|
|
8c2e82cc54 | ||
|
|
3ae094b673 | ||
|
|
74e6ce3e6a | ||
|
|
71426d200e | ||
|
|
9b7159720f | ||
|
|
e7c2b90bd1 | ||
|
|
d05373d35a | ||
|
|
bd8bb8c80b | ||
|
|
dac1ab0a05 | ||
|
|
2a44411f5b | ||
|
|
2f1c1e7695 | ||
|
|
2b6d78e436 | ||
|
|
b1da13a984 | ||
|
|
d03947a6ee | ||
|
|
422f2ecc91 | ||
|
|
f73a116f43 | ||
|
|
8aa40714e3 | ||
|
|
eaf6d46a7b | ||
|
|
906dafe3cd | ||
|
|
d3047c7cb0 | ||
|
|
62412f8398 | ||
|
|
f1ca789097 | ||
|
|
4104ac6270 | ||
|
|
8d5a225011 | ||
|
|
ca2f579f43 | ||
|
|
b1a2f4ab44 | ||
|
|
3c1ef48fe2 | ||
|
|
c732fd0740 | ||
|
|
04c8937fb6 | ||
|
|
4352eb6628 | ||
|
|
1ae269b8e0 | ||
|
|
dd07392045 | ||
|
|
e33971fe2c | ||
|
|
83e1c39ab8 | ||
|
|
b101be041b | ||
|
|
909740f430 | ||
|
|
aaf7a4f1d3 | ||
|
|
99d23c4d81 | ||
|
|
5e8d1ca19f | ||
|
|
fb4dc7eaf9 | ||
|
|
175c7bddfc | ||
|
|
71a1e0d0e1 | ||
|
|
ce1bfbc32d | ||
|
|
a2e53892ec | ||
|
|
7a923beb4c | ||
|
|
be8a992b85 | ||
|
|
03353ce978 | ||
|
|
c8f4a04196 | ||
|
|
9bef643bf5 | ||
|
|
f6b31d51e0 | ||
|
|
62e1cb48fd | ||
|
|
543464182f | ||
|
|
83a3cc9eb4 | ||
|
|
d12ae3bab0 | ||
|
|
61a4897b71 | ||
|
|
194c8e1c2e | ||
|
|
44e4090909 | ||
|
|
0564397ee6 | ||
|
|
3081b6b7dd | ||
|
|
37d38f196e | ||
|
|
17aee48734 | ||
|
|
9cdd78c6cb | ||
|
|
5561a95232 | ||
|
|
27f0f3e52b | ||
|
|
b159b2fe42 | ||
|
|
63902f3d34 | ||
|
|
1fb15d5c81 | ||
|
|
cc2042bd4c | ||
|
|
ee4273d760 | ||
|
|
2619a0b286 | ||
|
|
92c6a3812d | ||
|
|
230527b1fb | ||
|
|
bfe36c9f8b | ||
|
|
40388b5b90 | ||
|
|
0c34554170 | ||
|
|
b0eb864a25 | ||
|
|
1264cc2d36 | ||
|
|
f7cd98c238 | ||
|
|
8e7d744c60 | ||
|
|
9210bf7d3a | ||
|
|
8f35819ddf | ||
|
|
04d93f0445 | ||
|
|
b7ce5b4f1b | ||
|
|
7e27f189cf | ||
|
|
9472945299 | ||
|
|
f25c1f900f | ||
|
|
493eaa7389 | ||
|
|
ce6d618e3b | ||
|
|
8254ca9492 | ||
|
|
7d677a63b8 | ||
|
|
a2fb2e0d6b | ||
|
|
93cba3fba5 | ||
|
|
3e48b9ff85 | ||
|
|
a956bf9fda | ||
|
|
9f77df70c9 | ||
|
|
c04133a512 | ||
|
|
59747ecf24 | ||
|
|
a6e7aa8f97 | ||
|
|
51fdbe22d2 | ||
|
|
3b01e6e423 | ||
|
|
2e14ba8716 | ||
|
|
7308022bc7 | ||
|
|
8273c04575 | ||
|
|
ee7d4d712a | ||
|
|
d8c1b78d83 | ||
|
|
554445a985 | ||
|
|
b2bf2b08ff | ||
|
|
e7573ac90f | ||
|
|
cdb664f6e5 | ||
|
|
a127eeff20 | ||
|
|
1ca517d73b | ||
|
|
38b1dce7c3 | ||
|
|
c9f9eed04e | ||
|
|
fbea657eff | ||
|
|
55db9dba0a | ||
|
|
64051d081c | ||
|
|
ddb007af65 | ||
|
|
e574a1574f | ||
|
|
2bf9f1f0d8 | ||
|
|
8142b72bcd | ||
|
|
dc2f30a34e | ||
|
|
be7de4849c | ||
|
|
83e6ab08aa | ||
|
|
b385fdd7de | ||
|
|
d965540103 | ||
|
|
404d59b1b8 | ||
|
|
9980c4baf9 | ||
|
|
4c1267338b | ||
|
|
2e0b1c4c8b | ||
|
|
da75876639 | ||
|
|
d4d1014c9f | ||
|
|
213e12fe13 | ||
|
|
3e0a7b6229 | ||
|
|
da88097aba | ||
|
|
3f13dd3ae8 | ||
|
|
d3b0c54c14 | ||
|
|
79b4afeae7 | ||
|
|
9c61aed7d0 | ||
|
|
da223dfe81 | ||
|
|
e035397dcf | ||
|
|
899ba975a6 | ||
|
|
bfa65560eb | ||
|
|
ed9307f469 | ||
|
|
ff87239fb0 | ||
|
|
a357bf4f19 | ||
|
|
63f274f6df | ||
|
|
2ca4242f5f | ||
|
|
c9d27634b4 | ||
|
|
027990928e | ||
|
|
87469a5fdd | ||
|
|
4101127011 | ||
|
|
f6191a4f12 | ||
|
|
8c5d614c38 | ||
|
|
42883545f9 | ||
|
|
61357e4e6e | ||
|
|
c6ae9f1176 | ||
|
|
11d7e6b92f | ||
|
|
c3b992db96 | ||
|
|
1ffd4a9e06 | ||
|
|
147d39cb7c | ||
|
|
824cb201b1 | ||
|
|
582880b314 | ||
|
|
2b79a716aa | ||
|
|
d572af2acf | ||
|
|
54e6a68acb | ||
|
|
09f62032ec | ||
|
|
711ffd238f | ||
|
|
056cb0d8a8 | ||
|
|
37a204324b | ||
|
|
1fc1f8bf05 | ||
|
|
8ff507b03b | ||
|
|
33d6603fef | ||
|
|
b0b1993918 | ||
|
|
07a3df6001 | ||
|
|
92d4dfaabf | ||
|
|
bc626af6ca | ||
|
|
a45786ca2e | ||
|
|
2926c8299c | ||
|
|
32a5ffe436 | ||
|
|
62dd3b7d7d | ||
|
|
15aa7593f6 | ||
|
|
9b3ac92c24 | ||
|
|
66f6ef1b35 | ||
|
|
d93cd10b0d | ||
|
|
a488b14373 | ||
|
|
0147dd6431 | ||
|
|
90d37eac03 | ||
|
|
9d19213b8a | ||
|
|
71c3835f3e | ||
|
|
0fbd26e9bf | ||
|
|
2a78eb96d0 | ||
|
|
3a1003f702 | ||
|
|
329a9d0b11 | ||
|
|
17d75f3da8 | ||
|
|
20551857da | ||
|
|
32122e0312 | ||
|
|
230de023ff | ||
|
|
e6fc8af249 | ||
|
|
febf86dedf | ||
|
|
76ae17abac | ||
|
|
339ff4b464 | ||
|
|
00c0e487dd | ||
|
|
5c8dfa38be | ||
|
|
acf85c66a5 | ||
|
|
3619918954 | ||
|
|
65b14683a8 | ||
|
|
f4fc02a3da | ||
|
|
c334170a93 | ||
|
|
deab6c64fc | ||
|
|
e1c9503951 | ||
|
|
9a21812bf5 | ||
|
|
347b5ce452 | ||
|
|
b39029521b | ||
|
|
97b26f3de2 | ||
|
|
e19a7a990d | ||
|
|
3e424e1046 | ||
|
|
db20b4af9c | ||
|
|
44ff8f8531 | ||
|
|
c974c95e2b | ||
|
|
3b2590243c | ||
|
|
1c2bd275fe | ||
|
|
0cf11ce488 | ||
|
|
a8b794d7e0 | ||
|
|
f868362ca8 | ||
|
|
8858f7e97c | ||
|
|
d6195522aa | ||
|
|
3b79b935a3 | ||
|
|
4079333e29 | ||
|
|
99581dbbf7 | ||
|
|
2db4969e18 | ||
|
|
2ecc1abf21 | ||
|
|
703bc9494a | ||
|
|
e5ab07091d | ||
|
|
891678b656 | ||
|
|
39ea2a257c | ||
|
|
2d68eae16b | ||
|
|
d65948c423 | ||
|
|
9e599c65c5 | ||
|
|
9910a0b004 | ||
|
|
ff96358cb3 | ||
|
|
22267475eb | ||
|
|
5eb0f8ffa7 | ||
|
|
e03a3fcf68 | ||
|
|
edf471f655 | ||
|
|
5b02c8ca4a | ||
|
|
e7688c53b8 | ||
|
|
87cada42db | ||
|
|
6fe67ee426 | ||
|
|
5fbc81885a | ||
|
|
25ba5451f2 | ||
|
|
138c9cf7a8 | ||
|
|
87981306a3 | ||
|
|
f7893b3ea9 | ||
|
|
87395fe6fe | ||
|
|
57bff2a663 | ||
|
|
15f876c66c | ||
|
|
522c35ac5b | ||
|
|
bb2d6d640f | ||
|
|
2412d8dec1 | ||
|
|
2ab5a43663 | ||
|
|
0ec3d6c10a | ||
|
|
d208e1b0f5 | ||
|
|
8a6ba6a212 | ||
|
|
b793d69ff3 | ||
|
|
54f55471df | ||
|
|
cec7fb7dc6 | ||
|
|
b0b82efffe | ||
|
|
e599604294 | ||
|
|
528a183d42 | ||
|
|
b953f82346 | ||
|
|
57a3ea9d7b | ||
|
|
ef2058824a | ||
|
|
6f93dc7712 | ||
|
|
a6e28d2eb7 | ||
|
|
a3a50bb886 | ||
|
|
a705a5a0aa | ||
|
|
f6bc13736a | ||
|
|
194d4c75b3 | ||
|
|
bc9c60ae71 | ||
|
|
0a7005f2bc | ||
|
|
c4fb8e304b | ||
|
|
fe2a2cfc8b | ||
|
|
32dab7d4bf | ||
|
|
1ea541baa6 | ||
|
|
82b7c118c4 | ||
|
|
1c501333e8 | ||
|
|
9a3c7800a7 | ||
|
|
11dc3ca1f8 | ||
|
|
ce5e57d828 | ||
|
|
e98fe9c22d | ||
|
|
6afc0f9b38 | ||
|
|
065a1da9d1 | ||
|
|
916f5bfbb2 | ||
|
|
7f491fd2d2 | ||
|
|
203a6d8a00 | ||
|
|
cac3f5fc61 | ||
|
|
7e33560010 | ||
|
|
759f563b6d | ||
|
|
8c47638eec | ||
|
|
8233098136 | ||
|
|
1cb365fff1 | ||
|
|
e405385e0d | ||
|
|
15c5d6a5ef | ||
|
|
132e2b3ae5 | ||
|
|
c16b7f090e | ||
|
|
057fc95aa3 | ||
|
|
94bad8555a | ||
|
|
6c0dd9b5ef | ||
|
|
1c102c71fc | ||
|
|
75f23793df | ||
|
|
9dcfa8de25 | ||
|
|
3d6650e59b | ||
|
|
7d201d7be0 | ||
|
|
cafaef11f7 | ||
|
|
1e201132ed | ||
|
|
8604fd2727 | ||
|
|
aa6aa68753 | ||
|
|
86b7b07c24 | ||
|
|
af56aee5c6 | ||
|
|
1ec92dd5f3 | ||
|
|
1c946561d3 | ||
|
|
b537e92789 | ||
|
|
7c06849c4d | ||
|
|
488334710b | ||
|
|
19341e95a6 | ||
|
|
c82e94811b | ||
|
|
c15a902e8d | ||
|
|
ca6385e6fa | ||
|
|
828ec1fb5c | ||
|
|
1c687d6d03 | ||
|
|
b9e910b5f4 | ||
|
|
101cac6a21 | ||
|
|
8ea07f3bb0 | ||
|
|
79e79b78aa | ||
|
|
2325c6cd40 | ||
|
|
3ec33414ec | ||
|
|
a61a690f6c | ||
|
|
06f542ed7a | ||
|
|
8954171eea | ||
|
|
e0e69ad279 | ||
|
|
e3e8024e15 | ||
|
|
c4cf888532 | ||
|
|
9eff9e5752 | ||
|
|
84c1825abc | ||
|
|
0621dd7ed4 | ||
|
|
67ddba9cff | ||
|
|
cbf5426d27 | ||
|
|
bac60ca21e | ||
|
|
8e0d671488 | ||
|
|
ee6deef14c | ||
|
|
5d8c048d0d | ||
|
|
f8fd6e39a3 | ||
|
|
dafca16c8b | ||
|
|
3449c05bf4 | ||
|
|
5c3fad22fd | ||
|
|
425cf67ee5 | ||
|
|
4f9529db9e | ||
|
|
f3931a031d | ||
|
|
a4995b7878 | ||
|
|
10d8d1bb25 | ||
|
|
b30ae57731 | ||
|
|
b0bfbafd3d | ||
|
|
7c50bd2039 | ||
|
|
ae4e385abd | ||
|
|
e301cd3321 | ||
|
|
2977680ca1 | ||
|
|
2a5aa6e986 | ||
|
|
3bba41ee89 | ||
|
|
179b5f7839 | ||
|
|
26d7712f03 | ||
|
|
c0b370e1b9 | ||
|
|
15cc92e54a | ||
|
|
acdd5b3922 | ||
|
|
9685fc210c | ||
|
|
f4cdc0001f | ||
|
|
3f78e9a1a3 | ||
|
|
280e2899d7 | ||
|
|
82b0bb838c | ||
|
|
8482518618 | ||
|
|
6425bda663 | ||
|
|
12413b0be6 | ||
|
|
275dca83be | ||
|
|
be5bf03ccc | ||
|
|
0c479cd706 | ||
|
|
7325b73073 | ||
|
|
49380f75a9 | ||
|
|
3d4276439f | ||
|
|
a4c36dbc15 | ||
|
|
4fbd11a1f2 | ||
|
|
8ce3d4dd7f | ||
|
|
b82c968278 | ||
|
|
bc8e86e643 | ||
|
|
1b6fab59a4 | ||
|
|
d1dd35a1d2 | ||
|
|
400f062771 | ||
|
|
40894d67ac | ||
|
|
08a0b85111 | ||
|
|
7da6fad359 | ||
|
|
b24d182237 | ||
|
|
2bdcc106f2 | ||
|
|
7a98387e8d | ||
|
|
58d0f14d03 | ||
|
|
bc9471987b | ||
|
|
dc6e60cbcc | ||
|
|
7dae5fb131 | ||
|
|
3bc1ff5e5a | ||
|
|
8ff9c69e2f | ||
|
|
988ace8029 | ||
|
|
6e9d996ece | ||
|
|
789714b0b1 | ||
|
|
773a64d4c0 | ||
|
|
bb7629d2b8 | ||
|
|
745c020aa2 | ||
|
|
c5344acb25 | ||
|
|
318eb35ea0 | ||
|
|
6e2fd2affe | ||
|
|
8faa06fb15 | ||
|
|
0b7ca6a326 | ||
|
|
ce8c238ac4 | ||
|
|
f6c37e46e1 | ||
|
|
2d69efccef | ||
|
|
f9d2aafaeb | ||
|
|
22514aec2e | ||
|
|
5a22a83f4c | ||
|
|
b1d43eae46 | ||
|
|
0b8cdb6964 | ||
|
|
aed5ad22fb | ||
|
|
dc9c16b93d | ||
|
|
f6e858a548 | ||
|
|
4c2db171ca | ||
|
|
1255127e49 | ||
|
|
1cb74a6357 | ||
|
|
5e2b250426 | ||
|
|
ad190cfbb2 | ||
|
|
542ceb051b | ||
|
|
3473669458 | ||
|
|
3170c83d8d | ||
|
|
3046dabde2 | ||
|
|
1b02074fea | ||
|
|
f15fd2c3d3 | ||
|
|
081271d6a1 | ||
|
|
27f62999c9 | ||
|
|
89d130edf4 | ||
|
|
0e551a3844 | ||
|
|
31869885d9 | ||
|
|
4c026d9d92 | ||
|
|
435231ef08 | ||
|
|
19a79caf41 | ||
|
|
7b095f8f97 | ||
|
|
f5dfd5b0dc | ||
|
|
9579a401b5 | ||
|
|
47a97f7e97 | ||
|
|
3c146ebf9e | ||
|
|
efbcbb0d91 | ||
|
|
578d8b0cb4 | ||
|
|
2b1aaf4ee7 | ||
|
|
4a7f5c7469 | ||
|
|
98fe044dee | ||
|
|
62d4bb05d4 | ||
|
|
02b1040264 | ||
|
|
dfd5899611 | ||
|
|
8ea88f49b1 | ||
|
|
a62541d976 | ||
|
|
fbd9a49899 | ||
|
|
4e571e12b8 | ||
|
|
2567f5faa5 | ||
|
|
97684d78d3 | ||
|
|
57791834ab | ||
|
|
3b0c4b74b6 | ||
|
|
7a701506a4 | ||
|
|
5157cbeda1 | ||
|
|
3d7bc074cf | ||
|
|
b296933ba0 | ||
|
|
70bb7f4a61 | ||
|
|
45cc867b0c | ||
|
|
9c9cb71544 | ||
|
|
173dc34194 | ||
|
|
333219be35 | ||
|
|
c1230da3ab | ||
|
|
a7515624b2 | ||
|
|
9f34ddfcea | ||
|
|
6499b99dad | ||
|
|
c6611b2ad6 | ||
|
|
395445e7b0 | ||
|
|
89c6c11214 | ||
|
|
c6a7be63b8 | ||
|
|
75165957c9 | ||
|
|
4f247a3672 | ||
|
|
d60df54f69 | ||
|
|
1f25f52af9 | ||
|
|
7541c7cf5d | ||
|
|
a6cdde3ce4 | ||
|
|
a53b9a443f | ||
|
|
6e1328d4c2 | ||
|
|
440065f7f8 | ||
|
|
2c27e759cd | ||
|
|
82481a6f9c | ||
|
|
90d64388ab | ||
|
|
3444c8e6b8 | ||
|
|
74419f41a3 | ||
|
|
d84321e080 | ||
|
|
6542556ebd | ||
|
|
542ee56c77 | ||
|
|
461e662644 | ||
|
|
58d73f5cae | ||
|
|
0c1c220bb9 | ||
|
|
bf5ccfffa5 | ||
|
|
70bbb670ec | ||
|
|
7b270ec3b0 | ||
|
|
e4ef7bdbb9 | ||
|
|
5f42d08945 | ||
|
|
911c99f125 | ||
|
|
c7ccb9dacd | ||
|
|
7a0d4c3350 | ||
|
|
2154dd2349 | ||
|
|
f3050fefce | ||
|
|
595d15455a | ||
|
|
183b98384f | ||
|
|
40d7141a4d | ||
|
|
6d475ee290 | ||
|
|
c430f5452b | ||
|
|
97de5e31f9 | ||
|
|
a99aab6309 | ||
|
|
5a40f7ad15 | ||
|
|
2f29b78a00 | ||
|
|
bcb6e2e506 | ||
|
|
194b875cf3 | ||
|
|
b2cd98259d | ||
|
|
0f55d89e20 | ||
|
|
762ca60a30 | ||
|
|
8a8be92eac | ||
|
|
e7fb9f342c | ||
|
|
9318719b9e | ||
|
|
935a9d3c75 | ||
|
|
8e76bc2b5d | ||
|
|
93b1298d46 | ||
|
|
1af86618e3 | ||
|
|
b732bcad2f |
3
.dockerignore
Normal file
@@ -0,0 +1,3 @@
|
||||
*
|
||||
!environment*.yml
|
||||
!docker-build
|
||||
4
.github/CODEOWNERS
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
ldm/invoke/pngwriter.py @CapableWeb
|
||||
ldm/invoke/server_legacy.py @CapableWeb
|
||||
scripts/legacy_api.py @CapableWeb
|
||||
tests/legacy_tests.sh @CapableWeb
|
||||
102
.github/ISSUE_TEMPLATE/BUG_REPORT.yml
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
name: 🐞 Bug Report
|
||||
|
||||
description: File a bug report
|
||||
|
||||
title: '[bug]: '
|
||||
|
||||
labels: ['bug']
|
||||
|
||||
# assignees:
|
||||
# - moderator_bot
|
||||
# - lstein
|
||||
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for taking the time to fill out this Bug Report!
|
||||
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Is there an existing issue for this?
|
||||
description: |
|
||||
Please use the [search function](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
|
||||
irst to see if an issue already exists for the bug you encountered.
|
||||
options:
|
||||
- label: I have searched the existing issues
|
||||
required: true
|
||||
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: __Describe your environment__
|
||||
|
||||
- type: dropdown
|
||||
id: os_dropdown
|
||||
attributes:
|
||||
label: OS
|
||||
description: Which operating System did you use when the bug occured
|
||||
multiple: false
|
||||
options:
|
||||
- 'Linux'
|
||||
- 'Windows'
|
||||
- 'macOS'
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: dropdown
|
||||
id: gpu_dropdown
|
||||
attributes:
|
||||
label: GPU
|
||||
description: Which kind of Graphic-Adapter is your System using
|
||||
multiple: false
|
||||
options:
|
||||
- 'cuda'
|
||||
- 'amd'
|
||||
- 'mps'
|
||||
- 'cpu'
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: input
|
||||
id: vram
|
||||
attributes:
|
||||
label: VRAM
|
||||
description: Size of the VRAM if known
|
||||
placeholder: 8GB
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: textarea
|
||||
id: what-happened
|
||||
attributes:
|
||||
label: What happened?
|
||||
description: |
|
||||
Briefly describe what happened, what you expected to happen and how to reproduce this bug.
|
||||
placeholder: When using the webinterface and right-clicking on button X instead of the popup-menu there error Y appears
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Screenshots
|
||||
description: If applicable, add screenshots to help explain your problem
|
||||
placeholder: this is what the result looked like <screenshot>
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Additional context
|
||||
description: Add any other context about the problem here
|
||||
placeholder: Only happens when there is full moon and Friday the 13th on Christmas Eve 🎅🏻
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: input
|
||||
id: contact
|
||||
attributes:
|
||||
label: Contact Details
|
||||
description: __OPTIONAL__ How can we get in touch with you if we need more info (besides this issue)?
|
||||
placeholder: ex. email@example.com, discordname, twitter, ...
|
||||
validations:
|
||||
required: false
|
||||
56
.github/ISSUE_TEMPLATE/FEATURE_REQUEST.yml
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
name: Feature Request
|
||||
description: Commit a idea or Request a new feature
|
||||
title: '[enhancement]: '
|
||||
labels: ['enhancement']
|
||||
# assignees:
|
||||
# - lstein
|
||||
# - tildebyte
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for taking the time to fill out this Feature request!
|
||||
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Is there an existing issue for this?
|
||||
description: |
|
||||
Please make use of the [search function](https://github.com/invoke-ai/InvokeAI/labels/enhancement)
|
||||
to see if a simmilar issue already exists for the feature you want to request
|
||||
options:
|
||||
- label: I have searched the existing issues
|
||||
required: true
|
||||
|
||||
- type: input
|
||||
id: contact
|
||||
attributes:
|
||||
label: Contact Details
|
||||
description: __OPTIONAL__ How could we get in touch with you if we need more info (besides this issue)?
|
||||
placeholder: ex. email@example.com, discordname, twitter, ...
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: textarea
|
||||
id: whatisexpected
|
||||
attributes:
|
||||
label: What should this feature add?
|
||||
description: Please try to explain the functionality this feature should add
|
||||
placeholder: |
|
||||
Instead of one huge textfield, it would be nice to have forms for bug-reports, feature-requests, ...
|
||||
Great benefits with automatic labeling, assigning and other functionalitys not available in that form
|
||||
via old-fashioned markdown-templates. I would also love to see the use of a moderator bot 🤖 like
|
||||
https://github.com/marketplace/actions/issue-moderator-with-commands to auto close old issues and other things
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Alternatives
|
||||
description: Describe alternatives you've considered
|
||||
placeholder: A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Aditional Content
|
||||
description: Add any other context or screenshots about the feature request here.
|
||||
placeholder: This is a Mockup of the design how I imagine it <screenshot>
|
||||
36
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -1,36 +0,0 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Describe your environment**
|
||||
- GPU: [cuda/amd/mps/cpu]
|
||||
- VRAM: [if known]
|
||||
- CPU arch: [x86/arm]
|
||||
- OS: [Linux/Windows/macOS]
|
||||
- Python: [Anaconda/miniconda/miniforge/pyenv/other (explain)]
|
||||
- Branch: [if `git status` says anything other than "On branch main" paste it here]
|
||||
- Commit: [run `git show` and paste the line that starts with "Merge" here]
|
||||
|
||||
**Describe the bug**
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
**To Reproduce**
|
||||
Steps to reproduce the behavior:
|
||||
1. Go to '...'
|
||||
2. Click on '....'
|
||||
3. Scroll down to '....'
|
||||
4. See error
|
||||
|
||||
**Expected behavior**
|
||||
A clear and concise description of what you expected to happen.
|
||||
|
||||
**Screenshots**
|
||||
If applicable, add screenshots to help explain your problem.
|
||||
|
||||
**Additional context**
|
||||
Add any other context about the problem here.
|
||||
14
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: Project-Documentation
|
||||
url: https://invoke-ai.github.io/InvokeAI/
|
||||
about: Should be your first place to go when looking for manuals/FAQs regarding our InvokeAI Toolkit
|
||||
- name: Discord
|
||||
url: https://discord.gg/ZmtBAhwWhy
|
||||
about: Our Discord Community could maybe help you out via live-chat
|
||||
- name: GitHub Community Support
|
||||
url: https://github.com/orgs/community/discussions
|
||||
about: Please ask and answer questions regarding the GitHub Platform here.
|
||||
- name: GitHub Security Bug Bounty
|
||||
url: https://bounty.github.com/
|
||||
about: Please report security vulnerabilities of the GitHub Platform here.
|
||||
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
@@ -1,20 +0,0 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
**Describe alternatives you've considered**
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
**Additional context**
|
||||
Add any other context or screenshots about the feature request here.
|
||||
48
.github/workflows/build-container.yml
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
# Building the Image without pushing to confirm it is still buildable
|
||||
# confirum functionality would unfortunately need way more resources
|
||||
name: build container image
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'main'
|
||||
- 'development'
|
||||
|
||||
jobs:
|
||||
docker:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
arch:
|
||||
- x86_64
|
||||
- aarch64
|
||||
include:
|
||||
- arch: x86_64
|
||||
conda-env-file: environment-lin-cuda.yml
|
||||
- arch: aarch64
|
||||
conda-env-file: environment-lin-aarch64.yml
|
||||
runs-on: ubuntu-latest
|
||||
name: ${{ matrix.arch }}
|
||||
steps:
|
||||
- name: prepare docker-tag
|
||||
env:
|
||||
repository: ${{ github.repository }}
|
||||
run: echo "dockertag=${repository,,}" >> $GITHUB_ENV
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Build container
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
context: .
|
||||
file: docker-build/Dockerfile
|
||||
platforms: Linux/${{ matrix.arch }}
|
||||
push: false
|
||||
tags: ${{ env.dockertag }}:${{ matrix.arch }}
|
||||
build-args: |
|
||||
conda_env_file=${{ matrix.conda-env-file }}
|
||||
conda_version=py39_4.12.0-Linux-${{ matrix.arch }}
|
||||
invokeai_git=${{ github.repository }}
|
||||
invokeai_branch=${{ github.ref_name }}
|
||||
70
.github/workflows/create-caches.yml
vendored
@@ -1,70 +0,0 @@
|
||||
name: Create Caches
|
||||
on:
|
||||
workflow_dispatch
|
||||
jobs:
|
||||
build:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ ubuntu-latest, macos-12 ]
|
||||
name: Create Caches on ${{ matrix.os }} conda
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- name: Set platform variables
|
||||
id: vars
|
||||
run: |
|
||||
if [ "$RUNNER_OS" = "macOS" ]; then
|
||||
echo "::set-output name=ENV_FILE::environment-mac.yml"
|
||||
echo "::set-output name=PYTHON_BIN::/usr/local/miniconda/envs/ldm/bin/python"
|
||||
elif [ "$RUNNER_OS" = "Linux" ]; then
|
||||
echo "::set-output name=ENV_FILE::environment.yml"
|
||||
echo "::set-output name=PYTHON_BIN::/usr/share/miniconda/envs/ldm/bin/python"
|
||||
fi
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
- name: Use Cached Stable Diffusion v1.4 Model
|
||||
id: cache-sd-v1-4
|
||||
uses: actions/cache@v3
|
||||
env:
|
||||
cache-name: cache-sd-v1-4
|
||||
with:
|
||||
path: models/ldm/stable-diffusion-v1/model.ckpt
|
||||
key: ${{ env.cache-name }}
|
||||
restore-keys: |
|
||||
${{ env.cache-name }}
|
||||
- name: Download Stable Diffusion v1.4 Model
|
||||
if: ${{ steps.cache-sd-v1-4.outputs.cache-hit != 'true' }}
|
||||
run: |
|
||||
if [ ! -e models/ldm/stable-diffusion-v1 ]; then
|
||||
mkdir -p models/ldm/stable-diffusion-v1
|
||||
fi
|
||||
if [ ! -e models/ldm/stable-diffusion-v1/model.ckpt ]; then
|
||||
curl -o models/ldm/stable-diffusion-v1/model.ckpt ${{ secrets.SD_V1_4_URL }}
|
||||
fi
|
||||
- name: Use Cached Dependencies
|
||||
id: cache-conda-env-ldm
|
||||
uses: actions/cache@v3
|
||||
env:
|
||||
cache-name: cache-conda-env-ldm
|
||||
with:
|
||||
path: ~/.conda/envs/ldm
|
||||
key: ${{ env.cache-name }}
|
||||
restore-keys: |
|
||||
${{ env.cache-name }}-${{ runner.os }}-${{ hashFiles(steps.vars.outputs.ENV_FILE) }}
|
||||
- name: Install Dependencies
|
||||
if: ${{ steps.cache-conda-env-ldm.outputs.cache-hit != 'true' }}
|
||||
run: |
|
||||
conda env create -f ${{ steps.vars.outputs.ENV_FILE }}
|
||||
- name: Use Cached Huggingface and Torch models
|
||||
id: cache-huggingface-torch
|
||||
uses: actions/cache@v3
|
||||
env:
|
||||
cache-name: cache-huggingface-torch
|
||||
with:
|
||||
path: ~/.cache
|
||||
key: ${{ env.cache-name }}
|
||||
restore-keys: |
|
||||
${{ env.cache-name }}-${{ hashFiles('scripts/preload_models.py') }}
|
||||
- name: Download Huggingface and Torch models
|
||||
if: ${{ steps.cache-huggingface-torch.outputs.cache-hit != 'true' }}
|
||||
run: |
|
||||
${{ steps.vars.outputs.PYTHON_BIN }} scripts/preload_models.py
|
||||
28
.github/workflows/mkdocs-flow.yml
vendored
@@ -1,28 +0,0 @@
|
||||
name: Deploy
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
jobs:
|
||||
build:
|
||||
name: Deploy docs to GitHub Pages
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Build
|
||||
uses: Tiryoh/actions-mkdocs@v0
|
||||
with:
|
||||
mkdocs_version: 'latest' # option
|
||||
requirements: '/requirements-mkdocs.txt' # option
|
||||
configfile: '/mkdocs.yml' # option
|
||||
- name: Deploy
|
||||
uses: peaceiris/actions-gh-pages@v3
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
publish_dir: ./site
|
||||
40
.github/workflows/mkdocs-material.yml
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
name: mkdocs-material
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'main'
|
||||
- 'development'
|
||||
|
||||
jobs:
|
||||
mkdocs-material:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: checkout sources
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: setup python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.10'
|
||||
|
||||
- name: install requirements
|
||||
run: |
|
||||
python -m \
|
||||
pip install -r requirements-mkdocs.txt
|
||||
|
||||
- name: confirm buildability
|
||||
run: |
|
||||
python -m \
|
||||
mkdocs build \
|
||||
--clean \
|
||||
--verbose
|
||||
|
||||
- name: deploy to gh-pages
|
||||
if: ${{ github.ref == 'refs/heads/main' }}
|
||||
run: |
|
||||
python -m \
|
||||
mkdocs gh-deploy \
|
||||
--clean \
|
||||
--force
|
||||
97
.github/workflows/test-dream-conda.yml
vendored
@@ -1,97 +0,0 @@
|
||||
name: Test Dream with Conda
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'main'
|
||||
- 'development'
|
||||
jobs:
|
||||
os_matrix:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ ubuntu-latest, macos-12 ]
|
||||
name: Test dream.py on ${{ matrix.os }} with conda
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- run: |
|
||||
echo The PR was merged
|
||||
- name: Set platform variables
|
||||
id: vars
|
||||
run: |
|
||||
# Note, can't "activate" via github action; specifying the env's python has the same effect
|
||||
if [ "$RUNNER_OS" = "macOS" ]; then
|
||||
echo "::set-output name=ENV_FILE::environment-mac.yml"
|
||||
echo "::set-output name=PYTHON_BIN::/usr/local/miniconda/envs/ldm/bin/python"
|
||||
elif [ "$RUNNER_OS" = "Linux" ]; then
|
||||
echo "::set-output name=ENV_FILE::environment.yml"
|
||||
echo "::set-output name=PYTHON_BIN::/usr/share/miniconda/envs/ldm/bin/python"
|
||||
fi
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
- name: Use Cached Stable Diffusion v1.4 Model
|
||||
id: cache-sd-v1-4
|
||||
uses: actions/cache@v3
|
||||
env:
|
||||
cache-name: cache-sd-v1-4
|
||||
with:
|
||||
path: models/ldm/stable-diffusion-v1/model.ckpt
|
||||
key: ${{ env.cache-name }}
|
||||
restore-keys: |
|
||||
${{ env.cache-name }}
|
||||
- name: Download Stable Diffusion v1.4 Model
|
||||
if: ${{ steps.cache-sd-v1-4.outputs.cache-hit != 'true' }}
|
||||
run: |
|
||||
if [ ! -e models/ldm/stable-diffusion-v1 ]; then
|
||||
mkdir -p models/ldm/stable-diffusion-v1
|
||||
fi
|
||||
if [ ! -e models/ldm/stable-diffusion-v1/model.ckpt ]; then
|
||||
curl -o models/ldm/stable-diffusion-v1/model.ckpt ${{ secrets.SD_V1_4_URL }}
|
||||
fi
|
||||
- name: Use Cached Dependencies
|
||||
id: cache-conda-env-ldm
|
||||
uses: actions/cache@v3
|
||||
env:
|
||||
cache-name: cache-conda-env-ldm
|
||||
with:
|
||||
path: ~/.conda/envs/ldm
|
||||
key: ${{ env.cache-name }}
|
||||
restore-keys: |
|
||||
${{ env.cache-name }}-${{ runner.os }}-${{ hashFiles(steps.vars.outputs.ENV_FILE) }}
|
||||
- name: Install Dependencies
|
||||
if: ${{ steps.cache-conda-env-ldm.outputs.cache-hit != 'true' }}
|
||||
run: |
|
||||
conda env create -f ${{ steps.vars.outputs.ENV_FILE }}
|
||||
- name: Use Cached Huggingface and Torch models
|
||||
id: cache-hugginface-torch
|
||||
uses: actions/cache@v3
|
||||
env:
|
||||
cache-name: cache-hugginface-torch
|
||||
with:
|
||||
path: ~/.cache
|
||||
key: ${{ env.cache-name }}
|
||||
restore-keys: |
|
||||
${{ env.cache-name }}-${{ hashFiles('scripts/preload_models.py') }}
|
||||
- name: Download Huggingface and Torch models
|
||||
if: ${{ steps.cache-hugginface-torch.outputs.cache-hit != 'true' }}
|
||||
run: |
|
||||
${{ steps.vars.outputs.PYTHON_BIN }} scripts/preload_models.py
|
||||
# - name: Run tmate
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# timeout-minutes: 30
|
||||
- name: Run the tests
|
||||
run: |
|
||||
# Note, can't "activate" via github action; specifying the env's python has the same effect
|
||||
if [ $(uname) = "Darwin" ]; then
|
||||
export PYTORCH_ENABLE_MPS_FALLBACK=1
|
||||
fi
|
||||
# Utterly hacky, but I don't know how else to do this
|
||||
if [[ ${{ github.ref }} == 'refs/heads/master' ]]; then
|
||||
time ${{ steps.vars.outputs.PYTHON_BIN }} scripts/dream.py --from_file tests/preflight_prompts.txt
|
||||
elif [[ ${{ github.ref }} == 'refs/heads/development' ]]; then
|
||||
time ${{ steps.vars.outputs.PYTHON_BIN }} scripts/dream.py --from_file tests/dev_prompts.txt
|
||||
fi
|
||||
mkdir -p outputs/img-samples
|
||||
- name: Archive results
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: results
|
||||
path: outputs/img-samples
|
||||
126
.github/workflows/test-invoke-conda.yml
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
name: Test invoke.py
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'main'
|
||||
- 'development'
|
||||
- 'fix-gh-actions-fork'
|
||||
pull_request:
|
||||
branches:
|
||||
- 'main'
|
||||
- 'development'
|
||||
|
||||
jobs:
|
||||
matrix:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
stable-diffusion-model:
|
||||
# - 'https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt'
|
||||
- 'https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt'
|
||||
os:
|
||||
- ubuntu-latest
|
||||
- macOS-12
|
||||
include:
|
||||
- os: ubuntu-latest
|
||||
environment-file: environment-lin-cuda.yml
|
||||
default-shell: bash -l {0}
|
||||
- os: macOS-12
|
||||
environment-file: environment-mac.yml
|
||||
default-shell: bash -l {0}
|
||||
# - stable-diffusion-model: https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt
|
||||
# stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1/sd-v1-4.ckpt
|
||||
# stable-diffusion-model-switch: stable-diffusion-1.4
|
||||
- stable-diffusion-model: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
|
||||
stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
|
||||
stable-diffusion-model-switch: stable-diffusion-1.5
|
||||
name: ${{ matrix.os }} with ${{ matrix.stable-diffusion-model-switch }}
|
||||
runs-on: ${{ matrix.os }}
|
||||
env:
|
||||
CONDA_ENV_NAME: invokeai
|
||||
defaults:
|
||||
run:
|
||||
shell: ${{ matrix.default-shell }}
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
id: checkout-sources
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: create models.yaml from example
|
||||
run: cp configs/models.yaml.example configs/models.yaml
|
||||
|
||||
- name: create environment.yml
|
||||
run: cp environments-and-requirements/${{ matrix.environment-file }} environment.yml
|
||||
|
||||
- name: Use cached conda packages
|
||||
id: use-cached-conda-packages
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/conda_pkgs_dir
|
||||
key: conda-pkgs-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles(matrix.environment-file) }}
|
||||
|
||||
- name: Activate Conda Env
|
||||
id: activate-conda-env
|
||||
uses: conda-incubator/setup-miniconda@v2
|
||||
with:
|
||||
activate-environment: ${{ env.CONDA_ENV_NAME }}
|
||||
environment-file: environment.yml
|
||||
miniconda-version: latest
|
||||
|
||||
- name: set test prompt to main branch validation
|
||||
if: ${{ github.ref == 'refs/heads/main' }}
|
||||
run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> $GITHUB_ENV
|
||||
|
||||
- name: set test prompt to development branch validation
|
||||
if: ${{ github.ref == 'refs/heads/development' }}
|
||||
run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> $GITHUB_ENV
|
||||
|
||||
- name: set test prompt to Pull Request validation
|
||||
if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
|
||||
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> $GITHUB_ENV
|
||||
|
||||
- name: Use Cached Stable Diffusion Model
|
||||
id: cache-sd-model
|
||||
uses: actions/cache@v3
|
||||
env:
|
||||
cache-name: cache-${{ matrix.stable-diffusion-model-switch }}
|
||||
with:
|
||||
path: ${{ matrix.stable-diffusion-model-dl-path }}
|
||||
key: ${{ env.cache-name }}
|
||||
|
||||
- name: Download ${{ matrix.stable-diffusion-model-switch }}
|
||||
id: download-stable-diffusion-model
|
||||
if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
|
||||
run: |
|
||||
[[ -d models/ldm/stable-diffusion-v1 ]] \
|
||||
|| mkdir -p models/ldm/stable-diffusion-v1
|
||||
curl \
|
||||
-H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
|
||||
-o ${{ matrix.stable-diffusion-model-dl-path }} \
|
||||
-L ${{ matrix.stable-diffusion-model }}
|
||||
|
||||
- name: run preload_models.py
|
||||
id: run-preload-models
|
||||
run: |
|
||||
python scripts/preload_models.py \
|
||||
--no-interactive
|
||||
|
||||
- name: Run the tests
|
||||
id: run-tests
|
||||
run: |
|
||||
time python scripts/invoke.py \
|
||||
--model ${{ matrix.stable-diffusion-model-switch }} \
|
||||
--from_file ${{ env.TEST_PROMPTS }}
|
||||
|
||||
- name: export conda env
|
||||
id: export-conda-env
|
||||
run: |
|
||||
mkdir -p outputs/img-samples
|
||||
conda env export --name ${{ env.CONDA_ENV_NAME }} > outputs/img-samples/environment-${{ runner.os }}-${{ runner.arch }}.yml
|
||||
|
||||
- name: Archive results
|
||||
id: archive-results
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: results_${{ matrix.os }}_${{ matrix.stable-diffusion-model-switch }}
|
||||
path: outputs/img-samples
|
||||
34
.gitignore
vendored
@@ -1,7 +1,11 @@
|
||||
# ignore default image save location and model symbolic link
|
||||
outputs/
|
||||
models/ldm/stable-diffusion-v1/model.ckpt
|
||||
ldm/dream/restoration/codeformer/weights
|
||||
**/restoration/codeformer/weights
|
||||
|
||||
# ignore user models config
|
||||
configs/models.user.yaml
|
||||
config/models.user.yml
|
||||
|
||||
# ignore the Anaconda/Miniconda installer used while building Docker image
|
||||
anaconda.sh
|
||||
@@ -180,7 +184,7 @@ src
|
||||
**/__pycache__/
|
||||
outputs
|
||||
|
||||
# Logs and associated folders
|
||||
# Logs and associated folders
|
||||
# created from generated embeddings.
|
||||
logs
|
||||
testtube
|
||||
@@ -190,12 +194,36 @@ checkpoints
|
||||
|
||||
# Let the frontend manage its own gitignore
|
||||
!frontend/*
|
||||
frontend/apt-get
|
||||
frontend/dist
|
||||
frontend/sudo
|
||||
frontend/update
|
||||
|
||||
# Scratch folder
|
||||
.scratch/
|
||||
.vscode/
|
||||
gfpgan/
|
||||
models/ldm/stable-diffusion-v1/model.sha256
|
||||
models/ldm/stable-diffusion-v1/*.sha256
|
||||
|
||||
|
||||
# GFPGAN model files
|
||||
gfpgan/
|
||||
|
||||
# config file (will be created by installer)
|
||||
configs/models.yaml
|
||||
|
||||
# weights (will be created by installer)
|
||||
models/ldm/stable-diffusion-v1/*.ckpt
|
||||
models/clipseg
|
||||
models/gfpgan
|
||||
|
||||
# ignore initfile
|
||||
invokeai.init
|
||||
|
||||
# ignore environment.yml and requirements.txt
|
||||
# these are links to the real files in environments-and-requirements
|
||||
environment.yml
|
||||
requirements.txt
|
||||
|
||||
# this may be present if the user created a venv
|
||||
invokeai
|
||||
|
||||
22
1-click-installer/create_installers.sh
Executable file
@@ -0,0 +1,22 @@
|
||||
#!/bin/bash
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
# make the installer zip for linux and mac
|
||||
rm -rf invokeAI
|
||||
mkdir -p invokeAI
|
||||
cp install.sh invokeAI
|
||||
cp readme.txt invokeAI
|
||||
|
||||
zip -r invokeAI-linux.zip invokeAI
|
||||
zip -r invokeAI-mac.zip invokeAI
|
||||
|
||||
# make the installer zip for windows
|
||||
rm -rf invokeAI
|
||||
mkdir -p invokeAI
|
||||
cp install.bat invokeAI
|
||||
cp readme.txt invokeAI
|
||||
|
||||
zip -r invokeAI-windows.zip invokeAI
|
||||
|
||||
echo "The installer zips are ready to be distributed.."
|
||||
116
1-click-installer/install.bat
Normal file
@@ -0,0 +1,116 @@
|
||||
@echo off
|
||||
|
||||
@rem This script will install git and conda (if not found on the PATH variable)
|
||||
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
|
||||
@rem For users who already have git and conda, this step will be skipped.
|
||||
|
||||
@rem Next, it'll checkout the project's git repo, if necessary.
|
||||
@rem Finally, it'll create the conda environment and preload the models.
|
||||
|
||||
@rem This enables a user to install this project without manually installing conda and git.
|
||||
|
||||
echo "Installing InvokeAI.."
|
||||
echo.
|
||||
|
||||
@rem config
|
||||
set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba
|
||||
set INSTALL_ENV_DIR=%cd%\installer_files\env
|
||||
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
|
||||
set REPO_URL=https://github.com/invoke-ai/InvokeAI.git
|
||||
set umamba_exists=F
|
||||
@rem Change the download URL to an InvokeAI repo's release URL
|
||||
|
||||
@rem figure out whether git and conda needs to be installed
|
||||
if exist "%INSTALL_ENV_DIR%" set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%
|
||||
|
||||
set PACKAGES_TO_INSTALL=
|
||||
|
||||
call conda --version >.tmp1 2>.tmp2
|
||||
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda
|
||||
|
||||
call git --version >.tmp1 2>.tmp2
|
||||
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git
|
||||
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >.tmp1 2>.tmp2
|
||||
if "%ERRORLEVEL%" EQU "0" set umamba_exists=T
|
||||
|
||||
@rem (if necessary) install git and conda into a contained environment
|
||||
if "%PACKAGES_TO_INSTALL%" NEQ "" (
|
||||
@rem download micromamba
|
||||
if "%umamba_exists%" == "F" (
|
||||
echo "Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe"
|
||||
|
||||
mkdir "%MAMBA_ROOT_PREFIX%"
|
||||
call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe"
|
||||
|
||||
@rem test the mamba binary
|
||||
echo Micromamba version:
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version
|
||||
)
|
||||
|
||||
@rem create the installer env
|
||||
if not exist "%INSTALL_ENV_DIR%" (
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" create -y --prefix "%INSTALL_ENV_DIR%"
|
||||
)
|
||||
|
||||
echo "Packages to install:%PACKAGES_TO_INSTALL%"
|
||||
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%
|
||||
|
||||
if not exist "%INSTALL_ENV_DIR%" (
|
||||
echo "There was a problem while installing%PACKAGES_TO_INSTALL% using micromamba. Cannot continue."
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
)
|
||||
|
||||
set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%
|
||||
|
||||
@rem get the repo (and load into the current directory)
|
||||
if not exist ".git" (
|
||||
call git init
|
||||
call git config --local init.defaultBranch main
|
||||
call git remote add origin %REPO_URL%
|
||||
call git fetch
|
||||
# call git checkout origin/main -ft
|
||||
call git checkout origin/release-candidate-2-1-3 -ft
|
||||
)
|
||||
|
||||
@rem activate the base env
|
||||
call conda activate
|
||||
|
||||
@rem create the environment
|
||||
call conda env remove -n invokeai
|
||||
cp environments-and-requirements\environment-win-cuda.yml environment.yml
|
||||
call conda env create
|
||||
if "%ERRORLEVEL%" NEQ "0" (
|
||||
echo ""
|
||||
echo "Something went wrong while installing Python libraries and cannot continue.
|
||||
echo "Please visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
|
||||
echo "installation methods."
|
||||
echo "Press any key to continue"
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
|
||||
call conda activate invokeai
|
||||
@rem preload the models
|
||||
call python scripts\preload_models.py
|
||||
if "%ERRORLEVEL%" NEQ "0" (
|
||||
echo ""
|
||||
echo "The preload_models.py script crashed or was cancelled."
|
||||
echo "InvokeAI is not ready to run. To run preload_models.py again,"
|
||||
echo "run the command 'update.bat' in this directory."
|
||||
echo "Press any key to continue"
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
|
||||
@rem tell the user their next steps
|
||||
echo ""
|
||||
echo "* InvokeAI installed successfully *"
|
||||
echo "You can now start generating images by double-clicking the 'invoke.bat' file (inside this folder)
|
||||
echo "Press any key to continue"
|
||||
pause
|
||||
exit 0
|
||||
|
||||
135
1-click-installer/install.sh
Executable file
@@ -0,0 +1,135 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script will install git and conda (if not found on the PATH variable)
|
||||
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
|
||||
# For users who already have git and conda, this step will be skipped.
|
||||
|
||||
# Next, it'll checkout the project's git repo, if necessary.
|
||||
# Finally, it'll create the conda environment and preload the models.
|
||||
|
||||
# This enables a user to install this project without manually installing conda and git.
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
echo "Installing InvokeAI.."
|
||||
echo ""
|
||||
|
||||
OS_NAME=$(uname -s)
|
||||
case "${OS_NAME}" in
|
||||
Linux*) OS_NAME="linux";;
|
||||
Darwin*) OS_NAME="mac";;
|
||||
*) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
|
||||
esac
|
||||
|
||||
OS_ARCH=$(uname -m)
|
||||
case "${OS_ARCH}" in
|
||||
x86_64*) OS_ARCH="64";;
|
||||
arm64*) OS_ARCH="arm64";;
|
||||
*) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
|
||||
esac
|
||||
|
||||
# https://mamba.readthedocs.io/en/latest/installation.html
|
||||
if [ "$OS_NAME" == "linux" ] && [ "$OS_ARCH" == "arm64" ]; then OS_ARCH="aarch64"; fi
|
||||
|
||||
# config
|
||||
export MAMBA_ROOT_PREFIX="$(pwd)/installer_files/mamba"
|
||||
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
|
||||
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${OS_NAME}-${OS_ARCH}/latest"
|
||||
REPO_URL="https://github.com/invoke-ai/InvokeAI.git"
|
||||
umamba_exists="F"
|
||||
|
||||
# figure out whether git and conda needs to be installed
|
||||
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi
|
||||
|
||||
PACKAGES_TO_INSTALL=""
|
||||
if ! $(which conda) -V &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL conda"; fi
|
||||
if ! which git &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi
|
||||
|
||||
if "$MAMBA_ROOT_PREFIX/micromamba" --version &>/dev/null; then umamba_exists="T"; fi
|
||||
|
||||
# (if necessary) install git and conda into a contained environment
|
||||
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
|
||||
# download micromamba
|
||||
if [ "$umamba_exists" == "F" ]; then
|
||||
echo "Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to $MAMBA_ROOT_PREFIX/micromamba"
|
||||
|
||||
mkdir -p "$MAMBA_ROOT_PREFIX"
|
||||
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvj bin/micromamba -O > "$MAMBA_ROOT_PREFIX/micromamba"
|
||||
|
||||
chmod u+x "$MAMBA_ROOT_PREFIX/micromamba"
|
||||
|
||||
# test the mamba binary
|
||||
echo "Micromamba version:"
|
||||
"$MAMBA_ROOT_PREFIX/micromamba" --version
|
||||
fi
|
||||
|
||||
# create the installer env
|
||||
if [ ! -e "$INSTALL_ENV_DIR" ]; then
|
||||
"$MAMBA_ROOT_PREFIX/micromamba" create -y --prefix "$INSTALL_ENV_DIR"
|
||||
fi
|
||||
|
||||
echo "Packages to install:$PACKAGES_TO_INSTALL"
|
||||
|
||||
"$MAMBA_ROOT_PREFIX/micromamba" install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge $PACKAGES_TO_INSTALL
|
||||
|
||||
if [ ! -e "$INSTALL_ENV_DIR" ]; then
|
||||
echo "There was a problem while initializing micromamba. Cannot continue."
|
||||
exit
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi
|
||||
|
||||
# get the repo (and load into the current directory)
|
||||
if [ ! -e ".git" ]; then
|
||||
git init
|
||||
git config --local init.defaultBranch main
|
||||
git remote add origin "$REPO_URL"
|
||||
git fetch
|
||||
git checkout origin/release-candidate-2-1-3 -ft
|
||||
fi
|
||||
|
||||
# create the environment
|
||||
CONDA_BASEPATH=$(conda info --base)
|
||||
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
|
||||
|
||||
conda activate
|
||||
if [ "$OS_NAME" == "mac" ]; then
|
||||
echo "Macintosh system detected. Installing MPS and CPU support."
|
||||
ln -sf environments-and-requirements/environment-mac.yml environment.yml
|
||||
else
|
||||
if (lsmod | grep amdgpu) &>/dev/null ; then
|
||||
echo "Linux system with AMD GPU driver detected. Installing ROCm and CPU support"
|
||||
ln -sf environments-and-requirements/environment-lin-amd.yml environment.yml
|
||||
else
|
||||
echo "Linux system detected. Installing CUDA and CPU support."
|
||||
ln -sf environments-and-requirements/environment-lin-cuda.yml environment.yml
|
||||
fi
|
||||
fi
|
||||
conda env update
|
||||
|
||||
status=$?
|
||||
|
||||
if test $status -ne 0
|
||||
then
|
||||
echo "Something went wrong while installing Python libraries and cannot continue."
|
||||
echo "Please visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
|
||||
echo "installation methods"
|
||||
else
|
||||
conda activate invokeai
|
||||
# preload the models
|
||||
echo "Calling the preload_models.py script"
|
||||
python scripts/preload_models.py
|
||||
status=$?
|
||||
if test $status -ne 0
|
||||
then
|
||||
echo "The preload_models.py script crashed or was cancelled."
|
||||
echo "InvokeAI is not ready to run. Try again by running"
|
||||
echo "update.sh in this directory."
|
||||
else
|
||||
# tell the user their next steps
|
||||
echo "You can now start generating images by running invoke.sh (inside this folder), using ./invoke.sh"
|
||||
fi
|
||||
fi
|
||||
|
||||
conda activate invokeai
|
||||
11
1-click-installer/readme.txt
Normal file
@@ -0,0 +1,11 @@
|
||||
InvokeAI
|
||||
|
||||
Project homepage: https://github.com/invoke-ai/InvokeAI
|
||||
|
||||
Installation on Windows:
|
||||
Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).
|
||||
|
||||
Installation on Linux and Mac:
|
||||
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).
|
||||
|
||||
After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh' file (on Linux/Mac) to start InvokeAI.
|
||||
13
LICENSE
@@ -1,17 +1,6 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
|
||||
|
||||
This software is derived from a fork of the source code available from
|
||||
https://github.com/pesser/stable-diffusion and
|
||||
https://github.com/CompViz/stable-diffusion. They carry the following
|
||||
copyrights:
|
||||
|
||||
Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
|
||||
Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors
|
||||
|
||||
Please see individual source code files for copyright and authorship
|
||||
attributions.
|
||||
Copyright (c) 2022 InvokeAI Team
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
||||
148
README.md
@@ -2,14 +2,7 @@
|
||||
|
||||
# InvokeAI: A Stable Diffusion Toolkit
|
||||
|
||||
_Note: This fork is rapidly evolving. Please use the
|
||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to
|
||||
report bugs and make feature requests. Be sure to use the provided
|
||||
templates. They will help aid diagnose issues faster._
|
||||
|
||||
_This repository was formally known as lstein/stable-diffusion_
|
||||
|
||||
# **Table of Contents**
|
||||
_Formerly known as lstein/stable-diffusion_
|
||||
|
||||

|
||||
|
||||
@@ -24,7 +17,7 @@ _This repository was formally known as lstein/stable-diffusion_
|
||||
[CI checks on dev badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github
|
||||
[CI checks on dev link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
|
||||
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
|
||||
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-dream-conda.yml
|
||||
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
|
||||
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
|
||||
[discord link]: https://discord.gg/ZmtBAhwWhy
|
||||
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
|
||||
@@ -41,10 +34,18 @@ _This repository was formally known as lstein/stable-diffusion_
|
||||
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
|
||||
</div>
|
||||
|
||||
This is a fork of [CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion), the open
|
||||
source text-to-image generator. It provides a streamlined process with various new features and
|
||||
options to aid the image generation process. It runs on Windows, Mac and Linux machines, and runs on
|
||||
GPU cards with as little as 4 GB or RAM.
|
||||
This is a fork of
|
||||
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion),
|
||||
the open source text-to-image generator. It provides a streamlined
|
||||
process with various new features and options to aid the image
|
||||
generation process. It runs on Windows, Mac and Linux machines, with
|
||||
GPU cards with as little as 4 GB of RAM. It provides both a polished
|
||||
Web interface (see below), and an easy-to-use command-line interface.
|
||||
|
||||
**Quick links**: [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
|
||||
|
||||
<div align="center"><img src="docs/assets/invoke-web-server-1.png" width=640></div>
|
||||
|
||||
|
||||
_Note: This fork is rapidly evolving. Please use the
|
||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
|
||||
@@ -67,11 +68,11 @@ requests. Be sure to use the provided templates. They will help aid diagnose iss
|
||||
This fork is supported across multiple platforms. You can find individual installation instructions
|
||||
below.
|
||||
|
||||
- #### [Linux](docs/installation/INSTALL_LINUX.md)
|
||||
- #### [Linux](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_LINUX/)
|
||||
|
||||
- #### [Windows](docs/installation/INSTALL_WINDOWS.md)
|
||||
- #### [Windows](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_WINDOWS/)
|
||||
|
||||
- #### [Macintosh](docs/installation/INSTALL_MAC.md)
|
||||
- #### [Macintosh](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_MAC/)
|
||||
|
||||
### Hardware Requirements
|
||||
|
||||
@@ -88,84 +89,87 @@ You wil need one of the following:
|
||||
|
||||
#### Disk
|
||||
|
||||
- At least 6 GB of free disk space for the machine learning model, Python, and all its dependencies.
|
||||
- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.
|
||||
|
||||
#### Note
|
||||
**Note**
|
||||
|
||||
If you have a Nvidia 10xx series card (e.g. the 1080ti), please
|
||||
run the dream script in full-precision mode as shown below.
|
||||
|
||||
Similarly, specify full-precision mode on Apple M1 hardware.
|
||||
|
||||
Precision is auto configured based on the device. If however you encounter
|
||||
errors like 'expected type Float but found Half' or 'not implemented for Half'
|
||||
you can try starting `dream.py` with the `--precision=float32` flag:
|
||||
you can try starting `invoke.py` with the `--precision=float32` flag:
|
||||
|
||||
```bash
|
||||
(ldm) ~/stable-diffusion$ python scripts/dream.py --precision=float32
|
||||
(invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32
|
||||
```
|
||||
|
||||
### Features
|
||||
|
||||
#### Major Features
|
||||
|
||||
- [Interactive Command Line Interface](docs/features/CLI.md)
|
||||
- [Image To Image](docs/features/IMG2IMG.md)
|
||||
- [Inpainting Support](docs/features/INPAINTING.md)
|
||||
- [Outpainting Support](docs/features/OUTPAINTING.md)
|
||||
- [Upscaling, face-restoration and outpainting](docs/features/POSTPROCESS.md)
|
||||
- [Seamless Tiling](docs/features/OTHER.md#seamless-tiling)
|
||||
- [Google Colab](docs/features/OTHER.md#google-colab)
|
||||
- [Web Server](docs/features/WEB.md)
|
||||
- [Reading Prompts From File](docs/features/PROMPTS.md#reading-prompts-from-a-file)
|
||||
- [Shortcut: Reusing Seeds](docs/features/OTHER.md#shortcuts-reusing-seeds)
|
||||
- [Prompt Blending](docs/features/PROMPTS.md#prompt-blending)
|
||||
- [Thresholding and Perlin Noise Initialization Options](/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options)
|
||||
- [Negative/Unconditioned Prompts](docs/features/PROMPTS.md#negative-and-unconditioned-prompts)
|
||||
- [Variations](docs/features/VARIATIONS.md)
|
||||
- [Personalizing Text-to-Image Generation](docs/features/TEXTUAL_INVERSION.md)
|
||||
- [Simplified API for text to image generation](docs/features/OTHER.md#simplified-api)
|
||||
- [Web Server](https://invoke-ai.github.io/InvokeAI/features/WEB/)
|
||||
- [Interactive Command Line Interface](https://invoke-ai.github.io/InvokeAI/features/CLI/)
|
||||
- [Image To Image](https://invoke-ai.github.io/InvokeAI/features/IMG2IMG/)
|
||||
- [Inpainting Support](https://invoke-ai.github.io/InvokeAI/features/INPAINTING/)
|
||||
- [Outpainting Support](https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/)
|
||||
- [Upscaling, face-restoration and outpainting](https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/)
|
||||
- [Reading Prompts From File](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#reading-prompts-from-a-file)
|
||||
- [Prompt Blending](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#prompt-blending)
|
||||
- [Thresholding and Perlin Noise Initialization Options](https://invoke-ai.github.io/InvokeAI/features/OTHER/#thresholding-and-perlin-noise-initialization-options)
|
||||
- [Negative/Unconditioned Prompts](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts)
|
||||
- [Variations](https://invoke-ai.github.io/InvokeAI/features/VARIATIONS/)
|
||||
- [Personalizing Text-to-Image Generation](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/)
|
||||
- [Simplified API for text to image generation](https://invoke-ai.github.io/InvokeAI/features/OTHER/#simplified-api)
|
||||
|
||||
#### Other Features
|
||||
|
||||
- [Creating Transparent Regions for Inpainting](docs/features/INPAINTING.md#creating-transparent-regions-for-inpainting)
|
||||
- [Preload Models](docs/features/OTHER.md#preload-models)
|
||||
- [Google Colab](https://invoke-ai.github.io/InvokeAI/features/OTHER/#google-colab)
|
||||
- [Seamless Tiling](https://invoke-ai.github.io/InvokeAI/features/OTHER/#seamless-tiling)
|
||||
- [Shortcut: Reusing Seeds](https://invoke-ai.github.io/InvokeAI/features/OTHER/#shortcuts-reusing-seeds)
|
||||
- [Preload Models](https://invoke-ai.github.io/InvokeAI/features/OTHER/#preload-models)
|
||||
|
||||
### Latest Changes
|
||||
|
||||
- vNEXT (TODO 2022)
|
||||
- v2.0.1 (13 October 2022)
|
||||
- fix noisy images at high step count when using k* samplers
|
||||
- dream.py script now calls invoke.py module directly rather than
|
||||
via a new python process (which could break the environment)
|
||||
|
||||
- Deprecated `--full_precision` / `-F`. Simply omit it and `dream.py` will auto
|
||||
- v2.0.0 (9 October 2022)
|
||||
|
||||
- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
|
||||
for backward compatibility.
|
||||
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
|
||||
- Support for <a href="https://invoke-ai.github.io/InvokeAI/features/INPAINTING/">inpainting</a> and <a href="https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/">outpainting</a>
|
||||
- img2img runs on all k* samplers
|
||||
- Support for <a href="https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts">negative prompts</a>
|
||||
- Support for CodeFormer face reconstruction
|
||||
- Support for Textual Inversion on Macintoshes
|
||||
- Support in both WebGUI and CLI for <a href="https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/">post-processing of previously-generated images</a>
|
||||
using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
|
||||
and "embiggen" upscaling. See the `!fix` command.
|
||||
- New `--hires` option on `invoke>` line allows <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/#txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
|
||||
- New `--perlin` and `--threshold` options allow you to add and control variation
|
||||
during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>
|
||||
- Extensive metadata now written into PNG files, allowing reliable regeneration of images
|
||||
and tweaking of previous settings.
|
||||
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
|
||||
- Improved <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/">command-line completion behavior</a>.
|
||||
New commands added:
|
||||
- List command-line history with `!history`
|
||||
- Search command-line history with `!search`
|
||||
- Clear history with `!clear`
|
||||
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
|
||||
configure. To switch away from auto use the new flag like `--precision=float32`.
|
||||
|
||||
- v1.14 (11 September 2022)
|
||||
|
||||
- Memory optimizations for small-RAM cards. 512x512 now possible on 4 GB GPUs.
|
||||
- Full support for Apple hardware with M1 or M2 chips.
|
||||
- Add "seamless mode" for circular tiling of image. Generates beautiful effects.
|
||||
([prixt](https://github.com/prixt)).
|
||||
- Inpainting support.
|
||||
- Improved web server GUI.
|
||||
- Lots of code and documentation cleanups.
|
||||
|
||||
- v1.13 (3 September 2022
|
||||
|
||||
- Support image variations (see [VARIATIONS](docs/features/VARIATIONS.md)
|
||||
([Kevin Gibbons](https://github.com/bakkot) and many contributors and reviewers)
|
||||
- Supports a Google Colab notebook for a standalone server running on Google hardware
|
||||
[Arturo Mendivil](https://github.com/artmen1516)
|
||||
- WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling
|
||||
[Kevin Gibbons](https://github.com/bakkot)
|
||||
- WebUI supports incremental display of in-progress images during generation
|
||||
[Kevin Gibbons](https://github.com/bakkot)
|
||||
- A new configuration file scheme that allows new models (including upcoming
|
||||
stable-diffusion-v1.5) to be added without altering the code.
|
||||
([David Wager](https://github.com/maddavid12))
|
||||
- Can specify --grid on dream.py command line as the default.
|
||||
- Miscellaneous internal bug and stability fixes.
|
||||
- Works on M1 Apple hardware.
|
||||
- Multiple bug fixes.
|
||||
|
||||
For older changelogs, please visit the **[CHANGELOG](docs/features/CHANGELOG.md)**.
|
||||
For older changelogs, please visit the **[CHANGELOG](https://invoke-ai.github.io/InvokeAI/CHANGELOG#v114-11-september-2022)**.
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
Please check out our **[Q&A](docs/help/TROUBLESHOOT.md)** to get solutions for common installation
|
||||
Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
|
||||
problems and other issues.
|
||||
|
||||
# Contributing
|
||||
@@ -183,7 +187,7 @@ changes.
|
||||
### Contributors
|
||||
|
||||
This fork is a combined effort of various people from across the world.
|
||||
[Check out the list of all these amazing people](docs/other/CONTRIBUTORS.md). We thank them for
|
||||
[Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for
|
||||
their time, hard work and effort.
|
||||
|
||||
### Support
|
||||
@@ -197,4 +201,4 @@ Original portions of the software are Copyright (c) 2020
|
||||
### Further Reading
|
||||
|
||||
Please see the original README for more information on this software and underlying algorithm,
|
||||
located in the file [README-CompViz.md](docs/other/README-CompViz.md).
|
||||
located in the file [README-CompViz.md](https://invoke-ai.github.io/InvokeAI/other/README-CompViz/).
|
||||
|
||||
BIN
assets/caution.png
Normal file
|
After Width: | Height: | Size: 33 KiB |
|
Before Width: | Height: | Size: 14 KiB After Width: | Height: | Size: 14 KiB |
|
Before Width: | Height: | Size: 466 KiB After Width: | Height: | Size: 466 KiB |
|
Before Width: | Height: | Size: 7.4 KiB After Width: | Height: | Size: 7.4 KiB |
|
Before Width: | Height: | Size: 539 KiB After Width: | Height: | Size: 539 KiB |
|
Before Width: | Height: | Size: 7.6 KiB After Width: | Height: | Size: 7.6 KiB |
|
Before Width: | Height: | Size: 450 KiB After Width: | Height: | Size: 450 KiB |
|
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 12 KiB |
|
Before Width: | Height: | Size: 553 KiB After Width: | Height: | Size: 553 KiB |
|
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 12 KiB |
|
Before Width: | Height: | Size: 418 KiB After Width: | Height: | Size: 418 KiB |
|
Before Width: | Height: | Size: 6.1 KiB After Width: | Height: | Size: 6.1 KiB |
|
Before Width: | Height: | Size: 542 KiB After Width: | Height: | Size: 542 KiB |
|
Before Width: | Height: | Size: 9.5 KiB After Width: | Height: | Size: 9.5 KiB |
|
Before Width: | Height: | Size: 395 KiB After Width: | Height: | Size: 395 KiB |
|
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 12 KiB |
|
Before Width: | Height: | Size: 465 KiB After Width: | Height: | Size: 465 KiB |
|
Before Width: | Height: | Size: 7.8 KiB After Width: | Height: | Size: 7.8 KiB |
@@ -1,6 +1,6 @@
|
||||
import argparse
|
||||
import os
|
||||
from ldm.dream.args import PRECISION_CHOICES
|
||||
from ldm.invoke.args import PRECISION_CHOICES
|
||||
|
||||
|
||||
def create_cmd_parser():
|
||||
|
||||
@@ -15,7 +15,7 @@ SAMPLER_CHOICES = [
|
||||
|
||||
def parameters_to_command(params):
|
||||
"""
|
||||
Converts dict of parameters into a `dream.py` REPL command.
|
||||
Converts dict of parameters into a `invoke.py` REPL command.
|
||||
"""
|
||||
|
||||
switches = list()
|
||||
@@ -36,6 +36,8 @@ def parameters_to_command(params):
|
||||
switches.append(f'-A {params["sampler_name"]}')
|
||||
if "seamless" in params and params["seamless"] == True:
|
||||
switches.append(f"--seamless")
|
||||
if "hires_fix" in params and params["hires_fix"] == True:
|
||||
switches.append(f"--hires")
|
||||
if "init_img" in params and len(params["init_img"]) > 0:
|
||||
switches.append(f'-I {params["init_img"]}')
|
||||
if "init_mask" in params and len(params["init_mask"]) > 0:
|
||||
@@ -46,8 +48,14 @@ def parameters_to_command(params):
|
||||
switches.append(f'-f {params["strength"]}')
|
||||
if "fit" in params and params["fit"] == True:
|
||||
switches.append(f"--fit")
|
||||
if "gfpgan_strength" in params and params["gfpgan_strength"]:
|
||||
if "facetool" in params:
|
||||
switches.append(f'-ft {params["facetool"]}')
|
||||
if "facetool_strength" in params and params["facetool_strength"]:
|
||||
switches.append(f'-G {params["facetool_strength"]}')
|
||||
elif "gfpgan_strength" in params and params["gfpgan_strength"]:
|
||||
switches.append(f'-G {params["gfpgan_strength"]}')
|
||||
if "codeformer_fidelity" in params:
|
||||
switches.append(f'-cf {params["codeformer_fidelity"]}')
|
||||
if "upscale" in params and params["upscale"]:
|
||||
switches.append(f'-U {params["upscale"][0]} {params["upscale"][1]}')
|
||||
if "variation_amount" in params and params["variation_amount"] > 0:
|
||||
|
||||
@@ -1,821 +0,0 @@
|
||||
import mimetypes
|
||||
import transformers
|
||||
import json
|
||||
import os
|
||||
import traceback
|
||||
import eventlet
|
||||
import glob
|
||||
import shlex
|
||||
import math
|
||||
import shutil
|
||||
import sys
|
||||
|
||||
sys.path.append(".")
|
||||
|
||||
from argparse import ArgumentTypeError
|
||||
from modules.create_cmd_parser import create_cmd_parser
|
||||
|
||||
parser = create_cmd_parser()
|
||||
opt = parser.parse_args()
|
||||
|
||||
|
||||
from flask_socketio import SocketIO
|
||||
from flask import Flask, send_from_directory, url_for, jsonify
|
||||
from pathlib import Path
|
||||
from PIL import Image
|
||||
from pytorch_lightning import logging
|
||||
from threading import Event
|
||||
from uuid import uuid4
|
||||
from send2trash import send2trash
|
||||
|
||||
|
||||
from ldm.generate import Generate
|
||||
from ldm.dream.restoration import Restoration
|
||||
from ldm.dream.pngwriter import PngWriter, retrieve_metadata
|
||||
from ldm.dream.args import APP_ID, APP_VERSION, calculate_init_img_hash
|
||||
from ldm.dream.conditioning import split_weighted_subprompts
|
||||
|
||||
from modules.parameters import parameters_to_command
|
||||
|
||||
|
||||
"""
|
||||
USER CONFIG
|
||||
"""
|
||||
if opt.cors and "*" in opt.cors:
|
||||
raise ArgumentTypeError('"*" is not an allowed CORS origin')
|
||||
|
||||
|
||||
output_dir = "outputs/" # Base output directory for images
|
||||
host = opt.host # Web & socket.io host
|
||||
port = opt.port # Web & socket.io port
|
||||
verbose = opt.verbose # enables copious socket.io logging
|
||||
precision = opt.precision
|
||||
free_gpu_mem = opt.free_gpu_mem
|
||||
embedding_path = opt.embedding_path
|
||||
additional_allowed_origins = (
|
||||
opt.cors if opt.cors else []
|
||||
) # additional CORS allowed origins
|
||||
model = "stable-diffusion-1.4"
|
||||
|
||||
"""
|
||||
END USER CONFIG
|
||||
"""
|
||||
|
||||
|
||||
print("* Initializing, be patient...\n")
|
||||
|
||||
|
||||
"""
|
||||
SERVER SETUP
|
||||
"""
|
||||
|
||||
|
||||
# fix missing mimetypes on windows due to registry wonkiness
|
||||
mimetypes.add_type("application/javascript", ".js")
|
||||
mimetypes.add_type("text/css", ".css")
|
||||
|
||||
app = Flask(__name__, static_url_path="", static_folder="../frontend/dist/")
|
||||
|
||||
|
||||
app.config["OUTPUTS_FOLDER"] = "../outputs"
|
||||
|
||||
|
||||
@app.route("/outputs/<path:filename>")
|
||||
def outputs(filename):
|
||||
return send_from_directory(app.config["OUTPUTS_FOLDER"], filename)
|
||||
|
||||
|
||||
@app.route("/", defaults={"path": ""})
|
||||
def serve(path):
|
||||
return send_from_directory(app.static_folder, "index.html")
|
||||
|
||||
|
||||
logger = True if verbose else False
|
||||
engineio_logger = True if verbose else False
|
||||
|
||||
# default 1,000,000, needs to be higher for socketio to accept larger images
|
||||
max_http_buffer_size = 10000000
|
||||
|
||||
cors_allowed_origins = [f"http://{host}:{port}"] + additional_allowed_origins
|
||||
|
||||
socketio = SocketIO(
|
||||
app,
|
||||
logger=logger,
|
||||
engineio_logger=engineio_logger,
|
||||
max_http_buffer_size=max_http_buffer_size,
|
||||
cors_allowed_origins=cors_allowed_origins,
|
||||
ping_interval=(50, 50),
|
||||
ping_timeout=60,
|
||||
)
|
||||
|
||||
|
||||
"""
|
||||
END SERVER SETUP
|
||||
"""
|
||||
|
||||
|
||||
"""
|
||||
APP SETUP
|
||||
"""
|
||||
|
||||
|
||||
class CanceledException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
try:
|
||||
gfpgan, codeformer, esrgan = None, None, None
|
||||
from ldm.dream.restoration.base import Restoration
|
||||
|
||||
restoration = Restoration()
|
||||
gfpgan, codeformer = restoration.load_face_restore_models()
|
||||
esrgan = restoration.load_esrgan()
|
||||
|
||||
# coreformer.process(self, image, strength, device, seed=None, fidelity=0.75)
|
||||
|
||||
except (ModuleNotFoundError, ImportError):
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
print(">> You may need to install the ESRGAN and/or GFPGAN modules")
|
||||
|
||||
canceled = Event()
|
||||
|
||||
# reduce logging outputs to error
|
||||
transformers.logging.set_verbosity_error()
|
||||
logging.getLogger("pytorch_lightning").setLevel(logging.ERROR)
|
||||
|
||||
# Initialize and load model
|
||||
generate = Generate(
|
||||
model,
|
||||
precision=precision,
|
||||
embedding_path=embedding_path,
|
||||
)
|
||||
generate.free_gpu_mem = free_gpu_mem
|
||||
generate.load_model()
|
||||
|
||||
|
||||
# location for "finished" images
|
||||
result_path = os.path.join(output_dir, "img-samples/")
|
||||
|
||||
# temporary path for intermediates
|
||||
intermediate_path = os.path.join(result_path, "intermediates/")
|
||||
|
||||
# path for user-uploaded init images and masks
|
||||
init_image_path = os.path.join(result_path, "init-images/")
|
||||
mask_image_path = os.path.join(result_path, "mask-images/")
|
||||
|
||||
# txt log
|
||||
log_path = os.path.join(result_path, "dream_log.txt")
|
||||
|
||||
# make all output paths
|
||||
[
|
||||
os.makedirs(path, exist_ok=True)
|
||||
for path in [result_path, intermediate_path, init_image_path, mask_image_path]
|
||||
]
|
||||
|
||||
|
||||
"""
|
||||
END APP SETUP
|
||||
"""
|
||||
|
||||
|
||||
"""
|
||||
SOCKET.IO LISTENERS
|
||||
"""
|
||||
|
||||
|
||||
@socketio.on("requestSystemConfig")
|
||||
def handle_request_capabilities():
|
||||
print(f">> System config requested")
|
||||
config = get_system_config()
|
||||
socketio.emit("systemConfig", config)
|
||||
|
||||
|
||||
@socketio.on("requestImages")
|
||||
def handle_request_images(page=1, offset=0, last_mtime=None):
|
||||
chunk_size = 50
|
||||
|
||||
if last_mtime:
|
||||
print(f">> Latest images requested")
|
||||
else:
|
||||
print(
|
||||
f">> Page {page} of images requested (page size {chunk_size} offset {offset})"
|
||||
)
|
||||
|
||||
paths = glob.glob(os.path.join(result_path, "*.png"))
|
||||
sorted_paths = sorted(paths, key=lambda x: os.path.getmtime(x), reverse=True)
|
||||
|
||||
if last_mtime:
|
||||
image_paths = filter(lambda x: os.path.getmtime(x) > last_mtime, sorted_paths)
|
||||
else:
|
||||
|
||||
image_paths = sorted_paths[
|
||||
slice(chunk_size * (page - 1) + offset, chunk_size * page + offset)
|
||||
]
|
||||
page = page + 1
|
||||
|
||||
image_array = []
|
||||
|
||||
for path in image_paths:
|
||||
metadata = retrieve_metadata(path)
|
||||
image_array.append(
|
||||
{
|
||||
"url": path,
|
||||
"mtime": os.path.getmtime(path),
|
||||
"metadata": metadata["sd-metadata"],
|
||||
}
|
||||
)
|
||||
|
||||
socketio.emit(
|
||||
"galleryImages",
|
||||
{
|
||||
"images": image_array,
|
||||
"nextPage": page,
|
||||
"offset": offset,
|
||||
"onlyNewImages": True if last_mtime else False,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@socketio.on("generateImage")
|
||||
def handle_generate_image_event(
|
||||
generation_parameters, esrgan_parameters, gfpgan_parameters
|
||||
):
|
||||
print(
|
||||
f">> Image generation requested: {generation_parameters}\nESRGAN parameters: {esrgan_parameters}\nGFPGAN parameters: {gfpgan_parameters}"
|
||||
)
|
||||
generate_images(generation_parameters, esrgan_parameters, gfpgan_parameters)
|
||||
|
||||
|
||||
@socketio.on("runESRGAN")
|
||||
def handle_run_esrgan_event(original_image, esrgan_parameters):
|
||||
print(
|
||||
f'>> ESRGAN upscale requested for "{original_image["url"]}": {esrgan_parameters}'
|
||||
)
|
||||
progress = {
|
||||
"currentStep": 1,
|
||||
"totalSteps": 1,
|
||||
"currentIteration": 1,
|
||||
"totalIterations": 1,
|
||||
"currentStatus": "Preparing",
|
||||
"isProcessing": True,
|
||||
"currentStatusHasSteps": False,
|
||||
}
|
||||
|
||||
socketio.emit("progressUpdate", progress)
|
||||
eventlet.sleep(0)
|
||||
|
||||
image = Image.open(original_image["url"])
|
||||
|
||||
seed = (
|
||||
original_image["metadata"]["seed"]
|
||||
if "seed" in original_image["metadata"]
|
||||
else "unknown_seed"
|
||||
)
|
||||
|
||||
progress["currentStatus"] = "Upscaling"
|
||||
socketio.emit("progressUpdate", progress)
|
||||
eventlet.sleep(0)
|
||||
|
||||
image = esrgan.process(
|
||||
image=image,
|
||||
upsampler_scale=esrgan_parameters["upscale"][0],
|
||||
strength=esrgan_parameters["upscale"][1],
|
||||
seed=seed,
|
||||
)
|
||||
|
||||
progress["currentStatus"] = "Saving image"
|
||||
socketio.emit("progressUpdate", progress)
|
||||
eventlet.sleep(0)
|
||||
|
||||
esrgan_parameters["seed"] = seed
|
||||
metadata = parameters_to_post_processed_image_metadata(
|
||||
parameters=esrgan_parameters,
|
||||
original_image_path=original_image["url"],
|
||||
type="esrgan",
|
||||
)
|
||||
command = parameters_to_command(esrgan_parameters)
|
||||
|
||||
path = save_image(image, command, metadata, result_path, postprocessing="esrgan")
|
||||
|
||||
write_log_message(f'[Upscaled] "{original_image["url"]}" > "{path}": {command}')
|
||||
|
||||
progress["currentStatus"] = "Finished"
|
||||
progress["currentStep"] = 0
|
||||
progress["totalSteps"] = 0
|
||||
progress["currentIteration"] = 0
|
||||
progress["totalIterations"] = 0
|
||||
progress["isProcessing"] = False
|
||||
socketio.emit("progressUpdate", progress)
|
||||
eventlet.sleep(0)
|
||||
|
||||
socketio.emit(
|
||||
"esrganResult",
|
||||
{
|
||||
"url": os.path.relpath(path),
|
||||
"mtime": os.path.getmtime(path),
|
||||
"metadata": metadata,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@socketio.on("runGFPGAN")
|
||||
def handle_run_gfpgan_event(original_image, gfpgan_parameters):
|
||||
print(
|
||||
f'>> GFPGAN face fix requested for "{original_image["url"]}": {gfpgan_parameters}'
|
||||
)
|
||||
progress = {
|
||||
"currentStep": 1,
|
||||
"totalSteps": 1,
|
||||
"currentIteration": 1,
|
||||
"totalIterations": 1,
|
||||
"currentStatus": "Preparing",
|
||||
"isProcessing": True,
|
||||
"currentStatusHasSteps": False,
|
||||
}
|
||||
|
||||
socketio.emit("progressUpdate", progress)
|
||||
eventlet.sleep(0)
|
||||
|
||||
image = Image.open(original_image["url"])
|
||||
|
||||
seed = (
|
||||
original_image["metadata"]["seed"]
|
||||
if "seed" in original_image["metadata"]
|
||||
else "unknown_seed"
|
||||
)
|
||||
|
||||
progress["currentStatus"] = "Fixing faces"
|
||||
socketio.emit("progressUpdate", progress)
|
||||
eventlet.sleep(0)
|
||||
|
||||
image = gfpgan.process(
|
||||
image=image, strength=gfpgan_parameters["gfpgan_strength"], seed=seed
|
||||
)
|
||||
|
||||
progress["currentStatus"] = "Saving image"
|
||||
socketio.emit("progressUpdate", progress)
|
||||
eventlet.sleep(0)
|
||||
|
||||
gfpgan_parameters["seed"] = seed
|
||||
metadata = parameters_to_post_processed_image_metadata(
|
||||
parameters=gfpgan_parameters,
|
||||
original_image_path=original_image["url"],
|
||||
type="gfpgan",
|
||||
)
|
||||
command = parameters_to_command(gfpgan_parameters)
|
||||
|
||||
path = save_image(image, command, metadata, result_path, postprocessing="gfpgan")
|
||||
|
||||
write_log_message(f'[Fixed faces] "{original_image["url"]}" > "{path}": {command}')
|
||||
|
||||
progress["currentStatus"] = "Finished"
|
||||
progress["currentStep"] = 0
|
||||
progress["totalSteps"] = 0
|
||||
progress["currentIteration"] = 0
|
||||
progress["totalIterations"] = 0
|
||||
progress["isProcessing"] = False
|
||||
socketio.emit("progressUpdate", progress)
|
||||
eventlet.sleep(0)
|
||||
|
||||
socketio.emit(
|
||||
"gfpganResult",
|
||||
{
|
||||
"url": os.path.relpath(path),
|
||||
"mtime": os.path.mtime(path),
|
||||
"metadata": metadata,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@socketio.on("cancel")
|
||||
def handle_cancel():
|
||||
print(f">> Cancel processing requested")
|
||||
canceled.set()
|
||||
socketio.emit("processingCanceled")
|
||||
|
||||
|
||||
# TODO: I think this needs a safety mechanism.
|
||||
@socketio.on("deleteImage")
|
||||
def handle_delete_image(path, uuid):
|
||||
print(f'>> Delete requested "{path}"')
|
||||
send2trash(path)
|
||||
socketio.emit("imageDeleted", {"url": path, "uuid": uuid})
|
||||
|
||||
|
||||
# TODO: I think this needs a safety mechanism.
|
||||
@socketio.on("uploadInitialImage")
|
||||
def handle_upload_initial_image(bytes, name):
|
||||
print(f'>> Init image upload requested "{name}"')
|
||||
uuid = uuid4().hex
|
||||
split = os.path.splitext(name)
|
||||
name = f"{split[0]}.{uuid}{split[1]}"
|
||||
file_path = os.path.join(init_image_path, name)
|
||||
os.makedirs(os.path.dirname(file_path), exist_ok=True)
|
||||
newFile = open(file_path, "wb")
|
||||
newFile.write(bytes)
|
||||
socketio.emit("initialImageUploaded", {"url": file_path, "uuid": ""})
|
||||
|
||||
|
||||
# TODO: I think this needs a safety mechanism.
|
||||
@socketio.on("uploadMaskImage")
|
||||
def handle_upload_mask_image(bytes, name):
|
||||
print(f'>> Mask image upload requested "{name}"')
|
||||
uuid = uuid4().hex
|
||||
split = os.path.splitext(name)
|
||||
name = f"{split[0]}.{uuid}{split[1]}"
|
||||
file_path = os.path.join(mask_image_path, name)
|
||||
os.makedirs(os.path.dirname(file_path), exist_ok=True)
|
||||
newFile = open(file_path, "wb")
|
||||
newFile.write(bytes)
|
||||
socketio.emit("maskImageUploaded", {"url": file_path, "uuid": ""})
|
||||
|
||||
|
||||
"""
|
||||
END SOCKET.IO LISTENERS
|
||||
"""
|
||||
|
||||
|
||||
"""
|
||||
ADDITIONAL FUNCTIONS
|
||||
"""
|
||||
|
||||
|
||||
def get_system_config():
|
||||
return {
|
||||
"model": "stable diffusion",
|
||||
"model_id": model,
|
||||
"model_hash": generate.model_hash,
|
||||
"app_id": APP_ID,
|
||||
"app_version": APP_VERSION,
|
||||
}
|
||||
|
||||
|
||||
def parameters_to_post_processed_image_metadata(parameters, original_image_path, type):
|
||||
# top-level metadata minus `image` or `images`
|
||||
metadata = get_system_config()
|
||||
|
||||
orig_hash = calculate_init_img_hash(original_image_path)
|
||||
|
||||
image = {"orig_path": original_image_path, "orig_hash": orig_hash}
|
||||
|
||||
if type == "esrgan":
|
||||
image["type"] = "esrgan"
|
||||
image["scale"] = parameters["upscale"][0]
|
||||
image["strength"] = parameters["upscale"][1]
|
||||
elif type == "gfpgan":
|
||||
image["type"] = "gfpgan"
|
||||
image["strength"] = parameters["gfpgan_strength"]
|
||||
else:
|
||||
raise TypeError(f"Invalid type: {type}")
|
||||
|
||||
metadata["image"] = image
|
||||
return metadata
|
||||
|
||||
|
||||
def parameters_to_generated_image_metadata(parameters):
|
||||
# top-level metadata minus `image` or `images`
|
||||
|
||||
metadata = get_system_config()
|
||||
# remove any image keys not mentioned in RFC #266
|
||||
rfc266_img_fields = [
|
||||
"type",
|
||||
"postprocessing",
|
||||
"sampler",
|
||||
"prompt",
|
||||
"seed",
|
||||
"variations",
|
||||
"steps",
|
||||
"cfg_scale",
|
||||
"threshold",
|
||||
"perlin",
|
||||
"step_number",
|
||||
"width",
|
||||
"height",
|
||||
"extra",
|
||||
"seamless",
|
||||
]
|
||||
|
||||
rfc_dict = {}
|
||||
|
||||
for item in parameters.items():
|
||||
key, value = item
|
||||
if key in rfc266_img_fields:
|
||||
rfc_dict[key] = value
|
||||
|
||||
postprocessing = []
|
||||
|
||||
# 'postprocessing' is either null or an
|
||||
if "gfpgan_strength" in parameters:
|
||||
|
||||
postprocessing.append(
|
||||
{"type": "gfpgan", "strength": float(parameters["gfpgan_strength"])}
|
||||
)
|
||||
|
||||
if "upscale" in parameters:
|
||||
postprocessing.append(
|
||||
{
|
||||
"type": "esrgan",
|
||||
"scale": int(parameters["upscale"][0]),
|
||||
"strength": float(parameters["upscale"][1]),
|
||||
}
|
||||
)
|
||||
|
||||
rfc_dict["postprocessing"] = postprocessing if len(postprocessing) > 0 else None
|
||||
|
||||
# semantic drift
|
||||
rfc_dict["sampler"] = parameters["sampler_name"]
|
||||
|
||||
# display weighted subprompts (liable to change)
|
||||
subprompts = split_weighted_subprompts(parameters["prompt"])
|
||||
subprompts = [{"prompt": x[0], "weight": x[1]} for x in subprompts]
|
||||
rfc_dict["prompt"] = subprompts
|
||||
|
||||
# 'variations' should always exist and be an array, empty or consisting of {'seed': seed, 'weight': weight} pairs
|
||||
variations = []
|
||||
|
||||
if "with_variations" in parameters:
|
||||
variations = [
|
||||
{"seed": x[0], "weight": x[1]} for x in parameters["with_variations"]
|
||||
]
|
||||
|
||||
rfc_dict["variations"] = variations
|
||||
|
||||
if "init_img" in parameters:
|
||||
rfc_dict["type"] = "img2img"
|
||||
rfc_dict["strength"] = parameters["strength"]
|
||||
rfc_dict["fit"] = parameters["fit"] # TODO: Noncompliant
|
||||
rfc_dict["orig_hash"] = calculate_init_img_hash(parameters["init_img"])
|
||||
rfc_dict["init_image_path"] = parameters["init_img"] # TODO: Noncompliant
|
||||
rfc_dict["sampler"] = "ddim" # TODO: FIX ME WHEN IMG2IMG SUPPORTS ALL SAMPLERS
|
||||
if "init_mask" in parameters:
|
||||
rfc_dict["mask_hash"] = calculate_init_img_hash(
|
||||
parameters["init_mask"]
|
||||
) # TODO: Noncompliant
|
||||
rfc_dict["mask_image_path"] = parameters["init_mask"] # TODO: Noncompliant
|
||||
else:
|
||||
rfc_dict["type"] = "txt2img"
|
||||
|
||||
metadata["image"] = rfc_dict
|
||||
|
||||
return metadata
|
||||
|
||||
|
||||
def make_unique_init_image_filename(name):
|
||||
uuid = uuid4().hex
|
||||
split = os.path.splitext(name)
|
||||
name = f"{split[0]}.{uuid}{split[1]}"
|
||||
return name
|
||||
|
||||
|
||||
def write_log_message(message, log_path=log_path):
|
||||
"""Logs the filename and parameters used to generate or process that image to log file"""
|
||||
message = f"{message}\n"
|
||||
with open(log_path, "a", encoding="utf-8") as file:
|
||||
file.writelines(message)
|
||||
|
||||
|
||||
def save_image(
|
||||
image, command, metadata, output_dir, step_index=None, postprocessing=False
|
||||
):
|
||||
pngwriter = PngWriter(output_dir)
|
||||
prefix = pngwriter.unique_prefix()
|
||||
|
||||
seed = "unknown_seed"
|
||||
|
||||
if "image" in metadata:
|
||||
if "seed" in metadata["image"]:
|
||||
seed = metadata["image"]["seed"]
|
||||
|
||||
filename = f"{prefix}.{seed}"
|
||||
|
||||
if step_index:
|
||||
filename += f".{step_index}"
|
||||
if postprocessing:
|
||||
filename += f".postprocessed"
|
||||
|
||||
filename += ".png"
|
||||
|
||||
path = pngwriter.save_image_and_prompt_to_png(
|
||||
image=image, dream_prompt=command, metadata=metadata, name=filename
|
||||
)
|
||||
|
||||
return path
|
||||
|
||||
|
||||
def calculate_real_steps(steps, strength, has_init_image):
|
||||
return math.floor(strength * steps) if has_init_image else steps
|
||||
|
||||
|
||||
def generate_images(generation_parameters, esrgan_parameters, gfpgan_parameters):
|
||||
canceled.clear()
|
||||
|
||||
step_index = 1
|
||||
prior_variations = (
|
||||
generation_parameters["with_variations"]
|
||||
if "with_variations" in generation_parameters
|
||||
else []
|
||||
)
|
||||
"""
|
||||
If a result image is used as an init image, and then deleted, we will want to be
|
||||
able to use it as an init image in the future. Need to copy it.
|
||||
|
||||
If the init/mask image doesn't exist in the init_image_path/mask_image_path,
|
||||
make a unique filename for it and copy it there.
|
||||
"""
|
||||
if "init_img" in generation_parameters:
|
||||
filename = os.path.basename(generation_parameters["init_img"])
|
||||
if not os.path.exists(os.path.join(init_image_path, filename)):
|
||||
unique_filename = make_unique_init_image_filename(filename)
|
||||
new_path = os.path.join(init_image_path, unique_filename)
|
||||
shutil.copy(generation_parameters["init_img"], new_path)
|
||||
generation_parameters["init_img"] = new_path
|
||||
if "init_mask" in generation_parameters:
|
||||
filename = os.path.basename(generation_parameters["init_mask"])
|
||||
if not os.path.exists(os.path.join(mask_image_path, filename)):
|
||||
unique_filename = make_unique_init_image_filename(filename)
|
||||
new_path = os.path.join(init_image_path, unique_filename)
|
||||
shutil.copy(generation_parameters["init_img"], new_path)
|
||||
generation_parameters["init_mask"] = new_path
|
||||
|
||||
totalSteps = calculate_real_steps(
|
||||
steps=generation_parameters["steps"],
|
||||
strength=generation_parameters["strength"]
|
||||
if "strength" in generation_parameters
|
||||
else None,
|
||||
has_init_image="init_img" in generation_parameters,
|
||||
)
|
||||
|
||||
progress = {
|
||||
"currentStep": 1,
|
||||
"totalSteps": totalSteps,
|
||||
"currentIteration": 1,
|
||||
"totalIterations": generation_parameters["iterations"],
|
||||
"currentStatus": "Preparing",
|
||||
"isProcessing": True,
|
||||
"currentStatusHasSteps": False,
|
||||
}
|
||||
|
||||
socketio.emit("progressUpdate", progress)
|
||||
eventlet.sleep(0)
|
||||
|
||||
def image_progress(sample, step):
|
||||
if canceled.is_set():
|
||||
raise CanceledException
|
||||
|
||||
nonlocal step_index
|
||||
nonlocal generation_parameters
|
||||
nonlocal progress
|
||||
|
||||
progress["currentStep"] = step + 1
|
||||
progress["currentStatus"] = "Generating"
|
||||
progress["currentStatusHasSteps"] = True
|
||||
|
||||
if (
|
||||
generation_parameters["progress_images"]
|
||||
and step % 5 == 0
|
||||
and step < generation_parameters["steps"] - 1
|
||||
):
|
||||
image = generate.sample_to_image(sample)
|
||||
|
||||
metadata = parameters_to_generated_image_metadata(generation_parameters)
|
||||
command = parameters_to_command(generation_parameters)
|
||||
path = save_image(image, command, metadata, intermediate_path, step_index=step_index, postprocessing=False)
|
||||
|
||||
step_index += 1
|
||||
socketio.emit(
|
||||
"intermediateResult",
|
||||
{
|
||||
"url": os.path.relpath(path),
|
||||
"mtime": os.path.getmtime(path),
|
||||
"metadata": metadata,
|
||||
},
|
||||
)
|
||||
socketio.emit("progressUpdate", progress)
|
||||
eventlet.sleep(0)
|
||||
|
||||
def image_done(image, seed, first_seed):
|
||||
nonlocal generation_parameters
|
||||
nonlocal esrgan_parameters
|
||||
nonlocal gfpgan_parameters
|
||||
nonlocal progress
|
||||
|
||||
step_index = 1
|
||||
nonlocal prior_variations
|
||||
|
||||
progress["currentStatus"] = "Generation complete"
|
||||
socketio.emit("progressUpdate", progress)
|
||||
eventlet.sleep(0)
|
||||
|
||||
all_parameters = generation_parameters
|
||||
postprocessing = False
|
||||
|
||||
if (
|
||||
"variation_amount" in all_parameters
|
||||
and all_parameters["variation_amount"] > 0
|
||||
):
|
||||
first_seed = first_seed or seed
|
||||
this_variation = [[seed, all_parameters["variation_amount"]]]
|
||||
all_parameters["with_variations"] = prior_variations + this_variation
|
||||
all_parameters["seed"] = first_seed
|
||||
elif ("with_variations" in all_parameters):
|
||||
all_parameters["seed"] = first_seed
|
||||
else:
|
||||
all_parameters["seed"] = seed
|
||||
|
||||
if esrgan_parameters:
|
||||
progress["currentStatus"] = "Upscaling"
|
||||
progress["currentStatusHasSteps"] = False
|
||||
socketio.emit("progressUpdate", progress)
|
||||
eventlet.sleep(0)
|
||||
|
||||
image = esrgan.process(
|
||||
image=image,
|
||||
upsampler_scale=esrgan_parameters["level"],
|
||||
strength=esrgan_parameters["strength"],
|
||||
seed=seed,
|
||||
)
|
||||
|
||||
postprocessing = True
|
||||
all_parameters["upscale"] = [
|
||||
esrgan_parameters["level"],
|
||||
esrgan_parameters["strength"],
|
||||
]
|
||||
|
||||
if gfpgan_parameters:
|
||||
progress["currentStatus"] = "Fixing faces"
|
||||
progress["currentStatusHasSteps"] = False
|
||||
socketio.emit("progressUpdate", progress)
|
||||
eventlet.sleep(0)
|
||||
|
||||
image = gfpgan.process(
|
||||
image=image, strength=gfpgan_parameters["strength"], seed=seed
|
||||
)
|
||||
postprocessing = True
|
||||
all_parameters["gfpgan_strength"] = gfpgan_parameters["strength"]
|
||||
|
||||
progress["currentStatus"] = "Saving image"
|
||||
socketio.emit("progressUpdate", progress)
|
||||
eventlet.sleep(0)
|
||||
|
||||
metadata = parameters_to_generated_image_metadata(all_parameters)
|
||||
command = parameters_to_command(all_parameters)
|
||||
|
||||
path = save_image(
|
||||
image, command, metadata, result_path, postprocessing=postprocessing
|
||||
)
|
||||
|
||||
print(f'>> Image generated: "{path}"')
|
||||
write_log_message(f'[Generated] "{path}": {command}')
|
||||
|
||||
if progress["totalIterations"] > progress["currentIteration"]:
|
||||
progress["currentStep"] = 1
|
||||
progress["currentIteration"] += 1
|
||||
progress["currentStatus"] = "Iteration finished"
|
||||
progress["currentStatusHasSteps"] = False
|
||||
else:
|
||||
progress["currentStep"] = 0
|
||||
progress["totalSteps"] = 0
|
||||
progress["currentIteration"] = 0
|
||||
progress["totalIterations"] = 0
|
||||
progress["currentStatus"] = "Finished"
|
||||
progress["isProcessing"] = False
|
||||
|
||||
socketio.emit("progressUpdate", progress)
|
||||
eventlet.sleep(0)
|
||||
|
||||
socketio.emit(
|
||||
"generationResult",
|
||||
{
|
||||
"url": os.path.relpath(path),
|
||||
"mtime": os.path.getmtime(path),
|
||||
"metadata": metadata,
|
||||
},
|
||||
)
|
||||
eventlet.sleep(0)
|
||||
|
||||
try:
|
||||
generate.prompt2image(
|
||||
**generation_parameters,
|
||||
step_callback=image_progress,
|
||||
image_callback=image_done,
|
||||
)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
raise
|
||||
except CanceledException:
|
||||
pass
|
||||
except Exception as e:
|
||||
socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
|
||||
|
||||
"""
|
||||
END ADDITIONAL FUNCTIONS
|
||||
"""
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
print(f">> Starting server at http://{host}:{port}")
|
||||
socketio.run(app, host=host, port=port)
|
||||
@@ -1,54 +0,0 @@
|
||||
model:
|
||||
base_learning_rate: 4.5e-6
|
||||
target: ldm.models.autoencoder.AutoencoderKL
|
||||
params:
|
||||
monitor: "val/rec_loss"
|
||||
embed_dim: 16
|
||||
lossconfig:
|
||||
target: ldm.modules.losses.LPIPSWithDiscriminator
|
||||
params:
|
||||
disc_start: 50001
|
||||
kl_weight: 0.000001
|
||||
disc_weight: 0.5
|
||||
|
||||
ddconfig:
|
||||
double_z: True
|
||||
z_channels: 16
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult: [ 1,1,2,2,4] # num_down = len(ch_mult)-1
|
||||
num_res_blocks: 2
|
||||
attn_resolutions: [16]
|
||||
dropout: 0.0
|
||||
|
||||
|
||||
data:
|
||||
target: main.DataModuleFromConfig
|
||||
params:
|
||||
batch_size: 12
|
||||
wrap: True
|
||||
train:
|
||||
target: ldm.data.imagenet.ImageNetSRTrain
|
||||
params:
|
||||
size: 256
|
||||
degradation: pil_nearest
|
||||
validation:
|
||||
target: ldm.data.imagenet.ImageNetSRValidation
|
||||
params:
|
||||
size: 256
|
||||
degradation: pil_nearest
|
||||
|
||||
lightning:
|
||||
callbacks:
|
||||
image_logger:
|
||||
target: main.ImageLogger
|
||||
params:
|
||||
batch_frequency: 1000
|
||||
max_images: 8
|
||||
increase_log_steps: True
|
||||
|
||||
trainer:
|
||||
benchmark: True
|
||||
accumulate_grad_batches: 2
|
||||
@@ -1,53 +0,0 @@
|
||||
model:
|
||||
base_learning_rate: 4.5e-6
|
||||
target: ldm.models.autoencoder.AutoencoderKL
|
||||
params:
|
||||
monitor: "val/rec_loss"
|
||||
embed_dim: 4
|
||||
lossconfig:
|
||||
target: ldm.modules.losses.LPIPSWithDiscriminator
|
||||
params:
|
||||
disc_start: 50001
|
||||
kl_weight: 0.000001
|
||||
disc_weight: 0.5
|
||||
|
||||
ddconfig:
|
||||
double_z: True
|
||||
z_channels: 4
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult: [ 1,2,4,4 ] # num_down = len(ch_mult)-1
|
||||
num_res_blocks: 2
|
||||
attn_resolutions: [ ]
|
||||
dropout: 0.0
|
||||
|
||||
data:
|
||||
target: main.DataModuleFromConfig
|
||||
params:
|
||||
batch_size: 12
|
||||
wrap: True
|
||||
train:
|
||||
target: ldm.data.imagenet.ImageNetSRTrain
|
||||
params:
|
||||
size: 256
|
||||
degradation: pil_nearest
|
||||
validation:
|
||||
target: ldm.data.imagenet.ImageNetSRValidation
|
||||
params:
|
||||
size: 256
|
||||
degradation: pil_nearest
|
||||
|
||||
lightning:
|
||||
callbacks:
|
||||
image_logger:
|
||||
target: main.ImageLogger
|
||||
params:
|
||||
batch_frequency: 1000
|
||||
max_images: 8
|
||||
increase_log_steps: True
|
||||
|
||||
trainer:
|
||||
benchmark: True
|
||||
accumulate_grad_batches: 2
|
||||
@@ -1,54 +0,0 @@
|
||||
model:
|
||||
base_learning_rate: 4.5e-6
|
||||
target: ldm.models.autoencoder.AutoencoderKL
|
||||
params:
|
||||
monitor: "val/rec_loss"
|
||||
embed_dim: 3
|
||||
lossconfig:
|
||||
target: ldm.modules.losses.LPIPSWithDiscriminator
|
||||
params:
|
||||
disc_start: 50001
|
||||
kl_weight: 0.000001
|
||||
disc_weight: 0.5
|
||||
|
||||
ddconfig:
|
||||
double_z: True
|
||||
z_channels: 3
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult: [ 1,2,4 ] # num_down = len(ch_mult)-1
|
||||
num_res_blocks: 2
|
||||
attn_resolutions: [ ]
|
||||
dropout: 0.0
|
||||
|
||||
|
||||
data:
|
||||
target: main.DataModuleFromConfig
|
||||
params:
|
||||
batch_size: 12
|
||||
wrap: True
|
||||
train:
|
||||
target: ldm.data.imagenet.ImageNetSRTrain
|
||||
params:
|
||||
size: 256
|
||||
degradation: pil_nearest
|
||||
validation:
|
||||
target: ldm.data.imagenet.ImageNetSRValidation
|
||||
params:
|
||||
size: 256
|
||||
degradation: pil_nearest
|
||||
|
||||
lightning:
|
||||
callbacks:
|
||||
image_logger:
|
||||
target: main.ImageLogger
|
||||
params:
|
||||
batch_frequency: 1000
|
||||
max_images: 8
|
||||
increase_log_steps: True
|
||||
|
||||
trainer:
|
||||
benchmark: True
|
||||
accumulate_grad_batches: 2
|
||||
@@ -1,53 +0,0 @@
|
||||
model:
|
||||
base_learning_rate: 4.5e-6
|
||||
target: ldm.models.autoencoder.AutoencoderKL
|
||||
params:
|
||||
monitor: "val/rec_loss"
|
||||
embed_dim: 64
|
||||
lossconfig:
|
||||
target: ldm.modules.losses.LPIPSWithDiscriminator
|
||||
params:
|
||||
disc_start: 50001
|
||||
kl_weight: 0.000001
|
||||
disc_weight: 0.5
|
||||
|
||||
ddconfig:
|
||||
double_z: True
|
||||
z_channels: 64
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult: [ 1,1,2,2,4,4] # num_down = len(ch_mult)-1
|
||||
num_res_blocks: 2
|
||||
attn_resolutions: [16,8]
|
||||
dropout: 0.0
|
||||
|
||||
data:
|
||||
target: main.DataModuleFromConfig
|
||||
params:
|
||||
batch_size: 12
|
||||
wrap: True
|
||||
train:
|
||||
target: ldm.data.imagenet.ImageNetSRTrain
|
||||
params:
|
||||
size: 256
|
||||
degradation: pil_nearest
|
||||
validation:
|
||||
target: ldm.data.imagenet.ImageNetSRValidation
|
||||
params:
|
||||
size: 256
|
||||
degradation: pil_nearest
|
||||
|
||||
lightning:
|
||||
callbacks:
|
||||
image_logger:
|
||||
target: main.ImageLogger
|
||||
params:
|
||||
batch_frequency: 1000
|
||||
max_images: 8
|
||||
increase_log_steps: True
|
||||
|
||||
trainer:
|
||||
benchmark: True
|
||||
accumulate_grad_batches: 2
|
||||
@@ -1,86 +0,0 @@
|
||||
model:
|
||||
base_learning_rate: 2.0e-06
|
||||
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
||||
params:
|
||||
linear_start: 0.0015
|
||||
linear_end: 0.0195
|
||||
num_timesteps_cond: 1
|
||||
log_every_t: 200
|
||||
timesteps: 1000
|
||||
first_stage_key: image
|
||||
image_size: 64
|
||||
channels: 3
|
||||
monitor: val/loss_simple_ema
|
||||
|
||||
unet_config:
|
||||
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
||||
params:
|
||||
image_size: 64
|
||||
in_channels: 3
|
||||
out_channels: 3
|
||||
model_channels: 224
|
||||
attention_resolutions:
|
||||
# note: this isn\t actually the resolution but
|
||||
# the downsampling factor, i.e. this corresnponds to
|
||||
# attention on spatial resolution 8,16,32, as the
|
||||
# spatial reolution of the latents is 64 for f4
|
||||
- 8
|
||||
- 4
|
||||
- 2
|
||||
num_res_blocks: 2
|
||||
channel_mult:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
num_head_channels: 32
|
||||
first_stage_config:
|
||||
target: ldm.models.autoencoder.VQModelInterface
|
||||
params:
|
||||
embed_dim: 3
|
||||
n_embed: 8192
|
||||
ckpt_path: models/first_stage_models/vq-f4/model.ckpt
|
||||
ddconfig:
|
||||
double_z: false
|
||||
z_channels: 3
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult:
|
||||
- 1
|
||||
- 2
|
||||
- 4
|
||||
num_res_blocks: 2
|
||||
attn_resolutions: []
|
||||
dropout: 0.0
|
||||
lossconfig:
|
||||
target: torch.nn.Identity
|
||||
cond_stage_config: __is_unconditional__
|
||||
data:
|
||||
target: main.DataModuleFromConfig
|
||||
params:
|
||||
batch_size: 48
|
||||
num_workers: 5
|
||||
wrap: false
|
||||
train:
|
||||
target: taming.data.faceshq.CelebAHQTrain
|
||||
params:
|
||||
size: 256
|
||||
validation:
|
||||
target: taming.data.faceshq.CelebAHQValidation
|
||||
params:
|
||||
size: 256
|
||||
|
||||
|
||||
lightning:
|
||||
callbacks:
|
||||
image_logger:
|
||||
target: main.ImageLogger
|
||||
params:
|
||||
batch_frequency: 5000
|
||||
max_images: 8
|
||||
increase_log_steps: False
|
||||
|
||||
trainer:
|
||||
benchmark: True
|
||||
@@ -1,98 +0,0 @@
|
||||
model:
|
||||
base_learning_rate: 1.0e-06
|
||||
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
||||
params:
|
||||
linear_start: 0.0015
|
||||
linear_end: 0.0195
|
||||
num_timesteps_cond: 1
|
||||
log_every_t: 200
|
||||
timesteps: 1000
|
||||
first_stage_key: image
|
||||
cond_stage_key: class_label
|
||||
image_size: 32
|
||||
channels: 4
|
||||
cond_stage_trainable: true
|
||||
conditioning_key: crossattn
|
||||
monitor: val/loss_simple_ema
|
||||
unet_config:
|
||||
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
||||
params:
|
||||
image_size: 32
|
||||
in_channels: 4
|
||||
out_channels: 4
|
||||
model_channels: 256
|
||||
attention_resolutions:
|
||||
#note: this isn\t actually the resolution but
|
||||
# the downsampling factor, i.e. this corresnponds to
|
||||
# attention on spatial resolution 8,16,32, as the
|
||||
# spatial reolution of the latents is 32 for f8
|
||||
- 4
|
||||
- 2
|
||||
- 1
|
||||
num_res_blocks: 2
|
||||
channel_mult:
|
||||
- 1
|
||||
- 2
|
||||
- 4
|
||||
num_head_channels: 32
|
||||
use_spatial_transformer: true
|
||||
transformer_depth: 1
|
||||
context_dim: 512
|
||||
first_stage_config:
|
||||
target: ldm.models.autoencoder.VQModelInterface
|
||||
params:
|
||||
embed_dim: 4
|
||||
n_embed: 16384
|
||||
ckpt_path: configs/first_stage_models/vq-f8/model.yaml
|
||||
ddconfig:
|
||||
double_z: false
|
||||
z_channels: 4
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult:
|
||||
- 1
|
||||
- 2
|
||||
- 2
|
||||
- 4
|
||||
num_res_blocks: 2
|
||||
attn_resolutions:
|
||||
- 32
|
||||
dropout: 0.0
|
||||
lossconfig:
|
||||
target: torch.nn.Identity
|
||||
cond_stage_config:
|
||||
target: ldm.modules.encoders.modules.ClassEmbedder
|
||||
params:
|
||||
embed_dim: 512
|
||||
key: class_label
|
||||
data:
|
||||
target: main.DataModuleFromConfig
|
||||
params:
|
||||
batch_size: 64
|
||||
num_workers: 12
|
||||
wrap: false
|
||||
train:
|
||||
target: ldm.data.imagenet.ImageNetTrain
|
||||
params:
|
||||
config:
|
||||
size: 256
|
||||
validation:
|
||||
target: ldm.data.imagenet.ImageNetValidation
|
||||
params:
|
||||
config:
|
||||
size: 256
|
||||
|
||||
|
||||
lightning:
|
||||
callbacks:
|
||||
image_logger:
|
||||
target: main.ImageLogger
|
||||
params:
|
||||
batch_frequency: 5000
|
||||
max_images: 8
|
||||
increase_log_steps: False
|
||||
|
||||
trainer:
|
||||
benchmark: True
|
||||
@@ -1,68 +0,0 @@
|
||||
model:
|
||||
base_learning_rate: 0.0001
|
||||
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
||||
params:
|
||||
linear_start: 0.0015
|
||||
linear_end: 0.0195
|
||||
num_timesteps_cond: 1
|
||||
log_every_t: 200
|
||||
timesteps: 1000
|
||||
first_stage_key: image
|
||||
cond_stage_key: class_label
|
||||
image_size: 64
|
||||
channels: 3
|
||||
cond_stage_trainable: true
|
||||
conditioning_key: crossattn
|
||||
monitor: val/loss
|
||||
use_ema: False
|
||||
|
||||
unet_config:
|
||||
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
||||
params:
|
||||
image_size: 64
|
||||
in_channels: 3
|
||||
out_channels: 3
|
||||
model_channels: 192
|
||||
attention_resolutions:
|
||||
- 8
|
||||
- 4
|
||||
- 2
|
||||
num_res_blocks: 2
|
||||
channel_mult:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 5
|
||||
num_heads: 1
|
||||
use_spatial_transformer: true
|
||||
transformer_depth: 1
|
||||
context_dim: 512
|
||||
|
||||
first_stage_config:
|
||||
target: ldm.models.autoencoder.VQModelInterface
|
||||
params:
|
||||
embed_dim: 3
|
||||
n_embed: 8192
|
||||
ddconfig:
|
||||
double_z: false
|
||||
z_channels: 3
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult:
|
||||
- 1
|
||||
- 2
|
||||
- 4
|
||||
num_res_blocks: 2
|
||||
attn_resolutions: []
|
||||
dropout: 0.0
|
||||
lossconfig:
|
||||
target: torch.nn.Identity
|
||||
|
||||
cond_stage_config:
|
||||
target: ldm.modules.encoders.modules.ClassEmbedder
|
||||
params:
|
||||
n_classes: 1001
|
||||
embed_dim: 512
|
||||
key: class_label
|
||||
@@ -1,85 +0,0 @@
|
||||
model:
|
||||
base_learning_rate: 2.0e-06
|
||||
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
||||
params:
|
||||
linear_start: 0.0015
|
||||
linear_end: 0.0195
|
||||
num_timesteps_cond: 1
|
||||
log_every_t: 200
|
||||
timesteps: 1000
|
||||
first_stage_key: image
|
||||
image_size: 64
|
||||
channels: 3
|
||||
monitor: val/loss_simple_ema
|
||||
unet_config:
|
||||
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
||||
params:
|
||||
image_size: 64
|
||||
in_channels: 3
|
||||
out_channels: 3
|
||||
model_channels: 224
|
||||
attention_resolutions:
|
||||
# note: this isn\t actually the resolution but
|
||||
# the downsampling factor, i.e. this corresnponds to
|
||||
# attention on spatial resolution 8,16,32, as the
|
||||
# spatial reolution of the latents is 64 for f4
|
||||
- 8
|
||||
- 4
|
||||
- 2
|
||||
num_res_blocks: 2
|
||||
channel_mult:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
num_head_channels: 32
|
||||
first_stage_config:
|
||||
target: ldm.models.autoencoder.VQModelInterface
|
||||
params:
|
||||
embed_dim: 3
|
||||
n_embed: 8192
|
||||
ckpt_path: configs/first_stage_models/vq-f4/model.yaml
|
||||
ddconfig:
|
||||
double_z: false
|
||||
z_channels: 3
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult:
|
||||
- 1
|
||||
- 2
|
||||
- 4
|
||||
num_res_blocks: 2
|
||||
attn_resolutions: []
|
||||
dropout: 0.0
|
||||
lossconfig:
|
||||
target: torch.nn.Identity
|
||||
cond_stage_config: __is_unconditional__
|
||||
data:
|
||||
target: main.DataModuleFromConfig
|
||||
params:
|
||||
batch_size: 42
|
||||
num_workers: 5
|
||||
wrap: false
|
||||
train:
|
||||
target: taming.data.faceshq.FFHQTrain
|
||||
params:
|
||||
size: 256
|
||||
validation:
|
||||
target: taming.data.faceshq.FFHQValidation
|
||||
params:
|
||||
size: 256
|
||||
|
||||
|
||||
lightning:
|
||||
callbacks:
|
||||
image_logger:
|
||||
target: main.ImageLogger
|
||||
params:
|
||||
batch_frequency: 5000
|
||||
max_images: 8
|
||||
increase_log_steps: False
|
||||
|
||||
trainer:
|
||||
benchmark: True
|
||||
@@ -1,85 +0,0 @@
|
||||
model:
|
||||
base_learning_rate: 2.0e-06
|
||||
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
||||
params:
|
||||
linear_start: 0.0015
|
||||
linear_end: 0.0195
|
||||
num_timesteps_cond: 1
|
||||
log_every_t: 200
|
||||
timesteps: 1000
|
||||
first_stage_key: image
|
||||
image_size: 64
|
||||
channels: 3
|
||||
monitor: val/loss_simple_ema
|
||||
unet_config:
|
||||
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
||||
params:
|
||||
image_size: 64
|
||||
in_channels: 3
|
||||
out_channels: 3
|
||||
model_channels: 224
|
||||
attention_resolutions:
|
||||
# note: this isn\t actually the resolution but
|
||||
# the downsampling factor, i.e. this corresnponds to
|
||||
# attention on spatial resolution 8,16,32, as the
|
||||
# spatial reolution of the latents is 64 for f4
|
||||
- 8
|
||||
- 4
|
||||
- 2
|
||||
num_res_blocks: 2
|
||||
channel_mult:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
num_head_channels: 32
|
||||
first_stage_config:
|
||||
target: ldm.models.autoencoder.VQModelInterface
|
||||
params:
|
||||
ckpt_path: configs/first_stage_models/vq-f4/model.yaml
|
||||
embed_dim: 3
|
||||
n_embed: 8192
|
||||
ddconfig:
|
||||
double_z: false
|
||||
z_channels: 3
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult:
|
||||
- 1
|
||||
- 2
|
||||
- 4
|
||||
num_res_blocks: 2
|
||||
attn_resolutions: []
|
||||
dropout: 0.0
|
||||
lossconfig:
|
||||
target: torch.nn.Identity
|
||||
cond_stage_config: __is_unconditional__
|
||||
data:
|
||||
target: main.DataModuleFromConfig
|
||||
params:
|
||||
batch_size: 48
|
||||
num_workers: 5
|
||||
wrap: false
|
||||
train:
|
||||
target: ldm.data.lsun.LSUNBedroomsTrain
|
||||
params:
|
||||
size: 256
|
||||
validation:
|
||||
target: ldm.data.lsun.LSUNBedroomsValidation
|
||||
params:
|
||||
size: 256
|
||||
|
||||
|
||||
lightning:
|
||||
callbacks:
|
||||
image_logger:
|
||||
target: main.ImageLogger
|
||||
params:
|
||||
batch_frequency: 5000
|
||||
max_images: 8
|
||||
increase_log_steps: False
|
||||
|
||||
trainer:
|
||||
benchmark: True
|
||||
@@ -1,91 +0,0 @@
|
||||
model:
|
||||
base_learning_rate: 5.0e-5 # set to target_lr by starting main.py with '--scale_lr False'
|
||||
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
||||
params:
|
||||
linear_start: 0.0015
|
||||
linear_end: 0.0155
|
||||
num_timesteps_cond: 1
|
||||
log_every_t: 200
|
||||
timesteps: 1000
|
||||
loss_type: l1
|
||||
first_stage_key: "image"
|
||||
cond_stage_key: "image"
|
||||
image_size: 32
|
||||
channels: 4
|
||||
cond_stage_trainable: False
|
||||
concat_mode: False
|
||||
scale_by_std: True
|
||||
monitor: 'val/loss_simple_ema'
|
||||
|
||||
scheduler_config: # 10000 warmup steps
|
||||
target: ldm.lr_scheduler.LambdaLinearScheduler
|
||||
params:
|
||||
warm_up_steps: [10000]
|
||||
cycle_lengths: [10000000000000]
|
||||
f_start: [1.e-6]
|
||||
f_max: [1.]
|
||||
f_min: [ 1.]
|
||||
|
||||
unet_config:
|
||||
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
||||
params:
|
||||
image_size: 32
|
||||
in_channels: 4
|
||||
out_channels: 4
|
||||
model_channels: 192
|
||||
attention_resolutions: [ 1, 2, 4, 8 ] # 32, 16, 8, 4
|
||||
num_res_blocks: 2
|
||||
channel_mult: [ 1,2,2,4,4 ] # 32, 16, 8, 4, 2
|
||||
num_heads: 8
|
||||
use_scale_shift_norm: True
|
||||
resblock_updown: True
|
||||
|
||||
first_stage_config:
|
||||
target: ldm.models.autoencoder.AutoencoderKL
|
||||
params:
|
||||
embed_dim: 4
|
||||
monitor: "val/rec_loss"
|
||||
ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
|
||||
ddconfig:
|
||||
double_z: True
|
||||
z_channels: 4
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult: [ 1,2,4,4 ] # num_down = len(ch_mult)-1
|
||||
num_res_blocks: 2
|
||||
attn_resolutions: [ ]
|
||||
dropout: 0.0
|
||||
lossconfig:
|
||||
target: torch.nn.Identity
|
||||
|
||||
cond_stage_config: "__is_unconditional__"
|
||||
|
||||
data:
|
||||
target: main.DataModuleFromConfig
|
||||
params:
|
||||
batch_size: 96
|
||||
num_workers: 5
|
||||
wrap: False
|
||||
train:
|
||||
target: ldm.data.lsun.LSUNChurchesTrain
|
||||
params:
|
||||
size: 256
|
||||
validation:
|
||||
target: ldm.data.lsun.LSUNChurchesValidation
|
||||
params:
|
||||
size: 256
|
||||
|
||||
lightning:
|
||||
callbacks:
|
||||
image_logger:
|
||||
target: main.ImageLogger
|
||||
params:
|
||||
batch_frequency: 5000
|
||||
max_images: 8
|
||||
increase_log_steps: False
|
||||
|
||||
|
||||
trainer:
|
||||
benchmark: True
|
||||
@@ -1,71 +0,0 @@
|
||||
model:
|
||||
base_learning_rate: 5.0e-05
|
||||
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
||||
params:
|
||||
linear_start: 0.00085
|
||||
linear_end: 0.012
|
||||
num_timesteps_cond: 1
|
||||
log_every_t: 200
|
||||
timesteps: 1000
|
||||
first_stage_key: image
|
||||
cond_stage_key: caption
|
||||
image_size: 32
|
||||
channels: 4
|
||||
cond_stage_trainable: true
|
||||
conditioning_key: crossattn
|
||||
monitor: val/loss_simple_ema
|
||||
scale_factor: 0.18215
|
||||
use_ema: False
|
||||
|
||||
unet_config:
|
||||
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
||||
params:
|
||||
image_size: 32
|
||||
in_channels: 4
|
||||
out_channels: 4
|
||||
model_channels: 320
|
||||
attention_resolutions:
|
||||
- 4
|
||||
- 2
|
||||
- 1
|
||||
num_res_blocks: 2
|
||||
channel_mult:
|
||||
- 1
|
||||
- 2
|
||||
- 4
|
||||
- 4
|
||||
num_heads: 8
|
||||
use_spatial_transformer: true
|
||||
transformer_depth: 1
|
||||
context_dim: 1280
|
||||
use_checkpoint: true
|
||||
legacy: False
|
||||
|
||||
first_stage_config:
|
||||
target: ldm.models.autoencoder.AutoencoderKL
|
||||
params:
|
||||
embed_dim: 4
|
||||
monitor: val/rec_loss
|
||||
ddconfig:
|
||||
double_z: true
|
||||
z_channels: 4
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult:
|
||||
- 1
|
||||
- 2
|
||||
- 4
|
||||
- 4
|
||||
num_res_blocks: 2
|
||||
attn_resolutions: []
|
||||
dropout: 0.0
|
||||
lossconfig:
|
||||
target: torch.nn.Identity
|
||||
|
||||
cond_stage_config:
|
||||
target: ldm.modules.encoders.modules.BERTEmbedder
|
||||
params:
|
||||
n_embed: 1280
|
||||
n_layer: 32
|
||||
@@ -1,18 +0,0 @@
|
||||
# This file describes the alternative machine learning models
|
||||
# available to the dream script.
|
||||
#
|
||||
# To add a new model, follow the examples below. Each
|
||||
# model requires a model config file, a weights file,
|
||||
# and the width and height of the images it
|
||||
# was trained on.
|
||||
|
||||
laion400m:
|
||||
config: configs/latent-diffusion/txt2img-1p4B-eval.yaml
|
||||
weights: models/ldm/text2img-large/model.ckpt
|
||||
width: 256
|
||||
height: 256
|
||||
stable-diffusion-1.4:
|
||||
config: configs/stable-diffusion/v1-inference.yaml
|
||||
weights: models/ldm/stable-diffusion-v1/model.ckpt
|
||||
width: 512
|
||||
height: 512
|
||||
27
configs/models.yaml.example
Normal file
@@ -0,0 +1,27 @@
|
||||
# This file describes the alternative machine learning models
|
||||
# available to InvokeAI script.
|
||||
#
|
||||
# To add a new model, follow the examples below. Each
|
||||
# model requires a model config file, a weights file,
|
||||
# and the width and height of the images it
|
||||
# was trained on.
|
||||
stable-diffusion-1.5:
|
||||
description: The newest Stable Diffusion version 1.5 weight file (4.27 GB)
|
||||
weights: ./models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
|
||||
config: ./configs/stable-diffusion/v1-inference.yaml
|
||||
width: 512
|
||||
height: 512
|
||||
vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
|
||||
default: true
|
||||
stable-diffusion-1.4:
|
||||
description: Stable Diffusion inference model version 1.4
|
||||
config: configs/stable-diffusion/v1-inference.yaml
|
||||
weights: models/ldm/stable-diffusion-v1/sd-v1-4.ckpt
|
||||
vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
|
||||
width: 512
|
||||
height: 512
|
||||
inpainting-1.5:
|
||||
weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
|
||||
config: configs/stable-diffusion/v1-inpainting-inference.yaml
|
||||
vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
|
||||
description: RunwayML SD 1.5 model optimized for inpainting
|
||||
@@ -1,68 +0,0 @@
|
||||
model:
|
||||
base_learning_rate: 0.0001
|
||||
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
||||
params:
|
||||
linear_start: 0.0015
|
||||
linear_end: 0.015
|
||||
num_timesteps_cond: 1
|
||||
log_every_t: 200
|
||||
timesteps: 1000
|
||||
first_stage_key: jpg
|
||||
cond_stage_key: nix
|
||||
image_size: 48
|
||||
channels: 16
|
||||
cond_stage_trainable: false
|
||||
conditioning_key: crossattn
|
||||
monitor: val/loss_simple_ema
|
||||
scale_by_std: false
|
||||
scale_factor: 0.22765929
|
||||
unet_config:
|
||||
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
||||
params:
|
||||
image_size: 48
|
||||
in_channels: 16
|
||||
out_channels: 16
|
||||
model_channels: 448
|
||||
attention_resolutions:
|
||||
- 4
|
||||
- 2
|
||||
- 1
|
||||
num_res_blocks: 2
|
||||
channel_mult:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
use_scale_shift_norm: false
|
||||
resblock_updown: false
|
||||
num_head_channels: 32
|
||||
use_spatial_transformer: true
|
||||
transformer_depth: 1
|
||||
context_dim: 768
|
||||
use_checkpoint: true
|
||||
first_stage_config:
|
||||
target: ldm.models.autoencoder.AutoencoderKL
|
||||
params:
|
||||
monitor: val/rec_loss
|
||||
embed_dim: 16
|
||||
ddconfig:
|
||||
double_z: true
|
||||
z_channels: 16
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult:
|
||||
- 1
|
||||
- 1
|
||||
- 2
|
||||
- 2
|
||||
- 4
|
||||
num_res_blocks: 2
|
||||
attn_resolutions:
|
||||
- 16
|
||||
dropout: 0.0
|
||||
lossconfig:
|
||||
target: torch.nn.Identity
|
||||
cond_stage_config:
|
||||
target: torch.nn.Identity
|
||||
@@ -32,7 +32,7 @@ model:
|
||||
placeholder_strings: ["*"]
|
||||
initializer_words: ['face', 'man', 'photo', 'africanmale']
|
||||
per_image_tokens: false
|
||||
num_vectors_per_token: 6
|
||||
num_vectors_per_token: 1
|
||||
progressive_words: False
|
||||
|
||||
unet_config:
|
||||
@@ -76,4 +76,4 @@ model:
|
||||
target: torch.nn.Identity
|
||||
|
||||
cond_stage_config:
|
||||
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
|
||||
target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
|
||||
|
||||
79
configs/stable-diffusion/v1-inpainting-inference.yaml
Normal file
@@ -0,0 +1,79 @@
|
||||
model:
|
||||
base_learning_rate: 7.5e-05
|
||||
target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
|
||||
params:
|
||||
linear_start: 0.00085
|
||||
linear_end: 0.0120
|
||||
num_timesteps_cond: 1
|
||||
log_every_t: 200
|
||||
timesteps: 1000
|
||||
first_stage_key: "jpg"
|
||||
cond_stage_key: "txt"
|
||||
image_size: 64
|
||||
channels: 4
|
||||
cond_stage_trainable: false # Note: different from the one we trained before
|
||||
conditioning_key: hybrid # important
|
||||
monitor: val/loss_simple_ema
|
||||
scale_factor: 0.18215
|
||||
finetune_keys: null
|
||||
|
||||
scheduler_config: # 10000 warmup steps
|
||||
target: ldm.lr_scheduler.LambdaLinearScheduler
|
||||
params:
|
||||
warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
|
||||
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
||||
f_start: [ 1.e-6 ]
|
||||
f_max: [ 1. ]
|
||||
f_min: [ 1. ]
|
||||
|
||||
personalization_config:
|
||||
target: ldm.modules.embedding_manager.EmbeddingManager
|
||||
params:
|
||||
placeholder_strings: ["*"]
|
||||
initializer_words: ['face', 'man', 'photo', 'africanmale']
|
||||
per_image_tokens: false
|
||||
num_vectors_per_token: 1
|
||||
progressive_words: False
|
||||
|
||||
unet_config:
|
||||
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
||||
params:
|
||||
image_size: 32 # unused
|
||||
in_channels: 9 # 4 data + 4 downscaled image + 1 mask
|
||||
out_channels: 4
|
||||
model_channels: 320
|
||||
attention_resolutions: [ 4, 2, 1 ]
|
||||
num_res_blocks: 2
|
||||
channel_mult: [ 1, 2, 4, 4 ]
|
||||
num_heads: 8
|
||||
use_spatial_transformer: True
|
||||
transformer_depth: 1
|
||||
context_dim: 768
|
||||
use_checkpoint: True
|
||||
legacy: False
|
||||
|
||||
first_stage_config:
|
||||
target: ldm.models.autoencoder.AutoencoderKL
|
||||
params:
|
||||
embed_dim: 4
|
||||
monitor: val/rec_loss
|
||||
ddconfig:
|
||||
double_z: true
|
||||
z_channels: 4
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult:
|
||||
- 1
|
||||
- 2
|
||||
- 4
|
||||
- 4
|
||||
num_res_blocks: 2
|
||||
attn_resolutions: []
|
||||
dropout: 0.0
|
||||
lossconfig:
|
||||
target: torch.nn.Identity
|
||||
|
||||
cond_stage_config:
|
||||
target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
|
||||
@@ -1,57 +1,84 @@
|
||||
FROM debian
|
||||
FROM ubuntu AS get_miniconda
|
||||
|
||||
ARG gsd
|
||||
ENV GITHUB_STABLE_DIFFUSION $gsd
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
ARG rsd
|
||||
ENV REQS $rsd
|
||||
# install wget
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y \
|
||||
wget \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ARG cs
|
||||
ENV CONDA_SUBDIR $cs
|
||||
# download and install miniconda
|
||||
ARG conda_version=py39_4.12.0-Linux-x86_64
|
||||
ARG conda_prefix=/opt/conda
|
||||
RUN wget --progress=dot:giga -O /miniconda.sh \
|
||||
https://repo.anaconda.com/miniconda/Miniconda3-${conda_version}.sh \
|
||||
&& bash /miniconda.sh -b -p ${conda_prefix} \
|
||||
&& rm -f /miniconda.sh
|
||||
|
||||
ENV PIP_EXISTS_ACTION="w"
|
||||
FROM ubuntu AS invokeai
|
||||
|
||||
# TODO: Optimize image size
|
||||
# use bash
|
||||
SHELL [ "/bin/bash", "-c" ]
|
||||
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
# clean bashrc
|
||||
RUN echo "" > ~/.bashrc
|
||||
|
||||
WORKDIR /
|
||||
RUN apt update && apt upgrade -y \
|
||||
&& apt install -y \
|
||||
git \
|
||||
libgl1-mesa-glx \
|
||||
libglib2.0-0 \
|
||||
pip \
|
||||
python3 \
|
||||
&& git clone $GITHUB_STABLE_DIFFUSION
|
||||
# Install necesarry packages
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y \
|
||||
--no-install-recommends \
|
||||
gcc \
|
||||
git \
|
||||
libgl1-mesa-glx \
|
||||
libglib2.0-0 \
|
||||
pip \
|
||||
python3 \
|
||||
python3-dev \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install Anaconda or Miniconda
|
||||
COPY anaconda.sh .
|
||||
RUN bash anaconda.sh -b -u -p /anaconda && /anaconda/bin/conda init bash
|
||||
# clone repository, create models.yaml and create symlinks
|
||||
ARG invokeai_git=invoke-ai/InvokeAI
|
||||
ARG invokeai_branch=main
|
||||
ARG project_name=invokeai
|
||||
ARG conda_env_file=environment-lin-cuda.yml
|
||||
RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git "/${project_name}" \
|
||||
&& cp \
|
||||
"/${project_name}/configs/models.yaml.example" \
|
||||
"/${project_name}/configs/models.yaml" \
|
||||
&& ln -sf \
|
||||
"/${project_name}/environments-and-requirements/${conda_env_file}" \
|
||||
"/${project_name}/environment.yml" \
|
||||
&& ln -sf \
|
||||
/data/models/v1-5-pruned-emaonly.ckpt \
|
||||
"/${project_name}/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt" \
|
||||
&& ln -sf \
|
||||
/data/outputs/ \
|
||||
"/${project_name}/outputs"
|
||||
|
||||
# SD
|
||||
WORKDIR /stable-diffusion
|
||||
RUN source ~/.bashrc \
|
||||
&& conda create -y --name ldm && conda activate ldm \
|
||||
&& conda config --env --set subdir $CONDA_SUBDIR \
|
||||
&& pip3 install -r $REQS \
|
||||
&& pip3 install basicsr facexlib realesrgan \
|
||||
&& mkdir models/ldm/stable-diffusion-v1 \
|
||||
&& ln -s "/data/sd-v1-4.ckpt" models/ldm/stable-diffusion-v1/model.ckpt
|
||||
# set workdir
|
||||
WORKDIR "/${project_name}"
|
||||
|
||||
# Face restoreation
|
||||
# by default expected in a sibling directory to stable-diffusion
|
||||
WORKDIR /
|
||||
RUN git clone https://github.com/TencentARC/GFPGAN.git
|
||||
# install conda env and preload models
|
||||
ARG conda_prefix=/opt/conda
|
||||
COPY --from=get_miniconda "${conda_prefix}" "${conda_prefix}"
|
||||
RUN source "${conda_prefix}/etc/profile.d/conda.sh" \
|
||||
&& conda init bash \
|
||||
&& source ~/.bashrc \
|
||||
&& conda env create \
|
||||
--name "${project_name}" \
|
||||
&& rm -Rf ~/.cache \
|
||||
&& conda clean -afy \
|
||||
&& echo "conda activate ${project_name}" >> ~/.bashrc
|
||||
|
||||
WORKDIR /GFPGAN
|
||||
RUN pip3 install -r requirements.txt \
|
||||
&& python3 setup.py develop \
|
||||
&& ln -s "/data/GFPGANv1.4.pth" experiments/pretrained_models/GFPGANv1.4.pth
|
||||
RUN source ~/.bashrc \
|
||||
&& python scripts/preload_models.py \
|
||||
--no-interactive
|
||||
|
||||
WORKDIR /stable-diffusion
|
||||
RUN python3 scripts/preload_models.py
|
||||
|
||||
WORKDIR /
|
||||
COPY entrypoint.sh .
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
# Copy entrypoint and set env
|
||||
ENV CONDA_PREFIX="${conda_prefix}"
|
||||
ENV PROJECT_NAME="${project_name}"
|
||||
COPY docker-build/entrypoint.sh /
|
||||
ENTRYPOINT [ "/entrypoint.sh" ]
|
||||
|
||||
84
docker-build/build.sh
Executable file
@@ -0,0 +1,84 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoint!!!
|
||||
# configure values by using env when executing build.sh
|
||||
# f.e. env ARCH=aarch64 GITHUB_INVOKE_AI=https://github.com/yourname/yourfork.git ./build.sh
|
||||
|
||||
source ./docker-build/env.sh || echo "please run from repository root" || exit 1
|
||||
|
||||
invokeai_conda_version=${INVOKEAI_CONDA_VERSION:-py39_4.12.0-${platform/\//-}}
|
||||
invokeai_conda_prefix=${INVOKEAI_CONDA_PREFIX:-\/opt\/conda}
|
||||
invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment-lin-cuda.yml}
|
||||
invokeai_git=${INVOKEAI_GIT:-invoke-ai/InvokeAI}
|
||||
invokeai_branch=${INVOKEAI_BRANCH:-main}
|
||||
huggingface_token=${HUGGINGFACE_TOKEN?}
|
||||
|
||||
# print the settings
|
||||
echo "You are using these values:"
|
||||
echo -e "project_name:\t\t ${project_name}"
|
||||
echo -e "volumename:\t\t ${volumename}"
|
||||
echo -e "arch:\t\t\t ${arch}"
|
||||
echo -e "platform:\t\t ${platform}"
|
||||
echo -e "invokeai_conda_version:\t ${invokeai_conda_version}"
|
||||
echo -e "invokeai_conda_prefix:\t ${invokeai_conda_prefix}"
|
||||
echo -e "invokeai_conda_env_file: ${invokeai_conda_env_file}"
|
||||
echo -e "invokeai_git:\t\t ${invokeai_git}"
|
||||
echo -e "invokeai_tag:\t\t ${invokeai_tag}\n"
|
||||
|
||||
_runAlpine() {
|
||||
docker run \
|
||||
--rm \
|
||||
--interactive \
|
||||
--tty \
|
||||
--mount source="$volumename",target=/data \
|
||||
--workdir /data \
|
||||
alpine "$@"
|
||||
}
|
||||
|
||||
_copyCheckpoints() {
|
||||
echo "creating subfolders for models and outputs"
|
||||
_runAlpine mkdir models
|
||||
_runAlpine mkdir outputs
|
||||
echo "downloading v1-5-pruned-emaonly.ckpt"
|
||||
_runAlpine wget \
|
||||
--header="Authorization: Bearer ${huggingface_token}" \
|
||||
-O models/v1-5-pruned-emaonly.ckpt \
|
||||
https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
|
||||
echo "done"
|
||||
}
|
||||
|
||||
_checkVolumeContent() {
|
||||
_runAlpine ls -lhA /data/models
|
||||
}
|
||||
|
||||
_getModelMd5s() {
|
||||
_runAlpine \
|
||||
alpine sh -c "md5sum /data/models/*.ckpt"
|
||||
}
|
||||
|
||||
if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then
|
||||
echo "Volume already exists"
|
||||
if [[ -z "$(_checkVolumeContent)" ]]; then
|
||||
echo "looks empty, copying checkpoint"
|
||||
_copyCheckpoints
|
||||
fi
|
||||
echo "Models in ${volumename}:"
|
||||
_checkVolumeContent
|
||||
else
|
||||
echo -n "createing docker volume "
|
||||
docker volume create "${volumename}"
|
||||
_copyCheckpoints
|
||||
fi
|
||||
|
||||
# Build Container
|
||||
docker build \
|
||||
--platform="${platform}" \
|
||||
--tag "${invokeai_tag}" \
|
||||
--build-arg project_name="${project_name}" \
|
||||
--build-arg conda_version="${invokeai_conda_version}" \
|
||||
--build-arg conda_prefix="${invokeai_conda_prefix}" \
|
||||
--build-arg conda_env_file="${invokeai_conda_env_file}" \
|
||||
--build-arg invokeai_git="${invokeai_git}" \
|
||||
--build-arg invokeai_branch="${invokeai_branch}" \
|
||||
--file ./docker-build/Dockerfile \
|
||||
.
|
||||
@@ -1,10 +1,8 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
cd /stable-diffusion
|
||||
source "${CONDA_PREFIX}/etc/profile.d/conda.sh"
|
||||
conda activate "${PROJECT_NAME}"
|
||||
|
||||
if [ $# -eq 0 ]; then
|
||||
python3 scripts/dream.py --full_precision -o /data
|
||||
# bash
|
||||
else
|
||||
python3 scripts/dream.py --full_precision -o /data "$@"
|
||||
fi
|
||||
python scripts/invoke.py \
|
||||
${@:---web --host=0.0.0.0}
|
||||
|
||||
13
docker-build/env.sh
Normal file
@@ -0,0 +1,13 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
project_name=${PROJECT_NAME:-invokeai}
|
||||
volumename=${VOLUMENAME:-${project_name}_data}
|
||||
arch=${ARCH:-x86_64}
|
||||
platform=${PLATFORM:-Linux/${arch}}
|
||||
invokeai_tag=${INVOKEAI_TAG:-${project_name}-${arch}}
|
||||
|
||||
export project_name
|
||||
export volumename
|
||||
export arch
|
||||
export platform
|
||||
export invokeai_tag
|
||||
15
docker-build/run.sh
Executable file
@@ -0,0 +1,15 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
source ./docker-build/env.sh || echo "please run from repository root" || exit 1
|
||||
|
||||
docker run \
|
||||
--interactive \
|
||||
--tty \
|
||||
--rm \
|
||||
--platform "$platform" \
|
||||
--name "$project_name" \
|
||||
--hostname "$project_name" \
|
||||
--mount source="$volumename",target=/data \
|
||||
--publish 9090:9090 \
|
||||
"$invokeai_tag" ${1:+$@}
|
||||
@@ -1,64 +1,313 @@
|
||||
# **Changelog**
|
||||
---
|
||||
title: Changelog
|
||||
---
|
||||
|
||||
## v1.13 (in process)
|
||||
# :octicons-log-16: **Changelog**
|
||||
|
||||
- Supports a Google Colab notebook for a standalone server running on Google hardware [Arturo Mendivil](https://github.com/artmen1516)
|
||||
- WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling [Kevin Gibbons](https://github.com/bakkot)
|
||||
- WebUI supports incremental display of in-progress images during generation [Kevin Gibbons](https://github.com/bakkot)
|
||||
- Output directory can be specified on the dream> command line.
|
||||
- The grid was displaying duplicated images when not enough images to fill the final row [Muhammad Usama](https://github.com/SMUsamaShah)
|
||||
- Can specify --grid on dream.py command line as the default.
|
||||
## v2.1.0 <small>(2 November 2022)</small>
|
||||
|
||||
- update mac instructions to use invokeai for env name by @willwillems in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1030
|
||||
- Update .gitignore by @blessedcoolant in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1040
|
||||
- reintroduce fix for m1 from https://github.com/invoke-ai/InvokeAI/pull/579
|
||||
missing after merge by @skurovec in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1056
|
||||
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1060
|
||||
- Print out the device type which is used by @manzke in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1073
|
||||
- Hires Addition by @hipsterusername in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1063
|
||||
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
|
||||
@skurovec in https://github.com/invoke-ai/InvokeAI/pull/1081
|
||||
- Forward dream.py to invoke.py using the same interpreter, add deprecation
|
||||
warning by @db3000 in https://github.com/invoke-ai/InvokeAI/pull/1077
|
||||
- fix noisy images at high step counts by @lstein in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1086
|
||||
- Generalize facetool strength argument by @db3000 in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1078
|
||||
- Enable fast switching among models at the invoke> command line by @lstein in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1066
|
||||
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1095
|
||||
- Update generate.py by @unreleased in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1109
|
||||
- Update 'ldm' env to 'invokeai' in troubleshooting steps by @19wolf in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1125
|
||||
- Fixed documentation typos and resolved merge conflicts by @rupeshs in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1123
|
||||
- Fix broken doc links, fix malaprop in the project subtitle by @majick in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1131
|
||||
- Only output facetool parameters if enhancing faces by @db3000 in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1119
|
||||
- Update gitignore to ignore codeformer weights at new location by
|
||||
@spezialspezial in https://github.com/invoke-ai/InvokeAI/pull/1136
|
||||
- fix links to point to invoke-ai.github.io #1117 by @mauwii in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1143
|
||||
- Rework-mkdocs by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1144
|
||||
- add option to CLI and pngwriter that allows user to set PNG compression level
|
||||
by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1127
|
||||
- Fix img2img DDIM index out of bound by @wfng92 in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1137
|
||||
- Fix gh actions by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1128
|
||||
- update mac instructions to use invokeai for env name by @willwillems in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1030
|
||||
- Update .gitignore by @blessedcoolant in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1040
|
||||
- reintroduce fix for m1 from https://github.com/invoke-ai/InvokeAI/pull/579
|
||||
missing after merge by @skurovec in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1056
|
||||
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1060
|
||||
- Print out the device type which is used by @manzke in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1073
|
||||
- Hires Addition by @hipsterusername in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1063
|
||||
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
|
||||
@skurovec in https://github.com/invoke-ai/InvokeAI/pull/1081
|
||||
- Forward dream.py to invoke.py using the same interpreter, add deprecation
|
||||
warning by @db3000 in https://github.com/invoke-ai/InvokeAI/pull/1077
|
||||
- fix noisy images at high step counts by @lstein in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1086
|
||||
- Generalize facetool strength argument by @db3000 in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1078
|
||||
- Enable fast switching among models at the invoke> command line by @lstein in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1066
|
||||
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1095
|
||||
- Fixed documentation typos and resolved merge conflicts by @rupeshs in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1123
|
||||
- Only output facetool parameters if enhancing faces by @db3000 in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1119
|
||||
- add option to CLI and pngwriter that allows user to set PNG compression level
|
||||
by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1127
|
||||
- Fix img2img DDIM index out of bound by @wfng92 in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1137
|
||||
- Add text prompt to inpaint mask support by @lstein in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1133
|
||||
- Respect http[s] protocol when making socket.io middleware by @damian0815 in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/976
|
||||
- WebUI: Adds Codeformer support by @psychedelicious in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1151
|
||||
- Skips normalizing prompts for web UI metadata by @psychedelicious in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1165
|
||||
- Add Asymmetric Tiling by @carson-katri in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1132
|
||||
- Web UI: Increases max CFG Scale to 200 by @psychedelicious in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1172
|
||||
- Corrects color channels in face restoration; Fixes #1167 by @psychedelicious
|
||||
in https://github.com/invoke-ai/InvokeAI/pull/1175
|
||||
- Flips channels using array slicing instead of using OpenCV by @psychedelicious
|
||||
in https://github.com/invoke-ai/InvokeAI/pull/1178
|
||||
- Fix typo in docs: s/Formally/Formerly by @noodlebox in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1176
|
||||
- fix clipseg loading problems by @lstein in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1177
|
||||
- Correct color channels in upscale using array slicing by @wfng92 in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1181
|
||||
- Web UI: Filters existing images when adding new images; Fixes #1085 by
|
||||
@psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1171
|
||||
- fix a number of bugs in textual inversion by @lstein in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1190
|
||||
- Improve !fetch, add !replay command by @ArDiouscuros in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/882
|
||||
- Fix generation of image with s>1000 by @holstvoogd in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/951
|
||||
- Web UI: Gallery improvements by @psychedelicious in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1198
|
||||
- Update CLI.md by @krummrey in https://github.com/invoke-ai/InvokeAI/pull/1211
|
||||
- outcropping improvements by @lstein in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1207
|
||||
- add support for loading VAE autoencoders by @lstein in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1216
|
||||
- remove duplicate fix_func for MPS by @wfng92 in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1210
|
||||
- Metadata storage and retrieval fixes by @lstein in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1204
|
||||
- nix: add shell.nix file by @Cloudef in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1170
|
||||
- Web UI: Changes vite dist asset paths to relative by @psychedelicious in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1185
|
||||
- Web UI: Removes isDisabled from PromptInput by @psychedelicious in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1187
|
||||
- Allow user to generate images with initial noise as on M1 / mps system by
|
||||
@ArDiouscuros in https://github.com/invoke-ai/InvokeAI/pull/981
|
||||
- feat: adding filename format template by @plucked in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/968
|
||||
- Web UI: Fixes broken bundle by @psychedelicious in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1242
|
||||
- Support runwayML custom inpainting model by @lstein in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1243
|
||||
- Update IMG2IMG.md by @talitore in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1262
|
||||
- New dockerfile - including a build- and a run- script as well as a GH-Action
|
||||
by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1233
|
||||
- cut over from karras to model noise schedule for higher steps by @lstein in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1222
|
||||
- Prompt tweaks by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1268
|
||||
- Outpainting implementation by @Kyle0654 in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1251
|
||||
- fixing aspect ratio on hires by @tjennings in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1249
|
||||
- Fix-build-container-action by @mauwii in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1274
|
||||
- handle all unicode characters by @damian0815 in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1276
|
||||
- adds models.user.yml to .gitignore by @JakeHL in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1281
|
||||
- remove debug branch, set fail-fast to false by @mauwii in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1284
|
||||
- Protect-secrets-on-pr by @mauwii in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1285
|
||||
- Web UI: Adds initial inpainting implementation by @psychedelicious in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1225
|
||||
- fix environment-mac.yml - tested on x64 and arm64 by @mauwii in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1289
|
||||
- Use proper authentication to download model by @mauwii in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1287
|
||||
- Prevent indexing error for mode RGB by @spezialspezial in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1294
|
||||
- Integrate sd-v1-5 model into test matrix (easily expandable), remove
|
||||
unecesarry caches by @mauwii in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1293
|
||||
- add --no-interactive to preload_models step by @mauwii in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1302
|
||||
- 1-click installer and updater. Uses micromamba to install git and conda into a
|
||||
contained environment (if necessary) before running the normal installation
|
||||
script by @cmdr2 in https://github.com/invoke-ai/InvokeAI/pull/1253
|
||||
- preload_models.py script downloads the weight files by @lstein in
|
||||
https://github.com/invoke-ai/InvokeAI/pull/1290
|
||||
|
||||
## v2.0.1 <small>(13 October 2022)</small>
|
||||
|
||||
- fix noisy images at high step count when using k\* samplers
|
||||
- dream.py script now calls invoke.py module directly rather than via a new
|
||||
python process (which could break the environment)
|
||||
|
||||
## v2.0.0 <small>(9 October 2022)</small>
|
||||
|
||||
- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains for
|
||||
backward compatibility.
|
||||
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
|
||||
- Support for [inpainting](features/INPAINTING.md) and
|
||||
[outpainting](features/OUTPAINTING.md)
|
||||
- img2img runs on all k\* samplers
|
||||
- Support for
|
||||
[negative prompts](features/PROMPTS.md#negative-and-unconditioned-prompts)
|
||||
- Support for CodeFormer face reconstruction
|
||||
- Support for Textual Inversion on Macintoshes
|
||||
- Support in both WebGUI and CLI for
|
||||
[post-processing of previously-generated images](features/POSTPROCESS.md)
|
||||
using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E
|
||||
infinite canvas), and "embiggen" upscaling. See the `!fix` command.
|
||||
- New `--hires` option on `invoke>` line allows
|
||||
[larger images to be created without duplicating elements](features/CLI.md#this-is-an-example-of-txt2img),
|
||||
at the cost of some performance.
|
||||
- New `--perlin` and `--threshold` options allow you to add and control
|
||||
variation during image generation (see
|
||||
[Thresholding and Perlin Noise Initialization](features/OTHER.md#thresholding-and-perlin-noise-initialization-options))
|
||||
- Extensive metadata now written into PNG files, allowing reliable regeneration
|
||||
of images and tweaking of previous settings.
|
||||
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac
|
||||
platforms.
|
||||
- Improved [command-line completion behavior](features/CLI.md) New commands
|
||||
added:
|
||||
- List command-line history with `!history`
|
||||
- Search command-line history with `!search`
|
||||
- Clear history with `!clear`
|
||||
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
|
||||
configure. To switch away from auto use the new flag like
|
||||
`--precision=float32`.
|
||||
|
||||
## v1.14 <small>(11 September 2022)</small>
|
||||
|
||||
- Memory optimizations for small-RAM cards. 512x512 now possible on 4 GB GPUs.
|
||||
- Full support for Apple hardware with M1 or M2 chips.
|
||||
- Add "seamless mode" for circular tiling of image. Generates beautiful effects.
|
||||
([prixt](https://github.com/prixt)).
|
||||
- Inpainting support.
|
||||
- Improved web server GUI.
|
||||
- Lots of code and documentation cleanups.
|
||||
|
||||
## v1.13 <small>(3 September 2022)</small>
|
||||
|
||||
- Support image variations (see [VARIATIONS](features/VARIATIONS.md)
|
||||
([Kevin Gibbons](https://github.com/bakkot) and many contributors and
|
||||
reviewers)
|
||||
- Supports a Google Colab notebook for a standalone server running on Google
|
||||
hardware [Arturo Mendivil](https://github.com/artmen1516)
|
||||
- WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling
|
||||
[Kevin Gibbons](https://github.com/bakkot)
|
||||
- WebUI supports incremental display of in-progress images during generation
|
||||
[Kevin Gibbons](https://github.com/bakkot)
|
||||
- A new configuration file scheme that allows new models (including upcoming
|
||||
stable-diffusion-v1.5) to be added without altering the code.
|
||||
([David Wager](https://github.com/maddavid12))
|
||||
- Can specify --grid on invoke.py command line as the default.
|
||||
- Miscellaneous internal bug and stability fixes.
|
||||
- Works on M1 Apple hardware.
|
||||
- Multiple bug fixes.
|
||||
|
||||
---
|
||||
|
||||
## v1.12 (28 August 2022)
|
||||
## v1.12 <small>(28 August 2022)</small>
|
||||
|
||||
- Improved file handling, including ability to read prompts from standard input.
|
||||
(kudos to [Yunsaki](https://github.com/yunsaki)
|
||||
- The web server is now integrated with the dream.py script. Invoke by adding --web to
|
||||
the dream.py command arguments.
|
||||
- The web server is now integrated with the invoke.py script. Invoke by adding
|
||||
--web to the invoke.py command arguments.
|
||||
- Face restoration and upscaling via GFPGAN and Real-ESGAN are now automatically
|
||||
enabled if the GFPGAN directory is located as a sibling to Stable Diffusion.
|
||||
VRAM requirements are modestly reduced. Thanks to both [Blessedcoolant](https://github.com/blessedcoolant) and
|
||||
VRAM requirements are modestly reduced. Thanks to both
|
||||
[Blessedcoolant](https://github.com/blessedcoolant) and
|
||||
[Oceanswave](https://github.com/oceanswave) for their work on this.
|
||||
- You can now swap samplers on the dream> command line. [Blessedcoolant](https://github.com/blessedcoolant)
|
||||
- You can now swap samplers on the invoke> command line.
|
||||
[Blessedcoolant](https://github.com/blessedcoolant)
|
||||
|
||||
---
|
||||
|
||||
## v1.11 (26 August 2022)
|
||||
## v1.11 <small>(26 August 2022)</small>
|
||||
|
||||
- NEW FEATURE: Support upscaling and face enhancement using the GFPGAN module. (kudos to [Oceanswave](https://github.com/Oceanswave)
|
||||
- You now can specify a seed of -1 to use the previous image's seed, -2 to use the seed for the image generated before that, etc.
|
||||
Seed memory only extends back to the previous command, but will work on all images generated with the -n# switch.
|
||||
- NEW FEATURE: Support upscaling and face enhancement using the GFPGAN module.
|
||||
(kudos to [Oceanswave](https://github.com/Oceanswave)
|
||||
- You now can specify a seed of -1 to use the previous image's seed, -2 to use
|
||||
the seed for the image generated before that, etc. Seed memory only extends
|
||||
back to the previous command, but will work on all images generated with the
|
||||
-n# switch.
|
||||
- Variant generation support temporarily disabled pending more general solution.
|
||||
- Created a feature branch named **yunsaki-morphing-dream** which adds experimental support for
|
||||
iteratively modifying the prompt and its parameters. Please see[ Pull Request #86](https://github.com/lstein/stable-diffusion/pull/86)
|
||||
for a synopsis of how this works. Note that when this feature is eventually added to the main branch, it will may be modified
|
||||
significantly.
|
||||
- Created a feature branch named **yunsaki-morphing-invoke** which adds
|
||||
experimental support for iteratively modifying the prompt and its parameters.
|
||||
Please
|
||||
see[Pull Request #86](https://github.com/lstein/stable-diffusion/pull/86) for
|
||||
a synopsis of how this works. Note that when this feature is eventually added
|
||||
to the main branch, it will may be modified significantly.
|
||||
|
||||
---
|
||||
|
||||
## v1.10 (25 August 2022)
|
||||
## v1.10 <small>(25 August 2022)</small>
|
||||
|
||||
- A barebones but fully functional interactive web server for online generation of txt2img and img2img.
|
||||
- A barebones but fully functional interactive web server for online generation
|
||||
of txt2img and img2img.
|
||||
|
||||
---
|
||||
|
||||
## v1.09 (24 August 2022)
|
||||
## v1.09 <small>(24 August 2022)</small>
|
||||
|
||||
- A new -v option allows you to generate multiple variants of an initial image
|
||||
in img2img mode. (kudos to [Oceanswave](https://github.com/Oceanswave). [
|
||||
See this discussion in the PR for examples and details on use](https://github.com/lstein/stable-diffusion/pull/71#issuecomment-1226700810))
|
||||
- Added ability to personalize text to image generation (kudos to [Oceanswave](https://github.com/Oceanswave) and [nicolai256](https://github.com/nicolai256))
|
||||
in img2img mode. (kudos to [Oceanswave](https://github.com/Oceanswave).
|
||||
[ See this discussion in the PR for examples and details on use](https://github.com/lstein/stable-diffusion/pull/71#issuecomment-1226700810))
|
||||
- Added ability to personalize text to image generation (kudos to
|
||||
[Oceanswave](https://github.com/Oceanswave) and
|
||||
[nicolai256](https://github.com/nicolai256))
|
||||
- Enabled all of the samplers from k_diffusion
|
||||
|
||||
---
|
||||
|
||||
## v1.08 (24 August 2022)
|
||||
## v1.08 <small>(24 August 2022)</small>
|
||||
|
||||
- Escape single quotes on the dream> command before trying to parse. This avoids
|
||||
parse errors.
|
||||
- Escape single quotes on the invoke> command before trying to parse. This
|
||||
avoids parse errors.
|
||||
- Removed instruction to get Python3.8 as first step in Windows install.
|
||||
Anaconda3 does it for you.
|
||||
- Added bounds checks for numeric arguments that could cause crashes.
|
||||
@@ -66,40 +315,42 @@
|
||||
|
||||
---
|
||||
|
||||
## v1.07 (23 August 2022)
|
||||
## v1.07 <small>(23 August 2022)</small>
|
||||
|
||||
- Image filenames will now never fill gaps in the sequence, but will be assigned the
|
||||
next higher name in the chosen directory. This ensures that the alphabetic and chronological
|
||||
sort orders are the same.
|
||||
- Image filenames will now never fill gaps in the sequence, but will be assigned
|
||||
the next higher name in the chosen directory. This ensures that the alphabetic
|
||||
and chronological sort orders are the same.
|
||||
|
||||
---
|
||||
|
||||
## v1.06 (23 August 2022)
|
||||
## v1.06 <small>(23 August 2022)</small>
|
||||
|
||||
- Added weighted prompt support contributed by [xraxra](https://github.com/xraxra)
|
||||
- Example of using weighted prompts to tweak a demonic figure contributed by [bmaltais](https://github.com/bmaltais)
|
||||
- Added weighted prompt support contributed by
|
||||
[xraxra](https://github.com/xraxra)
|
||||
- Example of using weighted prompts to tweak a demonic figure contributed by
|
||||
[bmaltais](https://github.com/bmaltais)
|
||||
|
||||
---
|
||||
|
||||
## v1.05 (22 August 2022 - after the drop)
|
||||
## v1.05 <small>(22 August 2022 - after the drop)</small>
|
||||
|
||||
- Filenames now use the following formats:
|
||||
000010.95183149.png -- Two files produced by the same command (e.g. -n2),
|
||||
000010.26742632.png -- distinguished by a different seed.
|
||||
- Filenames now use the following formats: 000010.95183149.png -- Two files
|
||||
produced by the same command (e.g. -n2), 000010.26742632.png -- distinguished
|
||||
by a different seed.
|
||||
|
||||
000011.455191342.01.png -- Two files produced by the same command using
|
||||
000011.455191342.02.png -- a batch size>1 (e.g. -b2). They have the same seed.
|
||||
|
||||
000011.4160627868.grid#1-4.png -- a grid of four images (-g); the whole grid can
|
||||
be regenerated with the indicated key
|
||||
000011.4160627868.grid#1-4.png -- a grid of four images (-g); the whole grid
|
||||
can be regenerated with the indicated key
|
||||
|
||||
- It should no longer be possible for one image to overwrite another
|
||||
- You can use the "cd" and "pwd" commands at the dream> prompt to set and retrieve
|
||||
the path of the output directory.
|
||||
- You can use the "cd" and "pwd" commands at the invoke> prompt to set and
|
||||
retrieve the path of the output directory.
|
||||
|
||||
---
|
||||
|
||||
## v1.04 (22 August 2022 - after the drop)
|
||||
## v1.04 <small>(22 August 2022 - after the drop)</small>
|
||||
|
||||
- Updated README to reflect installation of the released weights.
|
||||
- Suppressed very noisy and inconsequential warning when loading the frozen CLIP
|
||||
@@ -107,31 +358,33 @@
|
||||
|
||||
---
|
||||
|
||||
## v1.03 (22 August 2022)
|
||||
## v1.03 <small>(22 August 2022)</small>
|
||||
|
||||
- The original txt2img and img2img scripts from the CompViz repository have been moved into
|
||||
a subfolder named "orig_scripts", to reduce confusion.
|
||||
- The original txt2img and img2img scripts from the CompViz repository have been
|
||||
moved into a subfolder named "orig_scripts", to reduce confusion.
|
||||
|
||||
---
|
||||
|
||||
## v1.02 (21 August 2022)
|
||||
## v1.02 <small>(21 August 2022)</small>
|
||||
|
||||
- A copy of the prompt and all of its switches and options is now stored in the corresponding
|
||||
image in a tEXt metadata field named "Dream". You can read the prompt using scripts/images2prompt.py,
|
||||
or an image editor that allows you to explore the full metadata.
|
||||
**Please run "conda env update" to load the k_lms dependencies!!**
|
||||
- A copy of the prompt and all of its switches and options is now stored in the
|
||||
corresponding image in a tEXt metadata field named "Dream". You can read the
|
||||
prompt using scripts/images2prompt.py, or an image editor that allows you to
|
||||
explore the full metadata. **Please run "conda env update" to load the k_lms
|
||||
dependencies!!**
|
||||
|
||||
---
|
||||
|
||||
## v1.01 (21 August 2022)
|
||||
## v1.01 <small>(21 August 2022)</small>
|
||||
|
||||
- added k_lms sampling.
|
||||
**Please run "conda env update" to load the k_lms dependencies!!**
|
||||
- use half precision arithmetic by default, resulting in faster execution and lower memory requirements
|
||||
Pass argument --full_precision to dream.py to get slower but more accurate image generation
|
||||
- added k_lms sampling. **Please run "conda env update" to load the k_lms
|
||||
dependencies!!**
|
||||
- use half precision arithmetic by default, resulting in faster execution and
|
||||
lower memory requirements. Pass argument --full_precision to invoke.py to get
|
||||
slower but more accurate image generation
|
||||
|
||||
---
|
||||
|
||||
## Links
|
||||
|
||||
- **[Read Me](../readme.md)**
|
||||
- **[Read Me](index.md)**
|
||||
|
||||
BIN
docs/assets/Lincoln-and-Parrot-512-transparent.png
Executable file
|
After Width: | Height: | Size: 284 KiB |
BIN
docs/assets/Lincoln-and-Parrot-512.png
Normal file
|
After Width: | Height: | Size: 252 KiB |
BIN
docs/assets/inpainting/000019.curly.hair.deselected.png
Normal file
|
After Width: | Height: | Size: 519 KiB |
BIN
docs/assets/inpainting/000019.curly.hair.masked.png
Normal file
|
After Width: | Height: | Size: 11 KiB |
BIN
docs/assets/inpainting/000019.curly.hair.selected.png
Normal file
|
After Width: | Height: | Size: 519 KiB |
BIN
docs/assets/inpainting/000024.801380492.png
Normal file
|
After Width: | Height: | Size: 439 KiB |
BIN
docs/assets/invoke-web-server-1.png
Normal file
|
After Width: | Height: | Size: 983 KiB |
BIN
docs/assets/invoke-web-server-2.png
Normal file
|
After Width: | Height: | Size: 101 KiB |
BIN
docs/assets/invoke-web-server-3.png
Normal file
|
After Width: | Height: | Size: 546 KiB |
BIN
docs/assets/invoke-web-server-4.png
Normal file
|
After Width: | Height: | Size: 336 KiB |
BIN
docs/assets/invoke-web-server-5.png
Normal file
|
After Width: | Height: | Size: 29 KiB |
BIN
docs/assets/invoke-web-server-6.png
Normal file
|
After Width: | Height: | Size: 148 KiB |
BIN
docs/assets/invoke-web-server-7.png
Normal file
|
After Width: | Height: | Size: 637 KiB |
BIN
docs/assets/invoke-web-server-8.png
Normal file
|
After Width: | Height: | Size: 529 KiB |
BIN
docs/assets/invoke-web-server-9.png
Normal file
|
After Width: | Height: | Size: 1.1 MiB |
BIN
docs/assets/invoke_web_dark.png
Normal file
|
After Width: | Height: | Size: 838 KiB |
BIN
docs/assets/invoke_web_light.png
Normal file
|
After Width: | Height: | Size: 838 KiB |
BIN
docs/assets/invoke_web_server.png
Normal file
|
After Width: | Height: | Size: 989 KiB |
BIN
docs/assets/outpainting/curly-outcrop-2.png
Normal file
|
After Width: | Height: | Size: 635 KiB |
|
Before Width: | Height: | Size: 501 KiB After Width: | Height: | Size: 501 KiB |
|
Before Width: | Height: | Size: 473 KiB After Width: | Height: | Size: 473 KiB |
|
Before Width: | Height: | Size: 618 KiB After Width: | Height: | Size: 618 KiB |
|
Before Width: | Height: | Size: 557 KiB After Width: | Height: | Size: 557 KiB |
BIN
docs/assets/prompt_syntax/apricots--1.png
Normal file
|
After Width: | Height: | Size: 587 KiB |
BIN
docs/assets/prompt_syntax/apricots--2.png
Normal file
|
After Width: | Height: | Size: 572 KiB |
BIN
docs/assets/prompt_syntax/apricots--3.png
Normal file
|
After Width: | Height: | Size: 557 KiB |
BIN
docs/assets/prompt_syntax/apricots-0.png
Normal file
|
After Width: | Height: | Size: 571 KiB |
BIN
docs/assets/prompt_syntax/apricots-1.png
Normal file
|
After Width: | Height: | Size: 570 KiB |
BIN
docs/assets/prompt_syntax/apricots-2.png
Normal file
|
After Width: | Height: | Size: 568 KiB |
BIN
docs/assets/prompt_syntax/apricots-3.png
Normal file
|
After Width: | Height: | Size: 527 KiB |