Compare commits
version_0. ... v2.1.0 (1945 commits)
[Commit table (Author | SHA1 | Date): 1945 rows. Only the abbreviated SHA1 column was captured, from a338616ced through c477525036; the Author and Date cells are empty in this extract.]
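The count in the header can be reproduced locally with git. A minimal sketch in Python, assuming the repository is cloned and assuming the refs are named version_0 and v2.1.0 (the base tag name is truncated in the header above, so the exact name is a guess):

import subprocess

def commits_between(repo_dir: str, base: str, head: str) -> list[str]:
    # Two-dot rev-list matches what the compare page lists:
    # commits reachable from `head` but not from `base`.
    out = subprocess.run(
        ["git", "-C", repo_dir, "rev-list", "--abbrev-commit", f"{base}..{head}"],
        capture_output=True, text=True, check=True,
    )
    return out.stdout.split()

if __name__ == "__main__":
    shas = commits_between(".", "version_0", "v2.1.0")  # hypothetical ref names
    print(len(shas), "commits")  # the page header reports 1945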
32  .dev_scripts/diff_images.py  Normal file
@@ -0,0 +1,32 @@
import argparse

import numpy as np
from PIL import Image


def read_image_int16(image_path):
    image = Image.open(image_path)
    return np.array(image).astype(np.int16)


def calc_images_mean_L1(image1_path, image2_path):
    image1 = read_image_int16(image1_path)
    image2 = read_image_int16(image2_path)
    assert image1.shape == image2.shape
    mean_L1 = np.abs(image1 - image2).mean()
    return mean_L1


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('image1_path')
    parser.add_argument('image2_path')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    mean_L1 = calc_images_mean_L1(args.image1_path, args.image2_path)
    print(mean_L1)
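The int16 cast in read_image_int16 is what makes the subtraction safe: subtracting uint8 arrays would wrap around instead of going negative. A minimal self-check of the metric, assuming Pillow and NumPy are installed and diff_images.py is importable (e.g. run from inside .dev_scripts); the file names here are hypothetical:

import numpy as np
from PIL import Image

from diff_images import calc_images_mean_L1

# two 4x4 RGB images differing in a single channel value
a = np.zeros((4, 4, 3), dtype=np.uint8)
b = a.copy()
b[0, 0, 0] = 255

Image.fromarray(a).save("a.png")
Image.fromarray(b).save("b.png")

print(calc_images_mean_L1("a.png", "a.png"))  # 0.0 for identical images
print(calc_images_mean_L1("a.png", "b.png"))  # 5.3125, i.e. 255 / 48 values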
.dev_scripts/images/v1_4_astronaut_rides_horse_plms_step50_seed42.png  Binary file added (Size: 416 KiB)
1  .dev_scripts/sample_command.txt  Normal file
@@ -0,0 +1 @@
"a photograph of an astronaut riding a horse" -s50 -S42
19  .dev_scripts/test_regression_txt2img_dream_v1_4.sh  Normal file
@@ -0,0 +1,19 @@
# generate an image
PROMPT_FILE=".dev_scripts/sample_command.txt"
OUT_DIR="outputs/img-samples/test_regression_txt2img_v1_4"
SAMPLES_DIR=${OUT_DIR}
python scripts/dream.py \
    --from_file ${PROMPT_FILE} \
    --outdir ${OUT_DIR} \
    --sampler plms

# original output by CompVis/stable-diffusion
IMAGE1=".dev_scripts/images/v1_4_astronaut_rides_horse_plms_step50_seed42.png"
# new output
IMAGE2=`ls -A ${SAMPLES_DIR}/*.png | sort | tail -n 1`

echo ""
echo "comparing the following two images"
echo "IMAGE1: ${IMAGE1}"
echo "IMAGE2: ${IMAGE2}"
python .dev_scripts/diff_images.py ${IMAGE1} ${IMAGE2}
23  .dev_scripts/test_regression_txt2img_v1_4.sh  Normal file
@@ -0,0 +1,23 @@
# generate an image
PROMPT="a photograph of an astronaut riding a horse"
OUT_DIR="outputs/txt2img-samples/test_regression_txt2img_v1_4"
SAMPLES_DIR="outputs/txt2img-samples/test_regression_txt2img_v1_4/samples"
python scripts/orig_scripts/txt2img.py \
    --prompt "${PROMPT}" \
    --outdir ${OUT_DIR} \
    --plms \
    --ddim_steps 50 \
    --n_samples 1 \
    --n_iter 1 \
    --seed 42

# original output by CompVis/stable-diffusion
IMAGE1=".dev_scripts/images/v1_4_astronaut_rides_horse_plms_step50_seed42.png"
# new output
IMAGE2=`ls -A ${SAMPLES_DIR}/*.png | sort | tail -n 1`

echo ""
echo "comparing the following two images"
echo "IMAGE1: ${IMAGE1}"
echo "IMAGE2: ${IMAGE2}"
python .dev_scripts/diff_images.py ${IMAGE1} ${IMAGE2}
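Both regression scripts only print the mean L1 and leave the pass/fail judgment to the reader. A sketch of wrapping the same comparison in an exit-code gate; the threshold is an assumption for illustration, not a value the repository defines:

import glob
import sys

from diff_images import calc_images_mean_L1  # assumes .dev_scripts is on sys.path

REFERENCE = ".dev_scripts/images/v1_4_astronaut_rides_horse_plms_step50_seed42.png"
THRESHOLD = 1.0  # hypothetical tolerance, in 8-bit intensity units

def latest_png(samples_dir: str) -> str:
    # same selection rule as the shell scripts: lexicographically last *.png
    return sorted(glob.glob(f"{samples_dir}/*.png"))[-1]

if __name__ == "__main__":
    candidate = latest_png(sys.argv[1])
    score = calc_images_mean_L1(REFERENCE, candidate)
    print(f"mean L1 = {score:.4f}")
    sys.exit(0 if score <= THRESHOLD else 1)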
3  .dockerignore  Normal file
@@ -0,0 +1,3 @@
*
!environment*.yml
!docker-build
4  .gitattributes  vendored  Normal file
@@ -0,0 +1,4 @@
# Auto normalizes line endings on commit so devs don't need to change local settings.
# Only affects text files and ignores other file types.
# For more info see: https://www.aleksandrhovhannisyan.com/blog/crlf-vs-lf-normalizing-line-endings-in-git/
* text=auto
4  .github/CODEOWNERS  vendored  Normal file
@@ -0,0 +1,4 @@
ldm/invoke/pngwriter.py @CapableWeb
ldm/invoke/server_legacy.py @CapableWeb
scripts/legacy_api.py @CapableWeb
tests/legacy_tests.sh @CapableWeb
102  .github/ISSUE_TEMPLATE/BUG_REPORT.yml  vendored  Normal file
@@ -0,0 +1,102 @@
name: 🐞 Bug Report
description: File a bug report
title: '[bug]: '
labels: ['bug']
# assignees:
#   - moderator_bot
#   - lstein

body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to fill out this Bug Report!

  - type: checkboxes
    attributes:
      label: Is there an existing issue for this?
      description: |
        Please use the [search function](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
        first to see if an issue already exists for the bug you encountered.
      options:
        - label: I have searched the existing issues
          required: true

  - type: markdown
    attributes:
      value: __Describe your environment__

  - type: dropdown
    id: os_dropdown
    attributes:
      label: OS
      description: Which operating system did you use when the bug occurred
      multiple: false
      options:
        - 'Linux'
        - 'Windows'
        - 'macOS'
    validations:
      required: true

  - type: dropdown
    id: gpu_dropdown
    attributes:
      label: GPU
      description: Which kind of graphics adapter is your system using
      multiple: false
      options:
        - 'cuda'
        - 'amd'
        - 'mps'
        - 'cpu'
    validations:
      required: true

  - type: input
    id: vram
    attributes:
      label: VRAM
      description: Size of the VRAM if known
      placeholder: 8GB
    validations:
      required: false

  - type: textarea
    id: what-happened
    attributes:
      label: What happened?
      description: |
        Briefly describe what happened, what you expected to happen and how to reproduce this bug.
      placeholder: When using the web interface and right-clicking on button X, instead of the popup menu the error Y appears
    validations:
      required: true

  - type: textarea
    attributes:
      label: Screenshots
      description: If applicable, add screenshots to help explain your problem
      placeholder: this is what the result looked like <screenshot>
    validations:
      required: false

  - type: textarea
    attributes:
      label: Additional context
      description: Add any other context about the problem here
      placeholder: Only happens when there is full moon and Friday the 13th on Christmas Eve 🎅🏻
    validations:
      required: false

  - type: input
    id: contact
    attributes:
      label: Contact Details
      description: __OPTIONAL__ How can we get in touch with you if we need more info (besides this issue)?
      placeholder: ex. email@example.com, discordname, twitter, ...
    validations:
      required: false
56  .github/ISSUE_TEMPLATE/FEATURE_REQUEST.yml  vendored  Normal file
@@ -0,0 +1,56 @@
name: Feature Request
description: Commit an idea or request a new feature
title: '[enhancement]: '
labels: ['enhancement']
# assignees:
#   - lstein
#   - tildebyte

body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to fill out this Feature request!

  - type: checkboxes
    attributes:
      label: Is there an existing issue for this?
      description: |
        Please make use of the [search function](https://github.com/invoke-ai/InvokeAI/labels/enhancement)
        to see if a similar issue already exists for the feature you want to request
      options:
        - label: I have searched the existing issues
          required: true

  - type: input
    id: contact
    attributes:
      label: Contact Details
      description: __OPTIONAL__ How could we get in touch with you if we need more info (besides this issue)?
      placeholder: ex. email@example.com, discordname, twitter, ...
    validations:
      required: false

  - type: textarea
    id: whatisexpected
    attributes:
      label: What should this feature add?
      description: Please try to explain the functionality this feature should add
      placeholder: |
        Instead of one huge text field, it would be nice to have forms for bug reports, feature requests, ...
        Great benefits with automatic labeling, assigning and other functionalities not available in that form
        via old-fashioned markdown templates. I would also love to see the use of a moderator bot 🤖 like
        https://github.com/marketplace/actions/issue-moderator-with-commands to auto close old issues and other things
    validations:
      required: true

  - type: textarea
    attributes:
      label: Alternatives
      description: Describe alternatives you've considered
      placeholder: A clear and concise description of any alternative solutions or features you've considered.

  - type: textarea
    attributes:
      label: Additional Content
      description: Add any other context or screenshots about the feature request here.
      placeholder: This is a mockup of the design as I imagine it <screenshot>
14  .github/ISSUE_TEMPLATE/config.yml  vendored  Normal file
@@ -0,0 +1,14 @@
blank_issues_enabled: false
contact_links:
  - name: Project-Documentation
    url: https://invoke-ai.github.io/InvokeAI/
    about: Should be your first place to go when looking for manuals/FAQs regarding our InvokeAI Toolkit
  - name: Discord
    url: https://discord.gg/ZmtBAhwWhy
    about: Our Discord community may be able to help you out via live chat
  - name: GitHub Community Support
    url: https://github.com/orgs/community/discussions
    about: Please ask and answer questions regarding the GitHub Platform here.
  - name: GitHub Security Bug Bounty
    url: https://bounty.github.com/
    about: Please report security vulnerabilities of the GitHub Platform here.
42  .github/workflows/build-container.yml  vendored  Normal file
@@ -0,0 +1,42 @@
# Build the image without pushing, to confirm it is still buildable;
# confirming functionality would unfortunately need far more resources.
name: build container image
on:
  push:
    branches:
      - 'main'
      - 'development'
  pull_request:
    branches:
      - 'main'
      - 'development'

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: prepare docker-tag
        env:
          repository: ${{ github.repository }}
        run: echo "dockertag=${repository,,}" >> $GITHUB_ENV
      - name: Checkout
        uses: actions/checkout@v3
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Cache Docker layers
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache
          key: buildx-${{ hashFiles('docker-build/Dockerfile') }}
      - name: Build container
        uses: docker/build-push-action@v3
        with:
          context: .
          file: docker-build/Dockerfile
          platforms: linux/amd64
          push: false
          tags: ${{ env.dockertag }}:latest
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache
80  .github/workflows/create-caches.yml  vendored  Normal file
@@ -0,0 +1,80 @@
name: Create Caches

on: workflow_dispatch

jobs:
  os_matrix:
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
        include:
          - os: ubuntu-latest
            environment-file: environment.yml
            default-shell: bash -l {0}
          - os: macos-latest
            environment-file: environment-mac.yml
            default-shell: bash -l {0}
    name: Test invoke.py on ${{ matrix.os }} with conda
    runs-on: ${{ matrix.os }}
    defaults:
      run:
        shell: ${{ matrix.default-shell }}
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3

      - name: setup miniconda
        uses: conda-incubator/setup-miniconda@v2
        with:
          auto-activate-base: false
          auto-update-conda: false
          miniconda-version: latest

      - name: set environment
        run: |
          [[ "$GITHUB_REF" == 'refs/heads/main' ]] \
            && echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> $GITHUB_ENV \
            || echo "TEST_PROMPTS=tests/dev_prompts.txt" >> $GITHUB_ENV
          echo "CONDA_ROOT=$CONDA" >> $GITHUB_ENV
          echo "CONDA_ENV_NAME=invokeai" >> $GITHUB_ENV

      - name: Use Cached Stable Diffusion v1.4 Model
        id: cache-sd-v1-4
        uses: actions/cache@v3
        env:
          cache-name: cache-sd-v1-4
        with:
          path: models/ldm/stable-diffusion-v1/model.ckpt
          key: ${{ env.cache-name }}
          restore-keys: ${{ env.cache-name }}

      - name: Download Stable Diffusion v1.4 Model
        if: ${{ steps.cache-sd-v1-4.outputs.cache-hit != 'true' }}
        run: |
          [[ -d models/ldm/stable-diffusion-v1 ]] \
            || mkdir -p models/ldm/stable-diffusion-v1
          [[ -r models/ldm/stable-diffusion-v1/model.ckpt ]] \
            || curl \
              -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
              -o models/ldm/stable-diffusion-v1/model.ckpt \
              -L https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt

      - name: Activate Conda Env
        uses: conda-incubator/setup-miniconda@v2
        with:
          activate-environment: ${{ env.CONDA_ENV_NAME }}
          environment-file: ${{ matrix.environment-file }}

      - name: Use Cached Huggingface and Torch models
        id: cache-hugginface-torch
        uses: actions/cache@v3
        env:
          cache-name: cache-hugginface-torch
        with:
          path: ~/.cache
          key: ${{ env.cache-name }}
          restore-keys: |
            ${{ env.cache-name }}-${{ hashFiles('scripts/preload_models.py') }}

      - name: run preload_models.py
        run: python scripts/preload_models.py
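The download step above is a guard-then-fetch: create the directory, skip the download when the checkpoint already exists, otherwise pull it with an Authorization header. The same pattern as a Python sketch; HF_TOKEN is a hypothetical environment variable standing in for the workflow's secrets.HUGGINGFACE_TOKEN:

import os
import urllib.request

def fetch_if_missing(url: str, dest: str, token: str) -> None:
    os.makedirs(os.path.dirname(dest), exist_ok=True)
    if os.path.exists(dest):  # cache hit: nothing to do
        return
    req = urllib.request.Request(url, headers={"Authorization": f"Bearer {token}"})
    with urllib.request.urlopen(req) as resp, open(dest, "wb") as f:
        while chunk := resp.read(1 << 20):  # stream in 1 MiB chunks
            f.write(chunk)

fetch_if_missing(
    "https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt",
    "models/ldm/stable-diffusion-v1/model.ckpt",
    os.environ["HF_TOKEN"],  # hypothetical variable name
)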
40  .github/workflows/mkdocs-material.yml  vendored  Normal file
@@ -0,0 +1,40 @@
name: mkdocs-material
on:
  push:
    branches:
      - 'main'
      - 'development'

jobs:
  mkdocs-material:
    runs-on: ubuntu-latest
    steps:
      - name: checkout sources
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: setup python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - name: install requirements
        run: |
          python -m \
            pip install -r requirements-mkdocs.txt

      - name: confirm buildability
        run: |
          python -m \
            mkdocs build \
            --clean \
            --verbose

      - name: deploy to gh-pages
        if: ${{ github.ref == 'refs/heads/main' }}
        run: |
          python -m \
            mkdocs gh-deploy \
            --clean \
            --force
113  .github/workflows/test-invoke-conda.yml  vendored  Normal file
@@ -0,0 +1,113 @@
name: Test invoke.py
on:
  push:
    branches:
      - 'main'
      - 'development'
      - 'fix-gh-actions-fork'
  pull_request:
    branches:
      - 'main'
      - 'development'

jobs:
  matrix:
    strategy:
      fail-fast: false
      matrix:
        stable-diffusion-model:
          # - 'https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt'
          - 'https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt'
        os:
          - ubuntu-latest
          - macOS-12
        include:
          - os: ubuntu-latest
            environment-file: environment.yml
            default-shell: bash -l {0}
          - os: macOS-12
            environment-file: environment-mac.yml
            default-shell: bash -l {0}
          # - stable-diffusion-model: https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt
          #   stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1/sd-v1-4.ckpt
          #   stable-diffusion-model-switch: stable-diffusion-1.4
          - stable-diffusion-model: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
            stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
            stable-diffusion-model-switch: stable-diffusion-1.5
    name: ${{ matrix.os }} with ${{ matrix.stable-diffusion-model-switch }}
    runs-on: ${{ matrix.os }}
    env:
      CONDA_ENV_NAME: invokeai
    defaults:
      run:
        shell: ${{ matrix.default-shell }}
    steps:
      - name: Checkout sources
        id: checkout-sources
        uses: actions/checkout@v3

      - name: create models.yaml from example
        run: cp configs/models.yaml.example configs/models.yaml

      - name: Use cached conda packages
        id: use-cached-conda-packages
        uses: actions/cache@v3
        with:
          path: ~/conda_pkgs_dir
          key: conda-pkgs-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles(matrix.environment-file) }}

      - name: Activate Conda Env
        id: activate-conda-env
        uses: conda-incubator/setup-miniconda@v2
        with:
          activate-environment: ${{ env.CONDA_ENV_NAME }}
          environment-file: ${{ matrix.environment-file }}
          miniconda-version: latest

      - name: set test prompt to main branch validation
        if: ${{ github.ref == 'refs/heads/main' }}
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> $GITHUB_ENV

      - name: set test prompt to development branch validation
        if: ${{ github.ref == 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> $GITHUB_ENV

      - name: set test prompt to Pull Request validation
        if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> $GITHUB_ENV

      - name: Download ${{ matrix.stable-diffusion-model-switch }}
        id: download-stable-diffusion-model
        run: |
          [[ -d models/ldm/stable-diffusion-v1 ]] \
            || mkdir -p models/ldm/stable-diffusion-v1
          curl \
            -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
            -o ${{ matrix.stable-diffusion-model-dl-path }} \
            -L ${{ matrix.stable-diffusion-model }}

      - name: run preload_models.py
        id: run-preload-models
        run: |
          python scripts/preload_models.py \
            --no-interactive

      - name: Run the tests
        id: run-tests
        run: |
          time python scripts/invoke.py \
            --model ${{ matrix.stable-diffusion-model-switch }} \
            --from_file ${{ env.TEST_PROMPTS }}

      - name: export conda env
        id: export-conda-env
        run: |
          mkdir -p outputs/img-samples
          conda env export --name ${{ env.CONDA_ENV_NAME }} > outputs/img-samples/environment-${{ runner.os }}-${{ runner.arch }}.yml

      - name: Archive results
        id: archive-results
        uses: actions/upload-artifact@v3
        with:
          name: results_${{ matrix.os }}_${{ matrix.stable-diffusion-model-switch }}
          path: outputs/img-samples
.gitignore
vendored
Normal file
@@ -0,0 +1,210 @@
# ignore default image save location and model symbolic link
outputs/
models/ldm/stable-diffusion-v1/model.ckpt
ldm/invoke/restoration/codeformer/weights
# ignore user models config
configs/models.user.yaml
config/models.user.yml

# ignore the Anaconda/Miniconda installer used while building Docker image
anaconda.sh

# ignore a directory which serves as a place for initial images
inputs/

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# emacs autosave and recovery files
*~
.#*

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

src
**/__pycache__/
outputs

# Logs and associated folders
# created from generated embeddings.
logs
testtube
checkpoints
# If it's a Mac
.DS_Store

# Let the frontend manage its own gitignore
!frontend/*

# Scratch folder
.scratch/
.vscode/
gfpgan/
models/ldm/stable-diffusion-v1/*.sha256

# GFPGAN model files
gfpgan/

# config file (will be created by installer)
configs/models.yaml

# weights (will be created by installer)
models/ldm/stable-diffusion-v1/*.ckpt
0
.gitmodules
vendored
Normal file
13
.prettierrc.yaml
Normal file
@@ -0,0 +1,13 @@
endOfLine: lf
tabWidth: 2
useTabs: false
singleQuote: true
quoteProps: as-needed
embeddedLanguageFormatting: auto
overrides:
  - files: '*.md'
    options:
      proseWrap: always
      printWidth: 80
      parser: markdown
      cursorOffset: -1
19
LICENSE
@@ -1,9 +1,16 @@
All rights reserved by the authors.
You must not distribute the weights provided to you directly or indirectly without explicit consent of the authors.
You must not distribute harmful, offensive, dehumanizing content or otherwise harmful representations of people or their environments, cultures, religions, etc. produced with the model weights
or other generated content described in the "Misuse and Malicious Use" section in the model card.
The model weights are provided for research purposes only.
MIT License

Copyright (c) 2022 InvokeAI Team

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
@@ -11,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
294
LICENSE-ModelWeights.txt
Normal file
@@ -0,0 +1,294 @@
Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors

CreativeML Open RAIL-M
dated August 22, 2022

Section I: PREAMBLE

Multimodal generative models are being widely adopted and used, and
have the potential to transform the way artists, among other
individuals, conceive and benefit from AI or ML technologies as a tool
for content creation.

Notwithstanding the current and potential benefits that these
artifacts can bring to society at large, there are also concerns about
potential misuses of them, either due to their technical limitations
or ethical considerations.

In short, this license strives for both the open and responsible
downstream use of the accompanying model. When it comes to the open
character, we took inspiration from open source permissive licenses
regarding the grant of IP rights. Referring to the downstream
responsible use, we added use-based restrictions not permitting the
use of the Model in very specific scenarios, in order for the licensor
to be able to enforce the license in case potential misuses of the
Model may occur. At the same time, we strive to promote open and
responsible research on generative models for art and content
generation.

Even though downstream derivative versions of the model could be
released under different licensing terms, the latter will always have
to include - at minimum - the same use-based restrictions as the ones
in the original license (this license). We believe in the intersection
between open and responsible AI development; thus, this License aims
to strike a balance between both in order to enable responsible
open-science in the field of AI.

This License governs the use of the model (and its derivatives) and is
informed by the model card associated with the model.

NOW THEREFORE, You and Licensor agree as follows:

1. Definitions

- "License" means the terms and conditions for use, reproduction, and
Distribution as defined in this document.

- "Data" means a collection of information and/or content extracted
from the dataset used with the Model, including to train, pretrain,
or otherwise evaluate the Model. The Data is not licensed under this
License.

- "Output" means the results of operating a Model as embodied in
informational content resulting therefrom.

- "Model" means any accompanying machine-learning based assemblies
(including checkpoints), consisting of learnt weights, parameters
(including optimizer states), corresponding to the model
architecture as embodied in the Complementary Material, that have
been trained or tuned, in whole or in part on the Data, using the
Complementary Material.

- "Derivatives of the Model" means all modifications to the Model,
works based on the Model, or any other model which is created or
initialized by transfer of patterns of the weights, parameters,
activations or output of the Model, to the other model, in order to
cause the other model to perform similarly to the Model, including -
but not limited to - distillation methods entailing the use of
intermediate data representations or methods based on the generation
of synthetic data by the Model for training the other model.

- "Complementary Material" means the accompanying source code and
scripts used to define, run, load, benchmark or evaluate the Model,
and used to prepare data for training or evaluation, if any. This
includes any accompanying documentation, tutorials, examples, etc,
if any.

- "Distribution" means any transmission, reproduction, publication or
other sharing of the Model or Derivatives of the Model to a third
party, including providing the Model as a hosted service made
available by electronic or other remote means - e.g. API-based or
web access.

- "Licensor" means the copyright owner or entity authorized by the
copyright owner that is granting the License, including the persons
or entities that may have rights in the Model and/or distributing
the Model.

- "You" (or "Your") means an individual or Legal Entity exercising
permissions granted by this License and/or making use of the Model
for whichever purpose and in any field of use, including usage of
the Model in an end-use application - e.g. chatbot, translator,
image generator.

- "Third Parties" means individuals or legal entities that are not
under common control with Licensor or You.

- "Contribution" means any work of authorship, including the original
version of the Model and any modifications or additions to that
Model or Derivatives of the Model thereof, that is intentionally
submitted to Licensor for inclusion in the Model by the copyright
owner or by an individual or Legal Entity authorized to submit on
behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written
communication sent to the Licensor or its representatives, including
but not limited to communication on electronic mailing lists, source
code control systems, and issue tracking systems that are managed
by, or on behalf of, the Licensor for the purpose of discussing and
improving the Model, but excluding communication that is
conspicuously marked or otherwise designated in writing by the
copyright owner as "Not a Contribution."

- "Contributor" means Licensor and any individual or Legal Entity on
behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Model.

Section II: INTELLECTUAL PROPERTY RIGHTS

Both copyright and patent grants apply to the Model, Derivatives of
the Model and Complementary Material. The Model and Derivatives of the
Model are subject to additional terms as described in Section III.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare, publicly display, publicly
perform, sublicense, and distribute the Complementary Material, the
Model, and Derivatives of the Model.

3. Grant of Patent License. Subject to the terms and conditions of
this License and where and as applicable, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge,
royalty-free, irrevocable (except as stated in this paragraph) patent
license to make, have made, use, offer to sell, sell, import, and
otherwise transfer the Model and the Complementary Material, where
such license applies only to those patent claims licensable by such
Contributor that are necessarily infringed by their Contribution(s)
alone or by combination of their Contribution(s) with the Model to
which such Contribution(s) was submitted. If You institute patent
litigation against any entity (including a cross-claim or counterclaim
in a lawsuit) alleging that the Model and/or Complementary Material or
a Contribution incorporated within the Model and/or Complementary
Material constitutes direct or contributory patent infringement, then
any patent licenses granted to You under this License for the Model
and/or Work shall terminate as of the date such litigation is asserted
or filed.

Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION

4. Distribution and Redistribution. You may host for Third Party
remote access purposes (e.g. software-as-a-service), reproduce and
distribute copies of the Model or Derivatives of the Model thereof in
any medium, with or without modifications, provided that You meet the
following conditions: Use-based restrictions as referenced in
paragraph 5 MUST be included as an enforceable provision by You in any
type of legal agreement (e.g. a license) governing the use and/or
distribution of the Model or Derivatives of the Model, and You shall
give notice to subsequent users You Distribute to, that the Model or
Derivatives of the Model are subject to paragraph 5. This provision
does not apply to the use of Complementary Material. You must give
any Third Party recipients of the Model or Derivatives of the Model a
copy of this License; You must cause any modified files to carry
prominent notices stating that You changed the files; You must retain
all copyright, patent, trademark, and attribution notices excluding
those notices that do not pertain to any part of the Model,
Derivatives of the Model. You may add Your own copyright statement to
Your modifications and may provide additional or different license
terms and conditions - respecting paragraph 4.a. - for use,
reproduction, or Distribution of Your modifications, or for any such
Derivatives of the Model as a whole, provided Your use, reproduction,
and Distribution of the Model otherwise complies with the conditions
stated in this License.

5. Use-based restrictions. The restrictions set forth in Attachment A
are considered Use-based restrictions. Therefore You cannot use the
Model and the Derivatives of the Model for the specified restricted
uses. You may use the Model subject to this License, including only
for lawful purposes and in accordance with the License. Use may
include creating any content with, finetuning, updating, running,
training, evaluating and/or reparametrizing the Model. You shall
require all of Your users who use the Model or a Derivative of the
Model to comply with the terms of this paragraph (paragraph 5).

6. The Output You Generate. Except as set forth herein, Licensor
claims no rights in the Output You generate using the Model. You are
accountable for the Output you generate and its subsequent uses. No
use of the output can contravene any provision as stated in the
License.

Section IV: OTHER PROVISIONS

7. Updates and Runtime Restrictions. To the maximum extent permitted
by law, Licensor reserves the right to restrict (remotely or
otherwise) usage of the Model in violation of this License, update the
Model through electronic means, or modify the Output of the Model
based on updates. You shall undertake reasonable efforts to use the
latest version of the Model.

8. Trademarks and related. Nothing in this License permits You to make
use of Licensors’ trademarks, trade names, logos or to otherwise
suggest endorsement or misrepresent the relationship between the
parties; and any rights not expressly granted herein are reserved by
the Licensors.

9. Disclaimer of Warranty. Unless required by applicable law or agreed
to in writing, Licensor provides the Model and the Complementary
Material (and each Contributor provides its Contributions) on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied, including, without limitation, any warranties or
conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR
A PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Model, Derivatives of
the Model, and the Complementary Material and assume any risks
associated with Your exercise of permissions under this License.

10. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise, unless
required by applicable law (such as deliberate and grossly negligent
acts) or agreed to in writing, shall any Contributor be liable to You
for damages, including any direct, indirect, special, incidental, or
consequential damages of any character arising as a result of this
License or out of the use or inability to use the Model and the
Complementary Material (including but not limited to damages for loss
of goodwill, work stoppage, computer failure or malfunction, or any
and all other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

11. Accepting Warranty or Additional Liability. While redistributing
the Model, Derivatives of the Model and the Complementary Material
thereof, You may choose to offer, and charge a fee for, acceptance of
support, warranty, indemnity, or other liability obligations and/or
rights consistent with this License. However, in accepting such
obligations, You may act only on Your own behalf and on Your sole
responsibility, not on behalf of any other Contributor, and only if
You agree to indemnify, defend, and hold each Contributor harmless for
any liability incurred by, or claims asserted against, such
Contributor by reason of your accepting any such warranty or
additional liability.

12. If any provision of this License is held to be invalid, illegal or
unenforceable, the remaining provisions shall be unaffected thereby
and remain valid as if such provision had not been set forth herein.

END OF TERMS AND CONDITIONS


Attachment A

Use Restrictions

You agree not to use the Model or Derivatives of the Model:

- In any way that violates any applicable national, federal, state,
local or international law or regulation;

- For the purpose of exploiting, harming or attempting to exploit or
harm minors in any way;

- To generate or disseminate verifiably false information and/or
content with the purpose of harming others;

- To generate or disseminate personal identifiable information that
can be used to harm an individual;

- To defame, disparage or otherwise harass others;

- For fully automated decision making that adversely impacts an
individual’s legal rights or otherwise creates or modifies a
binding, enforceable obligation;
- For any use intended to or which has the effect of discriminating
against or harming individuals or groups based on online or offline
social behavior or known or predicted personal or personality
characteristics;

- To exploit any of the vulnerabilities of a specific group of persons
based on their age, social, physical or mental characteristics, in
order to materially distort the behavior of a person pertaining to
that group in a manner that causes or is likely to cause that person
or another person physical or psychological harm;

- For any use intended to or which has the effect of discriminating
against individuals or groups based on legally protected
characteristics or categories;

- To provide medical advice and medical results interpretation;

- To generate or disseminate information for the purpose to be used
for administration of justice, law enforcement, immigration or
asylum processes, such as predicting an individual will commit
fraud/crime commitment (e.g. by text profiling, drawing causal
relationships between assertions made in documents, indiscriminate
and arbitrarily-targeted use).
423
README.md
@@ -1,314 +1,217 @@
# Stable Diffusion
<div align="center">

This is a fork of CompVis/stable-diffusion, the wonderful open source
text-to-image generator.
# InvokeAI: A Stable Diffusion Toolkit

The original has been modified in several minor ways:
_Formerly known as lstein/stable-diffusion_

## Simplified API for text to image generation
![project logo](docs/assets/logo.png)

There is now a simplified API for text to image generation, which
lets you create images from a prompt in just three lines of code:
[![discord badge]][discord link]

~~~~
from ldm.simplet2i import T2I
model = T2I()
model.text2image("a unicorn in manhattan")
~~~~
[![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]

Please see ldm/simplet2i.py for more information.
[![CI checks on main badge]][CI checks on main link] [![CI checks on dev badge]][CI checks on dev link] [![latest commit to dev badge]][latest commit to dev link]

## Interactive command-line interface similar to the Discord bot
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link]

There is now a command-line script, located in scripts/dream.py, which
provides an interactive interface to image generation similar to
the "dream mothership" bot that Stable AI provided on its Discord
server. The advantage of this is that the lengthy model
initialization only happens once. After that image generation is
fast.
[CI checks on dev badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github
[CI checks on dev link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
[discord link]: https://discord.gg/ZmtBAhwWhy
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
[github forks link]: https://useful-forks.github.io/?repo=invoke-ai%2FInvokeAI
[github open issues badge]: https://flat.badgen.net/github/open-issues/invoke-ai/InvokeAI?icon=github
[github open issues link]: https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen
[github open prs badge]: https://flat.badgen.net/github/open-prs/invoke-ai/InvokeAI?icon=github
[github open prs link]: https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen
[github stars badge]: https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
[latest commit to dev badge]: https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
[latest commit to dev link]: https://github.com/invoke-ai/InvokeAI/commits/development
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
</div>

Note that this has only been tested in the Linux environment!
This is a fork of
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion),
the open source text-to-image generator. It provides a streamlined
process with various new features and options to aid the image
generation process. It runs on Windows, Mac and Linux machines, with
GPU cards with as little as 4 GB of RAM. It provides both a polished
Web interface (see below), and an easy-to-use command-line interface.

~~~~
(ldm) ~/stable-diffusion$ ./scripts/dream.py
* Initializing, be patient...
Loading model from models/ldm/text2img-large/model.ckpt
LatentDiffusion: Running in eps-prediction mode
DiffusionWrapper has 872.30 M params.
making attention of type 'vanilla' with 512 in_channels
Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
making attention of type 'vanilla' with 512 in_channels
Loading Bert tokenizer from "models/bert"
setting sampler to plms
**Quick links**: [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]

* Initialization done! Awaiting your command...
dream> ashley judd riding a camel -n2
Outputs:
outputs/txt2img-samples/00009.png: "ashley judd riding a camel" -n2 -S 416354203
outputs/txt2img-samples/00010.png: "ashley judd riding a camel" -n2 -S 1362479620
<div align="center"><img src="docs/assets/invoke-web-server-1.png" width=640></div>

dream> "your prompt here" -n6 -g
...
~~~~

Command-line arguments (`./scripts/dream.py -h`) allow you to change
various defaults, and select between the mature stable-diffusion
weights (512x512) and the older (256x256) latent diffusion weights
(laion400m). Within the script, the switches are (mostly) identical to
those used in the Discord bot, except you don't need to type "!dream".
_Note: This fork is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
requests. Be sure to use the provided templates. They will help us diagnose issues faster._

## Workaround for machines with limited internet connectivity
## Table of Contents

My development machine is a GPU node in a high-performance compute
cluster which has no connection to the internet. During model
initialization, stable-diffusion tries to download the Bert tokenizer
and a file needed by the kornia library. This obviously didn't work
for me.
1. [Installation](#installation)
2. [Hardware Requirements](#hardware-requirements)
3. [Features](#features)
4. [Latest Changes](#latest-changes)
5. [Troubleshooting](#troubleshooting)
6. [Contributing](#contributing)
7. [Contributors](#contributors)
8. [Support](#support)
9. [Further Reading](#further-reading)

To work around this, I have modified ldm/modules/encoders/modules.py
to look for locally cached Bert files rather than attempting to
download them. For this to work, you must run
"scripts/preload_models.py" once from an internet-connected machine
prior to running the code on an isolated one. This assumes that both
machines share a common network-mounted filesystem with a common
.cache directory.
### Installation

~~~~
(ldm) ~/stable-diffusion$ python3 ./scripts/preload_models.py
preloading bert tokenizer...
Downloading: 100%|██████████████████████████████████| 28.0/28.0 [00:00<00:00, 49.3kB/s]
Downloading: 100%|██████████████████████████████████| 226k/226k [00:00<00:00, 2.79MB/s]
Downloading: 100%|██████████████████████████████████| 455k/455k [00:00<00:00, 4.36MB/s]
Downloading: 100%|██████████████████████████████████| 570/570 [00:00<00:00, 477kB/s]
...success
preloading kornia requirements...
Downloading: "https://github.com/DagnyT/hardnet/raw/master/pretrained/train_liberty_with_aug/checkpoint_liberty_with_aug.pth" to /u/lstein/.cache/torch/hub/checkpoints/checkpoint_liberty_with_aug.pth
100%|███████████████████████████████████████████████| 5.10M/5.10M [00:00<00:00, 101MB/s]
...success
~~~~
This fork is supported across multiple platforms. You can find individual installation instructions
below.

If you don't need this change and want to download the files just in
time, copy over the file ldm/modules/encoders/modules.py from the
CompVis/stable-diffusion repository. Or you can run preload_models.py
on the target machine.
- #### [Linux](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_LINUX/)

## Minor fixes
- #### [Windows](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_WINDOWS/)

I added the requirement for torchmetrics to environment.yaml.
- #### [Macintosh](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_MAC/)

## Installation and support
### Hardware Requirements

Follow the directions from the original README, which starts below, to
configure the environment and install requirements. For support,
please use this repository's GitHub Issues tracking service. Feel free
to send me an email if you use and like the script.
#### System

*Author:* Lincoln D. Stein <lincoln.stein@gmail.com>
You will need one of the following:

# Original README from CompViz/stable-diffusion
*Stable Diffusion was made possible thanks to a collaboration with [Stability AI](https://stability.ai/) and [Runway](https://runwayml.com/) and builds upon our previous work:*
- An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- An Apple computer with an M1 chip.

[**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752)<br/>
[Robin Rombach](https://github.com/rromb)\*,
[Andreas Blattmann](https://github.com/ablattmann)\*,
[Dominik Lorenz](https://github.com/qp-qp),
[Patrick Esser](https://github.com/pesser),
[Björn Ommer](https://hci.iwr.uni-heidelberg.de/Staff/bommer)<br/>
#### Memory

which is available on [GitHub](https://github.com/CompVis/latent-diffusion).
- At least 12 GB Main Memory RAM.

![txt2img-stable2](assets/stable-samples/txt2img/merged-0006.png)
[Stable Diffusion](#stable-diffusion-v1) is a latent text-to-image diffusion
model.
Thanks to a generous compute donation from [Stability AI](https://stability.ai/) and support from [LAION](https://laion.ai/), we were able to train a Latent Diffusion Model on 512x512 images from a subset of the [LAION-5B](https://laion.ai/blog/laion-5b/) database.
Similar to Google's [Imagen](https://arxiv.org/abs/2205.11487),
this model uses a frozen CLIP ViT-L/14 text encoder to condition the model on text prompts.
With its 860M UNet and 123M text encoder, the model is relatively lightweight and runs on a GPU with at least 10GB VRAM.
See [this section](#stable-diffusion-v1) below and the [model card](https://huggingface.co/CompVis/stable-diffusion).
#### Disk

## Requirements
A suitable [conda](https://conda.io/) environment named `ldm` can be created
and activated with:
- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.

```
conda env create -f environment.yaml
conda activate ldm
```
**Note**

If you have an Nvidia 10xx series card (e.g. the 1080ti), please
run the dream script in full-precision mode as shown below.

Similarly, specify full-precision mode on Apple M1 hardware.

Precision is auto configured based on the device. If however you encounter
errors like 'expected type Float but found Half' or 'not implemented for Half'
you can try starting `invoke.py` with the `--precision=float32` flag:

```bash
(invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32
```

You can also update an existing [latent diffusion](https://github.com/CompVis/latent-diffusion) environment by running
### Features

```
conda install pytorch torchvision -c pytorch
pip install transformers==4.19.2
pip install -e .
```
#### Major Features

- [Web Server](https://invoke-ai.github.io/InvokeAI/features/WEB/)
- [Interactive Command Line Interface](https://invoke-ai.github.io/InvokeAI/features/CLI/)
- [Image To Image](https://invoke-ai.github.io/InvokeAI/features/IMG2IMG/)
- [Inpainting Support](https://invoke-ai.github.io/InvokeAI/features/INPAINTING/)
- [Outpainting Support](https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/)
- [Upscaling, face-restoration and outpainting](https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/)
- [Reading Prompts From File](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#reading-prompts-from-a-file)
- [Prompt Blending](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#prompt-blending)
- [Thresholding and Perlin Noise Initialization Options](https://invoke-ai.github.io/InvokeAI/features/OTHER/#thresholding-and-perlin-noise-initialization-options)
- [Negative/Unconditioned Prompts](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts)
- [Variations](https://invoke-ai.github.io/InvokeAI/features/VARIATIONS/)
- [Personalizing Text-to-Image Generation](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/)
- [Simplified API for text to image generation](https://invoke-ai.github.io/InvokeAI/features/OTHER/#simplified-api)

## Stable Diffusion v1
#### Other Features

Stable Diffusion v1 refers to a specific configuration of the model
architecture that uses a downsampling-factor 8 autoencoder with an 860M UNet
and CLIP ViT-L/14 text encoder for the diffusion model. The model was pretrained on 256x256 images and
then finetuned on 512x512 images.
- [Google Colab](https://invoke-ai.github.io/InvokeAI/features/OTHER/#google-colab)
- [Seamless Tiling](https://invoke-ai.github.io/InvokeAI/features/OTHER/#seamless-tiling)
- [Shortcut: Reusing Seeds](https://invoke-ai.github.io/InvokeAI/features/OTHER/#shortcuts-reusing-seeds)
- [Preload Models](https://invoke-ai.github.io/InvokeAI/features/OTHER/#preload-models)

*Note: Stable Diffusion v1 is a general text-to-image diffusion model and therefore mirrors biases and (mis-)conceptions that are present
in its training data.
Details on the training procedure and data, as well as the intended use of the model can be found in the corresponding [model card](https://huggingface.co/CompVis/stable-diffusion).
Research into the safe deployment of general text-to-image models is an ongoing effort. To prevent misuse and harm, we currently provide access to the checkpoints only for [academic research purposes upon request](https://stability.ai/academia-access-form).
**This is an experiment in safe and community-driven publication of a capable and general text-to-image model. We are working on a public release with a more permissive license that also incorporates ethical considerations.***
### Latest Changes

[Request access to Stable Diffusion v1 checkpoints for academic research](https://stability.ai/academia-access-form)
### v2.1.0 major changes <small>(2 November 2022)</small>

### Weights
- [Inpainting](https://invoke-ai.github.io/InvokeAI/features/INPAINTING/) support in the WebGUI
- Greatly improved navigation and user experience in the [WebGUI](https://invoke-ai.github.io/InvokeAI/features/WEB/)
- The prompt syntax has been enhanced with [prompt weighting, cross-attention and prompt merging](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/).
- You can now load [multiple models and switch among them quickly](https://docs.google.com/presentation/d/1WywGA1rny7bpFh7CLSdTr4nNpVKdlUeT0Bj0jCsILyU/edit?usp=sharing) without leaving the CLI.
- The installation process (via `scripts/preload_models.py`) now lets you select among several popular [Stable Diffusion models](https://invoke-ai.github.io/InvokeAI/installation/INSTALLING_MODELS/) and downloads and installs them on your behalf. Among other models, this script will install the current Stable Diffusion 1.5 model as well as a StabilityAI variable autoencoder (VAE) which improves face generation.
- Tired of struggling with photoeditors to get the masked region for inpainting just right? Let the AI make the mask for you using [text masking](https://docs.google.com/presentation/d/1pWoY510hCVjz0M6X9CBbTznZgW2W5BYNKrmZm7B45q8/edit#slide=id.p). This feature allows you to specify the part of the image to paint over using just English-language phrases.
- Tired of seeing the head of your subjects cropped off? Uncrop them in the CLI with the [outcrop feature](https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/#outcrop).
- Tired of seeing your subjects' bodies duplicated or mangled when generating larger-dimension images? Check out the `--hires` option in the CLI, or select the corresponding toggle in the WebGUI.
- We now support textual inversion and fine-tune .bin styles and subjects from the Hugging Face archive of [SD Concepts](https://huggingface.co/sd-concepts-library). Load the .bin file using the `--embedding_path` option. (The next version will support merging and loading of multiple simultaneous models).
<a href="https://invoke-ai.github.io/InvokeAI/CHANGELOG/">Complete Changelog</a>

We currently provide three checkpoints, `sd-v1-1.ckpt`, `sd-v1-2.ckpt` and `sd-v1-3.ckpt`,
which were trained as follows,
- v2.0.1 (13 October 2022)
  - fix noisy images at high step count when using k* samplers
  - dream.py script now calls invoke.py module directly rather than
    via a new python process (which could break the environment)

- `sd-v1-1.ckpt`: 237k steps at resolution `256x256` on [laion2B-en](https://huggingface.co/datasets/laion/laion2B-en).
  194k steps at resolution `512x512` on [laion-high-resolution](https://huggingface.co/datasets/laion/laion-high-resolution) (170M examples from LAION-5B with resolution `>= 1024x1024`).
- `sd-v1-2.ckpt`: Resumed from `sd-v1-1.ckpt`.
  515k steps at resolution `512x512` on "laion-improved-aesthetics" (a subset of laion2B-en,
  filtered to images with an original size `>= 512x512`, estimated aesthetics score `> 5.0`, and an estimated watermark probability `< 0.5`. The watermark estimate is from the LAION-5B metadata, the aesthetics score is estimated using an [improved aesthetics estimator](https://github.com/christophschuhmann/improved-aesthetic-predictor)).
- `sd-v1-3.ckpt`: Resumed from `sd-v1-2.ckpt`. 195k steps at resolution `512x512` on "laion-improved-aesthetics" and 10\% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598).
- v2.0.0 (9 October 2022)

Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0,
5.0, 6.0, 7.0, 8.0) and 50 PLMS sampling
steps show the relative improvements of the checkpoints:
![sd evaluation results](assets/v1-variants-scores.jpg)
  - `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
    for backward compatibility.
  - Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
  - Support for <a href="https://invoke-ai.github.io/InvokeAI/features/INPAINTING/">inpainting</a> and <a href="https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/">outpainting</a>
  - img2img runs on all k* samplers
  - Support for <a href="https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts">negative prompts</a>
  - Support for CodeFormer face reconstruction
  - Support for Textual Inversion on Macintoshes
  - Support in both WebGUI and CLI for <a href="https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/">post-processing of previously-generated images</a>
    using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
    and "embiggen" upscaling. See the `!fix` command.
  - New `--hires` option on `invoke>` line allows <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/#txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
  - New `--perlin` and `--threshold` options allow you to add and control variation
    during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>).
  - Extensive metadata now written into PNG files, allowing reliable regeneration of images
    and tweaking of previous settings.
  - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
  - Improved <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/">command-line completion behavior</a>.
    New commands added:
    - List command-line history with `!history`
    - Search command-line history with `!search`
    - Clear history with `!clear`
  - Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
    configure. To switch away from auto use the new flag like `--precision=float32`.

For older changelogs, please visit the **[CHANGELOG](https://invoke-ai.github.io/InvokeAI/CHANGELOG#v114-11-september-2022)**.

### Troubleshooting

### Text-to-Image with Stable Diffusion
![txt2img-stable2](assets/stable-samples/txt2img/merged-0005.png)
![txt2img-stable2](assets/stable-samples/txt2img/merged-0007.png)
Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
problems and other issues.

Stable Diffusion is a latent diffusion model conditioned on the (non-pooled) text embeddings of a CLIP ViT-L/14 text encoder.
# Contributing

Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
cleanup, testing, or code reviews, is very much encouraged to do so. If you are unfamiliar with how
to contribute to GitHub projects, here is a
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github).

#### Sampling Script
A full set of contribution guidelines, along with templates, is in progress, but for now the most
important thing is to **make your pull request against the "development" branch**, and not against
"main". This will help keep public breakage to a minimum and will allow you to propose more radical
changes.

After [obtaining the weights](#weights), link them
```
mkdir -p models/ldm/stable-diffusion-v1/
ln -s <path/to/model.ckpt> models/ldm/stable-diffusion-v1/model.ckpt
```
and sample with
```
python scripts/txt2img.py --prompt "a photograph of an astronaut riding a horse" --plms
```
### Contributors

By default, this uses a guidance scale of `--scale 7.5`, [Katherine Crowson's implementation](https://github.com/CompVis/latent-diffusion/pull/51) of the [PLMS](https://arxiv.org/abs/2202.09778) sampler,
and renders images of size 512x512 (which it was trained on) in 50 steps. All supported arguments are listed below (type `python scripts/txt2img.py --help`).
This fork is a combined effort of various people from across the world.
[Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for
their time, hard work and effort.

```commandline
usage: txt2img.py [-h] [--prompt [PROMPT]] [--outdir [OUTDIR]] [--skip_grid] [--skip_save] [--ddim_steps DDIM_STEPS] [--plms] [--laion400m] [--fixed_code] [--ddim_eta DDIM_ETA] [--n_iter N_ITER] [--H H] [--W W] [--C C] [--f F] [--n_samples N_SAMPLES] [--n_rows N_ROWS]
                  [--scale SCALE] [--from-file FROM_FILE] [--config CONFIG] [--ckpt CKPT] [--seed SEED] [--precision {full,autocast}]
### Support

optional arguments:
  -h, --help            show this help message and exit
  --prompt [PROMPT]     the prompt to render
  --outdir [OUTDIR]     dir to write results to
  --skip_grid           do not save a grid, only individual samples. Helpful when evaluating lots of samples
  --skip_save           do not save individual samples. For speed measurements.
  --ddim_steps DDIM_STEPS
                        number of ddim sampling steps
  --plms                use plms sampling
  --laion400m           uses the LAION400M model
  --fixed_code          if enabled, uses the same starting code across samples
  --ddim_eta DDIM_ETA   ddim eta (eta=0.0 corresponds to deterministic sampling)
--n_iter N_ITER sample this often
|
||||
--H H image height, in pixel space
|
||||
--W W image width, in pixel space
|
||||
--C C latent channels
|
||||
--f F downsampling factor
|
||||
--n_samples N_SAMPLES
|
||||
how many samples to produce for each given prompt. A.k.a. batch size
|
||||
--n_rows N_ROWS rows in the grid (default: n_samples)
|
||||
--scale SCALE unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))
|
||||
--from-file FROM_FILE
|
||||
if specified, load prompts from this file
|
||||
--config CONFIG path to config which constructs model
|
||||
--ckpt CKPT path to checkpoint of model
|
||||
--seed SEED the seed (for reproducible sampling)
|
||||
--precision {full,autocast}
|
||||
evaluate at this precision
|
||||
For support, please use this repository's GitHub Issues tracking service. Feel free to send me an
|
||||
email if you use and like the script.
|
||||
|
||||
```
|
||||
Note: The inference config for all v1 versions is designed to be used with EMA-only checkpoints.
|
||||
For this reason `use_ema=False` is set in the configuration, otherwise the code will try to switch from
|
||||
non-EMA to EMA weights. If you want to examine the effect of EMA vs no EMA, we provide "full" checkpoints
|
||||
which contain both types of weights. For these, `use_ema=False` will load and use the non-EMA weights.
|
||||
|
||||
|
||||
#### Diffusers Integration
|
||||
|
||||
Another way to download and sample Stable Diffusion is by using the [diffusers library](https://github.com/huggingface/diffusers/tree/main#new--stable-diffusion-is-now-fully-compatible-with-diffusers)
|
||||
```py
|
||||
# make sure you're logged in with `huggingface-cli login`
|
||||
from torch import autocast
|
||||
from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler
|
||||
|
||||
pipe = StableDiffusionPipeline.from_pretrained(
|
||||
"CompVis/stable-diffusion-v1-3-diffusers",
|
||||
use_auth_token=True
|
||||
)
|
||||
|
||||
prompt = "a photo of an astronaut riding a horse on mars"
|
||||
with autocast("cuda"):
|
||||
image = pipe(prompt)["sample"][0]
|
||||
|
||||
image.save("astronaut_rides_horse.png")
|
||||
```
|
||||
|
||||
|
||||
|
||||
### Image Modification with Stable Diffusion
|
||||
|
||||
By using a diffusion-denoising mechanism as first proposed by [SDEdit](https://arxiv.org/abs/2108.01073), the model can be used for different
|
||||
tasks such as text-guided image-to-image translation and upscaling. Similar to the txt2img sampling script,
|
||||
we provide a script to perform image modification with Stable Diffusion.
|
||||
|
||||
The following describes an example where a rough sketch made in [Pinta](https://www.pinta-project.com/) is converted into a detailed artwork.
|
||||
```
|
||||
python scripts/img2img.py --prompt "A fantasy landscape, trending on artstation" --init-img <path-to-img.jpg> --strength 0.8
|
||||
```
|
||||
Here, strength is a value between 0.0 and 1.0, that controls the amount of noise that is added to the input image.
|
||||
Values that approach 1.0 allow for lots of variations but will also produce images that are not semantically consistent with the input. See the following example.
|
||||
|
||||
**Input**
|
||||
|
||||

|
||||
|
||||
**Outputs**
|
||||
|
||||

|
||||

|
||||
|
||||
This procedure can, for example, also be used to upscale samples from the base model.
|
||||
|
||||
|
||||
## Comments
|
||||
|
||||
- Our codebase for the diffusion models builds heavily on [OpenAI's ADM codebase](https://github.com/openai/guided-diffusion)
|
||||
and [https://github.com/lucidrains/denoising-diffusion-pytorch](https://github.com/lucidrains/denoising-diffusion-pytorch).
|
||||
Thanks for open-sourcing!
|
||||
|
||||
- The implementation of the transformer encoder is from [x-transformers](https://github.com/lucidrains/x-transformers) by [lucidrains](https://github.com/lucidrains?tab=repositories).
|
||||
|
||||
|
||||
## BibTeX
|
||||
|
||||
```
|
||||
@misc{rombach2021highresolution,
|
||||
title={High-Resolution Image Synthesis with Latent Diffusion Models},
|
||||
author={Robin Rombach and Andreas Blattmann and Dominik Lorenz and Patrick Esser and Björn Ommer},
|
||||
year={2021},
|
||||
eprint={2112.10752},
|
||||
archivePrefix={arXiv},
|
||||
primaryClass={cs.CV}
|
||||
}
|
||||
|
||||
```
|
||||
Original portions of the software are Copyright (c) 2020
|
||||
[Lincoln D. Stein](https://github.com/lstein)
|
||||
|
||||
### Further Reading
|
||||
|
||||
Please see the original README for more information on this software and underlying algorithm,
|
||||
located in the file [README-CompViz.md](https://invoke-ai.github.io/InvokeAI/other/README-CompViz/).
|
||||
|
||||
BIN
assets/caution.png
Normal file
1264
backend/invoke_ai_web_server.py
Normal file
55
backend/modules/create_cmd_parser.py
Normal file
@@ -0,0 +1,55 @@
|
||||
import argparse
import os

from ldm.invoke.args import PRECISION_CHOICES


def create_cmd_parser():
    parser = argparse.ArgumentParser(description="InvokeAI web UI")
    parser.add_argument(
        "--host",
        type=str,
        help="The host to serve on",
        default="localhost",
    )
    parser.add_argument("--port", type=int, help="The port to serve on", default=9090)
    parser.add_argument(
        "--cors",
        nargs="*",
        type=str,
        help="Additional allowed origins, comma-separated",
    )
    parser.add_argument(
        "--embedding_path",
        type=str,
        help="Path to a pre-trained embedding manager checkpoint - can only be set on command line",
    )
    # TODO: Can't get flask to serve images from any dir (saving to the dir does work when specified)
    # parser.add_argument(
    #     "--output_dir",
    #     default="outputs/",
    #     type=str,
    #     help="Directory for output images",
    # )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Enables verbose logging",
    )
    parser.add_argument(
        "--precision",
        dest="precision",
        type=str,
        choices=PRECISION_CHOICES,
        metavar="PRECISION",
        help=f'Set model precision. Defaults to auto selected based on device. Options: {", ".join(PRECISION_CHOICES)}',
        default="auto",
    )
    parser.add_argument(
        "--free_gpu_mem",
        dest="free_gpu_mem",
        action="store_true",
        help="Force free gpu memory before final decoding",
    )

    return parser
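A quick sketch of how the parser above can be exercised (the option values here are arbitrary examples, not project defaults beyond those defined in the code):

```
# Illustrative only: simulate a command line and inspect the parsed options.
parser = create_cmd_parser()
args = parser.parse_args(["--host", "0.0.0.0", "--port", "9091", "--verbose"])
print(args.host, args.port, args.verbose)  # -> 0.0.0.0 9091 True
```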
69  backend/modules/parameters.py  Normal file
@@ -0,0 +1,69 @@
from backend.modules.parse_seed_weights import parse_seed_weights
import argparse

SAMPLER_CHOICES = [
    "ddim",
    "k_dpm_2_a",
    "k_dpm_2",
    "k_euler_a",
    "k_euler",
    "k_heun",
    "k_lms",
    "plms",
]


def parameters_to_command(params):
    """
    Converts a dict of parameters into an `invoke.py` REPL command.
    """

    switches = list()

    if "prompt" in params:
        switches.append(f'"{params["prompt"]}"')
    if "steps" in params:
        switches.append(f'-s {params["steps"]}')
    if "seed" in params:
        switches.append(f'-S {params["seed"]}')
    if "width" in params:
        switches.append(f'-W {params["width"]}')
    if "height" in params:
        switches.append(f'-H {params["height"]}')
    if "cfg_scale" in params:
        switches.append(f'-C {params["cfg_scale"]}')
    if "sampler_name" in params:
        switches.append(f'-A {params["sampler_name"]}')
    if "seamless" in params and params["seamless"] == True:
        switches.append("--seamless")
    if "hires_fix" in params and params["hires_fix"] == True:
        switches.append("--hires")
    if "init_img" in params and len(params["init_img"]) > 0:
        switches.append(f'-I {params["init_img"]}')
    if "init_mask" in params and len(params["init_mask"]) > 0:
        switches.append(f'-M {params["init_mask"]}')
    if "init_color" in params and len(params["init_color"]) > 0:
        switches.append(f'--init_color {params["init_color"]}')
    if "strength" in params and "init_img" in params:
        switches.append(f'-f {params["strength"]}')
    if "fit" in params and params["fit"] == True:
        switches.append("--fit")
    if "facetool" in params:
        switches.append(f'-ft {params["facetool"]}')
    if "facetool_strength" in params and params["facetool_strength"]:
        switches.append(f'-G {params["facetool_strength"]}')
    elif "gfpgan_strength" in params and params["gfpgan_strength"]:
        switches.append(f'-G {params["gfpgan_strength"]}')
    if "codeformer_fidelity" in params:
        switches.append(f'-cf {params["codeformer_fidelity"]}')
    if "upscale" in params and params["upscale"]:
        switches.append(f'-U {params["upscale"][0]} {params["upscale"][1]}')
    if "variation_amount" in params and params["variation_amount"] > 0:
        switches.append(f'-v {params["variation_amount"]}')
    if "with_variations" in params:
        seed_weight_pairs = ",".join(
            f"{seed}:{weight}" for seed, weight in params["with_variations"]
        )
        switches.append(f"-V {seed_weight_pairs}")

    return " ".join(switches)
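To make the switch mapping concrete, here is a small illustrative call (the parameter values are made up):

```
# Illustrative only: each recognized key becomes one invoke.py REPL switch.
params = {"prompt": "a red barn", "steps": 30, "seed": 42, "width": 512, "height": 512}
print(parameters_to_command(params))
# -> "a red barn" -s 30 -S 42 -W 512 -H 512
```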
47  backend/modules/parse_seed_weights.py  Normal file
@@ -0,0 +1,47 @@
def parse_seed_weights(seed_weights):
    """
    Accepts seed weights as a string in "12345:0.1,23456:0.2,3456:0.3" format
    and validates them.
    If valid: returns them as [[12345, 0.1], [23456, 0.2], [3456, 0.3]]
    If invalid: returns False
    """

    # Must be a string
    if not isinstance(seed_weights, str):
        return False
    # String must not be empty
    if len(seed_weights) == 0:
        return False

    pairs = []

    for pair in seed_weights.split(","):
        split_values = pair.split(":")

        # Seed and weight are both required
        if len(split_values) != 2:
            return False

        # Neither the seed nor the weight may be empty
        if len(split_values[0]) == 0 or len(split_values[1]) == 0:
            return False

        # Try casting the seed to int and the weight to float
        try:
            seed = int(split_values[0])
            weight = float(split_values[1])
        except ValueError:
            return False

        # Seed must be 0 or above
        if not seed >= 0:
            return False

        # Weight must be between 0 and 1
        if not (weight >= 0 and weight <= 1):
            return False

        # This pair is valid
        pairs.append([seed, weight])

    # All pairs are valid
    return pairs
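A few illustrative calls showing the accepted format and the failure mode:

```
# Illustrative only: valid input returns parsed pairs, anything else returns False.
print(parse_seed_weights("12345:0.1,23456:0.2"))  # -> [[12345, 0.1], [23456, 0.2]]
print(parse_seed_weights("12345:1.5"))            # -> False (weight outside [0, 1])
print(parse_seed_weights(""))                     # -> False (empty string)
```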
@@ -1,54 +0,0 @@
model:
  base_learning_rate: 4.5e-6
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: "val/rec_loss"
    embed_dim: 16
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 0.000001
        disc_weight: 0.5

    ddconfig:
      double_z: True
      z_channels: 16
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult: [ 1,1,2,2,4 ]  # num_down = len(ch_mult)-1
      num_res_blocks: 2
      attn_resolutions: [16]
      dropout: 0.0


data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 12
    wrap: True
    train:
      target: ldm.data.imagenet.ImageNetSRTrain
      params:
        size: 256
        degradation: pil_nearest
    validation:
      target: ldm.data.imagenet.ImageNetSRValidation
      params:
        size: 256
        degradation: pil_nearest

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 1000
        max_images: 8
        increase_log_steps: True

  trainer:
    benchmark: True
    accumulate_grad_batches: 2
@@ -1,53 +0,0 @@
model:
  base_learning_rate: 4.5e-6
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: "val/rec_loss"
    embed_dim: 4
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 0.000001
        disc_weight: 0.5

    ddconfig:
      double_z: True
      z_channels: 4
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult: [ 1,2,4,4 ]  # num_down = len(ch_mult)-1
      num_res_blocks: 2
      attn_resolutions: [ ]
      dropout: 0.0

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 12
    wrap: True
    train:
      target: ldm.data.imagenet.ImageNetSRTrain
      params:
        size: 256
        degradation: pil_nearest
    validation:
      target: ldm.data.imagenet.ImageNetSRValidation
      params:
        size: 256
        degradation: pil_nearest

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 1000
        max_images: 8
        increase_log_steps: True

  trainer:
    benchmark: True
    accumulate_grad_batches: 2
@@ -1,54 +0,0 @@
model:
  base_learning_rate: 4.5e-6
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: "val/rec_loss"
    embed_dim: 3
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 0.000001
        disc_weight: 0.5

    ddconfig:
      double_z: True
      z_channels: 3
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult: [ 1,2,4 ]  # num_down = len(ch_mult)-1
      num_res_blocks: 2
      attn_resolutions: [ ]
      dropout: 0.0


data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 12
    wrap: True
    train:
      target: ldm.data.imagenet.ImageNetSRTrain
      params:
        size: 256
        degradation: pil_nearest
    validation:
      target: ldm.data.imagenet.ImageNetSRValidation
      params:
        size: 256
        degradation: pil_nearest

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 1000
        max_images: 8
        increase_log_steps: True

  trainer:
    benchmark: True
    accumulate_grad_batches: 2
@@ -1,53 +0,0 @@
model:
  base_learning_rate: 4.5e-6
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: "val/rec_loss"
    embed_dim: 64
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 0.000001
        disc_weight: 0.5

    ddconfig:
      double_z: True
      z_channels: 64
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult: [ 1,1,2,2,4,4 ]  # num_down = len(ch_mult)-1
      num_res_blocks: 2
      attn_resolutions: [16,8]
      dropout: 0.0

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 12
    wrap: True
    train:
      target: ldm.data.imagenet.ImageNetSRTrain
      params:
        size: 256
        degradation: pil_nearest
    validation:
      target: ldm.data.imagenet.ImageNetSRValidation
      params:
        size: 256
        degradation: pil_nearest

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 1000
        max_images: 8
        increase_log_steps: True

  trainer:
    benchmark: True
    accumulate_grad_batches: 2
@@ -1,86 +0,0 @@
model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    image_size: 64
    channels: 3
    monitor: val/loss_simple_ema

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 224
        attention_resolutions:
          # note: this isn't actually the resolution but
          # the downsampling factor, i.e. this corresponds to
          # attention on spatial resolution 8, 16, 32, as the
          # spatial resolution of the latents is 64 for f4
          - 8
          - 4
          - 2
        num_res_blocks: 2
        channel_mult:
          - 1
          - 2
          - 3
          - 4
        num_head_channels: 32
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        ckpt_path: models/first_stage_models/vq-f4/model.ckpt
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config: __is_unconditional__
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 48
    num_workers: 5
    wrap: false
    train:
      target: taming.data.faceshq.CelebAHQTrain
      params:
        size: 256
    validation:
      target: taming.data.faceshq.CelebAHQValidation
      params:
        size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False

  trainer:
    benchmark: True
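The comment on `attention_resolutions` above is worth making concrete: the listed values are downsampling factors relative to the latent grid, so the spatial size attention actually runs at is the latent size divided by each factor. A minimal sketch of that arithmetic, using the f4 latent size of 64 from the config above:

```
# Illustrative only: attention_resolutions entries are downsampling factors.
latent_size = 64              # spatial size of the f4 latents (see config above)
for factor in (8, 4, 2):      # the attention_resolutions entries
    print(latent_size // factor)  # -> 8, 16, 32: the actual attention map sizes
```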
@@ -1,98 +0,0 @@
model:
  base_learning_rate: 1.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: class_label
    image_size: 32
    channels: 4
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 256
        attention_resolutions:
          # note: this isn't actually the resolution but
          # the downsampling factor, i.e. this corresponds to
          # attention on spatial resolution 8, 16, 32, as the
          # spatial resolution of the latents is 32 for f8
          - 4
          - 2
          - 1
        num_res_blocks: 2
        channel_mult:
          - 1
          - 2
          - 4
        num_head_channels: 32
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 512
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 4
        n_embed: 16384
        ckpt_path: configs/first_stage_models/vq-f8/model.yaml
        ddconfig:
          double_z: false
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 2
            - 4
          num_res_blocks: 2
          attn_resolutions:
            - 32
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config:
      target: ldm.modules.encoders.modules.ClassEmbedder
      params:
        embed_dim: 512
        key: class_label
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 64
    num_workers: 12
    wrap: false
    train:
      target: ldm.data.imagenet.ImageNetTrain
      params:
        config:
          size: 256
    validation:
      target: ldm.data.imagenet.ImageNetValidation
      params:
        config:
          size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False

  trainer:
    benchmark: True
@@ -1,68 +0,0 @@
model:
  base_learning_rate: 0.0001
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: class_label
    image_size: 64
    channels: 3
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss
    use_ema: False

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 192
        attention_resolutions:
          - 8
          - 4
          - 2
        num_res_blocks: 2
        channel_mult:
          - 1
          - 2
          - 3
          - 5
        num_heads: 1
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 512

    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.ClassEmbedder
      params:
        n_classes: 1001
        embed_dim: 512
        key: class_label
@@ -1,85 +0,0 @@
model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    image_size: 64
    channels: 3
    monitor: val/loss_simple_ema
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 224
        attention_resolutions:
          # note: this isn't actually the resolution but
          # the downsampling factor, i.e. this corresponds to
          # attention on spatial resolution 8, 16, 32, as the
          # spatial resolution of the latents is 64 for f4
          - 8
          - 4
          - 2
        num_res_blocks: 2
        channel_mult:
          - 1
          - 2
          - 3
          - 4
        num_head_channels: 32
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        ckpt_path: configs/first_stage_models/vq-f4/model.yaml
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config: __is_unconditional__
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 42
    num_workers: 5
    wrap: false
    train:
      target: taming.data.faceshq.FFHQTrain
      params:
        size: 256
    validation:
      target: taming.data.faceshq.FFHQValidation
      params:
        size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False

  trainer:
    benchmark: True
@@ -1,85 +0,0 @@
model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    image_size: 64
    channels: 3
    monitor: val/loss_simple_ema
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 224
        attention_resolutions:
          # note: this isn't actually the resolution but
          # the downsampling factor, i.e. this corresponds to
          # attention on spatial resolution 8, 16, 32, as the
          # spatial resolution of the latents is 64 for f4
          - 8
          - 4
          - 2
        num_res_blocks: 2
        channel_mult:
          - 1
          - 2
          - 3
          - 4
        num_head_channels: 32
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        ckpt_path: configs/first_stage_models/vq-f4/model.yaml
        embed_dim: 3
        n_embed: 8192
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config: __is_unconditional__
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 48
    num_workers: 5
    wrap: false
    train:
      target: ldm.data.lsun.LSUNBedroomsTrain
      params:
        size: 256
    validation:
      target: ldm.data.lsun.LSUNBedroomsValidation
      params:
        size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False

  trainer:
    benchmark: True
@@ -1,91 +0,0 @@
model:
  base_learning_rate: 5.0e-5   # set to target_lr by starting main.py with '--scale_lr False'
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0155
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    loss_type: l1
    first_stage_key: "image"
    cond_stage_key: "image"
    image_size: 32
    channels: 4
    cond_stage_trainable: False
    concat_mode: False
    scale_by_std: True
    monitor: 'val/loss_simple_ema'

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [10000]
        cycle_lengths: [10000000000000]
        f_start: [1.e-6]
        f_max: [1.]
        f_min: [1.]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 192
        attention_resolutions: [ 1, 2, 4, 8 ]   # 32, 16, 8, 4
        num_res_blocks: 2
        channel_mult: [ 1,2,2,4,4 ]  # 32, 16, 8, 4, 2
        num_heads: 8
        use_scale_shift_norm: True
        resblock_updown: True

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: "val/rec_loss"
        ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
        ddconfig:
          double_z: True
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [ 1,2,4,4 ]  # num_down = len(ch_mult)-1
          num_res_blocks: 2
          attn_resolutions: [ ]
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config: "__is_unconditional__"

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 96
    num_workers: 5
    wrap: False
    train:
      target: ldm.data.lsun.LSUNChurchesTrain
      params:
        size: 256
    validation:
      target: ldm.data.lsun.LSUNChurchesValidation
      params:
        size: 256

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False


  trainer:
    benchmark: True
@@ -1,71 +0,0 @@
model:
  base_learning_rate: 5.0e-05
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.012
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: caption
    image_size: 32
    channels: 4
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions:
          - 4
          - 2
          - 1
        num_res_blocks: 2
        channel_mult:
          - 1
          - 2
          - 4
          - 4
        num_heads: 8
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 1280
        use_checkpoint: true
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.BERTEmbedder
      params:
        n_embed: 1280
        n_layer: 32
27  configs/models.yaml.example  Normal file
@@ -0,0 +1,27 @@
# This file describes the alternative machine learning models
# available to the InvokeAI script.
#
# To add a new model, follow the examples below. Each
# model requires a model config file, a weights file,
# and the width and height of the images it
# was trained on.
stable-diffusion-1.5:
  description: The newest Stable Diffusion version 1.5 weight file (4.27 GB)
  weights: ./models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
  config: ./configs/stable-diffusion/v1-inference.yaml
  width: 512
  height: 512
  vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
  default: true
stable-diffusion-1.4:
  description: Stable Diffusion inference model version 1.4
  config: configs/stable-diffusion/v1-inference.yaml
  weights: models/ldm/stable-diffusion-v1/sd-v1-4.ckpt
  vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
  width: 512
  height: 512
inpainting-1.5:
  weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
  config: configs/stable-diffusion/v1-inpainting-inference.yaml
  vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
  description: RunwayML SD 1.5 model optimized for inpainting
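A minimal sketch of reading a file in this format, assuming OmegaConf is available (the other configs in this repository follow OmegaConf conventions); the exact loading code InvokeAI uses may differ, and the path is a hypothetical installed copy of this example:

```
# Illustrative only: iterate over the model stanzas in a models.yaml-style file.
from omegaconf import OmegaConf

models = OmegaConf.load("configs/models.yaml")  # hypothetical installed copy
for name, cfg in models.items():
    marker = " (default)" if cfg.get("default", False) else ""
    print(f"{name}{marker}: {cfg.weights} @ {cfg.width}x{cfg.height}")
```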
@@ -1,68 +0,0 @@
model:
  base_learning_rate: 0.0001
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.015
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: jpg
    cond_stage_key: nix
    image_size: 48
    channels: 16
    cond_stage_trainable: false
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_by_std: false
    scale_factor: 0.22765929
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 48
        in_channels: 16
        out_channels: 16
        model_channels: 448
        attention_resolutions:
          - 4
          - 2
          - 1
        num_res_blocks: 2
        channel_mult:
          - 1
          - 2
          - 3
          - 4
        use_scale_shift_norm: false
        resblock_updown: false
        num_head_channels: 32
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: true
    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        monitor: val/rec_loss
        embed_dim: 16
        ddconfig:
          double_z: true
          z_channels: 16
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 1
            - 2
            - 2
            - 4
          num_res_blocks: 2
          attn_resolutions:
            - 16
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config:
      target: torch.nn.Identity
110  configs/stable-diffusion/v1-finetune.yaml  Normal file
@@ -0,0 +1,110 @@
model:
  base_learning_rate: 5.0e-03
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: caption
    image_size: 64
    channels: 4
    cond_stage_trainable: true   # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False
    embedding_reg_weight: 0.0

    personalization_config:
      target: ldm.modules.embedding_manager.EmbeddingManager
      params:
        placeholder_strings: ["*"]
        initializer_words: ["sculpture"]
        per_image_tokens: false
        num_vectors_per_token: 1
        progressive_words: False

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 1
    num_workers: 2
    wrap: false
    train:
      target: ldm.data.personalized.PersonalizedBase
      params:
        size: 512
        set: train
        per_image_tokens: false
        repeats: 100
    validation:
      target: ldm.data.personalized.PersonalizedBase
      params:
        size: 512
        set: val
        per_image_tokens: false
        repeats: 10

lightning:
  modelcheckpoint:
    params:
      every_n_train_steps: 500
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 500
        max_images: 8
        increase_log_steps: False

  trainer:
    benchmark: True
    max_steps: 4000000
    # max_steps: 4000
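For orientation: in this textual-inversion fine-tune config, `placeholder_strings` registers the token the embedding manager learns and `initializer_words` seeds its embedding. After training, the placeholder is referenced inside ordinary prompts. An illustrative (not prescriptive) example, assuming the trained embedding was loaded via the `--embedding_path` option defined in `create_cmd_parser` above:

```
invoke> "a photo of * on a beach" -s 50
```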
103  configs/stable-diffusion/v1-finetune_style.yaml  Normal file
@@ -0,0 +1,103 @@
model:
  base_learning_rate: 5.0e-03
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: caption
    image_size: 64
    channels: 4
    cond_stage_trainable: true   # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False
    embedding_reg_weight: 0.0

    personalization_config:
      target: ldm.modules.embedding_manager.EmbeddingManager
      params:
        placeholder_strings: ["*"]
        initializer_words: ["painting"]
        per_image_tokens: false
        num_vectors_per_token: 1

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 2
    num_workers: 16
    wrap: false
    train:
      target: ldm.data.personalized_style.PersonalizedBase
      params:
        size: 512
        set: train
        per_image_tokens: false
        repeats: 100
    validation:
      target: ldm.data.personalized_style.PersonalizedBase
      params:
        size: 512
        set: val
        per_image_tokens: false
        repeats: 10

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 500
        max_images: 8
        increase_log_steps: False

  trainer:
    benchmark: True
@@ -26,6 +26,15 @@ model:
     f_max: [ 1. ]
     f_min: [ 1. ]

+    personalization_config:
+      target: ldm.modules.embedding_manager.EmbeddingManager
+      params:
+        placeholder_strings: ["*"]
+        initializer_words: ['face', 'man', 'photo', 'africanmale']
+        per_image_tokens: false
+        num_vectors_per_token: 1
+        progressive_words: False
+
     unet_config:
       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
       params:
@@ -67,4 +76,4 @@ model:
         target: torch.nn.Identity

     cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+      target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
79  configs/stable-diffusion/v1-inpainting-inference.yaml  Normal file
@@ -0,0 +1,79 @@
model:
  base_learning_rate: 7.5e-05
  target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: hybrid   # important
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    finetune_keys: null

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    personalization_config:
      target: ldm.modules.embedding_manager.EmbeddingManager
      params:
        placeholder_strings: ["*"]
        initializer_words: ['face', 'man', 'photo', 'africanmale']
        per_image_tokens: false
        num_vectors_per_token: 1
        progressive_words: False

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 9  # 4 data + 4 downscaled image + 1 mask
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
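The `in_channels: 9` comment above encodes the hybrid-conditioning layout: the UNet input is a channel-wise concatenation of the 4 noised latent channels, 4 channels for the encoded masked image, and a 1-channel mask. A minimal sketch of that concatenation (shapes illustrative; this is not the model's actual forward code):

```
# Illustrative only: 4 (latents) + 4 (masked-image latents) + 1 (mask) = 9 channels.
import torch

z      = torch.randn(1, 4, 64, 64)  # noised latents
masked = torch.randn(1, 4, 64, 64)  # encoded masked input image
mask   = torch.randn(1, 1, 64, 64)  # downscaled inpainting mask
unet_in = torch.cat([z, masked, mask], dim=1)
print(unet_in.shape)  # -> torch.Size([1, 9, 64, 64])
```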
110  configs/stable-diffusion/v1-m1-finetune.yaml  Normal file
@@ -0,0 +1,110 @@
model:
  base_learning_rate: 5.0e-03
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: caption
    image_size: 64
    channels: 4
    cond_stage_trainable: true   # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False
    embedding_reg_weight: 0.0

    personalization_config:
      target: ldm.modules.embedding_manager.EmbeddingManager
      params:
        placeholder_strings: ["*"]
        initializer_words: ['face', 'man', 'photo', 'africanmale']
        per_image_tokens: false
        num_vectors_per_token: 6
        progressive_words: False

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 1
    num_workers: 2
    wrap: false
    train:
      target: ldm.data.personalized.PersonalizedBase
      params:
        size: 512
        set: train
        per_image_tokens: false
        repeats: 100
    validation:
      target: ldm.data.personalized.PersonalizedBase
      params:
        size: 512
        set: val
        per_image_tokens: false
        repeats: 10

lightning:
  modelcheckpoint:
    params:
      every_n_train_steps: 500
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 500
        max_images: 5
        increase_log_steps: False

  trainer:
    benchmark: False
    max_steps: 6200
    # max_steps: 4000
74  docker-build/Dockerfile  Normal file
@@ -0,0 +1,74 @@
FROM ubuntu AS get_miniconda

SHELL ["/bin/bash", "-c"]

# install wget
RUN apt-get update \
    && apt-get install -y \
        wget \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# download and install miniconda
ARG conda_version=py39_4.12.0-Linux-x86_64
ARG conda_prefix=/opt/conda
RUN wget --progress=dot:giga -O /miniconda.sh \
        https://repo.anaconda.com/miniconda/Miniconda3-${conda_version}.sh \
    && bash /miniconda.sh -b -p ${conda_prefix} \
    && rm -f /miniconda.sh

FROM ubuntu AS invokeai

# use bash
SHELL [ "/bin/bash", "-c" ]

# clean bashrc
RUN echo "" > ~/.bashrc

# Install necessary packages
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc \
        git \
        libgl1-mesa-glx \
        libglib2.0-0 \
        pip \
        python3 \
        python3-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# clone repository and create symlinks
ARG invokeai_git=https://github.com/invoke-ai/InvokeAI.git
ARG project_name=invokeai
RUN git clone ${invokeai_git} /${project_name} \
    && mkdir /${project_name}/models/ldm/stable-diffusion-v1 \
    && ln -s /data/models/sd-v1-4.ckpt /${project_name}/models/ldm/stable-diffusion-v1/model.ckpt \
    && ln -s /data/outputs/ /${project_name}/outputs

# set workdir
WORKDIR /${project_name}

# install conda env and preload models
ARG conda_prefix=/opt/conda
ARG conda_env_file=environment.yml
COPY --from=get_miniconda ${conda_prefix} ${conda_prefix}
RUN source ${conda_prefix}/etc/profile.d/conda.sh \
    && conda init bash \
    && source ~/.bashrc \
    && conda env create \
        --name ${project_name} \
        --file ${conda_env_file} \
    && rm -Rf ~/.cache \
    && conda clean -afy \
    && echo "conda activate ${project_name}" >> ~/.bashrc \
    && ln -s /data/models/GFPGANv1.4.pth ./src/gfpgan/experiments/pretrained_models/GFPGANv1.4.pth \
    && conda activate ${project_name} \
    && python scripts/preload_models.py

# Copy entrypoint and set env
ENV CONDA_PREFIX=${conda_prefix}
ENV PROJECT_NAME=${project_name}
COPY docker-build/entrypoint.sh /
ENTRYPOINT [ "/entrypoint.sh" ]
81  docker-build/build.sh  Executable file
@@ -0,0 +1,81 @@
#!/usr/bin/env bash
set -e
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoint!!!
# configure values by using env when executing build.sh
# e.g. env ARCH=aarch64 GITHUB_INVOKE_AI=https://github.com/yourname/yourfork.git ./build.sh

source ./docker-build/env.sh || echo "please run from repository root" || exit 1

invokeai_conda_version=${INVOKEAI_CONDA_VERSION:-py39_4.12.0-${platform/\//-}}
invokeai_conda_prefix=${INVOKEAI_CONDA_PREFIX:-\/opt\/conda}
invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment.yml}
invokeai_git=${INVOKEAI_GIT:-https://github.com/invoke-ai/InvokeAI.git}
huggingface_token=${HUGGINGFACE_TOKEN?}

# print the settings
echo "You are using these values:"
echo -e "project_name:\t\t ${project_name}"
echo -e "volumename:\t\t ${volumename}"
echo -e "arch:\t\t\t ${arch}"
echo -e "platform:\t\t ${platform}"
echo -e "invokeai_conda_version:\t ${invokeai_conda_version}"
echo -e "invokeai_conda_prefix:\t ${invokeai_conda_prefix}"
echo -e "invokeai_conda_env_file: ${invokeai_conda_env_file}"
echo -e "invokeai_git:\t\t ${invokeai_git}"
echo -e "invokeai_tag:\t\t ${invokeai_tag}\n"

_runAlpine() {
    docker run \
        --rm \
        --interactive \
        --tty \
        --mount source="$volumename",target=/data \
        --workdir /data \
        alpine "$@"
}

_copyCheckpoints() {
    echo "creating subfolders for models and outputs"
    _runAlpine mkdir models
    _runAlpine mkdir outputs
    echo -n "downloading sd-v1-4.ckpt"
    _runAlpine wget --header="Authorization: Bearer ${huggingface_token}" -O models/sd-v1-4.ckpt https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt
    echo "done"
    echo "downloading GFPGANv1.4.pth"
    _runAlpine wget -O models/GFPGANv1.4.pth https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth
}

_checkVolumeContent() {
    _runAlpine ls -lhA /data/models
}

_getModelMd5s() {
    _runAlpine \
        alpine sh -c "md5sum /data/models/*"
}

if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then
    echo "Volume already exists"
    if [[ -z "$(_checkVolumeContent)" ]]; then
        echo "looks empty, copying checkpoint"
        _copyCheckpoints
    fi
    echo "Models in ${volumename}:"
    _checkVolumeContent
else
    echo -n "creating docker volume "
    docker volume create "${volumename}"
    _copyCheckpoints
fi

# Build Container
docker build \
    --platform="${platform}" \
    --tag "${invokeai_tag}" \
    --build-arg project_name="${project_name}" \
    --build-arg conda_version="${invokeai_conda_version}" \
    --build-arg conda_prefix="${invokeai_conda_prefix}" \
    --build-arg conda_env_file="${invokeai_conda_env_file}" \
    --build-arg invokeai_git="${invokeai_git}" \
    --file ./docker-build/Dockerfile \
    .
8  docker-build/entrypoint.sh  Executable file
@@ -0,0 +1,8 @@
#!/bin/bash
set -e

source "${CONDA_PREFIX}/etc/profile.d/conda.sh"
conda activate "${PROJECT_NAME}"

python scripts/invoke.py \
    ${@:---web --host=0.0.0.0}
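The `${@:-...}` expansion on the last line substitutes the default arguments `--web --host=0.0.0.0` only when the container is started with no arguments; any arguments you do pass replace the defaults entirely. A tiny self-contained illustration of the same idiom (the function name is hypothetical):

```
#!/usr/bin/env bash
# Illustrative only: same default-arguments idiom as the entrypoint above.
demo() { echo "args: ${@:---web --host=0.0.0.0}"; }
demo            # -> args: --web --host=0.0.0.0
demo --help     # -> args: --help
```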
13  docker-build/env.sh  Normal file
@@ -0,0 +1,13 @@
#!/usr/bin/env bash

project_name=${PROJECT_NAME:-invokeai}
volumename=${VOLUMENAME:-${project_name}_data}
arch=${ARCH:-x86_64}
platform=${PLATFORM:-Linux/${arch}}
invokeai_tag=${INVOKEAI_TAG:-${project_name}-${arch}}

export project_name
export volumename
export arch
export platform
export invokeai_tag
15  docker-build/run.sh  Executable file
@@ -0,0 +1,15 @@
#!/usr/bin/env bash
set -e

source ./docker-build/env.sh || echo "please run from repository root" || exit 1

docker run \
    --interactive \
    --tty \
    --rm \
    --platform "$platform" \
    --name "$project_name" \
    --hostname "$project_name" \
    --mount source="$volumename",target=/data \
    --publish 9090:9090 \
    "$invokeai_tag" ${1:+$@}
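Taken together, a typical session with these scripts might look like the following (illustrative; `<your-token>` is a placeholder for the huggingface.co token that build.sh requires via `HUGGINGFACE_TOKEN`):

```
# run from the repository root
HUGGINGFACE_TOKEN=<your-token> ./docker-build/build.sh
./docker-build/run.sh   # web UI listens on the published port 9090
```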
280  docs/CHANGELOG.md  Normal file
@@ -0,0 +1,280 @@
---
title: Changelog
---

# :octicons-log-16: **Changelog**

## v2.1.0 (2 November 2022)

- update mac instructions to use invokeai for env name by @willwillems in https://github.com/invoke-ai/InvokeAI/pull/1030
- Update .gitignore by @blessedcoolant in https://github.com/invoke-ai/InvokeAI/pull/1040
- reintroduce fix for m1 from https://github.com/invoke-ai/InvokeAI/pull/579 missing after merge by @skurovec in https://github.com/invoke-ai/InvokeAI/pull/1056
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in https://github.com/invoke-ai/InvokeAI/pull/1060
- Print out the device type which is used by @manzke in https://github.com/invoke-ai/InvokeAI/pull/1073
- Hires Addition by @hipsterusername in https://github.com/invoke-ai/InvokeAI/pull/1063
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by @skurovec in https://github.com/invoke-ai/InvokeAI/pull/1081
- Forward dream.py to invoke.py using the same interpreter, add deprecation warning by @db3000 in https://github.com/invoke-ai/InvokeAI/pull/1077
- fix noisy images at high step counts by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1086
- Generalize facetool strength argument by @db3000 in https://github.com/invoke-ai/InvokeAI/pull/1078
- Enable fast switching among models at the invoke> command line by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1066
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in https://github.com/invoke-ai/InvokeAI/pull/1095
- Update generate.py by @unreleased in https://github.com/invoke-ai/InvokeAI/pull/1109
- Update 'ldm' env to 'invokeai' in troubleshooting steps by @19wolf in https://github.com/invoke-ai/InvokeAI/pull/1125
- Fixed documentation typos and resolved merge conflicts by @rupeshs in https://github.com/invoke-ai/InvokeAI/pull/1123
- Fix broken doc links, fix malaprop in the project subtitle by @majick in https://github.com/invoke-ai/InvokeAI/pull/1131
- Only output facetool parameters if enhancing faces by @db3000 in https://github.com/invoke-ai/InvokeAI/pull/1119
- Update gitignore to ignore codeformer weights at new location by @spezialspezial in https://github.com/invoke-ai/InvokeAI/pull/1136
- fix links to point to invoke-ai.github.io #1117 by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1143
- Rework-mkdocs by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1144
- add option to CLI and pngwriter that allows user to set PNG compression level by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1127
- Fix img2img DDIM index out of bound by @wfng92 in https://github.com/invoke-ai/InvokeAI/pull/1137
- Fix gh actions by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1128
- Add text prompt to inpaint mask support by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1133
|
||||
- Respect http[s] protocol when making socket.io middleware by @damian0815 in https://github.com/invoke-ai/InvokeAI/pull/976
|
||||
- WebUI: Adds Codeformer support by @psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1151
|
||||
- Skips normalizing prompts for web UI metadata by @psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1165
|
||||
- Add Asymmetric Tiling by @carson-katri in https://github.com/invoke-ai/InvokeAI/pull/1132
|
||||
- Web UI: Increases max CFG Scale to 200 by @psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1172
|
||||
- Corrects color channels in face restoration; Fixes #1167 by @psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1175
|
||||
- Flips channels using array slicing instead of using OpenCV by @psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1178
|
||||
- Fix typo in docs: s/Formally/Formerly by @noodlebox in https://github.com/invoke-ai/InvokeAI/pull/1176
|
||||
- fix clipseg loading problems by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1177
|
||||
- Correct color channels in upscale using array slicing by @wfng92 in https://github.com/invoke-ai/InvokeAI/pull/1181
|
||||
- Web UI: Filters existing images when adding new images; Fixes #1085 by @psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1171
|
||||
- fix a number of bugs in textual inversion by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1190
|
||||
- Improve !fetch, add !replay command by @ArDiouscuros in https://github.com/invoke-ai/InvokeAI/pull/882
|
||||
- Fix generation of image with s>1000 by @holstvoogd in https://github.com/invoke-ai/InvokeAI/pull/951
|
||||
- Web UI: Gallery improvements by @psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1198
|
||||
- Update CLI.md by @krummrey in https://github.com/invoke-ai/InvokeAI/pull/1211
|
||||
- outcropping improvements by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1207
|
||||
- add support for loading VAE autoencoders by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1216
|
||||
- remove duplicate fix_func for MPS by @wfng92 in https://github.com/invoke-ai/InvokeAI/pull/1210
|
||||
- Metadata storage and retrieval fixes by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1204
|
||||
- nix: add shell.nix file by @Cloudef in https://github.com/invoke-ai/InvokeAI/pull/1170
|
||||
- Web UI: Changes vite dist asset paths to relative by @psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1185
|
||||
- Web UI: Removes isDisabled from PromptInput by @psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1187
|
||||
- Allow user to generate images with initial noise as on M1 / mps system by @ArDiouscuros in https://github.com/invoke-ai/InvokeAI/pull/981
|
||||
- feat: adding filename format template by @plucked in https://github.com/invoke-ai/InvokeAI/pull/968
|
||||
- Web UI: Fixes broken bundle by @psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1242
|
||||
- Support runwayML custom inpainting model by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1243
|
||||
- Update IMG2IMG.md by @talitore in https://github.com/invoke-ai/InvokeAI/pull/1262
|
||||
- New dockerfile - including a build- and a run- script as well as a GH-Action by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1233
|
||||
- cut over from karras to model noise schedule for higher steps by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1222
|
||||
- Prompt tweaks by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1268
|
||||
- Outpainting implementation by @Kyle0654 in https://github.com/invoke-ai/InvokeAI/pull/1251
|
||||
- fixing aspect ratio on hires by @tjennings in https://github.com/invoke-ai/InvokeAI/pull/1249
|
||||
- Fix-build-container-action by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1274
|
||||
- handle all unicode characters by @damian0815 in https://github.com/invoke-ai/InvokeAI/pull/1276
|
||||
- adds models.user.yml to .gitignore by @JakeHL in https://github.com/invoke-ai/InvokeAI/pull/1281
|
||||
- remove debug branch, set fail-fast to false by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1284
|
||||
- Protect-secrets-on-pr by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1285
|
||||
- Web UI: Adds initial inpainting implementation by @psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1225
|
||||
- fix environment-mac.yml - tested on x64 and arm64 by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1289
|
||||
- Use proper authentication to download model by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1287
|
||||
- Prevent indexing error for mode RGB by @spezialspezial in https://github.com/invoke-ai/InvokeAI/pull/1294
|
||||
- Integrate sd-v1-5 model into test matrix (easily expandable), remove unecesarry caches by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1293
|
||||
- add --no-interactive to preload_models step by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1302
|
||||
- 1-click installer and updater. Uses micromamba to install git and conda into a contained environment (if necessary) before running the normal installation script by @cmdr2 in https://github.com/invoke-ai/InvokeAI/pull/1253
|
||||
- preload_models.py script downloads the weight files by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1290
|
||||
|
||||
## v2.0.1 (13 October 2022)
|
||||
|
||||
- fix noisy images at high step count when using k* samplers
|
||||
- dream.py script now calls invoke.py module directly rather than
|
||||
via a new python process (which could break the environment)
|
||||
|
||||
## v2.0.0 <small>(9 October 2022)</small>

- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
  for backward compatibility.
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
- Support for [inpainting](features/INPAINTING.md) and [outpainting](features/OUTPAINTING.md)
- img2img runs on all k* samplers
- Support for [negative prompts](features/PROMPTS.md#negative-and-unconditioned-prompts)
- Support for CodeFormer face reconstruction
- Support for Textual Inversion on Macintoshes
- Support in both WebGUI and CLI for [post-processing of previously-generated images](features/POSTPROCESS.md)
  using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
  and "embiggen" upscaling. See the `!fix` command.
- New `--hires` option on the `invoke>` line allows [larger images to be created without duplicating elements](features/CLI.md#this-is-an-example-of-txt2img), at the cost of some performance.
- New `--perlin` and `--threshold` options allow you to add and control variation
  during image generation (see [Thresholding and Perlin Noise Initialization](features/OTHER.md#thresholding-and-perlin-noise-initialization-options))
- Extensive metadata now written into PNG files, allowing reliable regeneration of
  images and tweaking of previous settings.
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
- Improved [command-line completion behavior](features/CLI.md). New commands added:
  - List command-line history with `!history`
  - Search command-line history with `!search`
  - Clear history with `!clear`
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will
  auto-configure. To switch away from auto, use the new flag like
  `--precision=float32` (see the examples below).

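A quick tour of the options above, from the shell and the interactive `invoke>` prompt. The prompt text, file name, and numeric values are illustrative only:

```bash
# Launch the completely new WebGUI
python3 scripts/invoke.py --web

# At the interactive prompt: hires generation plus perlin/threshold variation
invoke> "a sea otter floating in space" --hires
invoke> "a sea otter floating in space" --perlin 0.5 --threshold 2

# Post-process a previously generated image with the !fix command
invoke> !fix outputs/img-samples/000001.1234567890.png

# Command-line history helpers
invoke> !history
invoke> !search otter
invoke> !clear

# Opt out of automatic precision selection
python3 scripts/invoke.py --precision=float32
```
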
## v1.14 <small>(11 September 2022)</small>

- Memory optimizations for small-RAM cards. 512x512 now possible on 4 GB GPUs.
- Full support for Apple hardware with M1 or M2 chips.
- Add "seamless mode" for circular tiling of image. Generates beautiful effects
  (see the sketch below). ([prixt](https://github.com/prixt))
- Inpainting support.
- Improved web server GUI.
- Lots of code and documentation cleanups.

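The notes name "seamless mode" but not its switch; assuming a `--seamless` flag, a sketch of circular tiling:

```bash
# Circular tiling: the image wraps at its edges, so it tiles cleanly as a
# texture (--seamless is an assumed flag name; the notes name only the feature)
invoke> "pebble beach texture" --seamless
```
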
## v1.13 <small>(3 September 2022)</small>

- Support image variations (see [VARIATIONS](features/VARIATIONS.md))
  ([Kevin Gibbons](https://github.com/bakkot) and many contributors and reviewers)
- Supports a Google Colab notebook for a standalone server running on Google hardware
  ([Arturo Mendivil](https://github.com/artmen1516))
- WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling
  ([Kevin Gibbons](https://github.com/bakkot))
- WebUI supports incremental display of in-progress images during generation
  ([Kevin Gibbons](https://github.com/bakkot))
- A new configuration file scheme that allows new models (including upcoming
  stable-diffusion-v1.5) to be added without altering the code.
  ([David Wager](https://github.com/maddavid12))
- Can specify --grid on the invoke.py command line as the default (see the
  example below).
- Miscellaneous internal bug and stability fixes.
- Works on M1 Apple hardware.
- Multiple bug fixes.

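For example, grid output can be made the default for a whole session from the command line:

```bash
# Every generation in this session is assembled into a grid by default
python3 scripts/invoke.py --grid
```
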
---

## v1.12 <small>(28 August 2022)</small>

- Improved file handling, including ability to read prompts from standard input.
  (kudos to [Yunsaki](https://github.com/yunsaki))
- The web server is now integrated with the invoke.py script. Invoke by adding
  --web to the invoke.py command arguments.
- Face restoration and upscaling via GFPGAN and Real-ESRGAN are now automatically
  enabled if the GFPGAN directory is located as a sibling to Stable Diffusion.
  VRAM requirements are modestly reduced. Thanks to both [Blessedcoolant](https://github.com/blessedcoolant)
  and [Oceanswave](https://github.com/oceanswave) for their work on this.
- You can now swap samplers on the invoke> command line.
  ([Blessedcoolant](https://github.com/blessedcoolant))

---

## v1.11 <small>(26 August 2022)</small>

- NEW FEATURE: Support upscaling and face enhancement using the GFPGAN module.
  (kudos to [Oceanswave](https://github.com/Oceanswave))
- You can now specify a seed of -1 to use the previous image's seed, -2 to use
  the seed for the image generated before that, and so on. Seed memory only
  extends back to the previous command, but will work on all images generated
  with the -n# switch (see the example below).
- Variant generation support temporarily disabled pending a more general solution.
- Created a feature branch named **yunsaki-morphing-invoke** which adds
  experimental support for iteratively modifying the prompt and its parameters.
  Please see [Pull Request #86](https://github.com/lstein/stable-diffusion/pull/86)
  for a synopsis of how this works. Note that when this feature is eventually
  added to the main branch, it may be modified significantly.

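A sketch of the seed-memory convention; it assumes `-S` is the seed switch, which the notes do not spell out:

```bash
# Generate a batch of three images, then reuse a seed from that batch:
# -S -1 takes the previous image's seed, -S -2 the one before it, etc.
invoke> "a castle on a hill" -n3
invoke> "a castle on a hill at sunset" -S -1
```
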
---

## v1.10 <small>(25 August 2022)</small>

- A barebones but fully functional interactive web server for online generation
  of txt2img and img2img.

---

## v1.09 <small>(24 August 2022)</small>

- A new -v option allows you to generate multiple variants of an initial image
  in img2img mode (see the sketch below). (kudos to [Oceanswave](https://github.com/Oceanswave);
  [see this discussion in the PR for examples and details on use](https://github.com/lstein/stable-diffusion/pull/71#issuecomment-1226700810))
- Added ability to personalize text to image generation (kudos to [Oceanswave](https://github.com/Oceanswave) and [nicolai256](https://github.com/nicolai256))
- Enabled all of the samplers from k_diffusion

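The notes name the -v switch but not its argument; this sketch assumes it takes a variant count, and that `-I` selects the initial image:

```bash
# Produce variants of an init image in img2img mode (syntax assumed)
invoke> "portrait of a robot artist" -I init_image.png -v3
```
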
---

## v1.08 <small>(24 August 2022)</small>

- Escape single quotes on the invoke> command before trying to parse. This
  avoids parse errors.
- Removed instruction to get Python3.8 as first step in Windows install.
  Anaconda3 does it for you.
- Added bounds checks for numeric arguments that could cause crashes.
- Cleaned up the copyright and license agreement files.

---

## v1.07 <small>(23 August 2022)</small>

- Image filenames will now never fill gaps in the sequence, but will be assigned
  the next higher name in the chosen directory. This ensures that the alphabetic
  and chronological sort orders are the same.

---

## v1.06 <small>(23 August 2022)</small>

- Added weighted prompt support contributed by [xraxra](https://github.com/xraxra)
  (see the sketch below)
- Example of using weighted prompts to tweak a demonic figure contributed by
  [bmaltais](https://github.com/bmaltais)

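The notes credit the feature without spelling out the syntax; this sketch assumes the colon-delimited per-sub-prompt weights of that era:

```bash
# Weighted sub-prompts: larger weights pull the image toward that phrase
invoke> "blue sphere:0.25 red cube:0.75 hybrid"
```
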
---

## v1.05 <small>(22 August 2022 - after the drop)</small>

- Filenames now use the following formats:

      000010.95183149.png      -- Two files produced by the same command (e.g. -n2),
      000010.26742632.png      -- distinguished by a different seed.

      000011.455191342.01.png  -- Two files produced by the same command using
      000011.455191342.02.png  -- a batch size >1 (e.g. -b2). They have the same seed.

      000011.4160627868.grid#1-4.png -- a grid of four images (-g); the whole grid
                                        can be regenerated with the indicated key

- It should no longer be possible for one image to overwrite another
- You can use the "cd" and "pwd" commands at the invoke> prompt to set and
  retrieve the path of the output directory (see the example below).

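For example (directory name illustrative):

```bash
# Point output at a different directory, then confirm the change
invoke> cd outputs/experiments
invoke> pwd
```
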
---

## v1.04 <small>(22 August 2022 - after the drop)</small>

- Updated README to reflect installation of the released weights.
- Suppressed very noisy and inconsequential warning when loading the frozen CLIP
  tokenizer.

---

## v1.03 <small>(22 August 2022)</small>

- The original txt2img and img2img scripts from the CompVis repository have been
  moved into a subfolder named "orig_scripts", to reduce confusion.

---

## v1.02 <small>(21 August 2022)</small>

- A copy of the prompt and all of its switches and options is now stored in the
  corresponding image in a tEXt metadata field named "Dream". You can read the
  prompt using scripts/images2prompt.py, or an image editor that allows you to
  explore the full metadata (see the example below).
  **Please run "conda env update" to load the k_lms dependencies!!**

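For example, using a filename of the format shown under v1.05 above (path illustrative):

```bash
# Recover the "Dream" tEXt field (prompt plus all switches) from a PNG
python scripts/images2prompt.py outputs/img-samples/000010.95183149.png
```
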
---

## v1.01 <small>(21 August 2022)</small>

- added k_lms sampling.
  **Please run "conda env update" to load the k_lms dependencies!!**
- use half precision arithmetic by default, resulting in faster execution and
  lower memory requirements. Pass the argument --full_precision to invoke.py to
  get slower but more accurate image generation (see the example below).

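A minimal sketch combining the two notes:

```bash
# Load the new k_lms dependencies, then opt back into full precision
# if half precision is too lossy on your hardware
conda env update
python scripts/invoke.py --full_precision
```
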
---

## Links

- **[Read Me](index.md)**