Compare commits: v3.5.0dev1...invoke-3.7
654 commits
(Commit table omitted: only the abbreviated commit SHAs survived extraction — the author, date, and message columns are empty. The 654 commits run from 3cc54915e3 through b6ed4ba559.)
.github/CODEOWNERS (vendored) — 8 lines changed

@@ -1,5 +1,5 @@
 # continuous integration
-/.github/workflows/ @lstein @blessedcoolant @hipsterusername
+/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr

 # documentation
 /docs/ @lstein @blessedcoolant @hipsterusername @Millu
@@ -10,7 +10,7 @@
 # installation and configuration
 /pyproject.toml @lstein @blessedcoolant @hipsterusername
-/docker/ @lstein @blessedcoolant @hipsterusername
+/docker/ @lstein @blessedcoolant @hipsterusername @ebr
 /scripts/ @ebr @lstein @hipsterusername
 /installer/ @lstein @ebr @hipsterusername
 /invokeai/assets @lstein @ebr @hipsterusername
@@ -26,9 +26,7 @@
 # front ends
 /invokeai/frontend/CLI @lstein @hipsterusername
-/invokeai/frontend/install @lstein @ebr @hipsterusername
-/invokeai/frontend/install @lstein @ebr @hipsterusername
 /invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
 /invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
 /invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp @hipsterusername
.github/ISSUE_TEMPLATE/BUG_REPORT.yml (vendored) — 98 lines changed

@@ -6,10 +6,6 @@ title: '[bug]: '
 labels: ['bug']

-# assignees:
-#   - moderator_bot
-#   - lstein
-
 body:
   - type: markdown
     attributes:
@@ -18,10 +14,9 @@ body:
   - type: checkboxes
     attributes:
-      label: Is there an existing issue for this?
+      label: Is there an existing issue for this problem?
       description: |
-        Please use the [search function](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
-        first to see if an issue already exists for the bug you encountered.
+        Please [search](https://github.com/invoke-ai/InvokeAI/issues) first to see if an issue already exists for the problem.
       options:
         - label: I have searched the existing issues
           required: true
@@ -33,80 +28,119 @@ body:
   - type: dropdown
     id: os_dropdown
     attributes:
-      label: OS
-      description: Which operating System did you use when the bug occured
+      label: Operating system
+      description: Your computer's operating system.
       multiple: false
       options:
         - 'Linux'
         - 'Windows'
         - 'macOS'
         - 'other'
     validations:
       required: true

   - type: dropdown
     id: gpu_dropdown
     attributes:
-      label: GPU
-      description: Which kind of Graphic-Adapter is your System using
+      label: GPU vendor
+      description: Your GPU's vendor.
       multiple: false
       options:
-        - 'cuda'
-        - 'amd'
-        - 'mps'
-        - 'cpu'
+        - 'Nvidia (CUDA)'
+        - 'AMD (ROCm)'
+        - 'Apple Silicon (MPS)'
+        - 'None (CPU)'
     validations:
       required: true

+  - type: input
+    id: gpu_model
+    attributes:
+      label: GPU model
+      description: Your GPU's model. If on Apple Silicon, this is your Mac's chip. Leave blank if on CPU.
+      placeholder: ex. RTX 2080 Ti, Mac M1 Pro
+    validations:
+      required: false

   - type: input
     id: vram
     attributes:
-      label: VRAM
-      description: Size of the VRAM if known
+      label: GPU VRAM
+      description: Your GPU's VRAM. If on Apple Silicon, this is your Mac's unified memory. Leave blank if on CPU.
       placeholder: 8GB
     validations:
       required: false

   - type: input
     id: version-number
     attributes:
-      label: What version did you experience this issue on?
+      label: Version number
       description: |
-        Please share the version of Invoke AI that you experienced the issue on. If this is not the latest version, please update first to confirm the issue still exists. If you are testing main, please include the commit hash instead.
-      placeholder: X.X.X
+        The version of Invoke you have installed. If it is not the latest version, please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
+      placeholder: ex. 3.6.1
     validations:
       required: true

+  - type: input
+    id: browser-version
+    attributes:
+      label: Browser
+      description: Your web browser and version.
+      placeholder: ex. Firefox 123.0b3
+    validations:
+      required: true

+  - type: textarea
+    id: python-deps
+    attributes:
+      label: Python dependencies
+      description: |
+        If the problem occurred during image generation, click the gear icon at the bottom left corner, click "About", click the copy button and then paste here.
+    validations:
+      required: false

   - type: textarea
     id: what-happened
     attributes:
-      label: What happened?
+      label: What happened
       description: |
-        Briefly describe what happened, what you expected to happen and how to reproduce this bug.
-      placeholder: When using the webinterface and right-clicking on button X instead of the popup-menu there error Y appears
+        Describe what happened. Include any relevant error messages, stack traces and screenshots here.
+      placeholder: I clicked button X and then Y happened.
     validations:
       required: true

   - type: textarea
     id: what-you-expected
     attributes:
-      label: Screenshots
-      description: If applicable, add screenshots to help explain your problem
-      placeholder: this is what the result looked like <screenshot>
+      label: What you expected to happen
+      description: Describe what you expected to happen.
+      placeholder: I expected Z to happen.
     validations:
       required: true

+  - type: textarea
+    id: how-to-repro
+    attributes:
+      label: How to reproduce the problem
+      description: List steps to reproduce the problem.
+      placeholder: Start the app, generate an image with these settings, then click button X.
+    validations:
+      required: false

   - type: textarea
     id: additional-context
     attributes:
       label: Additional context
-      description: Add any other context about the problem here
+      description: Any other context that might help us to understand the problem.
       placeholder: Only happens when there is full moon and Friday the 13th on Christmas Eve 🎅🏻
     validations:
       required: false

   - type: input
-    id: contact
+    id: discord-username
     attributes:
-      label: Contact Details
-      description: __OPTIONAL__ How can we get in touch with you if we need more info (besides this issue)?
-      placeholder: ex. email@example.com, discordname, twitter, ...
+      label: Discord username
+      description: If you are on the Invoke discord and would prefer to be contacted there, please provide your username.
+      placeholder: supercoolusername123
     validations:
       required: false
.github/actions/install-frontend-deps/action.yml (vendored) — deleted (33 lines)

name: Install frontend dependencies
description: Installs frontend dependencies with pnpm, with caching
runs:
  using: 'composite'
  steps:
    - name: Setup Node 18
      uses: actions/setup-node@v4
      with:
        node-version: '18'

    - name: Setup pnpm
      uses: pnpm/action-setup@v2
      with:
        version: 8
        run_install: false

    - name: Get pnpm store directory
      shell: bash
      run: |
        echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV

    - uses: actions/cache@v3
      name: Setup pnpm cache
      with:
        path: ${{ env.STORE_PATH }}
        key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
        restore-keys: |
          ${{ runner.os }}-pnpm-store-

    - name: Install frontend dependencies
      run: pnpm install --prefer-frozen-lockfile
      shell: bash
      working-directory: invokeai/frontend/web
.github/actions/install-python-deps/action.yml (vendored) — deleted (11 lines)

name: Install python dependencies
description: Install python dependencies with pip, with caching
runs:
  using: 'composite'
  steps:
    - name: Setup python
      uses: actions/setup-python@v5
      with:
        python-version: '3.10'
        cache: pip
        cache-dependency-path: pyproject.toml
.github/pr_labels.yml (vendored) — new file (59 lines)

Root:
  - changed-files:
      - any-glob-to-any-file: '*'

PythonDeps:
  - changed-files:
      - any-glob-to-any-file: 'pyproject.toml'

Python:
  - changed-files:
      - all-globs-to-any-file:
          - 'invokeai/**'
          - '!invokeai/frontend/web/**'

PythonTests:
  - changed-files:
      - any-glob-to-any-file: 'tests/**'

CICD:
  - changed-files:
      - any-glob-to-any-file: .github/**

Docker:
  - changed-files:
      - any-glob-to-any-file: docker/**

Installer:
  - changed-files:
      - any-glob-to-any-file: installer/**

Documentation:
  - changed-files:
      - any-glob-to-any-file: docs/**

Invocations:
  - changed-files:
      - any-glob-to-any-file: 'invokeai/app/invocations/**'

Backend:
  - changed-files:
      - any-glob-to-any-file: 'invokeai/backend/**'

Api:
  - changed-files:
      - any-glob-to-any-file: 'invokeai/app/api/**'

Services:
  - changed-files:
      - any-glob-to-any-file: 'invokeai/app/services/**'

FrontendDeps:
  - changed-files:
      - any-glob-to-any-file:
          - '**/*/package.json'
          - '**/*/pnpm-lock.yaml'

Frontend:
  - changed-files:
      - any-glob-to-any-file: 'invokeai/frontend/web/**'
.github/workflows/build-container.yml (vendored) — 7 lines changed

@@ -11,7 +11,7 @@ on:
       - 'docker/docker-entrypoint.sh'
       - 'workflows/build-container.yml'
     tags:
-      - 'v*.*.*'
+      - 'v*'
   workflow_dispatch:

 permissions:
@@ -40,10 +40,14 @@ jobs:
       - name: Free up more disk space on the runner
         # https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
         run: |
           echo "----- Free space before cleanup"
           df -h
           sudo rm -rf /usr/share/dotnet
           sudo rm -rf "$AGENT_TOOLSDIRECTORY"
+          sudo swapoff /mnt/swapfile
+          sudo rm -rf /mnt/swapfile
+          echo "----- Free space after cleanup"
+          df -h

       - name: Checkout
         uses: actions/checkout@v3
@@ -91,6 +95,7 @@ jobs:
         # password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Build container
+        timeout-minutes: 40
        id: docker_build
        uses: docker/build-push-action@v4
        with:
.github/workflows/check-frontend.yml (vendored) — deleted (34 lines)

name: 'Check: frontend'

on:
  workflow_dispatch:
  workflow_call:

defaults:
  run:
    working-directory: invokeai/frontend/web

jobs:
  check-frontend:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up frontend
        uses: ./.github/actions/install-frontend-deps

      - name: Run tsc check
        run: 'pnpm run lint:tsc'
        shell: bash

      - name: Run madge check
        run: 'pnpm run lint:madge'
        shell: bash

      - name: Run eslint check
        run: 'pnpm run lint:eslint'
        shell: bash

      - name: Run prettier check
        run: 'pnpm run lint:prettier'
        shell: bash
.github/workflows/check-python.yml (vendored) — deleted (26 lines)

name: 'Check: python'

on:
  workflow_dispatch:
  workflow_call:

jobs:
  check-backend:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install python dependencies
        uses: ./.github/actions/install-python-deps

      - name: Install ruff
        run: pip install ruff
        shell: bash

      - name: Ruff check
        run: ruff check --output-format=github .
        shell: bash

      - name: Ruff format
        run: ruff format --check .
        shell: bash
.github/workflows/label-pr.yml (vendored) — new file (16 lines)

name: "Pull Request Labeler"
on:
  - pull_request_target

jobs:
  labeler:
    permissions:
      contents: read
      pull-requests: write
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - uses: actions/labeler@v5
        with:
          configuration-path: .github/pr_labels.yml
.github/workflows/lint-frontend.yml (vendored) — new file (43 lines)

name: Lint frontend

on:
  pull_request:
    types:
      - 'ready_for_review'
      - 'opened'
      - 'synchronize'
  push:
    branches:
      - 'main'
  merge_group:
  workflow_dispatch:

defaults:
  run:
    working-directory: invokeai/frontend/web

jobs:
  lint-frontend:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-22.04
    steps:
      - name: Setup Node 18
        uses: actions/setup-node@v4
        with:
          node-version: '18'
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup pnpm
        uses: pnpm/action-setup@v2
        with:
          version: '8.12.1'
      - name: Install dependencies
        run: 'pnpm install --prefer-frozen-lockfile'
      - name: Typescript
        run: 'pnpm run lint:tsc'
      - name: Madge
        run: 'pnpm run lint:madge'
      - name: ESLint
        run: 'pnpm run lint:eslint'
      - name: Prettier
        run: 'pnpm run lint:prettier'
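The same checks can be reproduced locally before opening a PR; a minimal sketch, assuming Node 18 and pnpm are installed:

```sh
# Run the frontend checks the workflow above runs, starting from the repo root.
cd invokeai/frontend/web
pnpm install --prefer-frozen-lockfile   # install deps the same way CI does
pnpm run lint:tsc                       # TypeScript static type check
pnpm run lint:madge                     # circular-dependency check
pnpm run lint:eslint                    # lint
pnpm run lint:prettier                  # format check
```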
.github/workflows/on-change-check-frontend.yml (vendored) — deleted (34 lines)

name: 'On change: run check-frontend'

on:
  push:
    branches:
      - 'main'
  pull_request:
    types:
      - 'ready_for_review'
      - 'opened'
      - 'synchronize'
  merge_group:

jobs:
  check-changed-frontend-files:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    outputs:
      frontend_any_changed: ${{ steps.changed-files.outputs.frontend_any_changed }}
    steps:
      - uses: actions/checkout@v4

      - name: Check for changed frontend files
        id: changed-files
        uses: tj-actions/changed-files@v40
        with:
          files_yaml: |
            frontend:
              - 'invokeai/frontend/web/**'

  run-check-frontend:
    needs: check-changed-frontend-files
    if: ${{ needs.check-changed-frontend-files.outputs.frontend_any_changed == 'true' }}
    uses: ./.github/workflows/check-frontend.yml
.github/workflows/on-change-check-python.yml (vendored) — deleted (37 lines)

name: 'On change: run check-python'

on:
  push:
    branches:
      - 'main'
  pull_request:
    types:
      - 'ready_for_review'
      - 'opened'
      - 'synchronize'
  merge_group:

jobs:
  check-changed-python-files:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    outputs:
      python_any_changed: ${{ steps.changed-files.outputs.python_any_changed }}
    steps:
      - uses: actions/checkout@v4

      - name: Check for changed python files
        id: changed-files
        uses: tj-actions/changed-files@v40
        with:
          files_yaml: |
            python:
              - 'pyproject.toml'
              - 'invokeai/**'
              - '!invokeai/frontend/web/**'
              - 'tests/**'

  run-check-python:
    needs: check-changed-python-files
    if: ${{ needs.check-changed-python-files.outputs.python_any_changed == 'true' }}
    uses: ./.github/workflows/check-python.yml
.github/workflows/on-change-pytest.yml (vendored) — deleted (37 lines)

name: 'On change: run pytest'

on:
  push:
    branches:
      - 'main'
  pull_request:
    types:
      - 'ready_for_review'
      - 'opened'
      - 'synchronize'
  merge_group:

jobs:
  check-changed-python-files:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    outputs:
      python_any_changed: ${{ steps.changed-files.outputs.python_any_changed }}
    steps:
      - uses: actions/checkout@v4

      - name: Check for changed python files
        id: changed-files
        uses: tj-actions/changed-files@v40
        with:
          files_yaml: |
            python:
              - 'pyproject.toml'
              - 'invokeai/**'
              - '!invokeai/frontend/web/**'
              - 'tests/**'

  run-pytest:
    needs: check-changed-python-files
    if: ${{ needs.check-changed-python-files.outputs.python_any_changed == 'true' }}
    uses: ./.github/workflows/check-pytest.yml
.github/workflows/release.yml (vendored) — deleted (103 lines)

name: Release

on:
  push:
    tags:
      - 'v*.*.*'
  workflow_dispatch:
    inputs:
      skip_code_checks:
        description: 'Skip code checks'
        required: true
        default: true
        type: boolean

jobs:
  check-version:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - uses: samuelcolvin/check-python-version@v4
        id: check-python-version
        with:
          version_file_path: invokeai/version/invokeai_version.py

  check-frontend:
    if: github.event.inputs.skip_code_checks != 'true'
    uses: ./.github/workflows/check-frontend.yml

  check-python:
    if: github.event.inputs.skip_code_checks != 'true'
    uses: ./.github/workflows/check-python.yml

  check-pytest:
    if: github.event.inputs.skip_code_checks != 'true'
    uses: ./.github/workflows/check-pytest.yml

  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install python dependencies
        uses: ./.github/actions/install-python-deps

      - name: Install pypa/build
        run: pip install --upgrade build

      - name: Setup frontend
        uses: ./.github/actions/install-frontend-deps

      - name: Run create_installer.sh
        id: create_installer
        run: ./create_installer.sh --skip_frontend_checks
        working-directory: installer

      - name: Upload python distribution artifact
        uses: actions/upload-artifact@v4
        with:
          name: dist
          path: ${{ steps.create_installer.outputs.DIST_PATH }}

      - name: Upload installer artifact
        uses: actions/upload-artifact@v4
        with:
          name: ${{ steps.create_installer.outputs.INSTALLER_FILENAME }}
          path: ${{ steps.create_installer.outputs.INSTALLER_PATH }}

  publish-testpypi:
    runs-on: ubuntu-latest
    needs: [check-version, check-frontend, check-python, check-pytest, build]
    if: github.event_name != 'workflow_dispatch'
    environment:
      name: testpypi
      url: https://test.pypi.org/p/invokeai
    steps:
      - name: Download distribution from build job
        uses: actions/download-artifact@v4
        with:
          name: dist
          path: dist/

      - name: Publish distribution to TestPyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          repository-url: https://test.pypi.org/legacy/

  publish-pypi:
    runs-on: ubuntu-latest
    needs: [check-version, check-frontend, check-python, check-pytest, build]
    if: github.event_name != 'workflow_dispatch'
    environment:
      name: pypi
      url: https://pypi.org/p/invokeai
    steps:
      - name: Download distribution from build job
        uses: actions/download-artifact@v4
        with:
          name: dist
          path: dist/

      - name: Publish distribution to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
.github/workflows/style-checks.yml (vendored) — new file (24 lines)

name: style checks

on:
  pull_request:
  push:
    branches: main

jobs:
  ruff:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - name: Install dependencies with pip
        run: |
          pip install ruff

      - run: ruff check --output-format=github .
      - run: ruff format --check .
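These style checks can also be run locally; a minimal sketch, assuming a Python 3.10 environment:

```sh
# Mirror the style-checks workflow on a local checkout, from the repo root.
pip install ruff
ruff check --output-format=github .   # lint, with GitHub-annotation output
ruff format --check .                 # verify formatting without rewriting files
```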
(workflow file — filename not captured in the extraction; the pytest workflow)

@@ -1,8 +1,15 @@
-name: 'Check: pytest'
+name: Test invoke.py pip
 on:
+  push:
+    branches:
+      - 'main'
+  pull_request:
+    types:
+      - 'ready_for_review'
+      - 'opened'
+      - 'synchronize'
+  merge_group:
   workflow_dispatch:
   workflow_call:

 concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -10,9 +17,11 @@ concurrency:

 jobs:
   matrix:
+    if: github.event.pull_request.draft == false
     strategy:
       matrix:
         python-version:
+          # - '3.9'
           - '3.10'
         pytorch:
           - linux-cuda-11_7
@@ -43,12 +52,27 @@ jobs:
     env:
       PIP_USE_PEP517: '1'
     steps:
-      - uses: actions/checkout@v4
+      - name: Checkout sources
+        id: checkout-sources
+        uses: actions/checkout@v3
+
+      - name: Check for changed python files
+        id: changed-files
+        uses: tj-actions/changed-files@v41
+        with:
+          files_yaml: |
+            python:
+              - 'pyproject.toml'
+              - 'invokeai/**'
+              - '!invokeai/frontend/web/**'
+              - 'tests/**'

       - name: set test prompt to main branch validation
+        if: steps.changed-files.outputs.python_any_changed == 'true'
         run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}

       - name: setup python
+        if: steps.changed-files.outputs.python_any_changed == 'true'
         uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
@@ -56,6 +80,7 @@ jobs:
           cache-dependency-path: pyproject.toml

       - name: install invokeai
+        if: steps.changed-files.outputs.python_any_changed == 'true'
         env:
           PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
         run: >
@@ -63,6 +88,7 @@ jobs:
           --editable=".[test]"

       - name: run pytest
+        if: steps.changed-files.outputs.python_any_changed == 'true'
         id: run-pytest
         run: pytest
(Prettier configuration file — filename not captured)

@@ -7,7 +7,7 @@ embeddedLanguageFormatting: auto
 overrides:
   - files: '*.md'
     options:
-      proseWrap: preserve
+      proseWrap: always
       printWidth: 80
       parser: markdown
       cursorOffset: -1
README.md — 14 lines changed

@@ -1,10 +1,10 @@
 <div align="center">

 ![image]

-# Invoke AI - Generative AI for Professional Creatives
-## Professional Creative Tools for Stable Diffusion, Custom-Trained Models, and more.
-To learn more about Invoke AI, get started instantly, or implement our Business solutions, visit [invoke.ai](https://invoke.ai)
+# Invoke - Professional Creative AI Tools for Visual Media
+## To learn more about Invoke, or implement our Business solutions, visit [invoke.com](https://www.invoke.com/about)

 [![discord badge]][discord link]
@@ -56,7 +56,9 @@ the foundation for multiple commercial products.

 <div align="center">

-![image]
+![image]
+
+![image]

 </div>
@@ -167,7 +169,7 @@ the command `npm install -g pnpm` if needed)
 _For Linux with an AMD GPU:_

 ```sh
-pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
+pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
 ```

 _For non-GPU systems:_
(docker environment sample file — filename not captured)

@@ -2,14 +2,17 @@
 ## Any environment variables supported by InvokeAI can be specified here,
 ## in addition to the examples below.

-# INVOKEAI_ROOT is the path to a path on the local filesystem where InvokeAI will store data.
+# HOST_INVOKEAI_ROOT is the path on the docker host's filesystem where InvokeAI will store data.
 # Outputs will also be stored here by default.
-# This **must** be an absolute path.
-INVOKEAI_ROOT=
+# If relative, it will be relative to the docker directory in which the docker-compose.yml file is located
+#HOST_INVOKEAI_ROOT=../../invokeai-data
+
+# INVOKEAI_ROOT is the path to the root of the InvokeAI repository within the container.
+# INVOKEAI_ROOT=~/invokeai

 # Get this value from your HuggingFace account settings page.
 # HUGGING_FACE_HUB_TOKEN=

 ## optional variables specific to the docker setup.
-# GPU_DRIVER=cuda # or rocm
+# GPU_DRIVER=nvidia #| rocm
 # CONTAINER_UID=1000
(Dockerfile — path not captured)

@@ -18,8 +18,8 @@ ENV INVOKEAI_SRC=/opt/invokeai
 ENV VIRTUAL_ENV=/opt/venv/invokeai

 ENV PATH="$VIRTUAL_ENV/bin:$PATH"
-ARG TORCH_VERSION=2.1.0
-ARG TORCHVISION_VERSION=0.16
+ARG TORCH_VERSION=2.1.2
+ARG TORCHVISION_VERSION=0.16.2
 ARG GPU_DRIVER=cuda
 ARG TARGETPLATFORM="linux/amd64"
 # unused but available
@@ -35,7 +35,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
     if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
         extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
     elif [ "$GPU_DRIVER" = "rocm" ]; then \
-        extra_index_url_arg="--index-url https://download.pytorch.org/whl/rocm5.6"; \
+        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.6"; \
     else \
         extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \
     fi &&\
@@ -54,12 +54,12 @@ RUN --mount=type=cache,target=/root/.cache/pip \
     if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
         pip install -e ".[xformers]"; \
     else \
-        pip install -e "."; \
+        pip install $extra_index_url_arg -e "."; \
     fi

 # #### Build the Web UI ------------------------------------

-FROM node:18-slim AS web-builder
+FROM node:20-slim AS web-builder
 ENV PNPM_HOME="/pnpm"
 ENV PATH="$PNPM_HOME:$PATH"
 RUN corepack enable
@@ -68,7 +68,7 @@ WORKDIR /build
 COPY invokeai/frontend/web/ ./
 RUN --mount=type=cache,target=/pnpm/store \
     pnpm install --frozen-lockfile
-RUN pnpm run build
+RUN npx vite build

 #### Runtime stage ---------------------------------------
(docker README — path not captured)

@@ -1,6 +1,14 @@
 # InvokeAI Containerized

-All commands are to be run from the `docker` directory: `cd docker`
+All commands should be run within the `docker` directory: `cd docker`
+
+## Quickstart :rocket:
+
+On a known working Linux+Docker+CUDA (Nvidia) system, execute `./run.sh` in this directory. It will take a few minutes - depending on your internet speed - to install the core models. Once the application starts up, open `http://localhost:9090` in your browser to Invoke!
+
+For more configuration options (using an AMD GPU, custom root directory location, etc): read on.
+
+## Detailed setup

 #### Linux
@@ -18,9 +26,9 @@ All commands should be run within the `docker` directory: `cd docker`

 This is done via Docker Desktop preferences

-## Quickstart
+### Configure Invoke environment

-1. Make a copy of `env.sample` and name it `.env` (`cp env.sample .env` (Mac/Linux) or `copy example.env .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to:
+1. Make a copy of `.env.sample` and name it `.env` (`cp .env.sample .env` (Mac/Linux) or `copy example.env .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to:
    a. the desired location of the InvokeAI runtime directory, or
    b. an existing, v3.0.0 compatible runtime directory.
 1. Execute `run.sh`
@@ -37,19 +45,21 @@ The runtime directory (holding models and outputs) will be created in the location

 The Docker daemon on the system must be already set up to use the GPU. In case of Linux, this involves installing `nvidia-docker-runtime` and configuring the `nvidia` runtime as default. Steps will be different for AMD. Please see Docker documentation for the most up-to-date instructions for using your GPU with Docker.

+To use an AMD GPU, set `GPU_DRIVER=rocm` in your `.env` file.
+
 ## Customize

 Check the `.env.sample` file. It contains some environment variables for running in Docker. Copy it, name it `.env`, and fill it in with your own values. Next time you run `run.sh`, your custom values will be used.

 You can also set these values in `docker-compose.yml` directly, but `.env` will help avoid conflicts when code is updated.

-Example (values are optional, but setting `INVOKEAI_ROOT` is highly recommended):
+Values are optional, but setting `INVOKEAI_ROOT` is highly recommended. The default is `~/invokeai`. Example:

 ```bash
 INVOKEAI_ROOT=/Volumes/WorkDrive/invokeai
 HUGGINGFACE_TOKEN=the_actual_token
 CONTAINER_UID=1000
-GPU_DRIVER=cuda
+GPU_DRIVER=nvidia
 ```

 Any environment variables supported by InvokeAI can be set here - please see the [Configuration docs](https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/) for further detail.
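Taken together, the quickstart and customization steps in this README reduce to a short shell session; a minimal sketch (the INVOKEAI_ROOT path is illustrative, and editing `.env` in an editor works just as well):

```sh
cd docker
cp .env.sample .env                           # start from the sample env file
echo "INVOKEAI_ROOT=/data/invokeai" >> .env   # recommended: pin the runtime dir
# echo "GPU_DRIVER=rocm" >> .env              # uncomment for an AMD GPU
./run.sh                                      # build and start the container
# then open http://localhost:9090 in a browser
```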
(docker-compose file — path not captured)

@@ -21,7 +21,9 @@ x-invokeai: &invokeai
     ports:
       - "${INVOKEAI_PORT:-9090}:9090"
     volumes:
-      - ${INVOKEAI_ROOT:-~/invokeai}:${INVOKEAI_ROOT:-/invokeai}
+      - type: bind
+        source: ${HOST_INVOKEAI_ROOT:-${INVOKEAI_ROOT:-~/invokeai}}
+        target: ${INVOKEAI_ROOT:-/invokeai}
       - ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
       # - ${INVOKEAI_MODELS_DIR:-${INVOKEAI_ROOT:-/invokeai/models}}
       # - ${INVOKEAI_MODELS_CONFIG_PATH:-${INVOKEAI_ROOT:-/invokeai/configs/models.yaml}}
(docker run script — path not captured)

@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
-set -e
+set -e -o pipefail

 run() {
   local scriptdir=$(dirname "${BASH_SOURCE[0]}")
@@ -8,16 +8,20 @@ run() {
   local build_args=""
   local profile=""

-  [[ -f ".env" ]] &&
-    build_args=$(awk '$1 ~ /=[^$]/ && $0 !~ /^#/ {print "--build-arg " $0 " "}' .env) &&
-    profile="$(awk -F '=' '/GPU_DRIVER/ {print $2}' .env)"
+  touch .env
+  build_args=$(awk '$1 ~ /=[^$]/ && $0 !~ /^#/ {print "--build-arg " $0 " "}' .env) &&
+    profile="$(awk -F '=' '/GPU_DRIVER/ {print $2}' .env)"
+
+  [[ -z "$profile" ]] && profile="nvidia"

   local service_name="invokeai-$profile"

-  printf "%s\n" "docker compose build args:"
-  printf "%s\n" "$build_args"
+  if [[ ! -z "$build_args" ]]; then
+    printf "%s\n" "docker compose build args:"
+    printf "%s\n" "$build_args"
+  fi

-  docker compose build $build_args
+  docker compose build $build_args $service_name
   unset build_args

   printf "%s\n" "starting service $service_name"
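For reference, the awk filter retained in this script turns each uncommented KEY=VALUE line of `.env` into a `--build-arg` flag, and a second awk pass picks the compose service from GPU_DRIVER. A small sketch of what it emits — the sample `.env` contents are hypothetical:

```sh
# Demonstrate the script's build-arg extraction on a throwaway env file.
printf 'INVOKEAI_ROOT=/data/invokeai\nGPU_DRIVER=rocm\n# CONTAINER_UID=1000\n' > /tmp/demo.env

awk '$1 ~ /=[^$]/ && $0 !~ /^#/ {print "--build-arg " $0 " "}' /tmp/demo.env
# prints (the commented line is skipped):
#   --build-arg INVOKEAI_ROOT=/data/invokeai
#   --build-arg GPU_DRIVER=rocm

awk -F '=' '/GPU_DRIVER/ {print $2}' /tmp/demo.env   # -> rocm, i.e. service invokeai-rocm
```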
docs/RELEASE.md — deleted (157 lines)

# Release

The app is published twice, in different build formats.

- A [PyPI] distribution. This includes both a source distribution and built distribution (a wheel). Users install with `pip install invokeai`. The updater uses this build.
- An installer on the [InvokeAI Releases Page]. This is a zip file with install scripts and a wheel. This is only used for new installs.

## General Prep

Make a developer call-out for PRs to merge. Merge and test things out.

While the release workflow does not include end-to-end tests, it does pause before publishing so you can download and test the final build.

## Release Workflow

The `release.yml` workflow runs a number of jobs to handle code checks, tests, build and publish on PyPI.

It is triggered on **tag push**. It doesn't matter if you've prepped a release branch like `release/v3.5.0` or are releasing from `main` - it works the same.

!!! info

    Commits are reference-counted, so as long as something points to a commit, that commit will not be garbage-collected from the repo.

    It is safe to create a release branch, tag it and have the workflow do its thing, then delete the branch. So long as the tag is not deleted, that snapshot of the repo will forever exist at the tag.

### Pushing the Tag

Run `make tag-release` to tag the current commit and kick off the workflow.
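(A manual equivalent of `make tag-release` might look like the following sketch; the version is the example used below, and the actual Makefile target may differ.)

```sh
git tag v3.5.0rc2           # must match __version__ in invokeai/version/invokeai_version.py
git push origin v3.5.0rc2   # the tag push triggers release.yml
```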
### Tag Push Example

Any tag push will trigger the workflow, but it will publish only if the git ref (the tag) matches the app version.

Say `invokeai_version.py` looks like this:

```py
__version__ = "3.5.0rc2"
```

- If you push tag `v3.5.0rc2`, the workflow will trigger and run. If the checks and build succeed, you'll be able to publish the release.
- If you push tag `v3.5.0rc3` or `banana-sushi`, the workflow will trigger and run. Even if the checks and build succeed, you _will not_ be able to publish the release, because the tag doesn't match the app version.

!!! info

    Any valid [version specifier] works, so long as the tag matches the version. The release workflow works exactly the same for `RC`, `post`, `dev`, etc.

### Code quality jobs

Three jobs are run concurrently:

- **`pytest`**: runs `pytest` on a matrix of platforms
- **`check-python`**: runs `ruff` (format and lint)
- **`check-frontend`**: runs `prettier` (format), `eslint` (lint), `madge` (circular refs) and `tsc` (static type check)

If any fail, the release workflow bails.

!!! info Future Enhancement

    We should add `mypy` or `pyright` to the **`check-python`** job at some point.

### `build`

This sets up both python and frontend dependencies and builds the python package. Internally, this runs `installer/create_installer.sh` and uploads two artifacts:

- **`dist`**: the python distribution, to be published on PyPI
- **`InvokeAI-installer-${VERSION}.zip`**: the installer to be included in the GitHub release

!!! info

    The installer uses the same wheel file that is included in the PyPI distribution, so you _should_ get exactly the same thing using the installer or the PyPI dist.

### Sanity Check & Smoke Test

At this point, the release workflow pauses (the remaining jobs all require approval).

The maintainer who is running this release should go to the **Summary** tab of the workflow, download the installer and test it.

You could also download the `dist`, unzip it and install directly from the wheel. That same wheel will be uploaded to PyPI.

### Publish

The publish jobs use [GitHub environments], which are configured as [trusted publishers] on PyPI.

Both jobs require a maintainer to approve them from the workflow's **Summary** tab.

- Click the **Review deployments** button
- Select the environment (either `testpypi` or `pypi`)
- Click **Approve and deploy**

#### Skip and Failure Conditions

The publish jobs may skip or fail in certain situations:

- **If code checks or build fail, the jobs will be skipped.**
- **If code checks were skipped, the jobs will be skipped.** This can only happen when [manually] running the workflow.
- **If the git ref targeted by the workflow doesn't match the app version, the jobs will fail.** This protects us from accidentally publishing the wrong version to PyPI. This is achieved with [samuelcolvin/check-python-version].
- **If the version already exists on PyPI, the jobs will fail.** PyPI only allows a particular version to be published once - you cannot change it. If a version published on PyPI has a problem, you'll need to "fail forward" by bumping the app version and publishing a followup release.

#### `publish-testpypi`

Publishes the distribution on the [Test PyPI] index using the `testpypi` GitHub environment.

This job is optional:

- It is not required for the final `publish-pypi` job to run.
- The wheel used in the installer and the PyPI dist (uploaded as artifacts from the workflow, as described above) are identical, so this job _should_ be extraneous.

That said, you could approve it and then test installing from Test PyPI before running the production PyPI publish job:

```sh
# Create a new virtual environment
python -m venv ~/.test-invokeai-dist --prompt test-invokeai-dist
# Install the distribution from Test PyPI
pip install --index-url https://test.pypi.org/simple/ invokeai
# Run and test the app
invokeai-web
# Cleanup
deactivate
rm -rf ~/.test-invokeai-dist
```

#### `publish-pypi`

Publishes the distribution on the production PyPI index, using the `pypi` GitHub environment.

Once this finishes, `pip install invokeai` will get the release!

## Publish the GitHub RC with installer

1. [Draft a new release] on GitHub, choosing the tag that initiated the release.
2. Write the release notes, describing important changes. The **Generate release notes** button automatically inserts the changelog and new contributors, and you can copy/paste the intro from previous releases.
3. Upload the zip file created in [Build the installer] into the Assets section of the release notes. You can also upload the zip into the body of the release notes, since it can be hard for users to find the Assets section.
4. Check the **Set as a pre-release** and **Create a discussion for this release** checkboxes at the bottom of the release page.
5. Publish the pre-release.
6. Announce the pre-release in Discord.

!!! info Future Enhancement

    Workflows can do things like create a release from a template and upload release assets. One popular action to handle this is [ncipollo/release-action]. A future enhancement to the release process could set this up.

## Manually Running the Release Workflow

The release workflow can be kicked off manually. This is useful to get an installer build and test it out without needing to push a tag.

When run this way, you'll see a **Skip code checks** checkbox. This allows the workflow to run without the three time-consuming code quality check jobs. The publish jobs will be skipped if this is enabled.

[InvokeAI Releases Page]: https://github.com/invoke-ai/InvokeAI/releases
[PyPI]: https://pypi.org/
[Draft a new release]: https://github.com/invoke-ai/InvokeAI/releases/new
[Test PyPI]: https://test.pypi.org/
[version specifier]: https://packaging.python.org/en/latest/specifications/version-specifiers/
[ncipollo/release-action]: https://github.com/ncipollo/release-action
[GitHub environments]: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment
[trusted publishers]: https://docs.pypi.org/trusted-publishers/
[samuelcolvin/check-python-version]: https://github.com/samuelcolvin/check-python-version
[manually]: #manually-running-the-release-workflow
|
Before Width: | Height: | Size: 297 KiB After Width: | Height: | Size: 46 KiB |
|
Before Width: | Height: | Size: 1.1 MiB After Width: | Height: | Size: 4.9 MiB |
|
Before Width: | Height: | Size: 169 KiB After Width: | Height: | Size: 1.1 MiB |
|
Before Width: | Height: | Size: 194 KiB After Width: | Height: | Size: 131 KiB |
|
Before Width: | Height: | Size: 209 KiB After Width: | Height: | Size: 122 KiB |
|
Before Width: | Height: | Size: 114 KiB After Width: | Height: | Size: 95 KiB |
|
Before Width: | Height: | Size: 187 KiB After Width: | Height: | Size: 123 KiB |
|
Before Width: | Height: | Size: 112 KiB After Width: | Height: | Size: 107 KiB |
|
Before Width: | Height: | Size: 132 KiB After Width: | Height: | Size: 61 KiB |
|
Before Width: | Height: | Size: 167 KiB After Width: | Height: | Size: 119 KiB |
|
Before Width: | Height: | Size: 70 KiB |
|
Before Width: | Height: | Size: 59 KiB After Width: | Height: | Size: 60 KiB |
BIN
docs/assets/nodes/workflow_library.png
Normal file
|
After Width: | Height: | Size: 129 KiB |
@@ -15,8 +15,13 @@ model. These are the:
|
||||
their metadata, and `ModelRecordServiceBase` to store that
|
||||
information. It is also responsible for managing the InvokeAI
|
||||
`models` directory and its contents.
|
||||
|
||||
* _DownloadQueueServiceBase_ (**CURRENTLY UNDER DEVELOPMENT - NOT IMPLEMENTED**)
|
||||
|
||||
* _ModelMetadataStore_ and _ModelMetaDataFetch_ Backend modules that
|
||||
are able to retrieve metadata from online model repositories,
|
||||
transform them into Pydantic models, and cache them to the InvokeAI
|
||||
SQL database.
|
||||
|
||||
* _DownloadQueueServiceBase_
|
||||
A multithreaded downloader responsible
|
||||
for downloading models from a remote source to disk. The download
|
||||
queue has special methods for downloading repo_id folders from
|
||||
@@ -30,13 +35,13 @@ model. These are the:
|
||||
|
||||
## Location of the Code
|
||||
|
||||
All four of these services can be found in
|
||||
The four main services can be found in
|
||||
`invokeai/app/services` in the following directories:
|
||||
|
||||
* `invokeai/app/services/model_records/`
|
||||
* `invokeai/app/services/model_install/`
|
||||
* `invokeai/app/services/downloads/`
|
||||
* `invokeai/app/services/model_loader/` (**under development**)
|
||||
* `invokeai/app/services/downloads/`(**under development**)
|
||||
|
||||
Code related to the FastAPI web API can be found in
|
||||
`invokeai/app/api/routers/model_records.py`.
|
||||
@@ -402,15 +407,18 @@ functionality:
the download, installation and registration process.

- Downloading a model from an arbitrary URL and installing it in
`models_dir`.

- Special handling for Civitai model URLs which allow the user to
paste in a model page's URL or download link.

- Special handling for HuggingFace repo_ids to recursively download
the contents of the repository, paying attention to alternative
variants such as fp16.

- Saving tags and other metadata about the model into the InvokeAI database
when fetching from a repo that provides that type of information
(currently only Civitai and HuggingFace).

### Initializing the installer
@@ -426,16 +434,24 @@ following initialization pattern:

```
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.model_records import ModelRecordServiceSQL
from invokeai.app.services.model_install import ModelInstallService
from invokeai.app.services.download import DownloadQueueService
from invokeai.app.services.shared.sqlite import SqliteDatabase
from invokeai.backend.util.logging import InvokeAILogger

config = InvokeAIAppConfig.get_config()
config.parse_args()

logger = InvokeAILogger.get_logger(config=config)
db = SqliteDatabase(config, logger)
record_store = ModelRecordServiceSQL(db)
queue = DownloadQueueService()
queue.start()

installer = ModelInstallService(app_config=config,
                                record_store=record_store,
                                download_queue=queue
                                )
installer.start()
```

The full form of `ModelInstallService()` takes the following
@@ -443,9 +459,12 @@ required parameters:

| **Argument**     | **Type**                     | **Description**                                                |
|------------------|------------------------------|----------------------------------------------------------------|
| `app_config`     | InvokeAIAppConfig            | InvokeAI app configuration object                              |
| `record_store`   | ModelRecordServiceBase       | Config record storage database                                 |
| `event_bus`      | EventServiceBase             | Optional event bus to send download/install progress events to |
| `download_queue` | DownloadQueueServiceBase     | Download queue object                                          |
| `metadata_store` | Optional[ModelMetadataStore] | Metadata storage object                                        |
| `session`        | Optional[requests.Session]   | Swap in a different Session object (usually for debugging)     |

Once initialized, the installer will provide the following methods:

@@ -474,14 +493,14 @@ source7 = URLModelSource(url='https://civitai.com/api/download/models/63006', ac

```
for source in [source1, source2, source3, source4, source5, source6, source7]:
    install_job = installer.install_model(source)

source2job = installer.wait_for_installs(timeout=120)
for source in sources:
    job = source2job[source]
    if job.complete:
        model_config = job.config_out
        model_key = model_config.key
        print(f"{source} installed as {model_key}")
    elif job.errored:
        print(f"{source}: {job.error_type}.\nStack trace:\n{job.error}")
```

@@ -515,43 +534,117 @@ The full list of arguments to `import_model()` is as follows:

| **Argument**   | **Type**       | **Default** | **Description**                                            |
|----------------|----------------|-------------|------------------------------------------------------------|
| `source`       | ModelSource    | None        | The source of the model: a Path, URL or repo_id            |
| `config`       | Dict[str, Any] | None        | Override all or a portion of the model's probed attributes |
| `access_token` | str            | None        | Provide authorization information needed to download       |

The next few sections describe the various types of ModelSource that
can be passed to `import_model()`.

`config` can be used to override all or a portion of the configuration
attributes returned by the model prober. See the section below for
details.

`access_token` is passed to the download queue and used to access
repositories that require it.

#### LocalModelSource

This is used for a model that is located on a locally-accessible Posix
filesystem, such as a local disk or networked fileshare.

| **Argument** | **Type**    | **Default** | **Description**                     |
|--------------|-------------|-------------|-------------------------------------|
| `path`       | str \| Path | None        | Path to the model file or directory |
| `inplace`    | bool        | False       | If set, the model file(s) will be left in their current location; otherwise they will be copied into the InvokeAI root's `models` directory |

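As a minimal sketch of registering a local model without copying it (the
file path is illustrative, and we assume `LocalModelSource` is exported
by the same `model_install` service package as the installer):

```
from invokeai.app.services.model_install import LocalModelSource  # assumed export

source = LocalModelSource(path='/data/models/mymodel.safetensors', inplace=True)
job = installer.import_model(source)
installer.wait_for_installs()
```
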
#### URLModelSource

This is used for a single-file model that is accessible via a URL. The
fields are:

| **Argument**   | **Type**   | **Default** | **Description**                                     |
|----------------|------------|-------------|-----------------------------------------------------|
| `url`          | AnyHttpUrl | None        | The URL for the model file.                         |
| `access_token` | str        | None        | An access token needed to gain access to this file. |

The `AnyHttpUrl` class can be imported from `pydantic.networks`.

Ordinarily, no metadata is retrieved from these sources. However,
there is special-case code in the installer that looks for HuggingFace
and Civitai URLs and fetches the model metadata from the corresponding
repo.

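A short sketch of a URL-based install, reusing the download URL from the
larger example above (the token is a placeholder, and `URLModelSource`
is assumed to be exported alongside `LocalModelSource`):

```
source = URLModelSource(
    url='https://civitai.com/api/download/models/63006',
    access_token='my-civitai-token',  # placeholder; only needed for gated files
)
job = installer.import_model(source)
```
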
#### CivitaiModelSource

This is used for a model that is hosted by the Civitai web site.

| **Argument**   | **Type** | **Default** | **Description**                                                    |
|----------------|----------|-------------|--------------------------------------------------------------------|
| `version_id`   | int      | None        | The ID of the particular version of the desired model.            |
| `access_token` | str      | None        | An access token needed to gain access to a subscribers-only model. |

Civitai has two model IDs, both of which are integers. The `model_id`
corresponds to a collection of model versions that may differ in
arbitrary ways, such as derivation from different checkpoint training
steps, SFW vs NSFW generation, pruned vs non-pruned, etc. The
`version_id` points to a specific version. Please use the latter.

Some Civitai models require an access token to download. These can be
generated from the Civitai profile page of a logged-in
account. Somewhat annoyingly, if you fail to provide the access token
when downloading a model that needs it, Civitai generates a redirect
to a login page rather than a 403 Forbidden error. The installer
attempts to catch this event and issue an informative error
message. Otherwise you will get an "unrecognized model suffix" error
when the model prober tries to identify the type of the HTML login
page.

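A sketch of a version-id install (the version ID is taken from the
download URL used earlier, and `CivitaiModelSource` is assumed to be
exported alongside the other source classes):

```
source = CivitaiModelSource(version_id=63006, access_token='my-civitai-token')  # placeholder token
job = installer.import_model(source)
```
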
#### HFModelSource

HuggingFace has the most complicated `ModelSource` structure:

| **Argument**   | **Type**         | **Default**              | **Description**                                                    |
|----------------|------------------|--------------------------|--------------------------------------------------------------------|
| `repo_id`      | str              | None                     | The ID of the desired model.                                       |
| `variant`      | ModelRepoVariant | ModelRepoVariant('fp16') | The desired variant.                                               |
| `subfolder`    | Path             | None                     | Look for the model in a subfolder of the repo.                     |
| `access_token` | str              | None                     | An access token needed to gain access to a subscribers-only model. |

The `repo_id` is the repository ID, such as `stabilityai/sdxl-turbo`.

The `variant` is one of the various diffusers formats that HuggingFace
supports and is used to pick out, from the hodgepodge of files in a
typical HuggingFace repository, the particular components needed for
a complete diffusers model. `ModelRepoVariant` is an enum that can be
imported from `invokeai.backend.model_manager` and has the following
values:

| **Name**                  | **String Value** |
|---------------------------|------------------|
| ModelRepoVariant.DEFAULT  | "default"        |
| ModelRepoVariant.FP16     | "fp16"           |
| ModelRepoVariant.FP32     | "fp32"           |
| ModelRepoVariant.ONNX     | "onnx"           |
| ModelRepoVariant.OPENVINO | "openvino"       |
| ModelRepoVariant.FLAX     | "flax"           |

You can also pass the string forms to `variant` directly. Note that
InvokeAI may not be able to load and run all variants. At the current
time, specifying `ModelRepoVariant.DEFAULT` will retrieve model files
that are unqualified, e.g. `pytorch_model.safetensors` rather than
`pytorch_model.fp16.safetensors`. These are usually the 32-bit
safetensors forms of the model.

If `subfolder` is specified, then the requested model resides in a
subfolder of the main model repository. This is typically used to
fetch and install VAEs.

Some models require you to be registered with HuggingFace and logged
in. To download these files, you must provide an
`access_token`. Internally, if no access token is provided, then
`HfFolder.get_token()` will be called to fill it in with the cached
one.

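A sketch that fetches just the VAE component of a repo (the repo_id and
subfolder are illustrative, and `HFModelSource` is assumed to be
exported alongside the other source classes):

```
from pathlib import Path

from invokeai.backend.model_manager import ModelRepoVariant

source = HFModelSource(
    repo_id='stabilityai/stable-diffusion-2-1',  # illustrative repo_id
    variant=ModelRepoVariant.FP16,
    subfolder=Path('vae'),  # download only the repo's vae subfolder
)
job = installer.import_model(source)
```
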
#### Monitoring the install job process

@@ -563,7 +656,8 @@ The `ModelInstallJob` class has the following structure:

| **Attribute** | **Type**         | **Description**                                                                         |
|---------------|------------------|-----------------------------------------------------------------------------------------|
| `id`          | `int`            | Integer ID for this job                                                                 |
| `status`      | `InstallStatus`  | An enum of [`waiting`, `downloading`, `running`, `completed`, `error` and `cancelled`]  |
| `config_in`   | `dict`           | Overriding configuration values provided by the caller                                  |
| `config_out`  | `AnyModelConfig` | After successful completion, contains the configuration record written to the database |
| `inplace`     | `boolean`        | True if the caller asked to install the model in place using its local path            |

@@ -578,30 +672,70 @@ broadcast to the InvokeAI event bus. The events will appear on the bus
as an event of type `EventServiceBase.model_event`, with a timestamp and
one of the following event names:

##### `model_install_downloading`

For remote models only, `model_install_downloading` events will be issued at regular
intervals as the download progresses. The event's payload contains the
following keys:

| **Key**       | **Type**   | **Description**                                                                             |
|---------------|------------|---------------------------------------------------------------------------------------------|
| `source`      | str        | String representation of the requested source                                              |
| `local_path`  | str        | String representation of the path to the downloading model (usually a temporary directory) |
| `bytes`       | int        | How many bytes have been downloaded so far                                                  |
| `total_bytes` | int        | Total size of all the files that make up the model                                         |
| `parts`       | List[Dict] | Information on the progress of the individual files that make up the model                 |

`parts` is a list of dictionaries that give information on each of the
component pieces of the download. The dictionary's keys are
`source`, `local_path`, `bytes` and `total_bytes`, and correspond to
the like-named keys in the main event.

Note that downloading events will not be issued for local models, and
that downloading events occur *before* the running event.

##### `model_install_running`

`model_install_running` is issued when all the required downloads have
completed (if applicable) and the model probing, copying and
registration process has started.

The payload will contain the key `source`.

##### `model_install_completed`

`model_install_completed` is issued once at the end of a successful
installation. The payload will contain the keys `source`,
`total_bytes` and `key`, where `key` is the ID under which the model
has been registered.

##### `model_install_error`

`model_install_error` is emitted if the installation process fails for
some reason. The payload will contain the keys `source`, `error_type`
and `error`. `error_type` is a short message indicating the nature of
the error, and `error` is the long traceback to help debug the
problem.

##### `model_install_cancelled`

`model_install_cancelled` is issued if the model installation is
cancelled, or if one or more of its files' downloads are
cancelled. The payload will contain `source`.

##### Following the model status

You may poll the `ModelInstallJob` object returned by `import_model()`
to ascertain the state of the install. The job status can be read from
the job's `status` attribute, an `InstallStatus` enum which has the
enumerated values `WAITING`, `DOWNLOADING`, `RUNNING`, `COMPLETED`,
`ERROR` and `CANCELLED`.

For convenience, install jobs also provide the following boolean
properties: `waiting`, `downloading`, `running`, `complete`, `errored`
and `cancelled`, as well as `in_terminal_state`. The last will return
True if the job is in the complete, errored or cancelled states.
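
A minimal polling sketch built on these properties, reusing a `source`
from the examples above (the one-second sleep interval is an arbitrary
choice):

```
import time

job = installer.import_model(source)
while not job.in_terminal_state:
    time.sleep(1)  # let the download/install worker threads make progress

if job.complete:
    print(f"installed as {job.config_out.key}")
elif job.errored:
    print(f"install failed: {job.error_type}")
elif job.cancelled:
    print("install was cancelled")
```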

#### Model configuration and probing

@@ -621,17 +755,9 @@ overriding values for any of the model's configuration
attributes. This is typically used to set
the model's name and description, but can also be used to overcome
cases in which automatic probing is unable to (correctly) determine
the model's attributes. The most common situation is the
`prediction_type` field for sd-2 (and rare sd-1) models. Here is an
example of how it works:

```
install_job = installer.import_model(
    source=HFModelSource(repo_id='stabilityai/stable-diffusion-2-1', variant='fp32'),
    config=dict(
        prediction_type=SchedulerPredictionType('v_prediction'),
        name='stable diffusion 2 base model',
    )
)
```

@@ -643,29 +769,38 @@ install_job = installer.import_model(

This section describes additional methods provided by the installer class.

#### jobs = installer.wait_for_installs([timeout])

Block until all pending installs are completed or errored and then
return a list of completed jobs. The optional `timeout` argument
causes the call to return after the specified time even if all jobs
have not completed. An argument of 0 (the default) will block
indefinitely.

#### jobs = installer.list_jobs()

Return a list of all active and complete `ModelInstallJob`s.

#### jobs = installer.get_job_by_source(source)

Return a list of `ModelInstallJob`s corresponding to the indicated
model source.

#### jobs = installer.get_job_by_id(id)

Return a list of `ModelInstallJob`s corresponding to the indicated
model id.

#### jobs = installer.cancel_job(job)

Cancel the indicated job.

#### installer.prune_jobs

Remove jobs that are in a terminal state (i.e. complete, errored or
cancelled) from the job list returned by `list_jobs()` and
`get_job()`.

#### installer.app_config, installer.record_store, installer.event_bus

Properties that provide access to the installer's `InvokeAIAppConfig`,
`ModelRecordServiceBase` and `EventServiceBase` objects.

@@ -726,120 +861,6 @@ the API starts up. Its effect is to call `sync_to_config()` to
synchronize the model record store database with what's currently on
disk.
***
## Get on line: The Download Queue

@@ -879,7 +900,6 @@ following fields:

| `job_started`  | float     | | Timestamp for when the job started running                   |
| `job_ended`    | float     | | Timestamp for when the job completed or errored out          |
| `job_sequence` | int       | | A counter that is incremented each time a model is dequeued  |
| `error`        | Exception | | A copy of the Exception that caused an error during download |

When you create a job, you can assign it a `priority`. If multiple

@@ -1184,3 +1204,362 @@ other resources that it might have been using.

This will start/pause/cancel all jobs that have been submitted to the
queue and have not yet reached a terminal state.

***
## This Meta be Good: Model Metadata Storage

The modules found under `invokeai.backend.model_manager.metadata`
provide a straightforward API for fetching model metadata from online
repositories. Currently two repositories are supported: HuggingFace
and Civitai. However, the modules are easily extended for additional
repos, provided that they have defined APIs for metadata access.

Metadata comprises any descriptive information that is not essential
for getting the model to run. For example "author" is metadata, while
"type", "base" and "format" are not. The latter fields are part of the
model's config, as defined in `invokeai.backend.model_manager.config`.

### Example Usage:

```
from invokeai.backend.model_manager.metadata import (
    AnyModelRepoMetadata,
    CivitaiMetadataFetch,
    CivitaiMetadata,
    ModelMetadataStore,
)
# to access the initialized sql database
from invokeai.app.api.dependencies import ApiDependencies

civitai = CivitaiMetadataFetch()

# fetch the metadata
model_metadata = civitai.from_url("https://civitai.com/models/215796")

# get some common metadata fields
author = model_metadata.author
tags = model_metadata.tags

# get some Civitai-specific fields
assert isinstance(model_metadata, CivitaiMetadata)

trained_words = model_metadata.trained_words
base_model = model_metadata.base_model_trained_on
thumbnail = model_metadata.thumbnail_url

# cache the metadata to the database using the key corresponding to
# an existing model config record in the `model_config` table
sql_cache = ModelMetadataStore(ApiDependencies.invoker.services.db)
sql_cache.add_metadata('fb237ace520b6716adc98bcb16e8462c', model_metadata)

# now we can search the database by tag, author or model name
# matches will contain a list of model keys that match the search
matches = sql_cache.search_by_tag({"tool", "turbo"})
```

### Structure of the Metadata objects

There is a short class hierarchy of Metadata objects, all of which
descend from the Pydantic `BaseModel`.

#### `ModelMetadataBase`

This is the common base class for metadata:

| **Field Name** | **Type** | **Description**                 |
|----------------|----------|---------------------------------|
| `name`         | str      | Repository's name for the model |
| `author`       | str      | Model's author                  |
| `tags`         | Set[str] | Model tags                      |

Note that the model config record also has a `name` field. It is
intended that the config record version be locally customizable, while
the metadata version is read-only. However, enforcing this is expected
to be part of the business logic.

Descendants of the base add additional fields.

#### `HuggingFaceMetadata`

This descends from `ModelMetadataBase` and adds the following fields:

| **Field Name**  | **Type**               | **Description**                                                |
|-----------------|------------------------|----------------------------------------------------------------|
| `type`          | Literal["huggingface"] | Used for the discriminated union of metadata classes           |
| `id`            | str                    | HuggingFace repo_id                                            |
| `tag_dict`      | Dict[str, Any]         | A dictionary of tag/value pairs provided in addition to `tags` |
| `last_modified` | datetime               | Date of last commit of this model to the repo                  |
| `files`         | List[Path]             | List of the files in the model repo                            |

#### `CivitaiMetadata`

This descends from `ModelMetadataBase` and adds the following fields:

| **Field Name**          | **Type**            | **Description**                                                      |
|-------------------------|---------------------|----------------------------------------------------------------------|
| `type`                  | Literal["civitai"]  | Used for the discriminated union of metadata classes                 |
| `id`                    | int                 | Civitai model id                                                     |
| `version_name`          | str                 | Name of this version of the model (distinct from model name)         |
| `version_id`            | int                 | Civitai model version id (distinct from model id)                    |
| `created`               | datetime            | Date this version of the model was created                           |
| `updated`               | datetime            | Date this version of the model was last updated                      |
| `published`             | datetime            | Date this version of the model was published to Civitai              |
| `description`           | str                 | Model description. Quite verbose and contains HTML tags              |
| `version_description`   | str                 | Model version description, usually describes changes to the model    |
| `nsfw`                  | bool                | Whether the model tends to generate NSFW content                     |
| `restrictions`          | LicenseRestrictions | An object that describes what is and isn't allowed with this model   |
| `trained_words`         | Set[str]            | Trigger words for this model, if any                                 |
| `download_url`          | AnyHttpUrl          | URL for downloading this version of the model                        |
| `base_model_trained_on` | str                 | Name of the model that this version was trained on                   |
| `thumbnail_url`         | AnyHttpUrl          | URL to access a representative thumbnail image of the model's output |
| `weight_min`            | int                 | For LoRA sliders, the minimum suggested weight to apply              |
| `weight_max`            | int                 | For LoRA sliders, the maximum suggested weight to apply              |

Note that `weight_min` and `weight_max` are not currently populated
and take the default values of (-1.0, +2.0). The issue is that these
values aren't part of the structured data but appear in the text
description. Some regular expression or LLM coding may be able to
extract these values.

Also be aware that `base_model_trained_on` is free text and doesn't
correspond to our `ModelType` enum.

`CivitaiMetadata` also defines some convenience properties relating to
licensing restrictions: `credit_required`, `allow_commercial_use`,
`allow_derivatives` and `allow_different_license`.

#### `AnyModelRepoMetadata`

This is a discriminated Union of `CivitaiMetadata` and
`HuggingFaceMetadata`.

### Fetching Metadata from Online Repos

The `HuggingFaceMetadataFetch` and `CivitaiMetadataFetch` classes will
retrieve metadata from their corresponding repositories and return
`AnyModelRepoMetadata` objects. Their base class
`ModelMetadataFetchBase` is an abstract class that defines two
methods: `from_url()` and `from_id()`. The former accepts the type of
model URLs that the user will try to cut and paste into the model
import form. The latter accepts a string ID in the format recognized
by the repository of choice. Both methods return an
`AnyModelRepoMetadata`.

The base class also has a class method `from_json()` which will take
the JSON representation of a `ModelMetadata` object, validate it, and
return the corresponding `AnyModelRepoMetadata` object.

When initializing one of the metadata fetching classes, you may
provide a `requests.Session` argument. This allows you to customize
the low-level HTTP fetch requests and is used, for instance, in the
testing suite to avoid hitting the internet.

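As a sketch of a fetch-by-id call (the repo_id is illustrative, and we
assume the session can be passed as the sole constructor argument):

```
import requests

from invokeai.backend.model_manager.metadata import HuggingFaceMetadataFetch

session = requests.Session()  # could equally be a mocked or caching session
fetcher = HuggingFaceMetadataFetch(session)

metadata = fetcher.from_id("stabilityai/sdxl-turbo")
print(metadata.name, sorted(metadata.tags))
```
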
The HuggingFace and Civitai fetcher subclasses add additional
repo-specific fetching methods:

#### HuggingFaceMetadataFetch

This overrides its base class `from_json()` method to return a
`HuggingFaceMetadata` object directly.

#### CivitaiMetadataFetch

This adds the following methods:

`from_civitai_modelid()` This takes the ID of a model, finds the
default version of the model, and then retrieves the metadata for
that version, returning a `CivitaiMetadata` object directly.

`from_civitai_versionid()` This takes the ID of a model version and
retrieves its metadata. Functionally equivalent to `from_id()`, the
only difference is that it returns a `CivitaiMetadata` object rather
than an `AnyModelRepoMetadata`.

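A brief sketch (the version ID reuses the one from the install examples
earlier; the printed fields come from the `CivitaiMetadata` table above):

```
from invokeai.backend.model_manager.metadata import CivitaiMetadataFetch

civitai = CivitaiMetadataFetch()
version_meta = civitai.from_civitai_versionid(63006)
print(version_meta.version_name, version_meta.trained_words)
```
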
### Metadata Storage

The `ModelMetadataStore` provides a simple facility to store model
metadata in the `invokeai.db` database. The data is stored as a JSON
blob, with a few common fields (`name`, `author`, `tags`) broken out
to be searchable.

When a metadata object is saved to the database, it is identified
using the model key, _and this key must correspond to an existing
model key in the model_config table_. There is a foreign key integrity
constraint between the `model_config.id` field and the
`model_metadata.id` field such that if you attempt to save metadata
under an unknown key, the attempt will result in an
`UnknownModelException`. Likewise, when a model is deleted from
`model_config`, the deletion of the corresponding metadata record will
be triggered.

Tags are stored in a normalized fashion in the tables `model_tags` and
`tags`. Triggers keep the tag table in sync with the `model_metadata`
table.

To create the storage object, initialize it with the InvokeAI
`SqliteDatabase` object. This is typically done as follows:

```
from invokeai.app.api.dependencies import ApiDependencies
from invokeai.backend.model_manager.metadata import ModelMetadataStore

metadata_store = ModelMetadataStore(ApiDependencies.invoker.services.db)
```

You can then access the storage with the following methods:

#### `add_metadata(key, metadata)`

Add the metadata using a previously-defined model key.

There is currently no `delete_metadata()` method. The metadata will
persist until the matching config is deleted from the `model_config`
table.

#### `get_metadata(key) -> AnyModelRepoMetadata`

Retrieve the metadata corresponding to the model key.

#### `update_metadata(key, new_metadata)`

Update an existing metadata record with new metadata.

#### `search_by_tag(tags: Set[str]) -> Set[str]`

Given a set of tags, find models that are tagged with them. If
multiple tags are provided then a matching model must be tagged with
*all* the tags in the set. This method returns a set of model keys and
is intended to be used in conjunction with the `ModelRecordService`:

```
model_config_store = ApiDependencies.invoker.services.model_records
matches = metadata_store.search_by_tag({'license:other'})
models = [model_config_store.get(x) for x in matches]
```

#### `search_by_name(name: str) -> Set[str]`

Find all model metadata records that have the given name and return a
set of keys to the corresponding model config objects.

#### `search_by_author(author: str) -> Set[str]`

Find all model metadata records that have the given author and return
a set of keys to the corresponding model config objects.

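The same lookup pattern shown above for tags applies to these methods
as well; for instance (the author string is illustrative):

```
matches = metadata_store.search_by_author('SomeAuthor')
models = [model_config_store.get(x) for x in matches]
```
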
# The remainder of this documentation is provisional, pending implementation of the Load service
## Let's get loaded, the lowdown on ModelLoadService

The `ModelLoadService` is responsible for loading a named model into
memory so that it can be used for inference. Despite the fact that it
does a lot under the covers, it is very straightforward to use.

An application-wide model loader is created at API initialization time
and stored in
`ApiDependencies.invoker.services.model_loader`. However, you can
create alternative instances if you wish.

### Creating a ModelLoadService object

The class is defined in
`invokeai.app.services.model_loader_service`. It is initialized with
an InvokeAIAppConfig object, from which it gets configuration
information such as the user's desired GPU and precision, and with a
previously-created `ModelRecordServiceBase` object, from which it
loads the requested model's configuration information.

Here is a typical initialization pattern:

```
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.model_record_service import ModelRecordServiceBase
from invokeai.app.services.model_loader_service import ModelLoadService

config = InvokeAIAppConfig.get_config()
store = ModelRecordServiceBase.open(config)
loader = ModelLoadService(config, store)
```

Note that we are relying on the contents of the application
configuration to choose the implementation of
`ModelRecordServiceBase`.

### get_model(key, [submodel_type], [context]) -> ModelInfo:

*** TO DO: change to get_model(key, context=None, **kwargs)

The `get_model()` method, like its similarly-named cousin in
`ModelRecordService`, receives the unique key that identifies the
model. It loads the model into memory, gets the model ready for use,
and returns a `ModelInfo` object.

The optional second argument, `subtype`, is a `SubModelType` string
enum, such as "vae". It is mandatory when used with a main model, and
is used to select which part of the main model to load.

The optional third argument, `context`, can be provided by
an invocation to trigger model load event reporting. See below for
details.

The returned `ModelInfo` object shares some fields in common with
`ModelConfigBase`, but is otherwise a completely different beast:

| **Field Name** | **Type**                  | **Description**                                                     |
|----------------|---------------------------|---------------------------------------------------------------------|
| `key`          | str                       | The model key derived from the ModelRecordService database          |
| `name`         | str                       | Name of this model                                                  |
| `base_model`   | BaseModelType             | Base model for this model                                           |
| `type`         | ModelType or SubModelType | Either the model type (non-main) or the submodel type (main models) |
| `location`     | Path or str               | Location of the model on the filesystem                             |
| `precision`    | torch.dtype               | The torch.dtype to use for inference                                |
| `context`      | ModelCache.ModelLocker    | A context class used to lock the model in VRAM while in use         |

The types for `ModelInfo` and `SubModelType` can be imported from
`invokeai.app.services.model_loader_service`.

To use the model, you use the `ModelInfo` as a context manager using
the following pattern:

```
model_info = loader.get_model('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
with model_info as vae:
    image = vae.decode(latents)[0]
```

The `vae` model will stay locked in the GPU during the period of time
it is in the context manager's scope.

`get_model()` may raise any of the following exceptions:

- `UnknownModelException` -- key not in database
- `ModelNotFoundException` -- key in database but model not found at path
- `InvalidModelException` -- the model is guilty of a variety of sins

** TO DO: ** Resolve discrepancy between ModelInfo.location and
ModelConfig.path.

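A defensive-loading sketch using the exceptions above (the key is the
illustrative one used earlier; the exception import locations are not
given in this document and are assumed here):

```
from invokeai.app.services.model_loader_service import SubModelType
# UnknownModelException / ModelNotFoundException import paths are assumed

try:
    model_info = loader.get_model('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
except UnknownModelException:
    print("no such key in the model record database")
except ModelNotFoundException:
    print("record exists, but the model files are missing on disk")
```
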
### Emitting model loading events

When the `context` argument is passed to `get_model()`, it will
retrieve the invocation event bus from the passed `InvocationContext`
object to emit events on the invocation bus. The two events are
"model_load_started" and "model_load_completed". Both carry the
following payload:

```
payload=dict(
    queue_id=queue_id,
    queue_item_id=queue_item_id,
    queue_batch_id=queue_batch_id,
    graph_execution_state_id=graph_execution_state_id,
    model_key=model_key,
    submodel=submodel,
    hash=model_info.hash,
    location=str(model_info.location),
    precision=str(model_info.precision),
)
```

@@ -1,76 +0,0 @@

# Contributing to the Frontend

# InvokeAI Web UI

- [InvokeAI Web UI](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#invokeai-web-ui)
- [Stack](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#stack)
- [Contributing](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#contributing)
- [Dev Environment](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#dev-environment)
- [Production builds](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#production-builds)

The UI is a fairly straightforward Typescript React app, with the Unified Canvas being more complex.

Code is located in `invokeai/frontend/web/` for review.

## Stack

State management is Redux via [Redux Toolkit](https://github.com/reduxjs/redux-toolkit). We lean heavily on RTK:

- `createAsyncThunk` for HTTP requests
- `createEntityAdapter` for fetching images and models
- `createListenerMiddleware` for workflows

The API client and associated types are generated from the OpenAPI schema. See API_CLIENT.md.

Communication with the server is a mix of HTTP and [socket.io](https://github.com/socketio/socket.io-client) (with a simple socket.io redux middleware to help).

[Chakra-UI](https://github.com/chakra-ui/chakra-ui) & [Mantine](https://github.com/mantinedev/mantine) are used for components and styling.

[Konva](https://github.com/konvajs/react-konva) is used for the canvas, but we are pushing the limits of what is feasible with it (and HTML canvas in general). We plan to rebuild it with [PixiJS](https://github.com/pixijs/pixijs) to take advantage of WebGL's improved raster handling.

[Vite](https://vitejs.dev/) is used for bundling.

Localisation is via [i18next](https://github.com/i18next/react-i18next), but translation happens on our [Weblate](https://hosted.weblate.org/engage/invokeai/) project. Only the English source strings should be changed on this repo.

## Contributing

Thanks for your interest in contributing to the InvokeAI Web UI!

We encourage you to ping @psychedelicious and @blessedcoolant on [Discord](https://discord.gg/ZmtBAhwWhy) if you want to contribute, just to touch base and ensure your work doesn't conflict with anything else going on. The project is very active.

### Dev Environment

**Setup**

1. Install [node](https://nodejs.org/en/download/). You can confirm node is installed with:

   ```bash
   node --version
   ```

2. Install [pnpm](https://pnpm.io/) and confirm it is installed by running this:

   ```bash
   npm install --global pnpm
   pnpm --version
   ```

From `invokeai/frontend/web/` run `pnpm install` to get everything set up.

Start everything in dev mode:

1. Ensure your virtual environment is running
2. Start the dev server: `pnpm dev`
3. Start the InvokeAI Nodes backend: `python scripts/invokeai-web.py` (run from the repo root)
4. Point your browser to the dev server address, e.g. [http://localhost:5173/](http://localhost:5173/)

### VSCode Remote Dev

We've noticed an intermittent issue with the VSCode Remote Dev port forwarding. If you use this feature of VSCode, you may intermittently click the Invoke button and then get nothing until the request times out. We suggest disabling the IDE's port forwarding feature and doing it manually via SSH:

`ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host`

### Production builds

For a number of technical and logistical reasons, we need to commit UI build artefacts to the repo.

If you submit a PR, there is a good chance we will ask you to include a separate commit with a build of the app.

To build for production, run `pnpm build`.

@@ -12,7 +12,7 @@ To get started, take a look at our [new contributors checklist](newContributorCh

Once you're set up, for more information, you can review the documentation specific to your area of interest:

* #### [InvokeAI Architecture](../ARCHITECTURE.md)
* #### [Frontend Documentation](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web)
* #### [Node Documentation](../INVOCATIONS.md)
* #### [Local Development](../LOCAL_DEVELOPMENT.md)

docs/deprecated/2to3.md (new file)
@@ -0,0 +1,53 @@

## :octicons-log-16: Important Changes Since Version 2.3

### Nodes

Behind the scenes, InvokeAI has been completely rewritten to support
"nodes," small unitary operations that can be combined into graphs to
form arbitrary workflows. For example, there is a prompt node that
processes the prompt string and feeds it to a text2latent node that
generates a latent image. The latents are then fed to a latent2image
node that translates the latent image into a PNG.

The WebGUI has a node editor that allows you to graphically design and
execute custom node graphs. The ability to save and load graphs is
still a work in progress, but coming soon.

### Command-Line Interface Retired

All "invokeai" command-line interfaces have been retired as of version
3.4.

To launch the Web GUI from the command-line, use the command
`invokeai-web` rather than the traditional `invokeai --web`.

### ControlNet

This version of InvokeAI features ControlNet, a system that allows you
to achieve exact poses for human and animal figures by providing a
model to follow. Full details are found in [ControlNet](features/CONTROLNET.md)

### New Schedulers

The list of schedulers has been completely revamped and brought up to date:

| **Short Name** | **Scheduler**                   | **Notes**                   |
|----------------|---------------------------------|-----------------------------|
| **ddim**       | DDIMScheduler                   |                             |
| **ddpm**       | DDPMScheduler                   |                             |
| **deis**       | DEISMultistepScheduler          |                             |
| **lms**        | LMSDiscreteScheduler            |                             |
| **pndm**       | PNDMScheduler                   |                             |
| **heun**       | HeunDiscreteScheduler           | original noise schedule     |
| **heun_k**     | HeunDiscreteScheduler           | using karras noise schedule |
| **euler**      | EulerDiscreteScheduler          | original noise schedule     |
| **euler_k**    | EulerDiscreteScheduler          | using karras noise schedule |
| **kdpm_2**     | KDPM2DiscreteScheduler          |                             |
| **kdpm_2_a**   | KDPM2AncestralDiscreteScheduler |                             |
| **dpmpp_2s**   | DPMSolverSinglestepScheduler    |                             |
| **dpmpp_2m**   | DPMSolverMultistepScheduler     | original noise schedule     |
| **dpmpp_2m_k** | DPMSolverMultistepScheduler     | using karras noise schedule |
| **unipc**      | UniPCMultistepScheduler         | CPU only                    |
| **lcm**        | LCMScheduler                    |                             |

Please see [3.0.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v3.0.0) for further details.

@@ -94,6 +94,8 @@ A model that helps generate creative QR codes that still scan. Can also be used

**Openpose**:

The OpenPose control model allows for the identification of the general pose of a character by pre-processing an existing image with a clear human structure. With advanced options, Openpose can also detect the face or hands in the image.

*Note:* The DWPose Processor has replaced the OpenPose processor in Invoke. Workflows and generations that relied on the OpenPose Processor will need to be updated to use the DWPose Processor instead.

**Mediapipe Face**:

The MediaPipe Face identification processor is able to clearly identify facial features in order to capture vivid expressions of human faces.

@@ -229,29 +229,28 @@ clarity on the intent and common use cases we expect for utilizing them.

currently being rendered by your browser into a merged copy of the image. This
lowers the resource requirements and should improve performance.

### Compositing / Seam Correction

When doing Inpainting or Outpainting, Invoke needs to merge the pixels generated
by Stable Diffusion into your existing image. This is achieved through
compositing - the area around the boundary between your image and the
new generation is automatically blended to produce a seamless
output. In a fully automatic process, a mask is generated to cover the
boundary, and then the area of the boundary is Inpainted.

Although the default options should work well most of the time, sometimes it can
help to alter the parameters that control the Compositing. A larger blur setting
has been noted as producing consistently strong results. A strength of 0.7 is
best for reducing hard seams.

- **Mode** - What part of the image will have the Compositing applied to it.
    - **Mask edge** will apply Compositing to the edge of the masked area
    - **Mask** will apply Compositing to the entire masked area
    - **Unmasked** will apply Compositing to the entire image
- **Steps** - Number of generation steps that will occur during the Coherence Pass, similar to Denoising Steps. Higher step counts will generally have better results.
- **Strength** - How much noise is added for the Coherence Pass, similar to Denoising Strength. A strength of 0 will result in an unchanged image, while a strength of 1 will result in an image with a completely new area as defined by the Mode setting.
- **Blur** - Adjusts the pixel radius of the mask. A larger blur radius will cause the mask to extend past the visibly masked area, while too small of a blur radius will result in a mask that is smaller than the visibly masked area.
- **Blur Method** - The method of blur applied to the masked area.

### Infill & Scaling

docs/img/favicon.ico (new binary file, 4.2 KiB)

@@ -18,7 +18,7 @@ title: Home

    width: 100%;
    max-width: 100%;
    height: 50px;
    background-color: #35A4DB;
    color: #fff;
    font-size: 16px;
    border: none;

@@ -43,7 +43,7 @@ title: Home

<div align="center" markdown>

[](https://github.com/invoke-ai/InvokeAI)

[![discord badge]][discord link]

@@ -117,6 +117,11 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.

## :octicons-gift-24: InvokeAI Features

### Installation
- [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
- [Manual Installation](installation/020_INSTALL_MANUAL.md)
- [Docker Installation](installation/040_INSTALL_DOCKER.md)

### The InvokeAI Web Interface
- [WebUI overview](features/WEB.md)
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)

@@ -145,60 +150,6 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.

- [Guide to InvokeAI Runtime Settings](features/CONFIGURATION.md)
- [Database Maintenance and other Command Line Utilities](features/UTILITIES.md)
|
||||
|
||||
## :octicons-log-16: Important Changes Since Version 2.3
|
||||
|
||||
### Nodes
|
||||
|
||||
Behind the scenes, InvokeAI has been completely rewritten to support
|
||||
"nodes," small unitary operations that can be combined into graphs to
|
||||
form arbitrary workflows. For example, there is a prompt node that
|
||||
processes the prompt string and feeds it to a text2latent node that
|
||||
generates a latent image. The latents are then fed to a latent2image
|
||||
node that translates the latent image into a PNG.
|
||||
|
||||
The WebGUI has a node editor that allows you to graphically design and
|
||||
execute custom node graphs. The ability to save and load graphs is
|
||||
still a work in progress, but coming soon.
|
||||
|
||||
### Command-Line Interface Retired
|
||||
|
||||
All "invokeai" command-line interfaces have been retired as of version
|
||||
3.4.
|
||||
|
||||
To launch the Web GUI from the command-line, use the command
|
||||
`invokeai-web` rather than the traditional `invokeai --web`.
|
||||
|
||||
### ControlNet
|
||||
|
||||
This version of InvokeAI features ControlNet, a system that allows you
|
||||
to achieve exact poses for human and animal figures by providing a
|
||||
model to follow. Full details are found in [ControlNet](features/CONTROLNET.md)
|
||||
|
||||
### New Schedulers

The list of schedulers has been completely revamped and brought up to date:

| **Short Name** | **Scheduler** | **Notes** |
|----------------|---------------------------------|-----------------------------|
| **ddim** | DDIMScheduler | |
| **ddpm** | DDPMScheduler | |
| **deis** | DEISMultistepScheduler | |
| **lms** | LMSDiscreteScheduler | |
| **pndm** | PNDMScheduler | |
| **heun** | HeunDiscreteScheduler | original noise schedule |
| **heun_k** | HeunDiscreteScheduler | using karras noise schedule |
| **euler** | EulerDiscreteScheduler | original noise schedule |
| **euler_k** | EulerDiscreteScheduler | using karras noise schedule |
| **kdpm_2** | KDPM2DiscreteScheduler | |
| **kdpm_2_a** | KDPM2AncestralDiscreteScheduler | |
| **dpmpp_2s** | DPMSolverSinglestepScheduler | |
| **dpmpp_2m** | DPMSolverMultistepScheduler | original noise schedule |
| **dpmpp_2m_k** | DPMSolverMultistepScheduler | using karras noise schedule |
| **unipc** | UniPCMultistepScheduler | CPU only |
| **lcm** | LCMScheduler | |

Please see [3.0.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v3.0.0) for further details.
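The short names map onto diffusers scheduler classes, with the `_k` variants differing only in their use of Karras sigmas. Here is a hedged sketch of such a mapping for a few rows of the table; the `(class, kwargs)` pairs are an assumption about how the table could be realized, not InvokeAI's actual code.

```python
# A sketch of a short-name -> diffusers scheduler mapping implied by the
# table above; the pairs below are assumptions, not InvokeAI's own table.
from diffusers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerDiscreteScheduler,
    HeunDiscreteScheduler,
    UniPCMultistepScheduler,
)

SCHEDULERS = {
    "ddim": (DDIMScheduler, {}),
    "heun": (HeunDiscreteScheduler, {}),
    "heun_k": (HeunDiscreteScheduler, {"use_karras_sigmas": True}),
    "euler": (EulerDiscreteScheduler, {}),
    "euler_k": (EulerDiscreteScheduler, {"use_karras_sigmas": True}),
    "dpmpp_2m": (DPMSolverMultistepScheduler, {}),
    "dpmpp_2m_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True}),
    "unipc": (UniPCMultistepScheduler, {}),
}

def make_scheduler(short_name: str, config):
    """Instantiate a scheduler from an existing pipeline's config."""
    cls, extra = SCHEDULERS[short_name]
    return cls.from_config(config, **extra)  # e.g. pipe.scheduler.config
```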
## :material-target: Troubleshooting

Please check out our **[:material-frequently-asked-questions:

@@ -477,7 +477,7 @@ Then type the following commands:

=== "AMD System"
```bash
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.6
```

### Corrupted configuration file

@@ -154,7 +154,7 @@ manager, please follow these steps:

=== "ROCm (AMD)"

```bash
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
```

=== "CPU (Intel Macs & non-GPU systems)"

@@ -230,13 +230,13 @@ manager, please follow these steps:

=== "local Webserver"

```bash
invokeai --web
invokeai-web
```

=== "Public Webserver"

```bash
invokeai --web --host 0.0.0.0
invokeai-web --host 0.0.0.0
```

=== "CLI"
@@ -313,7 +313,7 @@ code for InvokeAI. For this to work, you will need to install the
on your system, please see the [Git Installation Guide](https://github.com/git-guides/install-git)

You will also need to install the [frontend development toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/docs/contributing/contribution_guides/contributingToFrontend.md).
You will also need to install the [frontend development toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/README.md).

If you have a "normal" installation, you should create a totally separate virtual environment for the git-based installation; otherwise the two may interfere.
@@ -345,7 +345,7 @@ installation protocol (important!)

=== "ROCm (AMD)"
```bash
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
```

=== "CPU (Intel Macs & non-GPU systems)"

@@ -361,7 +361,7 @@ installation protocol (important!)

Be sure to pass `-e` (for an editable install) and don't forget the dot ("."). It is part of the command.

5. Install the [frontend toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/docs/contributing/contribution_guides/contributingToFrontend.md) and do a production build of the UI as described.
5. Install the [frontend toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/README.md) and do a production build of the UI as described.

6. You can now run `invokeai` and its related commands. The code will be read from the repository, so that you can edit the .py source files

@@ -402,4 +402,4 @@ environment variable INVOKEAI_ROOT to point to the installation directory.

Note that if you run into problems with the Conda installation, the InvokeAI staff will **not** be able to help you out. Caveat Emptor!

[dev-chat]: https://discord.com/channels/1020123559063990373/1049495067846524939
[dev-chat]: https://discord.com/channels/1020123559063990373/1049495067846524939

@@ -134,7 +134,7 @@ recipes are available

When installing torch and torchvision manually with `pip`, remember to provide the argument `--extra-index-url
https://download.pytorch.org/whl/rocm5.4.2` as described in the [Manual
https://download.pytorch.org/whl/rocm5.6` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).

This will be done automatically for you if you use the installer
@@ -69,7 +69,7 @@ a token and copy it, since you will need it for the next step.

### Setup

Set up your environment variables. In the `docker` directory, make a copy of `env.sample` and name it `.env`. Make changes as necessary.
Set up your environment variables. In the `docker` directory, make a copy of `.env.sample` and name it `.env`. Make changes as necessary.

Any environment variables supported by InvokeAI can be set here - please see the [CONFIGURATION](../features/CONFIGURATION.md) for further detail.
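A minimal sketch of that setup step, assuming it runs from the repository root. It is written in Python for consistency with the other sketches in this compare; a plain `cp docker/.env.sample docker/.env` accomplishes the same thing.

```python
# Sketch: copy docker/.env.sample to docker/.env without clobbering an
# existing .env; equivalent to `cp -n docker/.env.sample docker/.env`.
import shutil
from pathlib import Path

env = Path("docker/.env")
if not env.exists():
    shutil.copyfile(Path("docker/.env.sample"), env)
    print(f"created {env}; edit it to set your environment variables")
```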
@@ -18,13 +18,18 @@ either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
driver).

## **[Automated Installer](010_INSTALL_AUTOMATED.md)**
✅ This is the recommended installation method for first-time users.
## **[Automated Installer (Recommended)](010_INSTALL_AUTOMATED.md)**
✅ This is the recommended installation method for first-time users.

This is a script that will install all of InvokeAI's essential
third party libraries and InvokeAI itself. It includes access to a
"developer console" which will help us debug problems with you and
give you access to experimental features.
third party libraries and InvokeAI itself.

🖥️ **Download the latest installer .zip file here** : https://github.com/invoke-ai/InvokeAI/releases/latest

- *Look for the file labelled "InvokeAI-installer-v3.X.X.zip" at the bottom of the page*
- If you experience issues, read through the full [installation instructions](010_INSTALL_AUTOMATED.md) to make sure you have met all of the installation requirements. If you need more help, join the [Discord](discord.gg/invoke-ai) or create an issue on [Github](https://github.com/invoke-ai/InvokeAI).

## **[Manual Installation](020_INSTALL_MANUAL.md)**
This method is recommended for experienced users and developers.
@@ -1,10 +1,10 @@
document.addEventListener("DOMContentLoaded", function () {
var script = document.createElement("script");
script.src = "https://widget.kapa.ai/kapa-widget.bundle.js";
script.setAttribute("data-website-id", "b5973bb1-476b-451e-8cf4-98de86745a10");
script.setAttribute("data-project-name", "Invoke.AI");
script.setAttribute("data-project-color", "#11213C");
script.setAttribute("data-project-logo", "https://avatars.githubusercontent.com/u/113954515?s=280&v=4");
script.async = true;
document.head.appendChild(script);
});
document.addEventListener("DOMContentLoaded", function () {
var script = document.createElement("script");
script.src = "https://widget.kapa.ai/kapa-widget.bundle.js";
script.setAttribute("data-website-id", "b5973bb1-476b-451e-8cf4-98de86745a10");
script.setAttribute("data-project-name", "Invoke.AI");
script.setAttribute("data-project-color", "#11213C");
script.setAttribute("data-project-logo", "https://avatars.githubusercontent.com/u/113954515?s=280&v=4");
script.async = true;
document.head.appendChild(script);
});
@@ -6,10 +6,17 @@ If you're not familiar with Diffusion, take a look at our [Diffusion Overview.](

## Features

### Workflow Library
The Workflow Library enables you to save workflows to the Invoke database, allowing you to easily create, modify, and share workflows as needed.

A curated set of workflows is provided by default; these are designed to help explain important nodes' usage in the Workflow Editor.

### Linear View
The Workflow Editor allows you to create a UI for your workflow, to make it easier to iterate on your generations.

To add an input to the Linear UI, right click on the input label and select "Add to Linear View".
To add an input to the Linear UI, right click on the **input label** and select "Add to Linear View".

The Linear UI View will also be part of the saved workflow, allowing you to share workflows and enable others to use them, regardless of complexity.

@@ -30,7 +37,7 @@ Any node or input field can be renamed in the workflow editor. If the input fiel

Nodes have a "Use Cache" option in their footer. This allows for performance improvements by reusing previously cached values during workflow processing.
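A minimal sketch of what input-keyed caching can look like, assuming a node's output is fully determined by its inputs. InvokeAI's real invocation cache is more involved; this is only to convey the idea behind "Use Cache."

```python
# A sketch of input-keyed node caching; not InvokeAI's actual cache.
import functools
import json

def cached_node(func):
    """Memoize a node function on a stable hash of its JSON-able inputs."""
    cache: dict[str, object] = {}

    @functools.wraps(func)
    def wrapper(**inputs):
        key = json.dumps(inputs, sort_keys=True)  # stable key per input set
        if key not in cache:                      # miss: run the node once
            cache[key] = func(**inputs)
        return cache[key]                         # hit: reuse the old value
    return wrapper
```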
## Important Concepts
## Important Nodes & Concepts

There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see Examples).

@@ -56,7 +63,7 @@ The ImageToLatents node takes in a pixel image and a VAE and outputs latents.

It is common to want to use both the same seed (for continuity) and random seeds (for variety). To define a seed, simply enter it into the 'Seed' field on a noise node. Conversely, the RandomInt node generates a random integer between 'Low' and 'High', and can be used as input to the 'Seed' edge point on a noise node to randomize your seed.
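In plain Python the RandomInt-to-Seed wiring amounts to the following sketch; the `low`/`high` defaults mirror the node fields in the default workflow later in this compare, which uses `high = 2147483647`.

```python
# A sketch of the RandomInt -> Seed wiring described above.
import random

def random_seed(low: int = 0, high: int = 2_147_483_647) -> int:
    """Mimic the RandomInt node: one integer in [low, high] per invocation."""
    return random.randint(low, high)

fixed_seed = 1234            # reuse for continuity between generations
varied_seed = random_seed()  # feed the noise node's Seed input for variety
```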
### ControlNet
@@ -14,6 +14,7 @@ To use a community workflow, download the `.json` node graph file and load i

- Community Nodes
+ [Adapters-Linked](#adapters-linked-nodes)
+ [Autostereogram](#autostereogram-nodes)
+ [Average Images](#average-images)
+ [Clean Image Artifacts After Cut](#clean-image-artifacts-after-cut)
+ [Close Color Mask](#close-color-mask)
@@ -25,7 +26,7 @@ To use a community workflow, download the `.json` node graph file and load i
+ [GPT2RandomPromptMaker](#gpt2randompromptmaker)
+ [Grid to Gif](#grid-to-gif)
+ [Halftone](#halftone)
+ [Ideal Size](#ideal-size)
+ [Hand Refiner with MeshGraphormer](#hand-refiner-with-meshgraphormer)
+ [Image and Mask Composition Pack](#image-and-mask-composition-pack)
+ [Image Dominant Color](#image-dominant-color)
+ [Image to Character Art Image Nodes](#image-to-character-art-image-nodes)
@@ -36,10 +37,12 @@ To use a community workflow, download the `.json` node graph file and load i
+ [Mask Operations](#mask-operations)
+ [Match Histogram](#match-histogram)
+ [Metadata-Linked](#metadata-linked-nodes)
+ [Negative Image](#negative-image)
+ [Negative Image](#negative-image)
+ [Nightmare Promptgen](#nightmare-promptgen)
+ [Oobabooga](#oobabooga)
+ [Prompt Tools](#prompt-tools)
+ [Remote Image](#remote-image)
+ [BriaAI Background Remove](#briaai-remove-background)
+ [Remove Background](#remove-background)
+ [Retroize](#retroize)
+ [Size Stepper Nodes](#size-stepper-nodes)
@@ -66,6 +69,17 @@ Note: These are inherited from the core nodes so any update to the core nodes sh

**Node Link:** https://github.com/skunkworxdark/adapters-linked-nodes

--------------------------------
### Autostereogram Nodes

**Description:** Generate autostereogram images from a depth map. This is not a very practically useful node, but more a 90s nostalgic indulgence, as I used to love these images as a kid.

**Node Link:** https://github.com/skunkworxdark/autostereogram_nodes

**Example Usage:**
</br>
<img src="https://github.com/skunkworxdark/autostereogram_nodes/blob/main/images/spider.png" width="200" /> -> <img src="https://github.com/skunkworxdark/autostereogram_nodes/blob/main/images/spider-depth.png" width="200" /> -> <img src="https://github.com/skunkworxdark/autostereogram_nodes/raw/main/images/spider-dots.png" width="200" /> <img src="https://github.com/skunkworxdark/autostereogram_nodes/raw/main/images/spider-pattern.png" width="200" />

--------------------------------
### Average Images
@@ -196,13 +210,18 @@ CMYK Halftone Output:

<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/c59c578f-db8e-4d66-8c66-2851752d75ea" width="300" />

--------------------------------
### Ideal Size

**Description:** This node calculates an ideal image size for a first pass of a multi-pass upscaling. The aim is to avoid duplication that results from choosing a size larger than the model is capable of.
### Hand Refiner with MeshGraphormer

**Node Link:** https://github.com/JPPhoto/ideal-size-node
**Description**: Hand Refiner takes in your image and automatically generates a fixed depth map for the hands, along with a mask of the hands region, that will conveniently allow you to use them along with ControlNet to fix the wonky hands generated by Stable Diffusion.

**Node Link:** https://github.com/blessedcoolant/invoke_meshgraphormer

**View**
<img src="https://raw.githubusercontent.com/blessedcoolant/invoke_meshgraphormer/main/assets/preview.jpg" />

--------------------------------

### Image and Mask Composition Pack

**Description:** This is a pack of nodes for composing masks and images, including a simple text mask creator and both image and latent offset nodes. The offsets wrap around, so these can be used in conjunction with the Seamless node to progressively generate centered on different parts of the seamless tiling.
@@ -346,6 +365,13 @@ Node Link: https://github.com/VeyDlin/negative-image-node

View:
</br><img src="https://raw.githubusercontent.com/VeyDlin/negative-image-node/master/.readme/node.png" width="500" />

--------------------------------
### Nightmare Promptgen

**Description:** Nightmare Prompt Generator - Uses a local text generation model to create unique imaginative (but usually nightmarish) prompts for InvokeAI. By default, it allows you to choose from some gpt-neo models I finetuned on over 2500 of my own InvokeAI prompts in Compel format, but you're able to add your own as well. Offers support for replacing any troublesome words with a random choice from a list you can also define.

**Node Link:** [https://github.com/gogurtenjoyer/nightmare-promptgen](https://github.com/gogurtenjoyer/nightmare-promptgen)

--------------------------------
### Oobabooga
@@ -409,6 +435,17 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai

**Node Link:** https://github.com/fieldOfView/InvokeAI-remote_image

--------------------------------

### BriaAI Remove Background

**Description**: Implements one-click background removal with BriaAI's new version 1.4 model, which seems to be producing better results than any other previous background removal tool.

**Node Link:** https://github.com/blessedcoolant/invoke_bria_rmbg

**View**
<img src="https://raw.githubusercontent.com/blessedcoolant/invoke_bria_rmbg/main/assets/preview.jpg" />

--------------------------------
### Remove Background
@@ -36,6 +36,7 @@ their descriptions.

| Integer Math | Perform basic math operations on two integers |
| Convert Image Mode | Converts an image to a different mode. |
| Crop Image | Crops an image to a specified box. The box can be outside of the image. |
| Ideal Size | Calculates an ideal image size for latents for a first pass of a multi-pass upscaling to avoid duplication and other artifacts |
| Image Hue Adjustment | Adjusts the Hue of an image. |
| Inverse Lerp Image | Inverse linear interpolation of all pixels of an image |
| Image Primitive | An image primitive value |

@@ -80,7 +81,7 @@ their descriptions.

| ONNX Text to Latents | Generates latents from conditionings. |
| ONNX Model Loader | Loads a main model, outputting its submodels. |
| OpenCV Inpaint | Simple inpaint using opencv. |
| Openpose Processor | Applies Openpose processing to image |
| DW Openpose Processor | Applies Openpose processing to image |
| PIDI Processor | Applies PIDI processing to image |
| Prompts from File | Loads prompts from a text file |
| Random Integer | Outputs a single random integer. |
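The Ideal Size row above compresses a useful idea: pick a first-pass size at or below the model's native resolution while preserving aspect ratio. Here is a hedged sketch of one way such a computation could work; the exact heuristics of the real node may differ, and the native resolution (512 for SD 1.5) and multiple-of-8 snapping are assumptions based on how latents are sized.

```python
# A sketch of an "ideal size" computation: scale the requested size down to
# roughly the model's native pixel area, snapping to multiples of 8 so the
# result maps cleanly onto latents. Not the real node's exact algorithm.
def ideal_size(width: int, height: int, native: int = 512) -> tuple[int, int]:
    scale = (native * native / (width * height)) ** 0.5  # match native area
    scale = min(scale, 1.0)                              # never upscale here
    snap = lambda v: max(8, int(v * scale) // 8 * 8)     # latent-friendly dims
    return snap(width), snap(height)

# e.g. a 4096x2048 target yields a 720x360 first pass before upscaling
```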
@@ -1,6 +1,6 @@
# Example Workflows

We've curated some example workflows for you to get started with Workflows in InvokeAI
We've curated some example workflows for you to get started with Workflows in InvokeAI! These can also be found in the Workflow Library, located in the Workflow Editor of Invoke.

To use them, right click on your desired workflow, follow the link to GitHub and click the "⬇" button to download the raw file. You can then use the "Load Workflow" functionality in InvokeAI to load the workflow and start generating images!
@@ -13,46 +13,69 @@ We thank them for all of their time and hard work.

- [Lincoln D. Stein](mailto:lincoln.stein@gmail.com)

## **Current core team**
## **Current Core Team**

* @lstein (Lincoln Stein) - Co-maintainer
* @blessedcoolant - Co-maintainer
* @hipsterusername (Kent Keirsey) - Co-maintainer, CEO, Positive Vibes
* @psychedelicious (Spencer Mabrito) - Web Team Leader
* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
* @damian0815 - Attention Systems and Compel Maintainer
* @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
* @genomancer (Gregg Helt) - Controlnet support
* @StAlKeR7779 (Sergey Borisov) - Torch stack, ONNX, model management, optimization
* @chainchompa (Jennifer Player) - Web Development & Chain-Chomping
* @josh is toast (Josh Corbett) - Web Development
* @cheerio (Mary Rogers) - Lead Engineer & Web App Development
* @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
* @sunija - Standalone version
* @genomancer (Gregg Helt) - Controlnet support
* @brandon (Brandon Rising) - Platform, Infrastructure, Backend Systems
* @ryanjdick (Ryan Dick) - Machine Learning & Training
* @millu (Millun Atluri) - Community Manager, Documentation, Node-wrangler
* @chainchompa (Jennifer Player) - Web Development & Chain-Chomping
* @JPPhoto - Core image generation nodes
* @dunkeroni - Image generation backend
* @SkunkWorxDark - Image generation backend
* @keturn (Kevin Turner) - Diffusers
* @millu (Millun Atluri) - Community Wizard, Documentation, Node-wrangler
* @glimmerleaf (Devon Hopkins) - Community Wizard
* @gogurt enjoyer - Discord moderator and end user support
* @whosawhatsis - Discord moderator and end user support
* @dwinrger - Discord moderator and end user support
* @526christian - Discord moderator and end user support
* @harvester62 - Discord moderator and end user support

## **Honored Team Alumni**

* @StAlKeR7779 (Sergey Borisov) - Torch stack, ONNX, model management, optimization
* @damian0815 - Attention Systems and Compel Maintainer
* @netsvetaev (Artur) - Localization support
* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
* @tildebyte - Installation and configuration
* @mauwii (Matthias Wilde) - Installation, release, continuous integration

## **Full List of Contributors by Commit Name**

- 이승석
- AbdBarho
- ablattmann
- AdamOStark
- Adam Rice
- Airton Silva
- Aldo Hoeben
- Alexander Eichhorn
- Alexandre D. Roberge
- Alexandre Macabies
- Alfie John
- Andreas Rozek
- Andre LaBranche
- Andy Bearman
- Andy Luhrs
- Andy Pilate
- Anonymous
- Anthony Monthe
- Any-Winter-4079
- apolinario
- Ar7ific1al
- ArDiouscuros
- Armando C. Santisbon
- Arnold Cordewiner
- Arthur Holstvoogd
- artmen1516
- Artur
@@ -64,13 +87,16 @@ We thank them for all of their time and hard work.
- blhook
- BlueAmulet
- Bouncyknighter
- Brandon
- Brandon Rising
- Brent Ozar
- Brian Racer
- bsilvereagle
- c67e708d
- camenduru
- CapableWeb
- Carson Katri
- chainchompa
- Chloe
- Chris Dawson
- Chris Hayes
@@ -86,30 +112,45 @@ We thank them for all of their time and hard work.
- cpacker
- Cragin Godley
- creachec
- CrypticWit
- d8ahazard
- damian
- damian0815
- Damian at mba
- Damian Stewart
- Daniel Manzke
- Danny Beer
- Dan Sully
- Darren Ringer
- David Burnett
- David Ford
- David Regla
- David Sisco
- David Wager
- Daya Adianto
- db3000
- DekitaRPG
- Denis Olshin
- Dennis
- dependabot[bot]
- Dmitry Parnas
- Dobrynia100
- Dominic Letz
- DrGunnarMallon
- Drun555
- dunkeroni
- Edward Johan
- elliotsayes
- Elrik
- ElrikUnderlake
- Eric Khun
- Eric Wolf
- Eugene
- Eugene Brodsky
- ExperimentalCyborg
- Fabian Bahl
- Fabio 'MrWHO' Torchetti
- Fattire
- fattire
- Felipe Nogueira
- Félix Sanz
@@ -118,8 +159,12 @@ We thank them for all of their time and hard work.
- gabrielrotbart
- gallegonovato
- Gérald LONLAS
- Gille
- GitHub Actions Bot
- glibesyck
- gogurtenjoyer
- Gohsuke Shimada
- greatwolf
- greentext2
- Gregg Helt
- H4rk
@@ -131,6 +176,7 @@ We thank them for all of their time and hard work.
- Hosted Weblate
- Iman Karim
- ismail ihsan bülbül
- ItzAttila
- Ivan Efimov
- jakehl
- Jakub Kolčář
@@ -141,6 +187,7 @@ We thank them for all of their time and hard work.
- Jason Toffaletti
- Jaulustus
- Jeff Mahoney
- Jennifer Player
- jeremy
- Jeremy Clark
- JigenD
@@ -148,19 +195,26 @@ We thank them for all of their time and hard work.
- Johan Roxendal
- Johnathon Selstad
- Jonathan
- Jordan Hewitt
- Joseph Dries III
- Josh Corbett
- JPPhoto
- jspraul
- junzi
- Justin Wong
- Juuso V
- Kaspar Emanuel
- Katsuyuki-Karasawa
- Keerigan45
- Kent Keirsey
- Kevin Brack
- Kevin Coakley
- Kevin Gibbons
- Kevin Schaul
- Kevin Turner
- Kieran Klaassen
- krummrey
- Kyle
- Kyle Lacy
- Kyle Schouviller
- Lawrence Norton
@@ -171,10 +225,15 @@ We thank them for all of their time and hard work.
- Lynne Whitehorn
- majick
- Marco Labarile
- Marta Nahorniuk
- Martin Kristiansen
- Mary Hipp
- maryhipp
- Mary Hipp Rogers
- mastercaster
- mastercaster9000
- Matthias Wild
- mauwii
- michaelk71
- mickr777
- Mihai
@@ -182,11 +241,15 @@ We thank them for all of their time and hard work.
- Mikhail Tishin
- Millun Atluri
- Minjune Song
- Mitchell Allain
- mitien
- mofuzz
- Muhammad Usama
- Name
- _nderscore
- Neil Wang
- nekowaiz
- nemuruibai
- Netzer R
- Nicholas Koh
- Nicholas Körfer
@@ -197,9 +260,11 @@ We thank them for all of their time and hard work.
- ofirkris
- Olivier Louvignes
- owenvincent
- pand4z31
- Patrick Esser
- Patrick Tien
- Patrick von Platen
- Paul Curry
- Paul Sajna
- pejotr
- Peter Baylies
@@ -207,6 +272,7 @@ We thank them for all of their time and hard work.
- plucked
- prixt
- psychedelicious
- psychedelicious@windows
- Rainer Bernhardt
- Riccardo Giovanetti
- Rich Jones
@@ -215,16 +281,22 @@ We thank them for all of their time and hard work.
- Robert Bolender
- Robin Rombach
- Rohan Barar
- Rohinish
- rpagliuca
- rromb
- Rupesh Sreeraman
- Ryan
- Ryan Cao
- Ryan Dick
- Saifeddine
- Saifeddine ALOUI
- Sam
- SammCheese
- Sam McLeod
- Sammy
- sammyf
- Samuel Husso
- Saurav Maheshkar
- Scott Lahteine
- Sean McLellan
- Sebastian Aigner
@@ -232,16 +304,21 @@ We thank them for all of their time and hard work.
- Sergey Krashevich
- Shapor Naghibzadeh
- Shawn Zhong
- Simona Liliac
- Simon Vans-Colina
- skunkworxdark
- slashtechno
- SoheilRezaei
- Song, Pengcheng
- spezialspezial
- ssantos
- StAlKeR7779
- Stefan Tobler
- Stephan Koglin-Fischer
- SteveCaruso
- Steve Martinelli
- Steven Frank
- Surisen
- System X - Files
- Taylor Kems
- techicode
@@ -260,26 +337,34 @@ We thank them for all of their time and hard work.
- tyler
- unknown
- user1
- vedant-3010
- Vedant Madane
- veprogames
- wa.code
- wfng92
- whjms
- whosawhatsis
- Will
- William Becher
- William Chong
- Wilson E. Alvarez
- woweenie
- Wubbbi
- xra
- Yeung Yiu Hung
- ymgenesis
- Yorzaren
- Yosuke Shinya
- yun saki
- ZachNagengast
- Zadagu
- zeptofine
- Zerdoumi
- Васянатор
- 冯不游
- 唐澤 克幸

## **Original CompVis Authors**
## **Original CompVis (Stable Diffusion) Authors**

- [Robin Rombach](https://github.com/rromb)
- [Patrick von Platen](https://github.com/patrickvonplaten)
5 docs/stylesheets/extra.css Normal file

@@ -0,0 +1,5 @@
:root {
--md-primary-fg-color: #35A4DB;
--md-primary-fg-color--light: #35A4DB;
--md-primary-fg-color--dark: #35A4DB;
}
@@ -1,8 +1,8 @@
{
"name": "Text to Image",
"name": "Text to Image - SD1.5",
"author": "InvokeAI",
"description": "Sample text to image workflow for Stable Diffusion 1.5/2",
"version": "1.0.1",
"version": "1.1.0",
"contact": "invoke@invoke.ai",
"tags": "text2image, SD1.5, SD2, default",
"notes": "",
@@ -18,10 +18,19 @@
{
"nodeId": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
"fieldName": "prompt"
},
{
"nodeId": "55705012-79b9-4aac-9f26-c0b10309785b",
"fieldName": "width"
},
{
"nodeId": "55705012-79b9-4aac-9f26-c0b10309785b",
"fieldName": "height"
}
],
"meta": {
"version": "1.0.0"
"category": "default",
"version": "2.0.0"
},
"nodes": [
{
@@ -30,44 +39,56 @@
"data": {
"id": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
"type": "compel",
"label": "Negative Compel Prompt",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.0",
"nodePack": "invokeai",
"inputs": {
"prompt": {
"id": "7739aff6-26cb-4016-8897-5a1fb2305e4e",
"name": "prompt",
"type": "string",
"fieldKind": "input",
"label": "Negative Prompt",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "StringField"
},
"value": ""
},
"clip": {
"id": "48d23dce-a6ae-472a-9f8c-22a714ea5ce0",
"name": "clip",
"type": "ClipField",
"fieldKind": "input",
"label": ""
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ClipField"
}
}
},
"outputs": {
"conditioning": {
"id": "37cf3a9d-f6b7-4b64-8ff6-2558c5ecc447",
"name": "conditioning",
"type": "ConditioningField",
"fieldKind": "output"
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
}
},
"label": "Negative Compel Prompt",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
}
},
"width": 320,
"height": 261,
"height": 259,
"position": {
"x": 995.7263915923627,
"y": 239.67783573351227
"x": 1000,
"y": 350
}
},
{
@@ -76,37 +97,60 @@
"data": {
"id": "55705012-79b9-4aac-9f26-c0b10309785b",
"type": "noise",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.1",
"nodePack": "invokeai",
"inputs": {
"seed": {
"id": "6431737c-918a-425d-a3b4-5d57e2f35d4d",
"name": "seed",
"type": "integer",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 0
},
"width": {
"id": "38fc5b66-fe6e-47c8-bba9-daf58e454ed7",
"name": "width",
"type": "integer",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 512
},
"height": {
"id": "16298330-e2bf-4872-a514-d6923df53cbb",
"name": "height",
"type": "integer",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 512
},
"use_cpu": {
"id": "c7c436d3-7a7a-4e76-91e4-c6deb271623c",
"name": "use_cpu",
"type": "boolean",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "BooleanField"
},
"value": true
}
},
@@ -114,35 +158,40 @@
"noise": {
"id": "50f650dc-0184-4e23-a927-0497a96fe954",
"name": "noise",
"type": "LatentsField",
"fieldKind": "output"
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
},
"width": {
"id": "bb8a452b-133d-42d1-ae4a-3843d7e4109a",
"name": "width",
"type": "integer",
"fieldKind": "output"
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
},
"height": {
"id": "35cfaa12-3b8b-4b7a-a884-327ff3abddd9",
"name": "height",
"type": "integer",
"fieldKind": "output"
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
}
},
"width": 320,
"height": 389,
"height": 388,
"position": {
"x": 993.4442117555518,
"y": 605.6757415334787
"x": 600,
"y": 325
}
},
{
@@ -151,13 +200,24 @@
"data": {
"id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
"type": "main_model_loader",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.0",
"nodePack": "invokeai",
"inputs": {
"model": {
"id": "993eabd2-40fd-44fe-bce7-5d0c7075ddab",
"name": "model",
"type": "MainModelField",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "MainModelField"
},
"value": {
"model_name": "stable-diffusion-v1-5",
"base_model": "sd-1",
@@ -169,35 +229,40 @@
"unet": {
"id": "5c18c9db-328d-46d0-8cb9-143391c410be",
"name": "unet",
"type": "UNetField",
"fieldKind": "output"
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "UNetField"
}
},
"clip": {
"id": "6effcac0-ec2f-4bf5-a49e-a2c29cf921f4",
"name": "clip",
"type": "ClipField",
"fieldKind": "output"
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ClipField"
}
},
"vae": {
"id": "57683ba3-f5f5-4f58-b9a2-4b83dacad4a1",
"name": "vae",
"type": "VaeField",
"fieldKind": "output"
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "VaeField"
}
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
}
},
"width": 320,
"height": 226,
"position": {
"x": 163.04436745878343,
"y": 254.63156870373479
"x": 600,
"y": 25
}
},
{
@@ -206,44 +271,56 @@
"data": {
"id": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
"type": "compel",
"label": "Positive Compel Prompt",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.0",
"nodePack": "invokeai",
"inputs": {
"prompt": {
"id": "7739aff6-26cb-4016-8897-5a1fb2305e4e",
"name": "prompt",
"type": "string",
"fieldKind": "input",
"label": "Positive Prompt",
"value": ""
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "StringField"
},
"value": "Super cute tiger cub, national geographic award-winning photograph"
},
"clip": {
"id": "48d23dce-a6ae-472a-9f8c-22a714ea5ce0",
"name": "clip",
"type": "ClipField",
"fieldKind": "input",
"label": ""
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ClipField"
}
}
},
"outputs": {
"conditioning": {
"id": "37cf3a9d-f6b7-4b64-8ff6-2558c5ecc447",
"name": "conditioning",
"type": "ConditioningField",
"fieldKind": "output"
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
}
},
"label": "Positive Compel Prompt",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.0.0"
}
},
"width": 320,
"height": 261,
"height": 259,
"position": {
"x": 595.7263915923627,
"y": 239.67783573351227
"x": 1000,
"y": 25
}
},
{
@@ -252,21 +329,36 @@
"data": {
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
"type": "rand_int",
"label": "Random Seed",
"isOpen": false,
"notes": "",
"isIntermediate": true,
"useCache": false,
"version": "1.0.0",
"nodePack": "invokeai",
"inputs": {
"low": {
"id": "3ec65a37-60ba-4b6c-a0b2-553dd7a84b84",
"name": "low",
"type": "integer",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 0
},
"high": {
"id": "085f853a-1a5f-494d-8bec-e4ba29a3f2d1",
"name": "high",
"type": "integer",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 2147483647
}
},
@@ -274,23 +366,20 @@
"value": {
"id": "812ade4d-7699-4261-b9fc-a6c9d2ab55ee",
"name": "value",
"type": "integer",
"fieldKind": "output"
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
},
"label": "Random Seed",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": false,
"version": "1.0.0"
}
},
"width": 320,
"height": 218,
"height": 32,
"position": {
"x": 541.094822888628,
"y": 694.5704476446829
"x": 600,
"y": 275
}
},
{
@@ -299,144 +388,224 @@
"data": {
"id": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
"type": "denoise_latents",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.5.0",
"nodePack": "invokeai",
"inputs": {
"positive_conditioning": {
"id": "90b7f4f8-ada7-4028-8100-d2e54f192052",
"name": "positive_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
},
"negative_conditioning": {
"id": "9393779e-796c-4f64-b740-902a1177bf53",
"name": "negative_conditioning",
"type": "ConditioningField",
"fieldKind": "input",
"label": ""
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
},
"noise": {
"id": "8e17f1e5-4f98-40b1-b7f4-86aeeb4554c1",
"name": "noise",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
},
"steps": {
"id": "9b63302d-6bd2-42c9-ac13-9b1afb51af88",
"name": "steps",
"type": "integer",
"fieldKind": "input",
"label": "",
"value": 10
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 50
},
"cfg_scale": {
"id": "87dd04d3-870e-49e1-98bf-af003a810109",
"name": "cfg_scale",
"type": "FloatPolymorphic",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "FloatField"
},
"value": 7.5
},
"denoising_start": {
"id": "f369d80f-4931-4740-9bcd-9f0620719fab",
"name": "denoising_start",
"type": "float",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "FloatField"
},
"value": 0
},
"denoising_end": {
"id": "747d10e5-6f02-445c-994c-0604d814de8c",
"name": "denoising_end",
"type": "float",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "FloatField"
},
"value": 1
},
"scheduler": {
"id": "1de84a4e-3a24-4ec8-862b-16ce49633b9b",
"name": "scheduler",
"type": "Scheduler",
"fieldKind": "input",
"label": "",
"value": "euler"
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "SchedulerField"
},
"value": "unipc"
},
"unet": {
"id": "ffa6fef4-3ce2-4bdb-9296-9a834849489b",
"name": "unet",
"type": "UNetField",
"fieldKind": "input",
"label": ""
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "UNetField"
}
},
"control": {
"id": "077b64cb-34be-4fcc-83f2-e399807a02bd",
"name": "control",
"type": "ControlPolymorphic",
"fieldKind": "input",
"label": ""
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "ControlField"
}
},
"ip_adapter": {
"id": "1d6948f7-3a65-4a65-a20c-768b287251aa",
"name": "ip_adapter",
"type": "IPAdapterPolymorphic",
"fieldKind": "input",
"label": ""
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "IPAdapterField"
}
},
"t2i_adapter": {
"id": "75e67b09-952f-4083-aaf4-6b804d690412",
"name": "t2i_adapter",
"type": "T2IAdapterPolymorphic",
"fieldKind": "input",
"label": ""
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "T2IAdapterField"
}
},
"cfg_rescale_multiplier": {
"id": "9101f0a6-5fe0-4826-b7b3-47e5d506826c",
"name": "cfg_rescale_multiplier",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "FloatField"
},
"value": 0
},
"latents": {
"id": "334d4ba3-5a99-4195-82c5-86fb3f4f7d43",
"name": "latents",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
},
"denoise_mask": {
"id": "0d3dbdbf-b014-4e95-8b18-ff2ff9cb0bfa",
"name": "denoise_mask",
"type": "DenoiseMaskField",
"fieldKind": "input",
"label": ""
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "DenoiseMaskField"
}
}
},
"outputs": {
"latents": {
"id": "70fa5bbc-0c38-41bb-861a-74d6d78d2f38",
"name": "latents",
"type": "LatentsField",
"fieldKind": "output"
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
},
"width": {
"id": "98ee0e6c-82aa-4e8f-8be5-dc5f00ee47f0",
"name": "width",
"type": "integer",
"fieldKind": "output"
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
},
"height": {
"id": "e8cb184a-5e1a-47c8-9695-4b8979564f5d",
"name": "height",
"type": "integer",
"fieldKind": "output"
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": true,
"useCache": true,
"version": "1.4.0"
}
},
"width": 320,
"height": 646,
"height": 703,
"position": {
"x": 1476.5794704734735,
"y": 256.80174342731783
"x": 1400,
"y": 25
}
},
{
@@ -445,153 +614,185 @@
"data": {
"id": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
"type": "l2i",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": false,
"useCache": true,
"version": "1.2.0",
"nodePack": "invokeai",
"inputs": {
"metadata": {
"id": "ab375f12-0042-4410-9182-29e30db82c85",
"name": "metadata",
"type": "MetadataField",
"fieldKind": "input",
"label": ""
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "MetadataField"
}
},
"latents": {
"id": "3a7e7efd-bff5-47d7-9d48-615127afee78",
"name": "latents",
"type": "LatentsField",
"fieldKind": "input",
"label": ""
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
},
"vae": {
"id": "a1f5f7a1-0795-4d58-b036-7820c0b0ef2b",
"name": "vae",
"type": "VaeField",
"fieldKind": "input",
"label": ""
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "VaeField"
}
},
"tiled": {
"id": "da52059a-0cee-4668-942f-519aa794d739",
"name": "tiled",
"type": "boolean",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "BooleanField"
},
"value": false
},
"fp32": {
"id": "c4841df3-b24e-4140-be3b-ccd454c2522c",
"name": "fp32",
"type": "boolean",
"fieldKind": "input",
"label": "",
"value": false
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "BooleanField"
},
"value": true
}
},
"outputs": {
"image": {
"id": "72d667d0-cf85-459d-abf2-28bd8b823fe7",
"name": "image",
"type": "ImageField",
"fieldKind": "output"
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ImageField"
}
},
"width": {
"id": "c8c907d8-1066-49d1-b9a6-83bdcd53addc",
"name": "width",
"type": "integer",
"fieldKind": "output"
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
},
"height": {
"id": "230f359c-b4ea-436c-b372-332d7dcdca85",
"name": "height",
"type": "integer",
"fieldKind": "output"
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
},
"label": "",
"isOpen": true,
"notes": "",
"embedWorkflow": false,
"isIntermediate": false,
"useCache": true,
"version": "1.0.0"
}
},
"width": 320,
"height": 267,
"height": 266,
"position": {
"x": 2037.9648469717395,
"y": 426.10844427600136
"x": 1800,
"y": 25
}
}
],
"edges": [
{
"source": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
"sourceHandle": "value",
"target": "55705012-79b9-4aac-9f26-c0b10309785b",
"targetHandle": "seed",
"id": "reactflow__edge-ea94bc37-d995-4a83-aa99-4af42479f2f2value-55705012-79b9-4aac-9f26-c0b10309785bseed",
"type": "default"
"source": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
"target": "55705012-79b9-4aac-9f26-c0b10309785b",
"type": "default",
"sourceHandle": "value",
"targetHandle": "seed"
},
{
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
"sourceHandle": "clip",
"target": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
"targetHandle": "clip",
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8clip-7d8bf987-284f-413a-b2fd-d825445a5d6cclip",
"type": "default"
},
{
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
"target": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
"type": "default",
"sourceHandle": "clip",
"target": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
"targetHandle": "clip",
"targetHandle": "clip"
},
{
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8clip-93dc02a4-d05b-48ed-b99c-c9b616af3402clip",
"type": "default"
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
"target": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
"type": "default",
"sourceHandle": "clip",
"targetHandle": "clip"
},
{
"source": "55705012-79b9-4aac-9f26-c0b10309785b",
"sourceHandle": "noise",
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
"targetHandle": "noise",
"id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-eea2702a-19fb-45b5-9d75-56b4211ec03cnoise",
"type": "default"
"source": "55705012-79b9-4aac-9f26-c0b10309785b",
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
"type": "default",
"sourceHandle": "noise",
"targetHandle": "noise"
},
{
"source": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
"sourceHandle": "conditioning",
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
"targetHandle": "positive_conditioning",
"id": "reactflow__edge-7d8bf987-284f-413a-b2fd-d825445a5d6cconditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cpositive_conditioning",
"type": "default"
},
{
"source": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
"source": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
"type": "default",
"sourceHandle": "conditioning",
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
"targetHandle": "negative_conditioning",
"targetHandle": "positive_conditioning"
},
{
"id": "reactflow__edge-93dc02a4-d05b-48ed-b99c-c9b616af3402conditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cnegative_conditioning",
"type": "default"
},
{
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
"sourceHandle": "unet",
"source": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
"targetHandle": "unet",
"type": "default",
"sourceHandle": "conditioning",
"targetHandle": "negative_conditioning"
},
{
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8unet-eea2702a-19fb-45b5-9d75-56b4211ec03cunet",
"type": "default"
},
{
"source": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
"sourceHandle": "latents",
"target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
"targetHandle": "latents",
"id": "reactflow__edge-eea2702a-19fb-45b5-9d75-56b4211ec03clatents-58c957f5-0d01-41fc-a803-b2bbf0413d4flatents",
"type": "default"
},
{
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
"sourceHandle": "vae",
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
"type": "default",
"sourceHandle": "unet",
"targetHandle": "unet"
},
{
"id": "reactflow__edge-eea2702a-19fb-45b5-9d75-56b4211ec03clatents-58c957f5-0d01-41fc-a803-b2bbf0413d4flatents",
"source": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
"target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
"targetHandle": "vae",
"type": "default",
"sourceHandle": "latents",
"targetHandle": "latents"
},
{
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8vae-58c957f5-0d01-41fc-a803-b2bbf0413d4fvae",
"type": "default"
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
"target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
"type": "default",
"sourceHandle": "vae",
"targetHandle": "vae"
}
]
}
}
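As a quick orientation to the workflow structure above, here is a minimal sketch of reading such a file and walking its graph. It only relies on fields visible in this diff (`nodes`, `data`, `edges`, `source`, `target`, and the handle fields); the file name is a hypothetical local copy.

```python
# A minimal sketch of inspecting a workflow file like the one above.
import json

with open("text_to_image.json") as f:  # hypothetical local copy
    workflow = json.load(f)

for node in workflow["nodes"]:
    data = node["data"]
    print(f'{data["id"][:8]}  type={data["type"]}  version={data["version"]}')

for edge in workflow["edges"]:
    print(f'{edge["source"][:8]}:{edge["sourceHandle"]} -> '
          f'{edge["target"][:8]}:{edge["targetHandle"]}')
```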
@@ -2,23 +2,36 @@

set -e

BCYAN="\033[1;36m"
BYELLOW="\033[1;33m"
BGREEN="\033[1;32m"
BRED="\033[1;31m"
RED="\033[31m"
RESET="\033[0m"
BCYAN="\e[1;36m"
BYELLOW="\e[1;33m"
BGREEN="\e[1;32m"
BRED="\e[1;31m"
RED="\e[31m"
RESET="\e[0m"

function is_bin_in_path {
builtin type -P "$1" &>/dev/null
}

function git_show {
git show -s --format='%h %s' $1
git show -s --format=oneline --abbrev-commit "$1" | cat
}

if [[ -v "VIRTUAL_ENV" ]]; then
# we can't just call 'deactivate' because this function is not exported
# to the environment of this script from the bash process that runs the script
echo -e "${BRED}A virtual environment is activated. Please deactivate it before proceeding.${RESET}"
exit -1
fi

cd "$(dirname "$0")"

echo
echo -e "${BYELLOW}This script must be run from the installer directory!${RESET}"
echo "The current working directory is $(pwd)"
read -p "If that looks right, press any key to proceed, or CTRL-C to exit..."
echo

# Some machines only have `python3` in PATH, others have `python` - make an alias.
# We can use a function to approximate an alias within a non-interactive shell.
if ! is_bin_in_path python && is_bin_in_path python3; then

@@ -27,58 +40,17 @@ if ! is_bin_in_path python && is_bin_in_path python3; then
}
fi

if [ -n "${VIRTUAL_ENV+set}" ]; then
# we can't just call 'deactivate' because this function is not exported
# to the environment of this script from the bash process that runs the script
echo -e "${BRED}A virtual environment is activated. Please deactivate it before proceeding.${RESET}"
exit -1
fi

VERSION=$(
cd ..
python -c "from invokeai.version import __version__ as version; print(version)"
)
VERSION="v${VERSION}"
PATCH=""
VERSION="v${VERSION}${PATCH}"

echo -e "${BGREEN}HEAD${RESET}:"
git_show
git_show HEAD
echo

# ---------------------- FRONTEND ----------------------

pushd ../invokeai/frontend/web >/dev/null
echo
echo "Installing frontend dependencies..."
echo
pnpm i --frozen-lockfile
echo
echo "Building frontend..."
if [[ -v CI ]]; then
# In CI, we have already done the frontend checks and can just build
pnpm vite build
else
# This runs all the frontend checks and builds
pnpm build
fi
echo
popd

# ---------------------- BACKEND ----------------------

echo
echo "Building wheel..."
echo

# install the 'build' package in the user site packages, if needed
# could be improved by using a temporary venv, but it's tiny and harmless
if [[ $(python -c 'from importlib.util import find_spec; print(find_spec("build") is None)') == "True" ]]; then
pip install --user build
fi

rm -rf ../build

python -m build --outdir dist/ ../.

# ----------------------

echo

@@ -97,33 +69,19 @@ done
mkdir InvokeAI-Installer/lib
cp lib/*.py InvokeAI-Installer/lib

# Copy the wheel
cp dist/*.whl InvokeAI-Installer/lib/

# Install scripts
# Mac/Linux
cp install.sh.in InvokeAI-Installer/install.sh
chmod a+x InvokeAI-Installer/install.sh

# Windows
perl -p -e "s/^set INVOKEAI_VERSION=.*/set INVOKEAI_VERSION=$VERSION/" install.bat.in >InvokeAI-Installer/install.bat
cp install.bat.in InvokeAI-Installer/install.bat
cp WinLongPathsEnabled.reg InvokeAI-Installer/

FILENAME=InvokeAI-installer-$VERSION.zip

# Zip everything up
zip -r $FILENAME InvokeAI-Installer
zip -r InvokeAI-installer-$VERSION.zip InvokeAI-Installer

if [[ ! -v CI ]]; then
# clean up, but only if we are not in a github action
rm -rf InvokeAI-Installer tmp dist ../invokeai/frontend/web/dist/
fi

if [[ -v CI ]]; then
# Set the output variable for github action
echo "INSTALLER_FILENAME=$FILENAME" >>$GITHUB_OUTPUT
echo "INSTALLER_PATH=installer/$FILENAME" >>$GITHUB_OUTPUT
echo "DIST_PATH=installer/dist/" >>$GITHUB_OUTPUT
fi
# clean up
rm -rf InvokeAI-Installer tmp dist ../invokeai/frontend/web/dist/

exit 0
@@ -15,7 +15,6 @@ if "%1" == "use-cache" (
|
||||
@rem Config
|
||||
@rem The version in the next line is replaced by an up to date release number
|
||||
@rem when create_installer.sh is run. Change the release number there.
|
||||
set INVOKEAI_VERSION=latest
|
||||
set INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
|
||||
set TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
|
||||
set PYTHON_URL=https://www.python.org/downloads/windows/
|
||||
|
||||
@@ -11,7 +11,7 @@ import sys
import venv
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Union
from typing import Optional, Tuple

SUPPORTED_PYTHON = ">=3.10.0,<=3.11.100"
INSTALLER_REQS = ["rich", "semver", "requests", "plumbum", "prompt-toolkit"]
@@ -21,40 +21,20 @@ OS = platform.uname().system
ARCH = platform.uname().machine
VERSION = "latest"

### Feature flags
# Install the virtualenv into the runtime dir
FF_VENV_IN_RUNTIME = True

# Install the wheel packaged with the installer
FF_USE_LOCAL_WHEEL = True


class Installer:
    """
    Deploys an InvokeAI installation into a given path
    """

    reqs: list[str] = INSTALLER_REQS

    def __init__(self) -> None:
        self.reqs = INSTALLER_REQS
        self.preflight()
        if os.getenv("VIRTUAL_ENV") is not None:
            print("A virtual environment is already activated. Please 'deactivate' before installation.")
            sys.exit(-1)
        self.bootstrap()

    def preflight(self) -> None:
        """
        Preflight checks
        """

        # TODO
        # verify python version
        # on macOS verify XCode tools are present
        # verify libmesa, libglx on linux
        # check that the system arch is not i386 (?)
        # check that the system has a GPU, and the type of GPU

        pass
        self.available_releases = get_github_releases()

    def mktemp_venv(self) -> TemporaryDirectory:
        """
@@ -78,12 +58,9 @@ class Installer:

        return venv_dir

    def bootstrap(self, verbose: bool = False) -> TemporaryDirectory:
    def bootstrap(self, verbose: bool = False) -> TemporaryDirectory | None:
        """
        Bootstrap the installer venv with packages required at install time

        :return: path to the virtual environment directory that was bootstrapped
        :rtype: TemporaryDirectory
        """

        print("Initializing the installer. This may take a minute - please wait...")
@@ -95,39 +72,27 @@ class Installer:
        cmd.extend(self.reqs)

        try:
            res = subprocess.check_output(cmd).decode()
            # upgrade pip to the latest version to avoid a confusing message
            res = upgrade_pip(Path(venv_dir.name))
            if verbose:
                print(res)

            # run the install prerequisites installation
            res = subprocess.check_output(cmd).decode()

            if verbose:
                print(res)

            return venv_dir
        except subprocess.CalledProcessError as e:
            print(e)

    def app_venv(self, path: str = None):
    def app_venv(self, venv_parent) -> Path:
        """
        Create a virtualenv for the InvokeAI installation
        """

        # explicit venv location
        # currently unused in normal operation
        # useful for testing or special cases
        if path is not None:
            venv_dir = Path(path)

        # experimental / testing
        elif not FF_VENV_IN_RUNTIME:
            if OS == "Windows":
                venv_dir_parent = os.getenv("APPDATA", "~/AppData/Roaming")
            elif OS == "Darwin":
                # there is no environment variable on macOS to find this
                # TODO: confirm this is working as expected
                venv_dir_parent = "~/Library/Application Support"
            elif OS == "Linux":
                venv_dir_parent = os.getenv("XDG_DATA_DIR", "~/.local/share")
            venv_dir = Path(venv_dir_parent).expanduser().resolve() / f"InvokeAI/{VERSION}/venv"

        # stable / current
        else:
            venv_dir = self.dest / ".venv"
        venv_dir = venv_parent / ".venv"

        # Prefer to copy python executables
        # so that updates to system python don't break InvokeAI
@@ -141,7 +106,7 @@ class Installer:
        return venv_dir

    def install(
        self, root: str = "~/invokeai", version: str = "latest", yes_to_all=False, find_links: Path = None
        self, version=None, root: str = "~/invokeai", yes_to_all=False, find_links: Optional[Path] = None
    ) -> None:
        """
        Install the InvokeAI application into the given runtime path
@@ -158,15 +123,20 @@ class Installer:

        import messages

        messages.welcome()
        messages.welcome(self.available_releases)

        default_path = os.environ.get("INVOKEAI_ROOT") or Path(root).expanduser().resolve()
        self.dest = default_path if yes_to_all else messages.dest_path(root)
        version = messages.choose_version(self.available_releases)

        auto_dest = Path(os.environ.get("INVOKEAI_ROOT", root)).expanduser().resolve()
        destination = auto_dest if yes_to_all else messages.dest_path(root)
        if destination is None:
            print("Could not find or create the destination directory. Installation cancelled.")
            sys.exit(0)

        # create the venv for the app
        self.venv = self.app_venv()
        self.venv = self.app_venv(venv_parent=destination)

        self.instance = InvokeAiInstance(runtime=self.dest, venv=self.venv, version=version)
        self.instance = InvokeAiInstance(runtime=destination, venv=self.venv, version=version)

        # install dependencies and the InvokeAI application
        (extra_index_url, optional_modules) = get_torch_source() if not yes_to_all else (None, None)
@@ -190,7 +160,7 @@ class InvokeAiInstance:
    A single runtime directory *may* be shared by multiple virtual environments, though this isn't currently tested or supported.
    """

    def __init__(self, runtime: Path, venv: Path, version: str) -> None:
    def __init__(self, runtime: Path, venv: Path, version: str = "stable") -> None:
        self.runtime = runtime
        self.venv = venv
        self.pip = get_pip_from_venv(venv)
@@ -199,6 +169,7 @@ class InvokeAiInstance:
        set_sys_path(venv)
        os.environ["INVOKEAI_ROOT"] = str(self.runtime.expanduser().resolve())
        os.environ["VIRTUAL_ENV"] = str(self.venv.expanduser().resolve())
        upgrade_pip(venv)

    def get(self) -> tuple[Path, Path]:
        """
@@ -212,54 +183,7 @@ class InvokeAiInstance:

    def install(self, extra_index_url=None, optional_modules=None, find_links=None):
        """
        Install this instance, including dependencies and the app itself

        :param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
        :type extra_index_url: str
        """

        import messages

        # install torch first to ensure the correct version gets installed.
        # works with either source or wheel install with negligible impact on installation times.
        messages.simple_banner("Installing PyTorch :fire:")
        self.install_torch(extra_index_url, find_links)

        messages.simple_banner("Installing the InvokeAI Application :art:")
        self.install_app(extra_index_url, optional_modules, find_links)

    def install_torch(self, extra_index_url=None, find_links=None):
        """
        Install PyTorch
        """

        from plumbum import FG, local

        pip = local[self.pip]

        (
            pip[
                "install",
                "--require-virtualenv",
                "numpy~=1.24.0",  # choose versions that won't be uninstalled during phase 2
                "urllib3~=1.26.0",
                "requests~=2.28.0",
                "torch==2.1.1",
                "torchmetrics==0.11.4",
                "torchvision>=0.16.1",
                "--force-reinstall",
                "--find-links" if find_links is not None else None,
                find_links,
                "--extra-index-url" if extra_index_url is not None else None,
                extra_index_url,
            ]
            & FG
        )

    def install_app(self, extra_index_url=None, optional_modules=None, find_links=None):
        """
        Install the application with pip.
        Supports installation from PyPi or from a local source directory.
        Install the package from PyPi.

        :param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
        :type extra_index_url: str
@@ -271,53 +195,52 @@ class InvokeAiInstance:
        :type find_links: Path
        """

        ## this only applies to pypi installs; TODO actually use this
        if self.version == "pre":
        import messages

        # not currently used, but may be useful for "install most recent version" option
        if self.version == "prerelease":
            version = None
            pre = "--pre"
            pre_flag = "--pre"
        elif self.version == "stable":
            version = None
            pre_flag = None
        else:
            version = self.version
            pre = None
            pre_flag = None

        ## TODO: only local wheel will be installed as of now; support for --version arg is TODO
        if FF_USE_LOCAL_WHEEL:
            # if no wheel, try to do a source install before giving up
            try:
                src = str(next(Path(__file__).parent.glob("InvokeAI-*.whl")))
            except StopIteration:
                try:
                    src = Path(__file__).parents[1].expanduser().resolve()
                    # if the above directory contains one of these files, we'll do a source install
                    next(src.glob("pyproject.toml"))
                    next(src.glob("invokeai"))
                except StopIteration:
                    print("Unable to find a wheel or perform a source install. Giving up.")
        src = "invokeai"
        if optional_modules:
            src += optional_modules
        if version:
            src += f"=={version}"

        elif version == "source":
            # this makes an assumption about the location of the installer package in the source tree
            src = Path(__file__).parents[1].expanduser().resolve()
        else:
            # will install from PyPi
            src = f"invokeai=={version}" if version is not None else "invokeai"
        messages.simple_banner("Installing the InvokeAI Application :art:")

        from plumbum import FG, local
        from plumbum import FG, ProcessExecutionError, local  # type: ignore

        pip = local[self.pip]

        (
            pip[
                "install",
                "--require-virtualenv",
                "--use-pep517",
                str(src) + (optional_modules if optional_modules else ""),
                "--find-links" if find_links is not None else None,
                find_links,
                "--extra-index-url" if extra_index_url is not None else None,
                extra_index_url,
                pre,
            ]
            & FG
        )
        pipeline = pip[
            "install",
            "--require-virtualenv",
            "--force-reinstall",
            "--use-pep517",
            str(src),
            "--find-links" if find_links is not None else None,
            find_links,
            "--extra-index-url" if extra_index_url is not None else None,
            extra_index_url,
            pre_flag,
        ]

        try:
            _ = pipeline & FG
        except ProcessExecutionError as e:
            print(f"Error: {e}")
            print(
                "Could not install InvokeAI. Please try downloading the latest version of the installer and install again."
            )
            sys.exit(1)

    def configure(self):
        """
@@ -373,7 +296,6 @@ class InvokeAiInstance:

        ext = "bat" if OS == "Windows" else "sh"

        # scripts = ['invoke', 'update']
        scripts = ["invoke"]

        for script in scripts:
@@ -408,6 +330,23 @@ def get_pip_from_venv(venv_path: Path) -> str:
    return str(venv_path.expanduser().resolve() / pip)


def upgrade_pip(venv_path: Path) -> str | None:
    """
    Upgrade the pip executable in the given virtual environment
    """

    python = "Scripts\\python.exe" if OS == "Windows" else "bin/python"
    python = str(venv_path.expanduser().resolve() / python)

    try:
        result = subprocess.check_output([python, "-m", "pip", "install", "--upgrade", "pip"]).decode()
    except subprocess.CalledProcessError as e:
        print(e)
        result = None

    return result
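
A minimal usage sketch of upgrade_pip(), mirroring how bootstrap() above applies it to the freshly created temporary venv; the TemporaryDirectory prefix is illustrative, not taken from the diff:

    from pathlib import Path
    from tempfile import TemporaryDirectory
    import venv

    tmp = TemporaryDirectory(prefix="invokeai-installer-")  # hypothetical prefix
    venv.create(tmp.name, with_pip=True)       # the installer's own venv setup is elided here
    out = upgrade_pip(Path(tmp.name))          # returns pip's output, or None on failure
    if out is None:
        print("pip self-upgrade failed; continuing with the bundled pip")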


def set_sys_path(venv_path: Path) -> None:
    """
    Given a path to a virtual environment, set the sys.path, in a cross-platform fashion,
@@ -431,7 +370,43 @@ def set_sys_path(venv_path: Path) -> None:
    sys.path.append(str(Path(venv_path, lib, "site-packages").expanduser().resolve()))


def get_torch_source() -> (Union[str, None], str):
def get_github_releases() -> tuple[list, list] | None:
    """
    Query Github for published (pre-)release versions.
    Return a tuple where the first element is a list of stable releases and the second element is a list of pre-releases.
    Return None if the query fails for any reason.
    """

    import requests

    ## get latest releases using github api
    url = "https://api.github.com/repos/invoke-ai/InvokeAI/releases"
    releases, pre_releases = [], []
    try:
        res = requests.get(url)
        res.raise_for_status()
        tag_info = res.json()
        for tag in tag_info:
            if not tag["prerelease"]:
                releases.append(tag["tag_name"].lstrip("v"))
            else:
                pre_releases.append(tag["tag_name"].lstrip("v"))
    except requests.HTTPError as e:
        print(f"Error: {e}")
        print("Could not fetch version information from GitHub. Please check your network connection and try again.")
        return
    except Exception as e:
        print(f"Error: {e}")
        print("An unexpected error occurred while trying to fetch version information from GitHub. Please try again.")
        return

    releases.sort(reverse=True)
    pre_releases.sort(reverse=True)

    return releases, pre_releases
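
A short sketch of consuming get_github_releases(); the None fallback matches choose_version() in the messages.py hunks further down:

    available = get_github_releases()
    if available is None:
        chosen = "stable"        # GitHub unreachable: fall back to the latest stable release
    else:
        stable, pre = available  # both lists are sorted in reverse string order
        chosen = stable[0] if stable else "stable"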


def get_torch_source() -> Tuple[str | None, str | None]:
    """
    Determine the extra index URL for pip to use for torch installation.
    This depends on the OS and the graphics accelerator in use.
@@ -446,25 +421,26 @@ def get_torch_source() -> (Union[str, None], str):
    :rtype: list
    """

    from messages import graphical_accelerator
    from messages import select_gpu

    # device can be one of: "cuda", "rocm", "cpu", "idk"
    device = graphical_accelerator()
    # device can be one of: "cuda", "rocm", "cpu", "cuda_and_dml, autodetect"
    device = select_gpu()

    url = None
    optional_modules = "[onnx]"
    if OS == "Linux":
        if device == "rocm":
            url = "https://download.pytorch.org/whl/rocm5.4.2"
        elif device == "cpu":
        if device.value == "rocm":
            url = "https://download.pytorch.org/whl/rocm5.6"
        elif device.value == "cpu":
            url = "https://download.pytorch.org/whl/cpu"

    if device == "cuda":
        url = "https://download.pytorch.org/whl/cu121"
        optional_modules = "[xformers,onnx-cuda]"
    if device == "cuda_and_dml":
        url = "https://download.pytorch.org/whl/cu121"
        optional_modules = "[xformers,onnx-directml]"
    elif OS == "Windows":
        if device.value == "cuda":
            url = "https://download.pytorch.org/whl/cu121"
            optional_modules = "[xformers,onnx-cuda]"
        if device.value == "cuda_and_dml":
            url = "https://download.pytorch.org/whl/cu121"
            optional_modules = "[xformers,onnx-directml]"

    # in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13
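
Sketch of how the (url, optional_modules) pair returned by get_torch_source() feeds pip, following the same optional-argument pattern as install_torch()/install_app() above; treat this as an illustration, not the installer's exact code:

    extra_index_url, optional_modules = get_torch_source()
    args = ["install", "--use-pep517", "invokeai" + (optional_modules or "")]
    if extra_index_url is not None:
        args += ["--extra-index-url", extra_index_url]
    # args is then handed to the venv's pip via plumbum, as in install_app()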


@@ -5,10 +5,11 @@ Installer user interaction

import os
import platform
from enum import Enum
from pathlib import Path

from prompt_toolkit import HTML, prompt
from prompt_toolkit.completion import PathCompleter
from prompt_toolkit.completion import FuzzyWordCompleter, PathCompleter
from prompt_toolkit.validation import Validator
from rich import box, print
from rich.console import Console, Group, group
@@ -35,16 +36,26 @@ else:
    console = Console(style=Style(color="grey74", bgcolor="grey19"))


def welcome():
def welcome(available_releases: tuple | None = None) -> None:
    @group()
    def text():
        if (platform_specific := _platform_specific_help()) != "":
        if (platform_specific := _platform_specific_help()) is not None:
            yield platform_specific
            yield ""
        yield Text.from_markup(
            "Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with [i]Control-C[/] and retry.",
            justify="center",
        )
        if available_releases is not None:
            latest_stable = available_releases[0][0]
            last_pre = available_releases[1][0]
            yield ""
            yield Text.from_markup(
                f"[red3]🠶[/] Latest stable release (recommended): [b bright_white]{latest_stable}", justify="center"
            )
            yield Text.from_markup(
                f"[red3]🠶[/] Last published pre-release version: [b bright_white]{last_pre}", justify="center"
            )

    console.rule()
    print(
@@ -61,19 +72,30 @@ def welcome():
    console.line()


def confirm_install(dest: Path) -> bool:
    if dest.exists():
        print(f":exclamation: Directory {dest} already exists :exclamation:")
        dest_confirmed = Confirm.ask(
            ":stop_sign: (re)install in this location?",
            default=False,
        )
    else:
        print(f"InvokeAI will be installed in {dest}")
        dest_confirmed = Confirm.ask("Use this location?", default=True)
def choose_version(available_releases: tuple | None = None) -> str:
    """
    Prompt the user to choose an Invoke version to install
    """

    # short circuit if we couldn't get a version list
    # still try to install the latest stable version
    if available_releases is None:
        return "stable"

    console.print(":grey_question: [orange3]Please choose an Invoke version to install.")

    choices = available_releases[0] + available_releases[1]

    response = prompt(
        message=f"  <Enter> to install the recommended release ({choices[0]}). <Tab> or type to pick a version: ",
        complete_while_typing=True,
        completer=FuzzyWordCompleter(choices),
    )
    console.print(f"  Version {choices[0] if response == '' else response} will be installed.")

    console.line()

    return dest_confirmed
    return "stable" if response == "" else response


def user_wants_auto_configuration() -> bool:
@@ -109,7 +131,23 @@ def user_wants_auto_configuration() -> bool:
    return choice.lower().startswith("a")


def dest_path(dest=None) -> Path:
def confirm_install(dest: Path) -> bool:
    if dest.exists():
        print(f":stop_sign: Directory {dest} already exists!")
        print("   Is this location correct?")
        default = False
    else:
        print(f":file_folder: InvokeAI will be installed in {dest}")
        default = True

    dest_confirmed = Confirm.ask("   Please confirm:", default=default)

    console.line()

    return dest_confirmed


def dest_path(dest=None) -> Path | None:
    """
    Prompt the user for the destination path and create the path

@@ -124,25 +162,21 @@ def dest_path(dest=None) -> Path:
    else:
        dest = Path.cwd().expanduser().resolve()
    prev_dest = init_path = dest

    dest_confirmed = confirm_install(dest)
    dest_confirmed = False

    while not dest_confirmed:
        # if the given destination already exists, the starting point for browsing is its parent directory.
        # the user may have made a typo, or otherwise wants to place the root dir next to an existing one.
        # if the destination dir does NOT exist, then the user must have changed their mind about the selection.
        # since we can't read their mind, start browsing at Path.cwd().
        browse_start = (prev_dest.parent if prev_dest.exists() else Path.cwd()).expanduser().resolve()
        browse_start = (dest or Path.cwd()).expanduser().resolve()

        path_completer = PathCompleter(
            only_directories=True,
            expanduser=True,
            get_paths=lambda: [browse_start],  # noqa: B023
            get_paths=lambda: [str(browse_start)],  # noqa: B023
            # get_paths=lambda: [".."].extend(list(browse_start.iterdir()))
        )

        console.line()
        console.print(f"[orange3]Please select the destination directory for the installation:[/] \[{browse_start}]: ")

        console.print(f":grey_question: [orange3]Please select the install destination:[/] \[{browse_start}]: ")
        selected = prompt(
            ">>> ",
            complete_in_thread=True,
@@ -155,6 +189,7 @@ def dest_path(dest=None) -> Path:
        )
        prev_dest = dest
        dest = Path(selected)

        console.line()

        dest_confirmed = confirm_install(dest.expanduser().resolve())
@@ -182,41 +217,45 @@ def dest_path(dest=None) -> Path:
        console.rule("Goodbye!")


def graphical_accelerator():
class GpuType(Enum):
    CUDA = "cuda"
    CUDA_AND_DML = "cuda_and_dml"
    ROCM = "rocm"
    CPU = "cpu"
    AUTODETECT = "autodetect"


def select_gpu() -> GpuType:
    """
    Prompt the user to select the graphical accelerator in their system
    This does not validate user's choices (yet), but only offers choices
    valid for the platform.
    CUDA is the fallback.
    We may be able to detect the GPU driver by shelling out to `modprobe` or `lspci`,
    but this is not yet supported or reliable. Also, some users may have exotic preferences.
    Prompt the user to select the GPU driver
    """

    if ARCH == "arm64" and OS != "Darwin":
        print(f"Only CPU acceleration is available on {ARCH} architecture. Proceeding with that.")
        return "cpu"
        return GpuType.CPU

    nvidia = (
        "an [gold1 b]NVIDIA[/] GPU (using CUDA™)",
        "cuda",
        GpuType.CUDA,
    )
    nvidia_with_dml = (
        "an [gold1 b]NVIDIA[/] GPU (using CUDA™, and DirectML™ for ONNX) -- ALPHA",
        "cuda_and_dml",
        GpuType.CUDA_AND_DML,
    )
    amd = (
        "an [gold1 b]AMD[/] GPU (using ROCm™)",
        "rocm",
        GpuType.ROCM,
    )
    cpu = (
        "no compatible GPU, or specifically prefer to use the CPU",
        "cpu",
        "Do not install any GPU support, use CPU for generation (slow)",
        GpuType.CPU,
    )
    idk = (
    autodetect = (
        "I'm not sure what to choose",
        "idk",
        GpuType.AUTODETECT,
    )

    options = []
    if OS == "Windows":
        options = [nvidia, nvidia_with_dml, cpu]
    if OS == "Linux":
@@ -230,7 +269,7 @@ def graphical_accelerator():
        return options[0][1]

    # "I don't know" is always added as the last option
    options.append(idk)
    options.append(autodetect)  # type: ignore

    options = {str(i): opt for i, opt in enumerate(options, 1)}

@@ -265,9 +304,9 @@ def graphical_accelerator():
        ),
    )

    if options[choice][1] == "idk":
    if options[choice][1] is GpuType.AUTODETECT:
        console.print(
            "No problem. We will try to install a version that [i]should[/i] be compatible. :crossed_fingers:"
            "No problem. We will install CUDA support first :crossed_fingers: If Invoke does not detect a GPU, please re-run the installer and select one of the other GPU types."
        )

    return options[choice][1]

@@ -291,7 +330,7 @@ def windows_long_paths_registry() -> None:
    """

    with open(str(Path(__file__).parent / "WinLongPathsEnabled.reg"), "r", encoding="utf-16le") as code:
        syntax = Syntax(code.read(), line_numbers=True)
        syntax = Syntax(code.read(), line_numbers=True, lexer="regedit")

    console.print(
        Panel(
@@ -301,7 +340,7 @@ def windows_long_paths_registry() -> None:
                    "We will now apply a registry fix to enable long paths on Windows. InvokeAI needs this to function correctly. We are asking your permission to modify the Windows Registry on your behalf.",
                    "",
                    "This is the change that will be applied:",
                    syntax,
                    str(syntax),
                ]
            )
        ),
@@ -340,7 +379,7 @@ def introduction() -> None:
    console.line(2)


def _platform_specific_help() -> str:
def _platform_specific_help() -> Text | None:
    if OS == "Darwin":
        text = Text.from_markup(
            """[b wheat1]macOS Users![/]\n\nPlease be sure you have the [b wheat1]Xcode command-line tools[/] installed before continuing.\nIf not, cancel with [i]Control-C[/] and follow the Xcode install instructions at [deep_sky_blue1]https://www.freecodecamp.org/news/install-xcode-command-line-tools/[/]."""
@@ -354,5 +393,5 @@ def _platform_specific_help() -> str:
[deep_sky_blue1]https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170[/]"""
        )
    else:
        text = ""
        return
    return text

@@ -2,16 +2,12 @@

set -e

BCYAN="\033[1;36m"
BYELLOW="\033[1;33m"
BGREEN="\033[1;32m"
BRED="\033[1;31m"
RED="\033[31m"
RESET="\033[0m"

function is_bin_in_path {
    builtin type -P "$1" &>/dev/null
}
BCYAN="\e[1;36m"
BYELLOW="\e[1;33m"
BGREEN="\e[1;32m"
BRED="\e[1;31m"
RED="\e[31m"
RESET="\e[0m"

function does_tag_exist {
    git rev-parse --quiet --verify "refs/tags/$1" >/dev/null
@@ -25,14 +21,6 @@ function git_show {
    git show -s --format='%h %s' $1
}

# Some machines only have `python3` in PATH, others have `python` - make an alias.
# We can use a function to approximate an alias within a non-interactive shell.
if ! is_bin_in_path python && is_bin_in_path python3; then
    function python {
        python3 "$@"
    }
fi

VERSION=$(
    cd ..
    python -c "from invokeai.version import __version__ as version; print(version)"
@@ -57,31 +45,27 @@ echo -e "${BGREEN}HEAD${RESET}:"
git_show
echo

echo -e "${BGREEN}git remote -v${RESET}:"
git remote -v
echo

echo -e -n "Create tags ${BCYAN}${VERSION}${RESET} and ${BCYAN}${LATEST_TAG}${RESET} @ ${BGREEN}HEAD${RESET}, ${RED}deleting existing tags on origin remote${RESET}? "
echo -e -n "Create tags ${BCYAN}${VERSION}${RESET} and ${BCYAN}${LATEST_TAG}${RESET} @ ${BGREEN}HEAD${RESET}, ${RED}deleting existing tags on remote${RESET}? "
read -e -p 'y/n [n]: ' input
RESPONSE=${input:='n'}
if [ "$RESPONSE" == 'y' ]; then
    echo
    echo -e "Deleting ${BCYAN}${VERSION}${RESET} tag on origin remote..."
    git push origin :refs/tags/$VERSION
    echo -e "Deleting ${BCYAN}${VERSION}${RESET} tag on remote..."
    git push --delete origin $VERSION

    echo -e "Tagging ${BGREEN}HEAD${RESET} with ${BCYAN}${VERSION}${RESET} on locally..."
    echo -e "Tagging ${BGREEN}HEAD${RESET} with ${BCYAN}${VERSION}${RESET} locally..."
    if ! git tag -fa $VERSION; then
        echo "Existing/invalid tag"
        exit -1
    fi

    echo -e "Deleting ${BCYAN}${LATEST_TAG}${RESET} tag on origin remote..."
    git push origin :refs/tags/$LATEST_TAG
    echo -e "Deleting ${BCYAN}${LATEST_TAG}${RESET} tag on remote..."
    git push --delete origin $LATEST_TAG

    echo -e "Tagging ${BGREEN}HEAD${RESET} with ${BCYAN}${LATEST_TAG}${RESET} locally..."
    git tag -fa $LATEST_TAG

    echo -e "Pushing updated tags to origin remote..."
    echo -e "Pushing updated tags to remote..."
    git push origin --tags
fi
exit 0

@@ -15,7 +15,7 @@ echo 4. Download and install models
echo 5. Change InvokeAI startup options
echo 6. Re-run the configure script to fix a broken install or to complete a major upgrade
echo 7. Open the developer console
echo 8. Update InvokeAI
echo 8. Update InvokeAI (DEPRECATED - please use the installer)
echo 9. Run the InvokeAI image database maintenance script
echo 10. Command-line help
echo Q - Quit
@@ -52,8 +52,10 @@ IF /I "%choice%" == "1" (
    echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
    call cmd /k
) ELSE IF /I "%choice%" == "8" (
    echo Running invokeai-update...
    python -m invokeai.frontend.install.invokeai_update
    echo UPDATING FROM WITHIN THE APP IS BEING DEPRECATED.
    echo Please download the installer from https://github.com/invoke-ai/InvokeAI/releases/latest and run it to update your installation.
    timeout 4
    python -m invokeai.frontend.install.invokeai_update
) ELSE IF /I "%choice%" == "9" (
    echo Running the db maintenance script...
    python .venv\Scripts\invokeai-db-maintenance.exe
@@ -77,4 +79,3 @@ pause

:ending
exit /b

@@ -90,7 +90,9 @@ do_choice() {
        ;;
    8)
        clear
        printf "Update InvokeAI\n"
        printf "UPDATING FROM WITHIN THE APP IS BEING DEPRECATED\n"
        printf "Please download the installer from https://github.com/invoke-ai/InvokeAI/releases/latest and run it to update your installation.\n"
        sleep 4
        python -m invokeai.frontend.install.invokeai_update
        ;;
    9)
@@ -122,7 +124,7 @@ do_dialog() {
        5 "Change InvokeAI startup options"
        6 "Re-run the configure script to fix a broken install or to complete a major upgrade"
        7 "Open the developer console"
        8 "Update InvokeAI"
        8 "Update InvokeAI (DEPRECATED - please use the installer)"
        9 "Run the InvokeAI image database maintenance script"
        10 "Command-line help"
    )

@@ -1,72 +0,0 @@
@echo off
setlocal EnableExtensions EnableDelayedExpansion

PUSHD "%~dp0"

set INVOKE_AI_VERSION=latest
set arg=%1
if "%arg%" neq "" (
    if "%arg:~0,2%" equ "/?" (
        echo Usage: update.bat ^<release name or branch^>
        echo Updates InvokeAI to use the indicated version of the code base.
        echo Find the version or branch for the release you want, and pass it as the argument.
        echo For example '.\update.bat v2.2.5' for release 2.2.5.
        echo '.\update.bat main' for the latest development version
        echo.
        echo If no argument provided then will install the most recent release, equivalent to
        echo '.\update.bat latest'
        exit /b
    ) else (
        set INVOKE_AI_VERSION=%arg%
    )
)

set INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive/!INVOKE_AI_VERSION!.zip"
set INVOKE_AI_DEP=https://raw.githubusercontent.com/invoke-ai/InvokeAI/!INVOKE_AI_VERSION!/environments-and-requirements/requirements-base.txt
set INVOKE_AI_MODELS=https://raw.githubusercontent.com/invoke-ai/InvokeAI/$INVOKE_AI_VERSION/configs/INITIAL_MODELS.yaml

call curl -I "%INVOKE_AI_DEP%" -fs >.tmp.out
if %errorlevel% neq 0 (
    echo '!INVOKE_AI_VERSION!' is not a known branch name or tag. Please check the version and try again.
    echo "Press any key to continue"
    pause
    exit /b
)
del .tmp.out

echo This script will update InvokeAI and all its dependencies to !INVOKE_AI_SRC!.
echo If you do not want to do this, press control-C now!
pause

call curl -L "%INVOKE_AI_DEP%" > environments-and-requirements/requirements-base.txt
call curl -L "%INVOKE_AI_MODELS%" > configs/INITIAL_MODELS.yaml


call .venv\Scripts\activate.bat
call .venv\Scripts\python -mpip install -r requirements.txt
if %errorlevel% neq 0 (
    echo Installation of requirements failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
    pause
    exit /b
)

call .venv\Scripts\python -mpip install !INVOKE_AI_SRC!
if %errorlevel% neq 0 (
    echo Installation of InvokeAI failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
    pause
    exit /b
)

@rem call .venv\Scripts\invokeai-configure --root=.

@rem if %errorlevel% neq 0 (
@rem    echo Configuration InvokeAI failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
@rem    pause
@rem    exit /b
@rem )

echo InvokeAI has been updated to '%INVOKE_AI_VERSION%'

echo "Press any key to continue"
pause
endlocal

@@ -1,58 +0,0 @@
#!/usr/bin/env bash

set -eu

if [ $# -ge 1 ] && [ "${1:0:2}" == "-h" ]; then
    echo "Usage: update.sh <release>"
    echo "Updates InvokeAI to use the indicated version of the code base."
    echo "Find the version or branch for the release you want, and pass it as the argument."
    echo "For example: update.sh v2.2.5 for release 2.2.5."
    echo "             update.sh main for the current development version."
    echo ""
    echo "If no argument provided then will install the version tagged with 'latest', equivalent to"
    echo "update.sh latest"
    exit -1
fi

INVOKE_AI_VERSION=${1:-latest}

INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive/$INVOKE_AI_VERSION.zip"
INVOKE_AI_DEP=https://raw.githubusercontent.com/invoke-ai/InvokeAI/$INVOKE_AI_VERSION/environments-and-requirements/requirements-base.txt
INVOKE_AI_MODELS=https://raw.githubusercontent.com/invoke-ai/InvokeAI/$INVOKE_AI_VERSION/configs/INITIAL_MODELS.yaml

# ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"

function _err_exit {
    if test "$1" -ne 0
    then
        echo "Something went wrong while installing InvokeAI and/or its requirements."
        echo "Update cannot continue. Please report this error to https://github.com/invoke-ai/InvokeAI/issues"
        echo -e "Error code $1; Error caught was '$2'"
        read -p "Press any key to exit..."
        exit
    fi
}

if ! curl -I "$INVOKE_AI_DEP" -fs >/dev/null; then
    echo \'$INVOKE_AI_VERSION\' is not a known branch name or tag. Please check the version and try again.
    exit
fi

echo This script will update InvokeAI and all its dependencies to version \'$INVOKE_AI_VERSION\'.
echo If you do not want to do this, press control-C now!
read -p "Press any key to continue, or CTRL-C to exit..."

curl -L "$INVOKE_AI_DEP" > environments-and-requirements/requirements-base.txt
curl -L "$INVOKE_AI_MODELS" > configs/INITIAL_MODELS.yaml

. .venv/bin/activate

./.venv/bin/python -mpip install -r requirements.txt
_err_exit $? "The pip program failed to install InvokeAI's requirements."

./.venv/bin/python -mpip install $INVOKE_AI_SRC
_err_exit $? "The pip program failed to install InvokeAI."

echo InvokeAI updated to \'$INVOKE_AI_VERSION\'

@@ -2,7 +2,9 @@

from logging import Logger

from invokeai.app.services.item_storage.item_storage_memory import ItemStorageMemory
from invokeai.app.services.shared.sqlite.sqlite_util import init_db
from invokeai.backend.model_manager.metadata import ModelMetadataStore
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.version.invokeai_version import __version__

@@ -21,7 +23,6 @@ from ..services.invocation_queue.invocation_queue_memory import MemoryInvocation
from ..services.invocation_services import InvocationServices
from ..services.invocation_stats.invocation_stats_default import InvocationStatsService
from ..services.invoker import Invoker
from ..services.item_storage.item_storage_sqlite import SqliteItemStorage
from ..services.latents_storage.latents_storage_disk import DiskLatentsStorage
from ..services.latents_storage.latents_storage_forward_cache import ForwardCacheLatentsStorage
from ..services.model_install import ModelInstallService
@@ -61,7 +62,7 @@ class ApiDependencies:
    invoker: Invoker

    @staticmethod
    def initialize(config: InvokeAIAppConfig, event_handler_id: int, logger: Logger = logger):
    def initialize(config: InvokeAIAppConfig, event_handler_id: int, logger: Logger = logger) -> None:
        logger.info(f"InvokeAI version {__version__}")
        logger.info(f"Root directory = {str(config.root_path)}")
        logger.debug(f"Internet connectivity is {config.internet_available}")
@@ -79,7 +80,7 @@ class ApiDependencies:
        board_records = SqliteBoardRecordStorage(db=db)
        boards = BoardService()
        events = FastAPIEventService(event_handler_id)
        graph_execution_manager = SqliteItemStorage[GraphExecutionState](db=db, table_name="graph_executions")
        graph_execution_manager = ItemStorageMemory[GraphExecutionState]()
        image_records = SqliteImageRecordStorage(db=db)
        images = ImageService()
        invocation_cache = MemoryInvocationCache(max_cache_size=config.node_cache_size)
@@ -87,8 +88,13 @@ class ApiDependencies:
        model_manager = ModelManagerService(config, logger)
        model_record_service = ModelRecordServiceSQL(db=db)
        download_queue_service = DownloadQueueService(event_bus=events)
        metadata_store = ModelMetadataStore(db=db)
        model_install_service = ModelInstallService(
            app_config=config, record_store=model_record_service, event_bus=events
            app_config=config,
            record_store=model_record_service,
            download_queue=download_queue_service,
            metadata_store=metadata_store,
            event_bus=events,
        )
        names = SimpleNameService()
        performance_statistics = InvocationStatsService()
@@ -131,6 +137,6 @@ class ApiDependencies:
        db.clean()

    @staticmethod
    def shutdown():
    def shutdown() -> None:
        if ApiDependencies.invoker:
            ApiDependencies.invoker.stop()

invokeai/app/api/no_cache_staticfiles.py (new file, 28 lines)
@@ -0,0 +1,28 @@
from typing import Any

from starlette.responses import Response
from starlette.staticfiles import StaticFiles


class NoCacheStaticFiles(StaticFiles):
    """
    This class is used to override the default caching behavior of starlette for static files,
    ensuring we *never* cache static files. It modifies the file response headers to strictly
    never cache the files.

    Static files include the javascript bundles, fonts, locales, and some images. Generated
    images are not included, as they are served by a router.
    """

    def __init__(self, *args: Any, **kwargs: Any):
        self.cachecontrol = "max-age=0, no-cache, no-store, must-revalidate"
        self.pragma = "no-cache"
        self.expires = "0"
        super().__init__(*args, **kwargs)

    def file_response(self, *args: Any, **kwargs: Any) -> Response:
        resp = super().file_response(*args, **kwargs)
        resp.headers.setdefault("Cache-Control", self.cachecontrol)
        resp.headers.setdefault("Pragma", self.pragma)
        resp.headers.setdefault("Expires", self.expires)
        return resp
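
A minimal sketch of mounting the new class; the real mount points appear in the api_app.py hunks further down, and the directory name here is illustrative:

    from pathlib import Path

    from fastapi import FastAPI

    app = FastAPI()
    # html=True serves index.html for "/", while the no-cache headers keep
    # stale bundles from breaking the UI after a release
    app.mount("/", NoCacheStaticFiles(directory=Path("dist"), html=True), name="ui")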

@@ -1,10 +1,10 @@
# Copyright (c) 2023 Lincoln D. Stein
"""FastAPI route for model configuration records."""


import pathlib
from hashlib import sha1
from random import randbytes
from typing import Any, Dict, List, Optional
from typing import Any, Dict, List, Optional, Set

from fastapi import Body, Path, Query, Response
from fastapi.routing import APIRouter
@@ -16,13 +16,19 @@ from invokeai.app.services.model_install import ModelInstallJob, ModelSource
from invokeai.app.services.model_records import (
    DuplicateModelException,
    InvalidModelException,
    ModelRecordOrderBy,
    ModelSummary,
    UnknownModelException,
)
from invokeai.app.services.shared.pagination import PaginatedResults
from invokeai.backend.model_manager.config import (
    AnyModelConfig,
    BaseModelType,
    ModelFormat,
    ModelType,
)
from invokeai.backend.model_manager.merge import MergeInterpolationMethod, ModelMerger
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata

from ..dependencies import ApiDependencies

@@ -32,11 +38,20 @@ model_records_router = APIRouter(prefix="/v1/model/record", tags=["model_manager
class ModelsList(BaseModel):
    """Return list of configs."""

    models: list[AnyModelConfig]
    models: List[AnyModelConfig]

    model_config = ConfigDict(use_enum_values=True)


class ModelTagSet(BaseModel):
    """Return tags for a set of models."""

    key: str
    name: str
    author: str
    tags: Set[str]


@model_records_router.get(
    "/",
    operation_id="list_model_records",
@@ -45,7 +60,7 @@ async def list_model_records(
    base_models: Optional[List[BaseModelType]] = Query(default=None, description="Base models to include"),
    model_type: Optional[ModelType] = Query(default=None, description="The type of model to get"),
    model_name: Optional[str] = Query(default=None, description="Exact match on the name of the model"),
    model_format: Optional[str] = Query(
    model_format: Optional[ModelFormat] = Query(
        default=None, description="Exact match on the format of the model (e.g. 'diffusers')"
    ),
) -> ModelsList:
@@ -86,6 +101,59 @@ async def get_model_record(
        raise HTTPException(status_code=404, detail=str(e))


@model_records_router.get("/meta", operation_id="list_model_summary")
async def list_model_summary(
    page: int = Query(default=0, description="The page to get"),
    per_page: int = Query(default=10, description="The number of models per page"),
    order_by: ModelRecordOrderBy = Query(default=ModelRecordOrderBy.Default, description="The attribute to order by"),
) -> PaginatedResults[ModelSummary]:
    """Gets a page of model summary data."""
    return ApiDependencies.invoker.services.model_records.list_models(page=page, per_page=per_page, order_by=order_by)


@model_records_router.get(
    "/meta/i/{key}",
    operation_id="get_model_metadata",
    responses={
        200: {"description": "Success"},
        400: {"description": "Bad request"},
        404: {"description": "No metadata available"},
    },
)
async def get_model_metadata(
    key: str = Path(description="Key of the model repo metadata to fetch."),
) -> Optional[AnyModelRepoMetadata]:
    """Get a model metadata object."""
    record_store = ApiDependencies.invoker.services.model_records
    result = record_store.get_metadata(key)
    if not result:
        raise HTTPException(status_code=404, detail="No metadata for a model with this key")
    return result


@model_records_router.get(
    "/tags",
    operation_id="list_tags",
)
async def list_tags() -> Set[str]:
    """Get a unique set of all the model tags."""
    record_store = ApiDependencies.invoker.services.model_records
    return record_store.list_tags()


@model_records_router.get(
    "/tags/search",
    operation_id="search_by_metadata_tags",
)
async def search_by_metadata_tags(
    tags: Set[str] = Query(default=None, description="Tags to search for"),
) -> ModelsList:
    """Get a list of models."""
    record_store = ApiDependencies.invoker.services.model_records
    results = record_store.search_by_metadata_tag(tags)
    return ModelsList(models=results)
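
Sketch of exercising the new tag endpoints over HTTP; the router prefix /v1/model/record comes from the hunk header above, while the host, port, /api base path, and the sample tag are assumptions:

    import requests

    base = "http://127.0.0.1:9090/api/v1/model/record"   # host/port/base path assumed
    all_tags = requests.get(f"{base}/tags").json()
    hits = requests.get(f"{base}/tags/search", params={"tags": ["character"]}).json()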


@model_records_router.patch(
    "/i/{key}",
    operation_id="update_model_record",
@@ -159,9 +227,7 @@ async def del_model_record(
async def add_model_record(
    config: Annotated[AnyModelConfig, Body(description="Model config", discriminator="type")],
) -> AnyModelConfig:
    """
    Add a model using the configuration information appropriate for its type.
    """
    """Add a model using the configuration information appropriate for its type."""
    logger = ApiDependencies.invoker.services.logger
    record_store = ApiDependencies.invoker.services.model_records
    if config.key == "<NOKEY>":
@@ -243,7 +309,7 @@ async def import_model(
    Installation occurs in the background. Either use list_model_install_jobs()
    to poll for completion, or listen on the event bus for the following events:

    "model_install_started"
    "model_install_running"
    "model_install_completed"
    "model_install_error"

@@ -279,16 +345,46 @@ async def import_model(
    operation_id="list_model_install_jobs",
)
async def list_model_install_jobs() -> List[ModelInstallJob]:
    """
    Return list of model install jobs.

    If the optional 'source' argument is provided, then the list will be filtered
    for partial string matches against the install source.
    """
    """Return list of model install jobs."""
    jobs: List[ModelInstallJob] = ApiDependencies.invoker.services.model_install.list_jobs()
    return jobs


@model_records_router.get(
    "/import/{id}",
    operation_id="get_model_install_job",
    responses={
        200: {"description": "Success"},
        404: {"description": "No such job"},
    },
)
async def get_model_install_job(id: int = Path(description="Model install id")) -> ModelInstallJob:
    """Return model install job corresponding to the given source."""
    try:
        return ApiDependencies.invoker.services.model_install.get_job_by_id(id)
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))


@model_records_router.delete(
    "/import/{id}",
    operation_id="cancel_model_install_job",
    responses={
        201: {"description": "The job was cancelled successfully"},
        415: {"description": "No such job"},
    },
    status_code=201,
)
async def cancel_model_install_job(id: int = Path(description="Model install job ID")) -> None:
    """Cancel the model install job(s) corresponding to the given job ID."""
    installer = ApiDependencies.invoker.services.model_install
    try:
        job = installer.get_job_by_id(id)
    except ValueError as e:
        raise HTTPException(status_code=415, detail=str(e))
    installer.cancel_job(job)


@model_records_router.patch(
    "/import",
    operation_id="prune_model_install_jobs",
@@ -298,9 +394,7 @@ async def list_model_install_jobs() -> List[ModelInstallJob]:
    },
)
async def prune_model_install_jobs() -> Response:
    """
    Prune all completed and errored jobs from the install job list.
    """
    """Prune all completed and errored jobs from the install job list."""
    ApiDependencies.invoker.services.model_install.prune_jobs()
    return Response(status_code=204)

@@ -315,8 +409,64 @@ async def prune_model_install_jobs() -> Response:
)
async def sync_models_to_config() -> Response:
    """
    Traverse the models and autoimport directories. Model files without a corresponding
    Traverse the models and autoimport directories.

    Model files without a corresponding
    record in the database are added. Orphan records without a models file are deleted.
    """
    ApiDependencies.invoker.services.model_install.sync_to_config()
    return Response(status_code=204)


@model_records_router.put(
    "/merge",
    operation_id="merge",
)
async def merge(
    keys: List[str] = Body(description="Keys for two to three models to merge", min_length=2, max_length=3),
    merged_model_name: Optional[str] = Body(description="Name of destination model", default=None),
    alpha: float = Body(description="Alpha weighting strength to apply to 2d and 3d models", default=0.5),
    force: bool = Body(
        description="Force merging of models created with different versions of diffusers",
        default=False,
    ),
    interp: Optional[MergeInterpolationMethod] = Body(description="Interpolation method", default=None),
    merge_dest_directory: Optional[str] = Body(
        description="Save the merged model to the designated directory (with 'merged_model_name' appended)",
        default=None,
    ),
) -> AnyModelConfig:
    """
    Merge diffusers models.

    keys: List of 2-3 model keys to merge together. All models must use the same base type.
    merged_model_name: Name for the merged model [Concat model names]
    alpha: Alpha value (0.0-1.0). Higher values give more weight to the second model [0.5]
    force: If true, force the merge even if the models were generated by different versions of the diffusers library [False]
    interp: Interpolation method. One of "weighted_sum", "sigmoid", "inv_sigmoid" or "add_difference" [weighted_sum]
    merge_dest_directory: Specify a directory to store the merged model in [models directory]
    """
    logger = ApiDependencies.invoker.services.logger
    try:
        logger.info(f"Merging models: {keys} into {merge_dest_directory or '<MODELS>'}/{merged_model_name}")
        dest = pathlib.Path(merge_dest_directory) if merge_dest_directory else None
        installer = ApiDependencies.invoker.services.model_install
        merger = ModelMerger(installer)
        model_names = [installer.record_store.get_model(x).name for x in keys]
        response = merger.merge_diffusion_models_and_save(
            model_keys=keys,
            merged_model_name=merged_model_name or "+".join(model_names),
            alpha=alpha,
            interp=interp,
            force=force,
            merge_dest_directory=dest,
        )
    except UnknownModelException:
        raise HTTPException(
            status_code=404,
            detail=f"One or more of the models '{keys}' not found",
        )
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    return response

@@ -23,10 +23,11 @@ class DynamicPromptsResponse(BaseModel):
)
async def parse_dynamicprompts(
    prompt: str = Body(description="The prompt to parse with dynamicprompts"),
    max_prompts: int = Body(default=1000, description="The max number of prompts to generate"),
    max_prompts: int = Body(ge=1, le=10000, default=1000, description="The max number of prompts to generate"),
    combinatorial: bool = Body(default=True, description="Whether to use the combinatorial generator"),
) -> DynamicPromptsResponse:
    """Creates a batch process"""
    max_prompts = min(max_prompts, 10000)
    generator: Union[RandomPromptGenerator, CombinatorialPromptGenerator]
    try:
        error: Optional[str] = None
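
A self-contained sketch (not the InvokeAI app itself) of what the ge/le change buys: out-of-range values are now rejected with a 422 validation error before the handler runs, instead of being silently clamped by min():

    from fastapi import Body, FastAPI
    from fastapi.testclient import TestClient

    demo = FastAPI()

    @demo.post("/parse")
    async def parse(max_prompts: int = Body(default=1000, ge=1, le=10000, embed=True)) -> int:
        return max_prompts

    client = TestClient(demo)
    assert client.post("/parse", json={"max_prompts": 20000}).status_code == 422  # rejected
    assert client.post("/parse", json={"max_prompts": 50}).json() == 50           # accepted as-is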

@@ -14,7 +14,7 @@ class SocketIO:

    def __init__(self, app: FastAPI):
        self.__sio = AsyncServer(async_mode="asgi", cors_allowed_origins="*")
        self.__app = ASGIApp(socketio_server=self.__sio, socketio_path="socket.io")
        self.__app = ASGIApp(socketio_server=self.__sio, socketio_path="/ws/socket.io")
        app.mount("/ws", self.__app)

        self.__sio.on("subscribe_queue", handler=self._handle_sub_queue)
|
||||
|
||||
@@ -3,6 +3,7 @@
 # values from the command line or config file.
 import sys

+from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles
 from invokeai.version.invokeai_version import __version__

 from .services.config import InvokeAIAppConfig
@@ -27,8 +28,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c
     from fastapi.middleware.gzip import GZipMiddleware
     from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html
     from fastapi.openapi.utils import get_openapi
-    from fastapi.responses import FileResponse, HTMLResponse
-    from fastapi.staticfiles import StaticFiles
+    from fastapi.responses import HTMLResponse
     from fastapi_events.handlers.local import local_handler
     from fastapi_events.middleware import EventHandlerASGIMiddleware
     from pydantic.json_schema import models_json_schema
@@ -76,7 +76,7 @@ mimetypes.add_type("text/css", ".css")

 # Create the app
 # TODO: create this all in a method so configuration/etc. can be passed in?
-app = FastAPI(title="Invoke AI", docs_url=None, redoc_url=None, separate_input_output_schemas=False)
+app = FastAPI(title="Invoke - Community Edition", docs_url=None, redoc_url=None, separate_input_output_schemas=False)

 # Add event handler
 event_handler_id: int = id(app)
@@ -205,8 +205,8 @@ app.openapi = custom_openapi # type: ignore [method-assign] # this is a valid a
 def overridden_swagger() -> HTMLResponse:
     return get_swagger_ui_html(
         openapi_url=app.openapi_url,  # type: ignore [arg-type] # this is always a string
-        title=app.title,
-        swagger_favicon_url="/static/docs/favicon.ico",
+        title=f"{app.title} - Swagger UI",
+        swagger_favicon_url="static/docs/invoke-favicon-docs.svg",
     )


@@ -214,26 +214,20 @@ def overridden_swagger() -> HTMLResponse:
 def overridden_redoc() -> HTMLResponse:
     return get_redoc_html(
         openapi_url=app.openapi_url,  # type: ignore [arg-type] # this is always a string
-        title=app.title,
-        redoc_favicon_url="/static/docs/favicon.ico",
+        title=f"{app.title} - Redoc",
+        redoc_favicon_url="static/docs/invoke-favicon-docs.svg",
     )


 web_root_path = Path(list(web_dir.__path__)[0])

-# Only serve the UI if we it has a build
-if (web_root_path / "dist").exists():
-    # Cannot add headers to StaticFiles, so we must serve index.html with a custom route
-    # Add cache-control: no-store header to prevent caching of index.html, which leads to broken UIs at release
-    @app.get("/", include_in_schema=False, name="ui_root")
-    def get_index() -> FileResponse:
-        return FileResponse(Path(web_root_path, "dist/index.html"), headers={"Cache-Control": "no-store"})
-
-    # # Must mount *after* the other routes else it borks em
-    app.mount("/assets", StaticFiles(directory=Path(web_root_path, "dist/assets/")), name="assets")
-    app.mount("/locales", StaticFiles(directory=Path(web_root_path, "dist/locales/")), name="locales")
-
-app.mount("/static", StaticFiles(directory=Path(web_root_path, "static/")), name="static")  # docs favicon is in here
+try:
+    app.mount("/", NoCacheStaticFiles(directory=Path(web_root_path, "dist"), html=True), name="ui")
+except RuntimeError:
+    logger.warn(f"No UI found at {web_root_path}/dist, skipping UI mount")
+app.mount(
+    "/static", NoCacheStaticFiles(directory=Path(web_root_path, "static/")), name="static"
+)  # docs favicon is in here


 def invoke_api() -> None:
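The NoCacheStaticFiles module itself is not shown in this compare. A plausible shape for it, sketched only to illustrate the idea (static responses that carry Cache-Control: no-store so index.html is never cached across releases):

from starlette.responses import Response
from starlette.staticfiles import StaticFiles


class NoCacheStaticFiles(StaticFiles):
    """StaticFiles that forbids client-side caching of everything it serves (sketch)."""

    async def get_response(self, path: str, scope) -> Response:
        response = await super().get_response(path, scope)
        response.headers["Cache-Control"] = "no-store"  # never reuse a stale UI bundle
        return response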
invokeai/app/invocations/adapters_linked.py (new file, 131 lines)
@@ -0,0 +1,131 @@
+from typing import Optional, Union
+
+from invokeai.app.invocations.baseinvocation import (
+    BaseInvocationOutput,
+    Input,
+    InputField,
+    InvocationContext,
+    OutputField,
+    invocation,
+    invocation_output,
+)
+from invokeai.app.invocations.controlnet_image_processors import ControlField, ControlNetInvocation
+from invokeai.app.invocations.ip_adapter import IPAdapterField, IPAdapterInvocation
+from invokeai.app.invocations.t2i_adapter import T2IAdapterField, T2IAdapterInvocation
+from invokeai.app.shared.fields import FieldDescriptions
+
+
+def append_list(new_item, items, item_cls):
+    """Add an item to an existing item or list of items then output as a list of items."""
+
+    result = []
+    if items is None or (isinstance(items, list) and len(items) == 0):
+        pass
+    elif isinstance(items, item_cls):
+        result.append(items)
+    elif isinstance(items, list) and all(isinstance(i, item_cls) for i in items):
+        result.extend(items)
+    else:
+        raise ValueError(f"Invalid adapter list format: {items}")
+
+    result.append(new_item)
+    return result
+
+
+@invocation_output("control_list_output")
+class ControlListOutput(BaseInvocationOutput):
+    # Outputs
+    control_list: list[ControlField] = OutputField(description=FieldDescriptions.control)
+
+
+@invocation(
+    "controlnet-linked",
+    title="ControlNet-Linked",
+    tags=["controlnet"],
+    category="controlnet",
+    version="1.1.0",
+)
+class ControlNetLinkedInvocation(ControlNetInvocation):
+    """Collects ControlNet info to pass to other nodes."""
+
+    control_list: Optional[Union[ControlField, list[ControlField]]] = InputField(
+        default=None,
+        title="ControlNet-List",
+        input=Input.Connection,
+        ui_order=0,
+    )
+
+    def invoke(self, context: InvocationContext) -> ControlListOutput:
+        # Call parent
+        output = super().invoke(context).control
+        # Append the control output to the input list
+        control_list = append_list(output, self.control_list, ControlField)
+        return ControlListOutput(control_list=control_list)
+
+
+@invocation_output("ip_adapter_list_output")
+class IPAdapterListOutput(BaseInvocationOutput):
+    # Outputs
+    ip_adapter_list: list[IPAdapterField] = OutputField(
+        description=FieldDescriptions.ip_adapter, title="IP-Adapter-List"
+    )
+
+
+@invocation(
+    "ip_adapter_linked",
+    title="IP-Adapter-Linked",
+    tags=["ip_adapter", "control"],
+    category="ip_adapter",
+    version="1.1.0",
+)
+class IPAdapterLinkedInvocation(IPAdapterInvocation):
+    """Collects IP-Adapter info to pass to other nodes."""
+
+    ip_adapter_list: Optional[Union[IPAdapterField, list[IPAdapterField]]] = InputField(
+        description=FieldDescriptions.ip_adapter,
+        title="IP-Adapter-List",
+        default=None,
+        input=Input.Connection,
+        ui_order=0,
+    )
+
+    def invoke(self, context: InvocationContext) -> IPAdapterListOutput:
+        # Call parent
+        output = super().invoke(context).ip_adapter
+        # Append the control output to the input list
+        result = append_list(output, self.ip_adapter_list, IPAdapterField)
+        return IPAdapterListOutput(ip_adapter_list=result)
+
+
+@invocation_output("ip_adapters_output")
+class T2IAdapterListOutput(BaseInvocationOutput):
+    # Outputs
+    t2i_adapter_list: list[T2IAdapterField] = OutputField(
+        description=FieldDescriptions.t2i_adapter, title="T2I Adapter-List"
+    )
+
+
+@invocation(
+    "t2i_adapter_linked",
+    title="T2I-Adapter-Linked",
+    tags=["t2i_adapter", "control"],
+    category="t2i_adapter",
+    version="1.0.0",
+)
+class T2IAdapterLinkedInvocation(T2IAdapterInvocation):
+    """Collects T2I-Adapter info to pass to other nodes."""
+
+    t2i_adapter_list: Optional[Union[T2IAdapterField, list[T2IAdapterField]]] = InputField(
+        description=FieldDescriptions.ip_adapter,
+        title="T2I-Adapter",
+        default=None,
+        input=Input.Connection,
+        ui_order=0,
+    )
+
+    def invoke(self, context: InvocationContext) -> T2IAdapterListOutput:
+        # Call parent
+        output = super().invoke(context).t2i_adapter
+        # Append the control output to the input list
+        t2i_adapter_list = append_list(output, self.t2i_adapter_list, T2IAdapterField)
+        return T2IAdapterListOutput(t2i_adapter_list=t2i_adapter_list)
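append_list normalizes whatever arrives on the list input. A sketch of its contract, using plain ints in place of the adapter field types:

assert append_list(3, None, int) == [3]          # no upstream list yet
assert append_list(3, 1, int) == [1, 3]          # a single upstream item
assert append_list(3, [1, 2], int) == [1, 2, 3]  # an upstream list is extended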
@@ -17,18 +17,20 @@ from controlnet_aux import (
     MidasDetector,
     MLSDdetector,
     NormalBaeDetector,
     OpenposeDetector,
     PidiNetDetector,
     SamDetector,
     ZoeDetector,
 )
 from controlnet_aux.util import HWC3, ade_palette
 from PIL import Image
-from pydantic import BaseModel, ConfigDict, Field, field_validator
+from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator

 from invokeai.app.invocations.primitives import ImageField, ImageOutput
+from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
 from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
 from invokeai.app.shared.fields import FieldDescriptions
+from invokeai.backend.image_util.depth_anything import DepthAnythingDetector
+from invokeai.backend.image_util.dw_openpose import DWOpenposeDetector

 from ...backend.model_management import BaseModelType
 from .baseinvocation import (
@@ -75,17 +77,16 @@ class ControlField(BaseModel):
     resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")

     @field_validator("control_weight")
     @classmethod
     def validate_control_weight(cls, v):
-        """Validate that all control weights in the valid range"""
-        if isinstance(v, list):
-            for i in v:
-                if i < -1 or i > 2:
-                    raise ValueError("Control weights must be within -1 to 2 range")
-        else:
-            if v < -1 or v > 2:
-                raise ValueError("Control weights must be within -1 to 2 range")
+        validate_weights(v)
         return v

+    @model_validator(mode="after")
+    def validate_begin_end_step_percent(self):
+        validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
+        return self
+

 @invocation_output("control_output")
 class ControlOutput(BaseInvocationOutput):
@@ -95,17 +96,17 @@ class ControlOutput(BaseInvocationOutput):
     control: ControlField = OutputField(description=FieldDescriptions.control)


-@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.0")
+@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.1")
 class ControlNetInvocation(BaseInvocation):
     """Collects ControlNet info to pass to other nodes"""

     image: ImageField = InputField(description="The control image")
     control_model: ControlNetModelField = InputField(description=FieldDescriptions.controlnet_model, input=Input.Direct)
     control_weight: Union[float, List[float]] = InputField(
-        default=1.0, description="The weight given to the ControlNet"
+        default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"
     )
     begin_step_percent: float = InputField(
-        default=0, ge=-1, le=2, description="When the ControlNet is first applied (% of total steps)"
+        default=0, ge=0, le=1, description="When the ControlNet is first applied (% of total steps)"
     )
     end_step_percent: float = InputField(
         default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)"
@@ -113,6 +114,17 @@ class ControlNetInvocation(BaseInvocation):
     control_mode: CONTROLNET_MODE_VALUES = InputField(default="balanced", description="The control mode used")
     resize_mode: CONTROLNET_RESIZE_VALUES = InputField(default="just_resize", description="The resize mode used")

+    @field_validator("control_weight")
+    @classmethod
+    def validate_control_weight(cls, v):
+        validate_weights(v)
+        return v
+
+    @model_validator(mode="after")
+    def validate_begin_end_step_percent(self) -> "ControlNetInvocation":
+        validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
+        return self
+
     def invoke(self, context: InvocationContext) -> ControlOutput:
         return ControlOutput(
             control=ControlField(
@@ -264,31 +276,6 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
         return processed_image


-@invocation(
-    "openpose_image_processor",
-    title="Openpose Processor",
-    tags=["controlnet", "openpose", "pose"],
-    category="controlnet",
-    version="1.2.0",
-)
-class OpenposeImageProcessorInvocation(ImageProcessorInvocation):
-    """Applies Openpose processing to image"""
-
-    hand_and_face: bool = InputField(default=False, description="Whether to use hands and face mode")
-    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
-    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
-
-    def run_processor(self, image):
-        openpose_processor = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
-        processed_image = openpose_processor(
-            image,
-            detect_resolution=self.detect_resolution,
-            image_resolution=self.image_resolution,
-            hand_and_face=self.hand_and_face,
-        )
-        return processed_image
-
-
 @invocation(
     "midas_depth_image_processor",
     title="Midas Depth Processor",
@@ -591,3 +578,60 @@ class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
         color_map = cv2.resize(color_map, (width, height), interpolation=cv2.INTER_NEAREST)
         color_map = Image.fromarray(color_map)
         return color_map
+
+
+DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"]
+
+
+@invocation(
+    "depth_anything_image_processor",
+    title="Depth Anything Processor",
+    tags=["controlnet", "depth", "depth anything"],
+    category="controlnet",
+    version="1.0.0",
+)
+class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
+    """Generates a depth map based on the Depth Anything algorithm"""
+
+    model_size: DEPTH_ANYTHING_MODEL_SIZES = InputField(
+        default="small", description="The size of the depth model to use"
+    )
+    resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res)
+    offload: bool = InputField(default=False)
+
+    def run_processor(self, image: Image.Image):
+        depth_anything_detector = DepthAnythingDetector()
+        depth_anything_detector.load_model(model_size=self.model_size)
+
+        if image.mode == "RGBA":
+            image = image.convert("RGB")
+
+        processed_image = depth_anything_detector(image=image, resolution=self.resolution, offload=self.offload)
+        return processed_image
+
+
+@invocation(
+    "dw_openpose_image_processor",
+    title="DW Openpose Image Processor",
+    tags=["controlnet", "dwpose", "openpose"],
+    category="controlnet",
+    version="1.0.0",
+)
+class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
+    """Generates an openpose pose from an image using DWPose"""
+
+    draw_body: bool = InputField(default=True)
+    draw_face: bool = InputField(default=False)
+    draw_hands: bool = InputField(default=False)
+    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
+
+    def run_processor(self, image):
+        dw_openpose = DWOpenposeDetector()
+        processed_image = dw_openpose(
+            image,
+            draw_face=self.draw_face,
+            draw_hands=self.draw_hands,
+            draw_body=self.draw_body,
+            resolution=self.image_resolution,
+        )
+        return processed_image
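For orientation, run_processor above drives the detector like this when used standalone. DepthAnythingDetector is an InvokeAI-internal API, shown here only as it appears in this diff; the file names are placeholders.

from PIL import Image

from invokeai.backend.image_util.depth_anything import DepthAnythingDetector

detector = DepthAnythingDetector()
detector.load_model(model_size="small")  # "large", "base" or "small"

image = Image.open("control.png")  # placeholder input
if image.mode == "RGBA":
    image = image.convert("RGB")
depth_map = detector(image=image, resolution=512, offload=False)
depth_map.save("depth.png")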
@@ -2,7 +2,7 @@ import os
 from builtins import float
 from typing import List, Union

-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator

 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
@@ -15,6 +15,7 @@ from invokeai.app.invocations.baseinvocation import (
     invocation_output,
 )
 from invokeai.app.invocations.primitives import ImageField
+from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
 from invokeai.app.shared.fields import FieldDescriptions
 from invokeai.backend.model_management.models.base import BaseModelType, ModelType
 from invokeai.backend.model_management.models.ip_adapter import get_ip_adapter_image_encoder_model_id
@@ -39,7 +40,6 @@ class IPAdapterField(BaseModel):
     ip_adapter_model: IPAdapterModelField = Field(description="The IP-Adapter model to use.")
     image_encoder_model: CLIPVisionModelField = Field(description="The name of the CLIP image encoder model.")
     weight: Union[float, List[float]] = Field(default=1, description="The weight given to the ControlNet")
-    # weight: float = Field(default=1.0, ge=0, description="The weight of the IP-Adapter.")
     begin_step_percent: float = Field(
         default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)"
     )
@@ -47,6 +47,17 @@ class IPAdapterField(BaseModel):
         default=1, ge=0, le=1, description="When the IP-Adapter is last applied (% of total steps)"
     )

+    @field_validator("weight")
+    @classmethod
+    def validate_ip_adapter_weight(cls, v):
+        validate_weights(v)
+        return v
+
+    @model_validator(mode="after")
+    def validate_begin_end_step_percent(self):
+        validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
+        return self
+

 @invocation_output("ip_adapter_output")
 class IPAdapterOutput(BaseInvocationOutput):
@@ -54,7 +65,7 @@ class IPAdapterOutput(BaseInvocationOutput):
     ip_adapter: IPAdapterField = OutputField(description=FieldDescriptions.ip_adapter, title="IP-Adapter")


-@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.1.0")
+@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.1.1")
 class IPAdapterInvocation(BaseInvocation):
     """Collects IP-Adapter info to pass to other nodes."""

@@ -64,18 +75,27 @@ class IPAdapterInvocation(BaseInvocation):
         description="The IP-Adapter model.", title="IP-Adapter Model", input=Input.Direct, ui_order=-1
     )

     # weight: float = InputField(default=1.0, description="The weight of the IP-Adapter.", ui_type=UIType.Float)
     weight: Union[float, List[float]] = InputField(
-        default=1, ge=-1, description="The weight given to the IP-Adapter", title="Weight"
+        default=1, description="The weight given to the IP-Adapter", title="Weight"
     )

     begin_step_percent: float = InputField(
-        default=0, ge=-1, le=2, description="When the IP-Adapter is first applied (% of total steps)"
+        default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)"
     )
     end_step_percent: float = InputField(
         default=1, ge=0, le=1, description="When the IP-Adapter is last applied (% of total steps)"
     )

+    @field_validator("weight")
+    @classmethod
+    def validate_ip_adapter_weight(cls, v):
+        validate_weights(v)
+        return v
+
+    @model_validator(mode="after")
+    def validate_begin_end_step_percent(self):
+        validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
+        return self
+
     def invoke(self, context: InvocationContext) -> IPAdapterOutput:
         # Lookup the CLIP Vision encoder that is intended to be used with the IP-Adapter model.
         ip_adapter_info = context.services.model_manager.model_info(
@@ -1,5 +1,6 @@
 # Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)

+import math
 from contextlib import ExitStack
 from functools import singledispatchmethod
 from typing import List, Literal, Optional, Union
@@ -220,7 +221,7 @@ def get_scheduler(
     title="Denoise Latents",
     tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
     category="latents",
-    version="1.5.0",
+    version="1.5.1",
 )
 class DenoiseLatentsInvocation(BaseInvocation):
     """Denoises noisy latents to decodable images"""
@@ -279,7 +280,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
         ui_order=7,
     )
     cfg_rescale_multiplier: float = InputField(
-        default=0, ge=0, lt=1, description=FieldDescriptions.cfg_rescale_multiplier
+        title="CFG Rescale Multiplier", default=0, ge=0, lt=1, description=FieldDescriptions.cfg_rescale_multiplier
    )
     latents: Optional[LatentsField] = InputField(
         default=None,
@@ -1228,3 +1229,57 @@ class CropLatentsCoreInvocation(BaseInvocation):
         context.services.latents.save(name, cropped_latents)

         return build_latents_output(latents_name=name, latents=cropped_latents)
+
+
+@invocation_output("ideal_size_output")
+class IdealSizeOutput(BaseInvocationOutput):
+    """Base class for invocations that output an image"""
+
+    width: int = OutputField(description="The ideal width of the image (in pixels)")
+    height: int = OutputField(description="The ideal height of the image (in pixels)")
+
+
+@invocation(
+    "ideal_size",
+    title="Ideal Size",
+    tags=["latents", "math", "ideal_size"],
+    version="1.0.2",
+)
+class IdealSizeInvocation(BaseInvocation):
+    """Calculates the ideal size for generation to avoid duplication"""
+
+    width: int = InputField(default=1024, description="Final image width")
+    height: int = InputField(default=576, description="Final image height")
+    unet: UNetField = InputField(default=None, description=FieldDescriptions.unet)
+    multiplier: float = InputField(
+        default=1.0,
+        description="Amount to multiply the model's dimensions by when calculating the ideal size (may result in initial generation artifacts if too large)",
+    )
+
+    def trim_to_multiple_of(self, *args, multiple_of=LATENT_SCALE_FACTOR):
+        return tuple((x - x % multiple_of) for x in args)
+
+    def invoke(self, context: InvocationContext) -> IdealSizeOutput:
+        aspect = self.width / self.height
+        dimension = 512
+        if self.unet.unet.base_model == BaseModelType.StableDiffusion2:
+            dimension = 768
+        elif self.unet.unet.base_model == BaseModelType.StableDiffusionXL:
+            dimension = 1024
+        dimension = dimension * self.multiplier
+        min_dimension = math.floor(dimension * 0.5)
+        model_area = dimension * dimension  # hardcoded for now since all models are trained on square images
+
+        if aspect > 1.0:
+            init_height = max(min_dimension, math.sqrt(model_area / aspect))
+            init_width = init_height * aspect
+        else:
+            init_width = max(min_dimension, math.sqrt(model_area * aspect))
+            init_height = init_width / aspect
+
+        scaled_width, scaled_height = self.trim_to_multiple_of(
+            math.floor(init_width),
+            math.floor(init_height),
+        )
+
+        return IdealSizeOutput(width=scaled_width, height=scaled_height)
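A worked example of the sizing math above, assuming an SDXL UNet (base dimension 1024), multiplier 1.0, and LATENT_SCALE_FACTOR of 8:

import math

width, height, dimension = 1024, 576, 1024     # requested size, SDXL base dimension
aspect = width / height                        # 1.777...
model_area = dimension * dimension             # 1_048_576

init_height = math.sqrt(model_area / aspect)   # 768.0 (aspect > 1 branch)
init_width = init_height * aspect              # 1365.33...


def trim(x, multiple_of=8):                    # trim_to_multiple_of with the assumed factor
    return x - x % multiple_of


print(trim(math.floor(init_width)), trim(math.floor(init_height)))  # 1360 768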
invokeai/app/invocations/metadata_linked.py (new file, 1029 lines; diff not shown)
@@ -1,6 +1,6 @@
 from typing import Union

-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator

 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
@@ -14,6 +14,7 @@ from invokeai.app.invocations.baseinvocation import (
     invocation_output,
 )
 from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_RESIZE_VALUES
 from invokeai.app.invocations.primitives import ImageField
+from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
 from invokeai.app.shared.fields import FieldDescriptions
 from invokeai.backend.model_management.models.base import BaseModelType
@@ -37,6 +38,17 @@ class T2IAdapterField(BaseModel):
     )
     resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")

+    @field_validator("weight")
+    @classmethod
+    def validate_ip_adapter_weight(cls, v):
+        validate_weights(v)
+        return v
+
+    @model_validator(mode="after")
+    def validate_begin_end_step_percent(self):
+        validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
+        return self
+

 @invocation_output("t2i_adapter_output")
 class T2IAdapterOutput(BaseInvocationOutput):
@@ -44,7 +56,7 @@ class T2IAdapterOutput(BaseInvocationOutput):


 @invocation(
-    "t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.0"
+    "t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.1"
 )
 class T2IAdapterInvocation(BaseInvocation):
     """Collects T2I-Adapter info to pass to other nodes."""
@@ -61,7 +73,7 @@ class T2IAdapterInvocation(BaseInvocation):
         default=1, ge=0, description="The weight given to the T2I-Adapter", title="Weight"
     )
     begin_step_percent: float = InputField(
-        default=0, ge=-1, le=2, description="When the T2I-Adapter is first applied (% of total steps)"
+        default=0, ge=0, le=1, description="When the T2I-Adapter is first applied (% of total steps)"
     )
     end_step_percent: float = InputField(
         default=1, ge=0, le=1, description="When the T2I-Adapter is last applied (% of total steps)"
@@ -71,6 +83,17 @@ class T2IAdapterInvocation(BaseInvocation):
         description="The resize mode applied to the T2I-Adapter input image so that it matches the target output size.",
     )

+    @field_validator("weight")
+    @classmethod
+    def validate_ip_adapter_weight(cls, v):
+        validate_weights(v)
+        return v
+
+    @model_validator(mode="after")
+    def validate_begin_end_step_percent(self):
+        validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
+        return self
+
     def invoke(self, context: InvocationContext) -> T2IAdapterOutput:
         return T2IAdapterOutput(
             t2i_adapter=T2IAdapterField(
@@ -5,12 +5,12 @@ from typing import Literal
 import cv2
 import numpy as np
 import torch
-from basicsr.archs.rrdbnet_arch import RRDBNet
 from PIL import Image
 from pydantic import ConfigDict

 from invokeai.app.invocations.primitives import ImageField, ImageOutput
 from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
+from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet
 from invokeai.backend.image_util.realesrgan.realesrgan import RealESRGAN
 from invokeai.backend.util.devices import choose_torch_device
invokeai/app/invocations/util.py (new file, 14 lines)
@@ -0,0 +1,14 @@
+from typing import Union
+
+
+def validate_weights(weights: Union[float, list[float]]) -> None:
+    """Validate that all control weights are in the valid range."""
+    to_validate = weights if isinstance(weights, list) else [weights]
+    if any(i < -1 or i > 2 for i in to_validate):
+        raise ValueError("Control weights must be within -1 to 2 range")
+
+
+def validate_begin_end_step(begin_step_percent: float, end_step_percent: float) -> None:
+    """Validate that begin_step_percent is less than end_step_percent"""
+    if begin_step_percent >= end_step_percent:
+        raise ValueError("Begin step percent must be less than or equal to end step percent")
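A quick behavioral sketch of the two helpers above, which replace the duplicated validator bodies across the adapter invocations:

from invokeai.app.invocations.util import validate_begin_end_step, validate_weights

validate_weights(1.0)           # ok: single weight in [-1, 2]
validate_weights([0.5, -0.25])  # ok: per-step weights are checked element-wise
try:
    validate_weights([0.5, 3.0])
except ValueError as e:
    print(e)                    # Control weights must be within -1 to 2 range

validate_begin_end_step(0.0, 1.0)  # ok; begin >= end raises ValueError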
@@ -1,5 +1,7 @@
 """Init file for InvokeAI configure package."""

+from invokeai.app.services.config.config_common import PagingArgumentParser
+
 from .config_default import InvokeAIAppConfig, get_invokeai_config

-__all__ = ["InvokeAIAppConfig", "get_invokeai_config"]
+__all__ = ["InvokeAIAppConfig", "get_invokeai_config", "PagingArgumentParser"]
@@ -173,10 +173,10 @@ from __future__ import annotations

 import os
 from pathlib import Path
-from typing import Any, ClassVar, Dict, List, Literal, Optional, Union, get_type_hints
+from typing import Any, ClassVar, Dict, List, Literal, Optional, Union

 from omegaconf import DictConfig, OmegaConf
-from pydantic import Field, TypeAdapter
+from pydantic import Field
 from pydantic.config import JsonDict
 from pydantic_settings import SettingsConfigDict
@@ -209,7 +209,7 @@ class InvokeAIAppConfig(InvokeAISettings):
     """Configuration object for InvokeAI App."""

     singleton_config: ClassVar[Optional[InvokeAIAppConfig]] = None
-    singleton_init: ClassVar[Optional[Dict]] = None
+    singleton_init: ClassVar[Optional[Dict[str, Any]]] = None

     # fmt: off
     type: Literal["InvokeAI"] = "InvokeAI"
@@ -251,7 +251,11 @@ class InvokeAIAppConfig(InvokeAISettings):
     log_level : Literal["debug", "info", "warning", "error", "critical"] = Field(default="info", description="Emit logging messages at this level or higher", json_schema_extra=Categories.Logging)
     log_sql : bool = Field(default=False, description="Log SQL queries", json_schema_extra=Categories.Logging)

+    # Development
     dev_reload : bool = Field(default=False, description="Automatically reload when Python sources are changed.", json_schema_extra=Categories.Development)
+    profile_graphs : bool = Field(default=False, description="Enable graph profiling", json_schema_extra=Categories.Development)
+    profile_prefix : Optional[str] = Field(default=None, description="An optional prefix for profile output files.", json_schema_extra=Categories.Development)
+    profiles_dir : Path = Field(default=Path('profiles'), description="Directory for graph profiles", json_schema_extra=Categories.Development)

     version : bool = Field(default=False, description="Show InvokeAI version and exit", json_schema_extra=Categories.Other)
@@ -263,14 +267,14 @@ class InvokeAIAppConfig(InvokeAISettings):

     # DEVICE
     device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", json_schema_extra=Categories.Device)
-    precision : Literal["auto", "float16", "float32", "autocast"] = Field(default="auto", description="Floating point precision", json_schema_extra=Categories.Device)
+    precision : Literal["auto", "float16", "bfloat16", "float32", "autocast"] = Field(default="auto", description="Floating point precision", json_schema_extra=Categories.Device)

     # GENERATION
     sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", json_schema_extra=Categories.Generation)
     attention_type : Literal["auto", "normal", "xformers", "sliced", "torch-sdp"] = Field(default="auto", description="Attention type", json_schema_extra=Categories.Generation)
     attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', json_schema_extra=Categories.Generation)
     force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.Generation)
-    png_compress_level : int = Field(default=6, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", json_schema_extra=Categories.Generation)
+    png_compress_level : int = Field(default=1, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", json_schema_extra=Categories.Generation)

     # QUEUE
     max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", json_schema_extra=Categories.Queue)
@@ -280,6 +284,9 @@ class InvokeAIAppConfig(InvokeAISettings):
     deny_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.", json_schema_extra=Categories.Nodes)
     node_cache_size : int = Field(default=512, description="How many cached nodes to keep in memory", json_schema_extra=Categories.Nodes)

+    # MODEL IMPORT
+    civitai_api_key : Optional[str] = Field(default=os.environ.get("CIVITAI_API_KEY"), description="API key for CivitAI", json_schema_extra=Categories.Other)
+
     # DEPRECATED FIELDS - STILL HERE IN ORDER TO OBTAN VALUES FROM PRE-3.1 CONFIG FILES
     always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", json_schema_extra=Categories.MemoryPerformance)
     max_cache_size : Optional[float] = Field(default=None, gt=0, description="Maximum memory amount used by model cache for rapid switching", json_schema_extra=Categories.MemoryPerformance)
@@ -289,6 +296,7 @@ class InvokeAIAppConfig(InvokeAISettings):
     lora_dir : Optional[Path] = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', json_schema_extra=Categories.Paths)
     embedding_dir : Optional[Path] = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
     controlnet_dir : Optional[Path] = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
+
     # this is not referred to in the source code and can be removed entirely
     #free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", json_schema_extra=Categories.MemoryPerformance)

@@ -301,8 +309,8 @@ class InvokeAIAppConfig(InvokeAISettings):
         self,
         argv: Optional[list[str]] = None,
         conf: Optional[DictConfig] = None,
-        clobber=False,
-    ):
+        clobber: Optional[bool] = False,
+    ) -> None:
         """
         Update settings with contents of init file, environment, and command-line settings.

@@ -328,16 +336,12 @@ class InvokeAIAppConfig(InvokeAISettings):
         super().parse_args(argv)

         if self.singleton_init and not clobber:
-            hints = get_type_hints(self.__class__)
-            for k in self.singleton_init:
-                setattr(
-                    self,
-                    k,
-                    TypeAdapter(hints[k]).validate_python(self.singleton_init[k]),
-                )
+            # When setting values in this way, set validate_assignment to true if you want to validate the value.
+            for k, v in self.singleton_init.items():
+                setattr(self, k, v)

     @classmethod
-    def get_config(cls, **kwargs: Dict[str, Any]) -> InvokeAIAppConfig:
+    def get_config(cls, **kwargs: Any) -> InvokeAIAppConfig:
         """Return a singleton InvokeAIAppConfig configuration object."""
         if (
             cls.singleton_config is None
@@ -449,13 +453,18 @@ class InvokeAIAppConfig(InvokeAISettings):
         disabled_in_config = not self.xformers_enabled
         return disabled_in_config and self.attention_type != "xformers"

+    @property
+    def profiles_path(self) -> Path:
+        """Path to the graph profiles directory."""
+        return self._resolve(self.profiles_dir)
+
     @staticmethod
     def find_root() -> Path:
         """Choose the runtime root directory when not specified on command line or init file."""
         return _find_root()


-def get_invokeai_config(**kwargs) -> InvokeAIAppConfig:
+def get_invokeai_config(**kwargs: Any) -> InvokeAIAppConfig:
     """Legacy function which returns InvokeAIAppConfig.get_config()."""
     return InvokeAIAppConfig.get_config(**kwargs)
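A sketch of the singleton access pattern shown above. Because get_config() caches the first instance, later keyword arguments are ignored unless settings are re-parsed with clobber=True; output values here are illustrative.

from invokeai.app.services.config import InvokeAIAppConfig

config = InvokeAIAppConfig.get_config()
config.parse_args([])            # load init file, environment, and CLI settings
print(config.precision)          # e.g. "auto"
print(config.profiles_path)      # profiles_dir resolved against the runtime root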
@@ -34,6 +34,7 @@ class ServiceInactiveException(Exception):


 DownloadEventHandler = Callable[["DownloadJob"], None]
+DownloadExceptionHandler = Callable[["DownloadJob", Optional[Exception]], None]


 @total_ordering
@@ -55,6 +56,7 @@ class DownloadJob(BaseModel):
     job_ended: Optional[str] = Field(
         default=None, description="Timestamp for when the download job ende1d (completed or errored)"
     )
+    content_type: Optional[str] = Field(default=None, description="Content type of downloaded file")
     bytes: int = Field(default=0, description="Bytes downloaded so far")
     total_bytes: int = Field(default=0, description="Total file size (bytes)")
@@ -70,7 +72,11 @@ class DownloadJob(BaseModel):
     _on_progress: Optional[DownloadEventHandler] = PrivateAttr(default=None)
     _on_complete: Optional[DownloadEventHandler] = PrivateAttr(default=None)
     _on_cancelled: Optional[DownloadEventHandler] = PrivateAttr(default=None)
-    _on_error: Optional[DownloadEventHandler] = PrivateAttr(default=None)
+    _on_error: Optional[DownloadExceptionHandler] = PrivateAttr(default=None)

+    def __hash__(self) -> int:
+        """Return hash of the string representation of this object, for indexing."""
+        return hash(str(self))
+
     def __le__(self, other: "DownloadJob") -> bool:
         """Return True if this job's priority is less than another's."""
@@ -87,6 +93,26 @@ class DownloadJob(BaseModel):
         """Call to cancel the job."""
         return self._cancelled

+    @property
+    def complete(self) -> bool:
+        """Return true if job completed without errors."""
+        return self.status == DownloadJobStatus.COMPLETED
+
+    @property
+    def running(self) -> bool:
+        """Return true if the job is running."""
+        return self.status == DownloadJobStatus.RUNNING
+
+    @property
+    def errored(self) -> bool:
+        """Return true if the job is errored."""
+        return self.status == DownloadJobStatus.ERROR
+
+    @property
+    def in_terminal_state(self) -> bool:
+        """Return true if job has finished, one way or another."""
+        return self.status not in [DownloadJobStatus.WAITING, DownloadJobStatus.RUNNING]
+
     @property
     def on_start(self) -> Optional[DownloadEventHandler]:
         """Return the on_start event handler."""
@@ -103,7 +129,7 @@ class DownloadJob(BaseModel):
         return self._on_complete

     @property
-    def on_error(self) -> Optional[DownloadEventHandler]:
+    def on_error(self) -> Optional[DownloadExceptionHandler]:
         """Return the on_error event handler."""
         return self._on_error
@@ -118,7 +144,7 @@ class DownloadJob(BaseModel):
         on_progress: Optional[DownloadEventHandler] = None,
         on_complete: Optional[DownloadEventHandler] = None,
         on_cancelled: Optional[DownloadEventHandler] = None,
-        on_error: Optional[DownloadEventHandler] = None,
+        on_error: Optional[DownloadExceptionHandler] = None,
     ) -> None:
         """Set the callbacks for download events."""
         self._on_start = on_start
@@ -150,10 +176,10 @@ class DownloadQueueServiceBase(ABC):
         on_progress: Optional[DownloadEventHandler] = None,
         on_complete: Optional[DownloadEventHandler] = None,
         on_cancelled: Optional[DownloadEventHandler] = None,
-        on_error: Optional[DownloadEventHandler] = None,
+        on_error: Optional[DownloadExceptionHandler] = None,
     ) -> DownloadJob:
         """
-        Create a download job.
+        Create and enqueue download job.

         :param source: Source of the download as a URL.
         :param dest: Path to download to. See below.
@@ -175,6 +201,25 @@ class DownloadQueueServiceBase(ABC):
         """
         pass

+    @abstractmethod
+    def submit_download_job(
+        self,
+        job: DownloadJob,
+        on_start: Optional[DownloadEventHandler] = None,
+        on_progress: Optional[DownloadEventHandler] = None,
+        on_complete: Optional[DownloadEventHandler] = None,
+        on_cancelled: Optional[DownloadEventHandler] = None,
+        on_error: Optional[DownloadExceptionHandler] = None,
+    ) -> None:
+        """
+        Enqueue a download job.
+
+        :param job: The DownloadJob
+        :param on_start, on_progress, on_complete, on_error: Callbacks for the indicated
+          events.
+        """
+        pass
+
     @abstractmethod
     def list_jobs(self) -> List[DownloadJob]:
         """
@@ -197,21 +242,21 @@ class DownloadQueueServiceBase(ABC):
         pass

     @abstractmethod
-    def cancel_all_jobs(self):
+    def cancel_all_jobs(self) -> None:
         """Cancel all active and enquedjobs."""
         pass

     @abstractmethod
-    def prune_jobs(self):
+    def prune_jobs(self) -> None:
         """Prune completed and errored queue items from the job list."""
         pass

     @abstractmethod
-    def cancel_job(self, job: DownloadJob):
+    def cancel_job(self, job: DownloadJob) -> None:
         """Cancel the job, clearing partial downloads and putting it into ERROR state."""
         pass

     @abstractmethod
-    def join(self):
+    def join(self) -> None:
         """Wait until all jobs are off the queue."""
         pass
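A sketch of an error callback matching the new DownloadExceptionHandler signature (the job plus the exception that stopped it). The queue variable is assumed to be an already-started DownloadQueueService; source and dest are placeholders.

from pathlib import Path
from typing import Optional

from invokeai.app.services.download import DownloadJob  # import path is an assumption


def log_failure(job: DownloadJob, exc: Optional[Exception]) -> None:
    # exc may be None when the error was recorded on the job itself
    print(f"{job.source} failed: {exc or job.error_type}")


queue.download(source="https://example.com/model.safetensors", dest=Path("/tmp"), on_error=log_failure)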
@@ -5,10 +5,9 @@ import os
 import re
 import threading
 import traceback
 from logging import Logger
 from pathlib import Path
 from queue import Empty, PriorityQueue
-from typing import Any, Dict, List, Optional, Set
+from typing import Any, Dict, List, Optional

 import requests
 from pydantic.networks import AnyHttpUrl
@@ -21,6 +20,7 @@ from invokeai.backend.util.logging import InvokeAILogger

 from .download_base import (
     DownloadEventHandler,
+    DownloadExceptionHandler,
     DownloadJob,
     DownloadJobCancelledException,
     DownloadJobStatus,
@@ -36,18 +36,6 @@ DOWNLOAD_CHUNK_SIZE = 100000
 class DownloadQueueService(DownloadQueueServiceBase):
     """Class for queued download of models."""

-    _jobs: Dict[int, DownloadJob]
-    _max_parallel_dl: int = 5
-    _worker_pool: Set[threading.Thread]
-    _queue: PriorityQueue[DownloadJob]
-    _stop_event: threading.Event
-    _lock: threading.Lock
-    _logger: Logger
-    _events: Optional[EventServiceBase] = None
-    _next_job_id: int = 0
-    _accept_download_requests: bool = False
-    _requests: requests.sessions.Session
-
     def __init__(
         self,
         max_parallel_dl: int = 5,
@@ -99,6 +87,33 @@ class DownloadQueueService(DownloadQueueServiceBase):
             self._stop_event.set()
             self._worker_pool.clear()

+    def submit_download_job(
+        self,
+        job: DownloadJob,
+        on_start: Optional[DownloadEventHandler] = None,
+        on_progress: Optional[DownloadEventHandler] = None,
+        on_complete: Optional[DownloadEventHandler] = None,
+        on_cancelled: Optional[DownloadEventHandler] = None,
+        on_error: Optional[DownloadExceptionHandler] = None,
+    ) -> None:
+        """Enqueue a download job."""
+        if not self._accept_download_requests:
+            raise ServiceInactiveException(
+                "The download service is not currently accepting requests. Please call start() to initialize the service."
+            )
+        with self._lock:
+            job.id = self._next_job_id
+            self._next_job_id += 1
+            job.set_callbacks(
+                on_start=on_start,
+                on_progress=on_progress,
+                on_complete=on_complete,
+                on_cancelled=on_cancelled,
+                on_error=on_error,
+            )
+            self._jobs[job.id] = job
+            self._queue.put(job)
+
     def download(
         self,
         source: AnyHttpUrl,
@@ -109,32 +124,27 @@ class DownloadQueueService(DownloadQueueServiceBase):
         on_progress: Optional[DownloadEventHandler] = None,
         on_complete: Optional[DownloadEventHandler] = None,
         on_cancelled: Optional[DownloadEventHandler] = None,
-        on_error: Optional[DownloadEventHandler] = None,
+        on_error: Optional[DownloadExceptionHandler] = None,
     ) -> DownloadJob:
-        """Create a download job and return its ID."""
+        """Create and enqueue a download job and return it."""
         if not self._accept_download_requests:
             raise ServiceInactiveException(
                 "The download service is not currently accepting requests. Please call start() to initialize the service."
             )
-        with self._lock:
-            id = self._next_job_id
-            self._next_job_id += 1
-            job = DownloadJob(
-                id=id,
-                source=source,
-                dest=dest,
-                priority=priority,
-                access_token=access_token,
-            )
-            job.set_callbacks(
-                on_start=on_start,
-                on_progress=on_progress,
-                on_complete=on_complete,
-                on_cancelled=on_cancelled,
-                on_error=on_error,
-            )
-            self._jobs[id] = job
-            self._queue.put(job)
+        job = DownloadJob(
+            source=source,
+            dest=dest,
+            priority=priority,
+            access_token=access_token,
+        )
+        self.submit_download_job(
+            job,
+            on_start=on_start,
+            on_progress=on_progress,
+            on_complete=on_complete,
+            on_cancelled=on_cancelled,
+            on_error=on_error,
+        )
         return job

     def join(self) -> None:
@@ -150,7 +160,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
         with self._lock:
             to_delete = set()
             for job_id, job in self._jobs.items():
-                if self._in_terminal_state(job):
+                if job.in_terminal_state:
                     to_delete.add(job_id)
             for job_id in to_delete:
                 del self._jobs[job_id]
@@ -172,19 +182,12 @@ class DownloadQueueService(DownloadQueueServiceBase):
         with self._lock:
             job.cancel()

-    def cancel_all_jobs(self, preserve_partial: bool = False) -> None:
+    def cancel_all_jobs(self) -> None:
         """Cancel all jobs (those not in enqueued, running or paused state)."""
         for job in self._jobs.values():
-            if not self._in_terminal_state(job):
+            if not job.in_terminal_state:
                 self.cancel_job(job)

-    def _in_terminal_state(self, job: DownloadJob) -> bool:
-        return job.status in [
-            DownloadJobStatus.COMPLETED,
-            DownloadJobStatus.CANCELLED,
-            DownloadJobStatus.ERROR,
-        ]
-
     def _start_workers(self, max_workers: int) -> None:
         """Start the requested number of worker threads."""
         self._stop_event.clear()
@@ -205,7 +208,6 @@ class DownloadQueueService(DownloadQueueServiceBase):
                 job = self._queue.get(timeout=1)
             except Empty:
                 continue
-
             try:
                 job.job_started = get_iso_timestamp()
                 self._do_download(job)
@@ -214,7 +216,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
             except (OSError, HTTPError) as excp:
                 job.error_type = excp.__class__.__name__ + f"({str(excp)})"
                 job.error = traceback.format_exc()
-                self._signal_job_error(job)
+                self._signal_job_error(job, excp)
             except DownloadJobCancelledException:
                 self._signal_job_cancelled(job)
                 self._cleanup_cancelled_job(job)
@@ -235,6 +237,8 @@ class DownloadQueueService(DownloadQueueServiceBase):
             resp = self._requests.get(str(url), headers=header, stream=True)
             if not resp.ok:
                 raise HTTPError(resp.reason)
+
+            job.content_type = resp.headers.get("Content-Type")
             content_length = int(resp.headers.get("content-length", 0))
             job.total_bytes = content_length
@@ -296,6 +300,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
                     self._signal_job_progress(job)

             # if we get here we are done and can rename the file to the original dest
+            self._logger.debug(f"{job.source}: saved to {job.download_path} (bytes={job.bytes})")
             in_progress_path.rename(job.download_path)

     def _validate_filename(self, directory: str, filename: str) -> bool:
@@ -322,7 +327,9 @@ class DownloadQueueService(DownloadQueueServiceBase):
             try:
                 job.on_start(job)
             except Exception as e:
-                self._logger.error(e)
+                self._logger.error(
+                    f"An error occurred while processing the on_start callback: {traceback.format_exception(e)}"
+                )
         if self._event_bus:
             assert job.download_path
             self._event_bus.emit_download_started(str(job.source), job.download_path.as_posix())
@@ -332,7 +339,9 @@ class DownloadQueueService(DownloadQueueServiceBase):
             try:
                 job.on_progress(job)
             except Exception as e:
-                self._logger.error(e)
+                self._logger.error(
+                    f"An error occurred while processing the on_progress callback: {traceback.format_exception(e)}"
+                )
         if self._event_bus:
             assert job.download_path
             self._event_bus.emit_download_progress(
@@ -348,7 +357,9 @@ class DownloadQueueService(DownloadQueueServiceBase):
             try:
                 job.on_complete(job)
             except Exception as e:
-                self._logger.error(e)
+                self._logger.error(
+                    f"An error occurred while processing the on_complete callback: {traceback.format_exception(e)}"
+                )
         if self._event_bus:
             assert job.download_path
             self._event_bus.emit_download_complete(
@@ -356,29 +367,36 @@ class DownloadQueueService(DownloadQueueServiceBase):
             )

     def _signal_job_cancelled(self, job: DownloadJob) -> None:
+        if job.status not in [DownloadJobStatus.RUNNING, DownloadJobStatus.WAITING]:
+            return
         job.status = DownloadJobStatus.CANCELLED
         if job.on_cancelled:
             try:
                 job.on_cancelled(job)
             except Exception as e:
-                self._logger.error(e)
+                self._logger.error(
+                    f"An error occurred while processing the on_cancelled callback: {traceback.format_exception(e)}"
+                )
         if self._event_bus:
             self._event_bus.emit_download_cancelled(str(job.source))

-    def _signal_job_error(self, job: DownloadJob) -> None:
+    def _signal_job_error(self, job: DownloadJob, excp: Optional[Exception] = None) -> None:
         job.status = DownloadJobStatus.ERROR
+        self._logger.error(f"{str(job.source)}: {traceback.format_exception(excp)}")
         if job.on_error:
             try:
-                job.on_error(job)
+                job.on_error(job, excp)
             except Exception as e:
-                self._logger.error(e)
+                self._logger.error(
+                    f"An error occurred while processing the on_error callback: {traceback.format_exception(e)}"
+                )
         if self._event_bus:
             assert job.error_type
             assert job.error
             self._event_bus.emit_download_error(str(job.source), error_type=job.error_type, error=job.error)

     def _cleanup_cancelled_job(self, job: DownloadJob) -> None:
-        self._logger.warning(f"Cleaning up leftover files from cancelled download job {job.download_path}")
+        self._logger.debug(f"Cleaning up leftover files from cancelled download job {job.download_path}")
         try:
             if job.download_path:
                 partial_file = self._in_progress_path(job.download_path)
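A sketch of the new two-step enqueue path that download() now delegates to, assuming queue is an already-started DownloadQueueService; pydantic coerces the plain string and path into the declared field types, and the queue assigns job.id on submission.

from pathlib import Path

from invokeai.app.services.download import DownloadJob  # import path is an assumption

job = DownloadJob(
    source="https://example.com/model.safetensors",  # placeholder URL
    dest=Path("/tmp/models"),
)
queue.submit_download_job(job, on_error=lambda j, exc: print(j.source, exc))
queue.join()  # block until the queue drains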
@@ -1,7 +1,7 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)


-from typing import Any, Optional
+from typing import Any, Dict, List, Optional, Union

 from invokeai.app.services.invocation_processor.invocation_processor_common import ProgressImage
 from invokeai.app.services.session_queue.session_queue_common import (
@@ -404,53 +404,72 @@ class EventServiceBase:
             },
         )

-    def emit_model_install_started(self, source: str) -> None:
+    def emit_model_install_downloading(
+        self,
+        source: str,
+        local_path: str,
+        bytes: int,
+        total_bytes: int,
+        parts: List[Dict[str, Union[str, int]]],
+    ) -> None:
         """
-        Emitted when an install job is started.
+        Emit at intervals while the install job is in progress (remote models only).

         :param source: Source of the model
+        :param local_path: Where model is downloading to
+        :param parts: Progress of downloading URLs that comprise the model, if any.
+        :param bytes: Number of bytes downloaded so far.
+        :param total_bytes: Total size of download, including all files.
+
+        This emits a Dict with keys "source", "local_path", "bytes" and "total_bytes".
+        """
+        self.__emit_model_event(
+            event_name="model_install_downloading",
+            payload={
+                "source": source,
+                "local_path": local_path,
+                "bytes": bytes,
+                "total_bytes": total_bytes,
+                "parts": parts,
+            },
+        )
+
+    def emit_model_install_running(self, source: str) -> None:
+        """
+        Emit once when an install job becomes active.
+
+        :param source: Source of the model; local path, repo_id or url
         """
         self.__emit_model_event(
-            event_name="model_install_started",
+            event_name="model_install_running",
             payload={"source": source},
         )

-    def emit_model_install_completed(self, source: str, key: str) -> None:
+    def emit_model_install_completed(self, source: str, key: str, total_bytes: Optional[int] = None) -> None:
         """
-        Emitted when an install job is completed successfully.
+        Emit when an install job is completed successfully.

         :param source: Source of the model; local path, repo_id or url
         :param key: Model config record key
+        :param total_bytes: Size of the model (may be None for installation of a local path)
         """
         self.__emit_model_event(
             event_name="model_install_completed",
             payload={
                 "source": source,
+                "total_bytes": total_bytes,
                 "key": key,
             },
         )

-    def emit_model_install_progress(
-        self,
-        source: str,
-        current_bytes: int,
-        total_bytes: int,
-    ) -> None:
+    def emit_model_install_cancelled(self, source: str) -> None:
         """
-        Emitted while the install job is in progress.
-        (Downloaded models only)
+        Emit when an install job is cancelled.

-        :param source: Source of the model
-        :param current_bytes: Number of bytes downloaded so far
-        :param total_bytes: Total bytes to download
+        :param source: Source of the model; local path, repo_id or url
         """
         self.__emit_model_event(
-            event_name="model_install_progress",
-            payload={
-                "source": source,
-                "current_bytes": int,
-                "total_bytes": int,
-            },
+            event_name="model_install_cancelled",
+            payload={"source": source},
         )

     def emit_model_install_error(
@@ -460,10 +479,11 @@ class EventServiceBase:
         error: str,
     ) -> None:
         """
-        Emitted when an install job encounters an exception.
+        Emit when an install job encounters an exception.

         :param source: Source of the model
-        :param exception: The exception that raised the error
+        :param error_type: The name of the exception
+        :param error: A text description of the exception
         """
         self.__emit_model_event(
             event_name="model_install_error",
@@ -154,7 +154,7 @@ class ImageService(ImageServiceABC):
             self.__invoker.services.logger.error("Image record not found")
             raise
         except Exception as e:
-            self.__invoker.services.logger.error("Problem getting image DTO")
+            self.__invoker.services.logger.error("Problem getting image metadata")
             raise e

     def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]:
@@ -1,11 +1,16 @@
 import time
 import traceback
+from contextlib import suppress
 from threading import BoundedSemaphore, Event, Thread
 from typing import Optional

 import invokeai.backend.util.logging as logger
 from invokeai.app.invocations.baseinvocation import InvocationContext
 from invokeai.app.services.invocation_queue.invocation_queue_common import InvocationQueueItem
+from invokeai.app.services.invocation_stats.invocation_stats_common import (
+    GESStatsNotFoundError,
+)
+from invokeai.app.util.profiler import Profiler

 from ..invoker import Invoker
 from .invocation_processor_base import InvocationProcessorABC
@@ -18,7 +23,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
     __invoker: Invoker
     __threadLimit: BoundedSemaphore

-    def start(self, invoker) -> None:
+    def start(self, invoker: Invoker) -> None:
         # if we do want multithreading at some point, we could make this configurable
         self.__threadLimit = BoundedSemaphore(1)
         self.__invoker = invoker
@@ -39,6 +44,27 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
         self.__threadLimit.acquire()
         queue_item: Optional[InvocationQueueItem] = None

+        profiler = (
+            Profiler(
+                logger=self.__invoker.services.logger,
+                output_dir=self.__invoker.services.configuration.profiles_path,
+                prefix=self.__invoker.services.configuration.profile_prefix,
+            )
+            if self.__invoker.services.configuration.profile_graphs
+            else None
+        )
+
+        def stats_cleanup(graph_execution_state_id: str) -> None:
+            if profiler:
+                profile_path = profiler.stop()
+                stats_path = profile_path.with_suffix(".json")
+                self.__invoker.services.performance_statistics.dump_stats(
+                    graph_execution_state_id=graph_execution_state_id, output_path=stats_path
+                )
+            with suppress(GESStatsNotFoundError):
+                self.__invoker.services.performance_statistics.log_stats(graph_execution_state_id)
+                self.__invoker.services.performance_statistics.reset_stats(graph_execution_state_id)
+
         while not stop_event.is_set():
             try:
                 queue_item = self.__invoker.services.queue.get()
@@ -49,6 +75,10 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
|
||||
# do not hammer the queue
|
||||
time.sleep(0.5)
|
||||
continue
|
||||
|
||||
if profiler and profiler.profile_id != queue_item.graph_execution_state_id:
|
||||
profiler.start(profile_id=queue_item.graph_execution_state_id)
|
||||
|
||||
try:
|
||||
graph_execution_state = self.__invoker.services.graph_execution_manager.get(
|
||||
queue_item.graph_execution_state_id
|
||||
@@ -132,13 +162,12 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
|
||||
source_node_id=source_node_id,
|
||||
result=outputs.model_dump(),
|
||||
)
|
||||
self.__invoker.services.performance_statistics.log_stats()
|
||||
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
except CanceledException:
|
||||
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
|
||||
stats_cleanup(graph_execution_state.id)
|
||||
pass
|
||||
|
||||
except Exception as e:
|
||||
@@ -163,7 +192,6 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
|
||||
error_type=e.__class__.__name__,
|
||||
error=error,
|
||||
)
|
||||
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
|
||||
pass
|
||||
|
||||
# Check queue to see if this is canceled, and skip if so
|
||||
@@ -201,6 +229,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
|
||||
queue_id=queue_item.session_queue_id,
|
||||
graph_execution_state_id=graph_execution_state.id,
|
||||
)
|
||||
stats_cleanup(graph_execution_state.id)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
pass # Log something? KeyboardInterrupt is probably not going to be seen by the processor
|
||||
|
||||
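The processor now creates the profiler once per worker thread and restarts it whenever a new graph execution id arrives. A rough standalone sketch of that lifecycle, using cProfile as a stand-in for invokeai.app.util.profiler.Profiler (whose internals are not shown in this diff):

import cProfile
from pathlib import Path
from typing import Optional

class SketchProfiler:
    """Illustrative stand-in: profiles one graph at a time, keyed by profile_id."""

    def __init__(self, output_dir: Path) -> None:
        self.output_dir = output_dir
        self.profile_id: Optional[str] = None
        self._profiler: Optional[cProfile.Profile] = None

    def start(self, profile_id: str) -> None:
        # Starting a new profile implicitly finishes the previous one, mirroring the
        # `profiler.profile_id != queue_item.graph_execution_state_id` check above.
        if self._profiler is not None:
            self.stop()
        self.profile_id = profile_id
        self._profiler = cProfile.Profile()
        self._profiler.enable()

    def stop(self) -> Path:
        assert self._profiler is not None
        self._profiler.disable()
        path = self.output_dir / f"{self.profile_id}.prof"
        self._profiler.dump_stats(path)  # readable later with pstats or snakeviz
        self._profiler = None
        return path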
@@ -30,23 +30,15 @@ writes to the system log is stored in InvocationServices.performance_statistics.

from abc import ABC, abstractmethod
-from contextlib import AbstractContextManager
-from typing import Dict
+from pathlib import Path

from invokeai.app.invocations.baseinvocation import BaseInvocation
-from invokeai.backend.model_management.model_cache import CacheStats

-from .invocation_stats_common import NodeLog
+from invokeai.app.services.invocation_stats.invocation_stats_common import InvocationStatsSummary


class InvocationStatsServiceBase(ABC):
    "Abstract base class for recording node memory/time performance statistics"

-    # {graph_id => NodeLog}
-    _stats: Dict[str, NodeLog]
-    _cache_stats: Dict[str, CacheStats]
-    ram_used: float
-    ram_changed: float

    @abstractmethod
    def __init__(self):
        """
@@ -71,51 +63,36 @@ class InvocationStatsServiceBase(ABC):
    @abstractmethod
    def reset_stats(self, graph_execution_state_id: str):
        """
-        Reset all statistics for the indicated graph
-        :param graph_execution_state_id
+        Reset all statistics for the indicated graph.
+        :param graph_execution_state_id: The id of the session whose stats to reset.
+        :raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
        """
        pass

-    @abstractmethod
-    def reset_all_stats(self):
-        """Zero all statistics"""
-        pass
-
-    @abstractmethod
-    def update_invocation_stats(
-        self,
-        graph_id: str,
-        invocation_type: str,
-        time_used: float,
-        vram_used: float,
-    ):
-        """
-        Add timing information on execution of a node. Usually
-        used internally.
-        :param graph_id: ID of the graph that is currently executing
-        :param invocation_type: String literal type of the node
-        :param time_used: Time used by node's execution (sec)
-        :param vram_used: Maximum VRAM used during execution (GB)
-        """
-        pass
-
    @abstractmethod
-    def log_stats(self):
+    def log_stats(self, graph_execution_state_id: str):
        """
        Write out the accumulated statistics to the log or somewhere else.
+        :param graph_execution_state_id: The id of the session whose stats to log.
+        :raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
        """
        pass

    @abstractmethod
-    def update_mem_stats(
-        self,
-        ram_used: float,
-        ram_changed: float,
-    ):
+    def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary:
        """
-        Update the collector with RAM memory usage info.
-
-        :param ram_used: How much RAM is currently in use.
-        :param ram_changed: How much RAM changed since last generation.
+        Gets the accumulated statistics for the indicated graph.
+        :param graph_execution_state_id: The id of the session whose stats to get.
+        :raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
        """
        pass

+    @abstractmethod
+    def dump_stats(self, graph_execution_state_id: str, output_path: Path) -> None:
+        """
+        Write out the accumulated statistics to the indicated path as JSON.
+        :param graph_execution_state_id: The id of the session whose stats to dump.
+        :param output_path: The file to write the stats to.
+        :raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
+        """
+        pass
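The reworked base class keys every operation on the session id, so stats for concurrent or stale sessions cannot bleed into each other. A short usage sketch; stats_service stands in for the concrete service wired into the invoker, and the session id is made up:

from pathlib import Path

session_id = "some-graph-execution-state-id"  # illustrative
try:
    stats_service.log_stats(session_id)  # writes the human-readable summary to the log
    stats_service.dump_stats(session_id, Path("stats.json"))  # JSON snapshot of the same data
    stats_service.reset_stats(session_id)  # drop the tracked stats for this session
except GESStatsNotFoundError:
    pass  # session was never tracked, or already pruned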
@@ -1,25 +1,183 @@
-from dataclasses import dataclass, field
-from typing import Dict
+from collections import defaultdict
+from dataclasses import asdict, dataclass
+from typing import Any, Optional

-# size of GIG in bytes
-GIG = 1073741824

+class GESStatsNotFoundError(Exception):
+    """Raised when execution stats are not found for a given Graph Execution State."""


@dataclass
-class NodeStats:
-    """Class for tracking execution stats of an invocation node"""
+class NodeExecutionStatsSummary:
+    """The stats for a specific type of node."""

-    calls: int = 0
-    time_used: float = 0.0  # seconds
-    max_vram: float = 0.0  # GB
-    cache_hits: int = 0
-    cache_misses: int = 0
-    cache_high_watermark: int = 0
+    node_type: str
+    num_calls: int
+    time_used_seconds: float
+    peak_vram_gb: float


@dataclass
-class NodeLog:
-    """Class for tracking node usage"""
+class ModelCacheStatsSummary:
+    """The stats for the model cache."""

-    # {node_type => NodeStats}
-    nodes: Dict[str, NodeStats] = field(default_factory=dict)
+    high_water_mark_gb: float
+    cache_size_gb: float
+    total_usage_gb: float
+    cache_hits: int
+    cache_misses: int
+    models_cached: int
+    models_cleared: int


+@dataclass
+class GraphExecutionStatsSummary:
+    """The stats for the graph execution state."""
+
+    graph_execution_state_id: str
+    execution_time_seconds: float
+    # `wall_time_seconds`, `ram_usage_gb` and `ram_change_gb` are derived from the node execution stats.
+    # In some situations, there are no node stats, so these values are optional.
+    wall_time_seconds: Optional[float]
+    ram_usage_gb: Optional[float]
+    ram_change_gb: Optional[float]
+
+
+@dataclass
+class InvocationStatsSummary:
+    """
+    The accumulated stats for a graph execution.
+    Its `__str__` method returns a human-readable stats summary.
+    """
+
+    vram_usage_gb: Optional[float]
+    graph_stats: GraphExecutionStatsSummary
+    model_cache_stats: ModelCacheStatsSummary
+    node_stats: list[NodeExecutionStatsSummary]
+
+    def __str__(self) -> str:
+        _str = f"Graph stats: {self.graph_stats.graph_execution_state_id}\n"
+        _str += f"{'Node':>30} {'Calls':>7} {'Seconds':>9} {'VRAM Used':>10}\n"
+
+        for summary in self.node_stats:
+            _str += f"{summary.node_type:>30} {summary.num_calls:>7} {summary.time_used_seconds:>8.3f}s {summary.peak_vram_gb:>9.3f}G\n"
+
+        _str += f"TOTAL GRAPH EXECUTION TIME: {self.graph_stats.execution_time_seconds:7.3f}s\n"
+
+        if self.graph_stats.wall_time_seconds is not None:
+            _str += f"TOTAL GRAPH WALL TIME: {self.graph_stats.wall_time_seconds:7.3f}s\n"
+
+        if self.graph_stats.ram_usage_gb is not None and self.graph_stats.ram_change_gb is not None:
+            _str += f"RAM used by InvokeAI process: {self.graph_stats.ram_usage_gb:4.2f}G ({self.graph_stats.ram_change_gb:+5.3f}G)\n"
+
+        _str += f"RAM used to load models: {self.model_cache_stats.total_usage_gb:4.2f}G\n"
+        if self.vram_usage_gb:
+            _str += f"VRAM in use: {self.vram_usage_gb:4.3f}G\n"
+        _str += "RAM cache statistics:\n"
+        _str += f"  Model cache hits: {self.model_cache_stats.cache_hits}\n"
+        _str += f"  Model cache misses: {self.model_cache_stats.cache_misses}\n"
+        _str += f"  Models cached: {self.model_cache_stats.models_cached}\n"
+        _str += f"  Models cleared from cache: {self.model_cache_stats.models_cleared}\n"
+        _str += f"  Cache high water mark: {self.model_cache_stats.high_water_mark_gb:4.2f}/{self.model_cache_stats.cache_size_gb:4.2f}G\n"
+
+        return _str
+
+    def as_dict(self) -> dict[str, Any]:
+        """Returns the stats as a dictionary."""
+        return asdict(self)
+
+
+@dataclass
+class NodeExecutionStats:
+    """Class for tracking execution stats of an invocation node."""
+
+    invocation_type: str
+
+    start_time: float  # Seconds since the epoch.
+    end_time: float  # Seconds since the epoch.
+
+    start_ram_gb: float  # GB
+    end_ram_gb: float  # GB
+
+    peak_vram_gb: float  # GB
+
+    def total_time(self) -> float:
+        return self.end_time - self.start_time
+
+
+class GraphExecutionStats:
+    """Class for tracking execution stats of a graph."""
+
+    def __init__(self):
+        self._node_stats_list: list[NodeExecutionStats] = []
+
+    def add_node_execution_stats(self, node_stats: NodeExecutionStats):
+        self._node_stats_list.append(node_stats)
+
+    def get_total_run_time(self) -> float:
+        """Get the total time spent executing nodes in the graph."""
+        total = 0.0
+        for node_stats in self._node_stats_list:
+            total += node_stats.total_time()
+        return total
+
+    def get_first_node_stats(self) -> NodeExecutionStats | None:
+        """Get the stats of the first node in the graph (by start_time)."""
+        first_node = None
+        for node_stats in self._node_stats_list:
+            if first_node is None or node_stats.start_time < first_node.start_time:
+                first_node = node_stats
+
+        assert first_node is not None
+        return first_node
+
+    def get_last_node_stats(self) -> NodeExecutionStats | None:
+        """Get the stats of the last node in the graph (by end_time)."""
+        last_node = None
+        for node_stats in self._node_stats_list:
+            if last_node is None or node_stats.end_time > last_node.end_time:
+                last_node = node_stats
+
+        return last_node
+
+    def get_graph_stats_summary(self, graph_execution_state_id: str) -> GraphExecutionStatsSummary:
+        """Get a summary of the graph stats."""
+        first_node = self.get_first_node_stats()
+        last_node = self.get_last_node_stats()
+
+        wall_time_seconds: Optional[float] = None
+        ram_usage_gb: Optional[float] = None
+        ram_change_gb: Optional[float] = None
+
+        if last_node and first_node:
+            wall_time_seconds = last_node.end_time - first_node.start_time
+            ram_usage_gb = last_node.end_ram_gb
+            ram_change_gb = last_node.end_ram_gb - first_node.start_ram_gb
+
+        return GraphExecutionStatsSummary(
+            graph_execution_state_id=graph_execution_state_id,
+            execution_time_seconds=self.get_total_run_time(),
+            wall_time_seconds=wall_time_seconds,
+            ram_usage_gb=ram_usage_gb,
+            ram_change_gb=ram_change_gb,
+        )
+
+    def get_node_stats_summaries(self) -> list[NodeExecutionStatsSummary]:
+        """Get a summary of the node stats."""
+        summaries: list[NodeExecutionStatsSummary] = []
+        node_stats_by_type: dict[str, list[NodeExecutionStats]] = defaultdict(list)
+
+        for node_stats in self._node_stats_list:
+            node_stats_by_type[node_stats.invocation_type].append(node_stats)
+
+        for node_type, node_type_stats_list in node_stats_by_type.items():
+            num_calls = len(node_type_stats_list)
+            time_used = sum([n.total_time() for n in node_type_stats_list])
+            peak_vram = max([n.peak_vram_gb for n in node_type_stats_list])
+            summary = NodeExecutionStatsSummary(
+                node_type=node_type, num_calls=num_calls, time_used_seconds=time_used, peak_vram_gb=peak_vram
+            )
+            summaries.append(summary)
+
+        return summaries
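A small self-contained example of how the new dataclasses compose; all numbers are made up for illustration:

stats = GraphExecutionStats()
stats.add_node_execution_stats(
    NodeExecutionStats(
        invocation_type="compel",
        start_time=100.0,
        end_time=100.5,
        start_ram_gb=4.0,
        end_ram_gb=4.1,
        peak_vram_gb=1.2,
    )
)
summary = InvocationStatsSummary(
    vram_usage_gb=None,  # e.g. no CUDA device
    graph_stats=stats.get_graph_stats_summary("session-1"),
    model_cache_stats=ModelCacheStatsSummary(
        high_water_mark_gb=0.0, cache_size_gb=6.0, total_usage_gb=0.0,
        cache_hits=0, cache_misses=0, models_cached=0, models_cleared=0,
    ),
    node_stats=stats.get_node_stats_summaries(),
)
print(summary)  # human-readable table via __str__
print(summary.as_dict())  # JSON-serializable dict via dataclasses.asdict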
@@ -1,5 +1,7 @@
+import json
import time
-from typing import Dict
+from contextlib import contextmanager
+from pathlib import Path

import psutil
import torch
@@ -7,161 +9,163 @@ import torch
import invokeai.backend.util.logging as logger
from invokeai.app.invocations.baseinvocation import BaseInvocation
from invokeai.app.services.invoker import Invoker
-from invokeai.app.services.model_manager.model_manager_base import ModelManagerServiceBase
+from invokeai.app.services.item_storage.item_storage_common import ItemNotFoundError
from invokeai.backend.model_management.model_cache import CacheStats

from .invocation_stats_base import InvocationStatsServiceBase
-from .invocation_stats_common import GIG, NodeLog, NodeStats
+from .invocation_stats_common import (
+    GESStatsNotFoundError,
+    GraphExecutionStats,
+    GraphExecutionStatsSummary,
+    InvocationStatsSummary,
+    ModelCacheStatsSummary,
+    NodeExecutionStats,
+    NodeExecutionStatsSummary,
+)

+# Size of 1GB in bytes.
+GB = 2**30


class InvocationStatsService(InvocationStatsServiceBase):
    """Accumulate performance information about a running graph. Collects time spent in each node,
    as well as the maximum and current VRAM utilisation for CUDA systems"""

    _invoker: Invoker

    def __init__(self):
-        # {graph_id => NodeLog}
-        self._stats: Dict[str, NodeLog] = {}
-        self._cache_stats: Dict[str, CacheStats] = {}
-        self.ram_used: float = 0.0
-        self.ram_changed: float = 0.0
+        # Maps graph_execution_state_id to GraphExecutionStats.
+        self._stats: dict[str, GraphExecutionStats] = {}
+        # Maps graph_execution_state_id to model manager CacheStats.
+        self._cache_stats: dict[str, CacheStats] = {}

    def start(self, invoker: Invoker) -> None:
        self._invoker = invoker

-    class StatsContext:
-        """Context manager for collecting statistics."""
-
-        invocation: BaseInvocation
-        collector: "InvocationStatsServiceBase"
-        graph_id: str
-        start_time: float
-        ram_used: int
-        model_manager: ModelManagerServiceBase
-
-        def __init__(
-            self,
-            invocation: BaseInvocation,
-            graph_id: str,
-            model_manager: ModelManagerServiceBase,
-            collector: "InvocationStatsServiceBase",
-        ):
-            """Initialize statistics for this run."""
-            self.invocation = invocation
-            self.collector = collector
-            self.graph_id = graph_id
-            self.start_time = 0.0
-            self.ram_used = 0
-            self.model_manager = model_manager
-
-        def __enter__(self):
-            self.start_time = time.time()
-            if torch.cuda.is_available():
-                torch.cuda.reset_peak_memory_stats()
-            self.ram_used = psutil.Process().memory_info().rss
-            if self.model_manager:
-                self.model_manager.collect_cache_stats(self.collector._cache_stats[self.graph_id])
-
-        def __exit__(self, *args):
-            """Called on exit from the context."""
-            ram_used = psutil.Process().memory_info().rss
-            self.collector.update_mem_stats(
-                ram_used=ram_used / GIG,
-                ram_changed=(ram_used - self.ram_used) / GIG,
-            )
-            self.collector.update_invocation_stats(
-                graph_id=self.graph_id,
-                invocation_type=self.invocation.type,  # type: ignore # `type` is not on the `BaseInvocation` model, but *is* on all invocations
-                time_used=time.time() - self.start_time,
-                vram_used=torch.cuda.max_memory_allocated() / GIG if torch.cuda.is_available() else 0.0,
-            )
-
-    def collect_stats(
-        self,
-        invocation: BaseInvocation,
-        graph_execution_state_id: str,
-    ) -> StatsContext:
-        if not self._stats.get(graph_execution_state_id):  # first time we're seeing this
-            self._stats[graph_execution_state_id] = NodeLog()
+    @contextmanager
+    def collect_stats(self, invocation: BaseInvocation, graph_execution_state_id: str):
+        if not self._stats.get(graph_execution_state_id):
+            # First time we're seeing this graph_execution_state_id.
+            self._stats[graph_execution_state_id] = GraphExecutionStats()
            self._cache_stats[graph_execution_state_id] = CacheStats()
-        return self.StatsContext(invocation, graph_execution_state_id, self._invoker.services.model_manager, self)

-    def reset_all_stats(self):
-        """Zero all statistics"""
-        self._stats = {}
+        # Prune stale stats. There should be none since we're starting a new graph, but just in case.
+        self._prune_stale_stats()
+
+        # Record state before the invocation.
+        start_time = time.time()
+        start_ram = psutil.Process().memory_info().rss
+        if torch.cuda.is_available():
+            torch.cuda.reset_peak_memory_stats()
+        if self._invoker.services.model_manager:
+            self._invoker.services.model_manager.collect_cache_stats(self._cache_stats[graph_execution_state_id])

-    def reset_stats(self, graph_execution_id: str):
        try:
-            self._stats.pop(graph_execution_id)
-        except KeyError:
-            logger.warning(f"Attempted to clear statistics for unknown graph {graph_execution_id}")
+            # Let the invocation run.
+            yield None
+        finally:
+            # Record state after the invocation.
+            node_stats = NodeExecutionStats(
+                invocation_type=invocation.get_type(),
+                start_time=start_time,
+                end_time=time.time(),
+                start_ram_gb=start_ram / GB,
+                end_ram_gb=psutil.Process().memory_info().rss / GB,
+                peak_vram_gb=torch.cuda.max_memory_allocated() / GB if torch.cuda.is_available() else 0.0,
+            )
+            self._stats[graph_execution_state_id].add_node_execution_stats(node_stats)

-    def update_mem_stats(
-        self,
-        ram_used: float,
-        ram_changed: float,
-    ):
-        self.ram_used = ram_used
-        self.ram_changed = ram_changed
+    def _prune_stale_stats(self):
+        """Check all graphs being tracked and prune any that have completed/errored.

-    def update_invocation_stats(
-        self,
-        graph_id: str,
-        invocation_type: str,
-        time_used: float,
-        vram_used: float,
-    ):
-        if not self._stats[graph_id].nodes.get(invocation_type):
-            self._stats[graph_id].nodes[invocation_type] = NodeStats()
-        stats = self._stats[graph_id].nodes[invocation_type]
-        stats.calls += 1
-        stats.time_used += time_used
-        stats.max_vram = max(stats.max_vram, vram_used)

-    def log_stats(self):
-        completed = set()
-        errored = set()
-        for graph_id, _node_log in self._stats.items():
+        This shouldn't be necessary, but we don't have totally robust upstream handling of graph completions/errors, so
+        for now we call this function periodically to prevent them from accumulating.
+        """
+        to_prune: list[str] = []
+        for graph_execution_state_id in self._stats:
            try:
-                current_graph_state = self._invoker.services.graph_execution_manager.get(graph_id)
-            except Exception:
-                errored.add(graph_id)
+                graph_execution_state = self._invoker.services.graph_execution_manager.get(graph_execution_state_id)
+            except ItemNotFoundError:
+                # TODO(ryand): What would cause this? Should this exception just be allowed to propagate?
+                logger.warning(f"Failed to get graph state for {graph_execution_state_id}.")
                continue

-            if not current_graph_state.is_complete():
+            if not graph_execution_state.is_complete():
+                # The graph is still running, don't prune it.
                continue

-            total_time = 0
-            logger.info(f"Graph stats: {graph_id}")
-            logger.info(f"{'Node':>30} {'Calls':>7}{'Seconds':>9} {'VRAM Used':>10}")
-            for node_type, stats in self._stats[graph_id].nodes.items():
-                logger.info(f"{node_type:>30} {stats.calls:>4} {stats.time_used:7.3f}s {stats.max_vram:4.3f}G")
-                total_time += stats.time_used
+            to_prune.append(graph_execution_state_id)

-            cache_stats = self._cache_stats[graph_id]
-            hwm = cache_stats.high_watermark / GIG
-            tot = cache_stats.cache_size / GIG
-            loaded = sum(list(cache_stats.loaded_model_sizes.values())) / GIG
+        for graph_execution_state_id in to_prune:
+            del self._stats[graph_execution_state_id]
+            del self._cache_stats[graph_execution_state_id]

-            logger.info(f"TOTAL GRAPH EXECUTION TIME: {total_time:7.3f}s")
-            logger.info("RAM used by InvokeAI process: " + "%4.2fG" % self.ram_used + f" ({self.ram_changed:+5.3f}G)")
-            logger.info(f"RAM used to load models: {loaded:4.2f}G")
-            if torch.cuda.is_available():
-                logger.info("VRAM in use: " + "%4.3fG" % (torch.cuda.memory_allocated() / GIG))
-            logger.info("RAM cache statistics:")
-            logger.info(f"  Model cache hits: {cache_stats.hits}")
-            logger.info(f"  Model cache misses: {cache_stats.misses}")
-            logger.info(f"  Models cached: {cache_stats.in_cache}")
-            logger.info(f"  Models cleared from cache: {cache_stats.cleared}")
-            logger.info(f"  Cache high water mark: {hwm:4.2f}/{tot:4.2f}G")
+        if len(to_prune) > 0:
+            logger.info(f"Pruned stale graph stats for {to_prune}.")

-            completed.add(graph_id)
+    def reset_stats(self, graph_execution_state_id: str):
+        try:
+            del self._stats[graph_execution_state_id]
+            del self._cache_stats[graph_execution_state_id]
+        except KeyError as e:
+            raise GESStatsNotFoundError(
+                f"Attempted to clear statistics for unknown graph {graph_execution_state_id}: {e}."
+            ) from e

-        for graph_id in completed:
-            del self._stats[graph_id]
-            del self._cache_stats[graph_id]
+    def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary:
+        graph_stats_summary = self._get_graph_summary(graph_execution_state_id)
+        node_stats_summaries = self._get_node_summaries(graph_execution_state_id)
+        model_cache_stats_summary = self._get_model_cache_summary(graph_execution_state_id)
+        vram_usage_gb = torch.cuda.memory_allocated() / GB if torch.cuda.is_available() else None

-        for graph_id in errored:
-            del self._stats[graph_id]
-            del self._cache_stats[graph_id]
+        return InvocationStatsSummary(
+            graph_stats=graph_stats_summary,
+            model_cache_stats=model_cache_stats_summary,
+            node_stats=node_stats_summaries,
+            vram_usage_gb=vram_usage_gb,
+        )
+
+    def log_stats(self, graph_execution_state_id: str) -> None:
+        stats = self.get_stats(graph_execution_state_id)
+        logger.info(str(stats))
+
+    def dump_stats(self, graph_execution_state_id: str, output_path: Path) -> None:
+        stats = self.get_stats(graph_execution_state_id)
+        with open(output_path, "w") as f:
+            f.write(json.dumps(stats.as_dict(), indent=2))
+
+    def _get_model_cache_summary(self, graph_execution_state_id: str) -> ModelCacheStatsSummary:
+        try:
+            cache_stats = self._cache_stats[graph_execution_state_id]
+        except KeyError as e:
+            raise GESStatsNotFoundError(
+                f"Attempted to get model cache statistics for unknown graph {graph_execution_state_id}: {e}."
+            ) from e
+
+        return ModelCacheStatsSummary(
+            cache_hits=cache_stats.hits,
+            cache_misses=cache_stats.misses,
+            high_water_mark_gb=cache_stats.high_watermark / GB,
+            cache_size_gb=cache_stats.cache_size / GB,
+            total_usage_gb=sum(list(cache_stats.loaded_model_sizes.values())) / GB,
+            models_cached=cache_stats.in_cache,
+            models_cleared=cache_stats.cleared,
+        )
+
+    def _get_graph_summary(self, graph_execution_state_id: str) -> GraphExecutionStatsSummary:
+        try:
+            graph_stats = self._stats[graph_execution_state_id]
+        except KeyError as e:
+            raise GESStatsNotFoundError(
+                f"Attempted to get graph statistics for unknown graph {graph_execution_state_id}: {e}."
+            ) from e
+
+        return graph_stats.get_graph_stats_summary(graph_execution_state_id)
+
+    def _get_node_summaries(self, graph_execution_state_id: str) -> list[NodeExecutionStatsSummary]:
+        try:
+            graph_stats = self._stats[graph_execution_state_id]
+        except KeyError as e:
+            raise GESStatsNotFoundError(
+                f"Attempted to get node statistics for unknown graph {graph_execution_state_id}: {e}."
+            ) from e
+
+        return graph_stats.get_node_stats_summaries()
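Because collect_stats is now a @contextmanager generator rather than a hand-rolled context-manager class, the processor wraps node execution in a plain with statement. Sketch; the invocation, context, and session id are stand-ins:

with stats_service.collect_stats(invocation, graph_execution_state_id):
    outputs = invocation.invoke(context)  # time, RAM and peak VRAM are recorded around this call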
@@ -1,10 +1,8 @@
from abc import ABC, abstractmethod
-from typing import Callable, Generic, Optional, TypeVar
+from typing import Callable, Generic, TypeVar

from pydantic import BaseModel

from invokeai.app.services.shared.pagination import PaginatedResults

T = TypeVar("T", bound=BaseModel)


@@ -22,26 +20,26 @@ class ItemStorageABC(ABC, Generic[T]):

    @abstractmethod
    def get(self, item_id: str) -> T:
-        """Gets the item, parsing it into a Pydantic model"""
-        pass
-
-    @abstractmethod
-    def get_raw(self, item_id: str) -> Optional[str]:
-        """Gets the raw item as a string, skipping Pydantic parsing"""
+        """
+        Gets the item.
+        :param item_id: the id of the item to get
+        :raises ItemNotFoundError: if the item is not found
+        """
        pass

    @abstractmethod
    def set(self, item: T) -> None:
-        """Sets the item"""
+        """
+        Sets the item. The id will be extracted based on id_field.
+        :param item: the item to set
+        """
        pass

    @abstractmethod
-    def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
-        """Gets a paginated list of items"""
-        pass
-
-    @abstractmethod
-    def search(self, query: str, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
+    def delete(self, item_id: str) -> None:
        """
        Deletes the item, if it exists.
        """
        pass

    def on_changed(self, on_changed: Callable[[T], None]) -> None:
@@ -0,0 +1,5 @@
+class ItemNotFoundError(KeyError):
+    """Raised when an item is not found in storage"""
+
+    def __init__(self, item_id: str) -> None:
+        super().__init__(f"Item with id {item_id} not found")
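With ItemNotFoundError in place, ItemStorageABC.get() raises instead of returning None, so callers switch from an Optional check to try/except. Sketch; item_storage stands in for any ItemStorageABC implementation:

try:
    item = item_storage.get("some-item-id")
except ItemNotFoundError:
    item = None  # or let the error propagate to the caller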
||||