mirror of
https://github.com/invoke-ai/InvokeAI.git
synced 2026-01-21 05:58:05 -05:00
Compare commits
1480 Commits
deploy/fix
...
feat/contr
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e4a45341c8 | ||
|
|
4ca325e8e6 | ||
|
|
6b8e88ad7f | ||
|
|
0497bea264 | ||
|
|
b8e32fa459 | ||
|
|
34ebee67b7 | ||
|
|
e0c998d192 | ||
|
|
b51e9a6bdb | ||
|
|
09f396ce84 | ||
|
|
abee37eab3 | ||
|
|
42e48b2bef | ||
|
|
70ece4364c | ||
|
|
f9d5f9d52c | ||
|
|
587297878a | ||
|
|
b4c998a9ae | ||
|
|
88e8e3977b | ||
|
|
24b86cffe9 | ||
|
|
a1773197e9 | ||
|
|
6c53abc034 | ||
|
|
eb7047b21d | ||
|
|
43419ac761 | ||
|
|
5cd0e90816 | ||
|
|
cfd49e3921 | ||
|
|
a8e0490133 | ||
|
|
1e08d865c9 | ||
|
|
f8bb650cc1 | ||
|
|
2cee8bebb2 | ||
|
|
ade4ec5fd8 | ||
|
|
70ffd6b03f | ||
|
|
6c551df311 | ||
|
|
24f605629e | ||
|
|
2af1ec9d02 | ||
|
|
79d53341de | ||
|
|
e40b3506c4 | ||
|
|
33912382e3 | ||
|
|
d282810e53 | ||
|
|
9df502fc77 | ||
|
|
705573f0a8 | ||
|
|
1878ea94f6 | ||
|
|
4ba5086b9a | ||
|
|
4a991b4daa | ||
|
|
80474d26f9 | ||
|
|
9a77bd9140 | ||
|
|
14cdc800c3 | ||
|
|
9cfbea4c25 | ||
|
|
5fe674e223 | ||
|
|
32200efce8 | ||
|
|
68a02da990 | ||
|
|
5b20766ea3 | ||
|
|
9a914250a0 | ||
|
|
0e3106f631 | ||
|
|
de3e6cdb02 | ||
|
|
8495764d45 | ||
|
|
8b7fac75ed | ||
|
|
9e0e26f4c4 | ||
|
|
46cac6468e | ||
|
|
2a814d886b | ||
|
|
60a2fbec41 | ||
|
|
f15a328b80 | ||
|
|
811d9ab55a | ||
|
|
e00fed5c46 | ||
|
|
a3fa38b353 | ||
|
|
2e42a4bdd9 | ||
|
|
36f72b5a49 | ||
|
|
af42d7d347 | ||
|
|
8607b1994c | ||
|
|
e051c450ed | ||
|
|
50135b726e | ||
|
|
fd715026a7 | ||
|
|
c647056287 | ||
|
|
30f20b55d5 | ||
|
|
1bca32ed16 | ||
|
|
7f91139e21 | ||
|
|
c53b7c7389 | ||
|
|
93f3658a4a | ||
|
|
68be95acbb | ||
|
|
813f79f0f9 | ||
|
|
c3ec86bc70 | ||
|
|
05a19753c6 | ||
|
|
a33327c651 | ||
|
|
6ad7cc4f2a | ||
|
|
c506355b8b | ||
|
|
d54168b8fb | ||
|
|
c91b071c47 | ||
|
|
9c57b18008 | ||
|
|
69539a0472 | ||
|
|
7bce455d16 | ||
|
|
3f45294c61 | ||
|
|
fd03c7eebe | ||
|
|
07c49a5726 | ||
|
|
8c688f8e29 | ||
|
|
3d13167d32 | ||
|
|
f2bb507ebb | ||
|
|
fe8f3381fc | ||
|
|
2a6d11e645 | ||
|
|
01f46d3c7d | ||
|
|
5f76b62553 | ||
|
|
4bbe3b0d00 | ||
|
|
9ed86a08f1 | ||
|
|
68405910ba | ||
|
|
0a50e2638c | ||
|
|
fc7c5da4dd | ||
|
|
a3357e073c | ||
|
|
d114833a12 | ||
|
|
96038bd075 | ||
|
|
2f383c2598 | ||
|
|
702a8d1f72 | ||
|
|
0a8390356f | ||
|
|
844058c0a5 | ||
|
|
7d74cbe29c | ||
|
|
62ac0ed2dc | ||
|
|
ae14adec2a | ||
|
|
6c2b39d1df | ||
|
|
0843028e6e | ||
|
|
de0fd87035 | ||
|
|
8b6c0be259 | ||
|
|
58fec84858 | ||
|
|
f223ad7776 | ||
|
|
00eabf630d | ||
|
|
6245a27650 | ||
|
|
fa1ac57c90 | ||
|
|
0f16b1c98d | ||
|
|
08e66c5451 | ||
|
|
563bf70c95 | ||
|
|
49d29420c4 | ||
|
|
ae9d0c6c1b | ||
|
|
d8d11f9bbb | ||
|
|
13fa0d3bc0 | ||
|
|
5eeb4b8e06 | ||
|
|
f5044c290d | ||
|
|
1b43276e5d | ||
|
|
294f086857 | ||
|
|
e5024bf5e9 | ||
|
|
79198b4bba | ||
|
|
1a2f0984db | ||
|
|
454683e6eb | ||
|
|
bbb2a08e8f | ||
|
|
bf116927e1 | ||
|
|
3d249c4fa3 | ||
|
|
fa338ddb6a | ||
|
|
b200451330 | ||
|
|
8283d23b74 | ||
|
|
2fc0a4d53b | ||
|
|
3ff732d583 | ||
|
|
840c632c0a | ||
|
|
40d6e4f287 | ||
|
|
fc5f9c30a6 | ||
|
|
229de2dbb8 | ||
|
|
cc22427f25 | ||
|
|
90333c0074 | ||
|
|
54e5301b35 | ||
|
|
b31fc43bfa | ||
|
|
9bcf0b2251 | ||
|
|
d4bc98c383 | ||
|
|
bc892c535c | ||
|
|
099e1e7c08 | ||
|
|
b1000e30c1 | ||
|
|
7bd94eac0e | ||
|
|
2c77563dcc | ||
|
|
603c9a587e | ||
|
|
1a5a2dfda9 | ||
|
|
090b7eeaf3 | ||
|
|
117536324c | ||
|
|
999c092b6a | ||
|
|
9e31b1f387 | ||
|
|
cb157ea530 | ||
|
|
5f6f38074d | ||
|
|
25b8dd340a | ||
|
|
fb06f5b892 | ||
|
|
1a7fb601dc | ||
|
|
cdcfda164d | ||
|
|
966b154a1f | ||
|
|
95fa66661c | ||
|
|
6247b79111 | ||
|
|
5831364f9c | ||
|
|
919b81cff1 | ||
|
|
065fff7db5 | ||
|
|
a664ee30a2 | ||
|
|
03f3ad435a | ||
|
|
2270c270ef | ||
|
|
4f7820719b | ||
|
|
fa285883ad | ||
|
|
474fca8e6a | ||
|
|
5dc0250b00 | ||
|
|
f269377a01 | ||
|
|
d0406024e3 | ||
|
|
aa3a969bd2 | ||
|
|
73a95973a8 | ||
|
|
bf4fe3c1ac | ||
|
|
d6c08ba469 | ||
|
|
69f0ba65f1 | ||
|
|
828c86964d | ||
|
|
54b7ddd63f | ||
|
|
a0dde66b5d | ||
|
|
b6b3b9f99c | ||
|
|
faa69f8a47 | ||
|
|
d92c7f5483 | ||
|
|
6b824eb112 | ||
|
|
72b4371804 | ||
|
|
fa290aff8d | ||
|
|
3d99d7ae8b | ||
|
|
2eb367969c | ||
|
|
9cdad95f48 | ||
|
|
707ed39300 | ||
|
|
6bbb5f061a | ||
|
|
6896e69e95 | ||
|
|
b17f4c1650 | ||
|
|
98493ed9e2 | ||
|
|
94c953deab | ||
|
|
fa4d88e163 | ||
|
|
b1e1e3efc7 | ||
|
|
3b9426eb72 | ||
|
|
e2e07696fc | ||
|
|
d6a959b000 | ||
|
|
c3935d3849 | ||
|
|
383e3d77cb | ||
|
|
31e97ead2a | ||
|
|
0b49995659 | ||
|
|
ff204db6b2 | ||
|
|
f74f3d6a3a | ||
|
|
713fb061e8 | ||
|
|
77b7680b32 | ||
|
|
ff63433591 | ||
|
|
31281d7181 | ||
|
|
72d1e4e404 | ||
|
|
91918e648b | ||
|
|
1390b65a9c | ||
|
|
82231369d3 | ||
|
|
7620bacc01 | ||
|
|
ea9cf04765 | ||
|
|
47301e6f85 | ||
|
|
f143fb7254 | ||
|
|
2bdb655375 | ||
|
|
41f7758977 | ||
|
|
8ae1eaaccc | ||
|
|
d66979073b | ||
|
|
c9e621093e | ||
|
|
e06ba40795 | ||
|
|
6571e4c2fd | ||
|
|
ff9240b51d | ||
|
|
18466e01fd | ||
|
|
e9821ab711 | ||
|
|
d6530df635 | ||
|
|
062b2cf46f | ||
|
|
082ecf6f25 | ||
|
|
1632ac6b9f | ||
|
|
877959b413 | ||
|
|
6e60f7517b | ||
|
|
296ee6b7ea | ||
|
|
7c7ffddb2b | ||
|
|
e1ae7842ff | ||
|
|
9687fe7bac | ||
|
|
a9a2bd90c2 | ||
|
|
47ca71a7eb | ||
|
|
a9c47237b1 | ||
|
|
33bbae2f47 | ||
|
|
fab7a1d337 | ||
|
|
cffcf80977 | ||
|
|
1a3fd05b81 | ||
|
|
c22c6ca135 | ||
|
|
3afb6a387f | ||
|
|
33e5ed7180 | ||
|
|
2067757fab | ||
|
|
b1b94a3d56 | ||
|
|
c9ee42450e | ||
|
|
10fe31c2a1 | ||
|
|
dc54cbb1fc | ||
|
|
070218aba7 | ||
|
|
f1c226b171 | ||
|
|
7004430380 | ||
|
|
1ddc620192 | ||
|
|
a7cebbd970 | ||
|
|
d97438b0b3 | ||
|
|
4522f3f4c9 | ||
|
|
6fe28980b0 | ||
|
|
4aec5d8ffc | ||
|
|
bbb4e8f5ef | ||
|
|
bce33ea62e | ||
|
|
e4705d5ce7 | ||
|
|
6764b2a854 | ||
|
|
970340cf62 | ||
|
|
043f9d9ba4 | ||
|
|
6f82801d07 | ||
|
|
3e3dd39ae4 | ||
|
|
89aa06e014 | ||
|
|
6cc00ef4b7 | ||
|
|
f31e62afad | ||
|
|
38fd2ad45d | ||
|
|
05b99b5377 | ||
|
|
08a14ee6d5 | ||
|
|
29fcc92da9 | ||
|
|
d78e3572e3 | ||
|
|
160267c71a | ||
|
|
fd47e70c92 | ||
|
|
9317b42e5f | ||
|
|
bdab73701f | ||
|
|
3ea5e78322 | ||
|
|
f609ee21a2 | ||
|
|
f51defeeb3 | ||
|
|
ee0225f4ba | ||
|
|
33a0af4637 | ||
|
|
d37b08a7dd | ||
|
|
9a796364da | ||
|
|
1ad4eb3a7b | ||
|
|
3767a453bb | ||
|
|
b0892d30a4 | ||
|
|
d9b1e4a98c | ||
|
|
a4dec8c1d6 | ||
|
|
8960ceb98b | ||
|
|
be79d088c0 | ||
|
|
009407ea3f | ||
|
|
6999d28c7f | ||
|
|
324e9eb74b | ||
|
|
56cff40362 | ||
|
|
2ba40c5e52 | ||
|
|
3ab147204c | ||
|
|
e4c89cba9c | ||
|
|
322ea84c4e | ||
|
|
f2b41c60ff | ||
|
|
754acec92f | ||
|
|
11fc7e40a5 | ||
|
|
d15bb88eb2 | ||
|
|
70ba36eefc | ||
|
|
7e70391c2b | ||
|
|
e2a94be336 | ||
|
|
63a86eefb4 | ||
|
|
b0727b9d47 | ||
|
|
d96e727dd5 | ||
|
|
fe480886dc | ||
|
|
8031d1827b | ||
|
|
b5acdb322d | ||
|
|
a4d1fe8819 | ||
|
|
10b7a58887 | ||
|
|
901a277959 | ||
|
|
aaa093bef1 | ||
|
|
bb96543d66 | ||
|
|
a2a2cfa765 | ||
|
|
18e6a2b410 | ||
|
|
db27263bc2 | ||
|
|
0e027ec3ef | ||
|
|
5acbbeecaa | ||
|
|
6ef2168b67 | ||
|
|
6d958a214c | ||
|
|
4ae4bf4ff9 | ||
|
|
fdef53b2de | ||
|
|
11bd038b9d | ||
|
|
768cfe3aab | ||
|
|
c4277b0662 | ||
|
|
020f3ccf07 | ||
|
|
7467fa5e57 | ||
|
|
e19ef7ed2f | ||
|
|
71003be6b8 | ||
|
|
c1dbafc2df | ||
|
|
dcebd71381 | ||
|
|
d855a65e73 | ||
|
|
a9007c7e0f | ||
|
|
af60304f97 | ||
|
|
6de241eead | ||
|
|
51032dc0b2 | ||
|
|
9ec3d2bc0c | ||
|
|
297931f5d9 | ||
|
|
f613c073c1 | ||
|
|
63d248622c | ||
|
|
48485fe92f | ||
|
|
07726af703 | ||
|
|
ad1004b485 | ||
|
|
0096fb2790 | ||
|
|
9c8c2e49d6 | ||
|
|
2005a96847 | ||
|
|
00a8d60c1b | ||
|
|
3aa182390a | ||
|
|
e44f1d6d4e | ||
|
|
dfdf8e2ead | ||
|
|
3a645c4e80 | ||
|
|
113129daf9 | ||
|
|
940e3b6635 | ||
|
|
7fb29dabff | ||
|
|
714ad6dbb8 | ||
|
|
c0863fa20f | ||
|
|
78b0b37ba6 | ||
|
|
5d5cdc7716 | ||
|
|
93cd818f6a | ||
|
|
598a628790 | ||
|
|
f3666eda63 | ||
|
|
754017b59e | ||
|
|
21251ce12c | ||
|
|
dc12fa6cd6 | ||
|
|
f2f4c37f19 | ||
|
|
0864fca641 | ||
|
|
5e4c0217c7 | ||
|
|
78cd106c23 | ||
|
|
6ed0efa938 | ||
|
|
ca0669c337 | ||
|
|
b59a749627 | ||
|
|
a91dee87d0 | ||
|
|
5ff98a4179 | ||
|
|
36b2f12219 | ||
|
|
5569f205ee | ||
|
|
a76cf8aab2 | ||
|
|
5c0f0d1808 | ||
|
|
951900a86a | ||
|
|
582f516fef | ||
|
|
a25bae2545 | ||
|
|
0ea35b1e3d | ||
|
|
c6f935bf1a | ||
|
|
96b4d35d43 | ||
|
|
7b0938e7e4 | ||
|
|
249522b568 | ||
|
|
39088e42cc | ||
|
|
30e0033ebe | ||
|
|
b599c40099 | ||
|
|
8f190169db | ||
|
|
1d4d705795 | ||
|
|
b3f71b3078 | ||
|
|
6059db4f15 | ||
|
|
0d5f44b153 | ||
|
|
17164a37a8 | ||
|
|
f88ccabe30 | ||
|
|
e1c85f1234 | ||
|
|
57a3eb3652 | ||
|
|
82a8972bde | ||
|
|
497a885c85 | ||
|
|
4d9f55d0f6 | ||
|
|
0c3b4bb70d | ||
|
|
33e13820fc | ||
|
|
43d991cfdb | ||
|
|
291e9cf14b | ||
|
|
a2de5c9963 | ||
|
|
5025f84627 | ||
|
|
d2c8a53c55 | ||
|
|
5659d10778 | ||
|
|
46cab81d6f | ||
|
|
dd157bce85 | ||
|
|
2f25dd7d0d | ||
|
|
e56965ad76 | ||
|
|
2273b3a8c8 | ||
|
|
05fb0ac2b2 | ||
|
|
d4acd49ee3 | ||
|
|
d98868e524 | ||
|
|
93bb27f2c7 | ||
|
|
a4c44edf8d | ||
|
|
1e94d7739a | ||
|
|
9110838fe4 | ||
|
|
ca7b267326 | ||
|
|
7f5992d6a5 | ||
|
|
88776fb2de | ||
|
|
34f567abd4 | ||
|
|
b87f3043ae | ||
|
|
3829ffbe66 | ||
|
|
ad619ae880 | ||
|
|
d22ebe08be | ||
|
|
ee0c6ad86e | ||
|
|
96adb56633 | ||
|
|
3000436121 | ||
|
|
37cdd91f5d | ||
|
|
6f3c6ddf3f | ||
|
|
0bfbda512d | ||
|
|
295b98a13c | ||
|
|
ff6b345d45 | ||
|
|
1fb307abf4 | ||
|
|
29c952dcf6 | ||
|
|
010f63a50d | ||
|
|
068bbe3a39 | ||
|
|
ad39680feb | ||
|
|
1e0ae8404c | ||
|
|
460d555a3d | ||
|
|
66ad04fcfc | ||
|
|
c7c0836721 | ||
|
|
d2c223de8f | ||
|
|
dd16f788ed | ||
|
|
b25c1af018 | ||
|
|
8f393b64b8 | ||
|
|
55b3193629 | ||
|
|
6f78c073ed | ||
|
|
c406be6f4f | ||
|
|
aeaf3737aa | ||
|
|
23d9d58c08 | ||
|
|
4c331a5d7e | ||
|
|
035425ef24 | ||
|
|
021e5a2aa3 | ||
|
|
7a1de3887e | ||
|
|
4a7a5234df | ||
|
|
6aebe1614d | ||
|
|
74292eba28 | ||
|
|
c31ff364ab | ||
|
|
f310a39381 | ||
|
|
5a7e611e0a | ||
|
|
4e29a751d8 | ||
|
|
3f94f81acd | ||
|
|
5de3c41d19 | ||
|
|
f071b03ceb | ||
|
|
b9375186a5 | ||
|
|
11bd932cba | ||
|
|
b77ccfaf32 | ||
|
|
96653eebb6 | ||
|
|
60d25f105f | ||
|
|
734b653a5f | ||
|
|
52c9e6ec91 | ||
|
|
c0f132e41a | ||
|
|
cc1160a43a | ||
|
|
adde8450bc | ||
|
|
5bf9891553 | ||
|
|
22c34c343a | ||
|
|
f7804f6126 | ||
|
|
d14b02e93f | ||
|
|
1b75d899ae | ||
|
|
d4aa79acd7 | ||
|
|
33d199c007 | ||
|
|
9c89d3452c | ||
|
|
fb0b63c580 | ||
|
|
bb2c6e5925 | ||
|
|
928caff2a6 | ||
|
|
670c79f2c7 | ||
|
|
d6efb98953 | ||
|
|
19da795274 | ||
|
|
454ba9b893 | ||
|
|
d2dc1ed26f | ||
|
|
d4fb16825e | ||
|
|
650d69ef5b | ||
|
|
ff0e79fa9a | ||
|
|
127b54f812 | ||
|
|
7025c00581 | ||
|
|
7ea995149e | ||
|
|
f9710dd6ed | ||
|
|
4e7dd7d3f6 | ||
|
|
20ca9e1fc1 | ||
|
|
8a8b09a953 | ||
|
|
9e4e386c9b | ||
|
|
eca1e449a8 | ||
|
|
ffaadb9d05 | ||
|
|
8adff96e29 | ||
|
|
7593dc19d6 | ||
|
|
b7c5a39685 | ||
|
|
bd1b84f7d0 | ||
|
|
eadfd239a8 | ||
|
|
8d75e50435 | ||
|
|
1d9c115225 | ||
|
|
30af20a056 | ||
|
|
cc21fb216c | ||
|
|
6fe62a2705 | ||
|
|
da87378713 | ||
|
|
b6f5267385 | ||
|
|
f9e78d3c64 | ||
|
|
b7b5bd1b46 | ||
|
|
9a3727d3ad | ||
|
|
d68c14516c | ||
|
|
9f4d39aa42 | ||
|
|
84b801d88f | ||
|
|
2fc70c509b | ||
|
|
34fb1c4b19 | ||
|
|
80bdd550cf | ||
|
|
7ef0d2aa35 | ||
|
|
2359b92b46 | ||
|
|
a404fb2d32 | ||
|
|
513eb11616 | ||
|
|
d2c9140e69 | ||
|
|
d95fe5925a | ||
|
|
835922ea8f | ||
|
|
e1e5266fc3 | ||
|
|
5e4457445f | ||
|
|
0221ca8f49 | ||
|
|
cf36e4029e | ||
|
|
c8a98a9a22 | ||
|
|
38ecca9362 | ||
|
|
c4681774a5 | ||
|
|
050add58d2 | ||
|
|
3d60c958c7 | ||
|
|
f5df150097 | ||
|
|
dac82adb5b | ||
|
|
b72c9787a9 | ||
|
|
2623941d91 | ||
|
|
d3a7fea939 | ||
|
|
5a7b687c84 | ||
|
|
0020457fc7 | ||
|
|
658b556544 | ||
|
|
37da0fc075 | ||
|
|
6d3e8507cc | ||
|
|
0e9470503f | ||
|
|
d2ebc6741b | ||
|
|
026d3260b4 | ||
|
|
1103ab2844 | ||
|
|
11b2076b46 | ||
|
|
78533714e3 | ||
|
|
691e1bf829 | ||
|
|
47a088d685 | ||
|
|
63db3fc22f | ||
|
|
ad0bb3f61a | ||
|
|
8f8cd90787 | ||
|
|
d796ea7bec | ||
|
|
e5b7dd63e9 | ||
|
|
af060188bd | ||
|
|
4270e7ae25 | ||
|
|
60a565d7de | ||
|
|
78cf70eaad | ||
|
|
eebaa50710 | ||
|
|
7d582553f2 | ||
|
|
4d6eea7e81 | ||
|
|
f44593331d | ||
|
|
3d9ecbf3c7 | ||
|
|
032aa1d59c | ||
|
|
35e0863bdb | ||
|
|
14070d674e | ||
|
|
108ce06c62 | ||
|
|
da364f3444 | ||
|
|
df5ba75c14 | ||
|
|
e4fb9cb33f | ||
|
|
65b527eb20 | ||
|
|
7dc9d18052 | ||
|
|
5013a4b9f3 | ||
|
|
f929359322 | ||
|
|
6522c71971 | ||
|
|
9c1e65f3a3 | ||
|
|
ebec200ba6 | ||
|
|
e559730b6e | ||
|
|
0acb8ed85d | ||
|
|
8c1c9cd702 | ||
|
|
0ece4686aa | ||
|
|
af95cef7f9 | ||
|
|
1eca7a918a | ||
|
|
9e6b958023 | ||
|
|
f7b99d93ae | ||
|
|
85d03dcd90 | ||
|
|
032555bcfe | ||
|
|
4caa1f19b2 | ||
|
|
95d4bd3012 | ||
|
|
037078c8ad | ||
|
|
6de2f66b50 | ||
|
|
cd7b248eda | ||
|
|
6d8c077f4e | ||
|
|
97127e560e | ||
|
|
27dc07d95a | ||
|
|
f7dc171c4f | ||
|
|
4b957edfec | ||
|
|
46ca7718d9 | ||
|
|
b928d7a6e6 | ||
|
|
8a836247c8 | ||
|
|
95c3644564 | ||
|
|
799cd07174 | ||
|
|
9af385468d | ||
|
|
3487388788 | ||
|
|
9a383e456d | ||
|
|
805f9f8f4a | ||
|
|
52aa0c9bbd | ||
|
|
7f5f4689cc | ||
|
|
a3f81f4b98 | ||
|
|
15c59e606f | ||
|
|
40d4cabecd | ||
|
|
3493c8119b | ||
|
|
c1e7460d39 | ||
|
|
3ffff023b2 | ||
|
|
f9384be59b | ||
|
|
6cf308004a | ||
|
|
d1029138d2 | ||
|
|
06b5800d28 | ||
|
|
483f2ccb56 | ||
|
|
93ced0bec6 | ||
|
|
4333852c37 | ||
|
|
3baa230077 | ||
|
|
9e594f9018 | ||
|
|
b0c41b4828 | ||
|
|
e0d6946b6b | ||
|
|
bf7ea8309f | ||
|
|
54b65f725f | ||
|
|
8ef49c2640 | ||
|
|
f488b1a7f2 | ||
|
|
d2edb7c402 | ||
|
|
f0a3f07b45 | ||
|
|
b42b630583 | ||
|
|
31a78d571b | ||
|
|
fdc2232ea0 | ||
|
|
e94d0b2d40 | ||
|
|
75ccbaee9c | ||
|
|
2848c8397c | ||
|
|
fe8b5193de | ||
|
|
3d1470399c | ||
|
|
fcf9c63049 | ||
|
|
7bfb5640ad | ||
|
|
15e57e3a3d | ||
|
|
279468c0e8 | ||
|
|
c565812723 | ||
|
|
ec6c8e2a38 | ||
|
|
77f2690711 | ||
|
|
c4b3a24ed7 | ||
|
|
33c69359c2 | ||
|
|
864f4bb4af | ||
|
|
5365f42a04 | ||
|
|
3dc60254b9 | ||
|
|
027a8562d7 | ||
|
|
34f3a0f0e3 | ||
|
|
d0bac1675e | ||
|
|
4e56c962f4 | ||
|
|
4ef0e43759 | ||
|
|
6945d10297 | ||
|
|
4d6cef7ac8 | ||
|
|
a7786d5ff2 | ||
|
|
6c1de975d9 | ||
|
|
a1079e455a | ||
|
|
5457c7f069 | ||
|
|
b8c1a3f96c | ||
|
|
cee8e85f76 | ||
|
|
09f166577e | ||
|
|
bcc21531fb | ||
|
|
da4eacdffe | ||
|
|
6102e560ba | ||
|
|
ff3aa57117 | ||
|
|
49db6f4fac | ||
|
|
20f6a597ab | ||
|
|
04c453721c | ||
|
|
350ffecc1f | ||
|
|
b0557aa16b | ||
|
|
1c9429a6ea | ||
|
|
206e6b1730 | ||
|
|
357cee2849 | ||
|
|
0b49997bb6 | ||
|
|
5e09dd380d | ||
|
|
c7303adb0d | ||
|
|
ed1f096a6f | ||
|
|
6ab5d28cf3 | ||
|
|
a75148cb16 | ||
|
|
f7bbc4004a | ||
|
|
cee21ca082 | ||
|
|
08ec12b391 | ||
|
|
ff5e2a9a8c | ||
|
|
e0b9b5cc6c | ||
|
|
aca4770481 | ||
|
|
5d5157fc65 | ||
|
|
fb6ef61a4d | ||
|
|
ee24ad7b13 | ||
|
|
f8e90ba3f0 | ||
|
|
ad0b70ca23 | ||
|
|
7dfa135b2c | ||
|
|
beeaa05658 | ||
|
|
6b6d654f60 | ||
|
|
853c83d0c2 | ||
|
|
1809990ed4 | ||
|
|
79d49853d2 | ||
|
|
1f608d3743 | ||
|
|
df024dd982 | ||
|
|
45da85765c | ||
|
|
bd0ad59c27 | ||
|
|
cce40acba5 | ||
|
|
bc9491ab69 | ||
|
|
f28632980d | ||
|
|
b909bac0dc | ||
|
|
8618e41b32 | ||
|
|
4687f94141 | ||
|
|
440912dcff | ||
|
|
8b87a26e7e | ||
|
|
44ae93df3e | ||
|
|
42d938fda5 | ||
|
|
8f80ba9520 | ||
|
|
25ce47c44f | ||
|
|
afd2e32092 | ||
|
|
2b213da967 | ||
|
|
e91e1eb9aa | ||
|
|
b24129fb3e | ||
|
|
350b1421bb | ||
|
|
f01c79a94f | ||
|
|
463f6352ce | ||
|
|
a80fe05e23 | ||
|
|
58d7833c5c | ||
|
|
5012f61599 | ||
|
|
85c33823c3 | ||
|
|
c83a112669 | ||
|
|
e04ada1319 | ||
|
|
d866dcb3d2 | ||
|
|
81ec476f3a | ||
|
|
1e6adf0a06 | ||
|
|
7d221e2518 | ||
|
|
742ed19d66 | ||
|
|
29c2ada23c | ||
|
|
e4196bbe5b | ||
|
|
15ffb53e59 | ||
|
|
90054ddf0d | ||
|
|
56d3cbead0 | ||
|
|
5e8c97f1ba | ||
|
|
4687ad4ed6 | ||
|
|
994b247f8e | ||
|
|
0419f50ab0 | ||
|
|
f9f40adcdc | ||
|
|
3264d30b44 | ||
|
|
4d885653e9 | ||
|
|
475b6bef53 | ||
|
|
d39de0ad38 | ||
|
|
d14a7d756e | ||
|
|
b050c1bb8f | ||
|
|
276dfc591b | ||
|
|
b49d76ebee | ||
|
|
a6be44789b | ||
|
|
a4313c26cb | ||
|
|
d4b250d509 | ||
|
|
29743a9e02 | ||
|
|
fecb77e344 | ||
|
|
779671753d | ||
|
|
d5e152b35e | ||
|
|
270657a62c | ||
|
|
3601b9c860 | ||
|
|
c8fe12cd91 | ||
|
|
deae5fbaec | ||
|
|
5b558af2b3 | ||
|
|
4150d5306f | ||
|
|
8c2e4700f9 | ||
|
|
adaecada20 | ||
|
|
258895bcc9 | ||
|
|
2eb7c25bae | ||
|
|
2e4e9434c1 | ||
|
|
0cad204e74 | ||
|
|
0bc2edc044 | ||
|
|
16488e7db8 | ||
|
|
974841926d | ||
|
|
8db20e0d95 | ||
|
|
d00d29d6b5 | ||
|
|
dc976cd665 | ||
|
|
6d6b986a66 | ||
|
|
bffdede0fa | ||
|
|
a4c258e9ec | ||
|
|
8d837558ac | ||
|
|
e673ed08ec | ||
|
|
f0e07bff5a | ||
|
|
3ec06a1fc3 | ||
|
|
6b79e2b407 | ||
|
|
0eed9dbc44 | ||
|
|
53c7832fd1 | ||
|
|
ca1cc0e2c2 | ||
|
|
5d8728c7ef | ||
|
|
a8cec4c7e6 | ||
|
|
2b5ccdc55f | ||
|
|
d92d5b5258 | ||
|
|
a591184d2a | ||
|
|
ee881e4c78 | ||
|
|
61fbb24e36 | ||
|
|
d582949488 | ||
|
|
de574eb4d9 | ||
|
|
bfd90968f1 | ||
|
|
4a924c9b54 | ||
|
|
0453d60c64 | ||
|
|
c4f4f8b1b8 | ||
|
|
3e80eaa342 | ||
|
|
00a0cb3403 | ||
|
|
ea93cad5ff | ||
|
|
4453a0d20d | ||
|
|
1e837e3c9d | ||
|
|
0f95f7cea3 | ||
|
|
0b0068ab86 | ||
|
|
31c7fa833e | ||
|
|
db16ca0079 | ||
|
|
a824f47bc6 | ||
|
|
99392debe8 | ||
|
|
0cc739afc8 | ||
|
|
0ab62b0343 | ||
|
|
75d25dd5cc | ||
|
|
2e54da13d8 | ||
|
|
f34f416bf5 | ||
|
|
021c63891d | ||
|
|
a968862e6b | ||
|
|
a08189d457 | ||
|
|
0a936696c3 | ||
|
|
55e33eaf4c | ||
|
|
3da5fb223f | ||
|
|
a3c5a664e5 | ||
|
|
b638fb2f30 | ||
|
|
c1b10b2222 | ||
|
|
bee29714d9 | ||
|
|
d40d5276dd | ||
|
|
568f0aad71 | ||
|
|
38474fa9d4 | ||
|
|
f7f974a28b | ||
|
|
3c150b384c | ||
|
|
65816049ba | ||
|
|
c1c881ded5 | ||
|
|
82c4dd8b86 | ||
|
|
711d09a107 | ||
|
|
74013b6611 | ||
|
|
790f399986 | ||
|
|
73cdd36594 | ||
|
|
50ac3eb28d | ||
|
|
d753cff91a | ||
|
|
89f1909e4b | ||
|
|
37916a22ad | ||
|
|
76e5d0595d | ||
|
|
f03cb8f134 | ||
|
|
c2a0e8afc3 | ||
|
|
31a904b903 | ||
|
|
c174cab3ee | ||
|
|
fe12938c23 | ||
|
|
4fa5c963a1 | ||
|
|
48ce256ba2 | ||
|
|
8cb2fa8600 | ||
|
|
8f460b92f1 | ||
|
|
d99a08a441 | ||
|
|
7555b1f876 | ||
|
|
a537231f19 | ||
|
|
8044d1b840 | ||
|
|
2b58ce4ae4 | ||
|
|
ef605cd76c | ||
|
|
a84b5b168f | ||
|
|
16f6ee04d0 | ||
|
|
44be057aa3 | ||
|
|
422f6967b2 | ||
|
|
4528cc8ba6 | ||
|
|
87e91ebc1d | ||
|
|
fd00d111ea | ||
|
|
b8dc9000bd | ||
|
|
58c1066765 | ||
|
|
37096a697b | ||
|
|
17d0920186 | ||
|
|
1e05538364 | ||
|
|
cf28617cd6 | ||
|
|
d0d8640711 | ||
|
|
e6158d1874 | ||
|
|
2e9d1ea8a3 | ||
|
|
59b0153236 | ||
|
|
9f8ff912c4 | ||
|
|
f0e4a2124a | ||
|
|
11ab5c7d56 | ||
|
|
3f334d9e5e | ||
|
|
ff891b1ff2 | ||
|
|
2914ee10b0 | ||
|
|
e29c2fb782 | ||
|
|
b763f1809e | ||
|
|
d26b44104a | ||
|
|
b73fd2a6d2 | ||
|
|
f258aba6d1 | ||
|
|
2e70848aa0 | ||
|
|
e973aeef0d | ||
|
|
50e1ac731d | ||
|
|
43addc1548 | ||
|
|
4901911c1a | ||
|
|
44a653925a | ||
|
|
94a07a8da7 | ||
|
|
ad41afe65e | ||
|
|
77fa7519c4 | ||
|
|
6e29148d4d | ||
|
|
3044f3bfe5 | ||
|
|
67a8627cf6 | ||
|
|
3fb433cb91 | ||
|
|
5f498e10bd | ||
|
|
fdad62e88b | ||
|
|
955c81acef | ||
|
|
e1058f3416 | ||
|
|
edf16a253d | ||
|
|
46f5ef4100 | ||
|
|
b843255236 | ||
|
|
3a968e5072 | ||
|
|
b164330e3c | ||
|
|
69433c9f68 | ||
|
|
bd8ffd36bf | ||
|
|
fd80e84ea6 | ||
|
|
4824237a98 | ||
|
|
2c9a05eb59 | ||
|
|
ecb5bdaf7e | ||
|
|
2feeb1f44c | ||
|
|
554f353773 | ||
|
|
f6cdff2c5b | ||
|
|
aee27e94c9 | ||
|
|
695893e1ac | ||
|
|
b800a8eb2e | ||
|
|
9749ef34b5 | ||
|
|
9a43362127 | ||
|
|
866024ea6c | ||
|
|
601cc1f92c | ||
|
|
d6a9a4464d | ||
|
|
dac271725a | ||
|
|
e1fbecfcf7 | ||
|
|
63d10027a4 | ||
|
|
ef0773b8a3 | ||
|
|
3daaddf15b | ||
|
|
570c3fe690 | ||
|
|
cbd1a7263a | ||
|
|
7fc5fbd4ce | ||
|
|
6f6de402ad | ||
|
|
2ec4f5af10 | ||
|
|
281662a6e1 | ||
|
|
2edd032ec7 | ||
|
|
50eb02f68b | ||
|
|
d73f3adc43 | ||
|
|
116107f464 | ||
|
|
da44bb1707 | ||
|
|
f43aed677e | ||
|
|
0d051aaae2 | ||
|
|
e4e48ff995 | ||
|
|
442a6bffa4 | ||
|
|
aab262d991 | ||
|
|
47b9910b48 | ||
|
|
0b0e6fe448 | ||
|
|
23d65e7162 | ||
|
|
024fd54d0b | ||
|
|
c44c19e911 | ||
|
|
c132dbdefa | ||
|
|
f3081e7013 | ||
|
|
f904f14f9e | ||
|
|
8917a6d99b | ||
|
|
5a4765046e | ||
|
|
d923d1d66b | ||
|
|
1f2c1e14db | ||
|
|
07e3a0ec15 | ||
|
|
427db7c7e2 | ||
|
|
dad3a7f263 | ||
|
|
5bd0bb637f | ||
|
|
f05095770c | ||
|
|
de189f2db6 | ||
|
|
cee159dfa3 | ||
|
|
4463124bdd | ||
|
|
34402cc46a | ||
|
|
54d9833db0 | ||
|
|
5fe8cb56fc | ||
|
|
7919d81fb1 | ||
|
|
9d80b28a4f | ||
|
|
1fcd91bcc5 | ||
|
|
e456e2e63a | ||
|
|
ee41b99049 | ||
|
|
111d674e71 | ||
|
|
8f048cfbd9 | ||
|
|
cd1b350dae | ||
|
|
8334757af9 | ||
|
|
7103ac6a32 | ||
|
|
f6b131e706 | ||
|
|
d1b2b99226 | ||
|
|
e356f2511b | ||
|
|
e5f8b22a43 | ||
|
|
45b84fb4bb | ||
|
|
f022c89249 | ||
|
|
ab05144716 | ||
|
|
aeb4914e67 | ||
|
|
76bcd4d44f | ||
|
|
50f5e1bc83 | ||
|
|
4c339dd4b0 | ||
|
|
bc2b9500e3 | ||
|
|
32857d81c5 | ||
|
|
7268131f57 | ||
|
|
85b020f76c | ||
|
|
a7833cc9a9 | ||
|
|
28f75d80d5 | ||
|
|
919294e977 | ||
|
|
b917ffa4d7 | ||
|
|
d44151d6ff | ||
|
|
7640acfb1f | ||
|
|
aed9ecef2a | ||
|
|
18cddd7972 | ||
|
|
e6b25f4ae3 | ||
|
|
d1c0050e65 | ||
|
|
ecdfa136a0 | ||
|
|
5cd513ee63 | ||
|
|
ab45086546 | ||
|
|
77ba7359f4 | ||
|
|
8cbe2e14d9 | ||
|
|
f682fb8040 | ||
|
|
ee86eedf01 | ||
|
|
1f89cf3343 | ||
|
|
c4e6511a59 | ||
|
|
44843be4c8 | ||
|
|
054e963bef | ||
|
|
afb66a7884 | ||
|
|
b9df9e26f2 | ||
|
|
25ae36ceb5 | ||
|
|
3ae8daedaa | ||
|
|
e11c1d66ab | ||
|
|
b913e1e11e | ||
|
|
3c4b6d5735 | ||
|
|
e6123eac19 | ||
|
|
30ca25897e | ||
|
|
abaee6b9ed | ||
|
|
4d7c9e1ab7 | ||
|
|
cc5687f26c | ||
|
|
cdb3616dca | ||
|
|
78e76f26f9 | ||
|
|
9a7580dedd | ||
|
|
dc2da8cff4 | ||
|
|
019a9f0329 | ||
|
|
fe5d9ad171 | ||
|
|
dbc0093b31 | ||
|
|
92e512b8b6 | ||
|
|
abe4dc8ac1 | ||
|
|
dc14701d20 | ||
|
|
737e0f3085 | ||
|
|
81b7ea4362 | ||
|
|
09dfde0ba1 | ||
|
|
3ba7e966b5 | ||
|
|
a1cd4834d1 | ||
|
|
a724038dc6 | ||
|
|
4221cf7731 | ||
|
|
c34ac91ff0 | ||
|
|
5fe38f7c88 | ||
|
|
bd7e515290 | ||
|
|
076fac07eb | ||
|
|
9348161600 | ||
|
|
dac3c158a5 | ||
|
|
17d8bbf330 | ||
|
|
9344687a56 | ||
|
|
cf534d735c | ||
|
|
501924bc60 | ||
|
|
d117251747 | ||
|
|
6ea61a8486 | ||
|
|
e4d903af20 | ||
|
|
2d9797da35 | ||
|
|
07ea806553 | ||
|
|
5ac0316c62 | ||
|
|
9536ba22af | ||
|
|
5503749085 | ||
|
|
9bfe2fa371 | ||
|
|
d8ce6e4426 | ||
|
|
43d2d6d98c | ||
|
|
64c233efd4 | ||
|
|
2245a4e117 | ||
|
|
9ceec40b76 | ||
|
|
0f13b90059 | ||
|
|
d91fc16ae4 | ||
|
|
bc01a96f9d | ||
|
|
85b2822f5e | ||
|
|
c33d8694bb | ||
|
|
685bd027f0 | ||
|
|
f592d620d5 | ||
|
|
2b127b73ac | ||
|
|
8855902cfe | ||
|
|
9d8ddc6a08 | ||
|
|
4ca5189e73 | ||
|
|
873597cb84 | ||
|
|
44d742f232 | ||
|
|
6e7dbf99f3 | ||
|
|
1ba1076888 | ||
|
|
cafa108f69 | ||
|
|
deeff36e16 | ||
|
|
d770b14358 | ||
|
|
20414ba4ad | ||
|
|
92721a1d45 | ||
|
|
f329fddab9 | ||
|
|
f2efde27f6 | ||
|
|
02c58f22be | ||
|
|
f751dcd245 | ||
|
|
a97107bd90 | ||
|
|
b2ce45a417 | ||
|
|
4e0b5d85ba | ||
|
|
a958ae5e29 | ||
|
|
4d50fbf8dc | ||
|
|
485f6e5954 | ||
|
|
1f6ce838ba | ||
|
|
0dc5773849 | ||
|
|
bc347f749c | ||
|
|
1b215059e7 | ||
|
|
db079a2733 | ||
|
|
26f71d3536 | ||
|
|
eb7ae2588c | ||
|
|
278c14ba2e | ||
|
|
74e83dda54 | ||
|
|
28c1fca477 | ||
|
|
1f0324102a | ||
|
|
a782ad092d | ||
|
|
eae4eb419a | ||
|
|
fb7f38f46e | ||
|
|
93d0cae455 | ||
|
|
35f6b5d562 | ||
|
|
2aefa06ef1 | ||
|
|
5906888477 | ||
|
|
f22c7d0da6 | ||
|
|
93b38707b2 | ||
|
|
6ecf53078f | ||
|
|
9c93b7cb59 | ||
|
|
7789e8319c | ||
|
|
7d7a28beb3 | ||
|
|
27a113d872 | ||
|
|
67f8f222d9 | ||
|
|
5347c12fed | ||
|
|
b194180f76 | ||
|
|
fb30b7d17a | ||
|
|
c341dcaa3d | ||
|
|
b695a2574b | ||
|
|
aa68a326c8 | ||
|
|
c2922d5991 | ||
|
|
85888030c3 | ||
|
|
7cf59c1e60 | ||
|
|
9738b0ff69 | ||
|
|
3021c78390 | ||
|
|
6eeaf8d9fb | ||
|
|
fa9afec0c2 | ||
|
|
d6862bf8c1 | ||
|
|
de01c38bbe | ||
|
|
7e811908e0 | ||
|
|
5f59f24f92 | ||
|
|
e414fcf3fb | ||
|
|
079ad8f35a | ||
|
|
a4d7e0c78e | ||
|
|
e9c2f173c5 | ||
|
|
44f489d581 | ||
|
|
cb48bbd806 | ||
|
|
0a761d7c43 | ||
|
|
a0f47aa72e | ||
|
|
f9abc6fc85 | ||
|
|
d840c597b5 | ||
|
|
3ca654d256 | ||
|
|
e0e01f6c50 | ||
|
|
d9dab1b6c7 | ||
|
|
3b2ef6e1a8 | ||
|
|
c125a3871a | ||
|
|
0996bd5acf | ||
|
|
ea77d557da | ||
|
|
1b01161ea4 | ||
|
|
2230cb9562 | ||
|
|
9e0c7c46a2 | ||
|
|
be305588d3 | ||
|
|
9f994df814 | ||
|
|
3062580006 | ||
|
|
596ba754b1 | ||
|
|
b980e563b9 | ||
|
|
7fe2606cb3 | ||
|
|
0c3b1fe3c4 | ||
|
|
c9ee2e351c | ||
|
|
e3aef20f42 | ||
|
|
60614badaf | ||
|
|
288cee9611 | ||
|
|
24aca37538 | ||
|
|
b853ceea65 | ||
|
|
3ee2798ede | ||
|
|
5c5106c14a | ||
|
|
c367b21c71 | ||
|
|
2eef6df66a | ||
|
|
300aa8d86c | ||
|
|
727f1638d7 | ||
|
|
ee6df5852a | ||
|
|
90525b1c43 | ||
|
|
bbb95dbc5b | ||
|
|
f4b7f80d59 | ||
|
|
220f7373c8 | ||
|
|
4bb5785f29 | ||
|
|
f9a7a7d161 | ||
|
|
de94c780d9 | ||
|
|
0b9230380c | ||
|
|
209a55b681 | ||
|
|
dc2f69f5d1 | ||
|
|
ad2f1b7b36 | ||
|
|
dd2d96a50f | ||
|
|
2bff28e305 | ||
|
|
d68234d879 | ||
|
|
b3babf26a5 | ||
|
|
ecca0eff31 | ||
|
|
28677f9621 | ||
|
|
caecfadf11 | ||
|
|
5cf8e3aa53 | ||
|
|
76cf2c61db | ||
|
|
b4d976f2db | ||
|
|
777d127c74 | ||
|
|
0678803803 | ||
|
|
d2fbc9f5e3 | ||
|
|
d81088dff7 | ||
|
|
1aaad9336f | ||
|
|
1f3c024d9d | ||
|
|
74a480f94e | ||
|
|
c6e8d3269c | ||
|
|
dcb5a3a740 | ||
|
|
c0ef546b02 | ||
|
|
7a78a83651 | ||
|
|
10cbf99310 | ||
|
|
b63aefcda9 | ||
|
|
6a77634b34 | ||
|
|
8ca91b1774 | ||
|
|
1c9d9e79d5 | ||
|
|
3aa1ee1218 | ||
|
|
06aa5a8120 | ||
|
|
580f9ecded | ||
|
|
270032670a | ||
|
|
4f056cdb55 | ||
|
|
c14241436b | ||
|
|
50b56d6088 | ||
|
|
8ec2ae7954 | ||
|
|
40d82b29cf | ||
|
|
0b953d98f5 | ||
|
|
8833d76709 | ||
|
|
027b316fd2 | ||
|
|
d612f11c11 | ||
|
|
250b0ab182 | ||
|
|
675dd12b6c | ||
|
|
7e76eea059 | ||
|
|
f45483e519 | ||
|
|
65047bf976 | ||
|
|
d586a82a53 | ||
|
|
28709961e9 | ||
|
|
e9f237f39d | ||
|
|
4156bfd810 | ||
|
|
fe75b95464 | ||
|
|
95954188b2 | ||
|
|
63f59201f8 | ||
|
|
370e8281b3 | ||
|
|
685df33584 | ||
|
|
4332c9c7a6 | ||
|
|
4a00f1cc74 | ||
|
|
7ff77504cb | ||
|
|
0d1854e44a | ||
|
|
fe6858f2d9 | ||
|
|
12c7db3a16 | ||
|
|
3ecdec02bf | ||
|
|
d6c24d59b0 | ||
|
|
bb3d1bb6cb | ||
|
|
14c8738a71 | ||
|
|
1a829bb998 | ||
|
|
9d339e94f2 | ||
|
|
ad7b1fa6fb | ||
|
|
42355b70c2 | ||
|
|
faa2558e2f | ||
|
|
081397737b | ||
|
|
55d36eaf4f | ||
|
|
26cd1728ac | ||
|
|
a0065da4a4 | ||
|
|
c11e823ff3 | ||
|
|
197e50a298 | ||
|
|
507e12520e | ||
|
|
2cc04de397 | ||
|
|
f4150a7829 | ||
|
|
5418bd3b24 | ||
|
|
76d5fa4694 | ||
|
|
386dda8233 | ||
|
|
8076c1697c | ||
|
|
65fc9a6e0e | ||
|
|
cde0b6ae8d | ||
|
|
b12760b976 | ||
|
|
b679a6ba37 | ||
|
|
2f5f08c35d | ||
|
|
8f48c14ed4 | ||
|
|
5d37fa6e36 | ||
|
|
f51581bd1b | ||
|
|
50ca6b6ffc | ||
|
|
63b9ec4c5e | ||
|
|
b115bc4247 | ||
|
|
dadc30f795 | ||
|
|
111d8391e2 | ||
|
|
1157b454b2 | ||
|
|
8a6473610b | ||
|
|
ea7911be89 | ||
|
|
9ee648e0c3 | ||
|
|
543682fd3b | ||
|
|
88cb63e4a1 | ||
|
|
76212d1cca | ||
|
|
a8df9e5122 | ||
|
|
2db180d909 | ||
|
|
b716fe8f06 | ||
|
|
69e2dc0404 | ||
|
|
a38b75572f | ||
|
|
e18de761b6 | ||
|
|
816ea39827 | ||
|
|
1cd4cdd0e5 | ||
|
|
768e969c90 | ||
|
|
57db66634d | ||
|
|
87789c1de8 | ||
|
|
c3c1511ec6 | ||
|
|
6b41127421 | ||
|
|
d232a439f7 | ||
|
|
c04f21e83e | ||
|
|
8762069b37 | ||
|
|
d9ebdd2684 | ||
|
|
3e4c10ef9c | ||
|
|
17eb2ca5a2 | ||
|
|
63725d7534 | ||
|
|
00f30ea457 | ||
|
|
1b2a3c7144 | ||
|
|
01a1777370 | ||
|
|
32945c7f45 | ||
|
|
b0b8846430 | ||
|
|
fdb146a43a | ||
|
|
42c1f1fc9d | ||
|
|
89a8ef86b5 | ||
|
|
f0fb767f57 | ||
|
|
4bd93464bf | ||
|
|
3d3de82ca9 | ||
|
|
c3ff9e6be8 | ||
|
|
21f79e5919 | ||
|
|
0342e25c74 | ||
|
|
91f982fb0b | ||
|
|
b9ab43a4bb | ||
|
|
6e0e48bf8a | ||
|
|
dcc8313dbf | ||
|
|
bf5831faa3 | ||
|
|
5eff035f55 | ||
|
|
7c60068388 | ||
|
|
d843fb078a | ||
|
|
41b2e4633f | ||
|
|
57144ac0cf | ||
|
|
a305b6adbf | ||
|
|
94daaa4abf | ||
|
|
901337186d | ||
|
|
7e2f64f60b | ||
|
|
126cba2324 | ||
|
|
2f9dcd7906 | ||
|
|
e537b5d8e1 | ||
|
|
e0e70c9222 | ||
|
|
1b21e5df54 | ||
|
|
4b76af37ae | ||
|
|
486c445afb | ||
|
|
4547c48013 | ||
|
|
8f21201c91 | ||
|
|
532b74a206 | ||
|
|
0b184913b9 | ||
|
|
97719e40e4 | ||
|
|
5ad3062b66 | ||
|
|
92d012a92d | ||
|
|
fc187f263e | ||
|
|
fd94f85abe | ||
|
|
4e9e1b660d | ||
|
|
d01adedff5 | ||
|
|
c247f430f7 | ||
|
|
3d6a358042 | ||
|
|
4d1dcd11de | ||
|
|
b33655b0d6 | ||
|
|
81dee04dc9 | ||
|
|
114018e3e6 | ||
|
|
ef8cf83b28 | ||
|
|
633857b0e3 | ||
|
|
214574d11f | ||
|
|
8584665ade | ||
|
|
516c56d0c5 | ||
|
|
5891b43ce2 | ||
|
|
62e75f95aa | ||
|
|
b07621e27e | ||
|
|
545d8968fd | ||
|
|
7cf2f58513 | ||
|
|
618e3e5e91 | ||
|
|
c703b60986 | ||
|
|
7c0ce5c282 | ||
|
|
82fe34b1f7 | ||
|
|
65f9aae81d | ||
|
|
2d9fac23e7 | ||
|
|
ebc4b52f41 | ||
|
|
c4e6d4b348 | ||
|
|
eab32bce6c | ||
|
|
55d2094094 | ||
|
|
a0d50a2b23 | ||
|
|
9efeb1b2ec | ||
|
|
86e2cb0428 | ||
|
|
53c2c0f91d | ||
|
|
bdc7b8b75a | ||
|
|
1bfdd54810 | ||
|
|
b4bf6c12a5 | ||
|
|
ab35c241c2 | ||
|
|
b3dccfaeb6 | ||
|
|
6477e31c1e | ||
|
|
dd4a1c998b | ||
|
|
70203e6e5a | ||
|
|
d778a7c5ca | ||
|
|
f8e59636cd | ||
|
|
2d1a0b0a05 | ||
|
|
c9b2234d90 | ||
|
|
82b224539b | ||
|
|
0b15ffb95b | ||
|
|
ce9aaab22f | ||
|
|
3f53f1186d | ||
|
|
c0aff396d2 | ||
|
|
955900507f | ||
|
|
d606abc544 | ||
|
|
44400d2a66 | ||
|
|
60a98cacef | ||
|
|
6a990565ff | ||
|
|
3f0b0f3250 | ||
|
|
1a7371ea17 | ||
|
|
850d1ee984 | ||
|
|
2c7928b163 | ||
|
|
87d1ec6a4c | ||
|
|
53c62537f7 | ||
|
|
418d93fdfd | ||
|
|
f2ce2f1778 | ||
|
|
5b6c61fc75 | ||
|
|
1d77581d96 | ||
|
|
3b921cf393 | ||
|
|
d334f7f1f6 | ||
|
|
8c9764476c | ||
|
|
b7d5a3e0b5 | ||
|
|
e0405031a7 | ||
|
|
ee24b686b3 | ||
|
|
835eb14c79 | ||
|
|
9aadf7abc1 | ||
|
|
243f9e8377 | ||
|
|
6e0c6d9cc9 | ||
|
|
a3076cf951 | ||
|
|
6696882c71 | ||
|
|
17b039e85d | ||
|
|
81539e6ab4 | ||
|
|
92304b9f8a | ||
|
|
ec1de5ae8b | ||
|
|
49198a61ef | ||
|
|
8c5773abc1 | ||
|
|
01f8c37bd3 | ||
|
|
b7718985d5 | ||
|
|
90cda11868 | ||
|
|
5cb877e096 |
@@ -1,6 +0,0 @@
|
|||||||
[run]
|
|
||||||
omit='.env/*'
|
|
||||||
source='.'
|
|
||||||
|
|
||||||
[report]
|
|
||||||
show_missing = true
|
|
||||||
@@ -4,22 +4,22 @@
|
|||||||
!ldm
|
!ldm
|
||||||
!pyproject.toml
|
!pyproject.toml
|
||||||
|
|
||||||
# Guard against pulling in any models that might exist in the directory tree
|
# ignore frontend/web but whitelist dist
|
||||||
**/*.pt*
|
invokeai/frontend/web/
|
||||||
**/*.ckpt
|
!invokeai/frontend/web/dist/
|
||||||
|
|
||||||
# ignore frontend but whitelist dist
|
|
||||||
invokeai/frontend/
|
|
||||||
!invokeai/frontend/dist/
|
|
||||||
|
|
||||||
# ignore invokeai/assets but whitelist invokeai/assets/web
|
# ignore invokeai/assets but whitelist invokeai/assets/web
|
||||||
invokeai/assets/
|
invokeai/assets/
|
||||||
!invokeai/assets/web/
|
!invokeai/assets/web/
|
||||||
|
|
||||||
|
# Guard against pulling in any models that might exist in the directory tree
|
||||||
|
**/*.pt*
|
||||||
|
**/*.ckpt
|
||||||
|
|
||||||
# Byte-compiled / optimized / DLL files
|
# Byte-compiled / optimized / DLL files
|
||||||
**/__pycache__/
|
**/__pycache__/
|
||||||
**/*.py[cod]
|
**/*.py[cod]
|
||||||
|
|
||||||
# Distribution / packaging
|
# Distribution / packaging
|
||||||
*.egg-info/
|
**/*.egg-info/
|
||||||
*.egg
|
**/*.egg
|
||||||
|
|||||||
1
.git-blame-ignore-revs
Normal file
1
.git-blame-ignore-revs
Normal file
@@ -0,0 +1 @@
|
|||||||
|
b3dccfaeb636599c02effc377cdd8a87d658256c
|
||||||
63
.github/CODEOWNERS
vendored
63
.github/CODEOWNERS
vendored
@@ -1,51 +1,34 @@
|
|||||||
# continuous integration
|
# continuous integration
|
||||||
/.github/workflows/ @mauwii @lstein @blessedcoolant
|
/.github/workflows/ @lstein @blessedcoolant
|
||||||
|
|
||||||
# documentation
|
# documentation
|
||||||
/docs/ @lstein @mauwii @tildebyte @blessedcoolant
|
/docs/ @lstein @blessedcoolant @hipsterusername
|
||||||
mkdocs.yml @lstein @mauwii @blessedcoolant
|
/mkdocs.yml @lstein @blessedcoolant
|
||||||
|
|
||||||
|
# nodes
|
||||||
|
/invokeai/app/ @Kyle0654 @blessedcoolant
|
||||||
|
|
||||||
# installation and configuration
|
# installation and configuration
|
||||||
/pyproject.toml @mauwii @lstein @ebr @blessedcoolant
|
/pyproject.toml @lstein @blessedcoolant
|
||||||
/docker/ @mauwii @lstein @blessedcoolant
|
/docker/ @lstein @blessedcoolant
|
||||||
/scripts/ @ebr @lstein @blessedcoolant
|
/scripts/ @ebr @lstein
|
||||||
/installer/ @ebr @lstein @tildebyte @blessedcoolant
|
/installer/ @lstein @ebr
|
||||||
ldm/invoke/config @lstein @ebr @blessedcoolant
|
/invokeai/assets @lstein @ebr
|
||||||
invokeai/assets @lstein @ebr @blessedcoolant
|
/invokeai/configs @lstein
|
||||||
invokeai/configs @lstein @ebr @blessedcoolant
|
/invokeai/version @lstein @blessedcoolant
|
||||||
/ldm/invoke/_version.py @lstein @blessedcoolant
|
|
||||||
|
|
||||||
# web ui
|
# web ui
|
||||||
/invokeai/frontend @blessedcoolant @psychedelicious @lstein
|
/invokeai/frontend @blessedcoolant @psychedelicious @lstein @maryhipp
|
||||||
/invokeai/backend @blessedcoolant @psychedelicious @lstein
|
/invokeai/backend @blessedcoolant @psychedelicious @lstein @maryhipp
|
||||||
|
|
||||||
# generation and model management
|
# generation, model management, postprocessing
|
||||||
/ldm/*.py @lstein @blessedcoolant
|
/invokeai/backend @damian0815 @lstein @blessedcoolant @jpphoto @gregghelt2 @StAlKeR7779
|
||||||
/ldm/generate.py @lstein @keturn @blessedcoolant
|
|
||||||
/ldm/invoke/args.py @lstein @blessedcoolant
|
|
||||||
/ldm/invoke/ckpt* @lstein @blessedcoolant
|
|
||||||
/ldm/invoke/ckpt_generator @lstein @blessedcoolant
|
|
||||||
/ldm/invoke/CLI.py @lstein @blessedcoolant
|
|
||||||
/ldm/invoke/config @lstein @ebr @mauwii @blessedcoolant
|
|
||||||
/ldm/invoke/generator @keturn @damian0815 @blessedcoolant
|
|
||||||
/ldm/invoke/globals.py @lstein @blessedcoolant
|
|
||||||
/ldm/invoke/merge_diffusers.py @lstein @blessedcoolant
|
|
||||||
/ldm/invoke/model_manager.py @lstein @blessedcoolant
|
|
||||||
/ldm/invoke/txt2mask.py @lstein @blessedcoolant
|
|
||||||
/ldm/invoke/patchmatch.py @Kyle0654 @blessedcoolant @lstein
|
|
||||||
/ldm/invoke/restoration @lstein @blessedcoolant
|
|
||||||
|
|
||||||
# attention, textual inversion, model configuration
|
# front ends
|
||||||
/ldm/models @damian0815 @keturn @lstein @blessedcoolant
|
/invokeai/frontend/CLI @lstein
|
||||||
/ldm/modules @damian0815 @keturn @lstein @blessedcoolant
|
/invokeai/frontend/install @lstein @ebr
|
||||||
|
/invokeai/frontend/merge @lstein @blessedcoolant
|
||||||
|
/invokeai/frontend/training @lstein @blessedcoolant
|
||||||
|
/invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp
|
||||||
|
|
||||||
# Nodes
|
|
||||||
apps/ @Kyle0654 @lstein @blessedcoolant
|
|
||||||
|
|
||||||
# legacy REST API
|
|
||||||
# is CapableWeb still engaged?
|
|
||||||
/ldm/invoke/pngwriter.py @CapableWeb @lstein @blessedcoolant
|
|
||||||
/ldm/invoke/server_legacy.py @CapableWeb @lstein @blessedcoolant
|
|
||||||
/scripts/legacy_api.py @CapableWeb @lstein @blessedcoolant
|
|
||||||
/tests/legacy_tests.sh @CapableWeb @lstein @blessedcoolant
|
|
||||||
|
|
||||||
|
|||||||
10
.github/ISSUE_TEMPLATE/BUG_REPORT.yml
vendored
10
.github/ISSUE_TEMPLATE/BUG_REPORT.yml
vendored
@@ -65,6 +65,16 @@ body:
|
|||||||
placeholder: 8GB
|
placeholder: 8GB
|
||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
|
|
||||||
|
- type: input
|
||||||
|
id: version-number
|
||||||
|
attributes:
|
||||||
|
label: What version did you experience this issue on?
|
||||||
|
description: |
|
||||||
|
Please share the version of Invoke AI that you experienced the issue on. If this is not the latest version, please update first to confirm the issue still exists. If you are testing main, please include the commit hash instead.
|
||||||
|
placeholder: X.X.X
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: what-happened
|
id: what-happened
|
||||||
|
|||||||
19
.github/stale.yaml
vendored
Normal file
19
.github/stale.yaml
vendored
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
# Number of days of inactivity before an issue becomes stale
|
||||||
|
daysUntilStale: 28
|
||||||
|
# Number of days of inactivity before a stale issue is closed
|
||||||
|
daysUntilClose: 14
|
||||||
|
# Issues with these labels will never be considered stale
|
||||||
|
exemptLabels:
|
||||||
|
- pinned
|
||||||
|
- security
|
||||||
|
# Label to use when marking an issue as stale
|
||||||
|
staleLabel: stale
|
||||||
|
# Comment to post when marking an issue as stale. Set to `false` to disable
|
||||||
|
markComment: >
|
||||||
|
This issue has been automatically marked as stale because it has not had
|
||||||
|
recent activity. It will be closed if no further activity occurs. Please
|
||||||
|
update the ticket if this is still a problem on the latest release.
|
||||||
|
# Comment to post when closing a stale issue. Set to `false` to disable
|
||||||
|
closeComment: >
|
||||||
|
Due to inactivity, this issue has been automatically closed. If this is
|
||||||
|
still a problem on the latest release, please recreate the issue.
|
||||||
23
.github/workflows/build-container.yml
vendored
23
.github/workflows/build-container.yml
vendored
@@ -5,17 +5,20 @@ on:
|
|||||||
- 'main'
|
- 'main'
|
||||||
- 'update/ci/docker/*'
|
- 'update/ci/docker/*'
|
||||||
- 'update/docker/*'
|
- 'update/docker/*'
|
||||||
|
- 'dev/ci/docker/*'
|
||||||
|
- 'dev/docker/*'
|
||||||
paths:
|
paths:
|
||||||
- 'pyproject.toml'
|
- 'pyproject.toml'
|
||||||
- 'ldm/**'
|
- '.dockerignore'
|
||||||
- 'invokeai/backend/**'
|
- 'invokeai/**'
|
||||||
- 'invokeai/configs/**'
|
|
||||||
- 'invokeai/frontend/dist/**'
|
|
||||||
- 'docker/Dockerfile'
|
- 'docker/Dockerfile'
|
||||||
tags:
|
tags:
|
||||||
- 'v*.*.*'
|
- 'v*.*.*'
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
packages: write
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
docker:
|
docker:
|
||||||
@@ -24,11 +27,11 @@ jobs:
|
|||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
flavor:
|
flavor:
|
||||||
- amd
|
- rocm
|
||||||
- cuda
|
- cuda
|
||||||
- cpu
|
- cpu
|
||||||
include:
|
include:
|
||||||
- flavor: amd
|
- flavor: rocm
|
||||||
pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
|
pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
|
||||||
- flavor: cuda
|
- flavor: cuda
|
||||||
pip-extra-index-url: ''
|
pip-extra-index-url: ''
|
||||||
@@ -54,9 +57,9 @@ jobs:
|
|||||||
tags: |
|
tags: |
|
||||||
type=ref,event=branch
|
type=ref,event=branch
|
||||||
type=ref,event=tag
|
type=ref,event=tag
|
||||||
type=semver,pattern={{version}}
|
type=pep440,pattern={{version}}
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
type=pep440,pattern={{major}}.{{minor}}
|
||||||
type=semver,pattern={{major}}
|
type=pep440,pattern={{major}}
|
||||||
type=sha,enable=true,prefix=sha-,format=short
|
type=sha,enable=true,prefix=sha-,format=short
|
||||||
flavor: |
|
flavor: |
|
||||||
latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
|
latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
|
||||||
@@ -92,7 +95,7 @@ jobs:
|
|||||||
context: .
|
context: .
|
||||||
file: ${{ env.DOCKERFILE }}
|
file: ${{ env.DOCKERFILE }}
|
||||||
platforms: ${{ env.PLATFORMS }}
|
platforms: ${{ env.PLATFORMS }}
|
||||||
push: ${{ github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' }}
|
push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' }}
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
labels: ${{ steps.meta.outputs.labels }}
|
||||||
build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
|
build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
|
||||||
|
|||||||
27
.github/workflows/close-inactive-issues.yml
vendored
Normal file
27
.github/workflows/close-inactive-issues.yml
vendored
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
name: Close inactive issues
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
- cron: "00 6 * * *"
|
||||||
|
|
||||||
|
env:
|
||||||
|
DAYS_BEFORE_ISSUE_STALE: 14
|
||||||
|
DAYS_BEFORE_ISSUE_CLOSE: 28
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
close-issues:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
issues: write
|
||||||
|
pull-requests: write
|
||||||
|
steps:
|
||||||
|
- uses: actions/stale@v5
|
||||||
|
with:
|
||||||
|
days-before-issue-stale: ${{ env.DAYS_BEFORE_ISSUE_STALE }}
|
||||||
|
days-before-issue-close: ${{ env.DAYS_BEFORE_ISSUE_CLOSE }}
|
||||||
|
stale-issue-label: "Inactive Issue"
|
||||||
|
stale-issue-message: "There has been no activity in this issue for ${{ env.DAYS_BEFORE_ISSUE_STALE }} days. If this issue is still being experienced, please reply with an updated confirmation that the issue is still being experienced with the latest release."
|
||||||
|
close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please recreate the issue."
|
||||||
|
days-before-pr-stale: -1
|
||||||
|
days-before-pr-close: -1
|
||||||
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
operations-per-run: 500
|
||||||
22
.github/workflows/lint-frontend.yml
vendored
22
.github/workflows/lint-frontend.yml
vendored
@@ -3,14 +3,22 @@ name: Lint frontend
|
|||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
paths:
|
paths:
|
||||||
- 'invokeai/frontend/**'
|
- 'invokeai/frontend/web/**'
|
||||||
|
types:
|
||||||
|
- 'ready_for_review'
|
||||||
|
- 'opened'
|
||||||
|
- 'synchronize'
|
||||||
push:
|
push:
|
||||||
|
branches:
|
||||||
|
- 'main'
|
||||||
paths:
|
paths:
|
||||||
- 'invokeai/frontend/**'
|
- 'invokeai/frontend/web/**'
|
||||||
|
merge_group:
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
working-directory: invokeai/frontend
|
working-directory: invokeai/frontend/web
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
lint-frontend:
|
lint-frontend:
|
||||||
@@ -23,7 +31,7 @@ jobs:
|
|||||||
node-version: '18'
|
node-version: '18'
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- run: 'yarn install --frozen-lockfile'
|
- run: 'yarn install --frozen-lockfile'
|
||||||
- run: 'yarn tsc'
|
- run: 'yarn run lint:tsc'
|
||||||
- run: 'yarn run madge'
|
- run: 'yarn run lint:madge'
|
||||||
- run: 'yarn run lint --max-warnings=0'
|
- run: 'yarn run lint:eslint'
|
||||||
- run: 'yarn run prettier --check'
|
- run: 'yarn run lint:prettier'
|
||||||
|
|||||||
18
.github/workflows/mkdocs-material.yml
vendored
18
.github/workflows/mkdocs-material.yml
vendored
@@ -2,13 +2,19 @@ name: mkdocs-material
|
|||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- 'main'
|
- 'refs/heads/v2.3'
|
||||||
- 'development'
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
mkdocs-material:
|
mkdocs-material:
|
||||||
if: github.event.pull_request.draft == false
|
if: github.event.pull_request.draft == false
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
env:
|
||||||
|
REPO_URL: '${{ github.server_url }}/${{ github.repository }}'
|
||||||
|
REPO_NAME: '${{ github.repository }}'
|
||||||
|
SITE_URL: 'https://${{ github.repository_owner }}.github.io/InvokeAI'
|
||||||
steps:
|
steps:
|
||||||
- name: checkout sources
|
- name: checkout sources
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -19,11 +25,15 @@ jobs:
|
|||||||
uses: actions/setup-python@v4
|
uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: '3.10'
|
python-version: '3.10'
|
||||||
|
cache: pip
|
||||||
|
cache-dependency-path: pyproject.toml
|
||||||
|
|
||||||
- name: install requirements
|
- name: install requirements
|
||||||
|
env:
|
||||||
|
PIP_USE_PEP517: 1
|
||||||
run: |
|
run: |
|
||||||
python -m \
|
python -m \
|
||||||
pip install -r docs/requirements-mkdocs.txt
|
pip install ".[docs]"
|
||||||
|
|
||||||
- name: confirm buildability
|
- name: confirm buildability
|
||||||
run: |
|
run: |
|
||||||
@@ -33,7 +43,7 @@ jobs:
|
|||||||
--verbose
|
--verbose
|
||||||
|
|
||||||
- name: deploy to gh-pages
|
- name: deploy to gh-pages
|
||||||
if: ${{ github.ref == 'refs/heads/main' }}
|
if: ${{ github.ref == 'refs/heads/v2.3' }}
|
||||||
run: |
|
run: |
|
||||||
python -m \
|
python -m \
|
||||||
mkdocs gh-deploy \
|
mkdocs gh-deploy \
|
||||||
|
|||||||
2
.github/workflows/pypi-release.yml
vendored
2
.github/workflows/pypi-release.yml
vendored
@@ -3,7 +3,7 @@ name: PyPI Release
|
|||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
paths:
|
paths:
|
||||||
- 'ldm/invoke/_version.py'
|
- 'invokeai/version/invokeai_version.py'
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
|||||||
11
.github/workflows/test-invoke-pip-skip.yml
vendored
11
.github/workflows/test-invoke-pip-skip.yml
vendored
@@ -1,12 +1,11 @@
|
|||||||
name: Test invoke.py pip
|
name: Test invoke.py pip
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
paths-ignore:
|
paths:
|
||||||
- 'pyproject.toml'
|
- '**'
|
||||||
- 'ldm/**'
|
- '!pyproject.toml'
|
||||||
- 'invokeai/backend/**'
|
- '!invokeai/**'
|
||||||
- 'invokeai/configs/**'
|
- 'invokeai/frontend/web/**'
|
||||||
- 'invokeai/frontend/dist/**'
|
|
||||||
merge_group:
|
merge_group:
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
|
|||||||
31
.github/workflows/test-invoke-pip.yml
vendored
31
.github/workflows/test-invoke-pip.yml
vendored
@@ -5,17 +5,13 @@ on:
|
|||||||
- 'main'
|
- 'main'
|
||||||
paths:
|
paths:
|
||||||
- 'pyproject.toml'
|
- 'pyproject.toml'
|
||||||
- 'ldm/**'
|
- 'invokeai/**'
|
||||||
- 'invokeai/backend/**'
|
- '!invokeai/frontend/web/**'
|
||||||
- 'invokeai/configs/**'
|
|
||||||
- 'invokeai/frontend/dist/**'
|
|
||||||
pull_request:
|
pull_request:
|
||||||
paths:
|
paths:
|
||||||
- 'pyproject.toml'
|
- 'pyproject.toml'
|
||||||
- 'ldm/**'
|
- 'invokeai/**'
|
||||||
- 'invokeai/backend/**'
|
- '!invokeai/frontend/web/**'
|
||||||
- 'invokeai/configs/**'
|
|
||||||
- 'invokeai/frontend/dist/**'
|
|
||||||
types:
|
types:
|
||||||
- 'ready_for_review'
|
- 'ready_for_review'
|
||||||
- 'opened'
|
- 'opened'
|
||||||
@@ -84,11 +80,6 @@ jobs:
|
|||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: set test prompt to main branch validation
|
- name: set test prompt to main branch validation
|
||||||
if: ${{ github.ref == 'refs/heads/main' }}
|
|
||||||
run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
|
|
||||||
|
|
||||||
- name: set test prompt to Pull Request validation
|
|
||||||
if: ${{ github.ref != 'refs/heads/main' }}
|
|
||||||
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
|
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
|
||||||
|
|
||||||
- name: setup python
|
- name: setup python
|
||||||
@@ -109,12 +100,6 @@ jobs:
|
|||||||
id: run-pytest
|
id: run-pytest
|
||||||
run: pytest
|
run: pytest
|
||||||
|
|
||||||
- name: set INVOKEAI_OUTDIR
|
|
||||||
run: >
|
|
||||||
python -c
|
|
||||||
"import os;from ldm.invoke.globals import Globals;OUTDIR=os.path.join(Globals.root,str('outputs'));print(f'INVOKEAI_OUTDIR={OUTDIR}')"
|
|
||||||
>> ${{ matrix.github-env }}
|
|
||||||
|
|
||||||
- name: run invokeai-configure
|
- name: run invokeai-configure
|
||||||
id: run-preload-models
|
id: run-preload-models
|
||||||
env:
|
env:
|
||||||
@@ -133,15 +118,21 @@ jobs:
|
|||||||
HF_HUB_OFFLINE: 1
|
HF_HUB_OFFLINE: 1
|
||||||
HF_DATASETS_OFFLINE: 1
|
HF_DATASETS_OFFLINE: 1
|
||||||
TRANSFORMERS_OFFLINE: 1
|
TRANSFORMERS_OFFLINE: 1
|
||||||
|
INVOKEAI_OUTDIR: ${{ github.workspace }}/results
|
||||||
run: >
|
run: >
|
||||||
invokeai
|
invokeai
|
||||||
--no-patchmatch
|
--no-patchmatch
|
||||||
--no-nsfw_checker
|
--no-nsfw_checker
|
||||||
--from_file ${{ env.TEST_PROMPTS }}
|
--precision=float32
|
||||||
|
--always_use_cpu
|
||||||
|
--use_memory_db
|
||||||
--outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
|
--outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
|
||||||
|
--from_file ${{ env.TEST_PROMPTS }}
|
||||||
|
|
||||||
- name: Archive results
|
- name: Archive results
|
||||||
id: archive-results
|
id: archive-results
|
||||||
|
env:
|
||||||
|
INVOKEAI_OUTDIR: ${{ github.workspace }}/results
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@v3
|
||||||
with:
|
with:
|
||||||
name: results
|
name: results
|
||||||
|
|||||||
16
.gitignore
vendored
16
.gitignore
vendored
@@ -9,6 +9,8 @@ models/ldm/stable-diffusion-v1/model.ckpt
|
|||||||
configs/models.user.yaml
|
configs/models.user.yaml
|
||||||
config/models.user.yml
|
config/models.user.yml
|
||||||
invokeai.init
|
invokeai.init
|
||||||
|
.version
|
||||||
|
.last_model
|
||||||
|
|
||||||
# ignore the Anaconda/Miniconda installer used while building Docker image
|
# ignore the Anaconda/Miniconda installer used while building Docker image
|
||||||
anaconda.sh
|
anaconda.sh
|
||||||
@@ -63,6 +65,7 @@ pip-delete-this-directory.txt
|
|||||||
htmlcov/
|
htmlcov/
|
||||||
.tox/
|
.tox/
|
||||||
.nox/
|
.nox/
|
||||||
|
.coveragerc
|
||||||
.coverage
|
.coverage
|
||||||
.coverage.*
|
.coverage.*
|
||||||
.cache
|
.cache
|
||||||
@@ -73,6 +76,7 @@ cov.xml
|
|||||||
*.py,cover
|
*.py,cover
|
||||||
.hypothesis/
|
.hypothesis/
|
||||||
.pytest_cache/
|
.pytest_cache/
|
||||||
|
.pytest.ini
|
||||||
cover/
|
cover/
|
||||||
junit/
|
junit/
|
||||||
|
|
||||||
@@ -197,8 +201,10 @@ checkpoints
|
|||||||
# If it's a Mac
|
# If it's a Mac
|
||||||
.DS_Store
|
.DS_Store
|
||||||
|
|
||||||
|
invokeai/frontend/web/dist/*
|
||||||
|
|
||||||
# Let the frontend manage its own gitignore
|
# Let the frontend manage its own gitignore
|
||||||
!invokeai/frontend/*
|
!invokeai/frontend/web/*
|
||||||
|
|
||||||
# Scratch folder
|
# Scratch folder
|
||||||
.scratch/
|
.scratch/
|
||||||
@@ -213,11 +219,6 @@ gfpgan/
|
|||||||
# config file (will be created by installer)
|
# config file (will be created by installer)
|
||||||
configs/models.yaml
|
configs/models.yaml
|
||||||
|
|
||||||
# weights (will be created by installer)
|
|
||||||
models/ldm/stable-diffusion-v1/*.ckpt
|
|
||||||
models/clipseg
|
|
||||||
models/gfpgan
|
|
||||||
|
|
||||||
# ignore initfile
|
# ignore initfile
|
||||||
.invokeai
|
.invokeai
|
||||||
|
|
||||||
@@ -232,6 +233,3 @@ installer/install.bat
|
|||||||
installer/install.sh
|
installer/install.sh
|
||||||
installer/update.bat
|
installer/update.bat
|
||||||
installer/update.sh
|
installer/update.sh
|
||||||
|
|
||||||
# no longer stored in source directory
|
|
||||||
models
|
|
||||||
|
|||||||
@@ -1,5 +0,0 @@
|
|||||||
[pytest]
|
|
||||||
DJANGO_SETTINGS_MODULE = webtas.settings
|
|
||||||
; python_files = tests.py test_*.py *_tests.py
|
|
||||||
|
|
||||||
addopts = --cov=. --cov-config=.coveragerc --cov-report xml:cov.xml
|
|
||||||
13
README.md
13
README.md
@@ -33,6 +33,8 @@
|
|||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
_**Note: The UI is not fully functional on `main`. If you need a stable UI based on `main`, use the `pre-nodes` tag while we [migrate to a new backend](https://github.com/invoke-ai/InvokeAI/discussions/3246).**_
|
||||||
|
|
||||||
InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products.
|
InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products.
|
||||||
|
|
||||||
**Quick links**: [[How to Install](https://invoke-ai.github.io/InvokeAI/#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
|
**Quick links**: [[How to Install](https://invoke-ai.github.io/InvokeAI/#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
|
||||||
@@ -84,7 +86,7 @@ installing lots of models.
|
|||||||
|
|
||||||
6. Wait while the installer does its thing. After installing the software,
|
6. Wait while the installer does its thing. After installing the software,
|
||||||
the installer will launch a script that lets you configure InvokeAI and
|
the installer will launch a script that lets you configure InvokeAI and
|
||||||
select a set of starting image generaiton models.
|
select a set of starting image generation models.
|
||||||
|
|
||||||
7. Find the folder that InvokeAI was installed into (it is not the
|
7. Find the folder that InvokeAI was installed into (it is not the
|
||||||
same as the unpacked zip file directory!) The default location of this
|
same as the unpacked zip file directory!) The default location of this
|
||||||
@@ -139,15 +141,20 @@ not supported.
|
|||||||
_For Windows/Linux with an NVIDIA GPU:_
|
_For Windows/Linux with an NVIDIA GPU:_
|
||||||
|
|
||||||
```terminal
|
```terminal
|
||||||
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
||||||
```
|
```
|
||||||
|
|
||||||
_For Linux with an AMD GPU:_
|
_For Linux with an AMD GPU:_
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
|
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
||||||
```
|
```
|
||||||
|
|
||||||
|
_For non-GPU systems:_
|
||||||
|
```terminal
|
||||||
|
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
|
||||||
|
```
|
||||||
|
|
||||||
_For Macintoshes, either Intel or M1/M2:_
|
_For Macintoshes, either Intel or M1/M2:_
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
|
|||||||
Binary file not shown.
@@ -1,164 +0,0 @@
|
|||||||
@echo off
|
|
||||||
|
|
||||||
@rem This script will install git (if not found on the PATH variable)
|
|
||||||
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
|
|
||||||
@rem For users who already have git, this step will be skipped.
|
|
||||||
|
|
||||||
@rem Next, it'll download the project's source code.
|
|
||||||
@rem Then it will download a self-contained, standalone Python and unpack it.
|
|
||||||
@rem Finally, it'll create the Python virtual environment and preload the models.
|
|
||||||
|
|
||||||
@rem This enables a user to install this project without manually installing git or Python
|
|
||||||
|
|
||||||
@rem change to the script's directory
|
|
||||||
PUSHD "%~dp0"
|
|
||||||
|
|
||||||
set "no_cache_dir=--no-cache-dir"
|
|
||||||
if "%1" == "use-cache" (
|
|
||||||
set "no_cache_dir="
|
|
||||||
)
|
|
||||||
|
|
||||||
echo ***** Installing InvokeAI.. *****
|
|
||||||
@rem Config
|
|
||||||
set INSTALL_ENV_DIR=%cd%\installer_files\env
|
|
||||||
@rem https://mamba.readthedocs.io/en/latest/installation.html
|
|
||||||
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
|
|
||||||
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
|
|
||||||
set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
|
|
||||||
set PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
|
|
||||||
set PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-x86_64-pc-windows-msvc-shared-install_only.tar.gz
|
|
||||||
|
|
||||||
set PACKAGES_TO_INSTALL=
|
|
||||||
|
|
||||||
call git --version >.tmp1 2>.tmp2
|
|
||||||
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git
|
|
||||||
|
|
||||||
@rem Cleanup
|
|
||||||
del /q .tmp1 .tmp2
|
|
||||||
|
|
||||||
@rem (if necessary) install git into a contained environment
|
|
||||||
if "%PACKAGES_TO_INSTALL%" NEQ "" (
|
|
||||||
@rem download micromamba
|
|
||||||
echo ***** Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to micromamba.exe *****
|
|
||||||
|
|
||||||
call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > micromamba.exe
|
|
||||||
|
|
||||||
@rem test the mamba binary
|
|
||||||
echo ***** Micromamba version: *****
|
|
||||||
call micromamba.exe --version
|
|
||||||
|
|
||||||
@rem create the installer env
|
|
||||||
if not exist "%INSTALL_ENV_DIR%" (
|
|
||||||
call micromamba.exe create -y --prefix "%INSTALL_ENV_DIR%"
|
|
||||||
)
|
|
||||||
|
|
||||||
echo ***** Packages to install:%PACKAGES_TO_INSTALL% *****
|
|
||||||
|
|
||||||
call micromamba.exe install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%
|
|
||||||
|
|
||||||
if not exist "%INSTALL_ENV_DIR%" (
|
|
||||||
echo ----- There was a problem while installing "%PACKAGES_TO_INSTALL%" using micromamba. Cannot continue. -----
|
|
||||||
pause
|
|
||||||
exit /b
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
del /q micromamba.exe
|
|
||||||
|
|
||||||
@rem For 'git' only
|
|
||||||
set PATH=%INSTALL_ENV_DIR%\Library\bin;%PATH%
|
|
||||||
|
|
||||||
@rem Download/unpack/clean up InvokeAI release sourceball
|
|
||||||
set err_msg=----- InvokeAI source download failed -----
|
|
||||||
echo Trying to download "%RELEASE_URL%%RELEASE_SOURCEBALL%"
|
|
||||||
curl -L %RELEASE_URL%%RELEASE_SOURCEBALL% --output InvokeAI.tgz
|
|
||||||
if %errorlevel% neq 0 goto err_exit
|
|
||||||
|
|
||||||
set err_msg=----- InvokeAI source unpack failed -----
|
|
||||||
tar -zxf InvokeAI.tgz
|
|
||||||
if %errorlevel% neq 0 goto err_exit
|
|
||||||
|
|
||||||
del /q InvokeAI.tgz
|
|
||||||
|
|
||||||
set err_msg=----- InvokeAI source copy failed -----
|
|
||||||
cd InvokeAI-*
|
|
||||||
xcopy . .. /e /h
|
|
||||||
if %errorlevel% neq 0 goto err_exit
|
|
||||||
cd ..
|
|
||||||
|
|
||||||
@rem cleanup
|
|
||||||
for /f %%i in ('dir /b InvokeAI-*') do rd /s /q %%i
|
|
||||||
rd /s /q .dev_scripts .github docker-build tests
|
|
||||||
del /q requirements.in requirements-mkdocs.txt shell.nix
|
|
||||||
|
|
||||||
echo ***** Unpacked InvokeAI source *****
|
|
||||||
|
|
||||||
@rem Download/unpack/clean up python-build-standalone
|
|
||||||
set err_msg=----- Python download failed -----
|
|
||||||
curl -L %PYTHON_BUILD_STANDALONE_URL%/%PYTHON_BUILD_STANDALONE% --output python.tgz
|
|
||||||
if %errorlevel% neq 0 goto err_exit
|
|
||||||
|
|
||||||
set err_msg=----- Python unpack failed -----
|
|
||||||
tar -zxf python.tgz
|
|
||||||
if %errorlevel% neq 0 goto err_exit
|
|
||||||
|
|
||||||
del /q python.tgz
|
|
||||||
|
|
||||||
echo ***** Unpacked python-build-standalone *****
|
|
||||||
|
|
||||||
@rem create venv
|
|
||||||
set err_msg=----- problem creating venv -----
|
|
||||||
.\python\python -E -s -m venv .venv
|
|
||||||
if %errorlevel% neq 0 goto err_exit
|
|
||||||
call .venv\Scripts\activate.bat
|
|
||||||
|
|
||||||
echo ***** Created Python virtual environment *****
|
|
||||||
|
|
||||||
@rem Print venv's Python version
|
|
||||||
set err_msg=----- problem calling venv's python -----
|
|
||||||
echo We're running under
|
|
||||||
.venv\Scripts\python --version
|
|
||||||
if %errorlevel% neq 0 goto err_exit
|
|
||||||
|
|
||||||
set err_msg=----- pip update failed -----
|
|
||||||
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location --upgrade pip wheel
|
|
||||||
if %errorlevel% neq 0 goto err_exit
|
|
||||||
|
|
||||||
echo ***** Updated pip and wheel *****
|
|
||||||
|
|
||||||
set err_msg=----- requirements file copy failed -----
|
|
||||||
copy binary_installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
|
|
||||||
if %errorlevel% neq 0 goto err_exit
|
|
||||||
|
|
||||||
set err_msg=----- main pip install failed -----
|
|
||||||
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -r requirements.txt
|
|
||||||
if %errorlevel% neq 0 goto err_exit
|
|
||||||
|
|
||||||
echo ***** Installed Python dependencies *****
|
|
||||||
|
|
||||||
set err_msg=----- InvokeAI setup failed -----
|
|
||||||
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -e .
|
|
||||||
if %errorlevel% neq 0 goto err_exit
|
|
||||||
|
|
||||||
copy binary_installer\invoke.bat.in .\invoke.bat
|
|
||||||
echo ***** Installed invoke launcher script ******
|
|
||||||
|
|
||||||
@rem more cleanup
|
|
||||||
rd /s /q binary_installer installer_files
|
|
||||||
|
|
||||||
@rem preload the models
|
|
||||||
call .venv\Scripts\python ldm\invoke\config\invokeai_configure.py
|
|
||||||
set err_msg=----- model download clone failed -----
|
|
||||||
if %errorlevel% neq 0 goto err_exit
|
|
||||||
deactivate
|
|
||||||
|
|
||||||
echo ***** Finished downloading models *****
|
|
||||||
|
|
||||||
echo All done! Execute the file invoke.bat in this directory to start InvokeAI
|
|
||||||
pause
|
|
||||||
exit
|
|
||||||
|
|
||||||
:err_exit
|
|
||||||
echo %err_msg%
|
|
||||||
pause
|
|
||||||
exit
|
|
||||||
@@ -1,235 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# ensure we're in the correct folder in case user's CWD is somewhere else
|
|
||||||
scriptdir=$(dirname "$0")
|
|
||||||
cd "$scriptdir"
|
|
||||||
|
|
||||||
set -euo pipefail
|
|
||||||
IFS=$'\n\t'
|
|
||||||
|
|
||||||
function _err_exit {
|
|
||||||
if test "$1" -ne 0
|
|
||||||
then
|
|
||||||
echo -e "Error code $1; Error caught was '$2'"
|
|
||||||
read -p "Press any key to exit..."
|
|
||||||
exit
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# This script will install git (if not found on the PATH variable)
|
|
||||||
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
|
|
||||||
# For users who already have git, this step will be skipped.
|
|
||||||
|
|
||||||
# Next, it'll download the project's source code.
|
|
||||||
# Then it will download a self-contained, standalone Python and unpack it.
|
|
||||||
# Finally, it'll create the Python virtual environment and preload the models.
|
|
||||||
|
|
||||||
# This enables a user to install this project without manually installing git or Python
|
|
||||||
|
|
||||||
echo -e "\n***** Installing InvokeAI into $(pwd)... *****\n"
|
|
||||||
|
|
||||||
export no_cache_dir="--no-cache-dir"
|
|
||||||
if [ $# -ge 1 ]; then
|
|
||||||
if [ "$1" = "use-cache" ]; then
|
|
||||||
export no_cache_dir=""
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
OS_NAME=$(uname -s)
|
|
||||||
case "${OS_NAME}" in
|
|
||||||
Linux*) OS_NAME="linux";;
|
|
||||||
Darwin*) OS_NAME="darwin";;
|
|
||||||
*) echo -e "\n----- Unknown OS: $OS_NAME! This script runs only on Linux or macOS -----\n" && exit
|
|
||||||
esac
|
|
||||||
|
|
||||||
OS_ARCH=$(uname -m)
|
|
||||||
case "${OS_ARCH}" in
|
|
||||||
x86_64*) ;;
|
|
||||||
arm64*) ;;
|
|
||||||
*) echo -e "\n----- Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64 -----\n" && exit
|
|
||||||
esac
|
|
||||||
|
|
||||||
# https://mamba.readthedocs.io/en/latest/installation.html
|
|
||||||
MAMBA_OS_NAME=$OS_NAME
|
|
||||||
MAMBA_ARCH=$OS_ARCH
|
|
||||||
if [ "$OS_NAME" == "darwin" ]; then
|
|
||||||
MAMBA_OS_NAME="osx"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$OS_ARCH" == "linux" ]; then
|
|
||||||
MAMBA_ARCH="aarch64"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$OS_ARCH" == "x86_64" ]; then
|
|
||||||
MAMBA_ARCH="64"
|
|
||||||
fi
|
|
||||||
|
|
||||||
PY_ARCH=$OS_ARCH
|
|
||||||
if [ "$OS_ARCH" == "arm64" ]; then
|
|
||||||
PY_ARCH="aarch64"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Compute device ('cd' segment of reqs files) detect goes here
|
|
||||||
# This needs a ton of work
|
|
||||||
# Suggestions:
|
|
||||||
# - lspci
|
|
||||||
# - check $PATH for nvidia-smi, gtt CUDA/GPU version from output
|
|
||||||
# - Surely there's a similar utility for AMD?
|
|
||||||
CD="cuda"
|
|
||||||
if [ "$OS_NAME" == "darwin" ] && [ "$OS_ARCH" == "arm64" ]; then
|
|
||||||
CD="mps"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# config
|
|
||||||
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
|
|
||||||
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${MAMBA_OS_NAME}-${MAMBA_ARCH}/latest"
|
|
||||||
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
|
|
||||||
RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
|
|
||||||
PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
|
|
||||||
if [ "$OS_NAME" == "darwin" ]; then
|
|
||||||
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-apple-darwin-install_only.tar.gz
|
|
||||||
elif [ "$OS_NAME" == "linux" ]; then
|
|
||||||
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-unknown-linux-gnu-install_only.tar.gz
|
|
||||||
fi
|
|
||||||
echo "INSTALLING $RELEASE_SOURCEBALL FROM $RELEASE_URL"
|
|
||||||
|
|
||||||
PACKAGES_TO_INSTALL=""
|
|
||||||
|
|
||||||
if ! hash "git" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi
|
|
||||||
|
|
||||||
# (if necessary) install git and conda into a contained environment
|
|
||||||
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
|
|
||||||
# download micromamba
|
|
||||||
echo -e "\n***** Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to micromamba *****\n"
|
|
||||||
|
|
||||||
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvjO bin/micromamba > micromamba
|
|
||||||
|
|
||||||
chmod u+x ./micromamba
|
|
||||||
|
|
||||||
# test the mamba binary
|
|
||||||
echo -e "\n***** Micromamba version: *****\n"
|
|
||||||
./micromamba --version
|
|
||||||
|
|
||||||
# create the installer env
|
|
||||||
if [ ! -e "$INSTALL_ENV_DIR" ]; then
|
|
||||||
./micromamba create -y --prefix "$INSTALL_ENV_DIR"
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo -e "\n***** Packages to install:$PACKAGES_TO_INSTALL *****\n"
|
|
||||||
|
|
||||||
./micromamba install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge "$PACKAGES_TO_INSTALL"
|
|
||||||
|
|
||||||
if [ ! -e "$INSTALL_ENV_DIR" ]; then
|
|
||||||
echo -e "\n----- There was a problem while initializing micromamba. Cannot continue. -----\n"
|
|
||||||
exit
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
rm -f micromamba.exe
|
|
||||||
|
|
||||||
export PATH="$INSTALL_ENV_DIR/bin:$PATH"
|
|
||||||
|
|
||||||
# Download/unpack/clean up InvokeAI release sourceball
|
|
||||||
_err_msg="\n----- InvokeAI source download failed -----\n"
|
|
||||||
curl -L $RELEASE_URL/$RELEASE_SOURCEBALL --output InvokeAI.tgz
|
|
||||||
_err_exit $? _err_msg
|
|
||||||
_err_msg="\n----- InvokeAI source unpack failed -----\n"
|
|
||||||
tar -zxf InvokeAI.tgz
|
|
||||||
_err_exit $? _err_msg
|
|
||||||
|
|
||||||
rm -f InvokeAI.tgz
|
|
||||||
|
|
||||||
_err_msg="\n----- InvokeAI source copy failed -----\n"
|
|
||||||
cd InvokeAI-*
|
|
||||||
cp -r . ..
|
|
||||||
_err_exit $? _err_msg
|
|
||||||
cd ..
|
|
||||||
|
|
||||||
# cleanup
|
|
||||||
rm -rf InvokeAI-*/
|
|
||||||
rm -rf .dev_scripts/ .github/ docker-build/ tests/ requirements.in requirements-mkdocs.txt shell.nix
|
|
||||||
|
|
||||||
echo -e "\n***** Unpacked InvokeAI source *****\n"
|
|
||||||
|
|
||||||
# Download/unpack/clean up python-build-standalone
|
|
||||||
_err_msg="\n----- Python download failed -----\n"
|
|
||||||
curl -L $PYTHON_BUILD_STANDALONE_URL/$PYTHON_BUILD_STANDALONE --output python.tgz
|
|
||||||
_err_exit $? _err_msg
|
|
||||||
_err_msg="\n----- Python unpack failed -----\n"
|
|
||||||
tar -zxf python.tgz
|
|
||||||
_err_exit $? _err_msg
|
|
||||||
|
|
||||||
rm -f python.tgz
|
|
||||||
|
|
||||||
echo -e "\n***** Unpacked python-build-standalone *****\n"
|
|
||||||
|
|
||||||
# create venv
|
|
||||||
_err_msg="\n----- problem creating venv -----\n"
|
|
||||||
|
|
||||||
if [ "$OS_NAME" == "darwin" ]; then
|
|
||||||
# patch sysconfig so that extensions can build properly
|
|
||||||
# adapted from https://github.com/cashapp/hermit-packages/commit/fcba384663892f4d9cfb35e8639ff7a28166ee43
|
|
||||||
PYTHON_INSTALL_DIR="$(pwd)/python"
|
|
||||||
SYSCONFIG="$(echo python/lib/python*/_sysconfigdata_*.py)"
|
|
||||||
TMPFILE="$(mktemp)"
|
|
||||||
chmod +w "${SYSCONFIG}"
|
|
||||||
cp "${SYSCONFIG}" "${TMPFILE}"
|
|
||||||
sed "s,'/install,'${PYTHON_INSTALL_DIR},g" "${TMPFILE}" > "${SYSCONFIG}"
|
|
||||||
rm -f "${TMPFILE}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
./python/bin/python3 -E -s -m venv .venv
|
|
||||||
_err_exit $? _err_msg
|
|
||||||
source .venv/bin/activate
|
|
||||||
|
|
||||||
echo -e "\n***** Created Python virtual environment *****\n"
|
|
||||||
|
|
||||||
# Print venv's Python version
|
|
||||||
_err_msg="\n----- problem calling venv's python -----\n"
|
|
||||||
echo -e "We're running under"
|
|
||||||
.venv/bin/python3 --version
|
|
||||||
_err_exit $? _err_msg
|
|
||||||
|
|
||||||
_err_msg="\n----- pip update failed -----\n"
|
|
||||||
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location --upgrade pip
|
|
||||||
_err_exit $? _err_msg
|
|
||||||
|
|
||||||
echo -e "\n***** Updated pip *****\n"
|
|
||||||
|
|
||||||
_err_msg="\n----- requirements file copy failed -----\n"
|
|
||||||
cp binary_installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
|
|
||||||
_err_exit $? _err_msg
|
|
||||||
|
|
||||||
_err_msg="\n----- main pip install failed -----\n"
|
|
||||||
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -r requirements.txt
|
|
||||||
_err_exit $? _err_msg
|
|
||||||
|
|
||||||
echo -e "\n***** Installed Python dependencies *****\n"
|
|
||||||
|
|
||||||
_err_msg="\n----- InvokeAI setup failed -----\n"
|
|
||||||
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -e .
|
|
||||||
_err_exit $? _err_msg
|
|
||||||
|
|
||||||
echo -e "\n***** Installed InvokeAI *****\n"
|
|
||||||
|
|
||||||
cp binary_installer/invoke.sh.in ./invoke.sh
|
|
||||||
chmod a+rx ./invoke.sh
|
|
||||||
echo -e "\n***** Installed invoke launcher script ******\n"
|
|
||||||
|
|
||||||
# more cleanup
|
|
||||||
rm -rf binary_installer/ installer_files/
|
|
||||||
|
|
||||||
# preload the models
|
|
||||||
.venv/bin/python3 scripts/configure_invokeai.py
|
|
||||||
_err_msg="\n----- model download clone failed -----\n"
|
|
||||||
_err_exit $? _err_msg
|
|
||||||
deactivate
|
|
||||||
|
|
||||||
echo -e "\n***** Finished downloading models *****\n"
|
|
||||||
|
|
||||||
echo "All done! Run the command"
|
|
||||||
echo " $scriptdir/invoke.sh"
|
|
||||||
echo "to start InvokeAI."
|
|
||||||
read -p "Press any key to exit..."
|
|
||||||
exit
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
@echo off
|
|
||||||
|
|
||||||
PUSHD "%~dp0"
|
|
||||||
call .venv\Scripts\activate.bat
|
|
||||||
|
|
||||||
echo Do you want to generate images using the
|
|
||||||
echo 1. command-line
|
|
||||||
echo 2. browser-based UI
|
|
||||||
echo OR
|
|
||||||
echo 3. open the developer console
|
|
||||||
set /p choice="Please enter 1, 2 or 3: "
|
|
||||||
if /i "%choice%" == "1" (
|
|
||||||
echo Starting the InvokeAI command-line.
|
|
||||||
.venv\Scripts\python scripts\invoke.py %*
|
|
||||||
) else if /i "%choice%" == "2" (
|
|
||||||
echo Starting the InvokeAI browser-based UI.
|
|
||||||
.venv\Scripts\python scripts\invoke.py --web %*
|
|
||||||
) else if /i "%choice%" == "3" (
|
|
||||||
echo Developer Console
|
|
||||||
echo Python command is:
|
|
||||||
where python
|
|
||||||
echo Python version is:
|
|
||||||
python --version
|
|
||||||
echo *************************
|
|
||||||
echo You are now in the system shell, with the local InvokeAI Python virtual environment activated,
|
|
||||||
echo so that you can troubleshoot this InvokeAI installation as necessary.
|
|
||||||
echo *************************
|
|
||||||
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
|
|
||||||
call cmd /k
|
|
||||||
) else (
|
|
||||||
echo Invalid selection
|
|
||||||
pause
|
|
||||||
exit /b
|
|
||||||
)
|
|
||||||
|
|
||||||
deactivate
|
|
||||||
@@ -1,46 +0,0 @@
|
|||||||
#!/usr/bin/env sh
|
|
||||||
|
|
||||||
set -eu
|
|
||||||
|
|
||||||
. .venv/bin/activate
|
|
||||||
|
|
||||||
# set required env var for torch on mac MPS
|
|
||||||
if [ "$(uname -s)" == "Darwin" ]; then
|
|
||||||
export PYTORCH_ENABLE_MPS_FALLBACK=1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "Do you want to generate images using the"
|
|
||||||
echo "1. command-line"
|
|
||||||
echo "2. browser-based UI"
|
|
||||||
echo "OR"
|
|
||||||
echo "3. open the developer console"
|
|
||||||
echo "Please enter 1, 2, or 3:"
|
|
||||||
read choice
|
|
||||||
|
|
||||||
case $choice in
|
|
||||||
1)
|
|
||||||
printf "\nStarting the InvokeAI command-line..\n";
|
|
||||||
.venv/bin/python scripts/invoke.py $*;
|
|
||||||
;;
|
|
||||||
2)
|
|
||||||
printf "\nStarting the InvokeAI browser-based UI..\n";
|
|
||||||
.venv/bin/python scripts/invoke.py --web $*;
|
|
||||||
;;
|
|
||||||
3)
|
|
||||||
printf "\nDeveloper Console:\n";
|
|
||||||
printf "Python command is:\n\t";
|
|
||||||
which python;
|
|
||||||
printf "Python version is:\n\t";
|
|
||||||
python --version;
|
|
||||||
echo "*************************"
|
|
||||||
echo "You are now in your user shell ($SHELL) with the local InvokeAI Python virtual environment activated,";
|
|
||||||
echo "so that you can troubleshoot this InvokeAI installation as necessary.";
|
|
||||||
printf "*************************\n"
|
|
||||||
echo "*** Type \`exit\` to quit this shell and deactivate the Python virtual environment *** ";
|
|
||||||
/usr/bin/env "$SHELL";
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "Invalid selection";
|
|
||||||
exit
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,17 +0,0 @@
|
|||||||
InvokeAI
|
|
||||||
|
|
||||||
Project homepage: https://github.com/invoke-ai/InvokeAI
|
|
||||||
|
|
||||||
Installation on Windows:
|
|
||||||
NOTE: You might need to enable Windows Long Paths. If you're not sure,
|
|
||||||
then you almost certainly need to. Simply double-click the 'WinLongPathsEnabled.reg'
|
|
||||||
file. Note that you will need to have admin privileges in order to
|
|
||||||
do this.
|
|
||||||
|
|
||||||
Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).
|
|
||||||
|
|
||||||
Installation on Linux and Mac:
|
|
||||||
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).
|
|
||||||
|
|
||||||
After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh'
|
|
||||||
file (on Linux/Mac) to start InvokeAI.
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
--prefer-binary
|
|
||||||
--extra-index-url https://download.pytorch.org/whl/torch_stable.html
|
|
||||||
--extra-index-url https://download.pytorch.org/whl/cu116
|
|
||||||
--trusted-host https://download.pytorch.org
|
|
||||||
accelerate~=0.15
|
|
||||||
albumentations
|
|
||||||
diffusers[torch]~=0.11
|
|
||||||
einops
|
|
||||||
eventlet
|
|
||||||
flask_cors
|
|
||||||
flask_socketio
|
|
||||||
flaskwebgui==1.0.3
|
|
||||||
getpass_asterisk
|
|
||||||
imageio-ffmpeg
|
|
||||||
pyreadline3
|
|
||||||
realesrgan
|
|
||||||
send2trash
|
|
||||||
streamlit
|
|
||||||
taming-transformers-rom1504
|
|
||||||
test-tube
|
|
||||||
torch-fidelity
|
|
||||||
torch==1.12.1 ; platform_system == 'Darwin'
|
|
||||||
torch==1.12.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
|
|
||||||
torchvision==0.13.1 ; platform_system == 'Darwin'
|
|
||||||
torchvision==0.13.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
|
|
||||||
transformers
|
|
||||||
picklescan
|
|
||||||
https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip
|
|
||||||
https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip
|
|
||||||
https://github.com/invoke-ai/GFPGAN/archive/3f5d2397361199bc4a91c08bb7d80f04d7805615.zip ; platform_system=='Windows'
|
|
||||||
https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system=='Linux' or platform_system=='Darwin'
|
|
||||||
https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip
|
|
||||||
https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip
|
|
||||||
4
coverage/.gitignore
vendored
Normal file
4
coverage/.gitignore
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
# Ignore everything in this directory
|
||||||
|
*
|
||||||
|
# Except this file
|
||||||
|
!.gitignore
|
||||||
@@ -4,15 +4,15 @@ ARG PYTHON_VERSION=3.9
|
|||||||
##################
|
##################
|
||||||
## base image ##
|
## base image ##
|
||||||
##################
|
##################
|
||||||
FROM python:${PYTHON_VERSION}-slim AS python-base
|
FROM --platform=${TARGETPLATFORM} python:${PYTHON_VERSION}-slim AS python-base
|
||||||
|
|
||||||
LABEL org.opencontainers.image.authors="mauwii@outlook.de"
|
LABEL org.opencontainers.image.authors="mauwii@outlook.de"
|
||||||
|
|
||||||
# prepare for buildkit cache
|
# Prepare apt for buildkit cache
|
||||||
RUN rm -f /etc/apt/apt.conf.d/docker-clean \
|
RUN rm -f /etc/apt/apt.conf.d/docker-clean \
|
||||||
&& echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache
|
&& echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache
|
||||||
|
|
||||||
# Install necessary packages
|
# Install dependencies
|
||||||
RUN \
|
RUN \
|
||||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||||
@@ -23,7 +23,7 @@ RUN \
|
|||||||
libglib2.0-0=2.66.* \
|
libglib2.0-0=2.66.* \
|
||||||
libopencv-dev=4.5.*
|
libopencv-dev=4.5.*
|
||||||
|
|
||||||
# set working directory and env
|
# Set working directory and env
|
||||||
ARG APPDIR=/usr/src
|
ARG APPDIR=/usr/src
|
||||||
ARG APPNAME=InvokeAI
|
ARG APPNAME=InvokeAI
|
||||||
WORKDIR ${APPDIR}
|
WORKDIR ${APPDIR}
|
||||||
@@ -32,7 +32,7 @@ ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
|
|||||||
ENV PYTHONDONTWRITEBYTECODE 1
|
ENV PYTHONDONTWRITEBYTECODE 1
|
||||||
# Turns off buffering for easier container logging
|
# Turns off buffering for easier container logging
|
||||||
ENV PYTHONUNBUFFERED 1
|
ENV PYTHONUNBUFFERED 1
|
||||||
# don't fall back to legacy build system
|
# Don't fall back to legacy build system
|
||||||
ENV PIP_USE_PEP517=1
|
ENV PIP_USE_PEP517=1
|
||||||
|
|
||||||
#######################
|
#######################
|
||||||
@@ -40,7 +40,7 @@ ENV PIP_USE_PEP517=1
|
|||||||
#######################
|
#######################
|
||||||
FROM python-base AS pyproject-builder
|
FROM python-base AS pyproject-builder
|
||||||
|
|
||||||
# Install dependencies
|
# Install build dependencies
|
||||||
RUN \
|
RUN \
|
||||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||||
@@ -51,26 +51,30 @@ RUN \
|
|||||||
gcc=4:10.2.* \
|
gcc=4:10.2.* \
|
||||||
python3-dev=3.9.*
|
python3-dev=3.9.*
|
||||||
|
|
||||||
# prepare pip for buildkit cache
|
# Prepare pip for buildkit cache
|
||||||
ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
|
ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
|
||||||
ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
|
ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
|
||||||
RUN mkdir -p ${PIP_CACHE_DIR}
|
RUN mkdir -p ${PIP_CACHE_DIR}
|
||||||
|
|
||||||
# create virtual environment
|
# Create virtual environment
|
||||||
RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
|
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
|
||||||
python3 -m venv "${APPNAME}" \
|
python3 -m venv "${APPNAME}" \
|
||||||
--upgrade-deps
|
--upgrade-deps
|
||||||
|
|
||||||
# copy sources
|
# Install requirements
|
||||||
COPY --link . .
|
COPY --link pyproject.toml .
|
||||||
|
COPY --link invokeai/version/invokeai_version.py invokeai/version/__init__.py invokeai/version/
|
||||||
# install pyproject.toml
|
|
||||||
ARG PIP_EXTRA_INDEX_URL
|
ARG PIP_EXTRA_INDEX_URL
|
||||||
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
|
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
|
||||||
RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
|
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
|
||||||
|
"${APPNAME}"/bin/pip install .
|
||||||
|
|
||||||
|
# Install pyproject.toml
|
||||||
|
COPY --link . .
|
||||||
|
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
|
||||||
"${APPNAME}/bin/pip" install .
|
"${APPNAME}/bin/pip" install .
|
||||||
|
|
||||||
# build patchmatch
|
# Build patchmatch
|
||||||
RUN python3 -c "from patchmatch import patch_match"
|
RUN python3 -c "from patchmatch import patch_match"
|
||||||
|
|
||||||
#####################
|
#####################
|
||||||
@@ -86,14 +90,14 @@ RUN useradd \
|
|||||||
-U \
|
-U \
|
||||||
"${UNAME}"
|
"${UNAME}"
|
||||||
|
|
||||||
# create volume directory
|
# Create volume directory
|
||||||
ARG VOLUME_DIR=/data
|
ARG VOLUME_DIR=/data
|
||||||
RUN mkdir -p "${VOLUME_DIR}" \
|
RUN mkdir -p "${VOLUME_DIR}" \
|
||||||
&& chown -R "${UNAME}" "${VOLUME_DIR}"
|
&& chown -hR "${UNAME}:${UNAME}" "${VOLUME_DIR}"
|
||||||
|
|
||||||
# setup runtime environment
|
# Setup runtime environment
|
||||||
USER ${UNAME}
|
USER ${UNAME}:${UNAME}
|
||||||
COPY --chown=${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
|
COPY --chown=${UNAME}:${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
|
||||||
ENV INVOKEAI_ROOT ${VOLUME_DIR}
|
ENV INVOKEAI_ROOT ${VOLUME_DIR}
|
||||||
ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
|
ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
|
||||||
ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
|
ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
|
||||||
|
|||||||
@@ -41,7 +41,7 @@ else
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# Build Container
|
# Build Container
|
||||||
DOCKER_BUILDKIT=1 docker build \
|
docker build \
|
||||||
--platform="${PLATFORM:-linux/amd64}" \
|
--platform="${PLATFORM:-linux/amd64}" \
|
||||||
--tag="${CONTAINER_IMAGE:-invokeai}" \
|
--tag="${CONTAINER_IMAGE:-invokeai}" \
|
||||||
${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
|
${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
|
||||||
|
|||||||
@@ -49,3 +49,6 @@ CONTAINER_FLAVOR="${CONTAINER_FLAVOR-cuda}"
|
|||||||
CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
|
CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
|
||||||
CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
|
CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
|
||||||
CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"
|
CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"
|
||||||
|
|
||||||
|
# enable docker buildkit
|
||||||
|
export DOCKER_BUILDKIT=1
|
||||||
|
|||||||
@@ -21,10 +21,10 @@ docker run \
|
|||||||
--tty \
|
--tty \
|
||||||
--rm \
|
--rm \
|
||||||
--platform="${PLATFORM}" \
|
--platform="${PLATFORM}" \
|
||||||
--name="${REPOSITORY_NAME,,}" \
|
--name="${REPOSITORY_NAME}" \
|
||||||
--hostname="${REPOSITORY_NAME,,}" \
|
--hostname="${REPOSITORY_NAME}" \
|
||||||
--mount=source="${VOLUMENAME}",target=/data \
|
--mount type=volume,volume-driver=local,source="${VOLUMENAME}",target=/data \
|
||||||
--mount type=bind,source="$(pwd)"/outputs,target=/data/outputs \
|
--mount type=bind,source="$(pwd)"/outputs/,target=/data/outputs/ \
|
||||||
${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
|
${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
|
||||||
${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
|
${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
|
||||||
--publish=9090:9090 \
|
--publish=9090:9090 \
|
||||||
@@ -32,7 +32,7 @@ docker run \
|
|||||||
${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
|
${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
|
||||||
"${CONTAINER_IMAGE}" ${@:+$@}
|
"${CONTAINER_IMAGE}" ${@:+$@}
|
||||||
|
|
||||||
# Remove Trash folder
|
echo -e "\nCleaning trash folder ..."
|
||||||
for f in outputs/.Trash*; do
|
for f in outputs/.Trash*; do
|
||||||
if [ -e "$f" ]; then
|
if [ -e "$f" ]; then
|
||||||
rm -Rf "$f"
|
rm -Rf "$f"
|
||||||
|
|||||||
BIN
docs/assets/contributing/html-detail.png
Normal file
BIN
docs/assets/contributing/html-detail.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 470 KiB |
BIN
docs/assets/contributing/html-overview.png
Normal file
BIN
docs/assets/contributing/html-overview.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 457 KiB |
@@ -1,105 +1,277 @@
|
|||||||
# Invocations
|
# Invocations
|
||||||
|
|
||||||
Invocations represent a single operation, its inputs, and its outputs. These operations and their outputs can be chained together to generate and modify images.
|
Invocations represent a single operation, its inputs, and its outputs. These
|
||||||
|
operations and their outputs can be chained together to generate and modify
|
||||||
|
images.
|
||||||
|
|
||||||
## Creating a new invocation
|
## Creating a new invocation
|
||||||
|
|
||||||
To create a new invocation, either find the appropriate module file in `/ldm/invoke/app/invocations` to add your invocation to, or create a new one in that folder. All invocations in that folder will be discovered and made available to the CLI and API automatically. Invocations make use of [typing](https://docs.python.org/3/library/typing.html) and [pydantic](https://pydantic-docs.helpmanual.io/) for validation and integration into the CLI and API.
|
To create a new invocation, either find the appropriate module file in
|
||||||
|
`/ldm/invoke/app/invocations` to add your invocation to, or create a new one in
|
||||||
|
that folder. All invocations in that folder will be discovered and made
|
||||||
|
available to the CLI and API automatically. Invocations make use of
|
||||||
|
[typing](https://docs.python.org/3/library/typing.html) and
|
||||||
|
[pydantic](https://pydantic-docs.helpmanual.io/) for validation and integration
|
||||||
|
into the CLI and API.
|
||||||
|
|
||||||
An invocation looks like this:
|
An invocation looks like this:
|
||||||
|
|
||||||
```py
|
```py
|
||||||
class UpscaleInvocation(BaseInvocation):
|
class UpscaleInvocation(BaseInvocation):
|
||||||
"""Upscales an image."""
|
"""Upscales an image."""
|
||||||
type: Literal['upscale'] = 'upscale'
|
|
||||||
|
# fmt: off
|
||||||
|
type: Literal["upscale"] = "upscale"
|
||||||
|
|
||||||
# Inputs
|
# Inputs
|
||||||
image: Union[ImageField,None] = Field(description="The input image")
|
image: Union[ImageField, None] = Field(description="The input image", default=None)
|
||||||
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
|
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
|
||||||
level: Literal[2,4] = Field(default=2, description = "The upscale level")
|
level: Literal[2, 4] = Field(default=2, description="The upscale level")
|
||||||
|
# fmt: on
|
||||||
|
|
||||||
|
# Schema customisation
|
||||||
|
class Config(InvocationConfig):
|
||||||
|
schema_extra = {
|
||||||
|
"ui": {
|
||||||
|
"tags": ["upscaling", "image"],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||||
image = context.services.images.get(self.image.image_type, self.image.image_name)
|
image = context.services.images.get_pil_image(
|
||||||
results = context.services.generate.upscale_and_reconstruct(
|
self.image.image_origin, self.image.image_name
|
||||||
image_list = [[image, 0]],
|
)
|
||||||
upscale = (self.level, self.strength),
|
results = context.services.restoration.upscale_and_reconstruct(
|
||||||
strength = 0.0, # GFPGAN strength
|
image_list=[[image, 0]],
|
||||||
save_original = False,
|
upscale=(self.level, self.strength),
|
||||||
image_callback = None,
|
strength=0.0, # GFPGAN strength
|
||||||
|
save_original=False,
|
||||||
|
image_callback=None,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Results are image and seed, unwrap for now
|
# Results are image and seed, unwrap for now
|
||||||
# TODO: can this return multiple results?
|
# TODO: can this return multiple results?
|
||||||
image_type = ImageType.RESULT
|
image_dto = context.services.images.create(
|
||||||
image_name = context.services.images.create_name(context.graph_execution_state_id, self.id)
|
image=results[0][0],
|
||||||
context.services.images.save(image_type, image_name, results[0][0])
|
image_origin=ResourceOrigin.INTERNAL,
|
||||||
return ImageOutput(
|
image_category=ImageCategory.GENERAL,
|
||||||
image = ImageField(image_type = image_type, image_name = image_name)
|
node_id=self.id,
|
||||||
|
session_id=context.graph_execution_state_id,
|
||||||
|
is_intermediate=self.is_intermediate,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
return ImageOutput(
|
||||||
|
image=ImageField(
|
||||||
|
image_name=image_dto.image_name,
|
||||||
|
image_origin=image_dto.image_origin,
|
||||||
|
),
|
||||||
|
width=image_dto.width,
|
||||||
|
height=image_dto.height,
|
||||||
|
)
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Each portion is important to implement correctly.
|
Each portion is important to implement correctly.
|
||||||
|
|
||||||
### Class definition and type
|
### Class definition and type
|
||||||
|
|
||||||
```py
|
```py
|
||||||
class UpscaleInvocation(BaseInvocation):
|
class UpscaleInvocation(BaseInvocation):
|
||||||
"""Upscales an image."""
|
"""Upscales an image."""
|
||||||
type: Literal['upscale'] = 'upscale'
|
type: Literal['upscale'] = 'upscale'
|
||||||
```
|
```
|
||||||
All invocations must derive from `BaseInvocation`. They should have a docstring that declares what they do in a single, short line. They should also have a `type` with a type hint that's `Literal["command_name"]`, where `command_name` is what the user will type on the CLI or use in the API to create this invocation. The `command_name` must be unique. The `type` must be assigned to the value of the literal in the type hint.
|
|
||||||
|
All invocations must derive from `BaseInvocation`. They should have a docstring
|
||||||
|
that declares what they do in a single, short line. They should also have a
|
||||||
|
`type` with a type hint that's `Literal["command_name"]`, where `command_name`
|
||||||
|
is what the user will type on the CLI or use in the API to create this
|
||||||
|
invocation. The `command_name` must be unique. The `type` must be assigned to
|
||||||
|
the value of the literal in the type hint.
|
||||||
|
|
||||||
### Inputs
|
### Inputs
|
||||||
|
|
||||||
```py
|
```py
|
||||||
# Inputs
|
# Inputs
|
||||||
image: Union[ImageField,None] = Field(description="The input image")
|
image: Union[ImageField,None] = Field(description="The input image")
|
||||||
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
|
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
|
||||||
level: Literal[2,4] = Field(default=2, description="The upscale level")
|
level: Literal[2,4] = Field(default=2, description="The upscale level")
|
||||||
```
|
```
|
||||||
Inputs consist of three parts: a name, a type hint, and a `Field` with default, description, and validation information. For example:
|
|
||||||
| Part | Value | Description |
|
|
||||||
| ---- | ----- | ----------- |
|
|
||||||
| Name | `strength` | This field is referred to as `strength` |
|
|
||||||
| Type Hint | `float` | This field must be of type `float` |
|
|
||||||
| Field | `Field(default=0.75, gt=0, le=1, description="The strength")` | The default value is `0.75`, the value must be in the range (0,1], and help text will show "The strength" for this field. |
|
|
||||||
|
|
||||||
Notice that `image` has type `Union[ImageField,None]`. The `Union` allows this field to be parsed with `None` as a value, which enables linking to previous invocations. All fields should either provide a default value or allow `None` as a value, so that they can be overwritten with a linked output from another invocation.
|
Inputs consist of three parts: a name, a type hint, and a `Field` with default,
|
||||||
|
description, and validation information. For example:
|
||||||
|
|
||||||
The special type `ImageField` is also used here. All images are passed as `ImageField`, which protects them from pydantic validation errors (since images only ever come from links).
|
| Part | Value | Description |
|
||||||
|
| --------- | ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- |
|
||||||
|
| Name | `strength` | This field is referred to as `strength` |
|
||||||
|
| Type Hint | `float` | This field must be of type `float` |
|
||||||
|
| Field | `Field(default=0.75, gt=0, le=1, description="The strength")` | The default value is `0.75`, the value must be in the range (0,1], and help text will show "The strength" for this field. |
|
||||||
|
|
||||||
Finally, note that for all linking, the `type` of the linked fields must match. If the `name` also matches, then the field can be **automatically linked** to a previous invocation by name and matching.
|
Notice that `image` has type `Union[ImageField,None]`. The `Union` allows this
|
||||||
|
field to be parsed with `None` as a value, which enables linking to previous
|
||||||
|
invocations. All fields should either provide a default value or allow `None` as
|
||||||
|
a value, so that they can be overwritten with a linked output from another
|
||||||
|
invocation.
|
||||||
|
|
||||||
|
The special type `ImageField` is also used here. All images are passed as
|
||||||
|
`ImageField`, which protects them from pydantic validation errors (since images
|
||||||
|
only ever come from links).
|
||||||
|
|
||||||
|
Finally, note that for all linking, the `type` of the linked fields must match.
|
||||||
|
If the `name` also matches, then the field can be **automatically linked** to a
|
||||||
|
previous invocation by name and matching.
|
||||||
|
|
||||||
|
### Config
|
||||||
|
|
||||||
|
```py
|
||||||
|
# Schema customisation
|
||||||
|
class Config(InvocationConfig):
|
||||||
|
schema_extra = {
|
||||||
|
"ui": {
|
||||||
|
"tags": ["upscaling", "image"],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This is an optional configuration for the invocation. It inherits from
|
||||||
|
pydantic's model `Config` class, and it used primarily to customize the
|
||||||
|
autogenerated OpenAPI schema.
|
||||||
|
|
||||||
|
The UI relies on the OpenAPI schema in two ways:
|
||||||
|
|
||||||
|
- An API client & Typescript types are generated from it. This happens at build
|
||||||
|
time.
|
||||||
|
- The node editor parses the schema into a template used by the UI to create the
|
||||||
|
node editor UI. This parsing happens at runtime.
|
||||||
|
|
||||||
|
In this example, a `ui` key has been added to the `schema_extra` dict to provide
|
||||||
|
some tags for the UI, to facilitate filtering nodes.
|
||||||
|
|
||||||
|
See the Schema Generation section below for more information.
|
||||||
|
|
||||||
### Invoke Function
|
### Invoke Function
|
||||||
|
|
||||||
```py
|
```py
|
||||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||||
image = context.services.images.get(self.image.image_type, self.image.image_name)
|
image = context.services.images.get_pil_image(
|
||||||
results = context.services.generate.upscale_and_reconstruct(
|
self.image.image_origin, self.image.image_name
|
||||||
image_list = [[image, 0]],
|
)
|
||||||
upscale = (self.level, self.strength),
|
results = context.services.restoration.upscale_and_reconstruct(
|
||||||
strength = 0.0, # GFPGAN strength
|
image_list=[[image, 0]],
|
||||||
save_original = False,
|
upscale=(self.level, self.strength),
|
||||||
image_callback = None,
|
strength=0.0, # GFPGAN strength
|
||||||
|
save_original=False,
|
||||||
|
image_callback=None,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Results are image and seed, unwrap for now
|
# Results are image and seed, unwrap for now
|
||||||
image_type = ImageType.RESULT
|
# TODO: can this return multiple results?
|
||||||
image_name = context.services.images.create_name(context.graph_execution_state_id, self.id)
|
image_dto = context.services.images.create(
|
||||||
context.services.images.save(image_type, image_name, results[0][0])
|
image=results[0][0],
|
||||||
|
image_origin=ResourceOrigin.INTERNAL,
|
||||||
|
image_category=ImageCategory.GENERAL,
|
||||||
|
node_id=self.id,
|
||||||
|
session_id=context.graph_execution_state_id,
|
||||||
|
is_intermediate=self.is_intermediate,
|
||||||
|
)
|
||||||
|
|
||||||
return ImageOutput(
|
return ImageOutput(
|
||||||
image = ImageField(image_type = image_type, image_name = image_name)
|
image=ImageField(
|
||||||
|
image_name=image_dto.image_name,
|
||||||
|
image_origin=image_dto.image_origin,
|
||||||
|
),
|
||||||
|
width=image_dto.width,
|
||||||
|
height=image_dto.height,
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
The `invoke` function is the last portion of an invocation. It is provided an `InvocationContext` which contains services to perform work as well as a `session_id` for use as needed. It should return a class with output values that derives from `BaseInvocationOutput`.
|
|
||||||
|
|
||||||
Before being called, the invocation will have all of its fields set from defaults, inputs, and finally links (overriding in that order).
|
The `invoke` function is the last portion of an invocation. It is provided an
|
||||||
|
`InvocationContext` which contains services to perform work as well as a
|
||||||
|
`session_id` for use as needed. It should return a class with output values that
|
||||||
|
derives from `BaseInvocationOutput`.
|
||||||
|
|
||||||
Assume that this invocation may be running simultaneously with other invocations, may be running on another machine, or in other interesting scenarios. If you need functionality, please provide it as a service in the `InvocationServices` class, and make sure it can be overridden.
|
Before being called, the invocation will have all of its fields set from
|
||||||
|
defaults, inputs, and finally links (overriding in that order).
|
||||||
|
|
||||||
|
Assume that this invocation may be running simultaneously with other
|
||||||
|
invocations, may be running on another machine, or in other interesting
|
||||||
|
scenarios. If you need functionality, please provide it as a service in the
|
||||||
|
`InvocationServices` class, and make sure it can be overridden.
|
||||||
|
|
||||||
### Outputs
|
### Outputs
|
||||||
|
|
||||||
```py
|
```py
|
||||||
class ImageOutput(BaseInvocationOutput):
|
class ImageOutput(BaseInvocationOutput):
|
||||||
"""Base class for invocations that output an image"""
|
"""Base class for invocations that output an image"""
|
||||||
type: Literal['image'] = 'image'
|
|
||||||
|
|
||||||
image: ImageField = Field(default=None, description="The output image")
|
# fmt: off
|
||||||
|
type: Literal["image_output"] = "image_output"
|
||||||
|
image: ImageField = Field(default=None, description="The output image")
|
||||||
|
width: int = Field(description="The width of the image in pixels")
|
||||||
|
height: int = Field(description="The height of the image in pixels")
|
||||||
|
# fmt: on
|
||||||
|
|
||||||
|
class Config:
|
||||||
|
schema_extra = {"required": ["type", "image", "width", "height"]}
|
||||||
```
|
```
|
||||||
Output classes look like an invocation class without the invoke method. Prefer to use an existing output class if available, and prefer to name inputs the same as outputs when possible, to promote automatic invocation linking.
|
|
||||||
|
Output classes look like an invocation class without the invoke method. Prefer
|
||||||
|
to use an existing output class if available, and prefer to name inputs the same
|
||||||
|
as outputs when possible, to promote automatic invocation linking.
|
||||||
|
|
||||||
|
## Schema Generation
|
||||||
|
|
||||||
|
Invocation, output and related classes are used to generate an OpenAPI schema.
|
||||||
|
|
||||||
|
### Required Properties
|
||||||
|
|
||||||
|
The schema generation treats all properties with default values as optional. This
|
||||||
|
makes sense internally, but when using these classes via the generated
|
||||||
|
schema, we end up with e.g. the `ImageOutput` class having its `image` property
|
||||||
|
marked as optional.
|
||||||
|
|
||||||
|
We know that this property will always be present, so the additional logic
|
||||||
|
needed to always check if the property exists adds a lot of extraneous cruft.
|
||||||
|
|
||||||
|
To fix this, we can leverage `pydantic`'s
|
||||||
|
[schema customisation](https://docs.pydantic.dev/usage/schema/#schema-customization)
|
||||||
|
to mark properties that we know will always be present as required.
|
||||||
|
|
||||||
|
Here's that `ImageOutput` class, without the needed schema customisation:
|
||||||
|
|
||||||
|
```python
|
||||||
|
class ImageOutput(BaseInvocationOutput):
|
||||||
|
"""Base class for invocations that output an image"""
|
||||||
|
|
||||||
|
# fmt: off
|
||||||
|
type: Literal["image_output"] = "image_output"
|
||||||
|
image: ImageField = Field(default=None, description="The output image")
|
||||||
|
width: int = Field(description="The width of the image in pixels")
|
||||||
|
height: int = Field(description="The height of the image in pixels")
|
||||||
|
# fmt: on
|
||||||
|
```
|
||||||
|
|
||||||
|
The OpenAPI schema that results from this `ImageOutput` will have the `type`,
|
||||||
|
`image`, `width` and `height` properties marked as optional, even though we know
|
||||||
|
they will always have a value.
|
||||||
|
|
||||||
|
```python
|
||||||
|
class ImageOutput(BaseInvocationOutput):
|
||||||
|
"""Base class for invocations that output an image"""
|
||||||
|
|
||||||
|
# fmt: off
|
||||||
|
type: Literal["image_output"] = "image_output"
|
||||||
|
image: ImageField = Field(default=None, description="The output image")
|
||||||
|
width: int = Field(description="The width of the image in pixels")
|
||||||
|
height: int = Field(description="The height of the image in pixels")
|
||||||
|
# fmt: on
|
||||||
|
|
||||||
|
# Add schema customization
|
||||||
|
class Config:
|
||||||
|
schema_extra = {"required": ["type", "image", "width", "height"]}
|
||||||
|
```
|
||||||
|
|
||||||
|
With the customization in place, the schema will now show these properties as
|
||||||
|
required, obviating the need for extensive null checks in client code.
|
||||||
|
|
||||||
|
See this `pydantic` issue for discussion on this solution:
|
||||||
|
<https://github.com/pydantic/pydantic/discussions/4577>
|
||||||
|
|||||||
83
docs/contributing/LOCAL_DEVELOPMENT.md
Normal file
83
docs/contributing/LOCAL_DEVELOPMENT.md
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
# Local Development
|
||||||
|
|
||||||
|
If you are looking to contribute you will need to have a local development
|
||||||
|
environment. See the
|
||||||
|
[Developer Install](../installation/020_INSTALL_MANUAL.md#developer-install) for
|
||||||
|
full details.
|
||||||
|
|
||||||
|
Broadly this involves cloning the repository, installing the pre-reqs, and
|
||||||
|
InvokeAI (in editable form). Assuming this is working, choose your area of
|
||||||
|
focus.
|
||||||
|
|
||||||
|
## Documentation
|
||||||
|
|
||||||
|
We use [mkdocs](https://www.mkdocs.org) for our documentation with the
|
||||||
|
[material theme](https://squidfunk.github.io/mkdocs-material/). Documentation is
|
||||||
|
written in markdown files under the `./docs` folder and then built into a static
|
||||||
|
website for hosting with GitHub Pages at
|
||||||
|
[invoke-ai.github.io/InvokeAI](https://invoke-ai.github.io/InvokeAI).
|
||||||
|
|
||||||
|
To contribute to the documentation you'll need to install the dependencies. Note
|
||||||
|
the use of `"`.
|
||||||
|
|
||||||
|
```zsh
|
||||||
|
pip install ".[docs]"
|
||||||
|
```
|
||||||
|
|
||||||
|
Now you can run the documentation locally, with hot-reloading of any changes you make.
|
||||||
|
|
||||||
|
```zsh
|
||||||
|
mkdocs serve
|
||||||
|
```
|
||||||
|
|
||||||
|
You'll then be prompted to connect to `http://127.0.0.1:8080` in order to
|
||||||
|
access.
|
||||||
|
|
||||||
|
## Backend
|
||||||
|
|
||||||
|
The backend is contained within the `./invokeai/backend` folder structure. To
|
||||||
|
get started however please install the development dependencies.
|
||||||
|
|
||||||
|
From the root of the repository run the following command. Note the use of `"`.
|
||||||
|
|
||||||
|
```zsh
|
||||||
|
pip install ".[test]"
|
||||||
|
```
|
||||||
|
|
||||||
|
This is an optional group of packages which is defined within the
|
||||||
|
`pyproject.toml` and will be required for testing the changes you make to the
|
||||||
|
code.
|
||||||
|
|
||||||
|
### Running Tests
|
||||||
|
|
||||||
|
We use [pytest](https://docs.pytest.org/en/7.2.x/) for our test suite. Tests can
|
||||||
|
be found under the `./tests` folder and can be run with a single `pytest`
|
||||||
|
command. Optionally, to review test coverage you can append `--cov`.
|
||||||
|
|
||||||
|
```zsh
|
||||||
|
pytest --cov
|
||||||
|
```
|
||||||
|
|
||||||
|
Test outcomes and coverage will be reported in the terminal. In addition a more
|
||||||
|
detailed report is created in both XML and HTML format in the `./coverage`
|
||||||
|
folder. The HTML one in particular can help identify missing statements
|
||||||
|
requiring tests to ensure coverage. This can be run by opening
|
||||||
|
`./coverage/html/index.html`.
|
||||||
|
|
||||||
|
For example.
|
||||||
|
|
||||||
|
```zsh
|
||||||
|
pytest --cov; open ./coverage/html/index.html
|
||||||
|
```
|
||||||
|
|
||||||
|
??? info "HTML coverage report output"
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
## Front End
|
||||||
|
|
||||||
|
<!--#TODO: get input from blessedcoolant here, for the moment inserted the frontend README via snippets extension.-->
|
||||||
|
|
||||||
|
--8<-- "invokeai/frontend/web/README.md"
|
||||||
@@ -168,11 +168,15 @@ used by Stable Diffusion 1.4 and 1.5.
|
|||||||
After installation, your `models.yaml` should contain an entry that looks like
|
After installation, your `models.yaml` should contain an entry that looks like
|
||||||
this one:
|
this one:
|
||||||
|
|
||||||
inpainting-1.5: weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
|
```yml
|
||||||
description: SD inpainting v1.5 config:
|
inpainting-1.5:
|
||||||
configs/stable-diffusion/v1-inpainting-inference.yaml vae:
|
weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
|
||||||
models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt width: 512
|
description: SD inpainting v1.5
|
||||||
height: 512
|
config: configs/stable-diffusion/v1-inpainting-inference.yaml
|
||||||
|
vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
|
||||||
|
width: 512
|
||||||
|
height: 512
|
||||||
|
```
|
||||||
|
|
||||||
As shown in the example, you may include a VAE fine-tuning weights file as well.
|
As shown in the example, you may include a VAE fine-tuning weights file as well.
|
||||||
This is strongly recommended.
|
This is strongly recommended.
|
||||||
|
|||||||
171
docs/features/LOGGING.md
Normal file
171
docs/features/LOGGING.md
Normal file
@@ -0,0 +1,171 @@
|
|||||||
|
---
|
||||||
|
title: Controlling Logging
|
||||||
|
---
|
||||||
|
|
||||||
|
# :material-image-off: Controlling Logging
|
||||||
|
|
||||||
|
## Controlling How InvokeAI Logs Status Messages
|
||||||
|
|
||||||
|
InvokeAI logs status messages using a configurable logging system. You
|
||||||
|
can log to the terminal window, to a designated file on the local
|
||||||
|
machine, to the syslog facility on a Linux or Mac, or to a properly
|
||||||
|
configured web server. You can configure several logs at the same
|
||||||
|
time, and control the level of message logged and the logging format
|
||||||
|
(to a limited extent).
|
||||||
|
|
||||||
|
Three command-line options control logging:
|
||||||
|
|
||||||
|
### `--log_handlers <handler1> <handler2> ...`
|
||||||
|
|
||||||
|
This option activates one or more log handlers. Options are "console",
|
||||||
|
"file", "syslog" and "http". To specify more than one, separate them
|
||||||
|
by spaces:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
invokeai-web --log_handlers console syslog=/dev/log file=C:\Users\fred\invokeai.log
|
||||||
|
```
|
||||||
|
|
||||||
|
The format of these options is described below.
|
||||||
|
|
||||||
|
### `--log_format {plain|color|legacy|syslog}`
|
||||||
|
|
||||||
|
This controls the format of log messages written to the console. Only
|
||||||
|
the "console" log handler is currently affected by this setting.
|
||||||
|
|
||||||
|
* "plain" provides formatted messages like this:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
|
||||||
|
[2023-05-24 23:18:50,352]::[InvokeAI]::DEBUG --> this is a debug message
|
||||||
|
[2023-05-24 23:18:50,352]::[InvokeAI]::INFO --> this is an informational messages
|
||||||
|
[2023-05-24 23:18:50,352]::[InvokeAI]::WARNING --> this is a warning
|
||||||
|
[2023-05-24 23:18:50,352]::[InvokeAI]::ERROR --> this is an error
|
||||||
|
[2023-05-24 23:18:50,352]::[InvokeAI]::CRITICAL --> this is a critical error
|
||||||
|
```
|
||||||
|
|
||||||
|
* "color" produces similar output, but the text will be color coded to
|
||||||
|
indicate the severity of the message.
|
||||||
|
|
||||||
|
* "legacy" produces output similar to InvokeAI versions 2.3 and earlier:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
### this is a critical error
|
||||||
|
*** this is an error
|
||||||
|
** this is a warning
|
||||||
|
>> this is an informational messages
|
||||||
|
| this is a debug message
|
||||||
|
```
|
||||||
|
|
||||||
|
* "syslog" produces messages suitable for syslog entries:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
InvokeAI [2691178] <CRITICAL> this is a critical error
|
||||||
|
InvokeAI [2691178] <ERROR> this is an error
|
||||||
|
InvokeAI [2691178] <WARNING> this is a warning
|
||||||
|
InvokeAI [2691178] <INFO> this is an informational messages
|
||||||
|
InvokeAI [2691178] <DEBUG> this is a debug message
|
||||||
|
```
|
||||||
|
|
||||||
|
(note that the date, time and hostname will be added by the syslog
|
||||||
|
system)
|
||||||
|
|
||||||
|
### `--log_level {debug|info|warning|error|critical}`
|
||||||
|
|
||||||
|
Providing this command-line option will cause only messages at the
|
||||||
|
specified level or above to be emitted.
|
||||||
|
|
||||||
|
## Console logging
|
||||||
|
|
||||||
|
When "console" is provided to `--log_handlers`, messages will be
|
||||||
|
written to the command line window in which InvokeAI was launched. By
|
||||||
|
default, the color formatter will be used unless overridden by
|
||||||
|
`--log_format`.
|
||||||
|
|
||||||
|
## File logging
|
||||||
|
|
||||||
|
When "file" is provided to `--log_handlers`, entries will be written
|
||||||
|
to the file indicated in the path argument. By default, the "plain"
|
||||||
|
format will be used:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
invokeai-web --log_handlers file=/var/log/invokeai.log
|
||||||
|
```
|
||||||
|
|
||||||
|
## Syslog logging
|
||||||
|
|
||||||
|
When "syslog" is requested, entries will be sent to the syslog
|
||||||
|
system. There are a variety of ways to control where the log message
|
||||||
|
is sent:
|
||||||
|
|
||||||
|
* Send to the local machine using the `/dev/log` socket:
|
||||||
|
|
||||||
|
```
|
||||||
|
invokeai-web --log_handlers syslog=/dev/log
|
||||||
|
```
|
||||||
|
|
||||||
|
* Send to the local machine using a UDP message:
|
||||||
|
|
||||||
|
```
|
||||||
|
invokeai-web --log_handlers syslog=localhost
|
||||||
|
```
|
||||||
|
|
||||||
|
* Send to the local machine using a UDP message on a nonstandard
|
||||||
|
port:
|
||||||
|
|
||||||
|
```
|
||||||
|
invokeai-web --log_handlers syslog=localhost:512
|
||||||
|
```
|
||||||
|
|
||||||
|
* Send to a remote machine named "loghost" on the local LAN using
|
||||||
|
facility LOG_USER and UDP packets:
|
||||||
|
|
||||||
|
```
|
||||||
|
invokeai-web --log_handlers syslog=loghost,facility=LOG_USER,socktype=SOCK_DGRAM
|
||||||
|
```
|
||||||
|
|
||||||
|
This can be abbreviated `syslog=loghost`, as LOG_USER and SOCK_DGRAM
|
||||||
|
are defaults.
|
||||||
|
|
||||||
|
* Send to a remote machine named "loghost" using the facility LOCAL0
|
||||||
|
and using a TCP socket:
|
||||||
|
|
||||||
|
```
|
||||||
|
invokeai-web --log_handlers syslog=loghost,facility=LOG_LOCAL0,socktype=SOCK_STREAM
|
||||||
|
```
|
||||||
|
|
||||||
|
If no arguments are specified (just a bare "syslog"), then the logging
|
||||||
|
system will look for a UNIX socket named `/dev/log`, and if not found
|
||||||
|
try to send a UDP message to `localhost`. The Macintosh OS used to
|
||||||
|
support logging to a socket named `/var/run/syslog`, but this feature
|
||||||
|
has since been disabled.
|
||||||
|
|
||||||
|
## Web logging
|
||||||
|
|
||||||
|
If you have access to a web server that is configured to log messages
|
||||||
|
when a particular URL is requested, you can log using the "http"
|
||||||
|
method:
|
||||||
|
|
||||||
|
```
|
||||||
|
invokeai-web --log_handlers http=http://my.server/path/to/logger,method=POST
|
||||||
|
```
|
||||||
|
|
||||||
|
The optional [,method=] part can be used to specify whether the URL
|
||||||
|
accepts GET (default) or POST messages.
|
||||||
|
|
||||||
|
Currently password authentication and SSL are not supported.
|
||||||
|
|
||||||
|
## Using the configuration file
|
||||||
|
|
||||||
|
You can set and forget logging options by adding a "Logging" section
|
||||||
|
to `invokeai.yaml`:
|
||||||
|
|
||||||
|
```
|
||||||
|
InvokeAI:
|
||||||
|
[... other settings...]
|
||||||
|
Logging:
|
||||||
|
log_handlers:
|
||||||
|
- console
|
||||||
|
- syslog=/dev/log
|
||||||
|
log_level: info
|
||||||
|
log_format: color
|
||||||
|
```
|
||||||
@@ -32,7 +32,7 @@ turned on and off on the command line using `--nsfw_checker` and
|
|||||||
At installation time, InvokeAI will ask whether the checker should be
|
At installation time, InvokeAI will ask whether the checker should be
|
||||||
activated by default (neither argument given on the command line). The
|
activated by default (neither argument given on the command line). The
|
||||||
response is stored in the InvokeAI initialization file (usually
|
response is stored in the InvokeAI initialization file (usually
|
||||||
`.invokeai` in your home directory). You can change the default at any
|
`invokeai.init` in your home directory). You can change the default at any
|
||||||
time by opening this file in a text editor and commenting or
|
time by opening this file in a text editor and commenting or
|
||||||
uncommenting the line `--nsfw_checker`.
|
uncommenting the line `--nsfw_checker`.
|
||||||
|
|
||||||
|
|||||||
@@ -268,7 +268,7 @@ model is so good at inpainting, a good substitute is to use the `clipseg` text
|
|||||||
masking option:
|
masking option:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
invoke> a fluffy cat eating a hotdot
|
invoke> a fluffy cat eating a hotdog
|
||||||
Outputs:
|
Outputs:
|
||||||
[1010] outputs/000025.2182095108.png: a fluffy cat eating a hotdog
|
[1010] outputs/000025.2182095108.png: a fluffy cat eating a hotdog
|
||||||
invoke> a smiling dog eating a hotdog -I 000025.2182095108.png -tm cat
|
invoke> a smiling dog eating a hotdog -I 000025.2182095108.png -tm cat
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ notebooks.
|
|||||||
|
|
||||||
You will need a GPU to perform training in a reasonable length of
|
You will need a GPU to perform training in a reasonable length of
|
||||||
time, and at least 12 GB of VRAM. We recommend using the [`xformers`
|
time, and at least 12 GB of VRAM. We recommend using the [`xformers`
|
||||||
library](../installation/070_INSTALL_XFORMERS) to accelerate the
|
library](../installation/070_INSTALL_XFORMERS.md) to accelerate the
|
||||||
training process further. During training, about ~8 GB is temporarily
|
training process further. During training, about ~8 GB is temporarily
|
||||||
needed in order to store intermediate models, checkpoints and logs.
|
needed in order to store intermediate models, checkpoints and logs.
|
||||||
|
|
||||||
|
|||||||
@@ -57,6 +57,9 @@ Personalize models by adding your own style or subjects.
|
|||||||
## * [The NSFW Checker](NSFW.md)
|
## * [The NSFW Checker](NSFW.md)
|
||||||
Prevent InvokeAI from displaying unwanted racy images.
|
Prevent InvokeAI from displaying unwanted racy images.
|
||||||
|
|
||||||
|
## * [Controlling Logging](LOGGING.md)
|
||||||
|
Control how InvokeAI logs status messages.
|
||||||
|
|
||||||
## * [Miscellaneous](OTHER.md)
|
## * [Miscellaneous](OTHER.md)
|
||||||
Run InvokeAI on Google Colab, generate images with repeating patterns,
|
Run InvokeAI on Google Colab, generate images with repeating patterns,
|
||||||
batch process a file of prompts, increase the "creativity" of image
|
batch process a file of prompts, increase the "creativity" of image
|
||||||
|
|||||||
@@ -89,7 +89,7 @@ experimental versions later.
|
|||||||
sudo apt update
|
sudo apt update
|
||||||
sudo apt install -y software-properties-common
|
sudo apt install -y software-properties-common
|
||||||
sudo add-apt-repository -y ppa:deadsnakes/ppa
|
sudo add-apt-repository -y ppa:deadsnakes/ppa
|
||||||
sudo apt install python3.10 python3-pip python3.10-venv
|
sudo apt install -y python3.10 python3-pip python3.10-venv
|
||||||
sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
|
sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -417,7 +417,7 @@ Then type the following commands:
|
|||||||
|
|
||||||
=== "AMD System"
|
=== "AMD System"
|
||||||
```bash
|
```bash
|
||||||
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.2
|
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
||||||
```
|
```
|
||||||
|
|
||||||
### Corrupted configuration file
|
### Corrupted configuration file
|
||||||
|
|||||||
@@ -148,13 +148,13 @@ manager, please follow these steps:
|
|||||||
=== "CUDA (NVidia)"
|
=== "CUDA (NVidia)"
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "ROCm (AMD)"
|
=== "ROCm (AMD)"
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
|
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "CPU (Intel Macs & non-GPU systems)"
|
=== "CPU (Intel Macs & non-GPU systems)"
|
||||||
@@ -216,7 +216,7 @@ manager, please follow these steps:
|
|||||||
9. Run the command-line- or the web- interface:
|
9. Run the command-line- or the web- interface:
|
||||||
|
|
||||||
From within INVOKEAI_ROOT, activate the environment
|
From within INVOKEAI_ROOT, activate the environment
|
||||||
(with `source .venv/bin/activate` or `.venv\scripts\activate), and then run
|
(with `source .venv/bin/activate` or `.venv\scripts\activate`), and then run
|
||||||
the script `invokeai`. If the virtual environment you selected is NOT inside
|
the script `invokeai`. If the virtual environment you selected is NOT inside
|
||||||
INVOKEAI_ROOT, then you must specify the path to the root directory by adding
|
INVOKEAI_ROOT, then you must specify the path to the root directory by adding
|
||||||
`--root_dir \path\to\invokeai` to the commands below:
|
`--root_dir \path\to\invokeai` to the commands below:
|
||||||
@@ -315,7 +315,7 @@ installation protocol (important!)
|
|||||||
|
|
||||||
=== "ROCm (AMD)"
|
=== "ROCm (AMD)"
|
||||||
```bash
|
```bash
|
||||||
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
|
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "CPU (Intel Macs & non-GPU systems)"
|
=== "CPU (Intel Macs & non-GPU systems)"
|
||||||
|
|||||||
@@ -110,7 +110,7 @@ recipes are available
|
|||||||
|
|
||||||
When installing torch and torchvision manually with `pip`, remember to provide
|
When installing torch and torchvision manually with `pip`, remember to provide
|
||||||
the argument `--extra-index-url
|
the argument `--extra-index-url
|
||||||
https://download.pytorch.org/whl/rocm5.2` as described in the [Manual
|
https://download.pytorch.org/whl/rocm5.4.2` as described in the [Manual
|
||||||
Installation Guide](020_INSTALL_MANUAL.md).
|
Installation Guide](020_INSTALL_MANUAL.md).
|
||||||
|
|
||||||
This will be done automatically for you if you use the installer
|
This will be done automatically for you if you use the installer
|
||||||
|
|||||||
@@ -50,7 +50,7 @@ subset that are currently installed are found in
|
|||||||
|stable-diffusion-1.5|runwayml/stable-diffusion-v1-5|Stable Diffusion version 1.5 diffusers model (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-v1-5 |
|
|stable-diffusion-1.5|runwayml/stable-diffusion-v1-5|Stable Diffusion version 1.5 diffusers model (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-v1-5 |
|
||||||
|sd-inpainting-1.5|runwayml/stable-diffusion-inpainting|RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-inpainting |
|
|sd-inpainting-1.5|runwayml/stable-diffusion-inpainting|RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-inpainting |
|
||||||
|stable-diffusion-2.1|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|
|stable-diffusion-2.1|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|
||||||
|sd-inpainting-2.0|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.0 inpainting model (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|
|sd-inpainting-2.0|stabilityai/stable-diffusion-2-inpainting|Stable Diffusion version 2.0 inpainting model (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-inpainting |
|
||||||
|analog-diffusion-1.0|wavymulder/Analog-Diffusion|An SD-1.5 model trained on diverse analog photographs (2.13 GB)|https://huggingface.co/wavymulder/Analog-Diffusion |
|
|analog-diffusion-1.0|wavymulder/Analog-Diffusion|An SD-1.5 model trained on diverse analog photographs (2.13 GB)|https://huggingface.co/wavymulder/Analog-Diffusion |
|
||||||
|deliberate-1.0|XpucT/Deliberate|Versatile model that produces detailed images up to 768px (4.27 GB)|https://huggingface.co/XpucT/Deliberate |
|
|deliberate-1.0|XpucT/Deliberate|Versatile model that produces detailed images up to 768px (4.27 GB)|https://huggingface.co/XpucT/Deliberate |
|
||||||
|d&d-diffusion-1.0|0xJustin/Dungeons-and-Diffusion|Dungeons & Dragons characters (2.13 GB)|https://huggingface.co/0xJustin/Dungeons-and-Diffusion |
|
|d&d-diffusion-1.0|0xJustin/Dungeons-and-Diffusion|Dungeons & Dragons characters (2.13 GB)|https://huggingface.co/0xJustin/Dungeons-and-Diffusion |
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ You need to have opencv installed so that pypatchmatch can be built:
|
|||||||
brew install opencv
|
brew install opencv
|
||||||
```
|
```
|
||||||
|
|
||||||
The next time you start `invoke`, after sucesfully installing opencv, pypatchmatch will be built.
|
The next time you start `invoke`, after successfully installing opencv, pypatchmatch will be built.
|
||||||
|
|
||||||
## Linux
|
## Linux
|
||||||
|
|
||||||
@@ -56,7 +56,7 @@ Prior to installing PyPatchMatch, you need to take the following steps:
|
|||||||
|
|
||||||
5. Confirm that pypatchmatch is installed. At the command-line prompt enter
|
5. Confirm that pypatchmatch is installed. At the command-line prompt enter
|
||||||
`python`, and then at the `>>>` line type
|
`python`, and then at the `>>>` line type
|
||||||
`from patchmatch import patch_match`: It should look like the follwing:
|
`from patchmatch import patch_match`: It should look like the following:
|
||||||
|
|
||||||
```py
|
```py
|
||||||
Python 3.9.5 (default, Nov 23 2021, 15:27:38)
|
Python 3.9.5 (default, Nov 23 2021, 15:27:38)
|
||||||
@@ -108,4 +108,4 @@ Prior to installing PyPatchMatch, you need to take the following steps:
|
|||||||
|
|
||||||
[**Next, Follow Steps 4-6 from the Debian Section above**](#linux)
|
[**Next, Follow Steps 4-6 from the Debian Section above**](#linux)
|
||||||
|
|
||||||
If you see no errors, then you're ready to go!
|
If you see no errors you're ready to go!
|
||||||
|
|||||||
@@ -11,10 +11,10 @@ if [[ -v "VIRTUAL_ENV" ]]; then
|
|||||||
exit -1
|
exit -1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
VERSION=$(cd ..; python -c "from ldm.invoke import __version__ as version; print(version)")
|
VERSION=$(cd ..; python -c "from invokeai.version import __version__ as version; print(version)")
|
||||||
PATCH=""
|
PATCH=""
|
||||||
VERSION="v${VERSION}${PATCH}"
|
VERSION="v${VERSION}${PATCH}"
|
||||||
LATEST_TAG="v2.3-latest"
|
LATEST_TAG="v3.0-latest"
|
||||||
|
|
||||||
echo Building installer for version $VERSION
|
echo Building installer for version $VERSION
|
||||||
echo "Be certain that you're in the 'installer' directory before continuing."
|
echo "Be certain that you're in the 'installer' directory before continuing."
|
||||||
|
|||||||
@@ -247,8 +247,8 @@ class InvokeAiInstance:
|
|||||||
pip[
|
pip[
|
||||||
"install",
|
"install",
|
||||||
"--require-virtualenv",
|
"--require-virtualenv",
|
||||||
"torch",
|
"torch~=2.0.0",
|
||||||
"torchvision",
|
"torchvision>=0.14.1",
|
||||||
"--force-reinstall",
|
"--force-reinstall",
|
||||||
"--find-links" if find_links is not None else None,
|
"--find-links" if find_links is not None else None,
|
||||||
find_links,
|
find_links,
|
||||||
@@ -291,7 +291,7 @@ class InvokeAiInstance:
|
|||||||
src = Path(__file__).parents[1].expanduser().resolve()
|
src = Path(__file__).parents[1].expanduser().resolve()
|
||||||
# if the above directory contains one of these files, we'll do a source install
|
# if the above directory contains one of these files, we'll do a source install
|
||||||
next(src.glob("pyproject.toml"))
|
next(src.glob("pyproject.toml"))
|
||||||
next(src.glob("ldm"))
|
next(src.glob("invokeai"))
|
||||||
except StopIteration:
|
except StopIteration:
|
||||||
print("Unable to find a wheel or perform a source install. Giving up.")
|
print("Unable to find a wheel or perform a source install. Giving up.")
|
||||||
|
|
||||||
@@ -342,14 +342,14 @@ class InvokeAiInstance:
|
|||||||
|
|
||||||
introduction()
|
introduction()
|
||||||
|
|
||||||
from ldm.invoke.config import invokeai_configure
|
from invokeai.frontend.install import invokeai_configure
|
||||||
|
|
||||||
# NOTE: currently the config script does its own arg parsing! this means the command-line switches
|
# NOTE: currently the config script does its own arg parsing! this means the command-line switches
|
||||||
# from the installer will also automatically propagate down to the config script.
|
# from the installer will also automatically propagate down to the config script.
|
||||||
# this may change in the future with config refactoring!
|
# this may change in the future with config refactoring!
|
||||||
succeeded = False
|
succeeded = False
|
||||||
try:
|
try:
|
||||||
invokeai_configure.main()
|
invokeai_configure()
|
||||||
succeeded = True
|
succeeded = True
|
||||||
except requests.exceptions.ConnectionError as e:
|
except requests.exceptions.ConnectionError as e:
|
||||||
print(f'\nA network error was encountered during configuration and download: {str(e)}')
|
print(f'\nA network error was encountered during configuration and download: {str(e)}')
|
||||||
@@ -456,7 +456,7 @@ def get_torch_source() -> (Union[str, None],str):
|
|||||||
optional_modules = None
|
optional_modules = None
|
||||||
if OS == "Linux":
|
if OS == "Linux":
|
||||||
if device == "rocm":
|
if device == "rocm":
|
||||||
url = "https://download.pytorch.org/whl/rocm5.2"
|
url = "https://download.pytorch.org/whl/rocm5.4.2"
|
||||||
elif device == "cpu":
|
elif device == "cpu":
|
||||||
url = "https://download.pytorch.org/whl/cpu"
|
url = "https://download.pytorch.org/whl/cpu"
|
||||||
|
|
||||||
|
|||||||
@@ -7,42 +7,42 @@ call .venv\Scripts\activate.bat
|
|||||||
set INVOKEAI_ROOT=.
|
set INVOKEAI_ROOT=.
|
||||||
|
|
||||||
:start
|
:start
|
||||||
echo Do you want to generate images using the
|
echo Desired action:
|
||||||
echo 1. command-line interface
|
echo 1. Generate images with the browser-based interface
|
||||||
echo 2. browser-based UI
|
echo 2. Explore InvokeAI nodes using a command-line interface
|
||||||
echo 3. run textual inversion training
|
echo 3. Run textual inversion training
|
||||||
echo 4. merge models (diffusers type only)
|
echo 4. Merge models (diffusers type only)
|
||||||
echo 5. download and install models
|
echo 5. Download and install models
|
||||||
echo 6. change InvokeAI startup options
|
echo 6. Change InvokeAI startup options
|
||||||
echo 7. re-run the configure script to fix a broken install
|
echo 7. Re-run the configure script to fix a broken install
|
||||||
echo 8. open the developer console
|
echo 8. Open the developer console
|
||||||
echo 9. update InvokeAI
|
echo 9. Update InvokeAI
|
||||||
echo 10. command-line help
|
echo 10. Command-line help
|
||||||
echo Q - quit
|
echo Q - Quit
|
||||||
set /P restore="Please enter 1-10, Q: [2] "
|
set /P choice="Please enter 1-10, Q: [2] "
|
||||||
if not defined restore set restore=2
|
if not defined choice set choice=2
|
||||||
IF /I "%restore%" == "1" (
|
IF /I "%choice%" == "1" (
|
||||||
|
echo Starting the InvokeAI browser-based UI..
|
||||||
|
python .venv\Scripts\invokeai-web.exe %*
|
||||||
|
) ELSE IF /I "%choice%" == "2" (
|
||||||
echo Starting the InvokeAI command-line..
|
echo Starting the InvokeAI command-line..
|
||||||
python .venv\Scripts\invokeai.exe %*
|
python .venv\Scripts\invokeai.exe %*
|
||||||
) ELSE IF /I "%restore%" == "2" (
|
) ELSE IF /I "%choice%" == "3" (
|
||||||
echo Starting the InvokeAI browser-based UI..
|
|
||||||
python .venv\Scripts\invokeai.exe --web %*
|
|
||||||
) ELSE IF /I "%restore%" == "3" (
|
|
||||||
echo Starting textual inversion training..
|
echo Starting textual inversion training..
|
||||||
python .venv\Scripts\invokeai-ti.exe --gui
|
python .venv\Scripts\invokeai-ti.exe --gui
|
||||||
) ELSE IF /I "%restore%" == "4" (
|
) ELSE IF /I "%choice%" == "4" (
|
||||||
echo Starting model merging script..
|
echo Starting model merging script..
|
||||||
python .venv\Scripts\invokeai-merge.exe --gui
|
python .venv\Scripts\invokeai-merge.exe --gui
|
||||||
) ELSE IF /I "%restore%" == "5" (
|
) ELSE IF /I "%choice%" == "5" (
|
||||||
echo Running invokeai-model-install...
|
echo Running invokeai-model-install...
|
||||||
python .venv\Scripts\invokeai-model-install.exe
|
python .venv\Scripts\invokeai-model-install.exe
|
||||||
) ELSE IF /I "%restore%" == "6" (
|
) ELSE IF /I "%choice%" == "6" (
|
||||||
echo Running invokeai-configure...
|
echo Running invokeai-configure...
|
||||||
python .venv\Scripts\invokeai-configure.exe --skip-sd-weight --skip-support-models
|
python .venv\Scripts\invokeai-configure.exe --skip-sd-weight --skip-support-models
|
||||||
) ELSE IF /I "%restore%" == "7" (
|
) ELSE IF /I "%choice%" == "7" (
|
||||||
echo Running invokeai-configure...
|
echo Running invokeai-configure...
|
||||||
python .venv\Scripts\invokeai-configure.exe --yes --default_only
|
python .venv\Scripts\invokeai-configure.exe --yes --default_only
|
||||||
) ELSE IF /I "%restore%" == "8" (
|
) ELSE IF /I "%choice%" == "8" (
|
||||||
echo Developer Console
|
echo Developer Console
|
||||||
echo Python command is:
|
echo Python command is:
|
||||||
where python
|
where python
|
||||||
@@ -54,15 +54,15 @@ IF /I "%restore%" == "1" (
|
|||||||
echo *************************
|
echo *************************
|
||||||
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
|
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
|
||||||
call cmd /k
|
call cmd /k
|
||||||
) ELSE IF /I "%restore%" == "9" (
|
) ELSE IF /I "%choice%" == "9" (
|
||||||
echo Running invokeai-update...
|
echo Running invokeai-update...
|
||||||
python .venv\Scripts\invokeai-update.exe %*
|
python .venv\Scripts\invokeai-update.exe %*
|
||||||
) ELSE IF /I "%restore%" == "10" (
|
) ELSE IF /I "%choice%" == "10" (
|
||||||
echo Displaying command line help...
|
echo Displaying command line help...
|
||||||
python .venv\Scripts\invokeai.exe --help %*
|
python .venv\Scripts\invokeai.exe --help %*
|
||||||
pause
|
pause
|
||||||
exit /b
|
exit /b
|
||||||
) ELSE IF /I "%restore%" == "q" (
|
) ELSE IF /I "%choice%" == "q" (
|
||||||
echo Goodbye!
|
echo Goodbye!
|
||||||
goto ending
|
goto ending
|
||||||
) ELSE (
|
) ELSE (
|
||||||
|
|||||||
@@ -1,5 +1,10 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
|
# MIT License
|
||||||
|
|
||||||
|
# Coauthored by Lincoln Stein, Eugene Brodsky and Joshua Kimsey
|
||||||
|
# Copyright 2023, The InvokeAI Development Team
|
||||||
|
|
||||||
####
|
####
|
||||||
# This launch script assumes that:
|
# This launch script assumes that:
|
||||||
# 1. it is located in the runtime directory,
|
# 1. it is located in the runtime directory,
|
||||||
@@ -11,85 +16,168 @@
|
|||||||
|
|
||||||
set -eu
|
set -eu
|
||||||
|
|
||||||
# ensure we're in the correct folder in case user's CWD is somewhere else
|
# Ensure we're in the correct folder in case user's CWD is somewhere else
|
||||||
scriptdir=$(dirname "$0")
|
scriptdir=$(dirname "$0")
|
||||||
cd "$scriptdir"
|
cd "$scriptdir"
|
||||||
|
|
||||||
. .venv/bin/activate
|
. .venv/bin/activate
|
||||||
|
|
||||||
export INVOKEAI_ROOT="$scriptdir"
|
export INVOKEAI_ROOT="$scriptdir"
|
||||||
|
PARAMS=$@
|
||||||
|
|
||||||
# set required env var for torch on mac MPS
|
# Check to see if dialog is installed (it seems to be fairly standard, but good to check regardless) and if the user has passed the --no-tui argument to disable the dialog TUI
|
||||||
|
tui=true
|
||||||
|
if command -v dialog &>/dev/null; then
|
||||||
|
# This must use $@ to properly loop through the arguments passed by the user
|
||||||
|
for arg in "$@"; do
|
||||||
|
if [ "$arg" == "--no-tui" ]; then
|
||||||
|
tui=false
|
||||||
|
# Remove the --no-tui argument to avoid errors later on when passing arguments to InvokeAI
|
||||||
|
PARAMS=$(echo "$PARAMS" | sed 's/--no-tui//')
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
else
|
||||||
|
tui=false
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Set required env var for torch on mac MPS
|
||||||
if [ "$(uname -s)" == "Darwin" ]; then
|
if [ "$(uname -s)" == "Darwin" ]; then
|
||||||
export PYTORCH_ENABLE_MPS_FALLBACK=1
|
export PYTORCH_ENABLE_MPS_FALLBACK=1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
while true
|
# Primary function for the case statement to determine user input
|
||||||
do
|
do_choice() {
|
||||||
if [ "$0" != "bash" ]; then
|
case $1 in
|
||||||
echo "Do you want to generate images using the"
|
1)
|
||||||
echo "1. command-line interface"
|
clear
|
||||||
echo "2. browser-based UI"
|
printf "Generate images with a browser-based interface\n"
|
||||||
echo "3. run textual inversion training"
|
invokeai-web $PARAMS
|
||||||
echo "4. merge models (diffusers type only)"
|
;;
|
||||||
echo "5. download and install models"
|
2)
|
||||||
echo "6. change InvokeAI startup options"
|
clear
|
||||||
echo "7. re-run the configure script to fix a broken install"
|
printf "Explore InvokeAI nodes using a command-line interface\n"
|
||||||
echo "8. open the developer console"
|
invokeai $PARAMS
|
||||||
echo "9. update InvokeAI"
|
;;
|
||||||
echo "10. command-line help"
|
3)
|
||||||
echo "Q - Quit"
|
clear
|
||||||
echo ""
|
printf "Textual inversion training\n"
|
||||||
read -p "Please enter 1-10, Q: [2] " yn
|
invokeai-ti --gui $PARAMS
|
||||||
choice=${yn:='2'}
|
;;
|
||||||
case $choice in
|
4)
|
||||||
1)
|
clear
|
||||||
echo "Starting the InvokeAI command-line..."
|
printf "Merge models (diffusers type only)\n"
|
||||||
invokeai $@
|
invokeai-merge --gui $PARAMS
|
||||||
;;
|
;;
|
||||||
2)
|
5)
|
||||||
echo "Starting the InvokeAI browser-based UI..."
|
clear
|
||||||
invokeai --web $@
|
printf "Download and install models\n"
|
||||||
;;
|
invokeai-model-install --root ${INVOKEAI_ROOT}
|
||||||
3)
|
;;
|
||||||
echo "Starting Textual Inversion:"
|
6)
|
||||||
invokeai-ti --gui $@
|
clear
|
||||||
;;
|
printf "Change InvokeAI startup options\n"
|
||||||
4)
|
invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
|
||||||
echo "Merging Models:"
|
;;
|
||||||
invokeai-merge --gui $@
|
7)
|
||||||
;;
|
clear
|
||||||
5)
|
printf "Re-run the configure script to fix a broken install\n"
|
||||||
invokeai-model-install --root ${INVOKEAI_ROOT}
|
invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
|
||||||
;;
|
;;
|
||||||
6)
|
8)
|
||||||
invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
|
clear
|
||||||
;;
|
printf "Open the developer console\n"
|
||||||
7)
|
file_name=$(basename "${BASH_SOURCE[0]}")
|
||||||
invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
|
bash --init-file "$file_name"
|
||||||
;;
|
;;
|
||||||
8)
|
9)
|
||||||
echo "Developer Console:"
|
clear
|
||||||
file_name=$(basename "${BASH_SOURCE[0]}")
|
printf "Update InvokeAI\n"
|
||||||
bash --init-file "$file_name"
|
invokeai-update
|
||||||
;;
|
;;
|
||||||
9)
|
10)
|
||||||
echo "Update:"
|
clear
|
||||||
invokeai-update
|
printf "Command-line help\n"
|
||||||
;;
|
invokeai --help
|
||||||
10)
|
;;
|
||||||
invokeai --help
|
"HELP 1")
|
||||||
;;
|
clear
|
||||||
[qQ])
|
printf "Command-line help\n"
|
||||||
exit 0
|
invokeai --help
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
echo "Invalid selection"
|
clear
|
||||||
exit;;
|
printf "Exiting...\n"
|
||||||
|
exit
|
||||||
|
;;
|
||||||
esac
|
esac
|
||||||
|
clear
|
||||||
|
}
|
||||||
|
|
||||||
|
# Dialog-based TUI for launcing Invoke functions
|
||||||
|
do_dialog() {
|
||||||
|
options=(
|
||||||
|
1 "Generate images with a browser-based interface"
|
||||||
|
2 "Generate images using a command-line interface"
|
||||||
|
3 "Textual inversion training"
|
||||||
|
4 "Merge models (diffusers type only)"
|
||||||
|
5 "Download and install models"
|
||||||
|
6 "Change InvokeAI startup options"
|
||||||
|
7 "Re-run the configure script to fix a broken install"
|
||||||
|
8 "Open the developer console"
|
||||||
|
9 "Update InvokeAI")
|
||||||
|
|
||||||
|
choice=$(dialog --clear \
|
||||||
|
--backtitle "\Zb\Zu\Z3InvokeAI" \
|
||||||
|
--colors \
|
||||||
|
--title "What would you like to do?" \
|
||||||
|
--ok-label "Run" \
|
||||||
|
--cancel-label "Exit" \
|
||||||
|
--help-button \
|
||||||
|
--help-label "CLI Help" \
|
||||||
|
--menu "Select an option:" \
|
||||||
|
0 0 0 \
|
||||||
|
"${options[@]}" \
|
||||||
|
2>&1 >/dev/tty) || clear
|
||||||
|
do_choice "$choice"
|
||||||
|
clear
|
||||||
|
}
|
||||||
|
|
||||||
|
# Command-line interface for launching Invoke functions
|
||||||
|
do_line_input() {
|
||||||
|
clear
|
||||||
|
printf " ** For a more attractive experience, please install the 'dialog' utility using your package manager. **\n\n"
|
||||||
|
printf "What would you like to do?\n"
|
||||||
|
printf "1: Generate images using the browser-based interface\n"
|
||||||
|
printf "2: Explore InvokeAI nodes using the command-line interface\n"
|
||||||
|
printf "3: Run textual inversion training\n"
|
||||||
|
printf "4: Merge models (diffusers type only)\n"
|
||||||
|
printf "5: Download and install models\n"
|
||||||
|
printf "6: Change InvokeAI startup options\n"
|
||||||
|
printf "7: Re-run the configure script to fix a broken install\n"
|
||||||
|
printf "8: Open the developer console\n"
|
||||||
|
printf "9: Update InvokeAI\n"
|
||||||
|
printf "10: Command-line help\n"
|
||||||
|
printf "Q: Quit\n\n"
|
||||||
|
read -p "Please enter 1-10, Q: [1] " yn
|
||||||
|
choice=${yn:='1'}
|
||||||
|
do_choice $choice
|
||||||
|
clear
|
||||||
|
}
|
||||||
|
|
||||||
|
# Main IF statement for launching Invoke with either the TUI or CLI, and for checking if the user is in the developer console
|
||||||
|
if [ "$0" != "bash" ]; then
|
||||||
|
while true; do
|
||||||
|
if $tui; then
|
||||||
|
# .dialogrc must be located in the same directory as the invoke.sh script
|
||||||
|
export DIALOGRC="./.dialogrc"
|
||||||
|
do_dialog
|
||||||
|
else
|
||||||
|
do_line_input
|
||||||
|
fi
|
||||||
|
done
|
||||||
else # in developer console
|
else # in developer console
|
||||||
python --version
|
python --version
|
||||||
echo "Press ^D to exit"
|
printf "Press ^D to exit\n"
|
||||||
export PS1="(InvokeAI) \u@\h \w> "
|
export PS1="(InvokeAI) \u@\h \w> "
|
||||||
fi
|
fi
|
||||||
done
|
|
||||||
|
|||||||
@@ -1,3 +1,11 @@
|
|||||||
After version 2.3 is released, the ldm/invoke modules will be migrated to this location
|
Organization of the source tree:
|
||||||
so that we have a proper invokeai distribution. Currently it is only being used for
|
|
||||||
data files.
|
app -- Home of nodes invocations and services
|
||||||
|
assets -- Images and other data files used by InvokeAI
|
||||||
|
backend -- Non-user facing libraries, including the rendering
|
||||||
|
core.
|
||||||
|
configs -- Configuration files used at install and run times
|
||||||
|
frontend -- User-facing scripts, including the CLI and the WebUI
|
||||||
|
version -- Current InvokeAI version string, stored
|
||||||
|
in version/invokeai_version.py
|
||||||
|
|
||||||
108
invokeai/app/api/dependencies.py
Normal file
108
invokeai/app/api/dependencies.py
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
from logging import Logger
|
||||||
|
import os
|
||||||
|
from invokeai.app.services.image_record_storage import SqliteImageRecordStorage
|
||||||
|
from invokeai.app.services.images import ImageService
|
||||||
|
from invokeai.app.services.metadata import CoreMetadataService
|
||||||
|
from invokeai.app.services.resource_name import SimpleNameService
|
||||||
|
from invokeai.app.services.urls import LocalUrlService
|
||||||
|
from invokeai.backend.util.logging import InvokeAILogger
|
||||||
|
|
||||||
|
from ..services.default_graphs import create_system_graphs
|
||||||
|
from ..services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
|
||||||
|
from ..services.model_manager_initializer import get_model_manager
|
||||||
|
from ..services.restoration_services import RestorationServices
|
||||||
|
from ..services.graph import GraphExecutionState, LibraryGraph
|
||||||
|
from ..services.image_file_storage import DiskImageFileStorage
|
||||||
|
from ..services.invocation_queue import MemoryInvocationQueue
|
||||||
|
from ..services.invocation_services import InvocationServices
|
||||||
|
from ..services.invoker import Invoker
|
||||||
|
from ..services.processor import DefaultInvocationProcessor
|
||||||
|
from ..services.sqlite import SqliteItemStorage
|
||||||
|
from .events import FastAPIEventService
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: is there a better way to achieve this?
|
||||||
|
def check_internet() -> bool:
|
||||||
|
"""
|
||||||
|
Return true if the internet is reachable.
|
||||||
|
It does this by pinging huggingface.co.
|
||||||
|
"""
|
||||||
|
import urllib.request
|
||||||
|
|
||||||
|
host = "http://huggingface.co"
|
||||||
|
try:
|
||||||
|
urllib.request.urlopen(host, timeout=1)
|
||||||
|
return True
|
||||||
|
except:
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
logger = InvokeAILogger.getLogger()
|
||||||
|
|
||||||
|
|
||||||
|
class ApiDependencies:
|
||||||
|
"""Contains and initializes all dependencies for the API"""
|
||||||
|
|
||||||
|
invoker: Invoker = None
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def initialize(config, event_handler_id: int, logger: Logger = logger):
|
||||||
|
logger.info(f"Internet connectivity is {config.internet_available}")
|
||||||
|
|
||||||
|
events = FastAPIEventService(event_handler_id)
|
||||||
|
|
||||||
|
output_folder = config.output_path
|
||||||
|
|
||||||
|
# TODO: build a file/path manager?
|
||||||
|
db_location = config.db_path
|
||||||
|
db_location.parent.mkdir(parents=True,exist_ok=True)
|
||||||
|
|
||||||
|
graph_execution_manager = SqliteItemStorage[GraphExecutionState](
|
||||||
|
filename=db_location, table_name="graph_executions"
|
||||||
|
)
|
||||||
|
|
||||||
|
urls = LocalUrlService()
|
||||||
|
metadata = CoreMetadataService()
|
||||||
|
image_record_storage = SqliteImageRecordStorage(db_location)
|
||||||
|
image_file_storage = DiskImageFileStorage(f"{output_folder}/images")
|
||||||
|
names = SimpleNameService()
|
||||||
|
latents = ForwardCacheLatentsStorage(
|
||||||
|
DiskLatentsStorage(f"{output_folder}/latents")
|
||||||
|
)
|
||||||
|
|
||||||
|
images = ImageService(
|
||||||
|
image_record_storage=image_record_storage,
|
||||||
|
image_file_storage=image_file_storage,
|
||||||
|
metadata=metadata,
|
||||||
|
url=urls,
|
||||||
|
logger=logger,
|
||||||
|
names=names,
|
||||||
|
graph_execution_manager=graph_execution_manager,
|
||||||
|
)
|
||||||
|
|
||||||
|
services = InvocationServices(
|
||||||
|
model_manager=get_model_manager(config, logger),
|
||||||
|
events=events,
|
||||||
|
latents=latents,
|
||||||
|
images=images,
|
||||||
|
queue=MemoryInvocationQueue(),
|
||||||
|
graph_library=SqliteItemStorage[LibraryGraph](
|
||||||
|
filename=db_location, table_name="graphs"
|
||||||
|
),
|
||||||
|
graph_execution_manager=graph_execution_manager,
|
||||||
|
processor=DefaultInvocationProcessor(),
|
||||||
|
restoration=RestorationServices(config, logger),
|
||||||
|
configuration=config,
|
||||||
|
logger=logger,
|
||||||
|
)
|
||||||
|
|
||||||
|
create_system_graphs(services.graph_library)
|
||||||
|
|
||||||
|
ApiDependencies.invoker = Invoker(services)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def shutdown():
|
||||||
|
if ApiDependencies.invoker:
|
||||||
|
ApiDependencies.invoker.stop()
|
||||||
@@ -1,11 +1,14 @@
|
|||||||
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
import asyncio
|
import asyncio
|
||||||
|
import threading
|
||||||
from queue import Empty, Queue
|
from queue import Empty, Queue
|
||||||
from typing import Any
|
from typing import Any
|
||||||
|
|
||||||
from fastapi_events.dispatcher import dispatch
|
from fastapi_events.dispatcher import dispatch
|
||||||
|
|
||||||
from ..services.events import EventServiceBase
|
from ..services.events import EventServiceBase
|
||||||
import threading
|
|
||||||
|
|
||||||
class FastAPIEventService(EventServiceBase):
|
class FastAPIEventService(EventServiceBase):
|
||||||
event_handler_id: int
|
event_handler_id: int
|
||||||
@@ -16,39 +19,34 @@ class FastAPIEventService(EventServiceBase):
|
|||||||
self.event_handler_id = event_handler_id
|
self.event_handler_id = event_handler_id
|
||||||
self.__queue = Queue()
|
self.__queue = Queue()
|
||||||
self.__stop_event = threading.Event()
|
self.__stop_event = threading.Event()
|
||||||
asyncio.create_task(self.__dispatch_from_queue(stop_event = self.__stop_event))
|
asyncio.create_task(self.__dispatch_from_queue(stop_event=self.__stop_event))
|
||||||
|
|
||||||
super().__init__()
|
super().__init__()
|
||||||
|
|
||||||
|
|
||||||
def stop(self, *args, **kwargs):
|
def stop(self, *args, **kwargs):
|
||||||
self.__stop_event.set()
|
self.__stop_event.set()
|
||||||
self.__queue.put(None)
|
self.__queue.put(None)
|
||||||
|
|
||||||
|
|
||||||
def dispatch(self, event_name: str, payload: Any) -> None:
|
def dispatch(self, event_name: str, payload: Any) -> None:
|
||||||
self.__queue.put(dict(
|
self.__queue.put(dict(event_name=event_name, payload=payload))
|
||||||
event_name = event_name,
|
|
||||||
payload = payload
|
|
||||||
))
|
|
||||||
|
|
||||||
|
|
||||||
async def __dispatch_from_queue(self, stop_event: threading.Event):
|
async def __dispatch_from_queue(self, stop_event: threading.Event):
|
||||||
"""Get events on from the queue and dispatch them, from the correct thread"""
|
"""Get events on from the queue and dispatch them, from the correct thread"""
|
||||||
while not stop_event.is_set():
|
while not stop_event.is_set():
|
||||||
try:
|
try:
|
||||||
event = self.__queue.get(block = False)
|
event = self.__queue.get(block=False)
|
||||||
if not event: # Probably stopping
|
if not event: # Probably stopping
|
||||||
continue
|
continue
|
||||||
|
|
||||||
dispatch(
|
dispatch(
|
||||||
event.get('event_name'),
|
event.get("event_name"),
|
||||||
payload = event.get('payload'),
|
payload=event.get("payload"),
|
||||||
middleware_id = self.event_handler_id)
|
middleware_id=self.event_handler_id,
|
||||||
|
)
|
||||||
|
|
||||||
except Empty:
|
except Empty:
|
||||||
await asyncio.sleep(0.001)
|
await asyncio.sleep(0.1)
|
||||||
pass
|
pass
|
||||||
|
|
||||||
except asyncio.CancelledError as e:
|
except asyncio.CancelledError as e:
|
||||||
raise e # Raise a proper error
|
raise e # Raise a proper error
|
||||||
237
invokeai/app/api/routers/images.py
Normal file
237
invokeai/app/api/routers/images.py
Normal file
@@ -0,0 +1,237 @@
|
|||||||
|
import io
|
||||||
|
from typing import Optional
|
||||||
|
from fastapi import Body, HTTPException, Path, Query, Request, Response, UploadFile
|
||||||
|
from fastapi.routing import APIRouter
|
||||||
|
from fastapi.responses import FileResponse
|
||||||
|
from PIL import Image
|
||||||
|
from invokeai.app.models.image import (
|
||||||
|
ImageCategory,
|
||||||
|
ResourceOrigin,
|
||||||
|
)
|
||||||
|
from invokeai.app.services.image_record_storage import OffsetPaginatedResults
|
||||||
|
from invokeai.app.services.models.image_record import (
|
||||||
|
ImageDTO,
|
||||||
|
ImageRecordChanges,
|
||||||
|
ImageUrlsDTO,
|
||||||
|
)
|
||||||
|
from invokeai.app.services.item_storage import PaginatedResults
|
||||||
|
|
||||||
|
from ..dependencies import ApiDependencies
|
||||||
|
|
||||||
|
images_router = APIRouter(prefix="/v1/images", tags=["images"])
|
||||||
|
|
||||||
|
|
||||||
|
@images_router.post(
|
||||||
|
"/",
|
||||||
|
operation_id="upload_image",
|
||||||
|
responses={
|
||||||
|
201: {"description": "The image was uploaded successfully"},
|
||||||
|
415: {"description": "Image upload failed"},
|
||||||
|
},
|
||||||
|
status_code=201,
|
||||||
|
response_model=ImageDTO,
|
||||||
|
)
|
||||||
|
async def upload_image(
|
||||||
|
file: UploadFile,
|
||||||
|
request: Request,
|
||||||
|
response: Response,
|
||||||
|
image_category: ImageCategory = Query(description="The category of the image"),
|
||||||
|
is_intermediate: bool = Query(description="Whether this is an intermediate image"),
|
||||||
|
session_id: Optional[str] = Query(
|
||||||
|
default=None, description="The session ID associated with this upload, if any"
|
||||||
|
),
|
||||||
|
) -> ImageDTO:
|
||||||
|
"""Uploads an image"""
|
||||||
|
if not file.content_type.startswith("image"):
|
||||||
|
raise HTTPException(status_code=415, detail="Not an image")
|
||||||
|
|
||||||
|
contents = await file.read()
|
||||||
|
|
||||||
|
try:
|
||||||
|
pil_image = Image.open(io.BytesIO(contents))
|
||||||
|
except:
|
||||||
|
# Error opening the image
|
||||||
|
raise HTTPException(status_code=415, detail="Failed to read image")
|
||||||
|
|
||||||
|
try:
|
||||||
|
image_dto = ApiDependencies.invoker.services.images.create(
|
||||||
|
image=pil_image,
|
||||||
|
image_origin=ResourceOrigin.EXTERNAL,
|
||||||
|
image_category=image_category,
|
||||||
|
session_id=session_id,
|
||||||
|
is_intermediate=is_intermediate,
|
||||||
|
)
|
||||||
|
|
||||||
|
response.status_code = 201
|
||||||
|
response.headers["Location"] = image_dto.image_url
|
||||||
|
|
||||||
|
return image_dto
|
||||||
|
except Exception as e:
|
||||||
|
raise HTTPException(status_code=500, detail="Failed to create image")
|
||||||
|
|
||||||
|
|
||||||
|
@images_router.delete("/{image_name}", operation_id="delete_image")
|
||||||
|
async def delete_image(
|
||||||
|
image_name: str = Path(description="The name of the image to delete"),
|
||||||
|
) -> None:
|
||||||
|
"""Deletes an image"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
ApiDependencies.invoker.services.images.delete(image_name)
|
||||||
|
except Exception as e:
|
||||||
|
# TODO: Does this need any exception handling at all?
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
@images_router.patch(
|
||||||
|
"/{image_name}",
|
||||||
|
operation_id="update_image",
|
||||||
|
response_model=ImageDTO,
|
||||||
|
)
|
||||||
|
async def update_image(
|
||||||
|
image_name: str = Path(description="The name of the image to update"),
|
||||||
|
image_changes: ImageRecordChanges = Body(
|
||||||
|
description="The changes to apply to the image"
|
||||||
|
),
|
||||||
|
) -> ImageDTO:
|
||||||
|
"""Updates an image"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
return ApiDependencies.invoker.services.images.update(image_name, image_changes)
|
||||||
|
except Exception as e:
|
||||||
|
raise HTTPException(status_code=400, detail="Failed to update image")
|
||||||
|
|
||||||
|
|
||||||
|
@images_router.get(
|
||||||
|
"/{image_name}/metadata",
|
||||||
|
operation_id="get_image_metadata",
|
||||||
|
response_model=ImageDTO,
|
||||||
|
)
|
||||||
|
async def get_image_metadata(
|
||||||
|
image_name: str = Path(description="The name of image to get"),
|
||||||
|
) -> ImageDTO:
|
||||||
|
"""Gets an image's metadata"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
return ApiDependencies.invoker.services.images.get_dto(image_name)
|
||||||
|
except Exception as e:
|
||||||
|
raise HTTPException(status_code=404)
|
||||||
|
|
||||||
|
|
||||||
|
@images_router.get(
|
||||||
|
"/{image_name}",
|
||||||
|
operation_id="get_image_full",
|
||||||
|
response_class=Response,
|
||||||
|
responses={
|
||||||
|
200: {
|
||||||
|
"description": "Return the full-resolution image",
|
||||||
|
"content": {"image/png": {}},
|
||||||
|
},
|
||||||
|
404: {"description": "Image not found"},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
async def get_image_full(
|
||||||
|
image_name: str = Path(description="The name of full-resolution image file to get"),
|
||||||
|
) -> FileResponse:
|
||||||
|
"""Gets a full-resolution image file"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
path = ApiDependencies.invoker.services.images.get_path(image_name)
|
||||||
|
|
||||||
|
if not ApiDependencies.invoker.services.images.validate_path(path):
|
||||||
|
raise HTTPException(status_code=404)
|
||||||
|
|
||||||
|
return FileResponse(
|
||||||
|
path,
|
||||||
|
media_type="image/png",
|
||||||
|
filename=image_name,
|
||||||
|
content_disposition_type="inline",
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
raise HTTPException(status_code=404)
|
||||||
|
|
||||||
|
|
||||||
|
@images_router.get(
|
||||||
|
"/{image_name}/thumbnail",
|
||||||
|
operation_id="get_image_thumbnail",
|
||||||
|
response_class=Response,
|
||||||
|
responses={
|
||||||
|
200: {
|
||||||
|
"description": "Return the image thumbnail",
|
||||||
|
"content": {"image/webp": {}},
|
||||||
|
},
|
||||||
|
404: {"description": "Image not found"},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
async def get_image_thumbnail(
|
||||||
|
image_name: str = Path(description="The name of thumbnail image file to get"),
|
||||||
|
) -> FileResponse:
|
||||||
|
"""Gets a thumbnail image file"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
path = ApiDependencies.invoker.services.images.get_path(
|
||||||
|
image_name, thumbnail=True
|
||||||
|
)
|
||||||
|
if not ApiDependencies.invoker.services.images.validate_path(path):
|
||||||
|
raise HTTPException(status_code=404)
|
||||||
|
|
||||||
|
return FileResponse(
|
||||||
|
path, media_type="image/webp", content_disposition_type="inline"
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
raise HTTPException(status_code=404)
|
||||||
|
|
||||||
|
|
||||||
|
@images_router.get(
|
||||||
|
"/{image_name}/urls",
|
||||||
|
operation_id="get_image_urls",
|
||||||
|
response_model=ImageUrlsDTO,
|
||||||
|
)
|
||||||
|
async def get_image_urls(
|
||||||
|
image_name: str = Path(description="The name of the image whose URL to get"),
|
||||||
|
) -> ImageUrlsDTO:
|
||||||
|
"""Gets an image and thumbnail URL"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
image_url = ApiDependencies.invoker.services.images.get_url(image_name)
|
||||||
|
thumbnail_url = ApiDependencies.invoker.services.images.get_url(
|
||||||
|
image_name, thumbnail=True
|
||||||
|
)
|
||||||
|
return ImageUrlsDTO(
|
||||||
|
image_name=image_name,
|
||||||
|
image_url=image_url,
|
||||||
|
thumbnail_url=thumbnail_url,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
raise HTTPException(status_code=404)
|
||||||
|
|
||||||
|
|
||||||
|
@images_router.get(
    "/",
    operation_id="list_images_with_metadata",
    response_model=OffsetPaginatedResults[ImageDTO],
)
async def list_images_with_metadata(
    image_origin: Optional[ResourceOrigin] = Query(
        default=None, description="The origin of images to list"
    ),
    categories: Optional[list[ImageCategory]] = Query(
        default=None, description="The categories of image to include"
    ),
    is_intermediate: Optional[bool] = Query(
        default=None, description="Whether to list intermediate images"
    ),
    offset: int = Query(default=0, description="The page offset"),
    limit: int = Query(default=10, description="The number of images per page"),
) -> OffsetPaginatedResults[ImageDTO]:
    """Gets a paginated list of image DTOs, optionally filtered by origin,
    category, and intermediate status."""
    images_service = ApiDependencies.invoker.services.images
    return images_service.get_many(
        offset, limit, image_origin, categories, is_intermediate
    )
|
||||||
248
invokeai/app/api/routers/models.py
Normal file
248
invokeai/app/api/routers/models.py
Normal file
@@ -0,0 +1,248 @@
|
|||||||
|
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) and 2023 Kent Keirsey (https://github.com/hipsterusername)
|
||||||
|
|
||||||
|
import shutil
|
||||||
|
import asyncio
|
||||||
|
from typing import Annotated, Any, List, Literal, Optional, Union
|
||||||
|
|
||||||
|
from fastapi.routing import APIRouter, HTTPException
|
||||||
|
from pydantic import BaseModel, Field, parse_obj_as
|
||||||
|
from pathlib import Path
|
||||||
|
from ..dependencies import ApiDependencies
|
||||||
|
|
||||||
|
models_router = APIRouter(prefix="/v1/models", tags=["models"])
|
||||||
|
|
||||||
|
|
||||||
|
class VaeRepo(BaseModel):
    """Reference to a VAE, by HuggingFace repo ID plus optional local path/subfolder."""

    repo_id: str = Field(description="The repo ID to use for this VAE")
    path: Optional[str] = Field(description="The path to the VAE")
    subfolder: Optional[str] = Field(description="The subfolder to use for this VAE")
|
||||||
|
|
||||||
|
class ModelInfo(BaseModel):
    """Base class for model metadata; subclasses discriminate on their `format` field."""

    description: Optional[str] = Field(description="A description of the model")
|
||||||
|
|
||||||
|
class CkptModelInfo(ModelInfo):
    """Metadata for a checkpoint-format model (discriminator value: 'ckpt')."""

    format: Literal['ckpt'] = 'ckpt'

    config: str = Field(description="The path to the model config")
    weights: str = Field(description="The path to the model weights")
    vae: str = Field(description="The path to the model VAE")
    width: Optional[int] = Field(description="The width of the model")
    height: Optional[int] = Field(description="The height of the model")
|
||||||
|
|
||||||
|
class DiffusersModelInfo(ModelInfo):
    """Metadata for a diffusers-format model (discriminator value: 'diffusers')."""

    format: Literal['diffusers'] = 'diffusers'

    vae: Optional[VaeRepo] = Field(description="The VAE repo to use for this model")
    repo_id: Optional[str] = Field(description="The repo ID to use for this model")
    path: Optional[str] = Field(description="The path to the model")
|
||||||
|
|
||||||
|
class CreateModelRequest(BaseModel):
    """Request body for adding/updating a model; `info` is discriminated on `format`."""

    name: str = Field(description="The name of the model")
    info: Union[CkptModelInfo, DiffusersModelInfo] = Field(discriminator="format", description="The model info")
|
||||||
|
|
||||||
|
class CreateModelResponse(BaseModel):
    """Response body echoing the created/updated model plus a status string."""

    name: str = Field(description="The name of the new model")
    info: Union[CkptModelInfo, DiffusersModelInfo] = Field(discriminator="format", description="The model info")
    status: str = Field(description="The status of the API response")
|
||||||
|
|
||||||
|
class ConversionRequest(BaseModel):
    """Request to convert a checkpoint model to diffusers format."""

    name: str = Field(description="The name of the new model")
    info: CkptModelInfo = Field(description="The converted model info")
    save_location: str = Field(description="The path to save the converted model weights")
|
||||||
|
|
||||||
|
|
||||||
|
class ConvertedModelResponse(BaseModel):
    """Response describing the result of a checkpoint-to-diffusers conversion."""

    name: str = Field(description="The name of the new model")
    info: DiffusersModelInfo = Field(description="The converted model info")
|
||||||
|
|
||||||
|
class ModelsList(BaseModel):
    """Mapping of model name to its (format-discriminated) model info."""

    models: dict[str, Annotated[Union[(CkptModelInfo,DiffusersModelInfo)], Field(discriminator="format")]]
|
||||||
|
|
||||||
|
|
||||||
|
@models_router.get(
    "/",
    operation_id="list_models",
    responses={200: {"model": ModelsList }},
)
async def list_models() -> ModelsList:
    """Gets a list of models, validated into the `ModelsList` schema."""
    raw = ApiDependencies.invoker.services.model_manager.list_models()
    return parse_obj_as(ModelsList, {"models": raw})
|
||||||
|
|
||||||
|
|
||||||
|
@models_router.post(
    "/",
    operation_id="update_model",
    responses={200: {"status": "success"}},
)
async def update_model(
    model_request: CreateModelRequest
) -> CreateModelResponse:
    """Add Model.

    Registers (or clobbers) the named model with the model manager and
    echoes the request back with a success status.
    """
    ApiDependencies.invoker.services.model_manager.add_model(
        model_name=model_request.name,
        model_attributes=model_request.info.dict(),
        clobber=True,  # overwrite any existing entry with the same name
    )
    return CreateModelResponse(
        name=model_request.name, info=model_request.info, status="success"
    )
|
||||||
|
|
||||||
|
|
||||||
|
@models_router.delete(
    "/{model_name}",
    operation_id="del_model",
    status_code=204,
    responses={
        204: {
            "description": "Model deleted successfully"
        },
        404: {
            "description": "Model not found"
        }
    },
)
async def delete_model(model_name: str) -> None:
    """Delete Model.

    Removes the named model (and its files) via the model manager.
    Responds 204 on success; raises 404 if the model does not exist.
    """
    model_names = ApiDependencies.invoker.services.model_manager.model_names()
    logger = ApiDependencies.invoker.services.logger

    # check if model exists
    logger.info(f"Checking for model {model_name}...")

    if model_name not in model_names:
        # Fix: previous code logged a placeholder-less f-string and omitted
        # the model name; use lazy %-style logging with the name included.
        logger.error("Model not found: %s", model_name)
        raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found")

    logger.info(f"Deleting Model: {model_name}")
    ApiDependencies.invoker.services.model_manager.del_model(model_name, delete_files=True)
    logger.info(f"Model Deleted: {model_name}")
    # Fix: success was previously signalled by raising HTTPException(204) —
    # exceptions are for errors, and 204 responses must carry no body.
    # The route now declares status_code=204, so a plain return suffices.
|
||||||
|
|
||||||
|
|
||||||
|
# @socketio.on("convertToDiffusers")
|
||||||
|
# def convert_to_diffusers(model_to_convert: dict):
|
||||||
|
# try:
|
||||||
|
# if model_info := self.generate.model_manager.model_info(
|
||||||
|
# model_name=model_to_convert["model_name"]
|
||||||
|
# ):
|
||||||
|
# if "weights" in model_info:
|
||||||
|
# ckpt_path = Path(model_info["weights"])
|
||||||
|
# original_config_file = Path(model_info["config"])
|
||||||
|
# model_name = model_to_convert["model_name"]
|
||||||
|
# model_description = model_info["description"]
|
||||||
|
# else:
|
||||||
|
# self.socketio.emit(
|
||||||
|
# "error", {"message": "Model is not a valid checkpoint file"}
|
||||||
|
# )
|
||||||
|
# else:
|
||||||
|
# self.socketio.emit(
|
||||||
|
# "error", {"message": "Could not retrieve model info."}
|
||||||
|
# )
|
||||||
|
|
||||||
|
# if not ckpt_path.is_absolute():
|
||||||
|
# ckpt_path = Path(Globals.root, ckpt_path)
|
||||||
|
|
||||||
|
# if original_config_file and not original_config_file.is_absolute():
|
||||||
|
# original_config_file = Path(Globals.root, original_config_file)
|
||||||
|
|
||||||
|
# diffusers_path = Path(
|
||||||
|
# ckpt_path.parent.absolute(), f"{model_name}_diffusers"
|
||||||
|
# )
|
||||||
|
|
||||||
|
# if model_to_convert["save_location"] == "root":
|
||||||
|
# diffusers_path = Path(
|
||||||
|
# global_converted_ckpts_dir(), f"{model_name}_diffusers"
|
||||||
|
# )
|
||||||
|
|
||||||
|
# if (
|
||||||
|
# model_to_convert["save_location"] == "custom"
|
||||||
|
# and model_to_convert["custom_location"] is not None
|
||||||
|
# ):
|
||||||
|
# diffusers_path = Path(
|
||||||
|
# model_to_convert["custom_location"], f"{model_name}_diffusers"
|
||||||
|
# )
|
||||||
|
|
||||||
|
# if diffusers_path.exists():
|
||||||
|
# shutil.rmtree(diffusers_path)
|
||||||
|
|
||||||
|
# self.generate.model_manager.convert_and_import(
|
||||||
|
# ckpt_path,
|
||||||
|
# diffusers_path,
|
||||||
|
# model_name=model_name,
|
||||||
|
# model_description=model_description,
|
||||||
|
# vae=None,
|
||||||
|
# original_config_file=original_config_file,
|
||||||
|
# commit_to_conf=opt.conf,
|
||||||
|
# )
|
||||||
|
|
||||||
|
# new_model_list = self.generate.model_manager.list_models()
|
||||||
|
# socketio.emit(
|
||||||
|
# "modelConverted",
|
||||||
|
# {
|
||||||
|
# "new_model_name": model_name,
|
||||||
|
# "model_list": new_model_list,
|
||||||
|
# "update": True,
|
||||||
|
# },
|
||||||
|
# )
|
||||||
|
# print(f">> Model Converted: {model_name}")
|
||||||
|
# except Exception as e:
|
||||||
|
# self.handle_exceptions(e)
|
||||||
|
|
||||||
|
# @socketio.on("mergeDiffusersModels")
|
||||||
|
# def merge_diffusers_models(model_merge_info: dict):
|
||||||
|
# try:
|
||||||
|
# models_to_merge = model_merge_info["models_to_merge"]
|
||||||
|
# model_ids_or_paths = [
|
||||||
|
# self.generate.model_manager.model_name_or_path(x)
|
||||||
|
# for x in models_to_merge
|
||||||
|
# ]
|
||||||
|
# merged_pipe = merge_diffusion_models(
|
||||||
|
# model_ids_or_paths,
|
||||||
|
# model_merge_info["alpha"],
|
||||||
|
# model_merge_info["interp"],
|
||||||
|
# model_merge_info["force"],
|
||||||
|
# )
|
||||||
|
|
||||||
|
# dump_path = global_models_dir() / "merged_models"
|
||||||
|
# if model_merge_info["model_merge_save_path"] is not None:
|
||||||
|
# dump_path = Path(model_merge_info["model_merge_save_path"])
|
||||||
|
|
||||||
|
# os.makedirs(dump_path, exist_ok=True)
|
||||||
|
# dump_path = dump_path / model_merge_info["merged_model_name"]
|
||||||
|
# merged_pipe.save_pretrained(dump_path, safe_serialization=1)
|
||||||
|
|
||||||
|
# merged_model_config = dict(
|
||||||
|
# model_name=model_merge_info["merged_model_name"],
|
||||||
|
# description=f'Merge of models {", ".join(models_to_merge)}',
|
||||||
|
# commit_to_conf=opt.conf,
|
||||||
|
# )
|
||||||
|
|
||||||
|
# if vae := self.generate.model_manager.config[models_to_merge[0]].get(
|
||||||
|
# "vae", None
|
||||||
|
# ):
|
||||||
|
# print(f">> Using configured VAE assigned to {models_to_merge[0]}")
|
||||||
|
# merged_model_config.update(vae=vae)
|
||||||
|
|
||||||
|
# self.generate.model_manager.import_diffuser_model(
|
||||||
|
# dump_path, **merged_model_config
|
||||||
|
# )
|
||||||
|
# new_model_list = self.generate.model_manager.list_models()
|
||||||
|
|
||||||
|
# socketio.emit(
|
||||||
|
# "modelsMerged",
|
||||||
|
# {
|
||||||
|
# "merged_models": models_to_merge,
|
||||||
|
# "merged_model_name": model_merge_info["merged_model_name"],
|
||||||
|
# "model_list": new_model_list,
|
||||||
|
# "update": True,
|
||||||
|
# },
|
||||||
|
# )
|
||||||
|
# print(f">> Models Merged: {models_to_merge}")
|
||||||
|
# print(f">> New Model Added: {model_merge_info['merged_model_name']}")
|
||||||
|
# except Exception as e:
|
||||||
286
invokeai/app/api/routers/sessions.py
Normal file
286
invokeai/app/api/routers/sessions.py
Normal file
@@ -0,0 +1,286 @@
|
|||||||
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
from typing import Annotated, List, Optional, Union
|
||||||
|
|
||||||
|
from fastapi import Body, HTTPException, Path, Query, Response
|
||||||
|
from fastapi.routing import APIRouter
|
||||||
|
from pydantic.fields import Field
|
||||||
|
|
||||||
|
from ...invocations import *
|
||||||
|
from ...invocations.baseinvocation import BaseInvocation
|
||||||
|
from ...services.graph import (
|
||||||
|
Edge,
|
||||||
|
EdgeConnection,
|
||||||
|
Graph,
|
||||||
|
GraphExecutionState,
|
||||||
|
NodeAlreadyExecutedError,
|
||||||
|
)
|
||||||
|
from ...services.item_storage import PaginatedResults
|
||||||
|
from ..dependencies import ApiDependencies
|
||||||
|
|
||||||
|
session_router = APIRouter(prefix="/v1/sessions", tags=["sessions"])
|
||||||
|
|
||||||
|
|
||||||
|
@session_router.post(
    "/",
    operation_id="create_session",
    responses={
        200: {"model": GraphExecutionState},
        400: {"description": "Invalid json"},
    },
)
async def create_session(
    graph: Optional[Graph] = Body(
        default=None, description="The graph to initialize the session with"
    )
) -> GraphExecutionState:
    """Creates a new session, optionally initializing it with an invocation graph"""
    return ApiDependencies.invoker.create_execution_state(graph)
|
||||||
|
|
||||||
|
|
||||||
|
@session_router.get(
    "/",
    operation_id="list_sessions",
    responses={200: {"model": PaginatedResults[GraphExecutionState]}},
)
async def list_sessions(
    page: int = Query(default=0, description="The page of results to get"),
    per_page: int = Query(default=10, description="The number of results per page"),
    query: str = Query(default="", description="The query string to search for"),
) -> PaginatedResults[GraphExecutionState]:
    """Gets a list of sessions, optionally searching"""
    manager = ApiDependencies.invoker.services.graph_execution_manager
    if query:
        # Non-empty query: full-text search with pagination.
        return manager.search(query, page, per_page)
    # Empty query: plain paginated listing.
    return manager.list(page, per_page)
|
||||||
|
|
||||||
|
|
||||||
|
@session_router.get(
    "/{session_id}",
    operation_id="get_session",
    responses={
        200: {"model": GraphExecutionState},
        404: {"description": "Session not found"},
    },
)
async def get_session(
    session_id: str = Path(description="The id of the session to get"),
) -> GraphExecutionState:
    """Gets a session"""
    session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
    # Guard clause: unknown session id maps to 404.
    if session is None:
        raise HTTPException(status_code=404)
    return session
|
||||||
|
|
||||||
|
|
||||||
|
@session_router.post(
    "/{session_id}/nodes",
    operation_id="add_node",
    responses={
        200: {"model": str},
        400: {"description": "Invalid node or link"},
        404: {"description": "Session not found"},
    },
)
async def add_node(
    session_id: str = Path(description="The id of the session"),
    node: Annotated[
        Union[BaseInvocation.get_invocations()], Field(discriminator="type")  # type: ignore
    ] = Body(description="The node to add"),
) -> str:
    """Adds a node to the graph"""
    manager = ApiDependencies.invoker.services.graph_execution_manager
    session = manager.get(session_id)
    if session is None:
        raise HTTPException(status_code=404)

    try:
        session.add_node(node)
        # Persist the mutation explicitly.
        # TODO: can this be done automatically, or add node through an API?
        manager.set(session)
    except (NodeAlreadyExecutedError, IndexError):
        # Node already ran, or the graph rejected the insertion.
        raise HTTPException(status_code=400)
    return session.id
|
||||||
|
|
||||||
|
|
||||||
|
@session_router.put(
    "/{session_id}/nodes/{node_path}",
    operation_id="update_node",
    responses={
        200: {"model": GraphExecutionState},
        400: {"description": "Invalid node or link"},
        404: {"description": "Session not found"},
    },
)
async def update_node(
    session_id: str = Path(description="The id of the session"),
    node_path: str = Path(description="The path to the node in the graph"),
    node: Annotated[
        Union[BaseInvocation.get_invocations()], Field(discriminator="type")  # type: ignore
    ] = Body(description="The new node"),
) -> GraphExecutionState:
    """Updates a node in the graph and removes all linked edges"""
    manager = ApiDependencies.invoker.services.graph_execution_manager
    session = manager.get(session_id)
    if session is None:
        raise HTTPException(status_code=404)

    try:
        session.update_node(node_path, node)
        # Persist the mutation explicitly.
        # TODO: can this be done automatically, or add node through an API?
        manager.set(session)
    except (NodeAlreadyExecutedError, IndexError):
        raise HTTPException(status_code=400)
    return session
|
||||||
|
|
||||||
|
|
||||||
|
@session_router.delete(
    "/{session_id}/nodes/{node_path}",
    operation_id="delete_node",
    responses={
        200: {"model": GraphExecutionState},
        400: {"description": "Invalid node or link"},
        404: {"description": "Session not found"},
    },
)
async def delete_node(
    session_id: str = Path(description="The id of the session"),
    node_path: str = Path(description="The path to the node to delete"),
) -> GraphExecutionState:
    """Deletes a node in the graph and removes all linked edges"""
    manager = ApiDependencies.invoker.services.graph_execution_manager
    session = manager.get(session_id)
    if session is None:
        raise HTTPException(status_code=404)

    try:
        session.delete_node(node_path)
        # Persist the mutation explicitly.
        # TODO: can this be done automatically, or add node through an API?
        manager.set(session)
    except (NodeAlreadyExecutedError, IndexError):
        raise HTTPException(status_code=400)
    return session
|
||||||
|
|
||||||
|
|
||||||
|
@session_router.post(
    "/{session_id}/edges",
    operation_id="add_edge",
    responses={
        200: {"model": GraphExecutionState},
        400: {"description": "Invalid node or link"},
        404: {"description": "Session not found"},
    },
)
async def add_edge(
    session_id: str = Path(description="The id of the session"),
    edge: Edge = Body(description="The edge to add"),
) -> GraphExecutionState:
    """Adds an edge to the graph"""
    manager = ApiDependencies.invoker.services.graph_execution_manager
    session = manager.get(session_id)
    if session is None:
        raise HTTPException(status_code=404)

    try:
        session.add_edge(edge)
        # Persist the mutation explicitly.
        # TODO: can this be done automatically, or add node through an API?
        manager.set(session)
    except (NodeAlreadyExecutedError, IndexError):
        raise HTTPException(status_code=400)
    return session
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: the edge being in the path here is really ugly, find a better solution
|
||||||
|
# TODO: the edge being in the path here is really ugly, find a better solution
@session_router.delete(
    "/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}",
    operation_id="delete_edge",
    responses={
        200: {"model": GraphExecutionState},
        400: {"description": "Invalid node or link"},
        404: {"description": "Session not found"},
    },
)
async def delete_edge(
    session_id: str = Path(description="The id of the session"),
    from_node_id: str = Path(description="The id of the node the edge is coming from"),
    from_field: str = Path(description="The field of the node the edge is coming from"),
    to_node_id: str = Path(description="The id of the node the edge is going to"),
    to_field: str = Path(description="The field of the node the edge is going to"),
) -> GraphExecutionState:
    """Deletes an edge from the graph"""
    manager = ApiDependencies.invoker.services.graph_execution_manager
    session = manager.get(session_id)
    if session is None:
        raise HTTPException(status_code=404)

    try:
        # Rebuild the Edge object from its path components.
        target = Edge(
            source=EdgeConnection(node_id=from_node_id, field=from_field),
            destination=EdgeConnection(node_id=to_node_id, field=to_field)
        )
        session.delete_edge(target)
        # Persist the mutation explicitly.
        # TODO: can this be done automatically, or add node through an API?
        manager.set(session)
    except (NodeAlreadyExecutedError, IndexError):
        raise HTTPException(status_code=400)
    return session
|
||||||
|
|
||||||
|
|
||||||
|
@session_router.put(
    "/{session_id}/invoke",
    operation_id="invoke_session",
    responses={
        200: {"model": None},
        202: {"description": "The invocation is queued"},
        400: {"description": "The session has no invocations ready to invoke"},
        404: {"description": "Session not found"},
    },
)
async def invoke_session(
    session_id: str = Path(description="The id of the session to invoke"),
    all: bool = Query(
        default=False, description="Whether or not to invoke all remaining invocations"
    ),
) -> Response:
    """Invokes a session"""
    session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
    if session is None:
        raise HTTPException(status_code=404)
    # A fully-executed session has nothing left to invoke.
    if session.is_complete():
        raise HTTPException(status_code=400)
    ApiDependencies.invoker.invoke(session, invoke_all=all)
    return Response(status_code=202)
|
||||||
|
|
||||||
|
|
||||||
|
@session_router.delete(
    "/{session_id}/invoke",
    operation_id="cancel_session_invoke",
    responses={
        202: {"description": "The invocation is canceled"}
    },
)
async def cancel_session_invoke(
    session_id: str = Path(description="The id of the session to cancel"),
) -> Response:
    """Cancels a session's in-flight invocation"""
    # Fix: docstring previously said "Invokes a session", copied from the
    # invoke handler; this endpoint cancels.
    ApiDependencies.invoker.cancel(session_id)
    return Response(status_code=202)
|
||||||
@@ -1,36 +1,38 @@
|
|||||||
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
from fastapi import FastAPI
|
from fastapi import FastAPI
|
||||||
from fastapi_socketio import SocketManager
|
|
||||||
from fastapi_events.handlers.local import local_handler
|
from fastapi_events.handlers.local import local_handler
|
||||||
from fastapi_events.typing import Event
|
from fastapi_events.typing import Event
|
||||||
|
from fastapi_socketio import SocketManager
|
||||||
|
|
||||||
from ..services.events import EventServiceBase
|
from ..services.events import EventServiceBase
|
||||||
|
|
||||||
|
|
||||||
class SocketIO:
|
class SocketIO:
|
||||||
__sio: SocketManager
|
__sio: SocketManager
|
||||||
|
|
||||||
def __init__(self, app: FastAPI):
|
def __init__(self, app: FastAPI):
|
||||||
self.__sio = SocketManager(app = app)
|
self.__sio = SocketManager(app=app)
|
||||||
self.__sio.on('subscribe', handler=self._handle_sub)
|
self.__sio.on("subscribe", handler=self._handle_sub)
|
||||||
self.__sio.on('unsubscribe', handler=self._handle_unsub)
|
self.__sio.on("unsubscribe", handler=self._handle_unsub)
|
||||||
|
|
||||||
local_handler.register(
|
local_handler.register(
|
||||||
event_name = EventServiceBase.session_event,
|
event_name=EventServiceBase.session_event, _func=self._handle_session_event
|
||||||
_func=self._handle_session_event
|
|
||||||
)
|
)
|
||||||
|
|
||||||
async def _handle_session_event(self, event: Event):
|
async def _handle_session_event(self, event: Event):
|
||||||
await self.__sio.emit(
|
await self.__sio.emit(
|
||||||
event = event[1]['event'],
|
event=event[1]["event"],
|
||||||
data = event[1]['data'],
|
data=event[1]["data"],
|
||||||
room = event[1]['data']['graph_execution_state_id']
|
room=event[1]["data"]["graph_execution_state_id"],
|
||||||
)
|
)
|
||||||
|
|
||||||
async def _handle_sub(self, sid, data, *args, **kwargs):
|
async def _handle_sub(self, sid, data, *args, **kwargs):
|
||||||
if 'session' in data:
|
if "session" in data:
|
||||||
self.__sio.enter_room(sid, data['session'])
|
self.__sio.enter_room(sid, data["session"])
|
||||||
|
|
||||||
# @app.sio.on('unsubscribe')
|
# @app.sio.on('unsubscribe')
|
||||||
|
|
||||||
async def _handle_unsub(self, sid, data, *args, **kwargs):
|
async def _handle_unsub(self, sid, data, *args, **kwargs):
|
||||||
if 'session' in data:
|
if "session" in data:
|
||||||
self.__sio.leave_room(sid, data['session'])
|
self.__sio.leave_room(sid, data["session"])
|
||||||
@@ -1,85 +1,82 @@
|
|||||||
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
import asyncio
|
import asyncio
|
||||||
from inspect import signature
|
from inspect import signature
|
||||||
from fastapi import FastAPI
|
|
||||||
from fastapi.openapi.utils import get_openapi
|
|
||||||
from fastapi.openapi.docs import get_swagger_ui_html, get_redoc_html
|
|
||||||
from fastapi.staticfiles import StaticFiles
|
|
||||||
from fastapi_events.middleware import EventHandlerASGIMiddleware
|
|
||||||
from fastapi_events.handlers.local import local_handler
|
|
||||||
from fastapi.middleware.cors import CORSMiddleware
|
|
||||||
from pydantic.schema import schema
|
|
||||||
import uvicorn
|
import uvicorn
|
||||||
from .api.sockets import SocketIO
|
|
||||||
from .invocations import *
|
from fastapi import FastAPI
|
||||||
from .invocations.baseinvocation import BaseInvocation
|
from fastapi.middleware.cors import CORSMiddleware
|
||||||
from .api.routers import images, sessions
|
from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html
|
||||||
|
from fastapi.openapi.utils import get_openapi
|
||||||
|
from fastapi.staticfiles import StaticFiles
|
||||||
|
from fastapi_events.handlers.local import local_handler
|
||||||
|
from fastapi_events.middleware import EventHandlerASGIMiddleware
|
||||||
|
from pathlib import Path
|
||||||
|
from pydantic.schema import schema
|
||||||
|
|
||||||
|
#This should come early so that modules can log their initialization properly
|
||||||
|
from .services.config import InvokeAIAppConfig
|
||||||
|
from ..backend.util.logging import InvokeAILogger
|
||||||
|
app_config = InvokeAIAppConfig.get_config()
|
||||||
|
app_config.parse_args()
|
||||||
|
logger = InvokeAILogger.getLogger(config=app_config)
|
||||||
|
|
||||||
|
import invokeai.frontend.web as web_dir
|
||||||
|
|
||||||
from .api.dependencies import ApiDependencies
|
from .api.dependencies import ApiDependencies
|
||||||
from ..args import Args
|
from .api.routers import sessions, models, images
|
||||||
|
from .api.sockets import SocketIO
|
||||||
|
from .invocations.baseinvocation import BaseInvocation
|
||||||
|
|
||||||
# Create the app
|
# Create the app
|
||||||
# TODO: create this all in a method so configuration/etc. can be passed in?
|
# TODO: create this all in a method so configuration/etc. can be passed in?
|
||||||
app = FastAPI(
|
app = FastAPI(title="Invoke AI", docs_url=None, redoc_url=None)
|
||||||
title = "Invoke AI",
|
|
||||||
docs_url = None,
|
|
||||||
redoc_url = None
|
|
||||||
)
|
|
||||||
|
|
||||||
# Add event handler
|
# Add event handler
|
||||||
event_handler_id: int = id(app)
|
event_handler_id: int = id(app)
|
||||||
app.add_middleware(
|
app.add_middleware(
|
||||||
EventHandlerASGIMiddleware,
|
EventHandlerASGIMiddleware,
|
||||||
handlers = [local_handler], # TODO: consider doing this in services to support different configurations
|
handlers=[
|
||||||
middleware_id = event_handler_id)
|
local_handler
|
||||||
|
], # TODO: consider doing this in services to support different configurations
|
||||||
# Add CORS
|
middleware_id=event_handler_id,
|
||||||
# TODO: use configuration for this
|
|
||||||
origins = []
|
|
||||||
app.add_middleware(
|
|
||||||
CORSMiddleware,
|
|
||||||
allow_origins=origins,
|
|
||||||
allow_credentials=True,
|
|
||||||
allow_methods=["*"],
|
|
||||||
allow_headers=["*"],
|
|
||||||
)
|
)
|
||||||
|
|
||||||
socket_io = SocketIO(app)
|
socket_io = SocketIO(app)
|
||||||
|
|
||||||
config = {}
|
|
||||||
|
|
||||||
# Add startup event to load dependencies
|
# Add startup event to load dependencies
|
||||||
@app.on_event('startup')
|
@app.on_event("startup")
|
||||||
async def startup_event():
|
async def startup_event():
|
||||||
args = Args()
|
app.add_middleware(
|
||||||
config = args.parse_args()
|
CORSMiddleware,
|
||||||
|
allow_origins=app_config.allow_origins,
|
||||||
ApiDependencies.initialize(
|
allow_credentials=app_config.allow_credentials,
|
||||||
args = args,
|
allow_methods=app_config.allow_methods,
|
||||||
config = config,
|
allow_headers=app_config.allow_headers,
|
||||||
event_handler_id = event_handler_id
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
ApiDependencies.initialize(
|
||||||
|
config=app_config, event_handler_id=event_handler_id, logger=logger
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# Shut down threads
|
# Shut down threads
|
||||||
@app.on_event('shutdown')
|
@app.on_event("shutdown")
|
||||||
async def shutdown_event():
|
async def shutdown_event():
|
||||||
ApiDependencies.shutdown()
|
ApiDependencies.shutdown()
|
||||||
|
|
||||||
|
|
||||||
# Include all routers
|
# Include all routers
|
||||||
# TODO: REMOVE
|
# TODO: REMOVE
|
||||||
# app.include_router(
|
# app.include_router(
|
||||||
# invocation.invocation_router,
|
# invocation.invocation_router,
|
||||||
# prefix = '/api')
|
# prefix = '/api')
|
||||||
|
|
||||||
app.include_router(
|
app.include_router(sessions.session_router, prefix="/api")
|
||||||
sessions.session_router,
|
|
||||||
prefix = '/api'
|
|
||||||
)
|
|
||||||
|
|
||||||
app.include_router(
|
app.include_router(models.models_router, prefix="/api")
|
||||||
images.images_router,
|
|
||||||
prefix = '/api'
|
app.include_router(images.images_router, prefix="/api")
|
||||||
)
|
|
||||||
|
|
||||||
# Build a custom OpenAPI to include all outputs
|
# Build a custom OpenAPI to include all outputs
|
||||||
# TODO: can outputs be included on metadata of invocation schemas somehow?
|
# TODO: can outputs be included on metadata of invocation schemas somehow?
|
||||||
@@ -87,10 +84,10 @@ def custom_openapi():
|
|||||||
if app.openapi_schema:
|
if app.openapi_schema:
|
||||||
return app.openapi_schema
|
return app.openapi_schema
|
||||||
openapi_schema = get_openapi(
|
openapi_schema = get_openapi(
|
||||||
title = app.title,
|
title=app.title,
|
||||||
description = "An API for invoking AI image operations",
|
description="An API for invoking AI image operations",
|
||||||
version = "1.0.0",
|
version="1.0.0",
|
||||||
routes = app.routes
|
routes=app.routes,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Add all outputs
|
# Add all outputs
|
||||||
@@ -102,12 +99,12 @@ def custom_openapi():
|
|||||||
output_types.add(output_type)
|
output_types.add(output_type)
|
||||||
|
|
||||||
output_schemas = schema(output_types, ref_prefix="#/components/schemas/")
|
output_schemas = schema(output_types, ref_prefix="#/components/schemas/")
|
||||||
for schema_key, output_schema in output_schemas['definitions'].items():
|
for schema_key, output_schema in output_schemas["definitions"].items():
|
||||||
openapi_schema["components"]["schemas"][schema_key] = output_schema
|
openapi_schema["components"]["schemas"][schema_key] = output_schema
|
||||||
|
|
||||||
# TODO: note that we assume the schema_key here is the TYPE.__name__
|
# TODO: note that we assume the schema_key here is the TYPE.__name__
|
||||||
# This could break in some cases, figure out a better way to do it
|
# This could break in some cases, figure out a better way to do it
|
||||||
output_type_titles[schema_key] = output_schema['title']
|
output_type_titles[schema_key] = output_schema["title"]
|
||||||
|
|
||||||
# Add a reference to the output type to additionalProperties of the invoker schema
|
# Add a reference to the output type to additionalProperties of the invoker schema
|
||||||
for invoker in all_invocations:
|
for invoker in all_invocations:
|
||||||
@@ -115,50 +112,51 @@ def custom_openapi():
|
|||||||
output_type = signature(invoker.invoke).return_annotation
|
output_type = signature(invoker.invoke).return_annotation
|
||||||
output_type_title = output_type_titles[output_type.__name__]
|
output_type_title = output_type_titles[output_type.__name__]
|
||||||
invoker_schema = openapi_schema["components"]["schemas"][invoker_name]
|
invoker_schema = openapi_schema["components"]["schemas"][invoker_name]
|
||||||
outputs_ref = { '$ref': f'#/components/schemas/{output_type_title}' }
|
outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"}
|
||||||
if 'additionalProperties' not in invoker_schema:
|
|
||||||
invoker_schema['additionalProperties'] = {}
|
invoker_schema["output"] = outputs_ref
|
||||||
|
|
||||||
invoker_schema['additionalProperties']['outputs'] = outputs_ref
|
|
||||||
|
|
||||||
app.openapi_schema = openapi_schema
|
app.openapi_schema = openapi_schema
|
||||||
return app.openapi_schema
|
return app.openapi_schema
|
||||||
|
|
||||||
|
|
||||||
app.openapi = custom_openapi
|
app.openapi = custom_openapi
|
||||||
|
|
||||||
# Override API doc favicons
|
# Override API doc favicons
|
||||||
app.mount('/static', StaticFiles(directory='static/dream_web'), name='static')
|
app.mount("/static", StaticFiles(directory=Path(web_dir.__path__[0], 'static/dream_web')), name="static")
|
||||||
|
|
||||||
@app.get("/docs", include_in_schema=False)
|
@app.get("/docs", include_in_schema=False)
|
||||||
def overridden_swagger():
|
def overridden_swagger():
|
||||||
return get_swagger_ui_html(
|
return get_swagger_ui_html(
|
||||||
openapi_url=app.openapi_url,
|
openapi_url=app.openapi_url,
|
||||||
title=app.title,
|
title=app.title,
|
||||||
swagger_favicon_url="/static/favicon.ico"
|
swagger_favicon_url="/static/favicon.ico",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@app.get("/redoc", include_in_schema=False)
|
@app.get("/redoc", include_in_schema=False)
|
||||||
def overridden_redoc():
|
def overridden_redoc():
|
||||||
return get_redoc_html(
|
return get_redoc_html(
|
||||||
openapi_url=app.openapi_url,
|
openapi_url=app.openapi_url,
|
||||||
title=app.title,
|
title=app.title,
|
||||||
redoc_favicon_url="/static/favicon.ico"
|
redoc_favicon_url="/static/favicon.ico",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# Must mount *after* the other routes else it borks em
|
||||||
|
app.mount("/",
|
||||||
|
StaticFiles(directory=Path(web_dir.__path__[0],"dist"),
|
||||||
|
html=True
|
||||||
|
), name="ui"
|
||||||
|
)
|
||||||
|
|
||||||
def invoke_api():
|
def invoke_api():
|
||||||
# Start our own event loop for eventing usage
|
# Start our own event loop for eventing usage
|
||||||
# TODO: determine if there's a better way to do this
|
|
||||||
loop = asyncio.new_event_loop()
|
loop = asyncio.new_event_loop()
|
||||||
config = uvicorn.Config(
|
config = uvicorn.Config(app=app, host=app_config.host, port=app_config.port, loop=loop)
|
||||||
app = app,
|
# Use access_log to turn off logging
|
||||||
host = "0.0.0.0",
|
|
||||||
port = 9090,
|
|
||||||
loop = loop)
|
|
||||||
# Use access_log to turn off logging
|
|
||||||
|
|
||||||
server = uvicorn.Server(config)
|
server = uvicorn.Server(config)
|
||||||
loop.run_until_complete(server.serve())
|
loop.run_until_complete(server.serve())
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
invoke_api()
|
invoke_api()
|
||||||
303
invokeai/app/cli/commands.py
Normal file
303
invokeai/app/cli/commands.py
Normal file
@@ -0,0 +1,303 @@
|
|||||||
|
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
import argparse
|
||||||
|
from typing import Any, Callable, Iterable, Literal, Union, get_args, get_origin, get_type_hints
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
import networkx as nx
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
|
||||||
|
import invokeai.backend.util.logging as logger
|
||||||
|
from ..invocations.baseinvocation import BaseInvocation
|
||||||
|
from ..invocations.image import ImageField
|
||||||
|
from ..services.graph import GraphExecutionState, LibraryGraph, Edge
|
||||||
|
from ..services.invoker import Invoker
|
||||||
|
|
||||||
|
|
||||||
|
def add_field_argument(command_parser, name: str, field, default_override = None):
|
||||||
|
default = default_override if default_override is not None else field.default if field.default_factory is None else field.default_factory()
|
||||||
|
if get_origin(field.type_) == Literal:
|
||||||
|
allowed_values = get_args(field.type_)
|
||||||
|
allowed_types = set()
|
||||||
|
for val in allowed_values:
|
||||||
|
allowed_types.add(type(val))
|
||||||
|
allowed_types_list = list(allowed_types)
|
||||||
|
field_type = allowed_types_list[0] if len(allowed_types) == 1 else Union[allowed_types_list] # type: ignore
|
||||||
|
|
||||||
|
command_parser.add_argument(
|
||||||
|
f"--{name}",
|
||||||
|
dest=name,
|
||||||
|
type=field_type,
|
||||||
|
default=default,
|
||||||
|
choices=allowed_values,
|
||||||
|
help=field.field_info.description,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
command_parser.add_argument(
|
||||||
|
f"--{name}",
|
||||||
|
dest=name,
|
||||||
|
type=field.type_,
|
||||||
|
default=default,
|
||||||
|
help=field.field_info.description,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def add_parsers(
|
||||||
|
subparsers,
|
||||||
|
commands: list[type],
|
||||||
|
command_field: str = "type",
|
||||||
|
exclude_fields: list[str] = ["id", "type"],
|
||||||
|
add_arguments: Callable[[argparse.ArgumentParser], None]|None = None
|
||||||
|
):
|
||||||
|
"""Adds parsers for each command to the subparsers"""
|
||||||
|
|
||||||
|
# Create subparsers for each command
|
||||||
|
for command in commands:
|
||||||
|
hints = get_type_hints(command)
|
||||||
|
cmd_name = get_args(hints[command_field])[0]
|
||||||
|
command_parser = subparsers.add_parser(cmd_name, help=command.__doc__)
|
||||||
|
|
||||||
|
if add_arguments is not None:
|
||||||
|
add_arguments(command_parser)
|
||||||
|
|
||||||
|
# Convert all fields to arguments
|
||||||
|
fields = command.__fields__ # type: ignore
|
||||||
|
for name, field in fields.items():
|
||||||
|
if name in exclude_fields:
|
||||||
|
continue
|
||||||
|
|
||||||
|
add_field_argument(command_parser, name, field)
|
||||||
|
|
||||||
|
|
||||||
|
def add_graph_parsers(
|
||||||
|
subparsers,
|
||||||
|
graphs: list[LibraryGraph],
|
||||||
|
add_arguments: Callable[[argparse.ArgumentParser], None]|None = None
|
||||||
|
):
|
||||||
|
for graph in graphs:
|
||||||
|
command_parser = subparsers.add_parser(graph.name, help=graph.description)
|
||||||
|
|
||||||
|
if add_arguments is not None:
|
||||||
|
add_arguments(command_parser)
|
||||||
|
|
||||||
|
# Add arguments for inputs
|
||||||
|
for exposed_input in graph.exposed_inputs:
|
||||||
|
node = graph.graph.get_node(exposed_input.node_path)
|
||||||
|
field = node.__fields__[exposed_input.field]
|
||||||
|
default_override = getattr(node, exposed_input.field)
|
||||||
|
add_field_argument(command_parser, exposed_input.alias, field, default_override)
|
||||||
|
|
||||||
|
|
||||||
|
class CliContext:
|
||||||
|
invoker: Invoker
|
||||||
|
session: GraphExecutionState
|
||||||
|
parser: argparse.ArgumentParser
|
||||||
|
defaults: dict[str, Any]
|
||||||
|
graph_nodes: dict[str, str]
|
||||||
|
nodes_added: list[str]
|
||||||
|
|
||||||
|
def __init__(self, invoker: Invoker, session: GraphExecutionState, parser: argparse.ArgumentParser):
|
||||||
|
self.invoker = invoker
|
||||||
|
self.session = session
|
||||||
|
self.parser = parser
|
||||||
|
self.defaults = dict()
|
||||||
|
self.graph_nodes = dict()
|
||||||
|
self.nodes_added = list()
|
||||||
|
|
||||||
|
def get_session(self):
|
||||||
|
self.session = self.invoker.services.graph_execution_manager.get(self.session.id)
|
||||||
|
return self.session
|
||||||
|
|
||||||
|
def reset(self):
|
||||||
|
self.session = self.invoker.create_execution_state()
|
||||||
|
self.graph_nodes = dict()
|
||||||
|
self.nodes_added = list()
|
||||||
|
# Leave defaults unchanged
|
||||||
|
|
||||||
|
def add_node(self, node: BaseInvocation):
|
||||||
|
self.get_session()
|
||||||
|
self.session.graph.add_node(node)
|
||||||
|
self.nodes_added.append(node.id)
|
||||||
|
self.invoker.services.graph_execution_manager.set(self.session)
|
||||||
|
|
||||||
|
def add_edge(self, edge: Edge):
|
||||||
|
self.get_session()
|
||||||
|
self.session.add_edge(edge)
|
||||||
|
self.invoker.services.graph_execution_manager.set(self.session)
|
||||||
|
|
||||||
|
|
||||||
|
class ExitCli(Exception):
|
||||||
|
"""Exception to exit the CLI"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class BaseCommand(ABC, BaseModel):
|
||||||
|
"""A CLI command"""
|
||||||
|
|
||||||
|
# All commands must include a type name like this:
|
||||||
|
# type: Literal['your_command_name'] = 'your_command_name'
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_all_subclasses(cls):
|
||||||
|
subclasses = []
|
||||||
|
toprocess = [cls]
|
||||||
|
while len(toprocess) > 0:
|
||||||
|
next = toprocess.pop(0)
|
||||||
|
next_subclasses = next.__subclasses__()
|
||||||
|
subclasses.extend(next_subclasses)
|
||||||
|
toprocess.extend(next_subclasses)
|
||||||
|
return subclasses
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_commands(cls):
|
||||||
|
return tuple(BaseCommand.get_all_subclasses())
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_commands_map(cls):
|
||||||
|
# Get the type strings out of the literals and into a dictionary
|
||||||
|
return dict(map(lambda t: (get_args(get_type_hints(t)['type'])[0], t),BaseCommand.get_all_subclasses()))
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def run(self, context: CliContext) -> None:
|
||||||
|
"""Run the command. Raise ExitCli to exit."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class ExitCommand(BaseCommand):
|
||||||
|
"""Exits the CLI"""
|
||||||
|
type: Literal['exit'] = 'exit'
|
||||||
|
|
||||||
|
def run(self, context: CliContext) -> None:
|
||||||
|
raise ExitCli()
|
||||||
|
|
||||||
|
|
||||||
|
class HelpCommand(BaseCommand):
|
||||||
|
"""Shows help"""
|
||||||
|
type: Literal['help'] = 'help'
|
||||||
|
|
||||||
|
def run(self, context: CliContext) -> None:
|
||||||
|
context.parser.print_help()
|
||||||
|
|
||||||
|
|
||||||
|
def get_graph_execution_history(
|
||||||
|
graph_execution_state: GraphExecutionState,
|
||||||
|
) -> Iterable[str]:
|
||||||
|
"""Gets the history of fully-executed invocations for a graph execution"""
|
||||||
|
return (
|
||||||
|
n
|
||||||
|
for n in reversed(graph_execution_state.executed_history)
|
||||||
|
if n in graph_execution_state.graph.nodes
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def get_invocation_command(invocation) -> str:
|
||||||
|
fields = invocation.__fields__.items()
|
||||||
|
type_hints = get_type_hints(type(invocation))
|
||||||
|
command = [invocation.type]
|
||||||
|
for name, field in fields:
|
||||||
|
if name in ["id", "type"]:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# TODO: add links
|
||||||
|
|
||||||
|
# Skip image fields when serializing command
|
||||||
|
type_hint = type_hints.get(name) or None
|
||||||
|
if type_hint is ImageField or ImageField in get_args(type_hint):
|
||||||
|
continue
|
||||||
|
|
||||||
|
field_value = getattr(invocation, name)
|
||||||
|
field_default = field.default
|
||||||
|
if field_value != field_default:
|
||||||
|
if type_hint is str or str in get_args(type_hint):
|
||||||
|
command.append(f'--{name} "{field_value}"')
|
||||||
|
else:
|
||||||
|
command.append(f"--{name} {field_value}")
|
||||||
|
|
||||||
|
return " ".join(command)
|
||||||
|
|
||||||
|
|
||||||
|
class HistoryCommand(BaseCommand):
|
||||||
|
"""Shows the invocation history"""
|
||||||
|
type: Literal['history'] = 'history'
|
||||||
|
|
||||||
|
# Inputs
|
||||||
|
# fmt: off
|
||||||
|
count: int = Field(default=5, gt=0, description="The number of history entries to show")
|
||||||
|
# fmt: on
|
||||||
|
|
||||||
|
def run(self, context: CliContext) -> None:
|
||||||
|
history = list(get_graph_execution_history(context.get_session()))
|
||||||
|
for i in range(min(self.count, len(history))):
|
||||||
|
entry_id = history[-1 - i]
|
||||||
|
entry = context.get_session().graph.get_node(entry_id)
|
||||||
|
logger.info(f"{entry_id}: {get_invocation_command(entry)}")
|
||||||
|
|
||||||
|
|
||||||
|
class SetDefaultCommand(BaseCommand):
|
||||||
|
"""Sets a default value for a field"""
|
||||||
|
type: Literal['default'] = 'default'
|
||||||
|
|
||||||
|
# Inputs
|
||||||
|
# fmt: off
|
||||||
|
field: str = Field(description="The field to set the default for")
|
||||||
|
value: str = Field(description="The value to set the default to, or None to clear the default")
|
||||||
|
# fmt: on
|
||||||
|
|
||||||
|
def run(self, context: CliContext) -> None:
|
||||||
|
if self.value is None:
|
||||||
|
if self.field in context.defaults:
|
||||||
|
del context.defaults[self.field]
|
||||||
|
else:
|
||||||
|
context.defaults[self.field] = self.value
|
||||||
|
|
||||||
|
|
||||||
|
class DrawGraphCommand(BaseCommand):
|
||||||
|
"""Debugs a graph"""
|
||||||
|
type: Literal['draw_graph'] = 'draw_graph'
|
||||||
|
|
||||||
|
def run(self, context: CliContext) -> None:
|
||||||
|
session: GraphExecutionState = context.invoker.services.graph_execution_manager.get(context.session.id)
|
||||||
|
nxgraph = session.graph.nx_graph_flat()
|
||||||
|
|
||||||
|
# Draw the networkx graph
|
||||||
|
plt.figure(figsize=(20, 20))
|
||||||
|
pos = nx.spectral_layout(nxgraph)
|
||||||
|
nx.draw_networkx_nodes(nxgraph, pos, node_size=1000)
|
||||||
|
nx.draw_networkx_edges(nxgraph, pos, width=2)
|
||||||
|
nx.draw_networkx_labels(nxgraph, pos, font_size=20, font_family="sans-serif")
|
||||||
|
plt.axis("off")
|
||||||
|
plt.show()
|
||||||
|
|
||||||
|
|
||||||
|
class DrawExecutionGraphCommand(BaseCommand):
|
||||||
|
"""Debugs an execution graph"""
|
||||||
|
type: Literal['draw_xgraph'] = 'draw_xgraph'
|
||||||
|
|
||||||
|
def run(self, context: CliContext) -> None:
|
||||||
|
session: GraphExecutionState = context.invoker.services.graph_execution_manager.get(context.session.id)
|
||||||
|
nxgraph = session.execution_graph.nx_graph_flat()
|
||||||
|
|
||||||
|
# Draw the networkx graph
|
||||||
|
plt.figure(figsize=(20, 20))
|
||||||
|
pos = nx.spectral_layout(nxgraph)
|
||||||
|
nx.draw_networkx_nodes(nxgraph, pos, node_size=1000)
|
||||||
|
nx.draw_networkx_edges(nxgraph, pos, width=2)
|
||||||
|
nx.draw_networkx_labels(nxgraph, pos, font_size=20, font_family="sans-serif")
|
||||||
|
plt.axis("off")
|
||||||
|
plt.show()
|
||||||
|
|
||||||
|
class SortedHelpFormatter(argparse.HelpFormatter):
|
||||||
|
def _iter_indented_subactions(self, action):
|
||||||
|
try:
|
||||||
|
get_subactions = action._get_subactions
|
||||||
|
except AttributeError:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
self._indent()
|
||||||
|
if isinstance(action, argparse._SubParsersAction):
|
||||||
|
for subaction in sorted(get_subactions(), key=lambda x: x.dest):
|
||||||
|
yield subaction
|
||||||
|
else:
|
||||||
|
for subaction in get_subactions():
|
||||||
|
yield subaction
|
||||||
|
self._dedent()
|
||||||
169
invokeai/app/cli/completer.py
Normal file
169
invokeai/app/cli/completer.py
Normal file
@@ -0,0 +1,169 @@
|
|||||||
|
"""
|
||||||
|
Readline helper functions for cli_app.py
|
||||||
|
You may import the global singleton `completer` to get access to the
|
||||||
|
completer object.
|
||||||
|
"""
|
||||||
|
import atexit
|
||||||
|
import readline
|
||||||
|
import shlex
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Dict, Literal, get_args, get_type_hints, get_origin
|
||||||
|
|
||||||
|
import invokeai.backend.util.logging as logger
|
||||||
|
from ...backend import ModelManager
|
||||||
|
from ..invocations.baseinvocation import BaseInvocation
|
||||||
|
from .commands import BaseCommand
|
||||||
|
from ..services.invocation_services import InvocationServices
|
||||||
|
|
||||||
|
# singleton object, class variable
|
||||||
|
completer = None
|
||||||
|
|
||||||
|
class Completer(object):
|
||||||
|
|
||||||
|
def __init__(self, model_manager: ModelManager):
|
||||||
|
self.commands = self.get_commands()
|
||||||
|
self.matches = None
|
||||||
|
self.linebuffer = None
|
||||||
|
self.manager = model_manager
|
||||||
|
return
|
||||||
|
|
||||||
|
def complete(self, text, state):
|
||||||
|
"""
|
||||||
|
Complete commands and switches fromm the node CLI command line.
|
||||||
|
Switches are determined in a context-specific manner.
|
||||||
|
"""
|
||||||
|
|
||||||
|
buffer = readline.get_line_buffer()
|
||||||
|
if state == 0:
|
||||||
|
options = None
|
||||||
|
try:
|
||||||
|
current_command, current_switch = self.get_current_command(buffer)
|
||||||
|
options = self.get_command_options(current_command, current_switch)
|
||||||
|
except IndexError:
|
||||||
|
pass
|
||||||
|
options = options or list(self.parse_commands().keys())
|
||||||
|
|
||||||
|
if not text: # first time
|
||||||
|
self.matches = options
|
||||||
|
else:
|
||||||
|
self.matches = [s for s in options if s and s.startswith(text)]
|
||||||
|
|
||||||
|
try:
|
||||||
|
match = self.matches[state]
|
||||||
|
except IndexError:
|
||||||
|
match = None
|
||||||
|
return match
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_commands(self)->List[object]:
|
||||||
|
"""
|
||||||
|
Return a list of all the client commands and invocations.
|
||||||
|
"""
|
||||||
|
return BaseCommand.get_commands() + BaseInvocation.get_invocations()
|
||||||
|
|
||||||
|
def get_current_command(self, buffer: str)->tuple[str, str]:
|
||||||
|
"""
|
||||||
|
Parse the readline buffer to find the most recent command and its switch.
|
||||||
|
"""
|
||||||
|
if len(buffer)==0:
|
||||||
|
return None, None
|
||||||
|
tokens = shlex.split(buffer)
|
||||||
|
command = None
|
||||||
|
switch = None
|
||||||
|
for t in tokens:
|
||||||
|
if t[0].isalpha():
|
||||||
|
if switch is None:
|
||||||
|
command = t
|
||||||
|
else:
|
||||||
|
switch = t
|
||||||
|
# don't try to autocomplete switches that are already complete
|
||||||
|
if switch and buffer.endswith(' '):
|
||||||
|
switch=None
|
||||||
|
return command or '', switch or ''
|
||||||
|
|
||||||
|
def parse_commands(self)->Dict[str, List[str]]:
|
||||||
|
"""
|
||||||
|
Return a dict in which the keys are the command name
|
||||||
|
and the values are the parameters the command takes.
|
||||||
|
"""
|
||||||
|
result = dict()
|
||||||
|
for command in self.commands:
|
||||||
|
hints = get_type_hints(command)
|
||||||
|
name = get_args(hints['type'])[0]
|
||||||
|
result.update({name:hints})
|
||||||
|
return result
|
||||||
|
|
||||||
|
def get_command_options(self, command: str, switch: str)->List[str]:
|
||||||
|
"""
|
||||||
|
Return all the parameters that can be passed to the command as
|
||||||
|
command-line switches. Returns None if the command is unrecognized.
|
||||||
|
"""
|
||||||
|
parsed_commands = self.parse_commands()
|
||||||
|
if command not in parsed_commands:
|
||||||
|
return None
|
||||||
|
|
||||||
|
# handle switches in the format "-foo=bar"
|
||||||
|
argument = None
|
||||||
|
if switch and '=' in switch:
|
||||||
|
switch, argument = switch.split('=')
|
||||||
|
|
||||||
|
parameter = switch.strip('-')
|
||||||
|
if parameter in parsed_commands[command]:
|
||||||
|
if argument is None:
|
||||||
|
return self.get_parameter_options(parameter, parsed_commands[command][parameter])
|
||||||
|
else:
|
||||||
|
return [f"--{parameter}={x}" for x in self.get_parameter_options(parameter, parsed_commands[command][parameter])]
|
||||||
|
else:
|
||||||
|
return [f"--{x}" for x in parsed_commands[command].keys()]
|
||||||
|
|
||||||
|
def get_parameter_options(self, parameter: str, typehint)->List[str]:
|
||||||
|
"""
|
||||||
|
Given a parameter type (such as Literal), offers autocompletions.
|
||||||
|
"""
|
||||||
|
if get_origin(typehint) == Literal:
|
||||||
|
return get_args(typehint)
|
||||||
|
if parameter == 'model':
|
||||||
|
return self.manager.model_names()
|
||||||
|
|
||||||
|
def _pre_input_hook(self):
|
||||||
|
if self.linebuffer:
|
||||||
|
readline.insert_text(self.linebuffer)
|
||||||
|
readline.redisplay()
|
||||||
|
self.linebuffer = None
|
||||||
|
|
||||||
|
def set_autocompleter(services: InvocationServices) -> Completer:
|
||||||
|
global completer
|
||||||
|
|
||||||
|
if completer:
|
||||||
|
return completer
|
||||||
|
|
||||||
|
completer = Completer(services.model_manager)
|
||||||
|
|
||||||
|
readline.set_completer(completer.complete)
|
||||||
|
# pyreadline3 does not have a set_auto_history() method
|
||||||
|
try:
|
||||||
|
readline.set_auto_history(True)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
readline.set_pre_input_hook(completer._pre_input_hook)
|
||||||
|
readline.set_completer_delims(" ")
|
||||||
|
readline.parse_and_bind("tab: complete")
|
||||||
|
readline.parse_and_bind("set print-completions-horizontally off")
|
||||||
|
readline.parse_and_bind("set page-completions on")
|
||||||
|
readline.parse_and_bind("set skip-completed-text on")
|
||||||
|
readline.parse_and_bind("set show-all-if-ambiguous on")
|
||||||
|
|
||||||
|
histfile = Path(services.configuration.root_dir / ".invoke_history")
|
||||||
|
try:
|
||||||
|
readline.read_history_file(histfile)
|
||||||
|
readline.set_history_length(1000)
|
||||||
|
except FileNotFoundError:
|
||||||
|
pass
|
||||||
|
except OSError: # file likely corrupted
|
||||||
|
newname = f"{histfile}.old"
|
||||||
|
logger.error(
|
||||||
|
f"Your history file {histfile} couldn't be loaded and may be corrupted. Renaming it to {newname}"
|
||||||
|
)
|
||||||
|
histfile.replace(Path(newname))
|
||||||
|
atexit.register(readline.write_history_file, histfile)
|
||||||
427
invokeai/app/cli_app.py
Normal file
427
invokeai/app/cli_app.py
Normal file
@@ -0,0 +1,427 @@
|
|||||||
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import shlex
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
from typing import (
|
||||||
|
Union,
|
||||||
|
get_type_hints,
|
||||||
|
)
|
||||||
|
|
||||||
|
from pydantic import BaseModel, ValidationError
|
||||||
|
from pydantic.fields import Field
|
||||||
|
|
||||||
|
# This should come early so that the logger can pick up its configuration options
|
||||||
|
from .services.config import InvokeAIAppConfig
|
||||||
|
from invokeai.backend.util.logging import InvokeAILogger
|
||||||
|
config = InvokeAIAppConfig.get_config()
|
||||||
|
config.parse_args()
|
||||||
|
logger = InvokeAILogger().getLogger(config=config)
|
||||||
|
|
||||||
|
from invokeai.app.services.image_record_storage import SqliteImageRecordStorage
|
||||||
|
from invokeai.app.services.images import ImageService
|
||||||
|
from invokeai.app.services.metadata import CoreMetadataService
|
||||||
|
from invokeai.app.services.resource_name import SimpleNameService
|
||||||
|
from invokeai.app.services.urls import LocalUrlService
|
||||||
|
|
||||||
|
from .services.default_graphs import create_system_graphs
|
||||||
|
from .services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
|
||||||
|
|
||||||
|
from .cli.commands import BaseCommand, CliContext, ExitCli, add_graph_parsers, add_parsers, SortedHelpFormatter
|
||||||
|
from .cli.completer import set_autocompleter
|
||||||
|
from .invocations.baseinvocation import BaseInvocation
|
||||||
|
from .services.events import EventServiceBase
|
||||||
|
from .services.model_manager_initializer import get_model_manager
|
||||||
|
from .services.restoration_services import RestorationServices
|
||||||
|
from .services.graph import Edge, EdgeConnection, GraphExecutionState, GraphInvocation, LibraryGraph, are_connection_types_compatible
|
||||||
|
from .services.default_graphs import default_text_to_image_graph_id
|
||||||
|
from .services.image_file_storage import DiskImageFileStorage
|
||||||
|
from .services.invocation_queue import MemoryInvocationQueue
|
||||||
|
from .services.invocation_services import InvocationServices
|
||||||
|
from .services.invoker import Invoker
|
||||||
|
from .services.processor import DefaultInvocationProcessor
|
||||||
|
from .services.sqlite import SqliteItemStorage
|
||||||
|
|
||||||
|
|
||||||
|
class CliCommand(BaseModel):
|
||||||
|
command: Union[BaseCommand.get_commands() + BaseInvocation.get_invocations()] = Field(discriminator="type") # type: ignore
|
||||||
|
|
||||||
|
|
||||||
|
class InvalidArgs(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def add_invocation_args(command_parser):
|
||||||
|
# Add linking capability
|
||||||
|
command_parser.add_argument(
|
||||||
|
"--link",
|
||||||
|
"-l",
|
||||||
|
action="append",
|
||||||
|
nargs=3,
|
||||||
|
help="A link in the format 'source_node source_field dest_field'. source_node can be relative to history (e.g. -1)",
|
||||||
|
)
|
||||||
|
|
||||||
|
command_parser.add_argument(
|
||||||
|
"--link_node",
|
||||||
|
"-ln",
|
||||||
|
action="append",
|
||||||
|
help="A link from all fields in the specified node. Node can be relative to history (e.g. -1)",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def get_command_parser(services: InvocationServices) -> argparse.ArgumentParser:
|
||||||
|
# Create invocation parser
|
||||||
|
parser = argparse.ArgumentParser(formatter_class=SortedHelpFormatter)
|
||||||
|
|
||||||
|
def exit(*args, **kwargs):
|
||||||
|
raise InvalidArgs
|
||||||
|
|
||||||
|
parser.exit = exit
|
||||||
|
subparsers = parser.add_subparsers(dest="type")
|
||||||
|
|
||||||
|
# Create subparsers for each invocation
|
||||||
|
invocations = BaseInvocation.get_all_subclasses()
|
||||||
|
add_parsers(subparsers, invocations, add_arguments=add_invocation_args)
|
||||||
|
|
||||||
|
# Create subparsers for each command
|
||||||
|
commands = BaseCommand.get_all_subclasses()
|
||||||
|
add_parsers(subparsers, commands, exclude_fields=["type"])
|
||||||
|
|
||||||
|
# Create subparsers for exposed CLI graphs
|
||||||
|
# TODO: add a way to identify these graphs
|
||||||
|
text_to_image = services.graph_library.get(default_text_to_image_graph_id)
|
||||||
|
add_graph_parsers(subparsers, [text_to_image], add_arguments=add_invocation_args)
|
||||||
|
|
||||||
|
return parser
|
||||||
|
|
||||||
|
|
||||||
|
class NodeField():
|
||||||
|
alias: str
|
||||||
|
node_path: str
|
||||||
|
field: str
|
||||||
|
field_type: type
|
||||||
|
|
||||||
|
def __init__(self, alias: str, node_path: str, field: str, field_type: type):
|
||||||
|
self.alias = alias
|
||||||
|
self.node_path = node_path
|
||||||
|
self.field = field
|
||||||
|
self.field_type = field_type
|
||||||
|
|
||||||
|
|
||||||
|
def fields_from_type_hints(hints: dict[str, type], node_path: str) -> dict[str,NodeField]:
|
||||||
|
return {k:NodeField(alias=k, node_path=node_path, field=k, field_type=v) for k, v in hints.items()}
|
||||||
|
|
||||||
|
|
||||||
|
def get_node_input_field(graph: LibraryGraph, field_alias: str, node_id: str) -> NodeField:
|
||||||
|
"""Gets the node field for the specified field alias"""
|
||||||
|
exposed_input = next(e for e in graph.exposed_inputs if e.alias == field_alias)
|
||||||
|
node_type = type(graph.graph.get_node(exposed_input.node_path))
|
||||||
|
return NodeField(alias=exposed_input.alias, node_path=f'{node_id}.{exposed_input.node_path}', field=exposed_input.field, field_type=get_type_hints(node_type)[exposed_input.field])
|
||||||
|
|
||||||
|
|
||||||
|
def get_node_output_field(graph: LibraryGraph, field_alias: str, node_id: str) -> NodeField:
|
||||||
|
"""Gets the node field for the specified field alias"""
|
||||||
|
exposed_output = next(e for e in graph.exposed_outputs if e.alias == field_alias)
|
||||||
|
node_type = type(graph.graph.get_node(exposed_output.node_path))
|
||||||
|
node_output_type = node_type.get_output_type()
|
||||||
|
return NodeField(alias=exposed_output.alias, node_path=f'{node_id}.{exposed_output.node_path}', field=exposed_output.field, field_type=get_type_hints(node_output_type)[exposed_output.field])
|
||||||
|
|
||||||
|
|
||||||
|
def get_node_inputs(invocation: BaseInvocation, context: CliContext) -> dict[str, NodeField]:
|
||||||
|
"""Gets the inputs for the specified invocation from the context"""
|
||||||
|
node_type = type(invocation)
|
||||||
|
if node_type is not GraphInvocation:
|
||||||
|
return fields_from_type_hints(get_type_hints(node_type), invocation.id)
|
||||||
|
else:
|
||||||
|
graph: LibraryGraph = context.invoker.services.graph_library.get(context.graph_nodes[invocation.id])
|
||||||
|
return {e.alias: get_node_input_field(graph, e.alias, invocation.id) for e in graph.exposed_inputs}
|
||||||
|
|
||||||
|
|
||||||
|
def get_node_outputs(invocation: BaseInvocation, context: CliContext) -> dict[str, NodeField]:
|
||||||
|
"""Gets the outputs for the specified invocation from the context"""
|
||||||
|
node_type = type(invocation)
|
||||||
|
if node_type is not GraphInvocation:
|
||||||
|
return fields_from_type_hints(get_type_hints(node_type.get_output_type()), invocation.id)
|
||||||
|
else:
|
||||||
|
graph: LibraryGraph = context.invoker.services.graph_library.get(context.graph_nodes[invocation.id])
|
||||||
|
return {e.alias: get_node_output_field(graph, e.alias, invocation.id) for e in graph.exposed_outputs}
|
||||||
|
|
||||||
|
|
||||||
|
def generate_matching_edges(
    a: BaseInvocation, b: BaseInvocation, context: CliContext
) -> list[Edge]:
    """Generates all possible edges between two invocations.

    An edge is proposed for every field name that appears both in `a`'s
    outputs and `b`'s inputs, excluding bookkeeping fields, and only when the
    two field types are connection-compatible.
    """
    afields = get_node_outputs(a, context)
    bfields = get_node_inputs(b, context)

    # Fields present on both sides, minus bookkeeping fields that must never
    # be wired between nodes.
    invalid_fields = {"type", "id"}
    matching_fields = (set(afields) & set(bfields)) - invalid_fields

    # Keep only pairs whose types can legally be connected.
    compatible_fields = [
        f
        for f in matching_fields
        if are_connection_types_compatible(afields[f].field_type, bfields[f].field_type)
    ]

    return [
        Edge(
            source=EdgeConnection(node_id=afields[alias].node_path, field=afields[alias].field),
            destination=EdgeConnection(node_id=bfields[alias].node_path, field=bfields[alias].field),
        )
        for alias in compatible_fields
    ]
|
||||||
|
|
||||||
|
|
||||||
|
class SessionError(Exception):
    """Raised when a session error has occurred during CLI execution."""
|
||||||
|
|
||||||
|
|
||||||
|
def invoke_all(context: CliContext):
    """Runs all invocations in the specified session.

    Blocks, polling every 100ms, until the session reports completion; then
    logs any per-node errors and raises SessionError if at least one node
    failed.

    Raises:
        SessionError: if the session finished with one or more node errors.
    """
    context.invoker.invoke(context.session, invoke_all=True)
    # Busy-wait for completion, re-fetching session state each iteration.
    # NOTE(review): completion is checked on get_session() but errors below are
    # read from context.session — presumably get_session() refreshes
    # context.session in place; confirm against CliContext.
    while not context.get_session().is_complete():
        # Wait some time
        time.sleep(0.1)

    # Print any errors
    if context.session.has_error():
        for n in context.session.errors:
            context.invoker.services.logger.error(
                f"Error in node {n} (source node {context.session.prepared_source_mapping[n]}): {context.session.errors[n]}"
            )

        raise SessionError()
|
||||||
|
|
||||||
|
def invoke_cli():
    """Entry point for the interactive node CLI.

    Builds the full service stack (storage, image/latents services, invoker),
    then runs a REPL that parses piped commands into graph nodes, wires
    matching edges between them, and executes the session.
    """

    # get the optional list of invocations to execute on the command line
    parser = config.get_parser()
    parser.add_argument('commands',nargs='*')
    invocation_commands = parser.parse_args().commands

    # get the optional file to read commands from.
    # Simplest is to use it for STDIN
    if infile := config.from_file:
        sys.stdin = open(infile,"r")

    model_manager = get_model_manager(config,logger=logger)

    events = EventServiceBase()
    output_folder = config.output_path

    # TODO: build a file/path manager?
    if config.use_memory_db:
        db_location = ":memory:"
    else:
        db_location = config.db_path
        db_location.parent.mkdir(parents=True,exist_ok=True)

    logger.info(f'InvokeAI database location is "{db_location}"')

    # Shared by both the image service and the invocation services below.
    graph_execution_manager = SqliteItemStorage[GraphExecutionState](
        filename=db_location, table_name="graph_executions"
    )

    urls = LocalUrlService()
    metadata = CoreMetadataService()
    image_record_storage = SqliteImageRecordStorage(db_location)
    image_file_storage = DiskImageFileStorage(f"{output_folder}/images")
    names = SimpleNameService()

    images = ImageService(
        image_record_storage=image_record_storage,
        image_file_storage=image_file_storage,
        metadata=metadata,
        url=urls,
        logger=logger,
        names=names,
        graph_execution_manager=graph_execution_manager,
    )

    services = InvocationServices(
        model_manager=model_manager,
        events=events,
        latents = ForwardCacheLatentsStorage(DiskLatentsStorage(f'{output_folder}/latents')),
        images=images,
        queue=MemoryInvocationQueue(),
        graph_library=SqliteItemStorage[LibraryGraph](
            filename=db_location, table_name="graphs"
        ),
        graph_execution_manager=graph_execution_manager,
        processor=DefaultInvocationProcessor(),
        restoration=RestorationServices(config,logger=logger),
        logger=logger,
        configuration=config,
    )

    # Library graphs whose names act as pseudo-invocation types in the REPL.
    system_graphs = create_system_graphs(services.graph_library)
    system_graph_names = set([g.name for g in system_graphs])

    invoker = Invoker(services)
    session: GraphExecutionState = invoker.create_execution_state()
    parser = get_command_parser(services)

    # Matches negative relative node ids like "-1" (previous node).
    re_negid = re.compile('^-[0-9]+$')

    # Uncomment to print out previous sessions at startup
    # print(services.session_manager.list())

    context = CliContext(invoker, session, parser)
    set_autocompleter(services)

    command_line_args_exist = len(invocation_commands) > 0
    done = False

    # REPL: consume command-line commands first, then fall back to stdin.
    while not done:
        try:
            if command_line_args_exist:
                cmd_input = invocation_commands.pop(0)
                done = len(invocation_commands) == 0
            else:
                cmd_input = input("invoke> ")
        except (KeyboardInterrupt, EOFError):
            # Ctrl-c exits
            break

        try:
            # Refresh the state of the session
            #history = list(get_graph_execution_history(context.session))
            history = list(reversed(context.nodes_added))

            # Split the command for piping
            cmds = cmd_input.split("|")
            start_id = len(context.nodes_added)
            current_id = start_id
            new_invocations = list()
            for cmd in cmds:
                if cmd is None or cmd.strip() == "":
                    raise InvalidArgs("Empty command")

                # Parse args to create invocation
                args = vars(context.parser.parse_args(shlex.split(cmd.strip())))

                # Override defaults
                for field_name, field_default in context.defaults.items():
                    if field_name in args:
                        args[field_name] = field_default

                # Parse invocation
                command: CliCommand = None  # type:ignore
                system_graph: LibraryGraph|None = None
                if args['type'] in system_graph_names:
                    # A system graph name was typed: instantiate it as a
                    # GraphInvocation and copy matching args onto the exposed
                    # inner-node fields.
                    system_graph = next(filter(lambda g: g.name == args['type'], system_graphs))
                    invocation = GraphInvocation(graph=system_graph.graph, id=str(current_id))
                    for exposed_input in system_graph.exposed_inputs:
                        if exposed_input.alias in args:
                            node = invocation.graph.get_node(exposed_input.node_path)
                            field = exposed_input.field
                            setattr(node, field, args[exposed_input.alias])
                    command = CliCommand(command = invocation)
                    context.graph_nodes[invocation.id] = system_graph.id
                else:
                    args["id"] = current_id
                    command = CliCommand(command=args)

                # NOTE(review): both branches above assign command, so this
                # guard looks unreachable — confirm before removing.
                if command is None:
                    continue

                # Run any CLI commands immediately
                if isinstance(command.command, BaseCommand):
                    # Invoke all current nodes to preserve operation order
                    invoke_all(context)

                    # Run the command
                    command.command.run(context)
                    continue

                # TODO: handle linking with library graphs
                # Pipe previous command output (if there was a previous command)
                edges: list[Edge] = list()
                if len(history) > 0 or current_id != start_id:
                    # Previous node is either the last node added in this
                    # command line, or the most recent node from history.
                    from_id = (
                        history[0] if current_id == start_id else str(current_id - 1)
                    )
                    from_node = (
                        next(filter(lambda n: n[0].id == from_id, new_invocations))[0]
                        if current_id != start_id
                        else context.session.graph.get_node(from_id)
                    )
                    matching_edges = generate_matching_edges(
                        from_node, command.command, context
                    )
                    edges.extend(matching_edges)

                # Parse provided links
                if "link_node" in args and args["link_node"]:
                    for link in args["link_node"]:
                        node_id = link
                        # Negative ids are relative to the node being added.
                        if re_negid.match(node_id):
                            node_id = str(current_id + int(node_id))

                        link_node = context.session.graph.get_node(node_id)
                        matching_edges = generate_matching_edges(
                            link_node, command.command, context
                        )
                        # Explicit --link_node wins over auto-piped edges to
                        # the same destinations.
                        matching_destinations = [e.destination for e in matching_edges]
                        edges = [e for e in edges if e.destination not in matching_destinations]
                        edges.extend(matching_edges)

                if "link" in args and args["link"]:
                    for link in args["link"]:
                        # An explicit --link replaces any edge already aimed at
                        # the same destination field.
                        edges = [e for e in edges if e.destination.node_id != command.command.id or e.destination.field != link[2]]

                        node_id = link[0]
                        if re_negid.match(node_id):
                            node_id = str(current_id + int(node_id))

                        # TODO: handle missing input/output
                        node_output = get_node_outputs(context.session.graph.get_node(node_id), context)[link[1]]
                        node_input = get_node_inputs(command.command, context)[link[2]]

                        edges.append(
                            Edge(
                                source=EdgeConnection(node_id=node_output.node_path, field=node_output.field),
                                destination=EdgeConnection(node_id=node_input.node_path, field=node_input.field)
                            )
                        )

                new_invocations.append((command.command, edges))

                current_id = current_id + 1

                # Add the node to the session
                context.add_node(command.command)
                for edge in edges:
                    # NOTE(review): looks like leftover debug output — confirm
                    # whether edges should be printed to stdout here.
                    print(edge)
                    context.add_edge(edge)

            # Execute all remaining nodes
            invoke_all(context)

        except InvalidArgs:
            invoker.services.logger.warning('Invalid command, use "help" to list commands')
            continue

        except ValidationError:
            invoker.services.logger.warning('Invalid command arguments, run "<command> --help" for summary')

        except SessionError:
            # Start a new session
            invoker.services.logger.warning("Session error: creating a new session")
            context.reset()

        except ExitCli:
            break

        except SystemExit:
            # argparse raises SystemExit on bad args; keep the REPL alive.
            continue

    invoker.stop()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Script entry point: launch the interactive CLI loop.
    invoke_cli()
|
||||||
@@ -4,5 +4,9 @@ __all__ = []
|
|||||||
|
|
||||||
dirname = os.path.dirname(os.path.abspath(__file__))
|
dirname = os.path.dirname(os.path.abspath(__file__))
|
||||||
for f in os.listdir(dirname):
|
for f in os.listdir(dirname):
|
||||||
if f != "__init__.py" and os.path.isfile("%s/%s" % (dirname, f)) and f[-3:] == ".py":
|
if (
|
||||||
|
f != "__init__.py"
|
||||||
|
and os.path.isfile("%s/%s" % (dirname, f))
|
||||||
|
and f[-3:] == ".py"
|
||||||
|
):
|
||||||
__all__.append(f[:-3])
|
__all__.append(f[:-3])
|
||||||
@@ -1,10 +1,15 @@
|
|||||||
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
from inspect import signature
|
from inspect import signature
|
||||||
from typing import get_args, get_type_hints
|
from typing import get_args, get_type_hints, Dict, List, Literal, TypedDict, TYPE_CHECKING
|
||||||
|
|
||||||
from pydantic import BaseModel, Field
|
from pydantic import BaseModel, Field
|
||||||
from ..services.invocation_services import InvocationServices
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from ..services.invocation_services import InvocationServices
|
||||||
|
|
||||||
|
|
||||||
class InvocationContext:
|
class InvocationContext:
|
||||||
@@ -70,5 +75,62 @@ class BaseInvocation(ABC, BaseModel):
|
|||||||
def invoke(self, context: InvocationContext) -> BaseInvocationOutput:
|
def invoke(self, context: InvocationContext) -> BaseInvocationOutput:
|
||||||
"""Invoke with provided context and return outputs."""
|
"""Invoke with provided context and return outputs."""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
#fmt: off
|
||||||
id: str = Field(description="The id of this node. Must be unique among all nodes.")
|
id: str = Field(description="The id of this node. Must be unique among all nodes.")
|
||||||
|
is_intermediate: bool = Field(default=False, description="Whether or not this node is an intermediate node.")
|
||||||
|
#fmt: on
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: figure out a better way to provide these hints
|
||||||
|
# TODO: when we can upgrade to python 3.11, we can use the`NotRequired` type instead of `total=False`
|
||||||
|
class UIConfig(TypedDict, total=False):
    """Optional UI hints attached to an invocation's generated JSON schema.

    `total=False` makes every key optional (NotRequired is unavailable before
    Python 3.11 — see TODO above this class in the file).
    """
    # Per-field overrides for the widget type the UI should render.
    type_hints: Dict[
        str,
        Literal[
            "integer",
            "float",
            "boolean",
            "string",
            "enum",
            "image",
            "latents",
            "model",
            "control",
        ],
    ]
    # Categorisation tags for the invocation.
    tags: List[str]
    # Human-readable title shown in place of the type name.
    title: str
|
||||||
|
|
||||||
|
class CustomisedSchemaExtra(TypedDict):
    """Shape of the `schema_extra` dict carried by InvocationConfig: a required `ui` hints entry."""
    # UI hints merged into the invocation's JSON schema.
    ui: UIConfig
|
||||||
|
|
||||||
|
|
||||||
|
class InvocationConfig(BaseModel.Config):
    """Customizes pydantic's BaseModel.Config class for use by Invocations.

    Provide `schema_extra` a `ui` dict to add hints for generated UIs.

    `tags`
    - A list of strings, used to categorise invocations.

    `type_hints`
    - A dict of field types which override the types in the invocation definition.
    - Each key should be the name of one of the invocation's fields.
    - Each value should be one of the valid types:
      - `integer`, `float`, `boolean`, `string`, `enum`, `image`, `latents`, `model`

    ```python
    class Config(InvocationConfig):
      schema_extra = {
          "ui": {
              "tags": ["stable-diffusion", "image"],
              "type_hints": {
                  "initial_image": "image",
              },
          },
      }
    ```
    """

    # Annotation only — tells type checkers that invocation Config classes
    # carry the UI-hint structure defined by CustomisedSchemaExtra.
    schema_extra: CustomisedSchemaExtra
|
||||||
94
invokeai/app/invocations/collections.py
Normal file
94
invokeai/app/invocations/collections.py
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team
|
||||||
|
|
||||||
|
from typing import Literal
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from pydantic import Field, validator
|
||||||
|
|
||||||
|
from invokeai.app.util.misc import SEED_MAX, get_random_seed
|
||||||
|
|
||||||
|
from .baseinvocation import (
|
||||||
|
BaseInvocation,
|
||||||
|
InvocationContext,
|
||||||
|
BaseInvocationOutput,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class IntCollectionOutput(BaseInvocationOutput):
    """A collection of integers"""

    # Discriminator used by the graph system to identify this output type.
    type: Literal["int_collection"] = "int_collection"

    # Outputs
    # (pydantic copies field defaults per-instance, so the mutable [] default
    # is not shared between outputs.)
    collection: list[int] = Field(default=[], description="The int collection")
|
||||||
|
|
||||||
|
class FloatCollectionOutput(BaseInvocationOutput):
    """A collection of floats"""

    # Discriminator used by the graph system to identify this output type.
    type: Literal["float_collection"] = "float_collection"

    # Outputs
    collection: list[float] = Field(default=[], description="The float collection")
|
||||||
|
|
||||||
|
|
||||||
|
class RangeInvocation(BaseInvocation):
    """Creates a range of numbers from start to stop with step"""

    type: Literal["range"] = "range"

    # Inputs
    start: int = Field(default=0, description="The start of the range")
    stop: int = Field(default=10, description="The stop of the range")
    step: int = Field(default=1, description="The step of the range")

    @validator("stop")
    def stop_gt_start(cls, v, values):
        # "start" is only present in `values` if it validated successfully.
        if "start" in values and v <= values["start"]:
            raise ValueError("stop must be greater than start")
        return v

    def invoke(self, context: InvocationContext) -> IntCollectionOutput:
        # `stop` is exclusive, per Python range() semantics.
        return IntCollectionOutput(
            collection=list(range(self.start, self.stop, self.step))
        )
|
||||||
|
|
||||||
|
|
||||||
|
class RangeOfSizeInvocation(BaseInvocation):
    """Creates a range of integers beginning at `start`, containing exactly `size` values spaced `step` apart."""

    type: Literal["range_of_size"] = "range_of_size"

    # Inputs
    start: int = Field(default=0, description="The start of the range")
    size: int = Field(default=1, description="The number of values")
    step: int = Field(default=1, description="The step of the range")

    def invoke(self, context: InvocationContext) -> IntCollectionOutput:
        # `size` is documented as the number of values, so the range must span
        # size * step: range(start, start + size, step) would yield only
        # ceil(size / step) values whenever step > 1.
        return IntCollectionOutput(
            collection=list(
                range(self.start, self.start + self.size * self.step, self.step)
            )
        )
|
||||||
|
|
||||||
|
|
||||||
|
class RandomRangeInvocation(BaseInvocation):
    """Creates a collection of random numbers"""

    type: Literal["random_range"] = "random_range"

    # Inputs
    low: int = Field(default=0, description="The inclusive low value")
    high: int = Field(
        default=np.iinfo(np.int32).max, description="The exclusive high value"
    )
    size: int = Field(default=1, description="The number of values to generate")
    # Seeded so the same node inputs reproduce the same collection.
    seed: int = Field(
        ge=0,
        le=SEED_MAX,
        description="The seed for the RNG (omit for random)",
        default_factory=get_random_seed,
    )

    def invoke(self, context: InvocationContext) -> IntCollectionOutput:
        rng = np.random.default_rng(self.seed)
        # NOTE: rng.integers returns an ndarray, so the list elements are
        # numpy integer scalars rather than Python ints.
        return IntCollectionOutput(
            collection=list(rng.integers(low=self.low, high=self.high, size=self.size))
        )
|
||||||
266
invokeai/app/invocations/compel.py
Normal file
266
invokeai/app/invocations/compel.py
Normal file
@@ -0,0 +1,266 @@
|
|||||||
|
from typing import Literal, Optional, Union
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
|
from invokeai.app.invocations.util.choose_model import choose_model
|
||||||
|
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig
|
||||||
|
from ...backend.prompting.conditioning import try_parse_legacy_blend
|
||||||
|
|
||||||
|
from ...backend.util.devices import choose_torch_device, torch_dtype
|
||||||
|
from ...backend.stable_diffusion.diffusion import InvokeAIDiffuserComponent
|
||||||
|
from ...backend.stable_diffusion.textual_inversion_manager import TextualInversionManager
|
||||||
|
|
||||||
|
from compel import Compel
|
||||||
|
from compel.prompt_parser import (
|
||||||
|
Blend,
|
||||||
|
CrossAttentionControlSubstitute,
|
||||||
|
FlattenedPrompt,
|
||||||
|
Fragment, Conjunction,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class ConditioningField(BaseModel):
    """Reference to conditioning data stored in the latents service."""

    # Key under which the (conditioning, extra-info) tuple was saved.
    conditioning_name: Optional[str] = Field(default=None, description="The name of conditioning data")

    class Config:
        # Mark conditioning_name required in the generated JSON schema even
        # though it defaults to None on the Python side.
        schema_extra = {"required": ["conditioning_name"]}
|
||||||
|
|
||||||
|
|
||||||
|
class CompelOutput(BaseInvocationOutput):
    """Compel parser output"""

    #fmt: off
    type: Literal["compel_output"] = "compel_output"

    # Pointer to the conditioning data saved by CompelInvocation.
    conditioning: ConditioningField = Field(default=None, description="Conditioning")
    #fmt: on
|
||||||
|
|
||||||
|
|
||||||
|
class CompelInvocation(BaseInvocation):
    """Parse prompt using compel package to conditioning.

    Resolves the model, expands HuggingFace concept triggers in the prompt,
    builds conditioning tensors via compel, saves them under a name derived
    from the execution state and node id, and returns a reference to them.
    """

    type: Literal["compel"] = "compel"

    # Raw prompt text; may contain compel syntax and HF concept triggers.
    prompt: str = Field(default="", description="Prompt")
    # Model whose tokenizer and text encoder are used.
    model: str = Field(default="", description="Model to use")

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "title": "Prompt (Compel)",
                "tags": ["prompt", "compel"],
                "type_hints": {
                    "model": "model"
                }
            },
        }

    def invoke(self, context: InvocationContext) -> CompelOutput:

        # TODO: load without model
        model = choose_model(context.services.model_manager, self.model)
        pipeline = model["model"]
        tokenizer = pipeline.tokenizer
        text_encoder = pipeline.text_encoder

        # TODO: global? input?
        #use_full_precision = precision == "float32" or precision == "autocast"
        #use_full_precision = False

        # TODO: redo TI when separate model loading implemented
        #textual_inversion_manager = TextualInversionManager(
        #    tokenizer=tokenizer,
        #    text_encoder=text_encoder,
        #    full_precision=use_full_precision,
        #)

        def load_huggingface_concepts(concepts: list[str]):
            # Callback invoked by the concepts library for unseen triggers.
            pipeline.textual_inversion_manager.load_huggingface_concepts(concepts)

        # apply the concepts library to the prompt
        prompt_str = pipeline.textual_inversion_manager.hf_concepts_library.replace_concepts_with_triggers(
            self.prompt,
            lambda concepts: load_huggingface_concepts(concepts),
            pipeline.textual_inversion_manager.get_all_trigger_strings(),
        )

        # lazy-load any deferred textual inversions.
        # this might take a couple of seconds the first time a textual inversion is used.
        pipeline.textual_inversion_manager.create_deferred_token_ids_for_any_trigger_terms(
            prompt_str
        )

        compel = Compel(
            tokenizer=tokenizer,
            text_encoder=text_encoder,
            textual_inversion_manager=pipeline.textual_inversion_manager,
            dtype_for_device_getter=torch_dtype,
            truncate_long_prompts=False,
        )

        # Prefer the legacy "a:1 b:2"-style blend syntax when present.
        legacy_blend = try_parse_legacy_blend(prompt_str, skip_normalize=False)
        if legacy_blend is not None:
            conjunction = legacy_blend
        else:
            conjunction = Compel.parse_prompt_string(prompt_str)

        if context.services.configuration.log_tokenization:
            log_tokenization_for_conjunction(conjunction, tokenizer)

        c, options = compel.build_conditioning_tensor_for_conjunction(conjunction)

        ec = InvokeAIDiffuserComponent.ExtraConditioningInfo(
            tokens_count_including_eos_bos=get_max_token_count(tokenizer, conjunction),
            cross_attention_control_args=options.get("cross_attention_control", None),
        )

        # Name is unique per execution-state + node, so parallel sessions
        # cannot collide.
        conditioning_name = f"{context.graph_execution_state_id}_{self.id}_conditioning"

        # TODO: hacky but works ;D maybe rename latents somehow?
        context.services.latents.save(conditioning_name, (c, ec))

        return CompelOutput(
            conditioning=ConditioningField(
                conditioning_name=conditioning_name,
            ),
        )
|
||||||
|
|
||||||
|
|
||||||
|
def get_max_token_count(
    tokenizer, prompt: Union[FlattenedPrompt, Blend, Conjunction], truncate_if_too_long=False
) -> int:
    """Return the token count of the largest prompt fragment.

    Blends report the max over their child prompts, conjunctions the sum over
    theirs; a flat prompt is simply tokenized and counted.
    """
    # Exact type checks (not isinstance): compel's prompt classes are treated
    # as closed variants here.
    if type(prompt) is Blend:
        return max(
            get_max_token_count(tokenizer, child, truncate_if_too_long)
            for child in prompt.prompts
        )
    if type(prompt) is Conjunction:
        return sum(
            get_max_token_count(tokenizer, child, truncate_if_too_long)
            for child in prompt.prompts
        )
    return len(get_tokens_for_prompt_object(tokenizer, prompt, truncate_if_too_long))
|
||||||
|
|
||||||
|
|
||||||
|
def get_tokens_for_prompt_object(
    tokenizer, parsed_prompt: FlattenedPrompt, truncate_if_too_long=True
) -> list[str]:
    """Tokenize a flattened prompt, optionally truncating to the model limit.

    Fragments are joined with spaces (cross-attention substitutes contribute
    their *original* text); the joined string is tokenized and, when
    requested, truncated to model_max_length minus the BOS/EOS slots.

    Raises:
        ValueError: if given a Blend — callers must tokenize its children.
    """
    # Fix: the original annotated the return as `[str]`, which is a list
    # instance, not a type; `list[str]` is the valid annotation.
    if type(parsed_prompt) is Blend:
        raise ValueError(
            "Blend is not supported here - you need to get tokens for each of its .children"
        )

    text_fragments = [
        x.text
        if type(x) is Fragment
        else (
            " ".join([f.text for f in x.original])
            if type(x) is CrossAttentionControlSubstitute
            else str(x)
        )
        for x in parsed_prompt.children
    ]
    text = " ".join(text_fragments)
    tokens = tokenizer.tokenize(text)
    if truncate_if_too_long:
        max_tokens_length = tokenizer.model_max_length - 2  # typically 75
        tokens = tokens[0:max_tokens_length]
    return tokens
|
||||||
|
|
||||||
|
|
||||||
|
def log_tokenization_for_conjunction(
    c: Conjunction, tokenizer, display_label_prefix=None
):
    """Log the tokenization of every prompt in a conjunction.

    Multi-prompt conjunctions get a per-part label with the part's weight;
    a single prompt keeps the caller's prefix unchanged.
    """
    display_label_prefix = display_label_prefix or ""
    for i, p in enumerate(c.prompts):
        if len(c.prompts)>1:
            this_display_label_prefix = f"{display_label_prefix}(conjunction part {i + 1}, weight={c.weights[i]})"
        else:
            this_display_label_prefix = display_label_prefix
        log_tokenization_for_prompt_object(
            p,
            tokenizer,
            display_label_prefix=this_display_label_prefix
        )
|
||||||
|
|
||||||
|
|
||||||
|
def log_tokenization_for_prompt_object(
    p: Union[Blend, FlattenedPrompt], tokenizer, display_label_prefix=None
):
    """Log tokenization for a blend or flattened prompt.

    Blends recurse into their children with a per-part label. Flattened
    prompts using cross-attention control (`.swap`) are logged twice — once
    with original fragments, once with edited fragments — otherwise the
    fragment texts are joined and logged once.
    """
    display_label_prefix = display_label_prefix or ""
    if type(p) is Blend:
        blend: Blend = p
        for i, c in enumerate(blend.prompts):
            log_tokenization_for_prompt_object(
                c,
                tokenizer,
                display_label_prefix=f"{display_label_prefix}(blend part {i + 1}, weight={blend.weights[i]})",
            )
    elif type(p) is FlattenedPrompt:
        flattened_prompt: FlattenedPrompt = p
        if flattened_prompt.wants_cross_attention_control:
            original_fragments = []
            edited_fragments = []
            for f in flattened_prompt.children:
                if type(f) is CrossAttentionControlSubstitute:
                    # A substitute contributes different text to each side.
                    original_fragments += f.original
                    edited_fragments += f.edited
                else:
                    # Plain fragments appear unchanged on both sides.
                    original_fragments.append(f)
                    edited_fragments.append(f)

            original_text = " ".join([x.text for x in original_fragments])
            log_tokenization_for_text(
                original_text,
                tokenizer,
                display_label=f"{display_label_prefix}(.swap originals)",
            )
            edited_text = " ".join([x.text for x in edited_fragments])
            log_tokenization_for_text(
                edited_text,
                tokenizer,
                display_label=f"{display_label_prefix}(.swap replacements)",
            )
        else:
            text = " ".join([x.text for x in flattened_prompt.children])
            log_tokenization_for_text(
                text, tokenizer, display_label=display_label_prefix
            )
|
||||||
|
|
||||||
|
|
||||||
|
def log_tokenization_for_text(text, tokenizer, display_label=None, truncate_if_too_long=False):
    """shows how the prompt is tokenized
    # usually tokens have '</w>' to indicate end-of-word,
    # but for readability it has been replaced with ' '
    """
    all_tokens = tokenizer.tokenize(text)
    total_count = len(all_tokens)
    kept = ""
    dropped = ""
    kept_count = 0

    for index, raw_token in enumerate(all_tokens):
        token = raw_token.replace("</w>", " ")
        # alternate color
        color = (kept_count % 6) + 1
        if truncate_if_too_long and index >= tokenizer.model_max_length:
            # Past the model limit: record as discarded, color stays fixed.
            dropped += f"\x1b[0;3{color};40m{token}"
        else:
            kept += f"\x1b[0;3{color};40m{token}"
            kept_count += 1

    if kept_count > 0:
        print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({kept_count}):')
        print(f"{kept}\x1b[0m")

    if dropped != "":
        print(f"\n>> [TOKENLOG] Tokens Discarded ({total_count - kept_count}):")
        print(f"{dropped}\x1b[0m")
|
||||||
457
invokeai/app/invocations/controlnet_image_processors.py
Normal file
457
invokeai/app/invocations/controlnet_image_processors.py
Normal file
@@ -0,0 +1,457 @@
|
|||||||
|
# InvokeAI nodes for ControlNet image preprocessors
|
||||||
|
# initial implementation by Gregg Helt, 2023
|
||||||
|
# heavily leverages controlnet_aux package: https://github.com/patrickvonplaten/controlnet_aux
|
||||||
|
from builtins import float, bool
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from typing import Literal, Optional, Union, List
|
||||||
|
from PIL import Image, ImageFilter, ImageOps
|
||||||
|
from pydantic import BaseModel, Field, validator
|
||||||
|
|
||||||
|
from ..models.image import ImageField, ImageCategory, ResourceOrigin
|
||||||
|
from .baseinvocation import (
|
||||||
|
BaseInvocation,
|
||||||
|
BaseInvocationOutput,
|
||||||
|
InvocationContext,
|
||||||
|
InvocationConfig,
|
||||||
|
)
|
||||||
|
|
||||||
|
from controlnet_aux import (
|
||||||
|
CannyDetector,
|
||||||
|
HEDdetector,
|
||||||
|
LineartDetector,
|
||||||
|
LineartAnimeDetector,
|
||||||
|
MidasDetector,
|
||||||
|
MLSDdetector,
|
||||||
|
NormalBaeDetector,
|
||||||
|
OpenposeDetector,
|
||||||
|
PidiNetDetector,
|
||||||
|
ContentShuffleDetector,
|
||||||
|
ZoeDetector,
|
||||||
|
MediapipeFaceDetector,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .image import ImageOutput, PILInvocationConfig
|
||||||
|
|
||||||
|
# Default set of ControlNet model repo ids offered in the UI dropdown.
# Entries are HuggingFace model identifiers; an entry of the form
# "model,subfolder" is split downstream (see note near the end of the list).
CONTROLNET_DEFAULT_MODELS = [
    ###########################################
    # lllyasviel sd v1.5, ControlNet v1.0 models
    ##############################################
    "lllyasviel/sd-controlnet-canny",
    "lllyasviel/sd-controlnet-depth",
    "lllyasviel/sd-controlnet-hed",
    "lllyasviel/sd-controlnet-seg",
    "lllyasviel/sd-controlnet-openpose",
    "lllyasviel/sd-controlnet-scribble",
    "lllyasviel/sd-controlnet-normal",
    "lllyasviel/sd-controlnet-mlsd",

    #############################################
    # lllyasviel sd v1.5, ControlNet v1.1 models
    #############################################
    "lllyasviel/control_v11p_sd15_canny",
    "lllyasviel/control_v11p_sd15_openpose",
    "lllyasviel/control_v11p_sd15_seg",
    # "lllyasviel/control_v11p_sd15_depth",  # broken
    "lllyasviel/control_v11f1p_sd15_depth",
    "lllyasviel/control_v11p_sd15_normalbae",
    "lllyasviel/control_v11p_sd15_scribble",
    "lllyasviel/control_v11p_sd15_mlsd",
    "lllyasviel/control_v11p_sd15_softedge",
    "lllyasviel/control_v11p_sd15s2_lineart_anime",
    "lllyasviel/control_v11p_sd15_lineart",
    "lllyasviel/control_v11p_sd15_inpaint",
    # "lllyasviel/control_v11u_sd15_tile",
    # problem (temporary?) with HuggingFace "lllyasviel/control_v11u_sd15_tile",
    # so for now replaced by "lllyasviel/control_v11f1e_sd15_tile" (listed below)
    "lllyasviel/control_v11e_sd15_shuffle",
    "lllyasviel/control_v11e_sd15_ip2p",
    "lllyasviel/control_v11f1e_sd15_tile",

    #################################################
    # thibaud sd v2.1 models (ControlNet v1.0? or v1.1?)
    ##################################################
    "thibaud/controlnet-sd21-openpose-diffusers",
    "thibaud/controlnet-sd21-canny-diffusers",
    "thibaud/controlnet-sd21-depth-diffusers",
    "thibaud/controlnet-sd21-scribble-diffusers",
    "thibaud/controlnet-sd21-hed-diffusers",
    "thibaud/controlnet-sd21-zoedepth-diffusers",
    "thibaud/controlnet-sd21-color-diffusers",
    "thibaud/controlnet-sd21-openposev2-diffusers",
    "thibaud/controlnet-sd21-lineart-diffusers",
    "thibaud/controlnet-sd21-normalbae-diffusers",
    "thibaud/controlnet-sd21-ade20k-diffusers",

    ##############################################
    # ControlNetMediaPipeface, ControlNet v1.1
    ##############################################
    # ["CrucibleAI/ControlNetMediaPipeFace", "diffusion_sd15"],  # SD 1.5
    # diffusion_sd15 needs to be passed to from_pretrained() as subfolder arg
    # hacked t2l to split to model & subfolder if format is "model,subfolder"
    "CrucibleAI/ControlNetMediaPipeFace,diffusion_sd15",  # SD 1.5
    "CrucibleAI/ControlNetMediaPipeFace",  # SD 2.1?
]
|
||||||
|
|
||||||
|
# Literal types used for UI validation: the accepted ControlNet model names
# and the accepted guidance/conditioning modes.
CONTROLNET_NAME_VALUES = Literal[tuple(CONTROLNET_DEFAULT_MODELS)]
CONTROLNET_MODE_VALUES = Literal[tuple(["balanced", "more_prompt", "more_control", "unbalanced"])]
|
||||||
|
|
||||||
|
class ControlField(BaseModel):
    """Bundle of ControlNet conditioning parameters passed between nodes.

    Holds the control image, the model identifier, a scalar or per-step
    weight list, the step range over which the ControlNet is active, and
    the guidance balance mode.
    """

    image: ImageField = Field(default=None, description="The control image")
    control_model: Optional[str] = Field(default=None, description="The ControlNet model to use")
    control_weight: Union[float, List[float]] = Field(default=1, description="The weight given to the ControlNet")
    begin_step_percent: float = Field(default=0, ge=0, le=1,
                                      description="When the ControlNet is first applied (% of total steps)")
    end_step_percent: float = Field(default=1, ge=0, le=1,
                                    description="When the ControlNet is last applied (% of total steps)")
    # Fixed typo in user-facing description: "contorl" -> "control".
    control_mode: CONTROLNET_MODE_VALUES = Field(default="balanced", description="The control mode to use")

    @validator("control_weight")
    def abs_le_one(cls, v):
        """Validate that all abs(values) are <= 1 (scalar or list form)."""
        if isinstance(v, list):
            for i in v:
                if abs(i) > 1:
                    raise ValueError('all abs(control_weight) must be <= 1')
        else:
            if abs(v) > 1:
                raise ValueError('abs(control_weight) must be <= 1')
        return v

    class Config:
        # UI schema metadata: required fields and a type hint so the weight
        # renders as a float input.
        schema_extra = {
            "required": ["image", "control_model", "control_weight", "begin_step_percent", "end_step_percent"],
            "ui": {
                "type_hints": {
                    "control_weight": "float",
                }
            }
        }
|
||||||
|
|
||||||
|
|
||||||
|
class ControlOutput(BaseInvocationOutput):
    """Node output carrying ControlNet info for downstream nodes."""
    # fmt: off
    type: Literal["control_output"] = "control_output"
    # The assembled ControlField (image, model, weight, step range, mode).
    control: ControlField = Field(default=None, description="The control info")
    # fmt: on
|
||||||
|
|
||||||
|
|
||||||
|
class ControlNetInvocation(BaseInvocation):
    """Collects ControlNet info to pass to other nodes."""

    # fmt: off
    type: Literal["controlnet"] = "controlnet"
    # Inputs
    image: ImageField = Field(default=None, description="The control image")
    control_model: CONTROLNET_NAME_VALUES = Field(default="lllyasviel/sd-controlnet-canny",
                                                  description="control model used")
    control_weight: Union[float, List[float]] = Field(default=1.0, description="The weight given to the ControlNet")
    begin_step_percent: float = Field(default=0, ge=0, le=1,
                                      description="When the ControlNet is first applied (% of total steps)")
    end_step_percent: float = Field(default=1, ge=0, le=1,
                                    description="When the ControlNet is last applied (% of total steps)")
    control_mode: CONTROLNET_MODE_VALUES = Field(default="balanced", description="The control mode used")
    # fmt: on

    class Config(InvocationConfig):
        # UI schema metadata: node tags and widget type hints.
        schema_extra = {
            "ui": {
                "tags": ["latents"],
                "type_hints": {
                    "model": "model",
                    "control": "control",
                    # "cfg_scale": "float",
                    "cfg_scale": "number",
                    "control_weight": "float",
                }
            },
        }

    def invoke(self, context: InvocationContext) -> ControlOutput:
        # This node performs no processing: it simply repackages its inputs
        # into a ControlField for consumption by generation nodes.
        return ControlOutput(
            control=ControlField(
                image=self.image,
                control_model=self.control_model,
                control_weight=self.control_weight,
                begin_step_percent=self.begin_step_percent,
                end_step_percent=self.end_step_percent,
                control_mode=self.control_mode,
            ),
        )
|
||||||
|
|
||||||
|
# TODO: move image processors to separate file (image_analysis.py
|
||||||
|
class ImageProcessorInvocation(BaseInvocation, PILInvocationConfig):
    """Base class for invocations that preprocess images for ControlNet.

    Subclasses override `run_processor`; `invoke` loads the input image,
    runs the processor, saves the result, and returns an ImageOutput.
    """

    # fmt: off
    type: Literal["image_processor"] = "image_processor"
    # Inputs
    image: ImageField = Field(default=None, description="The image to process")
    # fmt: on

    def run_processor(self, image):
        """Return the processed image; base implementation is a pass-through."""
        return image

    def invoke(self, context: InvocationContext) -> ImageOutput:
        raw_image = context.services.images.get_pil_image(self.image.image_name)
        processed_image = self.run_processor(raw_image)

        # FIXME: what happened to image metadata?
        # metadata = context.services.metadata.build_metadata(
        #     session_id=context.graph_execution_state_id, node=self
        # )

        # currently can't see processed image in node UI without a showImage node,
        # so for now setting image_type to RESULT instead of INTERMEDIATE so will get saved in gallery
        image_dto = context.services.images.create(
            image=processed_image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.CONTROL,
            session_id=context.graph_execution_state_id,
            node_id=self.id,
            is_intermediate=self.is_intermediate,
        )

        # Build the ImageOutput; width/height are taken from the stored DTO,
        # not from the in-memory PIL image.
        # (Fixed: the original had a no-op bare string statement here instead
        # of a comment, plus non-idiomatic spacing in keyword arguments.)
        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class CannyImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Canny edge detection for ControlNet"""
    # fmt: off
    type: Literal["canny_image_processor"] = "canny_image_processor"
    # Input
    low_threshold: int = Field(default=100, ge=0, le=255, description="The low threshold of the Canny pixel gradient (0-255)")
    high_threshold: int = Field(default=200, ge=0, le=255, description="The high threshold of the Canny pixel gradient (0-255)")
    # fmt: on

    def run_processor(self, image):
        """Run Canny edge detection with the configured thresholds."""
        # The Canny detector is stateless, so construct and call it inline.
        return CannyDetector()(image, self.low_threshold, self.high_threshold)
|
||||||
|
|
||||||
|
|
||||||
|
class HedImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies HED edge detection to image"""
    # fmt: off
    type: Literal["hed_image_processor"] = "hed_image_processor"
    # Inputs
    detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection")
    image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image")
    # safe not supported in controlnet_aux v0.0.3
    # safe: bool = Field(default=False, description="whether to use safe mode")
    scribble: bool = Field(default=False, description="Whether to use scribble mode")
    # fmt: on

    def run_processor(self, image):
        """Run HED (holistically-nested edge detection) over the image."""
        detector = HEDdetector.from_pretrained("lllyasviel/Annotators")
        # safe not supported in controlnet_aux v0.0.3
        return detector(
            image,
            detect_resolution=self.detect_resolution,
            image_resolution=self.image_resolution,
            scribble=self.scribble,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class LineartImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies line art processing to image"""
    # fmt: off
    type: Literal["lineart_image_processor"] = "lineart_image_processor"
    # Inputs
    detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection")
    image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image")
    coarse: bool = Field(default=False, description="Whether to use coarse mode")
    # fmt: on

    def run_processor(self, image):
        """Convert the image to a line-art rendering."""
        detector = LineartDetector.from_pretrained("lllyasviel/Annotators")
        return detector(
            image,
            detect_resolution=self.detect_resolution,
            image_resolution=self.image_resolution,
            coarse=self.coarse,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies line art anime processing to image"""
    # fmt: off
    type: Literal["lineart_anime_image_processor"] = "lineart_anime_image_processor"
    # Inputs
    detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection")
    image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image")
    # fmt: on

    def run_processor(self, image):
        """Convert the image to an anime-style line-art rendering."""
        detector = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
        return detector(
            image,
            detect_resolution=self.detect_resolution,
            image_resolution=self.image_resolution,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class OpenposeImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies Openpose processing to image"""
    # fmt: off
    type: Literal["openpose_image_processor"] = "openpose_image_processor"
    # Inputs
    hand_and_face: bool = Field(default=False, description="Whether to use hands and face mode")
    detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection")
    image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image")
    # fmt: on

    def run_processor(self, image):
        """Render an OpenPose skeleton map for the image."""
        detector = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
        return detector(
            image,
            detect_resolution=self.detect_resolution,
            image_resolution=self.image_resolution,
            hand_and_face=self.hand_and_face,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class MidasDepthImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies Midas depth processing to image"""
    # fmt: off
    type: Literal["midas_depth_image_processor"] = "midas_depth_image_processor"
    # Inputs
    a_mult: float = Field(default=2.0, ge=0, description="Midas parameter `a_mult` (a = a_mult * PI)")
    bg_th: float = Field(default=0.1, ge=0, description="Midas parameter `bg_th`")
    # depth_and_normal not supported in controlnet_aux v0.0.3
    # depth_and_normal: bool = Field(default=False, description="whether to use depth and normal mode")
    # fmt: on

    def run_processor(self, image):
        """Estimate a MiDaS depth map; `a` is derived as a_mult * pi."""
        detector = MidasDetector.from_pretrained("lllyasviel/Annotators")
        # dept_and_normal not supported in controlnet_aux v0.0.3
        return detector(
            image,
            a=np.pi * self.a_mult,
            bg_th=self.bg_th,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class NormalbaeImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies NormalBae processing to image"""
    # fmt: off
    type: Literal["normalbae_image_processor"] = "normalbae_image_processor"
    # Inputs
    detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection")
    image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image")
    # fmt: on

    def run_processor(self, image):
        """Produce a NormalBae surface-normal map for the image."""
        detector = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
        return detector(
            image,
            detect_resolution=self.detect_resolution,
            image_resolution=self.image_resolution,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class MlsdImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies MLSD processing to image"""
    # fmt: off
    type: Literal["mlsd_image_processor"] = "mlsd_image_processor"
    # Inputs
    detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection")
    image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image")
    thr_v: float = Field(default=0.1, ge=0, description="MLSD parameter `thr_v`")
    thr_d: float = Field(default=0.1, ge=0, description="MLSD parameter `thr_d`")
    # fmt: on

    def run_processor(self, image):
        """Detect straight line segments with MLSD."""
        detector = MLSDdetector.from_pretrained("lllyasviel/Annotators")
        return detector(
            image,
            detect_resolution=self.detect_resolution,
            image_resolution=self.image_resolution,
            thr_v=self.thr_v,
            thr_d=self.thr_d,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class PidiImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies PIDI processing to image"""
    # fmt: off
    type: Literal["pidi_image_processor"] = "pidi_image_processor"
    # Inputs
    detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection")
    image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image")
    safe: bool = Field(default=False, description="Whether to use safe mode")
    scribble: bool = Field(default=False, description="Whether to use scribble mode")
    # fmt: on

    def run_processor(self, image):
        """Run PidiNet soft-edge detection over the image."""
        detector = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
        return detector(
            image,
            detect_resolution=self.detect_resolution,
            image_resolution=self.image_resolution,
            safe=self.safe,
            scribble=self.scribble,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies content shuffle processing to image"""
    # fmt: off
    type: Literal["content_shuffle_image_processor"] = "content_shuffle_image_processor"
    # Inputs
    detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection")
    image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image")
    h: Union[int, None] = Field(default=512, ge=0, description="Content shuffle `h` parameter")
    w: Union[int, None] = Field(default=512, ge=0, description="Content shuffle `w` parameter")
    f: Union[int, None] = Field(default=256, ge=0, description="Content shuffle `f` parameter")
    # fmt: on

    def run_processor(self, image):
        """Shuffle image content while preserving overall structure."""
        # Detector is stateless; construct and call in one expression.
        return ContentShuffleDetector()(
            image,
            detect_resolution=self.detect_resolution,
            image_resolution=self.image_resolution,
            h=self.h,
            w=self.w,
            f=self.f,
        )
|
||||||
|
|
||||||
|
|
||||||
|
# should work with controlnet_aux >= 0.0.4 and timm <= 0.6.13
|
||||||
|
class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies Zoe depth processing to image"""
    # fmt: off
    type: Literal["zoe_depth_image_processor"] = "zoe_depth_image_processor"
    # fmt: on

    def run_processor(self, image):
        """Estimate a ZoeDepth depth map; this processor takes no tunable inputs."""
        detector = ZoeDetector.from_pretrained("lllyasviel/Annotators")
        return detector(image)
|
||||||
|
|
||||||
|
|
||||||
|
class MediapipeFaceProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig):
    """Applies mediapipe face processing to image"""
    # fmt: off
    type: Literal["mediapipe_face_processor"] = "mediapipe_face_processor"
    # Inputs
    max_faces: int = Field(default=1, ge=1, description="Maximum number of faces to detect")
    min_confidence: float = Field(default=0.5, ge=0, le=1, description="Minimum confidence for face detection")
    # fmt: on

    def run_processor(self, image):
        """Render detected MediaPipe face landmarks as a control image."""
        detector = MediapipeFaceDetector()
        return detector(
            image,
            max_faces=self.max_faces,
            min_confidence=self.min_confidence,
        )
|
||||||
67
invokeai/app/invocations/cv.py
Normal file
67
invokeai/app/invocations/cv.py
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
from typing import Literal
|
||||||
|
|
||||||
|
import cv2 as cv
|
||||||
|
import numpy
|
||||||
|
from PIL import Image, ImageOps
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
|
from invokeai.app.models.image import ImageCategory, ImageField, ResourceOrigin
|
||||||
|
from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
|
||||||
|
from .image import ImageOutput
|
||||||
|
|
||||||
|
|
||||||
|
class CvInvocationConfig(BaseModel):
    """Helper class to provide all OpenCV invocations with additional config"""

    # Schema customisation
    class Config(InvocationConfig):
        # UI metadata: tag OpenCV-based nodes for grouping in the node editor.
        schema_extra = {
            "ui": {
                "tags": ["cv", "image"],
            },
        }
|
||||||
|
|
||||||
|
|
||||||
|
class CvInpaintInvocation(BaseInvocation, CvInvocationConfig):
    """Simple inpaint using opencv."""

    # fmt: off
    type: Literal["cv_inpaint"] = "cv_inpaint"

    # Inputs
    image: ImageField = Field(default=None, description="The image to inpaint")
    mask: ImageField = Field(default=None, description="The mask to use when inpainting")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Inpaint the masked region with cv2's Telea algorithm and save the result."""
        image = context.services.images.get_pil_image(self.image.image_name)
        mask = context.services.images.get_pil_image(self.mask.image_name)

        # Convert to cv image/mask
        # TODO: consider making these utility functions
        cv_image = cv.cvtColor(numpy.array(image.convert("RGB")), cv.COLOR_RGB2BGR)
        # Mask is inverted before use: cv.inpaint treats nonzero pixels as the
        # region to fill, while the incoming mask marks kept pixels as white.
        cv_mask = numpy.array(ImageOps.invert(mask.convert("L")))

        # Inpaint with a 3-pixel neighborhood radius using the Telea method.
        cv_inpainted = cv.inpaint(cv_image, cv_mask, 3, cv.INPAINT_TELEA)

        # Convert back to Pillow
        # TODO: consider making a utility function
        image_inpainted = Image.fromarray(cv.cvtColor(cv_inpainted, cv.COLOR_BGR2RGB))

        image_dto = context.services.images.create(
            image=image_inpainted,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
319
invokeai/app/invocations/generate.py
Normal file
319
invokeai/app/invocations/generate.py
Normal file
@@ -0,0 +1,319 @@
|
|||||||
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
from functools import partial
|
||||||
|
from typing import Literal, Optional, Union, get_args
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from diffusers import ControlNetModel
|
||||||
|
from torch import Tensor
|
||||||
|
import torch
|
||||||
|
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
|
from invokeai.app.models.image import ColorField, ImageField, ResourceOrigin
|
||||||
|
from invokeai.app.invocations.util.choose_model import choose_model
|
||||||
|
from invokeai.app.models.image import ImageCategory, ResourceOrigin
|
||||||
|
from invokeai.app.util.misc import SEED_MAX, get_random_seed
|
||||||
|
from invokeai.backend.generator.inpaint import infill_methods
|
||||||
|
from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
|
||||||
|
from .image import ImageOutput
|
||||||
|
from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator
|
||||||
|
from ...backend.stable_diffusion import PipelineIntermediateState
|
||||||
|
from ..util.step_callback import stable_diffusion_step_callback
|
||||||
|
|
||||||
|
# Accepted scheduler names, derived dynamically from the generator backend.
SAMPLER_NAME_VALUES = Literal[tuple(InvokeAIGenerator.schedulers())]
# Accepted infill method names; "patchmatch" is only present when its optional
# dependency is installed, so fall back to "tile" otherwise.
INFILL_METHODS = Literal[tuple(infill_methods())]
DEFAULT_INFILL_METHOD = (
    "patchmatch" if "patchmatch" in get_args(INFILL_METHODS) else "tile"
)
|
||||||
|
|
||||||
|
|
||||||
|
class SDImageInvocation(BaseModel):
    """Helper class to provide all Stable Diffusion raster image invocations with additional config"""

    # Schema customisation
    class Config(InvocationConfig):
        # UI metadata: tag SD image nodes and hint that `model` is a model picker.
        schema_extra = {
            "ui": {
                "tags": ["stable-diffusion", "image"],
                "type_hints": {
                    "model": "model",
                },
            },
        }
|
||||||
|
|
||||||
|
|
||||||
|
# Text to image
|
||||||
|
class TextToImageInvocation(BaseInvocation, SDImageInvocation):
    """Generates an image using text2img."""

    type: Literal["txt2img"] = "txt2img"

    # Inputs
    # TODO: consider making prompt optional to enable providing prompt through a link
    # fmt: off
    prompt: Optional[str] = Field(description="The prompt to generate an image from")
    seed: int = Field(ge=0, le=SEED_MAX, description="The seed to use (omit for random)", default_factory=get_random_seed)
    steps: int = Field(default=30, gt=0, description="The number of steps to use to generate the image")
    width: int = Field(default=512, multiple_of=8, gt=0, description="The width of the resulting image", )
    height: int = Field(default=512, multiple_of=8, gt=0, description="The height of the resulting image", )
    cfg_scale: float = Field(default=7.5, ge=1, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", )
    scheduler: SAMPLER_NAME_VALUES = Field(default="euler", description="The scheduler to use" )
    model: str = Field(default="", description="The model to use (currently ignored)")
    progress_images: bool = Field(default=False, description="Whether or not to produce progress images during generation", )
    control_model: Optional[str] = Field(default=None, description="The control model to use")
    control_image: Optional[ImageField] = Field(default=None, description="The processed control image")
    # fmt: on

    # TODO: pass this an emitter method or something? or a session for dispatching?
    def dispatch_progress(
        self,
        context: InvocationContext,
        source_node_id: str,
        intermediate_state: PipelineIntermediateState,
    ) -> None:
        """Forward a denoising progress update to the event system."""
        stable_diffusion_step_callback(
            context=context,
            intermediate_state=intermediate_state,
            node=self.dict(),
            source_node_id=source_node_id,
        )

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Run text-to-image generation and store the first result in the gallery."""
        # Handle invalid model parameter
        model = choose_model(context.services.model_manager, self.model)

        # loading controlnet image (currently requires pre-processed image)
        control_image = (
            None if self.control_image is None
            else context.services.images.get_pil_image(self.control_image.image_name)
        )
        # loading controlnet model
        if (self.control_model is None or self.control_model==''):
            control_model = None
        else:
            # FIXME: change this to dropdown menu?
            # FIXME: generalize so don't have to hardcode torch_dtype and device
            # NOTE(review): hard-coded float16 + "cuda" will fail on CPU/MPS-only
            # hosts — confirm the supported deployment targets.
            control_model = ControlNetModel.from_pretrained(self.control_model,
                                                            torch_dtype=torch.float16).to("cuda")

        # Get the source node id (we are invoking the prepared node)
        graph_execution_state = context.services.graph_execution_manager.get(
            context.graph_execution_state_id
        )
        source_node_id = graph_execution_state.prepared_source_mapping[self.id]

        txt2img = Txt2Img(model, control_model=control_model)
        outputs = txt2img.generate(
            prompt=self.prompt,
            step_callback=partial(self.dispatch_progress, context, source_node_id),
            control_image=control_image,
            **self.dict(
                exclude={"prompt", "control_image" }
            ),  # Shorthand for passing all of the parameters above manually
        )
        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
        # each time it is called. We only need the first one.
        generate_output = next(outputs)

        image_dto = context.services.images.create(
            image=generate_output.image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            session_id=context.graph_execution_state_id,
            node_id=self.id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class ImageToImageInvocation(TextToImageInvocation):
    """Generates an image using img2img."""

    type: Literal["img2img"] = "img2img"

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image")
    strength: float = Field(
        default=0.75, gt=0, le=1, description="The strength of the original image"
    )
    fit: bool = Field(
        default=True,
        description="Whether or not the result should be fit to the aspect ratio of the input image",
    )

    def dispatch_progress(
        self,
        context: InvocationContext,
        source_node_id: str,
        intermediate_state: PipelineIntermediateState,
    ) -> None:
        """Forward a denoising progress update to the event system."""
        stable_diffusion_step_callback(
            context=context,
            intermediate_state=intermediate_state,
            node=self.dict(),
            source_node_id=source_node_id,
        )

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Run image-to-image generation and store the first result in the gallery."""
        image = (
            None
            if self.image is None
            else context.services.images.get_pil_image(self.image.image_name)
        )

        # NOTE(review): if `self.image` is None and fit=True, `image.resize`
        # raises AttributeError — confirm callers always supply an image.
        if self.fit:
            image = image.resize((self.width, self.height))

        # Handle invalid model parameter
        model = choose_model(context.services.model_manager, self.model)

        # Get the source node id (we are invoking the prepared node)
        graph_execution_state = context.services.graph_execution_manager.get(
            context.graph_execution_state_id
        )
        source_node_id = graph_execution_state.prepared_source_mapping[self.id]

        outputs = Img2Img(model).generate(
            prompt=self.prompt,
            init_image=image,
            step_callback=partial(self.dispatch_progress, context, source_node_id),
            **self.dict(
                exclude={"prompt", "image", "mask"}
            ),  # Shorthand for passing all of the parameters above manually
        )

        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
        # each time it is called. We only need the first one.
        generator_output = next(outputs)

        image_dto = context.services.images.create(
            image=generator_output.image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            session_id=context.graph_execution_state_id,
            node_id=self.id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class InpaintInvocation(ImageToImageInvocation):
    """Generates an image using inpaint.

    Extends img2img with a mask and parameters controlling seam repair and
    infill of transparent regions.
    """

    type: Literal["inpaint"] = "inpaint"

    # Inputs
    # Mask selecting the region to inpaint; may be None.
    mask: Union[ImageField, None] = Field(description="The mask")
    seam_size: int = Field(default=96, ge=1, description="The seam inpaint size (px)")
    seam_blur: int = Field(
        default=16, ge=0, description="The seam inpaint blur radius (px)"
    )
    seam_strength: float = Field(
        default=0.75, gt=0, le=1, description="The seam inpaint strength"
    )
    seam_steps: int = Field(
        default=30, ge=1, description="The number of steps to use for seam inpaint"
    )
    tile_size: int = Field(
        default=32, ge=1, description="The tile infill method size (px)"
    )
    infill_method: INFILL_METHODS = Field(
        default=DEFAULT_INFILL_METHOD,
        description="The method used to infill empty regions (px)",
    )
    inpaint_width: Optional[int] = Field(
        default=None,
        multiple_of=8,
        gt=0,
        description="The width of the inpaint region (px)",
    )
    inpaint_height: Optional[int] = Field(
        default=None,
        multiple_of=8,
        gt=0,
        description="The height of the inpaint region (px)",
    )
    inpaint_fill: Optional[ColorField] = Field(
        default=ColorField(r=127, g=127, b=127, a=255),
        description="The solid infill method color",
    )
    inpaint_replace: float = Field(
        default=0.0,
        ge=0.0,
        le=1.0,
        description="The amount by which to replace masked areas with latent noise",
    )

    def dispatch_progress(
        self,
        context: InvocationContext,
        source_node_id: str,
        intermediate_state: PipelineIntermediateState,
    ) -> None:
        """Forward a denoising progress update to the event system."""
        stable_diffusion_step_callback(
            context=context,
            intermediate_state=intermediate_state,
            node=self.dict(),
            source_node_id=source_node_id,
        )

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Run inpaint generation and persist the first generated image."""
        image = (
            None
            if self.image is None
            else context.services.images.get_pil_image(self.image.image_name)
        )
        mask = (
            None
            if self.mask is None
            else context.services.images.get_pil_image(self.mask.image_name)
        )

        # Handle invalid model parameter
        model = choose_model(context.services.model_manager, self.model)

        # Get the source node id (we are invoking the prepared node)
        graph_execution_state = context.services.graph_execution_manager.get(
            context.graph_execution_state_id
        )
        source_node_id = graph_execution_state.prepared_source_mapping[self.id]

        outputs = Inpaint(model).generate(
            prompt=self.prompt,
            init_image=image,
            mask_image=mask,
            step_callback=partial(self.dispatch_progress, context, source_node_id),
            **self.dict(
                exclude={"prompt", "image", "mask"}
            ),  # Shorthand for passing all of the parameters above manually
        )

        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
        # each time it is called. We only need the first one.
        generator_output = next(outputs)

        image_dto = context.services.images.create(
            image=generator_output.image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            session_id=context.graph_execution_state_id,
            node_id=self.id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
547
invokeai/app/invocations/image.py
Normal file
547
invokeai/app/invocations/image.py
Normal file
@@ -0,0 +1,547 @@
|
|||||||
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
import io
|
||||||
|
from typing import Literal, Optional, Union
|
||||||
|
|
||||||
|
import numpy
|
||||||
|
from PIL import Image, ImageFilter, ImageOps, ImageChops
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
|
from ..models.image import ImageCategory, ImageField, ResourceOrigin
|
||||||
|
from .baseinvocation import (
|
||||||
|
BaseInvocation,
|
||||||
|
BaseInvocationOutput,
|
||||||
|
InvocationContext,
|
||||||
|
InvocationConfig,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class PILInvocationConfig(BaseModel):
    """Helper class to provide all PIL invocations with additional config"""

    class Config(InvocationConfig):
        # UI hint tags shared by every PIL-based image invocation.
        schema_extra = {
            "ui": {
                "tags": ["PIL", "image"],
            },
        }
|
||||||
|
|
||||||
|
|
||||||
|
class ImageOutput(BaseInvocationOutput):
    """Base class for invocations that output an image"""

    # fmt: off
    type: Literal["image_output"] = "image_output"
    # The generated/processed image reference.
    image: ImageField = Field(default=None, description="The output image")
    width: int = Field(description="The width of the image in pixels")
    height: int = Field(description="The height of the image in pixels")
    # fmt: on

    class Config:
        # All fields are mandatory in the emitted OpenAPI schema.
        schema_extra = {"required": ["type", "image", "width", "height"]}
|
||||||
|
|
||||||
|
|
||||||
|
class MaskOutput(BaseInvocationOutput):
    """Base class for invocations that output a mask"""

    # fmt: off
    type: Literal["mask"] = "mask"
    # The generated mask image reference.
    mask: ImageField = Field(default=None, description="The output mask")
    width: int = Field(description="The width of the mask in pixels")
    height: int = Field(description="The height of the mask in pixels")
    # fmt: on

    class Config:
        # NOTE(review): unlike ImageOutput, width/height are declared but not
        # listed as required here — confirm whether that is intentional.
        schema_extra = {
            "required": [
                "type",
                "mask",
            ]
        }
|
||||||
|
|
||||||
|
|
||||||
|
class LoadImageInvocation(BaseInvocation):
    """Load an image and provide it as output."""

    # fmt: off
    type: Literal["load_image"] = "load_image"

    # Inputs
    image: Union[ImageField, None] = Field(
        default=None, description="The image to load"
    )
    # fmt: on
    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Fetch the referenced image and re-emit its name and dimensions."""
        # NOTE(review): self.image defaults to None; a missing input raises
        # AttributeError here — confirm the graph validates connectivity first.
        image = context.services.images.get_pil_image(self.image.image_name)

        return ImageOutput(
            image=ImageField(image_name=self.image.image_name),
            width=image.width,
            height=image.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class ShowImageInvocation(BaseInvocation):
    """Displays a provided image, and passes it forward in the pipeline."""

    type: Literal["show_image"] = "show_image"

    # Inputs
    image: Union[ImageField, None] = Field(
        default=None, description="The image to show"
    )

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Open the image in the OS image viewer, then pass it through unchanged."""
        image = context.services.images.get_pil_image(self.image.image_name)
        if image:
            # PIL's show() spawns an external viewer; it does not block generation.
            image.show()

        # TODO: how to handle failure?

        return ImageOutput(
            image=ImageField(image_name=self.image.image_name),
            width=image.width,
            height=image.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class ImageCropInvocation(BaseInvocation, PILInvocationConfig):
    """Crops an image to a specified box. The box can be outside of the image."""

    # fmt: off
    type: Literal["img_crop"] = "img_crop"

    # Inputs
    image: Union[ImageField, None] = Field(default=None, description="The image to crop")
    x: int = Field(default=0, description="The left x coordinate of the crop rectangle")
    y: int = Field(default=0, description="The top y coordinate of the crop rectangle")
    width: int = Field(default=512, gt=0, description="The width of the crop rectangle")
    height: int = Field(default=512, gt=0, description="The height of the crop rectangle")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Crop by pasting the source onto a transparent canvas of the box size."""
        source = context.services.images.get_pil_image(self.image.image_name)

        # Any part of the crop box outside the source stays fully transparent.
        canvas = Image.new(
            mode="RGBA", size=(self.width, self.height), color=(0, 0, 0, 0)
        )
        canvas.paste(source, (-self.x, -self.y))

        cropped_dto = context.services.images.create(
            image=canvas,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=cropped_dto.image_name),
            width=cropped_dto.width,
            height=cropped_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class ImagePasteInvocation(BaseInvocation, PILInvocationConfig):
    """Pastes an image into another image."""

    # fmt: off
    type: Literal["img_paste"] = "img_paste"

    # Inputs
    base_image: Union[ImageField, None] = Field(default=None, description="The base image")
    image: Union[ImageField, None] = Field(default=None, description="The image to paste")
    mask: Optional[ImageField] = Field(default=None, description="The mask to use when pasting")
    x: int = Field(default=0, description="The left x coordinate at which to paste the image")
    y: int = Field(default=0, description="The top y coordinate at which to paste the image")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Paste `image` at (x, y) onto `base_image`, growing the canvas if needed."""
        base_image = context.services.images.get_pil_image(self.base_image.image_name)
        image = context.services.images.get_pil_image(self.image.image_name)
        mask = (
            None
            if self.mask is None
            else ImageOps.invert(
                context.services.images.get_pil_image(self.mask.image_name)
            )
        )
        # TODO: probably shouldn't invert mask here... should user be required to do it?

        # Bounding box of the union of base image and pasted image; negative
        # x/y or overhang past the base image enlarges the output canvas.
        min_x = min(0, self.x)
        min_y = min(0, self.y)
        max_x = max(base_image.width, image.width + self.x)
        max_y = max(base_image.height, image.height + self.y)

        new_image = Image.new(
            mode="RGBA", size=(max_x - min_x, max_y - min_y), color=(0, 0, 0, 0)
        )
        # Shift both layers so the union box starts at (0, 0).
        new_image.paste(base_image, (abs(min_x), abs(min_y)))
        new_image.paste(image, (max(0, self.x), max(0, self.y)), mask=mask)

        image_dto = context.services.images.create(
            image=new_image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class MaskFromAlphaInvocation(BaseInvocation, PILInvocationConfig):
    """Extracts the alpha channel of an image as a mask."""

    # fmt: off
    type: Literal["tomask"] = "tomask"

    # Inputs
    image: Union[ImageField, None] = Field(default=None, description="The image to create the mask from")
    invert: bool = Field(default=False, description="Whether or not to invert the mask")
    # fmt: on

    def invoke(self, context: InvocationContext) -> MaskOutput:
        """Pull the alpha band out of the image and save it as a mask image."""
        source = context.services.images.get_pil_image(self.image.image_name)

        # The alpha channel is the last band of an RGBA image.
        alpha_mask = source.split()[-1]
        if self.invert:
            alpha_mask = ImageOps.invert(alpha_mask)

        mask_dto = context.services.images.create(
            image=alpha_mask,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.MASK,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return MaskOutput(
            mask=ImageField(image_name=mask_dto.image_name),
            width=mask_dto.width,
            height=mask_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class ImageMultiplyInvocation(BaseInvocation, PILInvocationConfig):
    """Multiplies two images together using `PIL.ImageChops.multiply()`."""

    # fmt: off
    type: Literal["img_mul"] = "img_mul"

    # Inputs
    image1: Union[ImageField, None] = Field(default=None, description="The first image to multiply")
    image2: Union[ImageField, None] = Field(default=None, description="The second image to multiply")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Pixel-wise multiply the two inputs and save the product."""
        first = context.services.images.get_pil_image(self.image1.image_name)
        second = context.services.images.get_pil_image(self.image2.image_name)

        product = ImageChops.multiply(first, second)

        product_dto = context.services.images.create(
            image=product,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=product_dto.image_name),
            width=product_dto.width,
            height=product_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
# Single-band channel names extractable from an image: alpha, red, green, blue.
IMAGE_CHANNELS = Literal["A", "R", "G", "B"]
|
||||||
|
|
||||||
|
|
||||||
|
class ImageChannelInvocation(BaseInvocation, PILInvocationConfig):
    """Gets a channel from an image."""

    # fmt: off
    type: Literal["img_chan"] = "img_chan"

    # Inputs
    image: Union[ImageField, None] = Field(default=None, description="The image to get the channel from")
    channel: IMAGE_CHANNELS = Field(default="A", description="The channel to get")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Extract the selected band and save it as a grayscale image."""
        image = context.services.images.get_pil_image(self.image.image_name)

        # NOTE(review): PIL raises ValueError if the image mode lacks the
        # requested band (e.g. "A" on an RGB image) — confirm handling upstream.
        channel_image = image.getchannel(self.channel)

        image_dto = context.services.images.create(
            image=channel_image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
# PIL image modes accepted by the convert invocation.
IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"]
|
||||||
|
|
||||||
|
|
||||||
|
class ImageConvertInvocation(BaseInvocation, PILInvocationConfig):
    """Converts an image to a different mode."""

    # fmt: off
    type: Literal["img_conv"] = "img_conv"

    # Inputs
    image: Union[ImageField, None] = Field(default=None, description="The image to convert")
    mode: IMAGE_MODES = Field(default="L", description="The mode to convert to")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Convert the image to the requested PIL mode and save the result."""
        image = context.services.images.get_pil_image(self.image.image_name)

        converted_image = image.convert(self.mode)

        image_dto = context.services.images.create(
            image=converted_image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class ImageBlurInvocation(BaseInvocation, PILInvocationConfig):
    """Blurs an image"""

    # fmt: off
    type: Literal["img_blur"] = "img_blur"

    # Inputs
    image: Union[ImageField, None] = Field(default=None, description="The image to blur")
    radius: float = Field(default=8.0, ge=0, description="The blur radius")
    blur_type: Literal["gaussian", "box"] = Field(default="gaussian", description="The type of blur")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Apply the configured blur filter and save the result."""
        source = context.services.images.get_pil_image(self.image.image_name)

        # Select the PIL filter matching the requested blur type.
        if self.blur_type == "gaussian":
            blur_filter = ImageFilter.GaussianBlur(self.radius)
        else:
            blur_filter = ImageFilter.BoxBlur(self.radius)

        blurred = source.filter(blur_filter)

        blurred_dto = context.services.images.create(
            image=blurred,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=blurred_dto.image_name),
            width=blurred_dto.width,
            height=blurred_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
# User-facing names of the PIL resampling filters, ordered roughly by quality.
PIL_RESAMPLING_MODES = Literal[
    "nearest",
    "box",
    "bilinear",
    "hamming",
    "bicubic",
    "lanczos",
]


# Maps each user-facing name to the corresponding PIL resampling enum member.
PIL_RESAMPLING_MAP = {
    "nearest": Image.Resampling.NEAREST,
    "box": Image.Resampling.BOX,
    "bilinear": Image.Resampling.BILINEAR,
    "hamming": Image.Resampling.HAMMING,
    "bicubic": Image.Resampling.BICUBIC,
    "lanczos": Image.Resampling.LANCZOS,
}
|
||||||
|
|
||||||
|
|
||||||
|
class ImageResizeInvocation(BaseInvocation, PILInvocationConfig):
    """Resizes an image to specific dimensions"""

    # fmt: off
    type: Literal["img_resize"] = "img_resize"

    # Inputs
    image: Union[ImageField, None] = Field(default=None, description="The image to resize")
    width: int = Field(ge=64, multiple_of=8, description="The width to resize to (px)")
    height: int = Field(ge=64, multiple_of=8, description="The height to resize to (px)")
    resample_mode: PIL_RESAMPLING_MODES = Field(default="bicubic", description="The resampling mode")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Resize the input image to (width, height) and save the result."""
        image = context.services.images.get_pil_image(self.image.image_name)

        # Translate the user-facing mode name into the PIL resampling filter.
        resample_mode = PIL_RESAMPLING_MAP[self.resample_mode]

        resize_image = image.resize(
            (self.width, self.height),
            resample=resample_mode,
        )

        image_dto = context.services.images.create(
            image=resize_image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class ImageScaleInvocation(BaseInvocation, PILInvocationConfig):
    """Scales an image by a factor"""

    # fmt: off
    type: Literal["img_scale"] = "img_scale"

    # Inputs
    image: Union[ImageField, None] = Field(default=None, description="The image to scale")
    scale_factor: float = Field(gt=0, description="The factor by which to scale the image")
    resample_mode: PIL_RESAMPLING_MODES = Field(default="bicubic", description="The resampling mode")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Multiply both dimensions by scale_factor and resize the image."""
        image = context.services.images.get_pil_image(self.image.image_name)

        resample_mode = PIL_RESAMPLING_MAP[self.resample_mode]
        # Target size is truncated (int()), so very small factors can yield 0.
        width = int(image.width * self.scale_factor)
        height = int(image.height * self.scale_factor)

        resize_image = image.resize(
            (width, height),
            resample=resample_mode,
        )

        image_dto = context.services.images.create(
            image=resize_image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class ImageLerpInvocation(BaseInvocation, PILInvocationConfig):
    """Linear interpolation of all pixels of an image.

    Remaps every pixel value from the full [0, 255] range into the
    [min, max] range: a pixel of 0 becomes `min`, a pixel of 255 becomes `max`.
    """

    # fmt: off
    type: Literal["img_lerp"] = "img_lerp"

    # Inputs
    image: Union[ImageField, None] = Field(default=None, description="The image to lerp")
    min: int = Field(default=0, ge=0, le=255, description="The minimum output value")
    max: int = Field(default=255, ge=0, le=255, description="The maximum output value")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Lerp pixel values into [self.min, self.max] and save the result."""
        image = context.services.images.get_pil_image(self.image.image_name)

        # Normalize pixels to [0, 1], then scale into the requested range.
        image_arr = numpy.asarray(image, dtype=numpy.float32) / 255
        # BUG FIX: the offset must be `self.min`, not `self.max` — with the old
        # `+ self.max` a pixel of 0 mapped to `max` (and 255 overflowed to
        # `2*max - min`), not to the documented [min, max] range.
        image_arr = image_arr * (self.max - self.min) + self.min

        lerp_image = Image.fromarray(numpy.uint8(image_arr))

        image_dto = context.services.images.create(
            image=lerp_image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class ImageInverseLerpInvocation(BaseInvocation, PILInvocationConfig):
    """Inverse linear interpolation of all pixels of an image.

    Maps pixel values in [min, max] back onto the full [0, 255] range,
    clamping values outside [min, max].
    """

    # fmt: off
    type: Literal["img_ilerp"] = "img_ilerp"

    # Inputs
    image: Union[ImageField, None] = Field(default=None, description="The image to lerp")
    min: int = Field(default=0, ge=0, le=255, description="The minimum input value")
    max: int = Field(default=255, ge=0, le=255, description="The maximum input value")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Inverse-lerp pixel values from [min, max] to [0, 255] and save."""
        image = context.services.images.get_pil_image(self.image.image_name)

        image_arr = numpy.asarray(image, dtype=numpy.float32)
        # Clamp below min to 0 and above max to 1, then rescale to [0, 255].
        # NOTE(review): min == max would divide by zero — schema allows it.
        image_arr = (
            numpy.minimum(
                numpy.maximum(image_arr - self.min, 0) / float(self.max - self.min), 1
            )
            * 255
        )

        ilerp_image = Image.fromarray(numpy.uint8(image_arr))

        image_dto = context.services.images.create(
            image=ilerp_image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
230
invokeai/app/invocations/infill.py
Normal file
230
invokeai/app/invocations/infill.py
Normal file
@@ -0,0 +1,230 @@
|
|||||||
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team
|
||||||
|
|
||||||
|
from typing import Literal, Union, get_args
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import math
|
||||||
|
from PIL import Image, ImageOps
|
||||||
|
from pydantic import Field
|
||||||
|
|
||||||
|
from invokeai.app.invocations.image import ImageOutput
|
||||||
|
from invokeai.app.util.misc import SEED_MAX, get_random_seed
|
||||||
|
from invokeai.backend.image_util.patchmatch import PatchMatch
|
||||||
|
|
||||||
|
from ..models.image import ColorField, ImageCategory, ImageField, ResourceOrigin
|
||||||
|
from .baseinvocation import (
|
||||||
|
BaseInvocation,
|
||||||
|
InvocationContext,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def infill_methods() -> list[str]:
    """Return the names of the available infill methods, preferred first.

    "patchmatch" is prepended only when the optional PatchMatch backend is
    importable on this system.
    """
    available = [
        "tile",
        "solid",
    ]
    if PatchMatch.patchmatch_available():
        available.insert(0, "patchmatch")
    return available
|
||||||
|
|
||||||
|
|
||||||
|
# Literal type of the infill methods detected at import time; used for
# pydantic validation of the `infill_method` field.
INFILL_METHODS = Literal[tuple(infill_methods())]
# Prefer patchmatch when its backend is available, otherwise fall back to tile.
DEFAULT_INFILL_METHOD = (
    "patchmatch" if "patchmatch" in get_args(INFILL_METHODS) else "tile"
)
|
||||||
|
|
||||||
|
|
||||||
|
def infill_patchmatch(im: Image.Image) -> Image.Image:
    """Infill the transparent regions of *im* with the PatchMatch algorithm.

    Returns *im* unchanged when it is not RGBA or when the optional PatchMatch
    backend is unavailable. On success returns an RGB image (alpha dropped).
    """
    if im.mode != "RGBA":
        return im

    # Skip patchmatch if patchmatch isn't available
    if not PatchMatch.patchmatch_available():
        return im

    # Patchmatch (note, we may want to expose patch_size? Increasing it significantly impacts performance though)
    # The inverted alpha band marks the transparent region to be inpainted.
    im_patched_np = PatchMatch.inpaint(
        im.convert("RGB"), ImageOps.invert(im.split()[-1]), patch_size=3
    )
    im_patched = Image.fromarray(im_patched_np, mode="RGB")
    return im_patched
|
||||||
|
|
||||||
|
|
||||||
|
def get_tile_images(image: np.ndarray, width=8, height=8):
    """Split *image* (H, W, depth) into non-overlapping tiles.

    Returns a read-only zero-copy view of shape
    (nrows, ncols, height, width, depth), or None when the image dimensions
    are not exact multiples of the tile size.
    """
    total_rows, total_cols, depth = image.shape
    strides = image.strides

    nrows, row_remainder = divmod(total_rows, height)
    ncols, col_remainder = divmod(total_cols, width)
    if row_remainder != 0 or col_remainder != 0:
        return None

    # Stepping one tile right/down advances by a whole tile's worth of bytes.
    tile_strides = (height * strides[0], width * strides[1]) + strides
    return np.lib.stride_tricks.as_strided(
        np.ravel(image),
        shape=(nrows, ncols, height, width, depth),
        strides=tile_strides,
        writeable=False,
    )
|
||||||
|
|
||||||
|
|
||||||
|
def tile_fill_missing(
    im: Image.Image, tile_size: int = 16, seed: Union[int, None] = None
) -> Image.Image:
    """Fill transparent tiles of *im* with random fully-opaque tiles of itself.

    The image is split into non-overlapping ``tile_size`` x ``tile_size``
    tiles; every tile containing a transparent pixel is replaced by a randomly
    chosen fully-opaque tile from the same image. Returns *im* unchanged when
    it has no alpha channel, when its dimensions are not exact multiples of
    ``tile_size``, or when no fully-opaque tile exists. ``seed`` makes the
    random tile selection deterministic.
    """
    # Only fill if there's an alpha layer
    if im.mode != "RGBA":
        return im

    a = np.asarray(im, dtype=np.uint8)

    tile_size_tuple = (tile_size, tile_size)

    # Get the image as tiles of a specified size
    tiles = get_tile_images(a, *tile_size_tuple)
    if tiles is None:
        # BUG FIX: get_tile_images returns None when the image dimensions are
        # not multiples of tile_size; previously this crashed with
        # AttributeError on None.copy(). Bail out gracefully instead.
        return im
    tiles = tiles.copy()

    # Get the mask as tiles
    tiles_mask = tiles[:, :, :, :, 3]

    # Find any mask tiles with any fully transparent pixels (we will be replacing these later)
    tmask_shape = tiles_mask.shape
    tiles_mask = tiles_mask.reshape(math.prod(tiles_mask.shape))
    n, ny = (math.prod(tmask_shape[0:2])), math.prod(tmask_shape[2:])
    tiles_mask = tiles_mask > 0
    # A tile is "valid" only when every pixel in it has nonzero alpha.
    tiles_mask = tiles_mask.reshape((n, ny)).all(axis=1)

    # Get RGB tiles in single array and filter by the mask
    tshape = tiles.shape
    tiles_all = tiles.reshape((math.prod(tiles.shape[0:2]), *tiles.shape[2:]))
    filtered_tiles = tiles_all[tiles_mask]

    if len(filtered_tiles) == 0:
        return im

    # Find all invalid tiles and replace with a random valid tile
    replace_count = np.logical_not(tiles_mask).sum()
    rng = np.random.default_rng(seed=seed)
    tiles_all[np.logical_not(tiles_mask)] = filtered_tiles[
        rng.choice(filtered_tiles.shape[0], replace_count), :, :, :
    ]

    # Convert back to an image
    tiles_all = tiles_all.reshape(tshape)
    tiles_all = tiles_all.swapaxes(1, 2)
    st = tiles_all.reshape(
        (
            math.prod(tiles_all.shape[0:2]),
            math.prod(tiles_all.shape[2:4]),
            tiles_all.shape[4],
        )
    )
    si = Image.fromarray(st, mode="RGBA")

    return si
|
||||||
|
|
||||||
|
|
||||||
|
class InfillColorInvocation(BaseInvocation):
    """Infills transparent areas of an image with a solid color"""

    # Discriminator used by the invocation graph to route to this node type.
    type: Literal["infill_rgba"] = "infill_rgba"
    image: Union[ImageField, None] = Field(
        default=None, description="The image to infill"
    )
    color: ColorField = Field(
        default=ColorField(r=127, g=127, b=127, a=255),
        description="The color to use to infill",
    )

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Composite the input image over a solid background of `self.color`.

        Fully transparent pixels end up as the fill color; partially
        transparent pixels are blended, then the original is pasted back
        through its own alpha channel so opaque content is preserved exactly.
        """
        image = context.services.images.get_pil_image(self.image.image_name)

        # Solid background the size of the input; alpha_composite blends the
        # (RGBA-converted) input on top of it.
        solid_bg = Image.new("RGBA", image.size, self.color.tuple())
        infilled = Image.alpha_composite(solid_bg, image.convert("RGBA"))

        # Re-paste the original through its alpha mask (split()[-1]) so any
        # blending artifacts in opaque regions are overwritten.
        # NOTE(review): assumes `image` has an alpha band; split()[-1] on an
        # RGB image would use the B channel as a mask — confirm callers always
        # pass RGBA here.
        infilled.paste(image, (0, 0), image.split()[-1])

        image_dto = context.services.images.create(
            image=infilled,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class InfillTileInvocation(BaseInvocation):
    """Infills transparent areas of an image with tiles of the image"""

    type: Literal["infill_tile"] = "infill_tile"

    image: Union[ImageField, None] = Field(
        default=None, description="The image to infill"
    )
    # Edge length (px) of the square tiles sampled from opaque regions.
    tile_size: int = Field(default=32, ge=1, description="The tile size (px)")
    seed: int = Field(
        ge=0,
        le=SEED_MAX,
        description="The seed to use for tile generation (omit for random)",
        default_factory=get_random_seed,
    )

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Fill transparent regions by copying random opaque tiles of the image.

        Delegates the tiling to tile_fill_missing(), then pastes the original
        back through its alpha channel so opaque pixels are untouched.
        """
        image = context.services.images.get_pil_image(self.image.image_name)

        # tile_fill_missing replaces tiles containing transparency with random
        # fully-opaque tiles; copy first so the source image is not mutated.
        infilled = tile_fill_missing(
            image.copy(), seed=self.seed, tile_size=self.tile_size
        )
        # Restore original opaque content using the alpha band as a paste mask.
        # NOTE(review): split()[-1] assumes an alpha band exists — confirm the
        # input is RGBA (tile_fill_missing itself is a no-op for non-RGBA).
        infilled.paste(image, (0, 0), image.split()[-1])

        image_dto = context.services.images.create(
            image=infilled,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
class InfillPatchMatchInvocation(BaseInvocation):
    """Infills transparent areas of an image using the PatchMatch algorithm"""

    type: Literal["infill_patchmatch"] = "infill_patchmatch"

    image: Union[ImageField, None] = Field(
        default=None, description="The image to infill"
    )

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Run PatchMatch inpainting over the transparent areas of the image.

        Raises:
            ValueError: if the optional PatchMatch extension is not installed
                or failed to load on this system.
        """
        image = context.services.images.get_pil_image(self.image.image_name)

        # PatchMatch is an optional native dependency; fail loudly rather than
        # silently returning the un-infilled image.
        if PatchMatch.patchmatch_available():
            infilled = infill_patchmatch(image.copy())
        else:
            raise ValueError("PatchMatch is not available on this system")

        image_dto = context.services.images.create(
            image=infilled,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
618
invokeai/app/invocations/latent.py
Normal file
618
invokeai/app/invocations/latent.py
Normal file
@@ -0,0 +1,618 @@
|
|||||||
|
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
import random
|
||||||
|
import einops
|
||||||
|
from typing import Literal, Optional, Union, List
|
||||||
|
|
||||||
|
from compel import Compel
|
||||||
|
from diffusers.pipelines.controlnet import MultiControlNetModel
|
||||||
|
|
||||||
|
from pydantic import BaseModel, Field, validator
|
||||||
|
import torch
|
||||||
|
|
||||||
|
from invokeai.app.invocations.util.choose_model import choose_model
|
||||||
|
from invokeai.app.models.image import ImageCategory
|
||||||
|
from invokeai.app.util.misc import SEED_MAX, get_random_seed
|
||||||
|
|
||||||
|
from invokeai.app.util.step_callback import stable_diffusion_step_callback
|
||||||
|
from .controlnet_image_processors import ControlField
|
||||||
|
|
||||||
|
from ...backend.model_management.model_manager import ModelManager
|
||||||
|
from ...backend.util.devices import choose_torch_device, torch_dtype
|
||||||
|
from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings
|
||||||
|
from ...backend.image_util.seamless import configure_model_padding
|
||||||
|
from ...backend.prompting.conditioning import get_uc_and_c_and_ec
|
||||||
|
|
||||||
|
from ...backend.stable_diffusion.diffusers_pipeline import ConditioningData, StableDiffusionGeneratorPipeline, image_resized_to_grid_as_tensor
|
||||||
|
from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
|
||||||
|
from ...backend.stable_diffusion.diffusers_pipeline import ControlNetData
|
||||||
|
|
||||||
|
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig
|
||||||
|
import numpy as np
|
||||||
|
from ..services.image_file_storage import ResourceOrigin
|
||||||
|
from .baseinvocation import BaseInvocation, InvocationContext
|
||||||
|
from .image import ImageField, ImageOutput
|
||||||
|
from .compel import ConditioningField
|
||||||
|
from ...backend.stable_diffusion import PipelineIntermediateState
|
||||||
|
from diffusers.schedulers import SchedulerMixin as Scheduler
|
||||||
|
import diffusers
|
||||||
|
from diffusers import DiffusionPipeline, ControlNetModel
|
||||||
|
|
||||||
|
|
||||||
|
class LatentsField(BaseModel):
    """A latents field used for passing latents between invocations"""

    # Key under which the tensor is stored in the latents service; declared
    # Optional for graph-construction convenience, but required at runtime
    # (see schema_extra below).
    latents_name: Optional[str] = Field(default=None, description="The name of the latents")

    class Config:
        # Mark latents_name as required in the generated JSON schema even
        # though the Python default is None.
        schema_extra = {"required": ["latents_name"]}
|
||||||
|
|
||||||
|
class LatentsOutput(BaseInvocationOutput):
    """Base class for invocations that output latents"""
    #fmt: off
    type: Literal["latents_output"] = "latents_output"

    # Outputs
    # Reference to the stored latents tensor (saved via the latents service).
    latents: LatentsField = Field(default=None, description="The output latents")
    # Pixel-space dimensions (latent dims * 8); see build_latents_output().
    width: int = Field(description="The width of the latents in pixels")
    height: int = Field(description="The height of the latents in pixels")
    #fmt: on
|
||||||
|
|
||||||
|
|
||||||
|
def build_latents_output(latents_name: str, latents: torch.Tensor):
    """Package a saved latents tensor as a LatentsOutput.

    The tensor is NCHW in latent space; the reported width/height are the
    pixel-space dimensions, i.e. the latent dims scaled by the fixed 8x
    VAE downsampling factor.
    """
    size = latents.size()
    return LatentsOutput(
        latents=LatentsField(latents_name=latents_name),
        width=size[3] * 8,
        height=size[2] * 8,
    )
|
||||||
|
|
||||||
|
class NoiseOutput(BaseInvocationOutput):
    """Invocation noise output"""
    #fmt: off
    type: Literal["noise_output"] = "noise_output"

    # Outputs
    # Reference to the stored noise tensor (saved via the latents service).
    noise: LatentsField = Field(default=None, description="The output noise")
    # Pixel-space dimensions (latent dims * 8); see build_noise_output().
    width: int = Field(description="The width of the noise in pixels")
    height: int = Field(description="The height of the noise in pixels")
    #fmt: on
|
||||||
|
|
||||||
|
def build_noise_output(latents_name: str, latents: torch.Tensor):
    """Package a saved noise tensor as a NoiseOutput.

    Mirrors build_latents_output: the NCHW latent-space dims 2 and 3 are
    scaled by the fixed 8x VAE factor to report pixel dimensions.
    """
    size = latents.size()
    return NoiseOutput(
        noise=LatentsField(latents_name=latents_name),
        width=size[3] * 8,
        height=size[2] * 8,
    )
|
||||||
|
|
||||||
|
|
||||||
|
# Literal type whose allowed values are exactly the registered scheduler names.
# tuple() consumes the keys view directly — the intermediate list() was redundant.
# NOTE: Literal values are fixed at import time, so SCHEDULER_MAP must be fully
# populated before this module is imported.
SAMPLER_NAME_VALUES = Literal[
    tuple(SCHEDULER_MAP.keys())
]
|
||||||
|
|
||||||
|
|
||||||
|
def get_scheduler(scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
    """Build a scheduler for `model` from a registered scheduler name.

    Unknown names silently fall back to the 'ddim' entry of SCHEDULER_MAP.
    The pipeline's original scheduler config is preserved under a "_backup"
    key so the scheduler can be swapped repeatedly without overrides from
    one scheduler leaking into the next.
    """
    scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP['ddim'])

    # If a previous call already swapped schedulers, derive from the pristine
    # backup rather than the already-overridden config.
    scheduler_config = model.scheduler.config
    if "_backup" in scheduler_config:
        scheduler_config = scheduler_config["_backup"]
    # Apply this scheduler's overrides and stash the pristine config for the
    # next swap.
    scheduler_config = {**scheduler_config, **scheduler_extra_config, "_backup": scheduler_config}
    scheduler = scheduler_class.from_config(scheduler_config)

    # hack copied over from generate.py: some callers probe this attribute and
    # diffusers schedulers do not define it, so stub it in.
    if not hasattr(scheduler, 'uses_inpainting_model'):
        scheduler.uses_inpainting_model = lambda: False
    return scheduler
|
||||||
|
|
||||||
|
|
||||||
|
def get_noise(width:int, height:int, device:torch.device, seed:int = 0, latent_channels:int=4, use_mps_noise:bool=False, downsampling_factor:int = 8):
    """Return a seeded gaussian noise tensor of latent shape (1, C, H/df, W/df).

    `width`/`height` are pixel dimensions; they are floor-divided by
    `downsampling_factor` to get the latent dimensions. The result always
    lives on `device`, even when sampling happens on CPU.
    """
    # Noise covers only the diffusion image channels, never mask channels,
    # so cap the channel count at 4.
    noise_channels = min(latent_channels, 4)
    # Sample on CPU when MPS is involved (presumably for reproducible
    # seeding on that backend — TODO confirm), then move to the target device.
    sample_device = "cpu" if (use_mps_noise or device.type == "mps") else device
    rng = torch.Generator(device=sample_device).manual_seed(seed)
    noise_shape = [
        1,
        noise_channels,
        height // downsampling_factor,
        width // downsampling_factor,
    ]
    noise = torch.randn(
        noise_shape,
        dtype=torch_dtype(device),
        device=sample_device,
        generator=rng,
    )
    # if self.perlin > 0.0:
    #     perlin_noise = self.get_perlin_noise(
    #         width // self.downsampling_factor, height // self.downsampling_factor
    #     )
    #     x = (1 - self.perlin) * x + self.perlin * perlin_noise
    return noise.to(device)
|
||||||
|
|
||||||
|
|
||||||
|
class NoiseInvocation(BaseInvocation):
    """Generates latent noise."""

    type: Literal["noise"] = "noise"

    # Inputs
    seed: int = Field(ge=0, le=SEED_MAX, description="The seed to use", default_factory=get_random_seed)
    width: int = Field(default=512, multiple_of=8, gt=0, description="The width of the resulting noise", )
    height: int = Field(default=512, multiple_of=8, gt=0, description="The height of the resulting noise", )

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["latents", "noise"],
            },
        }

    @validator("seed", pre=True)
    def modulo_seed(cls, v):
        """Returns the seed modulo (SEED_MAX + 1) to ensure it is within the valid range."""
        # The field constraint is inclusive (le=SEED_MAX), so SEED_MAX itself is
        # a valid seed. The previous `v % SEED_MAX` wrongly wrapped SEED_MAX to 0
        # and could never produce SEED_MAX; modulo (SEED_MAX + 1) maps any input
        # onto the full valid range [0, SEED_MAX].
        return v % (SEED_MAX + 1)

    def invoke(self, context: InvocationContext) -> NoiseOutput:
        """Generate seeded noise at the requested size and save it under a
        session-unique name."""
        device = torch.device(choose_torch_device())
        noise = get_noise(self.width, self.height, device, self.seed)

        # Name is unique per (session, node) so parallel graphs cannot collide.
        name = f'{context.graph_execution_state_id}__{self.id}'
        context.services.latents.save(name, noise)
        return build_noise_output(latents_name=name, latents=noise)
|
||||||
|
|
||||||
|
|
||||||
|
# Text to image
|
||||||
|
# Text to image
class TextToLatentsInvocation(BaseInvocation):
    """Generates latents from conditionings.

    Runs the diffusion denoising loop from pure noise, guided by positive and
    negative conditioning tensors and (optionally) one or more ControlNets.
    """

    type: Literal["t2l"] = "t2l"

    # Inputs
    # fmt: off
    positive_conditioning: Optional[ConditioningField] = Field(description="Positive conditioning for generation")
    negative_conditioning: Optional[ConditioningField] = Field(description="Negative conditioning for generation")
    noise: Optional[LatentsField] = Field(description="The noise to use")
    steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image")
    cfg_scale: Union[float, List[float]] = Field(default=7.5, ge=1, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", )
    scheduler: SAMPLER_NAME_VALUES = Field(default="euler", description="The scheduler to use" )
    model: str = Field(default="", description="The model to use (currently ignored)")
    control: Union[ControlField, List[ControlField]] = Field(default=None, description="The control to use")
    # seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
    # seamless_axes: str = Field(default="", description="The axes to tile the image on, 'x' and/or 'y'")
    # fmt: on

    @validator("cfg_scale")
    def ge_one(cls, v):
        """validate that all cfg_scale values are >= 1"""
        # cfg_scale may be a scalar or a per-step list; validate every entry.
        if isinstance(v, list):
            for i in v:
                if i < 1:
                    raise ValueError('cfg_scale must be greater than 1')
        else:
            if v < 1:
                raise ValueError('cfg_scale must be greater than 1')
        return v

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["latents"],
                "type_hints": {
                    "model": "model",
                    "control": "control",
                    # "cfg_scale": "float",
                    "cfg_scale": "number"
                }
            },
        }

    # TODO: pass this an emitter method or something? or a session for dispatching?
    def dispatch_progress(
        self, context: InvocationContext, source_node_id: str, intermediate_state: PipelineIntermediateState
    ) -> None:
        """Forward a per-step progress event to the UI via the step callback helper."""
        stable_diffusion_step_callback(
            context=context,
            intermediate_state=intermediate_state,
            node=self.dict(),
            source_node_id=source_node_id,
        )

    def get_model(self, model_manager: ModelManager) -> StableDiffusionGeneratorPipeline:
        """Resolve the pipeline for `self.model` and install the requested scheduler."""
        model_info = choose_model(model_manager, self.model)
        # NOTE(review): model_name and model_hash are currently unused here.
        model_name = model_info['model_name']
        model_hash = model_info['hash']
        model: StableDiffusionGeneratorPipeline = model_info['model']
        # Swap in the scheduler requested for this invocation (restores from
        # the "_backup" config inside get_scheduler, so repeated swaps are safe).
        model.scheduler = get_scheduler(
            model=model,
            scheduler_name=self.scheduler
        )

        # if isinstance(model, DiffusionPipeline):
        #     for component in [model.unet, model.vae]:
        #         configure_model_padding(component,
        #             self.seamless,
        #             self.seamless_axes
        #         )
        # else:
        #     configure_model_padding(model,
        #         self.seamless,
        #         self.seamless_axes
        #     )

        return model

    def get_conditioning_data(self, context: InvocationContext, model: StableDiffusionGeneratorPipeline) -> ConditioningData:
        """Load the positive/negative conditioning tensors and bundle them with
        guidance and post-processing settings for the denoising loop."""
        c, extra_conditioning_info = context.services.latents.get(self.positive_conditioning.conditioning_name)
        uc, _ = context.services.latents.get(self.negative_conditioning.conditioning_name)

        # Compel is used here only to pad the two embeddings to equal length;
        # truncation is disabled so long prompts are preserved.
        compel = Compel(
            tokenizer=model.tokenizer,
            text_encoder=model.text_encoder,
            textual_inversion_manager=model.textual_inversion_manager,
            dtype_for_device_getter=torch_dtype,
            truncate_long_prompts=False,
        )
        [c, uc] = compel.pad_conditioning_tensors_to_same_length([c, uc])

        conditioning_data = ConditioningData(
            unconditioned_embeddings=uc,
            text_embeddings=c,
            guidance_scale=self.cfg_scale,
            extra=extra_conditioning_info,
            # Post-processing knobs are hard-coded for now; the trailing
            # comments name the parameters they would come from.
            postprocessing_settings=PostprocessingSettings(
                threshold=0.0,#threshold,
                warmup=0.2,#warmup,
                h_symmetry_time_pct=None,#h_symmetry_time_pct,
                v_symmetry_time_pct=None#v_symmetry_time_pct,
            ),
        ).add_scheduler_args_if_applicable(model.scheduler, eta=0.0)#ddim_eta)
        return conditioning_data

    def prep_control_data(self,
                          context: InvocationContext,
                          model: StableDiffusionGeneratorPipeline, # really only need model for dtype and device
                          control_input: List[ControlField],
                          latents_shape: List[int],
                          do_classifier_free_guidance: bool = True,
                          ) -> List[ControlNetData]:
        """Normalize `control_input` into a list of ControlNetData (or None).

        Accepts None, an empty list, a single ControlField, or a list of
        ControlFields. For each control entry the ControlNet model is loaded
        from HF (optionally "repo,subfolder" syntax) and its control image is
        resized to match the latents' pixel dimensions.
        """
        # assuming fixed dimensional scaling of 8:1 for image:latents
        control_height_resize = latents_shape[2] * 8
        control_width_resize = latents_shape[3] * 8
        # Normalize the polymorphic input to either None or list[ControlField].
        if control_input is None:
            control_list = None
        elif isinstance(control_input, list) and len(control_input) == 0:
            control_list = None
        elif isinstance(control_input, ControlField):
            control_list = [control_input]
        elif isinstance(control_input, list) and len(control_input) > 0 and isinstance(control_input[0], ControlField):
            control_list = control_input
        else:
            control_list = None
        if (control_list is None):
            control_data = None
            # from above handling, any control that is not None should now be of type list[ControlField]
        else:
            # FIXME: add checks to skip entry if model or image is None
            #        and if weight is None, populate with default 1.0?
            control_data = []
            # NOTE(review): control_models is appended to but never read.
            control_models = []
            for control_info in control_list:
                # handle control models
                # "name,subfolder" selects a subfolder within an HF repo.
                if ("," in control_info.control_model):
                    control_model_split = control_info.control_model.split(",")
                    control_name = control_model_split[0]
                    control_subfolder = control_model_split[1]
                    print("Using HF model subfolders")
                    print(" control_name: ", control_name)
                    print(" control_subfolder: ", control_subfolder)
                    control_model = ControlNetModel.from_pretrained(control_name,
                                                                    subfolder=control_subfolder,
                                                                    torch_dtype=model.unet.dtype).to(model.device)
                else:
                    control_model = ControlNetModel.from_pretrained(control_info.control_model,
                                                                    torch_dtype=model.unet.dtype).to(model.device)
                control_models.append(control_model)
                control_image_field = control_info.image
                input_image = context.services.images.get_pil_image(control_image_field.image_name)
                # self.image.image_type, self.image.image_name
                # FIXME: still need to test with different widths, heights, devices, dtypes
                #        and add in batch_size, num_images_per_prompt?
                #        and do real check for classifier_free_guidance?
                # prepare_control_image should return torch.Tensor of shape(batch_size, 3, height, width)
                control_image = model.prepare_control_image(
                    image=input_image,
                    do_classifier_free_guidance=do_classifier_free_guidance,
                    width=control_width_resize,
                    height=control_height_resize,
                    # batch_size=batch_size * num_images_per_prompt,
                    # num_images_per_prompt=num_images_per_prompt,
                    device=control_model.device,
                    dtype=control_model.dtype,
                    control_mode=control_info.control_mode,
                )
                control_item = ControlNetData(model=control_model,
                                              image_tensor=control_image,
                                              weight=control_info.control_weight,
                                              begin_step_percent=control_info.begin_step_percent,
                                              end_step_percent=control_info.end_step_percent,
                                              control_mode=control_info.control_mode,
                                              )
                control_data.append(control_item)
                # MultiControlNetModel has been refactored out, just need list[ControlNetData]
        return control_data

    def invoke(self, context: InvocationContext) -> LatentsOutput:
        """Denoise from pure noise into latents and save them under a
        session-unique name."""
        noise = context.services.latents.get(self.noise.latents_name)

        # Get the source node id (we are invoking the prepared node)
        graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
        source_node_id = graph_execution_state.prepared_source_mapping[self.id]

        def step_callback(state: PipelineIntermediateState):
            self.dispatch_progress(context, source_node_id, state)

        model = self.get_model(context.services.model_manager)
        conditioning_data = self.get_conditioning_data(context, model)

        control_data = self.prep_control_data(model=model, context=context, control_input=self.control,
                                              latents_shape=noise.shape,
                                              # do_classifier_free_guidance=(self.cfg_scale >= 1.0))
                                              do_classifier_free_guidance=True,)

        # TODO: Verify the noise is the right size
        # Text-to-image starts from zero latents; all signal comes from `noise`.
        result_latents, result_attention_map_saver = model.latents_from_embeddings(
            latents=torch.zeros_like(noise, dtype=torch_dtype(model.device)),
            noise=noise,
            num_inference_steps=self.steps,
            conditioning_data=conditioning_data,
            control_data=control_data, # list[ControlNetData]
            callback=step_callback,
        )

        # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
        torch.cuda.empty_cache()

        name = f'{context.graph_execution_state_id}__{self.id}'
        context.services.latents.save(name, result_latents)
        return build_latents_output(latents_name=name, latents=result_latents)
|
||||||
|
|
||||||
|
|
||||||
|
class LatentsToLatentsInvocation(TextToLatentsInvocation):
    """Generates latents using latents as base image.

    Img2img variant of TextToLatentsInvocation: denoising starts from the
    supplied base latents (partially noised according to `strength`) instead
    of from pure noise.
    """

    type: Literal["l2l"] = "l2l"

    # Inputs
    latents: Optional[LatentsField] = Field(description="The latents to use as a base image")
    # 0.0 keeps the base latents untouched; 1.0 is a full re-generation.
    strength: float = Field(default=0.7, ge=0, le=1, description="The strength of the latents to use")

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["latents"],
                "type_hints": {
                    "model": "model",
                    "control": "control",
                    "cfg_scale": "number",
                }
            },
        }

    def invoke(self, context: InvocationContext) -> LatentsOutput:
        """Denoise from the base latents (plus noise) and save the result."""
        noise = context.services.latents.get(self.noise.latents_name)
        latent = context.services.latents.get(self.latents.latents_name)

        # Get the source node id (we are invoking the prepared node)
        graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
        source_node_id = graph_execution_state.prepared_source_mapping[self.id]

        def step_callback(state: PipelineIntermediateState):
            self.dispatch_progress(context, source_node_id, state)

        model = self.get_model(context.services.model_manager)
        conditioning_data = self.get_conditioning_data(context, model)

        control_data = self.prep_control_data(model=model, context=context, control_input=self.control,
                                              latents_shape=noise.shape,
                                              # do_classifier_free_guidance=(self.cfg_scale >= 1.0))
                                              do_classifier_free_guidance=True,
                                              )

        # TODO: Verify the noise is the right size

        # At strength 1.0 the base image contributes nothing, so start from
        # zeros (equivalent to text-to-image); otherwise start from the base.
        initial_latents = latent if self.strength < 1.0 else torch.zeros_like(
            latent, device=model.device, dtype=latent.dtype
        )

        # img2img runs only the tail of the schedule; how much is governed by
        # strength.
        timesteps, _ = model.get_img2img_timesteps(self.steps, self.strength)

        result_latents, result_attention_map_saver = model.latents_from_embeddings(
            latents=initial_latents,
            timesteps=timesteps,
            noise=noise,
            num_inference_steps=self.steps,
            conditioning_data=conditioning_data,
            control_data=control_data, # list[ControlNetData]
            callback=step_callback
        )

        # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
        torch.cuda.empty_cache()

        name = f'{context.graph_execution_state_id}__{self.id}'
        context.services.latents.save(name, result_latents)
        return build_latents_output(latents_name=name, latents=result_latents)
|
||||||
|
|
||||||
|
|
||||||
|
# Latent to image
|
||||||
|
# Latent to image
class LatentsToImageInvocation(BaseInvocation):
    """Generates an image from latents."""

    type: Literal["l2i"] = "l2i"

    # Inputs
    latents: Optional[LatentsField] = Field(description="The latents to generate an image from")
    model: str = Field(default="", description="The model to use")

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["latents", "image"],
                "type_hints": {
                    "model": "model"
                }
            },
        }

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Decode latents through the model's VAE and save the PIL image."""
        latents = context.services.latents.get(self.latents.latents_name)

        # TODO: this only really needs the vae
        model_info = choose_model(context.services.model_manager, self.model)
        model: StableDiffusionGeneratorPipeline = model_info['model']

        # inference_mode on top of no_grad: skips autograd bookkeeping entirely.
        with torch.inference_mode():
            np_image = model.decode_latents(latents)
            image = model.numpy_to_pil(np_image)[0]

        # what happened to metadata?
        # metadata = context.services.metadata.build_metadata(
        #     session_id=context.graph_execution_state_id, node=self

        torch.cuda.empty_cache()

        # new (post Image service refactor) way of using services to save image
        # and generate a unique image_name
        image_dto = context.services.images.create(
            image=image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            session_id=context.graph_execution_state_id,
            node_id=self.id,
            is_intermediate=self.is_intermediate
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
|
|
||||||
|
|
||||||
|
# Interpolation modes accepted by torch.nn.functional.interpolate; used by the
# latents resize/scale invocations below.
LATENTS_INTERPOLATION_MODE = Literal[
    "nearest", "linear", "bilinear", "bicubic", "trilinear", "area", "nearest-exact"
]
|
||||||
|
|
||||||
|
|
||||||
|
class ResizeLatentsInvocation(BaseInvocation):
    """Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8."""

    type: Literal["lresize"] = "lresize"

    # Inputs
    latents: Optional[LatentsField] = Field(description="The latents to resize")
    # Pixel-space targets; converted to latent space via // 8 in invoke().
    width: int = Field(ge=64, multiple_of=8, description="The width to resize to (px)")
    height: int = Field(ge=64, multiple_of=8, description="The height to resize to (px)")
    mode: LATENTS_INTERPOLATION_MODE = Field(default="bilinear", description="The interpolation mode")
    antialias: bool = Field(default=False, description="Whether or not to antialias (applied in bilinear and bicubic modes only)")

    def invoke(self, context: InvocationContext) -> LatentsOutput:
        """Interpolate the latents to the requested size and save the result."""
        latents = context.services.latents.get(self.latents.latents_name)

        resized_latents = torch.nn.functional.interpolate(
            latents,
            size=(self.height // 8, self.width // 8),
            mode=self.mode,
            # torch only honours antialias for bilinear/bicubic modes.
            antialias=self.antialias if self.mode in ["bilinear", "bicubic"] else False,
        )

        # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
        torch.cuda.empty_cache()

        name = f"{context.graph_execution_state_id}__{self.id}"
        # context.services.latents.set(name, resized_latents)
        context.services.latents.save(name, resized_latents)
        return build_latents_output(latents_name=name, latents=resized_latents)
|
||||||
|
|
||||||
|
|
||||||
|
class ScaleLatentsInvocation(BaseInvocation):
    """Scales latents by a given factor."""

    type: Literal["lscale"] = "lscale"

    # Inputs
    latents: Optional[LatentsField] = Field(description="The latents to scale")
    scale_factor: float = Field(gt=0, description="The factor by which to scale the latents")
    mode: LATENTS_INTERPOLATION_MODE = Field(default="bilinear", description="The interpolation mode")
    antialias: bool = Field(default=False, description="Whether or not to antialias (applied in bilinear and bicubic modes only)")

    def invoke(self, context: InvocationContext) -> LatentsOutput:
        """Interpolate the latents by `scale_factor` and save the result."""
        source_latents = context.services.latents.get(self.latents.latents_name)

        # torch only honours antialiasing for the bilinear/bicubic modes.
        use_antialias = self.mode in ["bilinear", "bicubic"] and self.antialias

        scaled_latents = torch.nn.functional.interpolate(
            source_latents,
            scale_factor=self.scale_factor,
            mode=self.mode,
            antialias=use_antialias,
        )

        # Release cached CUDA memory before handing the result downstream.
        # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
        torch.cuda.empty_cache()

        name = f"{context.graph_execution_state_id}__{self.id}"
        context.services.latents.save(name, scaled_latents)
        return build_latents_output(latents_name=name, latents=scaled_latents)
|
||||||
|
|
||||||
|
|
||||||
|
class ImageToLatentsInvocation(BaseInvocation):
    """Encodes an image into latents."""

    type: Literal["i2l"] = "i2l"

    # Inputs
    image: Union[ImageField, None] = Field(description="The image to encode")
    model: str = Field(default="", description="The model to use")

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["latents", "image"],
                "type_hints": {"model": "model"},
            },
        }

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> LatentsOutput:
        """Encode the input image through the model's VAE and save the latents."""
        # image = context.services.images.get(
        #     self.image.image_type, self.image.image_name
        # )
        image = context.services.images.get_pil_image(self.image.image_name)

        # TODO: this only really needs the vae
        model_info = choose_model(context.services.model_manager, self.model)
        model: StableDiffusionGeneratorPipeline = model_info["model"]

        # Drop any alpha channel and convert to a grid-aligned CHW tensor.
        image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))

        # The VAE expects a batch dimension; add one if absent.
        if image_tensor.dim() == 3:
            image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")

        latents = model.non_noised_latents_from_image(
            image_tensor,
            device=model._model_group.device_for(model.unet),
            dtype=model.unet.dtype,
        )

        name = f"{context.graph_execution_state_id}__{self.id}"
        # context.services.latents.set(name, latents)
        context.services.latents.save(name, latents)
        return build_latents_output(latents_name=name, latents=latents)
|
||||||
109
invokeai/app/invocations/math.py
Normal file
109
invokeai/app/invocations/math.py
Normal file
@@ -0,0 +1,109 @@
|
|||||||
|
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
from typing import Literal
|
||||||
|
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from .baseinvocation import (
|
||||||
|
BaseInvocation,
|
||||||
|
BaseInvocationOutput,
|
||||||
|
InvocationContext,
|
||||||
|
InvocationConfig,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class MathInvocationConfig(BaseModel):
    """Helper class to provide all math invocations with additional config"""

    # Schema customisation: every math node is tagged "math" in the UI.
    class Config(InvocationConfig):
        schema_extra = {"ui": {"tags": ["math"]}}
|
||||||
|
|
||||||
|
|
||||||
|
class IntOutput(BaseInvocationOutput):
    """An integer output"""

    type: Literal["int_output"] = "int_output"
    # NOTE(review): default=None on an int-typed field — pydantic v1 does not
    # validate defaults, so this only surfaces if a caller omits `a`; confirm
    # whether 0 was intended.
    a: int = Field(default=None, description="The output integer")
|
||||||
|
|
||||||
|
|
||||||
|
class FloatOutput(BaseInvocationOutput):
    """A float output"""

    type: Literal["float_output"] = "float_output"
    # NOTE(review): default=None on a float-typed field — pydantic v1 does not
    # validate defaults; confirm whether 0.0 was intended.
    param: float = Field(default=None, description="The output float")
|
||||||
|
|
||||||
|
|
||||||
|
class AddInvocation(BaseInvocation, MathInvocationConfig):
    """Adds two numbers"""

    type: Literal["add"] = "add"
    a: int = Field(default=0, description="The first number")
    b: int = Field(default=0, description="The second number")

    def invoke(self, context: InvocationContext) -> IntOutput:
        # `context` is unused; addition needs no services.
        total = self.a + self.b
        return IntOutput(a=total)
|
||||||
|
|
||||||
|
|
||||||
|
class SubtractInvocation(BaseInvocation, MathInvocationConfig):
    """Subtracts two numbers"""

    type: Literal["sub"] = "sub"
    a: int = Field(default=0, description="The first number")
    b: int = Field(default=0, description="The second number")

    def invoke(self, context: InvocationContext) -> IntOutput:
        # `context` is unused; subtraction needs no services.
        difference = self.a - self.b
        return IntOutput(a=difference)
|
||||||
|
|
||||||
|
|
||||||
|
class MultiplyInvocation(BaseInvocation, MathInvocationConfig):
    """Multiplies two numbers"""

    type: Literal["mul"] = "mul"
    a: int = Field(default=0, description="The first number")
    b: int = Field(default=0, description="The second number")

    def invoke(self, context: InvocationContext) -> IntOutput:
        # `context` is unused; multiplication needs no services.
        product = self.a * self.b
        return IntOutput(a=product)
|
||||||
|
|
||||||
|
|
||||||
|
class DivideInvocation(BaseInvocation, MathInvocationConfig):
    """Divides two numbers"""

    type: Literal["div"] = "div"
    a: int = Field(default=0, description="The first number")
    b: int = Field(default=0, description="The second number")

    def invoke(self, context: InvocationContext) -> IntOutput:
        """Return ``a / b`` truncated toward zero.

        Uses pure integer arithmetic instead of the previous ``int(a / b)``,
        which routed through float division and could lose exactness for
        operands beyond float precision (~2**53). Truncation-toward-zero
        semantics are preserved for all sign combinations.

        Raises:
            ZeroDivisionError: if ``b`` is 0 (same as the previous behavior).
        """
        quotient = abs(self.a) // abs(self.b)
        if (self.a < 0) != (self.b < 0):
            quotient = -quotient
        return IntOutput(a=quotient)
|
||||||
|
|
||||||
|
|
||||||
|
class RandomIntInvocation(BaseInvocation):
    """Outputs a single random integer."""

    type: Literal["rand_int"] = "rand_int"
    low: int = Field(default=0, description="The inclusive low value")
    high: int = Field(default=np.iinfo(np.int32).max, description="The exclusive high value")

    def invoke(self, context: InvocationContext) -> IntOutput:
        # Draw uniformly from [low, high) using numpy's global RNG.
        value = int(np.random.randint(self.low, self.high))
        return IntOutput(a=value)
|
||||||
237
invokeai/app/invocations/param_easing.py
Normal file
237
invokeai/app/invocations/param_easing.py
Normal file
@@ -0,0 +1,237 @@
|
|||||||
|
import io
|
||||||
|
from typing import Literal, Optional, Any
|
||||||
|
|
||||||
|
# from PIL.Image import Image
|
||||||
|
import PIL.Image
|
||||||
|
from matplotlib.ticker import MaxNLocator
|
||||||
|
from matplotlib.figure import Figure
|
||||||
|
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
import numpy as np
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
|
||||||
|
from easing_functions import (
|
||||||
|
LinearInOut,
|
||||||
|
QuadEaseInOut, QuadEaseIn, QuadEaseOut,
|
||||||
|
CubicEaseInOut, CubicEaseIn, CubicEaseOut,
|
||||||
|
QuarticEaseInOut, QuarticEaseIn, QuarticEaseOut,
|
||||||
|
QuinticEaseInOut, QuinticEaseIn, QuinticEaseOut,
|
||||||
|
SineEaseInOut, SineEaseIn, SineEaseOut,
|
||||||
|
CircularEaseIn, CircularEaseInOut, CircularEaseOut,
|
||||||
|
ExponentialEaseInOut, ExponentialEaseIn, ExponentialEaseOut,
|
||||||
|
ElasticEaseIn, ElasticEaseInOut, ElasticEaseOut,
|
||||||
|
BackEaseIn, BackEaseInOut, BackEaseOut,
|
||||||
|
BounceEaseIn, BounceEaseInOut, BounceEaseOut)
|
||||||
|
|
||||||
|
from .baseinvocation import (
|
||||||
|
BaseInvocation,
|
||||||
|
BaseInvocationOutput,
|
||||||
|
InvocationContext,
|
||||||
|
InvocationConfig,
|
||||||
|
)
|
||||||
|
from ...backend.util.logging import InvokeAILogger
|
||||||
|
from .collections import FloatCollectionOutput
|
||||||
|
|
||||||
|
|
||||||
|
class FloatLinearRangeInvocation(BaseInvocation):
    """Creates a range"""

    type: Literal["float_range"] = "float_range"

    # Inputs
    start: float = Field(default=5, description="The first value of the range")
    stop: float = Field(default=10, description="The last value of the range")
    steps: int = Field(default=30, description="number of values to interpolate over (including start and stop)")

    def invoke(self, context: InvocationContext) -> FloatCollectionOutput:
        # np.linspace includes both endpoints and spaces `steps` values evenly.
        values = np.linspace(self.start, self.stop, self.steps)
        return FloatCollectionOutput(collection=list(values))
|
||||||
|
|
||||||
|
|
||||||
|
# Easing classes keyed by the short names exposed in the node UI.
# Grouped by curve family; order is preserved because EASING_FUNCTION_KEYS
# below is derived from this dict's key order.
EASING_FUNCTIONS_MAP = {
    "Linear": LinearInOut,
    # Quadratic
    "QuadIn": QuadEaseIn,
    "QuadOut": QuadEaseOut,
    "QuadInOut": QuadEaseInOut,
    # Cubic
    "CubicIn": CubicEaseIn,
    "CubicOut": CubicEaseOut,
    "CubicInOut": CubicEaseInOut,
    # Quartic
    "QuarticIn": QuarticEaseIn,
    "QuarticOut": QuarticEaseOut,
    "QuarticInOut": QuarticEaseInOut,
    # Quintic
    "QuinticIn": QuinticEaseIn,
    "QuinticOut": QuinticEaseOut,
    "QuinticInOut": QuinticEaseInOut,
    # Sinusoidal
    "SineIn": SineEaseIn,
    "SineOut": SineEaseOut,
    "SineInOut": SineEaseInOut,
    # Circular
    "CircularIn": CircularEaseIn,
    "CircularOut": CircularEaseOut,
    "CircularInOut": CircularEaseInOut,
    # Exponential
    "ExponentialIn": ExponentialEaseIn,
    "ExponentialOut": ExponentialEaseOut,
    "ExponentialInOut": ExponentialEaseInOut,
    # Elastic
    "ElasticIn": ElasticEaseIn,
    "ElasticOut": ElasticEaseOut,
    "ElasticInOut": ElasticEaseInOut,
    # Back (overshoot)
    "BackIn": BackEaseIn,
    "BackOut": BackEaseOut,
    "BackInOut": BackEaseInOut,
    # Bounce
    "BounceIn": BounceEaseIn,
    "BounceOut": BounceEaseOut,
    "BounceInOut": BounceEaseInOut,
}

# Literal type listing every supported easing name, derived from the map so
# the two can never drift apart.
EASING_FUNCTION_KEYS: Any = Literal[tuple(list(EASING_FUNCTIONS_MAP.keys()))]
|
||||||
|
|
||||||
|
|
||||||
|
# actually I think for now could just use CollectionOutput (which is list[Any]
|
||||||
|
class StepParamEasingInvocation(BaseInvocation):
    """Experimental per-step parameter easing for denoising steps"""

    type: Literal["step_param_easing"] = "step_param_easing"

    # Inputs
    easing: EASING_FUNCTION_KEYS = Field(default="Linear", description="The easing function to use")
    num_steps: int = Field(default=20, description="number of denoising steps")
    start_value: float = Field(default=0.0, description="easing starting value")
    end_value: float = Field(default=1.0, description="easing ending value")
    start_step_percent: float = Field(default=0.0, description="fraction of steps at which to start easing")
    end_step_percent: float = Field(default=1.0, description="fraction of steps after which to end easing")
    # if None, then start_value is used prior to easing start
    pre_start_value: Optional[float] = Field(default=None, description="value before easing start")
    # if None, then end value is used prior to easing end
    post_end_value: Optional[float] = Field(default=None, description="value after easing end")
    mirror: bool = Field(default=False, description="include mirror of easing function")
    # FIXME: add alt_mirror option (alternative to default or mirror), or remove entirely
    # alt_mirror: bool = Field(default=False, description="alternative mirroring by dual easing")
    show_easing_plot: bool = Field(default=False, description="show easing plot")

    def invoke(self, context: InvocationContext) -> FloatCollectionOutput:
        """Build a per-step list of parameter values shaped by the chosen easing.

        Returns a collection of exactly `num_steps` floats: `pre_start_value`
        repeated before easing starts, the eased values, then `post_end_value`
        repeated after easing ends.
        """
        log_diagnostics = False
        # Bind the logger unconditionally so the guarded debug lines below can
        # never reference an unbound name (previously it was only created
        # inside the first `if log_diagnostics` block).
        logger = InvokeAILogger.getLogger(name="StepParamEasing")

        # convert from start_step_percent to nearest step <= (steps * start_step_percent)
        start_step = int(np.round(self.num_steps * self.start_step_percent))
        # convert from end_step_percent to nearest step >= (steps * end_step_percent)
        end_step = int(np.round((self.num_steps - 1) * self.end_step_percent))
        num_easing_steps = end_step - start_step + 1

        num_presteps = start_step
        num_poststeps = self.num_steps - (num_presteps + num_easing_steps)
        prelist = list(num_presteps * [self.pre_start_value])
        postlist = list(num_poststeps * [self.post_end_value])

        if log_diagnostics:
            logger.debug("start_step: " + str(start_step))
            logger.debug("end_step: " + str(end_step))
            logger.debug("num_easing_steps: " + str(num_easing_steps))
            logger.debug("num_presteps: " + str(num_presteps))
            logger.debug("num_poststeps: " + str(num_poststeps))
            logger.debug("prelist size: " + str(len(prelist)))
            logger.debug("postlist size: " + str(len(postlist)))
            logger.debug("prelist: " + str(prelist))
            logger.debug("postlist: " + str(postlist))

        easing_class = EASING_FUNCTIONS_MAP[self.easing]
        if log_diagnostics:
            logger.debug("easing class: " + str(easing_class))
        easing_list = list()
        if self.mirror:  # "expected" mirroring
            # if number of steps is even, squeeze duration down to (number_of_steps)/2
            # and create reverse copy of list to append
            # if number of steps is odd, squeeze duration down to ceil(number_of_steps/2)
            # and create reverse copy of list[1:end-1]
            # but if even then number_of_steps/2 === ceil(number_of_steps/2), so can just use ceil always
            base_easing_duration = int(np.ceil(num_easing_steps / 2.0))
            if log_diagnostics:
                logger.debug("base easing duration: " + str(base_easing_duration))
            even_num_steps = num_easing_steps % 2 == 0  # even number of steps
            easing_function = easing_class(
                start=self.start_value,
                end=self.end_value,
                duration=base_easing_duration - 1,
            )
            base_easing_vals = list()
            for step_index in range(base_easing_duration):
                easing_val = easing_function.ease(step_index)
                base_easing_vals.append(easing_val)
                if log_diagnostics:
                    logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(easing_val))
            # The midpoint sample is shared between the two halves when the
            # easing span has an odd number of steps, so drop it from the mirror.
            if even_num_steps:
                mirror_easing_vals = list(reversed(base_easing_vals))
            else:
                mirror_easing_vals = list(reversed(base_easing_vals[0:-1]))
            if log_diagnostics:
                logger.debug("base easing vals: " + str(base_easing_vals))
                logger.debug("mirror easing vals: " + str(mirror_easing_vals))
            easing_list = base_easing_vals + mirror_easing_vals

        # FIXME: add alt_mirror option (alternative to default or mirror), or remove entirely
        # (see repository history for the commented-out dual-easing sketch)

        else:  # no mirroring (default)
            easing_function = easing_class(
                start=self.start_value,
                end=self.end_value,
                duration=num_easing_steps - 1,
            )
            for step_index in range(num_easing_steps):
                step_val = easing_function.ease(step_index)
                easing_list.append(step_val)
                if log_diagnostics:
                    logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(step_val))

        if log_diagnostics:
            logger.debug("prelist size: " + str(len(prelist)))
            logger.debug("easing_list size: " + str(len(easing_list)))
            logger.debug("postlist size: " + str(len(postlist)))

        param_list = prelist + easing_list + postlist

        if self.show_easing_plot:
            plt.figure()
            plt.xlabel("Step")
            plt.ylabel("Param Value")
            plt.title("Per-Step Values Based On Easing: " + self.easing)
            plt.bar(range(len(param_list)), param_list)
            ax = plt.gca()
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            buf = io.BytesIO()
            plt.savefig(buf, format='png')
            buf.seek(0)
            im = PIL.Image.open(buf)
            im.show()
            buf.close()
            # Close the figure so repeated invocations don't accumulate figures
            # in pyplot's global registry (memory leak).
            plt.close()

        # output array of size steps, each entry list[i] is param value for step i
        return FloatCollectionOutput(collection=param_list)
|
||||||
28
invokeai/app/invocations/params.py
Normal file
28
invokeai/app/invocations/params.py
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
from typing import Literal
|
||||||
|
from pydantic import Field
|
||||||
|
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
|
||||||
|
from .math import IntOutput, FloatOutput
|
||||||
|
|
||||||
|
# Pass-through parameter nodes - used by subgraphs
|
||||||
|
|
||||||
|
class ParamIntInvocation(BaseInvocation):
    """An integer parameter"""

    type: Literal["param_int"] = "param_int"
    a: int = Field(default=0, description="The integer value")

    def invoke(self, context: InvocationContext) -> IntOutput:
        # Pass-through: wrap the configured value unchanged.
        return IntOutput(a=self.a)
|
||||||
|
|
||||||
|
class ParamFloatInvocation(BaseInvocation):
    """A float parameter"""

    type: Literal["param_float"] = "param_float"
    param: float = Field(default=0.0, description="The float value")

    def invoke(self, context: InvocationContext) -> FloatOutput:
        # Pass-through: wrap the configured value unchanged.
        return FloatOutput(param=self.param)
|
||||||
57
invokeai/app/invocations/prompt.py
Normal file
57
invokeai/app/invocations/prompt.py
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
from typing import Literal
|
||||||
|
|
||||||
|
from pydantic.fields import Field
|
||||||
|
|
||||||
|
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
|
||||||
|
from dynamicprompts.generators import RandomPromptGenerator, CombinatorialPromptGenerator
|
||||||
|
|
||||||
|
class PromptOutput(BaseInvocationOutput):
    """Base class for invocations that output a prompt"""

    type: Literal["prompt"] = "prompt"
    prompt: str = Field(default=None, description="The output prompt")

    class Config:
        # Both fields must appear in the serialized schema.
        schema_extra = {"required": ["type", "prompt"]}
|
||||||
|
|
||||||
|
|
||||||
|
class PromptCollectionOutput(BaseInvocationOutput):
    """Base class for invocations that output a collection of prompts"""

    type: Literal["prompt_collection_output"] = "prompt_collection_output"

    prompt_collection: list[str] = Field(description="The output prompt collection")
    count: int = Field(description="The size of the prompt collection")

    class Config:
        # All fields must appear in the serialized schema.
        schema_extra = {"required": ["type", "prompt_collection", "count"]}
|
||||||
|
|
||||||
|
|
||||||
|
class DynamicPromptInvocation(BaseInvocation):
    """Parses a prompt using adieyal/dynamicprompts' random or combinatorial generator"""

    type: Literal["dynamic_prompt"] = "dynamic_prompt"
    prompt: str = Field(description="The prompt to parse with dynamicprompts")
    max_prompts: int = Field(default=1, description="The number of prompts to generate")
    combinatorial: bool = Field(default=False, description="Whether to use the combinatorial generator")

    def invoke(self, context: InvocationContext) -> PromptCollectionOutput:
        # The two generators take differently-named count kwargs, hence the branch.
        if self.combinatorial:
            prompts = CombinatorialPromptGenerator().generate(self.prompt, max_prompts=self.max_prompts)
        else:
            prompts = RandomPromptGenerator().generate(self.prompt, num_images=self.max_prompts)
        return PromptCollectionOutput(prompt_collection=prompts, count=len(prompts))
|
||||||
55
invokeai/app/invocations/reconstruct.py
Normal file
55
invokeai/app/invocations/reconstruct.py
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
from typing import Literal, Union
|
||||||
|
|
||||||
|
from pydantic import Field
|
||||||
|
|
||||||
|
from invokeai.app.models.image import ImageCategory, ImageField, ResourceOrigin
|
||||||
|
|
||||||
|
from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
|
||||||
|
from .image import ImageOutput
|
||||||
|
|
||||||
|
|
||||||
|
class RestoreFaceInvocation(BaseInvocation):
    """Restores faces in an image."""

    type: Literal["restore_face"] = "restore_face"

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image")
    strength: float = Field(default=0.75, gt=0, le=1, description="The strength of the restoration" )

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {"ui": {"tags": ["restoration", "image"]}}

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Run face restoration over the input image and save the result."""
        source = context.services.images.get_pil_image(self.image.image_name)
        results = context.services.restoration.upscale_and_reconstruct(
            image_list=[[source, 0]],
            upscale=None,  # restoration only; no upscaling pass
            strength=self.strength,  # GFPGAN strength
            save_original=False,
            image_callback=None,
        )

        # Results are image and seed, unwrap for now
        # TODO: can this return multiple results?
        restored = results[0][0]
        image_dto = context.services.images.create(
            image=restored,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
57
invokeai/app/invocations/upscale.py
Normal file
57
invokeai/app/invocations/upscale.py
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
from typing import Literal, Union
|
||||||
|
|
||||||
|
from pydantic import Field
|
||||||
|
|
||||||
|
from invokeai.app.models.image import ImageCategory, ImageField, ResourceOrigin
|
||||||
|
from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
|
||||||
|
from .image import ImageOutput
|
||||||
|
|
||||||
|
|
||||||
|
class UpscaleInvocation(BaseInvocation):
    """Upscales an image."""

    type: Literal["upscale"] = "upscale"

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
    strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
    level: Literal[2, 4] = Field(default=2, description="The upscale level")

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {"ui": {"tags": ["upscaling", "image"]}}

    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Upscale the input image by `level` and save the result."""
        source = context.services.images.get_pil_image(self.image.image_name)
        results = context.services.restoration.upscale_and_reconstruct(
            image_list=[[source, 0]],
            upscale=(self.level, self.strength),
            strength=0.0,  # GFPGAN strength
            save_original=False,
            image_callback=None,
        )

        # Results are image and seed, unwrap for now
        # TODO: can this return multiple results?
        upscaled = results[0][0]
        image_dto = context.services.images.create(
            image=upscaled,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
|
||||||
14
invokeai/app/invocations/util/choose_model.py
Normal file
14
invokeai/app/invocations/util/choose_model.py
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
from invokeai.backend.model_management.model_manager import ModelManager
|
||||||
|
|
||||||
|
|
||||||
|
def choose_model(model_manager: ModelManager, model_name: str):
    """Return the model named `model_name`, falling back to the default model.

    Falls back to `model_manager.get_model()` (the default model) when
    `model_name` is empty OR not a valid model name. Previously an empty
    name fell through to `get_model("")`, bypassing the default-model path.
    A warning is logged only when a non-empty invalid name was supplied.

    Args:
        model_manager: The model manager used for validation and lookup.
        model_name: Requested model name; may be "" to mean "use default".

    Returns:
        Whatever `model_manager.get_model(...)` returns for the chosen model.
    """
    logger = model_manager.logger
    if model_name and model_manager.valid_model(model_name):
        model = model_manager.get_model(model_name)
    else:
        if model_name:
            default_model_name = model_manager.default_model()
            logger.warning(f"'{model_name}' is not a valid model name. Using default model '{default_model_name}' instead.")
        model = model_manager.get_model()
    return model
|
||||||
3
invokeai/app/models/exceptions.py
Normal file
3
invokeai/app/models/exceptions.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
class CanceledException(Exception):
    """Execution canceled by user."""
|
||||||
90
invokeai/app/models/image.py
Normal file
90
invokeai/app/models/image.py
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
from enum import Enum
|
||||||
|
from typing import Optional, Tuple
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
|
from invokeai.app.util.metaenum import MetaEnum
|
||||||
|
|
||||||
|
|
||||||
|
class ResourceOrigin(str, Enum, metaclass=MetaEnum):
    """The origin of a resource (eg image).

    - INTERNAL: The resource was created by the application.
    - EXTERNAL: The resource was not created by the application.
    This may be a user-initiated upload, or an internal application upload (eg Canvas init image).
    """

    # The resource was created by the application.
    INTERNAL = "internal"
    # The resource was not created by the application. This may be a
    # user-initiated upload, or an internal application upload (eg Canvas
    # init image).
    EXTERNAL = "external"
|
||||||
|
|
||||||
|
|
||||||
|
class InvalidOriginException(ValueError):
    """Raised when a provided value is not a valid ResourceOrigin.

    Subclasses `ValueError`.
    """

    def __init__(self, message="Invalid resource origin."):
        # Delegate to ValueError so str(exc) carries the message.
        super().__init__(message)
|
||||||
|
|
||||||
|
|
||||||
|
class ImageCategory(str, Enum, metaclass=MetaEnum):
    """The category of an image.

    - GENERAL: The image is an output, init image, or otherwise an image without a specialized purpose.
    - MASK: The image is a mask image.
    - CONTROL: The image is a ControlNet control image.
    - USER: The image is a user-provide image.
    - OTHER: The image is some other type of image with a specialized purpose. To be used by external nodes.
    """

    # An output, init image, or otherwise an image without a specialized purpose.
    GENERAL = "general"
    # A mask image.
    MASK = "mask"
    # A ControlNet control image.
    CONTROL = "control"
    # A user-provide image.
    USER = "user"
    # Some other type of image with a specialized purpose, for external nodes.
    OTHER = "other"
|
||||||
|
|
||||||
|
|
||||||
|
class InvalidImageCategoryException(ValueError):
    """Raised when a provided value is not a valid ImageCategory.

    Subclasses `ValueError`.
    """

    def __init__(self, message="Invalid image category."):
        # Delegate to ValueError so str(exc) carries the message.
        super().__init__(message)
|
||||||
|
|
||||||
|
|
||||||
|
class ImageField(BaseModel):
    """An image field used for passing image objects between invocations"""

    # The name is the key used to look the image up in the image service.
    image_name: Optional[str] = Field(default=None, description="The name of the image")

    class Config:
        schema_extra = {"required": ["image_name"]}
|
||||||
|
|
||||||
|
|
||||||
|
class ColorField(BaseModel):
    # An RGBA color; each channel is an integer clamped to [0, 255].
    r: int = Field(ge=0, le=255, description="The red component")
    g: int = Field(ge=0, le=255, description="The green component")
    b: int = Field(ge=0, le=255, description="The blue component")
    a: int = Field(ge=0, le=255, description="The alpha component")

    def tuple(self) -> Tuple[int, int, int, int]:
        """Return the color as an (r, g, b, a) tuple."""
        return self.r, self.g, self.b, self.a
|
||||||
|
|
||||||
|
|
||||||
|
class ProgressImage(BaseModel):
    """The progress image sent intermittently during processing"""

    width: int = Field(description="The effective width of the image in pixels")
    height: int = Field(description="The effective height of the image in pixels")
    # Base64 data URL so the frontend can render without a round-trip.
    dataURL: str = Field(description="The image data as a b64 data URL")
|
||||||
93
invokeai/app/models/metadata.py
Normal file
93
invokeai/app/models/metadata.py
Normal file
@@ -0,0 +1,93 @@
|
|||||||
|
from typing import Optional, Union, List
|
||||||
|
from pydantic import BaseModel, Extra, Field, StrictFloat, StrictInt, StrictStr
|
||||||
|
|
||||||
|
|
||||||
|
class ImageMetadata(BaseModel):
    """
    Core generation metadata for an image/tensor generated in InvokeAI.

    Also includes any metadata from the image's PNG tEXt chunks.

    Generated by traversing the execution graph, collecting the parameters of the nearest ancestors
    of a given node.

    Full metadata may be accessed by querying for the session in the `graph_executions` table.
    """

    class Config:
        # Accept arbitrary additional fields. The CoreMetadataService won't
        # add any fields that are not already defined, but a different
        # metadata service implementation might.
        extra = Extra.allow

    type: Optional[StrictStr] = Field(
        default=None,
        description="The type of the ancestor node of the image output node.",
    )
    positive_conditioning: Optional[StrictStr] = Field(
        default=None, description="The positive conditioning."
    )
    negative_conditioning: Optional[StrictStr] = Field(
        default=None, description="The negative conditioning."
    )
    width: Optional[StrictInt] = Field(
        default=None, description="Width of the image/latents in pixels."
    )
    height: Optional[StrictInt] = Field(
        default=None, description="Height of the image/latents in pixels."
    )
    seed: Optional[StrictInt] = Field(
        default=None, description="The seed used for noise generation."
    )
    # Annotated Optional to match default=None; the value may be a single
    # scale or a per-step list of scales (both float-typed members kept).
    cfg_scale: Optional[Union[StrictFloat, List[StrictFloat]]] = Field(
        default=None, description="The classifier-free guidance scale."
    )
    steps: Optional[StrictInt] = Field(
        default=None, description="The number of steps used for inference."
    )
    scheduler: Optional[StrictStr] = Field(
        default=None, description="The scheduler used for inference."
    )
    model: Optional[StrictStr] = Field(
        default=None, description="The model used for inference."
    )
    strength: Optional[StrictFloat] = Field(
        default=None,
        description="The strength used for image-to-image/latents-to-latents.",
    )
    latents: Optional[StrictStr] = Field(
        default=None, description="The ID of the initial latents."
    )
    vae: Optional[StrictStr] = Field(
        default=None, description="The VAE used for decoding."
    )
    unet: Optional[StrictStr] = Field(
        default=None, description="The UNet used for inference."
    )
    clip: Optional[StrictStr] = Field(
        default=None, description="The CLIP Encoder used for conditioning."
    )
    extra: Optional[StrictStr] = Field(
        default=None,
        description="Uploaded image metadata, extracted from the PNG tEXt chunk.",
    )
|
||||||
581
invokeai/app/services/config.py
Normal file
581
invokeai/app/services/config.py
Normal file
@@ -0,0 +1,581 @@
|
|||||||
|
# Copyright (c) 2023 Lincoln Stein (https://github.com/lstein) and the InvokeAI Development Team
|
||||||
|
|
||||||
|
'''Invokeai configuration system.
|
||||||
|
|
||||||
|
Arguments and fields are taken from the pydantic definition of the
|
||||||
|
model. Defaults can be set by creating a yaml configuration file that
|
||||||
|
has a top-level key of "InvokeAI" and subheadings for each of the
|
||||||
|
categories returned by `invokeai --help`. The file looks like this:
|
||||||
|
|
||||||
|
[file: invokeai.yaml]
|
||||||
|
|
||||||
|
InvokeAI:
|
||||||
|
Paths:
|
||||||
|
root: /home/lstein/invokeai-main
|
||||||
|
conf_path: configs/models.yaml
|
||||||
|
legacy_conf_dir: configs/stable-diffusion
|
||||||
|
outdir: outputs
|
||||||
|
embedding_dir: embeddings
|
||||||
|
lora_dir: loras
|
||||||
|
autoconvert_dir: null
|
||||||
|
gfpgan_model_dir: models/gfpgan/GFPGANv1.4.pth
|
||||||
|
Models:
|
||||||
|
model: stable-diffusion-1.5
|
||||||
|
embeddings: true
|
||||||
|
Memory/Performance:
|
||||||
|
xformers_enabled: false
|
||||||
|
sequential_guidance: false
|
||||||
|
precision: float16
|
||||||
|
max_loaded_models: 4
|
||||||
|
always_use_cpu: false
|
||||||
|
free_gpu_mem: false
|
||||||
|
Features:
|
||||||
|
nsfw_checker: true
|
||||||
|
restore: true
|
||||||
|
esrgan: true
|
||||||
|
patchmatch: true
|
||||||
|
internet_available: true
|
||||||
|
log_tokenization: false
|
||||||
|
Web Server:
|
||||||
|
host: 127.0.0.1
|
||||||
|
port: 8081
|
||||||
|
allow_origins: []
|
||||||
|
allow_credentials: true
|
||||||
|
allow_methods:
|
||||||
|
- '*'
|
||||||
|
allow_headers:
|
||||||
|
- '*'
|
||||||
|
|
||||||
|
The default name of the configuration file is `invokeai.yaml`, located
|
||||||
|
in INVOKEAI_ROOT. You can supersede this by providing any
|
||||||
|
OmegaConf dictionary object at initialization time:
|
||||||
|
|
||||||
|
omegaconf = OmegaConf.load('/tmp/init.yaml')
|
||||||
|
conf = InvokeAIAppConfig()
|
||||||
|
conf.parse_args(conf=omegaconf)
|
||||||
|
|
||||||
|
InvokeAIAppConfig.parse_args() will parse the contents of `sys.argv`
|
||||||
|
at initialization time. You may pass a list of strings in the optional
|
||||||
|
`argv` argument to use instead of the system argv:
|
||||||
|
|
||||||
|
conf.parse_args(argv=['--xformers_enabled'])
|
||||||
|
|
||||||
|
It is also possible to set a value at initialization time. However, if
|
||||||
|
you call parse_args() it may be overwritten.
|
||||||
|
|
||||||
|
conf = InvokeAIAppConfig(xformers_enabled=True)
|
||||||
|
conf.parse_args(argv=['--no-xformers'])
|
||||||
|
conf.xformers_enabled
|
||||||
|
# False
|
||||||
|
|
||||||
|
|
||||||
|
To avoid this, use `get_config()` to retrieve the application-wide
|
||||||
|
configuration object. This will retain any properties set at object
|
||||||
|
creation time:
|
||||||
|
|
||||||
|
conf = InvokeAIAppConfig.get_config(xformers_enabled=True)
|
||||||
|
conf.parse_args(argv=['--no-xformers'])
|
||||||
|
conf.xformers_enabled
|
||||||
|
# True
|
||||||
|
|
||||||
|
Any setting can be overwritten by setting an environment variable of
|
||||||
|
form: "INVOKEAI_<setting>", as in:
|
||||||
|
|
||||||
|
export INVOKEAI_port=8080
|
||||||
|
|
||||||
|
Order of precedence (from highest):
|
||||||
|
1) initialization options
|
||||||
|
2) command line options
|
||||||
|
3) environment variable options
|
||||||
|
4) config file options
|
||||||
|
5) pydantic defaults
|
||||||
|
|
||||||
|
Typical usage at the top level file:
|
||||||
|
|
||||||
|
from invokeai.app.services.config import InvokeAIAppConfig
|
||||||
|
|
||||||
|
# get global configuration and print its nsfw_checker value
|
||||||
|
conf = InvokeAIAppConfig.get_config()
|
||||||
|
conf.parse_args()
|
||||||
|
print(conf.nsfw_checker)
|
||||||
|
|
||||||
|
Typical usage in a backend module:
|
||||||
|
|
||||||
|
from invokeai.app.services.config import InvokeAIAppConfig
|
||||||
|
|
||||||
|
# get global configuration and print its nsfw_checker value
|
||||||
|
conf = InvokeAIAppConfig.get_config()
|
||||||
|
print(conf.nsfw_checker)
|
||||||
|
|
||||||
|
|
||||||
|
Computed properties:
|
||||||
|
|
||||||
|
The InvokeAIAppConfig object has a series of properties that
|
||||||
|
resolve paths relative to the runtime root directory. They each return
|
||||||
|
a Path object:
|
||||||
|
|
||||||
|
root_path - path to InvokeAI root
|
||||||
|
output_path - path to default outputs directory
|
||||||
|
model_conf_path - path to models.yaml
|
||||||
|
conf - alias for the above
|
||||||
|
embedding_path - path to the embeddings directory
|
||||||
|
lora_path - path to the LoRA directory
|
||||||
|
|
||||||
|
In most cases, you will want to create a single InvokeAIAppConfig
|
||||||
|
object for the entire application. The InvokeAIAppConfig.get_config() function
|
||||||
|
does this:
|
||||||
|
|
||||||
|
config = InvokeAIAppConfig.get_config()
|
||||||
|
config.parse_args() # read values from the command line/config file
|
||||||
|
print(config.root)
|
||||||
|
|
||||||
|
# Subclassing
|
||||||
|
|
||||||
|
If you wish to create a similar class, please subclass the
|
||||||
|
`InvokeAISettings` class and define a Literal field named "type",
|
||||||
|
which is set to the desired top-level name. For example, to create a
|
||||||
|
"InvokeBatch" configuration, define like this:
|
||||||
|
|
||||||
|
class InvokeBatch(InvokeAISettings):
|
||||||
|
type: Literal["InvokeBatch"] = "InvokeBatch"
|
||||||
|
node_count : int = Field(default=1, description="Number of nodes to run on", category='Resources')
|
||||||
|
cpu_count : int = Field(default=8, description="Number of GPUs to run on per node", category='Resources')
|
||||||
|
|
||||||
|
This will now read and write from the "InvokeBatch" section of the
|
||||||
|
config file, look for environment variables named INVOKEBATCH_*, and
|
||||||
|
accept the command-line arguments `--node_count` and `--cpu_count`. The
|
||||||
|
two configs are kept in separate sections of the config file:
|
||||||
|
|
||||||
|
# invokeai.yaml
|
||||||
|
|
||||||
|
InvokeBatch:
|
||||||
|
Resources:
|
||||||
|
node_count: 1
|
||||||
|
cpu_count: 8
|
||||||
|
|
||||||
|
InvokeAI:
|
||||||
|
Paths:
|
||||||
|
root: /home/lstein/invokeai-main
|
||||||
|
conf_path: configs/models.yaml
|
||||||
|
legacy_conf_dir: configs/stable-diffusion
|
||||||
|
outdir: outputs
|
||||||
|
...
|
||||||
|
|
||||||
|
'''
|
||||||
|
from __future__ import annotations
|
||||||
|
import argparse
|
||||||
|
import pydoc
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from argparse import ArgumentParser
|
||||||
|
from omegaconf import OmegaConf, DictConfig
|
||||||
|
from pathlib import Path
|
||||||
|
from pydantic import BaseSettings, Field, parse_obj_as
|
||||||
|
from typing import ClassVar, Dict, List, Literal, Type, Union, get_origin, get_type_hints, get_args
|
||||||
|
|
||||||
|
INIT_FILE = Path('invokeai.yaml')
|
||||||
|
DB_FILE = Path('invokeai.db')
|
||||||
|
LEGACY_INIT_FILE = Path('invokeai.init')
|
||||||
|
|
||||||
|
class InvokeAISettings(BaseSettings):
    '''
    Runtime configuration settings in which default values are
    read from an omegaconf .yaml file.
    '''
    # Parsed contents of the invokeai.yaml init file; shared by all subclasses
    # and installed by InvokeAIAppConfig.parse_args().
    initconf        : ClassVar[DictConfig] = None
    # argparse groups keyed by category name; rebuilt on each
    # add_parser_arguments() call.
    argparse_groups : ClassVar[Dict] = {}

    def parse_args(self, argv: list = None):
        '''
        Parse `argv` (default: the current sys.argv[1:]) and copy the
        resulting option values onto this settings object.
        '''
        # BUGFIX: the previous default of `sys.argv[1:]` was evaluated once,
        # at class-definition time; resolve it at call time instead.
        if argv is None:
            argv = sys.argv[1:]
        parser = self.get_parser()
        opt = parser.parse_args(argv)
        for name in self.__fields__:
            if name not in self._excluded():
                setattr(self, name, getattr(opt, name))

    def to_yaml(self) -> str:
        """
        Return a YAML string representing our settings. This can be used
        as the contents of `invokeai.yaml` to restore settings later.
        """
        cls = self.__class__
        # the value of the `type` Literal names the top-level YAML stanza
        stanza = get_args(get_type_hints(cls)['type'])[0]
        field_dict = {stanza: dict()}
        for name, field in self.__fields__.items():
            if name in cls._excluded():
                continue
            category = field.field_info.extra.get("category") or "Uncategorized"
            value = getattr(self, name)
            if category not in field_dict[stanza]:
                field_dict[stanza][category] = dict()
            # keep paths as strings to make it easier to read
            field_dict[stanza][category][name] = str(value) if isinstance(value, Path) else value
        conf = OmegaConf.create(field_dict)
        return OmegaConf.to_yaml(conf)

    @classmethod
    def add_parser_arguments(cls, parser):
        """Add one argparse argument per pydantic field.

        Each argument's default is taken, in increasing priority, from the
        field default, the init file, and the environment.
        """
        if 'type' in get_type_hints(cls):
            settings_stanza = get_args(get_type_hints(cls)['type'])[0]
        else:
            settings_stanza = "Uncategorized"

        env_prefix = cls.Config.env_prefix if hasattr(cls.Config, 'env_prefix') else settings_stanza.upper()

        initconf = cls.initconf.get(settings_stanza) \
            if cls.initconf and settings_stanza in cls.initconf \
            else OmegaConf.create()

        # create an upcase version of the environment in
        # order to achieve case-insensitive environment
        # variables (the way Windows does)
        upcase_environ = dict()
        for key, value in os.environ.items():
            upcase_environ[key.upper()] = value

        fields = cls.__fields__
        cls.argparse_groups = {}

        for name, field in fields.items():
            if name not in cls._excluded():
                current_default = field.default

                category = field.field_info.extra.get("category", "Uncategorized")
                env_name = env_prefix + '_' + name
                if category in initconf and name in initconf.get(category):
                    field.default = initconf.get(category).get(name)
                if env_name.upper() in upcase_environ:
                    field.default = upcase_environ[env_name.upper()]
                cls.add_field_argument(parser, name, field)

                # restore the original default so repeated parser builds
                # don't accumulate init-file/env overrides
                field.default = current_default

    @classmethod
    def cmd_name(cls, command_field: str = 'type') -> str:
        """Return the value of the `type` Literal, used as the program name."""
        hints = get_type_hints(cls)
        if command_field in hints:
            return get_args(hints[command_field])[0]
        else:
            return 'Uncategorized'

    @classmethod
    def get_parser(cls) -> ArgumentParser:
        """Build a paging argument parser populated from this class's fields."""
        parser = PagingArgumentParser(
            prog=cls.cmd_name(),
            description=cls.__doc__,
        )
        cls.add_parser_arguments(parser)
        return parser

    @classmethod
    def add_subparser(cls, parser: argparse.ArgumentParser):
        """Register this settings class as a subcommand on `parser`."""
        parser.add_parser(cls.cmd_name(), help=cls.__doc__)

    @classmethod
    def _excluded(cls) -> List[str]:
        # fields that are handled specially and never exposed as CLI arguments
        return ['type', 'initconf']

    class Config:
        env_file_encoding = 'utf-8'
        arbitrary_types_allowed = True
        case_sensitive = True

    @classmethod
    def add_field_argument(cls, command_parser, name: str, field, default_override=None):
        """Translate a single pydantic field into an argparse argument."""
        field_type = get_type_hints(cls).get(name)
        default = default_override if default_override is not None else field.default if field.default_factory is None else field.default_factory()
        if category := field.field_info.extra.get("category"):
            if category not in cls.argparse_groups:
                cls.argparse_groups[category] = command_parser.add_argument_group(category)
            argparse_group = cls.argparse_groups[category]
        else:
            argparse_group = command_parser

        if get_origin(field_type) == Literal:
            # Literal fields become a `choices=` argument typed after the
            # literal members (or a Union if the members are of mixed type).
            allowed_values = get_args(field.type_)
            allowed_types = set()
            for val in allowed_values:
                allowed_types.add(type(val))
            allowed_types_list = list(allowed_types)
            field_type = allowed_types_list[0] if len(allowed_types) == 1 else Union[allowed_types_list]  # type: ignore

            argparse_group.add_argument(
                f"--{name}",
                dest=name,
                type=field_type,
                default=default,
                choices=allowed_values,
                help=field.field_info.description,
            )

        elif get_origin(field_type) == list:
            argparse_group.add_argument(
                f"--{name}",
                dest=name,
                nargs='*',
                type=field.type_,
                default=default,
                action=argparse.BooleanOptionalAction if field.type_ == bool else 'store',
                help=field.field_info.description,
            )
        else:
            argparse_group.add_argument(
                f"--{name}",
                dest=name,
                type=field.type_,
                default=default,
                action=argparse.BooleanOptionalAction if field.type_ == bool else 'store',
                help=field.field_info.description,
            )
|
||||||
|
def _find_root() -> Path:
    """Locate the InvokeAI runtime root directory.

    Resolution order: the INVOKEAI_ROOT environment variable; then the
    parent of an active virtualenv, provided it contains an init file
    (current or legacy); finally ~/invokeai.
    """
    env_root = os.environ.get("INVOKEAI_ROOT")
    if env_root:
        return Path(env_root).resolve()

    venv = os.environ.get("VIRTUAL_ENV")
    if venv:
        candidate = Path(venv, "..")
        if (candidate / INIT_FILE).exists() or (candidate / LEGACY_INIT_FILE).exists():
            return candidate.resolve()

    return Path("~/invokeai").expanduser().resolve()
|
||||||
|
|
||||||
|
class InvokeAIAppConfig(InvokeAISettings):
    '''
    Generate images using Stable Diffusion. Use "invokeai" to launch
    the command-line client (recommended for experts only), or
    "invokeai-web" to launch the web server. Global options
    can be changed by editing the file "INVOKEAI_ROOT/invokeai.yaml" or by
    setting environment variables INVOKEAI_<setting>.
    '''
    # Process-wide singleton and the kwargs it was created with; both are
    # managed by get_config() and consulted by parse_args().
    singleton_config: ClassVar[InvokeAIAppConfig] = None
    singleton_init: ClassVar[Dict] = None

    #fmt: off
    type: Literal["InvokeAI"] = "InvokeAI"
    host                : str = Field(default="127.0.0.1", description="IP address to bind to", category='Web Server')
    port                : int = Field(default=9090, description="Port to bind to", category='Web Server')
    allow_origins       : List[str] = Field(default=[], description="Allowed CORS origins", category='Web Server')
    allow_credentials   : bool = Field(default=True, description="Allow CORS credentials", category='Web Server')
    allow_methods       : List[str] = Field(default=["*"], description="Methods allowed for CORS", category='Web Server')
    allow_headers       : List[str] = Field(default=["*"], description="Headers allowed for CORS", category='Web Server')

    esrgan              : bool = Field(default=True, description="Enable/disable upscaling code", category='Features')
    internet_available  : bool = Field(default=True, description="If true, attempt to download models on the fly; otherwise only use local models", category='Features')
    log_tokenization    : bool = Field(default=False, description="Enable logging of parsed prompt tokens.", category='Features')
    nsfw_checker        : bool = Field(default=True, description="Enable/disable the NSFW checker", category='Features')
    patchmatch          : bool = Field(default=True, description="Enable/disable patchmatch inpaint code", category='Features')
    restore             : bool = Field(default=True, description="Enable/disable face restoration code", category='Features')

    always_use_cpu      : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", category='Memory/Performance')
    free_gpu_mem        : bool = Field(default=False, description="If true, purge model from GPU after each generation.", category='Memory/Performance')
    max_loaded_models   : int = Field(default=2, gt=0, description="Maximum number of models to keep in memory for rapid switching", category='Memory/Performance')
    precision           : Literal[tuple(['auto','float16','float32','autocast'])] = Field(default='float16', description='Floating point precision', category='Memory/Performance')
    sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category='Memory/Performance')
    xformers_enabled    : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance')

    root                : Path = Field(default=_find_root(), description='InvokeAI runtime root directory', category='Paths')
    autoconvert_dir     : Path = Field(default=None, description='Path to a directory of ckpt files to be converted into diffusers and imported on startup.', category='Paths')
    conf_path           : Path = Field(default='configs/models.yaml', description='Path to models definition file', category='Paths')
    embedding_dir       : Path = Field(default='embeddings', description='Path to InvokeAI textual inversion embeddings directory', category='Paths')
    # NOTE(review): description says "directory" but the default points at a
    # single model file -- confirm intended usage before changing either.
    gfpgan_model_dir    : Path = Field(default="./models/gfpgan/GFPGANv1.4.pth", description='Path to GFPGAN models directory.', category='Paths')
    controlnet_dir      : Path = Field(default="controlnets", description='Path to directory of ControlNet models.', category='Paths')
    legacy_conf_dir     : Path = Field(default='configs/stable-diffusion', description='Path to directory of legacy checkpoint config files', category='Paths')
    lora_dir            : Path = Field(default='loras', description='Path to InvokeAI LoRA model directory', category='Paths')
    db_dir              : Path = Field(default='databases', description='Path to InvokeAI databases directory', category='Paths')
    outdir              : Path = Field(default='outputs', description='Default folder for output images', category='Paths')
    from_file           : Path = Field(default=None, description='Take command input from the indicated file (command-line client only)', category='Paths')
    use_memory_db       : bool = Field(default=False, description='Use in-memory database for storing image metadata', category='Paths')

    model               : str = Field(default='stable-diffusion-1.5', description='Initial model name', category='Models')
    embeddings          : bool = Field(default=True, description='Load contents of embeddings directory', category='Models')

    log_handlers        : List[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>"', category="Logging")
    # note - would be better to read the log_format values from logging.py, but this creates circular dependencies issues
    log_format          : Literal[tuple(['plain','color','syslog','legacy'])] = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style', category="Logging")
    log_level           : Literal[tuple(["debug","info","warning","error","critical"])] = Field(default="debug", description="Emit logging messages at this level or higher", category="Logging")
    #fmt: on

    def parse_args(self, argv: List[str] = None, conf: DictConfig = None, clobber=False):
        '''
        Update settings with contents of init file, environment, and
        command-line settings.
        :param conf: alternate Omegaconf dictionary object
        :param argv: alternate sys.argv list
        :param clobber: overwrite any initialization parameters passed during initialization
        '''
        # Set the runtime root directory. We parse command-line switches here
        # in order to pick up the --root_dir option.
        super().parse_args(argv)
        if conf is None:
            try:
                conf = OmegaConf.load(self.root_dir / INIT_FILE)
            except Exception:
                # a missing or unreadable init file is fine; pydantic/env
                # defaults will be used instead
                pass
        InvokeAISettings.initconf = conf

        # parse args again in order to pick up settings in configuration file
        super().parse_args(argv)

        if self.singleton_init and not clobber:
            # re-apply the values the singleton was originally created with
            hints = get_type_hints(self.__class__)
            for k in self.singleton_init:
                setattr(self, k, parse_obj_as(hints[k], self.singleton_init[k]))

    @classmethod
    def get_config(cls, **kwargs) -> InvokeAIAppConfig:
        '''
        This returns a singleton InvokeAIAppConfig configuration object.
        '''
        if cls.singleton_config is None \
           or type(cls.singleton_config) != cls \
           or (kwargs and cls.singleton_init != kwargs):
            cls.singleton_config = cls(**kwargs)
            cls.singleton_init = kwargs
        return cls.singleton_config

    @property
    def root_path(self) -> Path:
        '''
        Path to the runtime root directory
        '''
        if self.root:
            return Path(self.root).expanduser()
        else:
            return self.find_root()

    @property
    def root_dir(self) -> Path:
        '''
        Alias for above.
        '''
        return self.root_path

    def _resolve(self, partial_path: Path) -> Path:
        # resolve a path relative to the runtime root
        return (self.root_path / partial_path).resolve()

    @property
    def init_file_path(self) -> Path:
        '''
        Path to invokeai.yaml
        '''
        return self._resolve(INIT_FILE)

    @property
    def output_path(self) -> Path:
        '''
        Path to defaults outputs directory.
        '''
        return self._resolve(self.outdir)

    @property
    def db_path(self) -> Path:
        '''
        Path to the invokeai.db file.
        '''
        return self._resolve(self.db_dir) / DB_FILE

    @property
    def model_conf_path(self) -> Path:
        '''
        Path to models configuration file.
        '''
        return self._resolve(self.conf_path)

    @property
    def legacy_conf_path(self) -> Path:
        '''
        Path to directory of legacy configuration files (e.g. v1-inference.yaml)
        '''
        return self._resolve(self.legacy_conf_dir)

    @property
    def cache_dir(self) -> Path:
        '''
        Path to the global cache directory for HuggingFace hub-managed models
        '''
        return self.models_dir / "hub"

    @property
    def models_dir(self) -> Path:
        '''
        Path to the models directory
        '''
        return self._resolve("models")

    @property
    def embedding_path(self) -> Union[Path, None]:
        '''
        Path to the textual inversion embeddings directory, or None if unset.
        '''
        return self._resolve(self.embedding_dir) if self.embedding_dir else None

    @property
    def lora_path(self) -> Union[Path, None]:
        '''
        Path to the LoRA models directory, or None if unset.
        '''
        return self._resolve(self.lora_dir) if self.lora_dir else None

    @property
    def controlnet_path(self) -> Union[Path, None]:
        '''
        Path to the controlnet models directory, or None if unset.
        '''
        return self._resolve(self.controlnet_dir) if self.controlnet_dir else None

    @property
    def autoconvert_path(self) -> Union[Path, None]:
        '''
        Path to the directory containing models to be imported automatically at startup,
        or None if unset.
        '''
        return self._resolve(self.autoconvert_dir) if self.autoconvert_dir else None

    @property
    def gfpgan_model_path(self) -> Union[Path, None]:
        '''
        Path to the GFPGAN model, or None if unset.
        '''
        return self._resolve(self.gfpgan_model_dir) if self.gfpgan_model_dir else None

    # the following methods support legacy calls leftover from the Globals era
    @property
    def full_precision(self) -> bool:
        """Return true if precision set to float32"""
        return self.precision == 'float32'

    @property
    def disable_xformers(self) -> bool:
        """Return true if xformers_enabled is false"""
        return not self.xformers_enabled

    @property
    def try_patchmatch(self) -> bool:
        """Return true if patchmatch true"""
        return self.patchmatch

    @staticmethod
    def find_root() -> Path:
        '''
        Choose the runtime root directory when not specified on command line or
        init file.
        '''
        return _find_root()
|
||||||
|
|
||||||
|
|
||||||
|
class PagingArgumentParser(argparse.ArgumentParser):
|
||||||
|
'''
|
||||||
|
A custom ArgumentParser that uses pydoc to page its output.
|
||||||
|
It also supports reading defaults from an init file.
|
||||||
|
'''
|
||||||
|
def print_help(self, file=None):
|
||||||
|
text = self.format_help()
|
||||||
|
pydoc.pager(text)
|
||||||
|
|
||||||
|
def get_invokeai_config(**kwargs)->InvokeAIAppConfig:
|
||||||
|
'''
|
||||||
|
Legacy function which returns InvokeAIAppConfig.get_config()
|
||||||
|
'''
|
||||||
|
return InvokeAIAppConfig.get_config(**kwargs)
|
||||||
64
invokeai/app/services/default_graphs.py
Normal file
64
invokeai/app/services/default_graphs.py
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
from ..invocations.latent import LatentsToImageInvocation, NoiseInvocation, TextToLatentsInvocation
|
||||||
|
from ..invocations.compel import CompelInvocation
|
||||||
|
from ..invocations.params import ParamIntInvocation
|
||||||
|
from .graph import Edge, EdgeConnection, ExposedNodeInput, ExposedNodeOutput, Graph, LibraryGraph
|
||||||
|
from .item_storage import ItemStorageABC
|
||||||
|
|
||||||
|
|
||||||
|
default_text_to_image_graph_id = '539b2af5-2b4d-4d8c-8071-e54a3255fc74'


def create_text_to_image() -> LibraryGraph:
    """Build the default text-to-image library graph.

    Wires prompt conditioning, noise and latent-decoding nodes together and
    exposes the user-facing parameters under stable aliases.
    """
    nodes = {
        'width': ParamIntInvocation(id='width', a=512),
        'height': ParamIntInvocation(id='height', a=512),
        'seed': ParamIntInvocation(id='seed', a=-1),
        '3': NoiseInvocation(id='3'),
        '4': CompelInvocation(id='4'),
        '5': CompelInvocation(id='5'),
        '6': TextToLatentsInvocation(id='6'),
        '7': LatentsToImageInvocation(id='7'),
    }

    def _edge(src_node, src_field, dst_node, dst_field):
        # small helper that keeps the wiring table below readable
        return Edge(
            source=EdgeConnection(node_id=src_node, field=src_field),
            destination=EdgeConnection(node_id=dst_node, field=dst_field),
        )

    edges = [
        _edge('width', 'a', '3', 'width'),
        _edge('height', 'a', '3', 'height'),
        _edge('seed', 'a', '3', 'seed'),
        _edge('3', 'noise', '6', 'noise'),
        _edge('6', 'latents', '7', 'latents'),
        _edge('4', 'conditioning', '6', 'positive_conditioning'),
        _edge('5', 'conditioning', '6', 'negative_conditioning'),
    ]

    return LibraryGraph(
        id=default_text_to_image_graph_id,
        name='t2i',
        description='Converts text to an image',
        graph=Graph(nodes=nodes, edges=edges),
        exposed_inputs=[
            ExposedNodeInput(node_path='4', field='prompt', alias='positive_prompt'),
            ExposedNodeInput(node_path='5', field='prompt', alias='negative_prompt'),
            ExposedNodeInput(node_path='width', field='a', alias='width'),
            ExposedNodeInput(node_path='height', field='a', alias='height'),
            ExposedNodeInput(node_path='seed', field='a', alias='seed'),
        ],
        exposed_outputs=[
            ExposedNodeOutput(node_path='7', field='image', alias='image'),
        ],
    )
|
||||||
|
|
||||||
|
|
||||||
|
def create_system_graphs(graph_library: ItemStorageABC[LibraryGraph]) -> list[LibraryGraph]:
    """Creates the default system graphs, or adds new versions if the old ones don't match"""
    # TODO: Uncomment this when we are ready to fix this up to prevent breaking changes
    # text_to_image = graph_library.get(default_text_to_image_graph_id)
    # # TODO: Check if the graph is the same as the default one, and if not, update it
    # #if text_to_image is None:

    # Build the default text-to-image graph and persist it into the library.
    default_text_to_image = create_text_to_image()
    graph_library.set(default_text_to_image)

    # Every system graph created here is returned to the caller.
    created_graphs: list[LibraryGraph] = [default_text_to_image]
    return created_graphs
|
||||||
103
invokeai/app/services/events.py
Normal file
103
invokeai/app/services/events.py
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
from typing import Any
|
||||||
|
from invokeai.app.models.image import ProgressImage
|
||||||
|
from invokeai.app.util.misc import get_timestamp
|
||||||
|
|
||||||
|
|
||||||
|
class EventServiceBase:
    """Basic event bus, to have an empty stand-in when not needed"""

    # All session-scoped events are dispatched under this single event name;
    # the concrete event is carried in the payload's `event` field.
    # NOTE: the original placed the class docstring after this attribute,
    # which left `EventServiceBase.__doc__` unset; it now precedes it.
    session_event: str = "session_event"

    def dispatch(self, event_name: str, payload: Any) -> None:
        """No-op event delivery; subclasses override this to actually publish events."""
        pass

    def __emit_session_event(self, event_name: str, payload: dict) -> None:
        """Stamp `payload` with the current time and dispatch it under the shared session event name."""
        payload["timestamp"] = get_timestamp()
        self.dispatch(
            event_name=EventServiceBase.session_event,
            payload=dict(event=event_name, data=payload),
        )

    # Define events here for every event in the system.
    # This will make them easier to integrate until we find a schema generator.
    def emit_generator_progress(
        self,
        graph_execution_state_id: str,
        node: dict,
        source_node_id: str,
        progress_image: ProgressImage | None,
        step: int,
        total_steps: int,
    ) -> None:
        """Emitted when there is generation progress"""
        self.__emit_session_event(
            event_name="generator_progress",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                node=node,
                source_node_id=source_node_id,
                # ProgressImage is a pydantic model; serialize it for transport.
                progress_image=progress_image.dict() if progress_image is not None else None,
                step=step,
                total_steps=total_steps,
            ),
        )

    def emit_invocation_complete(
        self,
        graph_execution_state_id: str,
        result: dict,
        node: dict,
        source_node_id: str,
    ) -> None:
        """Emitted when an invocation has completed"""
        self.__emit_session_event(
            event_name="invocation_complete",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                node=node,
                source_node_id=source_node_id,
                result=result,
            ),
        )

    def emit_invocation_error(
        self,
        graph_execution_state_id: str,
        node: dict,
        source_node_id: str,
        error: str,
    ) -> None:
        """Emitted when an invocation has encountered an error"""
        # (Original docstring incorrectly said "has completed".)
        self.__emit_session_event(
            event_name="invocation_error",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                node=node,
                source_node_id=source_node_id,
                error=error,
            ),
        )

    def emit_invocation_started(
        self, graph_execution_state_id: str, node: dict, source_node_id: str
    ) -> None:
        """Emitted when an invocation has started"""
        self.__emit_session_event(
            event_name="invocation_started",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                node=node,
                source_node_id=source_node_id,
            ),
        )

    def emit_graph_execution_complete(self, graph_execution_state_id: str) -> None:
        """Emitted when a session has completed all invocations"""
        self.__emit_session_event(
            event_name="graph_execution_state_complete",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
            ),
        )
|
||||||
1262
invokeai/app/services/graph.py
Normal file
1262
invokeai/app/services/graph.py
Normal file
File diff suppressed because it is too large
Load Diff
186
invokeai/app/services/image_file_storage.py
Normal file
186
invokeai/app/services/image_file_storage.py
Normal file
@@ -0,0 +1,186 @@
|
|||||||
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from pathlib import Path
|
||||||
|
from queue import Queue
|
||||||
|
from typing import Dict, Optional
|
||||||
|
|
||||||
|
from PIL.Image import Image as PILImageType
|
||||||
|
from PIL import Image, PngImagePlugin
|
||||||
|
from send2trash import send2trash
|
||||||
|
|
||||||
|
from invokeai.app.models.image import ResourceOrigin
|
||||||
|
from invokeai.app.models.metadata import ImageMetadata
|
||||||
|
from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: Should these excpetions subclass existing python exceptions?
|
||||||
|
class ImageFileNotFoundException(Exception):
    """Raised when an image file is not found in storage."""

    def __init__(self, message="Image file not found"):
        # Forward the (possibly default) message to the Exception base class.
        super().__init__(message)
|
||||||
|
|
||||||
|
|
||||||
|
class ImageFileSaveException(Exception):
    """Raised when an image cannot be saved."""

    def __init__(self, message="Image file not saved"):
        # Forward the (possibly default) message to the Exception base class.
        super().__init__(message)
|
||||||
|
|
||||||
|
|
||||||
|
class ImageFileDeleteException(Exception):
    """Raised when an image cannot be deleted."""

    def __init__(self, message="Image file not deleted"):
        # Forward the (possibly default) message to the Exception base class.
        super().__init__(message)
|
||||||
|
|
||||||
|
|
||||||
|
class ImageFileStorageBase(ABC):
    """Low-level service responsible for storing and retrieving image files."""

    @abstractmethod
    def get(self, image_name: str) -> PILImageType:
        """Retrieves an image as PIL Image."""
        pass

    @abstractmethod
    def get_path(self, image_name: str, thumbnail: bool = False) -> str:
        """Gets the internal path to an image or thumbnail."""
        pass

    # TODO: We need to validate paths before starlette makes the FileResponse, else we get a
    # 500 internal server error. I don't like having this method on the service.
    @abstractmethod
    def validate_path(self, path: str) -> bool:
        """Validates the path given for an image or thumbnail."""
        pass

    @abstractmethod
    def save(
        self,
        image: PILImageType,
        image_name: str,
        metadata: Optional[ImageMetadata] = None,
        thumbnail_size: int = 256,
    ) -> None:
        """Saves an image and a WEBP thumbnail of edge length `thumbnail_size`.

        Returns None. (An earlier docstring claimed a tuple was returned; the
        signature and the disk implementation both return nothing.)
        """
        pass

    @abstractmethod
    def delete(self, image_name: str) -> None:
        """Deletes an image and its thumbnail (if one exists)."""
        pass
|
||||||
|
|
||||||
|
|
||||||
|
class DiskImageFileStorage(ImageFileStorageBase):
    """Stores images on disk"""

    __output_folder: Path
    __cache_ids: Queue  # TODO: this is an incredibly naive cache
    __cache: Dict[Path, PILImageType]
    __max_cache_size: int

    def __init__(self, output_folder: str | Path):
        self.__cache = dict()
        self.__cache_ids = Queue()
        self.__max_cache_size = 10  # TODO: get this from config

        self.__output_folder: Path = output_folder if isinstance(output_folder, Path) else Path(output_folder)
        self.__thumbnails_folder = self.__output_folder / 'thumbnails'

        # Validate required output folders at launch
        self.__validate_storage_folders()

    def get(self, image_name: str) -> PILImageType:
        """Load the named image, serving it from the in-memory cache when possible.

        Raises:
            ImageFileNotFoundException: if no file exists for `image_name`.
        """
        try:
            image_path = self.get_path(image_name)

            cache_item = self.__get_cache(image_path)
            if cache_item:
                return cache_item

            image = Image.open(image_path)
            self.__set_cache(image_path, image)
            return image
        except FileNotFoundError as e:
            raise ImageFileNotFoundException from e

    def save(
        self,
        image: PILImageType,
        image_name: str,
        metadata: Optional[ImageMetadata] = None,
        thumbnail_size: int = 256,
    ) -> None:
        """Save `image` as PNG (embedding metadata when given) plus a thumbnail.

        Raises:
            ImageFileSaveException: on any failure while writing to disk.
        """
        try:
            self.__validate_storage_folders()
            image_path = self.get_path(image_name)

            if metadata is not None:
                # Embed the serialized metadata in the PNG's text chunk.
                pnginfo = PngImagePlugin.PngInfo()
                pnginfo.add_text("invokeai", metadata.json())
                image.save(image_path, "PNG", pnginfo=pnginfo)
            else:
                image.save(image_path, "PNG")

            thumbnail_name = get_thumbnail_name(image_name)
            thumbnail_path = self.get_path(thumbnail_name, thumbnail=True)
            thumbnail_image = make_thumbnail(image, thumbnail_size)
            thumbnail_image.save(thumbnail_path)

            self.__set_cache(image_path, image)
            self.__set_cache(thumbnail_path, thumbnail_image)
        except Exception as e:
            raise ImageFileSaveException from e

    def delete(self, image_name: str) -> None:
        """Send the image and its thumbnail to the trash and drop them from the cache.

        Raises:
            ImageFileDeleteException: on any failure while deleting.
        """
        try:
            image_path = self.get_path(image_name)

            if image_path.exists():
                send2trash(image_path)
            if image_path in self.__cache:
                # NOTE: the id queue is not purged here; __set_cache's eviction
                # guard (`if cache_id in self.__cache`) tolerates stale entries.
                del self.__cache[image_path]

            thumbnail_name = get_thumbnail_name(image_name)
            thumbnail_path = self.get_path(thumbnail_name, True)

            if thumbnail_path.exists():
                send2trash(thumbnail_path)
            if thumbnail_path in self.__cache:
                del self.__cache[thumbnail_path]
        except Exception as e:
            raise ImageFileDeleteException from e

    # TODO: make this a bit more flexible for e.g. cloud storage
    def get_path(self, image_name: str, thumbnail: bool = False) -> Path:
        """Resolve the on-disk path for an image, or its thumbnail when `thumbnail` is True."""
        path = self.__output_folder / image_name

        if thumbnail:
            thumbnail_name = get_thumbnail_name(image_name)
            path = self.__thumbnails_folder / thumbnail_name

        return path

    def validate_path(self, path: str | Path) -> bool:
        """Validates the path given for an image or thumbnail."""
        path = path if isinstance(path, Path) else Path(path)
        return path.exists()

    def __validate_storage_folders(self) -> None:
        """Checks if the required output folders exist and create them if they don't"""
        folders: list[Path] = [self.__output_folder, self.__thumbnails_folder]
        for folder in folders:
            folder.mkdir(parents=True, exist_ok=True)

    def __get_cache(self, image_name: Path) -> PILImageType | None:
        return None if image_name not in self.__cache else self.__cache[image_name]

    def __set_cache(self, image_name: Path, image: PILImageType):
        # Fixed `not image_name in` to the idiomatic `image_name not in`.
        if image_name not in self.__cache:
            self.__cache[image_name] = image
            self.__cache_ids.put(image_name)  # TODO: this should refresh position for LRU cache
            if len(self.__cache) > self.__max_cache_size:
                cache_id = self.__cache_ids.get()
                # Guard: the id may already have been removed by delete().
                if cache_id in self.__cache:
                    del self.__cache[cache_id]
|
||||||
414
invokeai/app/services/image_record_storage.py
Normal file
414
invokeai/app/services/image_record_storage.py
Normal file
@@ -0,0 +1,414 @@
|
|||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from datetime import datetime
|
||||||
|
from typing import Generic, Optional, TypeVar, cast
|
||||||
|
import sqlite3
|
||||||
|
import threading
|
||||||
|
from typing import Optional, Union
|
||||||
|
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
from pydantic.generics import GenericModel
|
||||||
|
|
||||||
|
from invokeai.app.models.metadata import ImageMetadata
|
||||||
|
from invokeai.app.models.image import (
|
||||||
|
ImageCategory,
|
||||||
|
ResourceOrigin,
|
||||||
|
)
|
||||||
|
from invokeai.app.services.models.image_record import (
|
||||||
|
ImageRecord,
|
||||||
|
ImageRecordChanges,
|
||||||
|
deserialize_image_record,
|
||||||
|
)
|
||||||
|
|
||||||
|
T = TypeVar("T", bound=BaseModel)
|
||||||
|
|
||||||
|
|
||||||
|
class OffsetPaginatedResults(GenericModel, Generic[T]):
    """Offset-paginated results.

    Generic over the item model `T`; `total` is the unpaginated count so
    callers can compute how many pages remain.
    """

    # fmt: off
    items: list[T] = Field(description="Items")
    offset: int = Field(description="Offset from which to retrieve items")
    limit: int = Field(description="Limit of items to get")
    total: int = Field(description="Total number of items in result")
    # fmt: on
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: Should these excpetions subclass existing python exceptions?
|
||||||
|
class ImageRecordNotFoundException(Exception):
    """Raised when an image record is not found."""

    def __init__(self, message="Image record not found"):
        # Forward the (possibly default) message to the Exception base class.
        super().__init__(message)
|
||||||
|
|
||||||
|
|
||||||
|
class ImageRecordSaveException(Exception):
    """Raised when an image record cannot be saved."""

    def __init__(self, message="Image record not saved"):
        # Forward the (possibly default) message to the Exception base class.
        super().__init__(message)
|
||||||
|
|
||||||
|
|
||||||
|
class ImageRecordDeleteException(Exception):
    """Raised when an image record cannot be deleted."""

    def __init__(self, message="Image record not deleted"):
        # Forward the (possibly default) message to the Exception base class.
        super().__init__(message)
|
||||||
|
|
||||||
|
|
||||||
|
class ImageRecordStorageBase(ABC):
    """Low-level service responsible for interfacing with the image record store."""

    # TODO: Implement an `update()` method
    # NOTE(review): `update` is declared below — this TODO looks stale; confirm and remove.

    @abstractmethod
    def get(self, image_name: str) -> ImageRecord:
        """Gets an image record."""
        pass

    @abstractmethod
    def update(
        self,
        image_name: str,
        changes: ImageRecordChanges,
    ) -> None:
        """Updates an image record."""
        pass

    @abstractmethod
    def get_many(
        self,
        offset: int = 0,
        limit: int = 10,
        image_origin: Optional[ResourceOrigin] = None,
        categories: Optional[list[ImageCategory]] = None,
        is_intermediate: Optional[bool] = None,
    ) -> OffsetPaginatedResults[ImageRecord]:
        """Gets a page of image records."""
        pass

    # TODO: The database has a nullable `deleted_at` column, currently unused.
    # Should we implement soft deletes? Would need coordination with ImageFileStorage.
    @abstractmethod
    def delete(self, image_name: str) -> None:
        """Deletes an image record."""
        pass

    @abstractmethod
    def save(
        self,
        image_name: str,
        image_origin: ResourceOrigin,
        image_category: ImageCategory,
        # NOTE(review): the SQLite implementation declares these in the order
        # (session_id, width, height, node_id) — positional callers would break;
        # confirm all call sites use keyword arguments.
        width: int,
        height: int,
        session_id: Optional[str],
        node_id: Optional[str],
        metadata: Optional[ImageMetadata],
        is_intermediate: bool = False,
    ) -> datetime:
        """Saves an image record and returns its `created_at` timestamp."""
        pass
|
||||||
|
|
||||||
|
|
||||||
|
class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||||
|
_filename: str
|
||||||
|
_conn: sqlite3.Connection
|
||||||
|
_cursor: sqlite3.Cursor
|
||||||
|
_lock: threading.Lock
|
||||||
|
|
||||||
|
def __init__(self, filename: str) -> None:
|
||||||
|
super().__init__()
|
||||||
|
self._filename = filename
|
||||||
|
self._conn = sqlite3.connect(filename, check_same_thread=False)
|
||||||
|
# Enable row factory to get rows as dictionaries (must be done before making the cursor!)
|
||||||
|
self._conn.row_factory = sqlite3.Row
|
||||||
|
self._cursor = self._conn.cursor()
|
||||||
|
self._lock = threading.Lock()
|
||||||
|
|
||||||
|
try:
|
||||||
|
self._lock.acquire()
|
||||||
|
# Enable foreign keys
|
||||||
|
self._conn.execute("PRAGMA foreign_keys = ON;")
|
||||||
|
self._create_tables()
|
||||||
|
self._conn.commit()
|
||||||
|
finally:
|
||||||
|
self._lock.release()
|
||||||
|
|
||||||
|
def _create_tables(self) -> None:
|
||||||
|
"""Creates the tables for the `images` database."""
|
||||||
|
|
||||||
|
# Create the `images` table.
|
||||||
|
self._cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
CREATE TABLE IF NOT EXISTS images (
|
||||||
|
image_name TEXT NOT NULL PRIMARY KEY,
|
||||||
|
-- This is an enum in python, unrestricted string here for flexibility
|
||||||
|
image_origin TEXT NOT NULL,
|
||||||
|
-- This is an enum in python, unrestricted string here for flexibility
|
||||||
|
image_category TEXT NOT NULL,
|
||||||
|
width INTEGER NOT NULL,
|
||||||
|
height INTEGER NOT NULL,
|
||||||
|
session_id TEXT,
|
||||||
|
node_id TEXT,
|
||||||
|
metadata TEXT,
|
||||||
|
is_intermediate BOOLEAN DEFAULT FALSE,
|
||||||
|
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||||
|
-- Updated via trigger
|
||||||
|
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||||
|
-- Soft delete, currently unused
|
||||||
|
deleted_at DATETIME
|
||||||
|
);
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create the `images` table indices.
|
||||||
|
self._cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
CREATE UNIQUE INDEX IF NOT EXISTS idx_images_image_name ON images(image_name);
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
self._cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_images_image_origin ON images(image_origin);
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
self._cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_images_image_category ON images(image_category);
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
self._cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_images_created_at ON images(created_at);
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add trigger for `updated_at`.
|
||||||
|
self._cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
CREATE TRIGGER IF NOT EXISTS tg_images_updated_at
|
||||||
|
AFTER UPDATE
|
||||||
|
ON images FOR EACH ROW
|
||||||
|
BEGIN
|
||||||
|
UPDATE images SET updated_at = current_timestamp
|
||||||
|
WHERE image_name = old.image_name;
|
||||||
|
END;
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
|
||||||
|
def get(self, image_name: str) -> Union[ImageRecord, None]:
|
||||||
|
try:
|
||||||
|
self._lock.acquire()
|
||||||
|
|
||||||
|
self._cursor.execute(
|
||||||
|
f"""--sql
|
||||||
|
SELECT * FROM images
|
||||||
|
WHERE image_name = ?;
|
||||||
|
""",
|
||||||
|
(image_name,),
|
||||||
|
)
|
||||||
|
|
||||||
|
result = cast(Union[sqlite3.Row, None], self._cursor.fetchone())
|
||||||
|
except sqlite3.Error as e:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise ImageRecordNotFoundException from e
|
||||||
|
finally:
|
||||||
|
self._lock.release()
|
||||||
|
|
||||||
|
if not result:
|
||||||
|
raise ImageRecordNotFoundException
|
||||||
|
|
||||||
|
return deserialize_image_record(dict(result))
|
||||||
|
|
||||||
|
def update(
|
||||||
|
self,
|
||||||
|
image_name: str,
|
||||||
|
changes: ImageRecordChanges,
|
||||||
|
) -> None:
|
||||||
|
try:
|
||||||
|
self._lock.acquire()
|
||||||
|
# Change the category of the image
|
||||||
|
if changes.image_category is not None:
|
||||||
|
self._cursor.execute(
|
||||||
|
f"""--sql
|
||||||
|
UPDATE images
|
||||||
|
SET image_category = ?
|
||||||
|
WHERE image_name = ?;
|
||||||
|
""",
|
||||||
|
(changes.image_category, image_name),
|
||||||
|
)
|
||||||
|
|
||||||
|
# Change the session associated with the image
|
||||||
|
if changes.session_id is not None:
|
||||||
|
self._cursor.execute(
|
||||||
|
f"""--sql
|
||||||
|
UPDATE images
|
||||||
|
SET session_id = ?
|
||||||
|
WHERE image_name = ?;
|
||||||
|
""",
|
||||||
|
(changes.session_id, image_name),
|
||||||
|
)
|
||||||
|
|
||||||
|
# Change the image's `is_intermediate`` flag
|
||||||
|
if changes.is_intermediate is not None:
|
||||||
|
self._cursor.execute(
|
||||||
|
f"""--sql
|
||||||
|
UPDATE images
|
||||||
|
SET is_intermediate = ?
|
||||||
|
WHERE image_name = ?;
|
||||||
|
""",
|
||||||
|
(changes.is_intermediate, image_name),
|
||||||
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except sqlite3.Error as e:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise ImageRecordSaveException from e
|
||||||
|
finally:
|
||||||
|
self._lock.release()
|
||||||
|
|
||||||
|
def get_many(
|
||||||
|
self,
|
||||||
|
offset: int = 0,
|
||||||
|
limit: int = 10,
|
||||||
|
image_origin: Optional[ResourceOrigin] = None,
|
||||||
|
categories: Optional[list[ImageCategory]] = None,
|
||||||
|
is_intermediate: Optional[bool] = None,
|
||||||
|
) -> OffsetPaginatedResults[ImageRecord]:
|
||||||
|
try:
|
||||||
|
self._lock.acquire()
|
||||||
|
|
||||||
|
# Manually build two queries - one for the count, one for the records
|
||||||
|
|
||||||
|
count_query = f"""SELECT COUNT(*) FROM images WHERE 1=1\n"""
|
||||||
|
images_query = f"""SELECT * FROM images WHERE 1=1\n"""
|
||||||
|
|
||||||
|
query_conditions = ""
|
||||||
|
query_params = []
|
||||||
|
|
||||||
|
if image_origin is not None:
|
||||||
|
query_conditions += f"""AND image_origin = ?\n"""
|
||||||
|
query_params.append(image_origin.value)
|
||||||
|
|
||||||
|
if categories is not None:
|
||||||
|
## Convert the enum values to unique list of strings
|
||||||
|
category_strings = list(map(lambda c: c.value, set(categories)))
|
||||||
|
# Create the correct length of placeholders
|
||||||
|
placeholders = ",".join("?" * len(category_strings))
|
||||||
|
query_conditions += f"AND image_category IN ( {placeholders} )\n"
|
||||||
|
|
||||||
|
# Unpack the included categories into the query params
|
||||||
|
for c in category_strings:
|
||||||
|
query_params.append(c)
|
||||||
|
|
||||||
|
if is_intermediate is not None:
|
||||||
|
query_conditions += f"""AND is_intermediate = ?\n"""
|
||||||
|
query_params.append(is_intermediate)
|
||||||
|
|
||||||
|
query_pagination = f"""ORDER BY created_at DESC LIMIT ? OFFSET ?\n"""
|
||||||
|
|
||||||
|
# Final images query with pagination
|
||||||
|
images_query += query_conditions + query_pagination + ";"
|
||||||
|
# Add all the parameters
|
||||||
|
images_params = query_params.copy()
|
||||||
|
images_params.append(limit)
|
||||||
|
images_params.append(offset)
|
||||||
|
# Build the list of images, deserializing each row
|
||||||
|
self._cursor.execute(images_query, images_params)
|
||||||
|
result = cast(list[sqlite3.Row], self._cursor.fetchall())
|
||||||
|
images = list(map(lambda r: deserialize_image_record(dict(r)), result))
|
||||||
|
|
||||||
|
# Set up and execute the count query, without pagination
|
||||||
|
count_query += query_conditions + ";"
|
||||||
|
count_params = query_params.copy()
|
||||||
|
self._cursor.execute(count_query, count_params)
|
||||||
|
count = self._cursor.fetchone()[0]
|
||||||
|
except sqlite3.Error as e:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise e
|
||||||
|
finally:
|
||||||
|
self._lock.release()
|
||||||
|
|
||||||
|
return OffsetPaginatedResults(
|
||||||
|
items=images, offset=offset, limit=limit, total=count
|
||||||
|
)
|
||||||
|
|
||||||
|
def delete(self, image_name: str) -> None:
|
||||||
|
try:
|
||||||
|
self._lock.acquire()
|
||||||
|
self._cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
DELETE FROM images
|
||||||
|
WHERE image_name = ?;
|
||||||
|
""",
|
||||||
|
(image_name,),
|
||||||
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except sqlite3.Error as e:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise ImageRecordDeleteException from e
|
||||||
|
finally:
|
||||||
|
self._lock.release()
|
||||||
|
|
||||||
|
def save(
|
||||||
|
self,
|
||||||
|
image_name: str,
|
||||||
|
image_origin: ResourceOrigin,
|
||||||
|
image_category: ImageCategory,
|
||||||
|
session_id: Optional[str],
|
||||||
|
width: int,
|
||||||
|
height: int,
|
||||||
|
node_id: Optional[str],
|
||||||
|
metadata: Optional[ImageMetadata],
|
||||||
|
is_intermediate: bool = False,
|
||||||
|
) -> datetime:
|
||||||
|
try:
|
||||||
|
metadata_json = (
|
||||||
|
None if metadata is None else metadata.json(exclude_none=True)
|
||||||
|
)
|
||||||
|
self._lock.acquire()
|
||||||
|
self._cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
INSERT OR IGNORE INTO images (
|
||||||
|
image_name,
|
||||||
|
image_origin,
|
||||||
|
image_category,
|
||||||
|
width,
|
||||||
|
height,
|
||||||
|
node_id,
|
||||||
|
session_id,
|
||||||
|
metadata,
|
||||||
|
is_intermediate
|
||||||
|
)
|
||||||
|
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);
|
||||||
|
""",
|
||||||
|
(
|
||||||
|
image_name,
|
||||||
|
image_origin.value,
|
||||||
|
image_category.value,
|
||||||
|
width,
|
||||||
|
height,
|
||||||
|
node_id,
|
||||||
|
session_id,
|
||||||
|
metadata_json,
|
||||||
|
is_intermediate,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
|
||||||
|
self._cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
SELECT created_at
|
||||||
|
FROM images
|
||||||
|
WHERE image_name = ?;
|
||||||
|
""",
|
||||||
|
(image_name,),
|
||||||
|
)
|
||||||
|
|
||||||
|
created_at = datetime.fromisoformat(self._cursor.fetchone()[0])
|
||||||
|
|
||||||
|
return created_at
|
||||||
|
except sqlite3.Error as e:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise ImageRecordSaveException from e
|
||||||
|
finally:
|
||||||
|
self._lock.release()
|
||||||
380
invokeai/app/services/images.py
Normal file
380
invokeai/app/services/images.py
Normal file
@@ -0,0 +1,380 @@
|
|||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from logging import Logger
|
||||||
|
from typing import Optional, TYPE_CHECKING, Union
|
||||||
|
from PIL.Image import Image as PILImageType
|
||||||
|
|
||||||
|
from invokeai.app.models.image import (
|
||||||
|
ImageCategory,
|
||||||
|
ResourceOrigin,
|
||||||
|
InvalidImageCategoryException,
|
||||||
|
InvalidOriginException,
|
||||||
|
)
|
||||||
|
from invokeai.app.models.metadata import ImageMetadata
|
||||||
|
from invokeai.app.services.image_record_storage import (
|
||||||
|
ImageRecordDeleteException,
|
||||||
|
ImageRecordNotFoundException,
|
||||||
|
ImageRecordSaveException,
|
||||||
|
ImageRecordStorageBase,
|
||||||
|
OffsetPaginatedResults,
|
||||||
|
)
|
||||||
|
from invokeai.app.services.models.image_record import (
|
||||||
|
ImageRecord,
|
||||||
|
ImageDTO,
|
||||||
|
ImageRecordChanges,
|
||||||
|
image_record_to_dto,
|
||||||
|
)
|
||||||
|
from invokeai.app.services.image_file_storage import (
|
||||||
|
ImageFileDeleteException,
|
||||||
|
ImageFileNotFoundException,
|
||||||
|
ImageFileSaveException,
|
||||||
|
ImageFileStorageBase,
|
||||||
|
)
|
||||||
|
from invokeai.app.services.item_storage import ItemStorageABC, PaginatedResults
|
||||||
|
from invokeai.app.services.metadata import MetadataServiceBase
|
||||||
|
from invokeai.app.services.resource_name import NameServiceBase
|
||||||
|
from invokeai.app.services.urls import UrlServiceBase
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from invokeai.app.services.graph import GraphExecutionState
|
||||||
|
|
||||||
|
|
||||||
|
class ImageServiceABC(ABC):
    """High-level service for image management."""

    @abstractmethod
    def create(
        self,
        image: PILImageType,
        image_origin: ResourceOrigin,
        image_category: ImageCategory,
        node_id: Optional[str] = None,
        session_id: Optional[str] = None,
        # NOTE(review): the concrete ImageService.create names this flag
        # `is_intermediate` — keyword callers cannot satisfy both; confirm
        # which name is intended and align the signatures.
        intermediate: bool = False,
    ) -> ImageDTO:
        """Creates an image, storing the file and its metadata."""
        pass

    @abstractmethod
    def update(
        self,
        image_name: str,
        changes: ImageRecordChanges,
    ) -> ImageDTO:
        """Updates an image."""
        pass

    @abstractmethod
    def get_pil_image(self, image_name: str) -> PILImageType:
        """Gets an image as a PIL image."""
        pass

    @abstractmethod
    def get_record(self, image_name: str) -> ImageRecord:
        """Gets an image record."""
        pass

    @abstractmethod
    def get_dto(self, image_name: str) -> ImageDTO:
        """Gets an image DTO."""
        pass

    @abstractmethod
    def get_path(self, image_name: str) -> str:
        """Gets an image's path."""
        pass

    @abstractmethod
    def validate_path(self, path: str) -> bool:
        """Validates an image's path."""
        pass

    @abstractmethod
    def get_url(self, image_name: str, thumbnail: bool = False) -> str:
        """Gets an image's or thumbnail's URL."""
        pass

    @abstractmethod
    def get_many(
        self,
        offset: int = 0,
        limit: int = 10,
        image_origin: Optional[ResourceOrigin] = None,
        categories: Optional[list[ImageCategory]] = None,
        is_intermediate: Optional[bool] = None,
    ) -> OffsetPaginatedResults[ImageDTO]:
        """Gets a paginated list of image DTOs."""
        pass

    @abstractmethod
    def delete(self, image_name: str):
        """Deletes an image."""
        pass
|
||||||
|
|
||||||
|
|
||||||
|
class ImageServiceDependencies:
    """Service dependencies for the ImageService."""

    records: ImageRecordStorageBase
    files: ImageFileStorageBase
    metadata: MetadataServiceBase
    urls: UrlServiceBase
    logger: Logger
    names: NameServiceBase
    graph_execution_manager: ItemStorageABC["GraphExecutionState"]

    def __init__(
        self,
        image_record_storage: ImageRecordStorageBase,
        image_file_storage: ImageFileStorageBase,
        metadata: MetadataServiceBase,
        url: UrlServiceBase,
        logger: Logger,
        names: NameServiceBase,
        graph_execution_manager: ItemStorageABC["GraphExecutionState"],
    ):
        # Storage backends for the image records and the image files themselves.
        self.records = image_record_storage
        self.files = image_file_storage
        # Supporting services: metadata extraction, URL building, naming, logging.
        self.metadata = metadata
        self.urls = url
        self.names = names
        self.logger = logger
        # Session store, used to look up graph state for metadata.
        self.graph_execution_manager = graph_execution_manager
|
||||||
|
|
||||||
|
|
||||||
|
class ImageService(ImageServiceABC):
|
||||||
|
_services: ImageServiceDependencies
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
image_record_storage: ImageRecordStorageBase,
|
||||||
|
image_file_storage: ImageFileStorageBase,
|
||||||
|
metadata: MetadataServiceBase,
|
||||||
|
url: UrlServiceBase,
|
||||||
|
logger: Logger,
|
||||||
|
names: NameServiceBase,
|
||||||
|
graph_execution_manager: ItemStorageABC["GraphExecutionState"],
|
||||||
|
):
|
||||||
|
self._services = ImageServiceDependencies(
|
||||||
|
image_record_storage=image_record_storage,
|
||||||
|
image_file_storage=image_file_storage,
|
||||||
|
metadata=metadata,
|
||||||
|
url=url,
|
||||||
|
logger=logger,
|
||||||
|
names=names,
|
||||||
|
graph_execution_manager=graph_execution_manager,
|
||||||
|
)
|
||||||
|
|
||||||
|
def create(
|
||||||
|
self,
|
||||||
|
image: PILImageType,
|
||||||
|
image_origin: ResourceOrigin,
|
||||||
|
image_category: ImageCategory,
|
||||||
|
node_id: Optional[str] = None,
|
||||||
|
session_id: Optional[str] = None,
|
||||||
|
is_intermediate: bool = False,
|
||||||
|
) -> ImageDTO:
|
||||||
|
if image_origin not in ResourceOrigin:
|
||||||
|
raise InvalidOriginException
|
||||||
|
|
||||||
|
if image_category not in ImageCategory:
|
||||||
|
raise InvalidImageCategoryException
|
||||||
|
|
||||||
|
image_name = self._services.names.create_image_name()
|
||||||
|
|
||||||
|
metadata = self._get_metadata(session_id, node_id)
|
||||||
|
|
||||||
|
(width, height) = image.size
|
||||||
|
|
||||||
|
try:
|
||||||
|
# TODO: Consider using a transaction here to ensure consistency between storage and database
|
||||||
|
created_at = self._services.records.save(
|
||||||
|
# Non-nullable fields
|
||||||
|
image_name=image_name,
|
||||||
|
image_origin=image_origin,
|
||||||
|
image_category=image_category,
|
||||||
|
width=width,
|
||||||
|
height=height,
|
||||||
|
# Meta fields
|
||||||
|
is_intermediate=is_intermediate,
|
||||||
|
# Nullable fields
|
||||||
|
node_id=node_id,
|
||||||
|
session_id=session_id,
|
||||||
|
metadata=metadata,
|
||||||
|
)
|
||||||
|
|
||||||
|
self._services.files.save(
|
||||||
|
image_name=image_name,
|
||||||
|
image=image,
|
||||||
|
metadata=metadata,
|
||||||
|
)
|
||||||
|
|
||||||
|
image_url = self._services.urls.get_image_url(image_name)
|
||||||
|
thumbnail_url = self._services.urls.get_image_url(image_name, True)
|
||||||
|
|
||||||
|
return ImageDTO(
|
||||||
|
# Non-nullable fields
|
||||||
|
image_name=image_name,
|
||||||
|
image_origin=image_origin,
|
||||||
|
image_category=image_category,
|
||||||
|
width=width,
|
||||||
|
height=height,
|
||||||
|
# Nullable fields
|
||||||
|
node_id=node_id,
|
||||||
|
session_id=session_id,
|
||||||
|
metadata=metadata,
|
||||||
|
# Meta fields
|
||||||
|
created_at=created_at,
|
||||||
|
updated_at=created_at, # this is always the same as the created_at at this time
|
||||||
|
deleted_at=None,
|
||||||
|
is_intermediate=is_intermediate,
|
||||||
|
# Extra non-nullable fields for DTO
|
||||||
|
image_url=image_url,
|
||||||
|
thumbnail_url=thumbnail_url,
|
||||||
|
)
|
||||||
|
except ImageRecordSaveException:
|
||||||
|
self._services.logger.error("Failed to save image record")
|
||||||
|
raise
|
||||||
|
except ImageFileSaveException:
|
||||||
|
self._services.logger.error("Failed to save image file")
|
||||||
|
raise
|
||||||
|
except Exception as e:
|
||||||
|
self._services.logger.error("Problem saving image record and file")
|
||||||
|
raise e
|
||||||
|
|
||||||
|
def update(
|
||||||
|
self,
|
||||||
|
image_name: str,
|
||||||
|
changes: ImageRecordChanges,
|
||||||
|
) -> ImageDTO:
|
||||||
|
try:
|
||||||
|
self._services.records.update(image_name, changes)
|
||||||
|
return self.get_dto(image_name)
|
||||||
|
except ImageRecordSaveException:
|
||||||
|
self._services.logger.error("Failed to update image record")
|
||||||
|
raise
|
||||||
|
except Exception as e:
|
||||||
|
self._services.logger.error("Problem updating image record")
|
||||||
|
raise e
|
||||||
|
|
||||||
|
def get_pil_image(self, image_name: str) -> PILImageType:
|
||||||
|
try:
|
||||||
|
return self._services.files.get(image_name)
|
||||||
|
except ImageFileNotFoundException:
|
||||||
|
self._services.logger.error("Failed to get image file")
|
||||||
|
raise
|
||||||
|
except Exception as e:
|
||||||
|
self._services.logger.error("Problem getting image file")
|
||||||
|
raise e
|
||||||
|
|
||||||
|
def get_record(self, image_name: str) -> ImageRecord:
|
||||||
|
try:
|
||||||
|
return self._services.records.get(image_name)
|
||||||
|
except ImageRecordNotFoundException:
|
||||||
|
self._services.logger.error("Image record not found")
|
||||||
|
raise
|
||||||
|
except Exception as e:
|
||||||
|
self._services.logger.error("Problem getting image record")
|
||||||
|
raise e
|
||||||
|
|
||||||
|
def get_dto(self, image_name: str) -> ImageDTO:
|
||||||
|
try:
|
||||||
|
image_record = self._services.records.get(image_name)
|
||||||
|
|
||||||
|
image_dto = image_record_to_dto(
|
||||||
|
image_record,
|
||||||
|
self._services.urls.get_image_url(image_name),
|
||||||
|
self._services.urls.get_image_url(image_name, True),
|
||||||
|
)
|
||||||
|
|
||||||
|
return image_dto
|
||||||
|
except ImageRecordNotFoundException:
|
||||||
|
self._services.logger.error("Image record not found")
|
||||||
|
raise
|
||||||
|
except Exception as e:
|
||||||
|
self._services.logger.error("Problem getting image DTO")
|
||||||
|
raise e
|
||||||
|
|
||||||
|
def get_path(self, image_name: str, thumbnail: bool = False) -> str:
|
||||||
|
try:
|
||||||
|
return self._services.files.get_path(image_name, thumbnail)
|
||||||
|
except Exception as e:
|
||||||
|
self._services.logger.error("Problem getting image path")
|
||||||
|
raise e
|
||||||
|
|
||||||
|
def validate_path(self, path: str) -> bool:
|
||||||
|
try:
|
||||||
|
return self._services.files.validate_path(path)
|
||||||
|
except Exception as e:
|
||||||
|
self._services.logger.error("Problem validating image path")
|
||||||
|
raise e
|
||||||
|
|
||||||
|
def get_url(self, image_name: str, thumbnail: bool = False) -> str:
|
||||||
|
try:
|
||||||
|
return self._services.urls.get_image_url(image_name, thumbnail)
|
||||||
|
except Exception as e:
|
||||||
|
self._services.logger.error("Problem getting image path")
|
||||||
|
raise e
|
||||||
|
|
||||||
|
def get_many(
|
||||||
|
self,
|
||||||
|
offset: int = 0,
|
||||||
|
limit: int = 10,
|
||||||
|
image_origin: Optional[ResourceOrigin] = None,
|
||||||
|
categories: Optional[list[ImageCategory]] = None,
|
||||||
|
is_intermediate: Optional[bool] = None,
|
||||||
|
) -> OffsetPaginatedResults[ImageDTO]:
|
||||||
|
try:
|
||||||
|
results = self._services.records.get_many(
|
||||||
|
offset,
|
||||||
|
limit,
|
||||||
|
image_origin,
|
||||||
|
categories,
|
||||||
|
is_intermediate,
|
||||||
|
)
|
||||||
|
|
||||||
|
image_dtos = list(
|
||||||
|
map(
|
||||||
|
lambda r: image_record_to_dto(
|
||||||
|
r,
|
||||||
|
self._services.urls.get_image_url(r.image_name),
|
||||||
|
self._services.urls.get_image_url(r.image_name, True),
|
||||||
|
),
|
||||||
|
results.items,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
return OffsetPaginatedResults[ImageDTO](
|
||||||
|
items=image_dtos,
|
||||||
|
offset=results.offset,
|
||||||
|
limit=results.limit,
|
||||||
|
total=results.total,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
self._services.logger.error("Problem getting paginated image DTOs")
|
||||||
|
raise e
|
||||||
|
|
||||||
|
def delete(self, image_name: str):
|
||||||
|
try:
|
||||||
|
self._services.files.delete(image_name)
|
||||||
|
self._services.records.delete(image_name)
|
||||||
|
except ImageRecordDeleteException:
|
||||||
|
self._services.logger.error(f"Failed to delete image record")
|
||||||
|
raise
|
||||||
|
except ImageFileDeleteException:
|
||||||
|
self._services.logger.error(f"Failed to delete image file")
|
||||||
|
raise
|
||||||
|
except Exception as e:
|
||||||
|
self._services.logger.error("Problem deleting image record and file")
|
||||||
|
raise e
|
||||||
|
|
||||||
|
def _get_metadata(
|
||||||
|
self, session_id: Optional[str] = None, node_id: Optional[str] = None
|
||||||
|
) -> Union[ImageMetadata, None]:
|
||||||
|
"""Get the metadata for a node."""
|
||||||
|
metadata = None
|
||||||
|
|
||||||
|
if node_id is not None and session_id is not None:
|
||||||
|
session = self._services.graph_execution_manager.get(session_id)
|
||||||
|
metadata = self._services.metadata.create_image_metadata(session, node_id)
|
||||||
|
|
||||||
|
return metadata
|
||||||
68
invokeai/app/services/invocation_queue.py
Normal file
68
invokeai/app/services/invocation_queue.py
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
import time
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from queue import Queue
|
||||||
|
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
|
|
||||||
|
class InvocationQueueItem(BaseModel):
|
||||||
|
graph_execution_state_id: str = Field(description="The ID of the graph execution state")
|
||||||
|
invocation_id: str = Field(description="The ID of the node being invoked")
|
||||||
|
invoke_all: bool = Field(default=False)
|
||||||
|
timestamp: float = Field(default_factory=time.time)
|
||||||
|
|
||||||
|
|
||||||
|
class InvocationQueueABC(ABC):
|
||||||
|
"""Abstract base class for all invocation queues"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def get(self) -> InvocationQueueItem:
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def put(self, item: InvocationQueueItem | None) -> None:
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def cancel(self, graph_execution_state_id: str) -> None:
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def is_canceled(self, graph_execution_state_id: str) -> bool:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class MemoryInvocationQueue(InvocationQueueABC):
|
||||||
|
__queue: Queue
|
||||||
|
__cancellations: dict[str, float]
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.__queue = Queue()
|
||||||
|
self.__cancellations = dict()
|
||||||
|
|
||||||
|
def get(self) -> InvocationQueueItem:
|
||||||
|
item = self.__queue.get()
|
||||||
|
|
||||||
|
while isinstance(item, InvocationQueueItem) \
|
||||||
|
and item.graph_execution_state_id in self.__cancellations \
|
||||||
|
and self.__cancellations[item.graph_execution_state_id] > item.timestamp:
|
||||||
|
item = self.__queue.get()
|
||||||
|
|
||||||
|
# Clear old items
|
||||||
|
for graph_execution_state_id in list(self.__cancellations.keys()):
|
||||||
|
if self.__cancellations[graph_execution_state_id] < item.timestamp:
|
||||||
|
del self.__cancellations[graph_execution_state_id]
|
||||||
|
|
||||||
|
return item
|
||||||
|
|
||||||
|
def put(self, item: InvocationQueueItem | None) -> None:
|
||||||
|
self.__queue.put(item)
|
||||||
|
|
||||||
|
def cancel(self, graph_execution_state_id: str) -> None:
|
||||||
|
if graph_execution_state_id not in self.__cancellations:
|
||||||
|
self.__cancellations[graph_execution_state_id] = time.time()
|
||||||
|
|
||||||
|
def is_canceled(self, graph_execution_state_id: str) -> bool:
|
||||||
|
return graph_execution_state_id in self.__cancellations
|
||||||
60
invokeai/app/services/invocation_services.py
Normal file
60
invokeai/app/services/invocation_services.py
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team
|
||||||
|
from __future__ import annotations
|
||||||
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from logging import Logger
|
||||||
|
from invokeai.app.services.images import ImageService
|
||||||
|
from invokeai.backend import ModelManager
|
||||||
|
from invokeai.app.services.events import EventServiceBase
|
||||||
|
from invokeai.app.services.latent_storage import LatentsStorageBase
|
||||||
|
from invokeai.app.services.restoration_services import RestorationServices
|
||||||
|
from invokeai.app.services.invocation_queue import InvocationQueueABC
|
||||||
|
from invokeai.app.services.item_storage import ItemStorageABC
|
||||||
|
from invokeai.app.services.config import InvokeAISettings
|
||||||
|
from invokeai.app.services.graph import GraphExecutionState, LibraryGraph
|
||||||
|
from invokeai.app.services.invoker import InvocationProcessorABC
|
||||||
|
|
||||||
|
|
||||||
|
class InvocationServices:
|
||||||
|
"""Services that can be used by invocations"""
|
||||||
|
|
||||||
|
# TODO: Just forward-declared everything due to circular dependencies. Fix structure.
|
||||||
|
events: "EventServiceBase"
|
||||||
|
latents: "LatentsStorageBase"
|
||||||
|
queue: "InvocationQueueABC"
|
||||||
|
model_manager: "ModelManager"
|
||||||
|
restoration: "RestorationServices"
|
||||||
|
configuration: "InvokeAISettings"
|
||||||
|
images: "ImageService"
|
||||||
|
|
||||||
|
# NOTE: we must forward-declare any types that include invocations, since invocations can use services
|
||||||
|
graph_library: "ItemStorageABC"["LibraryGraph"]
|
||||||
|
graph_execution_manager: "ItemStorageABC"["GraphExecutionState"]
|
||||||
|
processor: "InvocationProcessorABC"
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
model_manager: "ModelManager",
|
||||||
|
events: "EventServiceBase",
|
||||||
|
logger: "Logger",
|
||||||
|
latents: "LatentsStorageBase",
|
||||||
|
images: "ImageService",
|
||||||
|
queue: "InvocationQueueABC",
|
||||||
|
graph_library: "ItemStorageABC"["LibraryGraph"],
|
||||||
|
graph_execution_manager: "ItemStorageABC"["GraphExecutionState"],
|
||||||
|
processor: "InvocationProcessorABC",
|
||||||
|
restoration: "RestorationServices",
|
||||||
|
configuration: "InvokeAISettings",
|
||||||
|
):
|
||||||
|
self.model_manager = model_manager
|
||||||
|
self.events = events
|
||||||
|
self.logger = logger
|
||||||
|
self.latents = latents
|
||||||
|
self.images = images
|
||||||
|
self.queue = queue
|
||||||
|
self.graph_library = graph_library
|
||||||
|
self.graph_execution_manager = graph_execution_manager
|
||||||
|
self.processor = processor
|
||||||
|
self.restoration = restoration
|
||||||
|
self.configuration = configuration
|
||||||
@@ -2,11 +2,12 @@
|
|||||||
|
|
||||||
from abc import ABC
|
from abc import ABC
|
||||||
from threading import Event, Thread
|
from threading import Event, Thread
|
||||||
from .graph import Graph, GraphExecutionState
|
|
||||||
from .item_storage import ItemStorageABC
|
|
||||||
from ..invocations.baseinvocation import InvocationContext
|
from ..invocations.baseinvocation import InvocationContext
|
||||||
from .invocation_services import InvocationServices
|
from .graph import Graph, GraphExecutionState
|
||||||
from .invocation_queue import InvocationQueueABC, InvocationQueueItem
|
from .invocation_queue import InvocationQueueABC, InvocationQueueItem
|
||||||
|
from .invocation_services import InvocationServices
|
||||||
|
from .item_storage import ItemStorageABC
|
||||||
|
|
||||||
|
|
||||||
class Invoker:
|
class Invoker:
|
||||||
@@ -14,15 +15,15 @@ class Invoker:
|
|||||||
|
|
||||||
services: InvocationServices
|
services: InvocationServices
|
||||||
|
|
||||||
def __init__(self,
|
def __init__(self, services: InvocationServices):
|
||||||
services: InvocationServices
|
|
||||||
):
|
|
||||||
self.services = services
|
self.services = services
|
||||||
self._start()
|
self._start()
|
||||||
|
|
||||||
|
def invoke(
|
||||||
def invoke(self, graph_execution_state: GraphExecutionState, invoke_all: bool = False) -> str|None:
|
self, graph_execution_state: GraphExecutionState, invoke_all: bool = False
|
||||||
"""Determines the next node to invoke and returns the id of the invoked node, or None if there are no nodes to execute"""
|
) -> str | None:
|
||||||
|
"""Determines the next node to invoke and enqueues it, preparing if needed.
|
||||||
|
Returns the id of the queued node, or `None` if there are no nodes left to enqueue."""
|
||||||
|
|
||||||
# Get the next invocation
|
# Get the next invocation
|
||||||
invocation = graph_execution_state.next()
|
invocation = graph_execution_state.next()
|
||||||
@@ -33,58 +34,52 @@ class Invoker:
|
|||||||
self.services.graph_execution_manager.set(graph_execution_state)
|
self.services.graph_execution_manager.set(graph_execution_state)
|
||||||
|
|
||||||
# Queue the invocation
|
# Queue the invocation
|
||||||
print(f'queueing item {invocation.id}')
|
self.services.queue.put(
|
||||||
self.services.queue.put(InvocationQueueItem(
|
InvocationQueueItem(
|
||||||
#session_id = session.id,
|
# session_id = session.id,
|
||||||
graph_execution_state_id = graph_execution_state.id,
|
graph_execution_state_id=graph_execution_state.id,
|
||||||
invocation_id = invocation.id,
|
invocation_id=invocation.id,
|
||||||
invoke_all = invoke_all
|
invoke_all=invoke_all,
|
||||||
))
|
)
|
||||||
|
)
|
||||||
|
|
||||||
return invocation.id
|
return invocation.id
|
||||||
|
|
||||||
|
def create_execution_state(self, graph: Graph | None = None) -> GraphExecutionState:
|
||||||
def create_execution_state(self, graph: Graph|None = None) -> GraphExecutionState:
|
|
||||||
"""Creates a new execution state for the given graph"""
|
"""Creates a new execution state for the given graph"""
|
||||||
new_state = GraphExecutionState(graph = Graph() if graph is None else graph)
|
new_state = GraphExecutionState(graph=Graph() if graph is None else graph)
|
||||||
self.services.graph_execution_manager.set(new_state)
|
self.services.graph_execution_manager.set(new_state)
|
||||||
return new_state
|
return new_state
|
||||||
|
|
||||||
|
def cancel(self, graph_execution_state_id: str) -> None:
|
||||||
|
"""Cancels the given execution state"""
|
||||||
|
self.services.queue.cancel(graph_execution_state_id)
|
||||||
|
|
||||||
def __start_service(self, service) -> None:
|
def __start_service(self, service) -> None:
|
||||||
# Call start() method on any services that have it
|
# Call start() method on any services that have it
|
||||||
start_op = getattr(service, 'start', None)
|
start_op = getattr(service, "start", None)
|
||||||
if callable(start_op):
|
if callable(start_op):
|
||||||
start_op(self)
|
start_op(self)
|
||||||
|
|
||||||
|
|
||||||
def __stop_service(self, service) -> None:
|
def __stop_service(self, service) -> None:
|
||||||
# Call stop() method on any services that have it
|
# Call stop() method on any services that have it
|
||||||
stop_op = getattr(service, 'stop', None)
|
stop_op = getattr(service, "stop", None)
|
||||||
if callable(stop_op):
|
if callable(stop_op):
|
||||||
stop_op(self)
|
stop_op(self)
|
||||||
|
|
||||||
|
|
||||||
def _start(self) -> None:
|
def _start(self) -> None:
|
||||||
"""Starts the invoker. This is called automatically when the invoker is created."""
|
"""Starts the invoker. This is called automatically when the invoker is created."""
|
||||||
for service in vars(self.services):
|
for service in vars(self.services):
|
||||||
self.__start_service(getattr(self.services, service))
|
self.__start_service(getattr(self.services, service))
|
||||||
|
|
||||||
for service in vars(self.services):
|
|
||||||
self.__start_service(getattr(self.services, service))
|
|
||||||
|
|
||||||
|
|
||||||
def stop(self) -> None:
|
def stop(self) -> None:
|
||||||
"""Stops the invoker. A new invoker will have to be created to execute further."""
|
"""Stops the invoker. A new invoker will have to be created to execute further."""
|
||||||
# First stop all services
|
# First stop all services
|
||||||
for service in vars(self.services):
|
for service in vars(self.services):
|
||||||
self.__stop_service(getattr(self.services, service))
|
self.__stop_service(getattr(self.services, service))
|
||||||
|
|
||||||
for service in vars(self.services):
|
|
||||||
self.__stop_service(getattr(self.services, service))
|
|
||||||
|
|
||||||
self.services.queue.put(None)
|
self.services.queue.put(None)
|
||||||
|
|
||||||
|
|
||||||
class InvocationProcessorABC(ABC):
|
class InvocationProcessorABC(ABC):
|
||||||
pass
|
pass
|
||||||
@@ -1,19 +1,21 @@
|
|||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import Callable, Generic, TypeVar
|
||||||
|
|
||||||
from typing import Callable, TypeVar, Generic
|
|
||||||
from pydantic import BaseModel, Field
|
from pydantic import BaseModel, Field
|
||||||
from pydantic.generics import GenericModel
|
from pydantic.generics import GenericModel
|
||||||
from abc import ABC, abstractmethod
|
|
||||||
|
|
||||||
T = TypeVar('T', bound=BaseModel)
|
T = TypeVar("T", bound=BaseModel)
|
||||||
|
|
||||||
|
|
||||||
class PaginatedResults(GenericModel, Generic[T]):
|
class PaginatedResults(GenericModel, Generic[T]):
|
||||||
"""Paginated results"""
|
"""Paginated results"""
|
||||||
items: list[T] = Field(description = "Items")
|
#fmt: off
|
||||||
page: int = Field(description = "Current Page")
|
items: list[T] = Field(description="Items")
|
||||||
pages: int = Field(description = "Total number of pages")
|
page: int = Field(description="Current Page")
|
||||||
per_page: int = Field(description = "Number of items per page")
|
pages: int = Field(description="Total number of pages")
|
||||||
total: int = Field(description = "Total number of items in result")
|
per_page: int = Field(description="Number of items per page")
|
||||||
|
total: int = Field(description="Total number of items in result")
|
||||||
|
#fmt: on
|
||||||
|
|
||||||
class ItemStorageABC(ABC, Generic[T]):
|
class ItemStorageABC(ABC, Generic[T]):
|
||||||
_on_changed_callbacks: list[Callable[[T], None]]
|
_on_changed_callbacks: list[Callable[[T], None]]
|
||||||
@@ -24,6 +26,7 @@ class ItemStorageABC(ABC, Generic[T]):
|
|||||||
self._on_deleted_callbacks = list()
|
self._on_deleted_callbacks = list()
|
||||||
|
|
||||||
"""Base item storage class"""
|
"""Base item storage class"""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def get(self, item_id: str) -> T:
|
def get(self, item_id: str) -> T:
|
||||||
pass
|
pass
|
||||||
@@ -37,7 +40,9 @@ class ItemStorageABC(ABC, Generic[T]):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def search(self, query: str, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
|
def search(
|
||||||
|
self, query: str, page: int = 0, per_page: int = 10
|
||||||
|
) -> PaginatedResults[T]:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def on_changed(self, on_changed: Callable[[T], None]) -> None:
|
def on_changed(self, on_changed: Callable[[T], None]) -> None:
|
||||||
@@ -51,7 +56,7 @@ class ItemStorageABC(ABC, Generic[T]):
|
|||||||
def _on_changed(self, item: T) -> None:
|
def _on_changed(self, item: T) -> None:
|
||||||
for callback in self._on_changed_callbacks:
|
for callback in self._on_changed_callbacks:
|
||||||
callback(item)
|
callback(item)
|
||||||
|
|
||||||
def _on_deleted(self, item_id: str) -> None:
|
def _on_deleted(self, item_id: str) -> None:
|
||||||
for callback in self._on_deleted_callbacks:
|
for callback in self._on_deleted_callbacks:
|
||||||
callback(item_id)
|
callback(item_id)
|
||||||
94
invokeai/app/services/latent_storage.py
Normal file
94
invokeai/app/services/latent_storage.py
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from pathlib import Path
|
||||||
|
from queue import Queue
|
||||||
|
from typing import Dict
|
||||||
|
|
||||||
|
import torch
|
||||||
|
|
||||||
|
class LatentsStorageBase(ABC):
|
||||||
|
"""Responsible for storing and retrieving latents."""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def get(self, name: str) -> torch.Tensor:
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def save(self, name: str, data: torch.Tensor) -> None:
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def delete(self, name: str) -> None:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class ForwardCacheLatentsStorage(LatentsStorageBase):
|
||||||
|
"""Caches the latest N latents in memory, writing-thorugh to and reading from underlying storage"""
|
||||||
|
|
||||||
|
__cache: Dict[str, torch.Tensor]
|
||||||
|
__cache_ids: Queue
|
||||||
|
__max_cache_size: int
|
||||||
|
__underlying_storage: LatentsStorageBase
|
||||||
|
|
||||||
|
def __init__(self, underlying_storage: LatentsStorageBase, max_cache_size: int = 20):
|
||||||
|
self.__underlying_storage = underlying_storage
|
||||||
|
self.__cache = dict()
|
||||||
|
self.__cache_ids = Queue()
|
||||||
|
self.__max_cache_size = max_cache_size
|
||||||
|
|
||||||
|
def get(self, name: str) -> torch.Tensor:
|
||||||
|
cache_item = self.__get_cache(name)
|
||||||
|
if cache_item is not None:
|
||||||
|
return cache_item
|
||||||
|
|
||||||
|
latent = self.__underlying_storage.get(name)
|
||||||
|
self.__set_cache(name, latent)
|
||||||
|
return latent
|
||||||
|
|
||||||
|
def save(self, name: str, data: torch.Tensor) -> None:
|
||||||
|
self.__underlying_storage.save(name, data)
|
||||||
|
self.__set_cache(name, data)
|
||||||
|
|
||||||
|
def delete(self, name: str) -> None:
|
||||||
|
self.__underlying_storage.delete(name)
|
||||||
|
if name in self.__cache:
|
||||||
|
del self.__cache[name]
|
||||||
|
|
||||||
|
def __get_cache(self, name: str) -> torch.Tensor|None:
|
||||||
|
return None if name not in self.__cache else self.__cache[name]
|
||||||
|
|
||||||
|
def __set_cache(self, name: str, data: torch.Tensor):
|
||||||
|
if not name in self.__cache:
|
||||||
|
self.__cache[name] = data
|
||||||
|
self.__cache_ids.put(name)
|
||||||
|
if self.__cache_ids.qsize() > self.__max_cache_size:
|
||||||
|
self.__cache.pop(self.__cache_ids.get())
|
||||||
|
|
||||||
|
|
||||||
|
class DiskLatentsStorage(LatentsStorageBase):
|
||||||
|
"""Stores latents in a folder on disk without caching"""
|
||||||
|
|
||||||
|
__output_folder: str | Path
|
||||||
|
|
||||||
|
def __init__(self, output_folder: str | Path):
|
||||||
|
self.__output_folder = output_folder if isinstance(output_folder, Path) else Path(output_folder)
|
||||||
|
self.__output_folder.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
def get(self, name: str) -> torch.Tensor:
|
||||||
|
latent_path = self.get_path(name)
|
||||||
|
return torch.load(latent_path)
|
||||||
|
|
||||||
|
def save(self, name: str, data: torch.Tensor) -> None:
|
||||||
|
self.__output_folder.mkdir(parents=True, exist_ok=True)
|
||||||
|
latent_path = self.get_path(name)
|
||||||
|
torch.save(data, latent_path)
|
||||||
|
|
||||||
|
def delete(self, name: str) -> None:
|
||||||
|
latent_path = self.get_path(name)
|
||||||
|
latent_path.unlink()
|
||||||
|
|
||||||
|
|
||||||
|
def get_path(self, name: str) -> Path:
|
||||||
|
return self.__output_folder / name
|
||||||
|
|
||||||
142
invokeai/app/services/metadata.py
Normal file
142
invokeai/app/services/metadata.py
Normal file
@@ -0,0 +1,142 @@
|
|||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import Any, Union
|
||||||
|
import networkx as nx
|
||||||
|
|
||||||
|
from invokeai.app.models.metadata import ImageMetadata
|
||||||
|
from invokeai.app.services.graph import Graph, GraphExecutionState
|
||||||
|
|
||||||
|
|
||||||
|
class MetadataServiceBase(ABC):
|
||||||
|
"""Handles building metadata for nodes, images, and outputs."""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def create_image_metadata(
|
||||||
|
self, session: GraphExecutionState, node_id: str
|
||||||
|
) -> ImageMetadata:
|
||||||
|
"""Builds an ImageMetadata object for a node."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class CoreMetadataService(MetadataServiceBase):
|
||||||
|
_ANCESTOR_TYPES = ["t2l", "l2l"]
|
||||||
|
"""The ancestor types that contain the core metadata"""
|
||||||
|
|
||||||
|
_ANCESTOR_PARAMS = ["type", "steps", "model", "cfg_scale", "scheduler", "strength"]
|
||||||
|
"""The core metadata parameters in the ancestor types"""
|
||||||
|
|
||||||
|
_NOISE_FIELDS = ["seed", "width", "height"]
|
||||||
|
"""The core metadata parameters in the noise node"""
|
||||||
|
|
||||||
|
def create_image_metadata(
|
||||||
|
self, session: GraphExecutionState, node_id: str
|
||||||
|
) -> ImageMetadata:
|
||||||
|
metadata = self._build_metadata_from_graph(session, node_id)
|
||||||
|
|
||||||
|
return metadata
|
||||||
|
|
||||||
|
def _find_nearest_ancestor(self, G: nx.DiGraph, node_id: str) -> Union[str, None]:
|
||||||
|
"""
|
||||||
|
Finds the id of the nearest ancestor (of a valid type) of a given node.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
G (nx.DiGraph): The execution graph, converted in to a networkx DiGraph. Its nodes must
|
||||||
|
have the same data as the execution graph.
|
||||||
|
node_id (str): The ID of the node.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str | None: The ID of the nearest ancestor, or None if there are no valid ancestors.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Retrieve the node from the graph
|
||||||
|
node = G.nodes[node_id]
|
||||||
|
|
||||||
|
# If the node type is one of the core metadata node types, return its id
|
||||||
|
if node.get("type") in self._ANCESTOR_TYPES:
|
||||||
|
return node.get("id")
|
||||||
|
|
||||||
|
# Else, look for the ancestor in the predecessor nodes
|
||||||
|
for predecessor in G.predecessors(node_id):
|
||||||
|
result = self._find_nearest_ancestor(G, predecessor)
|
||||||
|
if result:
|
||||||
|
return result
|
||||||
|
|
||||||
|
# If there are no valid ancestors, return None
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _get_additional_metadata(
|
||||||
|
self, graph: Graph, node_id: str
|
||||||
|
) -> Union[dict[str, Any], None]:
|
||||||
|
"""
|
||||||
|
Returns additional metadata for a given node.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
graph (Graph): The execution graph.
|
||||||
|
node_id (str): The ID of the node.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
dict[str, Any] | None: A dictionary of additional metadata.
|
||||||
|
"""
|
||||||
|
|
||||||
|
metadata = {}
|
||||||
|
|
||||||
|
# Iterate over all edges in the graph
|
||||||
|
for edge in graph.edges:
|
||||||
|
dest_node_id = edge.destination.node_id
|
||||||
|
dest_field = edge.destination.field
|
||||||
|
source_node_dict = graph.nodes[edge.source.node_id].dict()
|
||||||
|
|
||||||
|
# If the destination node ID matches the given node ID, gather necessary metadata
|
||||||
|
if dest_node_id == node_id:
|
||||||
|
# Prompt
|
||||||
|
if dest_field == "positive_conditioning":
|
||||||
|
metadata["positive_conditioning"] = source_node_dict.get("prompt")
|
||||||
|
# Negative prompt
|
||||||
|
if dest_field == "negative_conditioning":
|
||||||
|
metadata["negative_conditioning"] = source_node_dict.get("prompt")
|
||||||
|
# Seed, width and height
|
||||||
|
if dest_field == "noise":
|
||||||
|
for field in self._NOISE_FIELDS:
|
||||||
|
metadata[field] = source_node_dict.get(field)
|
||||||
|
return metadata
|
||||||
|
|
||||||
|
def _build_metadata_from_graph(
|
||||||
|
self, session: GraphExecutionState, node_id: str
|
||||||
|
) -> ImageMetadata:
|
||||||
|
"""
|
||||||
|
Builds an ImageMetadata object for a node.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
session (GraphExecutionState): The session.
|
||||||
|
node_id (str): The ID of the node.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ImageMetadata: The metadata for the node.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# We need to do all the traversal on the execution graph
|
||||||
|
graph = session.execution_graph
|
||||||
|
|
||||||
|
# Find the nearest `t2l`/`l2l` ancestor of the given node
|
||||||
|
ancestor_id = self._find_nearest_ancestor(graph.nx_graph_with_data(), node_id)
|
||||||
|
|
||||||
|
# If no ancestor was found, return an empty ImageMetadata object
|
||||||
|
if ancestor_id is None:
|
||||||
|
return ImageMetadata()
|
||||||
|
|
||||||
|
ancestor_node = graph.get_node(ancestor_id)
|
||||||
|
|
||||||
|
# Grab all the core metadata from the ancestor node
|
||||||
|
ancestor_metadata = {
|
||||||
|
param: val
|
||||||
|
for param, val in ancestor_node.dict().items()
|
||||||
|
if param in self._ANCESTOR_PARAMS
|
||||||
|
}
|
||||||
|
|
||||||
|
# Get this image's prompts and noise parameters
|
||||||
|
addl_metadata = self._get_additional_metadata(graph, ancestor_id)
|
||||||
|
|
||||||
|
# If additional metadata was found, add it to the main metadata
|
||||||
|
if addl_metadata is not None:
|
||||||
|
ancestor_metadata.update(addl_metadata)
|
||||||
|
|
||||||
|
return ImageMetadata(**ancestor_metadata)
|
||||||
104
invokeai/app/services/model_manager_initializer.py
Normal file
104
invokeai/app/services/model_manager_initializer.py
Normal file
@@ -0,0 +1,104 @@
|
|||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import torch
|
||||||
|
from argparse import Namespace
|
||||||
|
from omegaconf import OmegaConf
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import types
|
||||||
|
|
||||||
|
import invokeai.version
|
||||||
|
from .config import InvokeAISettings
|
||||||
|
from ...backend import ModelManager
|
||||||
|
from ...backend.util import choose_precision, choose_torch_device
|
||||||
|
|
||||||
|
# TODO: Replace with an abstract class base ModelManagerBase
|
||||||
|
def get_model_manager(config: InvokeAISettings, logger: types.ModuleType) -> ModelManager:
    """Create and return the application's ModelManager.

    Parameters:
        config (InvokeAISettings): Application settings; provides the models
            config path, precision, embedding path, autoconvert path and the
            max loaded model count.
        logger: Logger used for status and error reporting.

    Returns:
        ModelManager: The initialized model manager.
    """
    model_config = config.model_conf_path
    if not model_config.exists():
        report_model_error(
            config, FileNotFoundError(f"The file {model_config} could not be found."), logger
        )
        # NOTE(review): report_model_error may return without exiting (user
        # declines reconfiguration), in which case we proceed with a missing
        # models file — confirm this fall-through is intended.

    logger.info(f"{invokeai.version.__app_name__}, version {invokeai.version.__version__}")
    logger.info(f'InvokeAI runtime directory is "{config.root}"')

    # these two lines prevent a horrible warning message from appearing
    # when the frozen CLIP tokenizer is imported
    import transformers  # type: ignore

    transformers.logging.set_verbosity_error()
    import diffusers

    diffusers.logging.set_verbosity_error()
    embedding_path = config.embedding_path

    # migrate legacy models
    ModelManager.migrate_models()

    # creating the model manager
    try:
        device = torch.device(choose_torch_device())
        # Honor an explicitly configured precision; otherwise choose one
        # appropriate for the device. (Replaces a hard-to-read
        # backslash-continued conditional expression; behavior unchanged.)
        if config.precision in ("float16", "float32"):
            precision = config.precision
        else:
            precision = choose_precision(device)

        model_manager = ModelManager(
            OmegaConf.load(config.model_conf_path),
            precision=precision,
            device_type=device,
            max_loaded_models=config.max_loaded_models,
            embedding_path=embedding_path,
            logger=logger,
        )
    except (FileNotFoundError, TypeError, AssertionError) as e:
        report_model_error(config, e, logger)
        # NOTE(review): if report_model_error returns here, `model_manager`
        # is unbound and the `return` below raises NameError — confirm
        # whether this path should sys.exit instead.
    except (IOError, KeyError) as e:
        logger.error(f"{e}. Aborting.")
        sys.exit(-1)

    # try to autoconvert new models
    # autoimport new .ckpt files
    if config.autoconvert_path:
        model_manager.heuristic_import(
            config.autoconvert_path,
        )
    return model_manager
|
||||||
|
|
||||||
|
def report_model_error(opt: Namespace, e: Exception, logger: types.ModuleType):
    """Report a model-initialization failure and offer to reconfigure.

    Prompts the user (or honors the INVOKE_MODEL_RECONFIGURE environment
    variable) and, unless declined, launches the invokeai-configure script
    with the root/config arguments carried over from the CLI.

    Parameters:
        opt (Namespace): Parsed CLI options; `root_dir` and `conf` are read.
        e (Exception): The error that triggered this report.
        logger: Logger used for status and error reporting.
    """
    logger.error(f'An error occurred while attempting to initialize the model: "{str(e)}"')
    logger.error(
        "This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
    )
    yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE")
    if yes_to_all:
        logger.warning(
            "Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
        )
    else:
        response = input(
            "Do you want to run invokeai-configure script to select and/or reinstall models? [y] "
        )
        if response.startswith(("n", "N")):
            return

    logger.info("invokeai-configure is launching....\n")

    # Match arguments that were set on the CLI
    # only the arguments accepted by the configuration script are parsed
    root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
    config = ["--config", opt.conf] if opt.conf is not None else []
    sys.argv = ["invokeai-configure"]
    sys.argv.extend(root_dir)
    # BUG FIX: `config` is a plain list, so the original
    # `sys.argv.extend(config.to_dict())` raised AttributeError whenever
    # --config was given. Extend with the list directly.
    sys.argv.extend(config)
    if yes_to_all is not None:
        sys.argv.extend(yes_to_all.split())

    from invokeai.frontend.install import invokeai_configure

    invokeai_configure()
    # TODO: Figure out how to restart
    # print('** InvokeAI will now restart')
    # sys.argv = previous_args
    # main() # would rather do a os.exec(), but doesn't exist?
    # sys.exit(0)
|
||||||
146
invokeai/app/services/models/image_record.py
Normal file
146
invokeai/app/services/models/image_record.py
Normal file
@@ -0,0 +1,146 @@
|
|||||||
|
import datetime
|
||||||
|
from typing import Optional, Union
|
||||||
|
from pydantic import BaseModel, Extra, Field, StrictBool, StrictStr
|
||||||
|
from invokeai.app.models.image import ImageCategory, ResourceOrigin
|
||||||
|
from invokeai.app.models.metadata import ImageMetadata
|
||||||
|
from invokeai.app.util.misc import get_iso_timestamp
|
||||||
|
|
||||||
|
|
||||||
|
class ImageRecord(BaseModel):
    """An image record as deserialized from storage.

    Note: `width`/`height` reflect the actual image file and may differ from
    any dimensions recorded in `metadata`.
    """

    # Unique name identifying this image.
    image_name: str = Field(description="The unique name of the image.")
    # Where the image came from.
    image_origin: ResourceOrigin = Field(description="The type of the image.")
    # What kind of image this is.
    image_category: ImageCategory = Field(description="The category of the image.")
    # Actual pixel dimensions of the stored image.
    width: int = Field(description="The width of the image in px.")
    height: int = Field(description="The height of the image in px.")
    # Timestamps; may arrive as datetime objects or ISO strings.
    created_at: Union[datetime.datetime, str] = Field(
        description="The created timestamp of the image."
    )
    updated_at: Union[datetime.datetime, str] = Field(
        description="The updated timestamp of the image."
    )
    deleted_at: Union[datetime.datetime, str, None] = Field(
        description="The deleted timestamp of the image."
    )
    # Intermediate images are transient generation by-products.
    is_intermediate: bool = Field(description="Whether this is an intermediate image.")
    # Provenance: only set for generated images.
    session_id: Optional[str] = Field(
        default=None,
        description="The session ID that generated this image, if it is a generated image.",
    )
    node_id: Optional[str] = Field(
        default=None,
        description="The node ID that generated this image, if it is a generated image.",
    )
    # Limited generation metadata; the session holds the full record.
    metadata: Optional[ImageMetadata] = Field(
        default=None,
        description="A limited subset of the image's generation metadata. Retrieve the image's session for full metadata.",
    )
|
||||||
|
|
||||||
|
|
||||||
|
class ImageRecordChanges(BaseModel, extra=Extra.forbid):
    """A set of changes to apply to an image record.

    Only limited changes are valid:
    - `image_category`: change the category of an image
    - `session_id`: change the session associated with an image
    - `is_intermediate`: change the image's `is_intermediate` flag
    """

    # CONSISTENCY FIX: `default=None` was implicit (pydantic v1 treats
    # Optional fields as defaulting to None); made explicit to match the
    # sibling fields below. No behavior change.
    image_category: Optional[ImageCategory] = Field(
        default=None,
        description="The image's new category.",
    )
    """The image's new category."""
    session_id: Optional[StrictStr] = Field(
        default=None,
        description="The image's new session ID.",
    )
    """The image's new session ID."""
    is_intermediate: Optional[StrictBool] = Field(
        default=None, description="The image's new `is_intermediate` flag."
    )
    """The image's new `is_intermediate` flag."""
|
||||||
|
|
||||||
|
|
||||||
|
class ImageUrlsDTO(BaseModel):
    """The URLs for an image and its thumbnail."""

    # Unique name identifying the image the URLs belong to.
    image_name: str = Field(description="The unique name of the image.")
    # Full-size image URL.
    image_url: str = Field(description="The URL of the image.")
    # Thumbnail URL.
    thumbnail_url: str = Field(description="The URL of the image's thumbnail.")
|
||||||
|
|
||||||
|
|
||||||
|
class ImageDTO(ImageRecord, ImageUrlsDTO):
    """Deserialized image record, enriched for the frontend with URLs."""
|
||||||
|
|
||||||
|
|
||||||
|
def image_record_to_dto(
    image_record: ImageRecord, image_url: str, thumbnail_url: str
) -> ImageDTO:
    """Converts an image record to an image DTO."""
    # Copy the record's fields and layer the URL fields on top.
    dto_kwargs = image_record.dict()
    dto_kwargs["image_url"] = image_url
    dto_kwargs["thumbnail_url"] = thumbnail_url
    return ImageDTO(**dto_kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def deserialize_image_record(image_dict: dict) -> ImageRecord:
    """Deserializes an image record.

    Missing keys fall back to "reasonable" defaults; raw JSON metadata, when
    present, is parsed into an ImageMetadata object.
    """

    image_name = image_dict.get("image_name", "unknown")
    image_origin = ResourceOrigin(
        image_dict.get("image_origin", ResourceOrigin.INTERNAL.value)
    )
    image_category = ImageCategory(
        image_dict.get("image_category", ImageCategory.GENERAL.value)
    )
    width = image_dict.get("width", 0)
    height = image_dict.get("height", 0)
    session_id = image_dict.get("session_id")
    node_id = image_dict.get("node_id")
    created_at = image_dict.get("created_at", get_iso_timestamp())
    updated_at = image_dict.get("updated_at", get_iso_timestamp())
    # BUG FIX: the original defaulted a missing `deleted_at` to the *current*
    # timestamp (copy-paste from created/updated), which marked every
    # non-deleted record as deleted "now". A missing value now correctly
    # deserializes to None, which the ImageRecord field permits.
    deleted_at = image_dict.get("deleted_at")
    is_intermediate = image_dict.get("is_intermediate", False)

    raw_metadata = image_dict.get("metadata")
    if raw_metadata is not None:
        metadata = ImageMetadata.parse_raw(raw_metadata)
    else:
        metadata = None

    return ImageRecord(
        image_name=image_name,
        image_origin=image_origin,
        image_category=image_category,
        width=width,
        height=height,
        session_id=session_id,
        node_id=node_id,
        metadata=metadata,
        created_at=created_at,
        updated_at=updated_at,
        deleted_at=deleted_at,
        is_intermediate=is_intermediate,
    )
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user