Mirror of https://github.com/invoke-ai/InvokeAI.git
Synced 2026-01-21 03:28:25 -05:00

Compare commits: v5.7.2 ... 5.10.0dev1 (446 commits)

Commits in this range (abbreviated SHA1s):

```text
bea9d037bc eed4260975 6a79b1c64c 4cdfe3e30d df294db236 52e247cfe0 0a13640bf3 643b71f56c
b745411866 c3ffb0feed f53ff5fa3c b2337b56bd fb777b4502 4c12f5a011 d4655ea21a 752b62d0b5
9a0efb308d b4c276b50f db03c196a1 6bc36b697d b7d71d3028 fa1ebd9d2f eed5d02069 3650d91045
6c7d08cacb bb1c40f222 bfb117d0e0 b31c1022c3 a5851ca31c 77bf5c15bb d26b7a1a12 595133463e
6155f9ff9e 7be87c8048 9868c3bfe3 8b299d0bac a44bfb4658 96fb5f6881 4109ea5324 f6c2ee5040
965753bf8b 40c53ab95c aaa6211625 f6d770eac9 47cb61cd62 b0fdc8ae1c ed9b30efda 168e5eeff0
7acaa86bdf 96c0393fe7 403f795c5e c0f88a083e 542b182899 3f58c68c09 e50c7e5947 4a83700fe4
c9992914d6 c25f6d1f84 a53e1ccf08 1af9930951 c276c1cbee c619348f29 c6f96613fc 258bf736da
0d75c99476 323d409fb6 f251722f56 7004fde41b c9dc27afbb efd14ec0e4 21ee2b6251 82dd2d508f
ffb5f6c6a6 5c5fff9ecb 9ca071819b b14d8e8192 3f12a43e75 5a59f6e3b8 60b5aef16a 35222a8835
0e8b5484d5 454506c83e 8f6ab67376 5afcc7778f 325e07d330 a016bdc159 a14f0b2864 721483318a
be04743649 92f0c28d6c a6b94e8ca4 00b11ef795 182580ff69 8e9d5c1187 99aac5870e c1b475c585
ec44e68cbf 73dbebbcc3 09f971467d 2c71b0e873 92f69ac463 3b154df71a 64aa965160 d715c27d07
515084577c 7596c07a64 98fd1d949b 6312e6aa8f 6435f11bae 1c69b9b1fa 731970ff88 038bac1614
ed9efe7740 ffa0beba7a 75d793f1c4 2b086917e0 a9f2738086 3a56799ea5 3162ce94dc c0dc6ac4e1
fed1995525 5006e23456 2f063bddda 23a26422fd 434f195a96 6a4c2d692c 5127a07cf9 0b4c6f0ab4
d8450033ea 3938736bd8 fb2c7b9566 29449ec27d e38f778d28 f5e78436a8 6a15b5d9be a629102c87
848ade8ab8 2110feb01c f3e1821957 bbcf93089a 66f41aa307 8a709766b3 efaa20a7a1 3e4c808b23
00e3931af4 08bea07f8b 166d2f0e39 21f346717a f966fb8b9c c2b20a5387 bed9089fe6 d34a4f765c
efe4708b8b 7cb1f61a9e 6e2ef34cba d208b99a47 47eeafa5cb 0cb00fbe53 a7e8ed3bc2 22eb25be48
a077f3fefc c013a6e38d 6cfeb71bed 534f993023 67f9b6420c 61bf065237 e78cf889ee 5d13f0ba15
633b9afa46 f1889b259d ed21d0b57e df90da28e1 702054aa62 636ec1de6e 063d07fd41 c78eac624e
05de3b7a84 9cc2232b6f 9fdc06b447 5ea3ec5cc8 f13a07ba6a a913f0163d f7cfbd1323 2806b60701
d8c3af624b feed44b68d 247f3b5d67 8e14f9d971 bdb44ee48d b57f5330c5 ade3c015b4 7fe4d4c21a
133a7fde55 6375214878 b9972be7f1 e61c5a3f26 8c633786f6 8703eea49b c8888be4c3 11963a65a4
ab6422fdf7 1f8632029e 88a762474d e6dd721e33 2a09604baf f94f00ede0 37af281299 fc82775d7a
9ed46f60b7 9a389e6b93 2ef1ecf381 41de112932 e9714fe476 3f29293e39 db1aa38e98 12717d4a4d
1953f3cbcd 3469fc9843 7cdd4187a9 ad66c101d2 28d3356710 81e70fb9d2 971c425734 b09008c530
f9f99f873d 7f93f1b600 b1d336ce8a 40c7be8f5d 24218b34bf d970c6d6d5 e5308be0bb 7d5687e9ff
7adac4581a 962db86cac d65ec0e250 7fdde5e84a 895956bcfe f27d26cfa2 965bcba6c2 c9f2460ff2
5abbbf4b5b e66688edbf a519483f95 75c91604bb 53bdaba7b6 f3f405ca77 dda69950a7 b2198b9fa7
02b91e8e7b 09bf7c35eb deb9a65b3d 5be9a7227c bb9f886bd4 46520946f8 830880a6fc 63b94a8ff3
f12924a1e1 f8e51c86f5 654e992630 21f247f499 8bcd9fe4b7 c84a646735 b52f8121af 05bed3fddd
87ea20192f 2f9c95c462 47cadbb48e 23518b9830 94dcf391a6 637b93d2d8 565b160060 e7a60c01ed
4b54ccc29c c4183ec98c 5a9cbe35e0 df18fe0298 e5591d145f 371c187fc3 bdd0b90769 4377158503
c8c27079ed d8b9a8d0dd 39a4608d15 cd2d5431db c04cdd9779 b86ac5e049 e982c95687 665236bb79
0eeb0dd67b 28c74cbe38 7414f68acc a984462b80 c6c2567203 f05c8b909f 73330a1308 6f568d48ed
81a97f3796 3f9535d2f9 83bfbdcad4 729428084c 523a932ecc 21be7d7157 a29fb18c0b aed446f013
e81c9b0d6e f45400a275 89f457c486 30ed09a36e 3334652acc e83536f396 97593f95f6 7f14cee17e
0a836d6fc1 54e781d5bb aa71d0c817 07313e429d bad5023238 73a0d2c06c 918e9c8ccc 1e388e9ca4
5b84d45932 dc3f1184b2 87438bcad7 afd894fd04 df305c0b99 deecb7f3c3 dd5f353465 a8759ea0a6
3ff529c718 3b0fecafb0 099011000f 155daa3137 c493e223cf 124ca23f8b a8023cbcb6 b733d3897e
ef95b37ace 4feff5a185 6c8dc32d5c e5da808b2f 7d3434da62 4cc70d9f16 7988bc1a59 1756d885f6
9ec4d968aa 76c09301f9 1cf8749754 5d6c468833 80b3f44ae8 c77c12aa1d 731992c5ec c259899bf4
f62b9ad919 57533657f9 e35537e60a be53b89203 a215eeaabf d86b392bfd 3e9e45b177 907d960745
bfdace6437 a89d68b93a 59a8c0d441 d5d08f6569 8a4282365e b9c7bc8b0e 0f45ee04a2 839a791509
f03a2bf03f 4136817d30 7f0452173b 8e46b03f09 9045237bfb 58959a18cb e51588197f c5319ac48c
50657650c2 f657c95e45 2d3a2f9842 008837642e 1a84a2fb7e b87febcf4c 95a9bb6c7b 93ec9a048f
ec6cea6705 bfbcaad8c2 3694158434 814fb939c0 4cb73e6c19 e8aed67cf1 f56dd01419 ed9cd6a7a2
c44c28ec4c e1f7359171 3e97d49a69 c12585e52d b39774a57c 8988539cd5 88c68e8016 5073c7d0a3
84e86819b8 440e3e01ac c2302f7ab1 2594eed1af e8db1c1d5a d5c5e8e8ed 518a7c941f bdafe53f2e
cf0cbaf0ae ac6fc6eccb 07d65b8fd1 3c2e6378ca 445f122f37 8c0ee9c48f 0eb237ac64 9aa04f0bea
76e2f41ec7 1353c3301a bf209663ac 04b96dd7b4 79b2c68853 aac456527e c88b835373 9da116fd3d
201d7f1fdb 17a5b1bd28 a409aec00f b0593eda92 9acb24914f ab4433da2f d4423aa16f 1f6430c1b0
8e28888bc4 b6b21dbcbf 7b48ef2264 9c542ed655 4c02ba908a 82293ae3b2 f1fde792ee e82393f7ed
d5211a8088 3b095b5945 34959ef573 7f10f8f96a f2689598c0 551c78d9f3
```

.dockerignore

```diff
@@ -1,9 +1,11 @@
 *
 !invokeai
 !pyproject.toml
+!uv.lock
 !docker/docker-entrypoint.sh
 !LICENSE

+**/dist
 **/node_modules
 **/__pycache__
 **/*.egg-info
```

.git-blame-ignore-revs

```diff
@@ -1,2 +1,5 @@
 b3dccfaeb636599c02effc377cdd8a87d658256c
 218b6d0546b990fc449c876fb99f44b50c4daa35
+182580ff6970caed400be178c5b888514b75d7f2
+8e9d5c1187b0d36da80571ce4c8ba9b3a37b6c46
+99aac5870e1092b182e6c5f21abcaab6936a4ad1
```

.gitattributes (vendored)

```diff
@@ -2,4 +2,5 @@
 # Only affects text files and ignores other file types.
 # For more info see: https://www.aleksandrhovhannisyan.com/blog/crlf-vs-lf-normalizing-line-endings-in-git/
 * text=auto
 docker/** text eol=lf
+tests/test_model_probe/stripped_models/** filter=lfs diff=lfs merge=lfs -text
```

.github/CODEOWNERS (vendored)

```diff
@@ -1,12 +1,12 @@
 # continuous integration
-/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr
+/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr @jazzhaiku

 # documentation
 /docs/ @lstein @blessedcoolant @hipsterusername @Millu
 /mkdocs.yml @lstein @blessedcoolant @hipsterusername @Millu

 # nodes
-/invokeai/app/ @Kyle0654 @blessedcoolant @psychedelicious @brandonrising @hipsterusername
+/invokeai/app/ @Kyle0654 @blessedcoolant @psychedelicious @brandonrising @hipsterusername @jazzhaiku

 # installation and configuration
 /pyproject.toml @lstein @blessedcoolant @hipsterusername
@@ -22,7 +22,7 @@
 /invokeai/backend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername

 # generation, model management, postprocessing
-/invokeai/backend @damian0815 @lstein @blessedcoolant @gregghelt2 @StAlKeR7779 @brandonrising @ryanjdick @hipsterusername
+/invokeai/backend @damian0815 @lstein @blessedcoolant @gregghelt2 @StAlKeR7779 @brandonrising @ryanjdick @hipsterusername @jazzhaiku

 # front ends
 /invokeai/frontend/CLI @lstein @hipsterusername
```

.github/workflows/build-container.yml (vendored)

```diff
@@ -97,6 +97,8 @@ jobs:
           context: .
           file: docker/Dockerfile
           platforms: ${{ env.PLATFORMS }}
+          build-args: |
+            GPU_DRIVER=${{ matrix.gpu-driver }}
           push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' || github.event.inputs.push-to-registry }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
```

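The added `build-args` block is what lets one workflow produce CUDA, CPU, and ROCm image variants from a single Dockerfile. A local build can pass the same argument by hand; this is a sketch, and the tag name is an assumption rather than anything from the workflow:

```sh
# Local equivalent of one CI matrix cell; adjust GPU_DRIVER to cuda, cpu, or rocm.
docker build -f docker/Dockerfile --build-arg GPU_DRIVER=rocm -t invokeai:rocm .
```
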
.github/workflows/frontend-checks.yml (vendored)

```diff
@@ -44,7 +44,12 @@ jobs:
       - name: check for changed frontend files
         if: ${{ inputs.always_run != true }}
         id: changed-files
-        uses: tj-actions/changed-files@v42
+        # Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
+        # See:
+        # - CVE-2025-30066
+        # - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
+        # - https://github.com/tj-actions/changed-files/issues/2463
+        uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
         with:
           files_yaml: |
             frontend:
```

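Pinning `uses:` to a commit hash instead of a mutable tag means a retagged or compromised release can no longer change what CI executes. If you want to confirm the pinned hash really corresponds to the advertised tag, one way (not part of the workflow) is:

```sh
# For an annotated tag, the peeled "^{}" entry shows the commit the tag points at.
git ls-remote https://github.com/tj-actions/changed-files 'refs/tags/v45.0.9*'
```
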
.github/workflows/frontend-tests.yml (vendored)

```diff
@@ -44,7 +44,12 @@ jobs:
      - name: check for changed frontend files
        if: ${{ inputs.always_run != true }}
        id: changed-files
-        uses: tj-actions/changed-files@v42
+        # Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
+        # See:
+        # - CVE-2025-30066
+        # - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
+        # - https://github.com/tj-actions/changed-files/issues/2463
+        uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
        with:
          files_yaml: |
            frontend:
```

.github/workflows/python-checks.yml (vendored)

```diff
@@ -34,6 +34,9 @@ on:

 jobs:
   python-checks:
+    env:
+      # uv requires a venv by default - but for this, we can simply use the system python
+      UV_SYSTEM_PYTHON: 1
     runs-on: ubuntu-latest
     timeout-minutes: 5 # expected run time: <1 min
     steps:
@@ -43,7 +46,12 @@ jobs:
       - name: check for changed python files
         if: ${{ inputs.always_run != true }}
         id: changed-files
-        uses: tj-actions/changed-files@v42
+        # Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
+        # See:
+        # - CVE-2025-30066
+        # - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
+        # - https://github.com/tj-actions/changed-files/issues/2463
+        uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
         with:
           files_yaml: |
             python:
@@ -52,25 +60,19 @@ jobs:
             - '!invokeai/frontend/web/**'
             - 'tests/**'

-      - name: setup python
+      - name: setup uv
         if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
-        uses: actions/setup-python@v5
+        uses: astral-sh/setup-uv@v5
         with:
-          python-version: '3.10'
-          cache: pip
-          cache-dependency-path: pyproject.toml
-
-      - name: install ruff
-        if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
-        run: pip install ruff==0.6.0
-        shell: bash
+          version: '0.6.10'
+          enable-cache: true

       - name: ruff check
         if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
-        run: ruff check --output-format=github .
+        run: uv tool run ruff@0.11.2 check --output-format=github .
         shell: bash

       - name: ruff format
         if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
-        run: ruff format --check .
+        run: uv tool run ruff@0.11.2 format --check .
         shell: bash
```

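Because `uv tool run` pins the tool version in the command itself, the same two checks can be reproduced locally without installing ruff into the project venv (assumes `uv` is on your PATH):

```sh
# Identical to the workflow's lint steps, run from the repo root.
uv tool run ruff@0.11.2 check --output-format=github .
uv tool run ruff@0.11.2 format --check .
```
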
.github/workflows/python-tests.yml (vendored)

```diff
@@ -39,24 +39,15 @@ jobs:
     strategy:
       matrix:
         python-version:
-          - '3.10'
           - '3.11'
+          - '3.12'
         platform:
-          - linux-cuda-11_7
-          - linux-rocm-5_2
           - linux-cpu
           - macos-default
           - windows-cpu
         include:
-          - platform: linux-cuda-11_7
-            os: ubuntu-22.04
-            github-env: $GITHUB_ENV
-          - platform: linux-rocm-5_2
-            os: ubuntu-22.04
-            extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
-            github-env: $GITHUB_ENV
           - platform: linux-cpu
-            os: ubuntu-22.04
+            os: ubuntu-24.04
             extra-index-url: 'https://download.pytorch.org/whl/cpu'
             github-env: $GITHUB_ENV
           - platform: macos-default
@@ -70,14 +61,22 @@ jobs:
     timeout-minutes: 15 # expected run time: 2-6 min, depending on platform
     env:
       PIP_USE_PEP517: '1'
+      UV_SYSTEM_PYTHON: 1

     steps:
       - name: checkout
-        uses: actions/checkout@v4
+        # https://github.com/nschloe/action-cached-lfs-checkout
+        uses: nschloe/action-cached-lfs-checkout@f46300cd8952454b9f0a21a3d133d4bd5684cfc2

       - name: check for changed python files
         if: ${{ inputs.always_run != true }}
         id: changed-files
-        uses: tj-actions/changed-files@v42
+        # Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
+        # See:
+        # - CVE-2025-30066
+        # - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
+        # - https://github.com/tj-actions/changed-files/issues/2463
+        uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
         with:
           files_yaml: |
             python:
@@ -86,20 +85,25 @@ jobs:
             - '!invokeai/frontend/web/**'
             - 'tests/**'

+      - name: setup uv
+        if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
+        uses: astral-sh/setup-uv@v5
+        with:
+          version: '0.6.10'
+          enable-cache: true
+          python-version: ${{ matrix.python-version }}
+
       - name: setup python
         if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
-          cache: pip
-          cache-dependency-path: pyproject.toml

       - name: install dependencies
         if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
         env:
-          PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
-        run: >
-          pip3 install --editable=".[test]"
+          UV_INDEX: ${{ matrix.extra-index-url }}
+        run: uv pip install --editable ".[test]"

       - name: run pytest
         if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
```

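A rough local reproduction of the `linux-cpu` matrix cell might look like the following; the final `pytest` invocation is an assumption, since the workflow's actual test command is not shown in these hunks:

```sh
# UV_INDEX mirrors the matrix's extra-index-url so torch resolves to CPU-only wheels.
export UV_INDEX="https://download.pytorch.org/whl/cpu"
uv pip install --editable ".[test]"
pytest
```
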
.github/workflows/typegen-checks.yml (vendored)

```diff
@@ -42,24 +42,37 @@ jobs:
       - name: check for changed files
         if: ${{ inputs.always_run != true }}
         id: changed-files
-        uses: tj-actions/changed-files@v42
+        # Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
+        # See:
+        # - CVE-2025-30066
+        # - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
+        # - https://github.com/tj-actions/changed-files/issues/2463
+        uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
         with:
           files_yaml: |
             src:
               - 'pyproject.toml'
               - 'invokeai/**'

+      - name: setup uv
+        if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
+        uses: astral-sh/setup-uv@v5
+        with:
+          version: '0.6.10'
+          enable-cache: true
+          python-version: '3.11'
+
       - name: setup python
         if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
         uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
-          cache: pip
-          cache-dependency-path: pyproject.toml
+          python-version: '3.11'

-      - name: install python dependencies
+      - name: install dependencies
         if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
-        run: pip3 install --use-pep517 --editable="."
+        env:
+          UV_INDEX: ${{ matrix.extra-index-url }}
+        run: uv pip install --editable .

       - name: install frontend dependencies
         if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
@@ -72,7 +85,7 @@ jobs:

       - name: generate schema
         if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
-        run: make frontend-typegen
+        run: cd invokeai/frontend/web && uv run ../../../scripts/generate_openapi_schema.py | pnpm typegen
         shell: bash

       - name: compare files
```

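The replaced `make frontend-typegen` target is inlined as an explicit pipeline: the OpenAPI schema is generated from the Python side and piped into the frontend's type generator. The same command works locally from the repo root:

```sh
cd invokeai/frontend/web && uv run ../../../scripts/generate_openapi_schema.py | pnpm typegen
```
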
docker/Dockerfile

```diff
@@ -1,77 +1,6 @@
 # syntax=docker/dockerfile:1.4

-## Builder stage
+#### Web UI ------------------------------------

-FROM library/ubuntu:24.04 AS builder
-
-ARG DEBIAN_FRONTEND=noninteractive
-RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
-RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
-    --mount=type=cache,target=/var/lib/apt,sharing=locked \
-    apt update && apt-get install -y \
-    build-essential \
-    git
-
-# Install `uv` for package management
-COPY --from=ghcr.io/astral-sh/uv:0.6.0 /uv /uvx /bin/
-
-ENV VIRTUAL_ENV=/opt/venv
-ENV PATH="$VIRTUAL_ENV/bin:$PATH"
-ENV INVOKEAI_SRC=/opt/invokeai
-ENV PYTHON_VERSION=3.11
-ENV UV_PYTHON=3.11
-ENV UV_COMPILE_BYTECODE=1
-ENV UV_LINK_MODE=copy
-ENV UV_PROJECT_ENVIRONMENT="$VIRTUAL_ENV"
-ENV UV_INDEX="https://download.pytorch.org/whl/cu124"
-
-ARG GPU_DRIVER=cuda
-# unused but available
-ARG BUILDPLATFORM
-
-# Switch to the `ubuntu` user to work around dependency issues with uv-installed python
-RUN mkdir -p ${VIRTUAL_ENV} && \
-    mkdir -p ${INVOKEAI_SRC} && \
-    chmod -R a+w /opt && \
-    mkdir ~ubuntu/.cache && chown ubuntu: ~ubuntu/.cache
-USER ubuntu
-
-# Install python
-RUN --mount=type=cache,target=/home/ubuntu/.cache/uv,uid=1000,gid=1000 \
-    uv python install ${PYTHON_VERSION}
-
-WORKDIR ${INVOKEAI_SRC}
-
-# Install project's dependencies as a separate layer so they aren't rebuilt every commit.
-# bind-mount instead of copy to defer adding sources to the image until next layer.
-#
-# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
-# x86_64/CUDA is the default
-RUN --mount=type=cache,target=/home/ubuntu/.cache/uv,uid=1000,gid=1000 \
-    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
-    --mount=type=bind,source=invokeai/version,target=invokeai/version \
-    if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
-    UV_INDEX="https://download.pytorch.org/whl/cpu"; \
-    elif [ "$GPU_DRIVER" = "rocm" ]; then \
-    UV_INDEX="https://download.pytorch.org/whl/rocm6.1"; \
-    fi && \
-    uv sync --no-install-project
-
-# Now that the bulk of the dependencies have been installed, copy in the project files that change more frequently.
-COPY invokeai invokeai
-COPY pyproject.toml .
-
-RUN --mount=type=cache,target=/home/ubuntu/.cache/uv,uid=1000,gid=1000 \
-    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
-    if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
-    UV_INDEX="https://download.pytorch.org/whl/cpu"; \
-    elif [ "$GPU_DRIVER" = "rocm" ]; then \
-    UV_INDEX="https://download.pytorch.org/whl/rocm6.1"; \
-    fi && \
-    uv sync
-
-
-#### Build the Web UI ------------------------------------
-
 FROM docker.io/node:22-slim AS web-builder
 ENV PNPM_HOME="/pnpm"
@@ -85,69 +14,89 @@ RUN --mount=type=cache,target=/pnpm/store \
     pnpm install --frozen-lockfile
 RUN npx vite build

-#### Runtime stage ---------------------------------------
+## Backend ---------------------------------------

-FROM library/ubuntu:24.04 AS runtime
+FROM library/ubuntu:24.04

 ARG DEBIAN_FRONTEND=noninteractive
-ENV PYTHONUNBUFFERED=1
-ENV PYTHONDONTWRITEBYTECODE=1
-
-RUN apt update && apt install -y --no-install-recommends \
-    git \
-    curl \
-    vim \
-    tmux \
-    ncdu \
-    iotop \
-    bzip2 \
-    gosu \
-    magic-wormhole \
-    libglib2.0-0 \
-    libgl1 \
-    libglx-mesa0 \
-    build-essential \
-    libopencv-dev \
-    libstdc++-10-dev &&\
-    apt-get clean && apt-get autoclean
-
-ENV INVOKEAI_SRC=/opt/invokeai
-ENV VIRTUAL_ENV=/opt/venv
-ENV UV_PROJECT_ENVIRONMENT="$VIRTUAL_ENV"
-ENV PYTHON_VERSION=3.11
-ENV INVOKEAI_ROOT=/invokeai
-ENV INVOKEAI_HOST=0.0.0.0
-ENV INVOKEAI_PORT=9090
-ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"
-ENV CONTAINER_UID=${CONTAINER_UID:-1000}
-ENV CONTAINER_GID=${CONTAINER_GID:-1000}
+RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
+RUN --mount=type=cache,target=/var/cache/apt \
+    --mount=type=cache,target=/var/lib/apt \
+    apt update && apt install -y --no-install-recommends \
+    ca-certificates \
+    git \
+    gosu \
+    libglib2.0-0 \
+    libgl1 \
+    libglx-mesa0 \
+    build-essential \
+    libopencv-dev \
+    libstdc++-10-dev
+
+ENV \
+    PYTHONUNBUFFERED=1 \
+    PYTHONDONTWRITEBYTECODE=1 \
+    VIRTUAL_ENV=/opt/venv \
+    INVOKEAI_SRC=/opt/invokeai \
+    PYTHON_VERSION=3.12 \
+    UV_PYTHON=3.12 \
+    UV_COMPILE_BYTECODE=1 \
+    UV_MANAGED_PYTHON=1 \
+    UV_LINK_MODE=copy \
+    UV_PROJECT_ENVIRONMENT=/opt/venv \
+    UV_INDEX="https://download.pytorch.org/whl/cu124" \
+    INVOKEAI_ROOT=/invokeai \
+    INVOKEAI_HOST=0.0.0.0 \
+    INVOKEAI_PORT=9090 \
+    PATH="/opt/venv/bin:$PATH" \
+    CONTAINER_UID=${CONTAINER_UID:-1000} \
+    CONTAINER_GID=${CONTAINER_GID:-1000}
+
+ARG GPU_DRIVER=cuda

 # Install `uv` for package management
-# and install python for the ubuntu user (expected to exist on ubuntu >=24.x)
-# this is too tiny to optimize with multi-stage builds, but maybe we'll come back to it
-COPY --from=ghcr.io/astral-sh/uv:0.6.0 /uv /uvx /bin/
-USER ubuntu
-RUN uv python install ${PYTHON_VERSION}
-USER root
+COPY --from=ghcr.io/astral-sh/uv:0.6.9 /uv /uvx /bin/

-# --link requires buldkit w/ dockerfile syntax 1.4
-COPY --link --from=builder ${INVOKEAI_SRC} ${INVOKEAI_SRC}
-COPY --link --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
-COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist
-
-# Link amdgpu.ids for ROCm builds
-# contributed by https://github.com/Rubonnek
-RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
-    ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
+# Install python & allow non-root user to use it by traversing the /root dir without read permissions
+RUN --mount=type=cache,target=/root/.cache/uv \
+    uv python install ${PYTHON_VERSION} && \
+    # chmod --recursive a+rX /root/.local/share/uv/python
+    chmod 711 /root

 WORKDIR ${INVOKEAI_SRC}

+# Install project's dependencies as a separate layer so they aren't rebuilt every commit.
+# bind-mount instead of copy to defer adding sources to the image until next layer.
+#
+# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
+# x86_64/CUDA is the default
+RUN --mount=type=cache,target=/root/.cache/uv \
+    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
+    --mount=type=bind,source=uv.lock,target=uv.lock \
+    # this is just to get the package manager to recognize that the project exists, without making changes to the docker layer
+    --mount=type=bind,source=invokeai/version,target=invokeai/version \
+    if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then UV_INDEX="https://download.pytorch.org/whl/cpu"; \
+    elif [ "$GPU_DRIVER" = "rocm" ]; then UV_INDEX="https://download.pytorch.org/whl/rocm6.2"; \
+    fi && \
+    uv sync --frozen
+
 # build patchmatch
 RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
 RUN python -c "from patchmatch import patch_match"

+# Link amdgpu.ids for ROCm builds
+# contributed by https://github.com/Rubonnek
+RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
+    ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
+
 RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}

 COPY docker/docker-entrypoint.sh ./
 ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
 CMD ["invokeai-web"]
+
+# --link requires buldkit w/ dockerfile syntax 1.4, does not work with podman
+COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist
+
+# add sources last to minimize image changes on code changes
+COPY invokeai ${INVOKEAI_SRC}/invokeai
```

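In the collapsed single-stage image, runtime behaviour is driven entirely by `ENV` defaults (`INVOKEAI_HOST=0.0.0.0`, `INVOKEAI_PORT=9090`, `INVOKEAI_ROOT=/invokeai`). A minimal run sketch, assuming an image built from this Dockerfile; the tag and host path are illustrative:

```sh
docker run --rm -p 9090:9090 -v ~/invokeai:/invokeai invokeai:local
```
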
docs/contributing/dev-environment.md

````diff
@@ -18,9 +18,19 @@ If you just want to use Invoke, you should use the [launcher][launcher link].

 2. [Fork and clone][forking link] the [InvokeAI repo][repo link].

-3. Create an directory for user data (images, models, db, etc). This is typically at `~/invokeai`, but if you already have a non-dev install, you may want to create a separate directory for the dev install.
+3. This repository uses Git LFS to manage large files. To ensure all assets are downloaded:
+
+   - Install git-lfs → [Download here](https://git-lfs.com/)
+   - Enable automatic LFS fetching for this repository:
+
+     ```shell
+     git config lfs.fetchinclude "*"
+     ```
+
+   - Fetch files from LFS (only needs to be done once; subsequent `git pull` will fetch changes automatically):
+
+     ```
+     git lfs pull
+     ```
+
+4. Create an directory for user data (images, models, db, etc). This is typically at `~/invokeai`, but if you already have a non-dev install, you may want to create a separate directory for the dev install.

-4. Follow the [manual install][manual install link] guide, with some modifications to the install command:
+5. Follow the [manual install][manual install link] guide, with some modifications to the install command:

    - Use `.` instead of `invokeai` to install from the current directory. You don't need to specify the version.
@@ -31,22 +41,22 @@ If you just want to use Invoke, you should use the [launcher][launcher link].
    With the modifications made, the install command should look something like this:

    ```sh
-   uv pip install -e ".[dev,test,docs,xformers]" --python 3.11 --python-preference only-managed --index=https://download.pytorch.org/whl/cu124 --reinstall
+   uv pip install -e ".[dev,test,docs,xformers]" --python 3.12 --python-preference only-managed --index=https://download.pytorch.org/whl/cu124 --reinstall
    ```

-5. At this point, you should have Invoke installed, a venv set up and activated, and the server running. But you will see a warning in the terminal that no UI was found. If you go to the URL for the server, you won't get a UI.
+6. At this point, you should have Invoke installed, a venv set up and activated, and the server running. But you will see a warning in the terminal that no UI was found. If you go to the URL for the server, you won't get a UI.

    This is because the UI build is not distributed with the source code. You need to build it manually. End the running server instance.

    If you only want to edit the docs, you can stop here and skip to the **Documentation** section below.

-6. Install the frontend dev toolchain:
+7. Install the frontend dev toolchain:

    - [`nodejs`](https://nodejs.org/) (v20+)

    - [`pnpm`](https://pnpm.io/8.x/installation) (must be v8 - not v9!)

-7. Do a production build of the frontend:
+8. Do a production build of the frontend:

    ```sh
    cd <PATH_TO_INVOKEAI_REPO>/invokeai/frontend/web
@@ -54,7 +64,7 @@ If you just want to use Invoke, you should use the [launcher][launcher link].
    pnpm build
    ```

-8. Restart the server and navigate to the URL. You should get a UI. After making changes to the python code, restart the server to see those changes.
+9. Restart the server and navigate to the URL. You should get a UI. After making changes to the python code, restart the server to see those changes.

 ## Updating the UI

````

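After the LFS steps in the updated guide, a quick sanity check (not in the guide itself) is to list the tracked files and confirm their content was hydrated:

```sh
# An asterisk after the OID means the file content is present locally; a dash means pointer only.
git lfs ls-files
```
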
docs/installation/manual.md

````diff
@@ -43,10 +43,10 @@ The following commands vary depending on the version of Invoke being installed a
 3. Create a virtual environment in that directory:

    ```sh
-   uv venv --relocatable --prompt invoke --python 3.11 --python-preference only-managed .venv
+   uv venv --relocatable --prompt invoke --python 3.12 --python-preference only-managed .venv
    ```

-   This command creates a portable virtual environment at `.venv` complete with a portable python 3.11. It doesn't matter if your system has no python installed, or has a different version - `uv` will handle everything.
+   This command creates a portable virtual environment at `.venv` complete with a portable python 3.12. It doesn't matter if your system has no python installed, or has a different version - `uv` will handle everything.

 4. Activate the virtual environment:

@@ -88,13 +88,13 @@ The following commands vary depending on the version of Invoke being installed a
 8. Install the `invokeai` package. Substitute the package specifier and version.

    ```sh
-   uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.11 --python-preference only-managed --force-reinstall
+   uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --force-reinstall
    ```

    If you determined you needed to use a `PyPI` index URL in the previous step, you'll need to add `--index=<INDEX_URL>` like this:

    ```sh
-   uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.11 --python-preference only-managed --index=<INDEX_URL> --force-reinstall
+   uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --index=<INDEX_URL> --force-reinstall
    ```

 9. Deactivate and reactivate your venv so that the invokeai-specific commands become available in the environment:
````

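Putting the two updated commands together with a concrete specifier, using `invokeai` as the package name on PyPI (the version placeholder is left as-is):

```sh
uv venv --relocatable --prompt invoke --python 3.12 --python-preference only-managed .venv
uv pip install "invokeai==<VERSION>" --python 3.12 --python-preference only-managed --force-reinstall
```
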
docs/installation/requirements.md

```diff
@@ -41,7 +41,7 @@ The requirements below are rough guidelines for best performance. GPUs with less

 You don't need to do this if you are installing with the [Invoke Launcher](./quick_start.md).

-Invoke requires python 3.10 or 3.11. If you don't already have one of these versions installed, we suggest installing 3.11, as it will be supported for longer.
+Invoke requires python 3.10 through 3.12. If you don't already have one of these versions installed, we suggest installing 3.12, as it will be supported for longer.

 Check that your system has an up-to-date Python installed by running `python3 --version` in the terminal (Linux, macOS) or cmd/powershell (Windows).

@@ -49,19 +49,19 @@ Check that your system has an up-to-date Python installed by running `python3 --

 === "Windows"

-    - Install python 3.11 with [an official installer].
+    - Install python with [an official installer].
     - The installer includes an option to add python to your PATH. Be sure to enable this. If you missed it, re-run the installer, choose to modify an existing installation, and tick that checkbox.
     - You may need to install [Microsoft Visual C++ Redistributable].

 === "macOS"

-    - Install python 3.11 with [an official installer].
+    - Install python with [an official installer].
     - If model installs fail with a certificate error, you may need to run this command (changing the python version to match what you have installed): `/Applications/Python\ 3.10/Install\ Certificates.command`
     - If you haven't already, you will need to install the XCode CLI Tools by running `xcode-select --install` in a terminal.

 === "Linux"

-    - Installing python varies depending on your system. On Ubuntu, you can use the [deadsnakes PPA](https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa).
+    - Installing python varies depending on your system. We recommend [using `uv` to manage your python installation](https://docs.astral.sh/uv/concepts/python-versions/#installing-a-python-version).
     - You'll need to install `libglib2.0-0` and `libgl1-mesa-glx` for OpenCV to work. For example, on a Debian system: `sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`

 ## Drivers
```

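On Linux the updated docs defer interpreter management to `uv`; installing a suitable version is a single command:

```sh
uv python install 3.12
```
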
invokeai/app/api/dependencies.py

```diff
@@ -36,7 +36,14 @@ from invokeai.app.services.style_preset_images.style_preset_images_disk import S
 from invokeai.app.services.style_preset_records.style_preset_records_sqlite import SqliteStylePresetRecordsStorage
 from invokeai.app.services.urls.urls_default import LocalUrlService
 from invokeai.app.services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage
-from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData
+from invokeai.app.services.workflow_thumbnails.workflow_thumbnails_disk import WorkflowThumbnailFileStorageDisk
+from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
+    BasicConditioningInfo,
+    ConditioningFieldData,
+    FLUXConditioningInfo,
+    SD3ConditioningInfo,
+    SDXLConditioningInfo,
+)
 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.version.invokeai_version import __version__

@@ -83,6 +90,7 @@ class ApiDependencies:

         model_images_folder = config.models_path
         style_presets_folder = config.style_presets_path
+        workflow_thumbnails_folder = config.workflow_thumbnails_path

         db = init_db(config=config, logger=logger, image_files=image_files)

@@ -99,10 +107,25 @@ class ApiDependencies:
         images = ImageService()
         invocation_cache = MemoryInvocationCache(max_cache_size=config.node_cache_size)
         tensors = ObjectSerializerForwardCache(
-            ObjectSerializerDisk[torch.Tensor](output_folder / "tensors", ephemeral=True)
+            ObjectSerializerDisk[torch.Tensor](
+                output_folder / "tensors",
+                safe_globals=[torch.Tensor],
+                ephemeral=True,
+            ),
+            max_cache_size=0,
         )
         conditioning = ObjectSerializerForwardCache(
-            ObjectSerializerDisk[ConditioningFieldData](output_folder / "conditioning", ephemeral=True)
+            ObjectSerializerDisk[ConditioningFieldData](
+                output_folder / "conditioning",
+                safe_globals=[
+                    ConditioningFieldData,
+                    BasicConditioningInfo,
+                    SDXLConditioningInfo,
+                    FLUXConditioningInfo,
+                    SD3ConditioningInfo,
+                ],
+                ephemeral=True,
+            ),
         )
         download_queue_service = DownloadQueueService(app_config=configuration, event_bus=events)
         model_images_service = ModelImageFileStorageDisk(model_images_folder / "model_images")
@@ -120,6 +143,7 @@ class ApiDependencies:
         workflow_records = SqliteWorkflowRecordsStorage(db=db)
         style_preset_records = SqliteStylePresetRecordsStorage(db=db)
         style_preset_image_files = StylePresetImageFileStorageDisk(style_presets_folder / "images")
+        workflow_thumbnails = WorkflowThumbnailFileStorageDisk(workflow_thumbnails_folder)

         services = InvocationServices(
             board_image_records=board_image_records,
@@ -147,6 +171,7 @@ class ApiDependencies:
             conditioning=conditioning,
             style_preset_records=style_preset_records,
             style_preset_image_files=style_preset_image_files,
+            workflow_thumbnails=workflow_thumbnails,
         )

         ApiDependencies.invoker = Invoker(services)
```

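The new `safe_globals` arguments suggest the serializers now load pickled objects under torch's restricted `weights_only` unpickler, which rejects any class not explicitly allow-listed. A standalone sketch of that mechanism, not InvokeAI's actual `ObjectSerializerDisk` code; it assumes torch >= 2.5 for the `safe_globals` context manager:

```python
from dataclasses import dataclass

import torch


@dataclass
class Payload:
    # Stand-in for a conditioning-info class; purely illustrative.
    value: torch.Tensor


torch.save(Payload(value=torch.zeros(2)), "payload.pt")

# Without the allow-list, weights_only=True refuses to unpickle custom classes.
with torch.serialization.safe_globals([Payload]):
    restored = torch.load("payload.pt", weights_only=True)

print(restored.value.shape)  # torch.Size([2])
```
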
124
invokeai/app/api/extract_metadata_from_image.py
Normal file
124
invokeai/app/api/extract_metadata_from_image.py
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
import json
|
||||||
|
import logging
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
from PIL import Image
|
||||||
|
|
||||||
|
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutIDValidator
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ExtractedMetadata:
|
||||||
|
invokeai_metadata: str | None
|
||||||
|
invokeai_workflow: str | None
|
||||||
|
invokeai_graph: str | None
|
||||||
|
|
||||||
|
|
||||||
|
def extract_metadata_from_image(
|
||||||
|
pil_image: Image.Image,
|
||||||
|
invokeai_metadata_override: str | None,
|
||||||
|
invokeai_workflow_override: str | None,
|
||||||
|
invokeai_graph_override: str | None,
|
||||||
|
logger: logging.Logger,
|
||||||
|
) -> ExtractedMetadata:
|
||||||
|
"""
|
||||||
|
Extracts the "invokeai_metadata", "invokeai_workflow", and "invokeai_graph" data embedded in the PIL Image.
|
||||||
|
|
||||||
|
These items are stored as stringified JSON in the image file's metadata, so we need to do some parsing to validate
|
||||||
|
them. Once parsed, the values are returned as they came (as strings), or None if they are not present or invalid.
|
||||||
|
|
||||||
|
In some situations, we may prefer to override the values extracted from the image file with some other values.
|
||||||
|
|
||||||
|
For example, when uploading an image via API, the client can optionally provide the metadata directly in the request,
|
||||||
|
as opposed to embedding it in the image file. In this case, the client-provided metadata will be used instead of the
|
||||||
|
metadata embedded in the image file.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
pil_image: The PIL Image object.
|
||||||
|
invokeai_metadata_override: The metadata override provided by the client.
|
||||||
|
invokeai_workflow_override: The workflow override provided by the client.
|
||||||
|
invokeai_graph_override: The graph override provided by the client.
|
||||||
|
logger: The logger to use for debug logging.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ExtractedMetadata: The extracted metadata, workflow, and graph.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# The fallback value for metadata is None.
|
||||||
|
stringified_metadata: str | None = None
|
||||||
|
|
||||||
|
# Use the metadata override if provided, else attempt to extract it from the image file.
|
||||||
|
metadata_raw = invokeai_metadata_override or pil_image.info.get("invokeai_metadata", None)
|
||||||
|
|
||||||
|
# If the metadata is present in the image file, we will attempt to parse it as JSON. When we create images,
|
||||||
|
# we always store metadata as a stringified JSON dict. So, we expect it to be a string here.
|
||||||
|
+    if isinstance(metadata_raw, str):
+        try:
+            # Must be a JSON string
+            metadata_parsed = json.loads(metadata_raw)
+            # Must be a dict
+            if isinstance(metadata_parsed, dict):
+                # Looks good, overwrite the fallback value
+                stringified_metadata = metadata_raw
+        except Exception as e:
+            logger.debug(f"Failed to parse metadata for uploaded image, {e}")
+            pass
+
+    # We expect the workflow, if embedded in the image, to be a JSON-stringified WorkflowWithoutID. We will store it
+    # as a string.
+    workflow_raw: str | None = invokeai_workflow_override or pil_image.info.get("invokeai_workflow", None)
+
+    # The fallback value for workflow is None.
+    stringified_workflow: str | None = None
+
+    # If the workflow is present in the image file, we will attempt to parse it as JSON. When we create images, we
+    # always store workflows as a stringified JSON WorkflowWithoutID. So, we expect it to be a string here.
+    if isinstance(workflow_raw, str):
+        try:
+            # Validate the workflow JSON before storing it
+            WorkflowWithoutIDValidator.validate_json(workflow_raw)
+            # Looks good, overwrite the fallback value
+            stringified_workflow = workflow_raw
+        except Exception:
+            logger.debug("Failed to parse workflow for uploaded image")
+            pass
+
+    # We expect the graph, if embedded in the image, to be a JSON-stringified Graph. We will store it as a
+    # string.
+    graph_raw: str | None = invokeai_graph_override or pil_image.info.get("invokeai_graph", None)
+
+    # The fallback value for graph is None.
+    stringified_graph: str | None = None
+
+    # If the graph is present in the image file, we will attempt to parse it as JSON. When we create images, we
+    # always store graphs as a stringified JSON Graph. So, we expect it to be a string here.
+    if isinstance(graph_raw, str):
+        try:
+            # TODO(psyche): Due to pydantic's handling of None values, it is possible for the graph to fail validation,
+            # even if it is a direct dump of a valid graph. Node fields in the graph are allowed to be unset if
+            # they have incoming connections, but something about the ser/de process cannot adequately handle this.
+            #
+            # In lieu of fixing the graph validation, we will just do a simple check here to see if the graph is a dict
+            # with the correct keys. This is not a perfect solution, but it should be good enough for now.
+
+            # FIXME: Validate the graph JSON before storing it
+            # Graph.model_validate_json(graph_raw)
+
+            # Crappy workaround to validate JSON
+            graph_parsed = json.loads(graph_raw)
+            if not isinstance(graph_parsed, dict):
+                raise ValueError("Not a dict")
+            if not isinstance(graph_parsed.get("nodes", None), dict):
+                raise ValueError("'nodes' is not a dict")
+            if not isinstance(graph_parsed.get("edges", None), list):
+                raise ValueError("'edges' is not a list")
+
+            # Looks good, overwrite the fallback value
+            stringified_graph = graph_raw
+        except Exception as e:
+            logger.debug(f"Failed to parse graph for uploaded image, {e}")
+            pass
+
+    return ExtractedMetadata(
+        invokeai_metadata=stringified_metadata, invokeai_workflow=stringified_workflow, invokeai_graph=stringified_graph
+    )
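The helper above is exercised by the updated upload route later in this diff. As a quick orientation, a minimal usage sketch — the image path and logger wiring are illustrative; the keyword arguments and return fields are exactly those shown in this diff:

    from PIL import Image

    from invokeai.app.api.extract_metadata_from_image import extract_metadata_from_image
    from invokeai.backend.util.logging import InvokeAILogger

    pil_image = Image.open("upload.png")  # illustrative input
    extracted = extract_metadata_from_image(
        pil_image=pil_image,
        invokeai_metadata_override=None,  # or a stringified JSON dict to take precedence over embedded metadata
        invokeai_workflow_override=None,
        invokeai_graph_override=None,
        logger=InvokeAILogger.get_logger(),
    )
    # Each attribute is a validated JSON string, or None if absent/invalid.
    print(extracted.invokeai_metadata, extracted.invokeai_workflow, extracted.invokeai_graph)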
@@ -12,6 +12,7 @@ from pydantic import BaseModel, Field

 from invokeai.app.api.dependencies import ApiDependencies
 from invokeai.app.invocations.upscale import ESRGAN_MODELS
+from invokeai.app.services.config.config_default import InvokeAIAppConfig, get_config
 from invokeai.app.services.invocation_cache.invocation_cache_common import InvocationCacheStatus
 from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch
 from invokeai.backend.util.logging import logging
@@ -99,7 +100,7 @@ async def get_app_deps() -> AppDependencyVersions:


 @app_router.get("/config", operation_id="get_config", status_code=200, response_model=AppConfig)
-async def get_config() -> AppConfig:
+async def get_config_() -> AppConfig:
     infill_methods = ["lama", "tile", "cv2", "color"]  # TODO: add mosaic back
     if PatchMatch.patchmatch_available():
         infill_methods.append("patchmatch")
@@ -121,6 +122,21 @@ async def get_config() -> AppConfig:
     )


+class InvokeAIAppConfigWithSetFields(BaseModel):
+    """InvokeAI App Config with model fields set"""
+
+    set_fields: set[str] = Field(description="The set fields")
+    config: InvokeAIAppConfig = Field(description="The InvokeAI App Config")
+
+
+@app_router.get(
+    "/runtime_config", operation_id="get_runtime_config", status_code=200, response_model=InvokeAIAppConfigWithSetFields
+)
+async def get_runtime_config() -> InvokeAIAppConfigWithSetFields:
+    config = get_config()
+    return InvokeAIAppConfigWithSetFields(set_fields=config.model_fields_set, config=config)
+
+
 @app_router.get(
     "/logging",
     operation_id="get_log_level",
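A hedged sketch of consuming the new endpoint. The host, port, and /api prefix are assumptions (the router prefix is not shown in this diff); the response shape follows InvokeAIAppConfigWithSetFields above:

    import requests

    resp = requests.get("http://127.0.0.1:9090/api/v1/app/runtime_config")  # assumed base URL
    payload = resp.json()
    # "set_fields" names the config keys that were explicitly set; "config" is the full resolved config.
    print(payload["set_fields"])
    print(payload["config"])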
@@ -6,9 +6,10 @@ from fastapi import BackgroundTasks, Body, HTTPException, Path, Query, Request,
 from fastapi.responses import FileResponse
 from fastapi.routing import APIRouter
 from PIL import Image
-from pydantic import BaseModel, Field, JsonValue
+from pydantic import BaseModel, Field

 from invokeai.app.api.dependencies import ApiDependencies
+from invokeai.app.api.extract_metadata_from_image import extract_metadata_from_image
 from invokeai.app.invocations.fields import MetadataField
 from invokeai.app.services.image_records.image_records_common import (
     ImageCategory,
@@ -45,18 +46,16 @@ async def upload_image(
     board_id: Optional[str] = Query(default=None, description="The board to add this image to, if any"),
     session_id: Optional[str] = Query(default=None, description="The session ID associated with this upload, if any"),
     crop_visible: Optional[bool] = Query(default=False, description="Whether to crop the image"),
-    metadata: Optional[JsonValue] = Body(
-        default=None, description="The metadata to associate with the image", embed=True
+    metadata: Optional[str] = Body(
+        default=None,
+        description="The metadata to associate with the image, must be a stringified JSON dict",
+        embed=True,
     ),
 ) -> ImageDTO:
     """Uploads an image"""
     if not file.content_type or not file.content_type.startswith("image"):
         raise HTTPException(status_code=415, detail="Not an image")

-    _metadata = None
-    _workflow = None
-    _graph = None
-
     contents = await file.read()
     try:
         pil_image = Image.open(io.BytesIO(contents))
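For illustration only, a hedged client-side sketch of the tightened contract (metadata is now a stringified JSON dict rather than arbitrary JSON). The URL and the form-encoding details are assumptions, not taken from this diff:

    import json

    import requests

    with open("image.png", "rb") as f:
        resp = requests.post(
            "http://127.0.0.1:9090/api/v1/images/upload",  # assumed route
            params={"image_category": "user", "is_intermediate": "false"},
            files={"file": ("image.png", f, "image/png")},
            data={"metadata": json.dumps({"positive_prompt": "a cat"})},  # must parse to a JSON dict
        )
    image_dto = resp.json()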
@@ -67,30 +66,13 @@ async def upload_image(
         ApiDependencies.invoker.services.logger.error(traceback.format_exc())
         raise HTTPException(status_code=415, detail="Failed to read image")

-    # TODO: retain non-invokeai metadata on upload?
-    # attempt to parse metadata from image
-    metadata_raw = metadata if isinstance(metadata, str) else pil_image.info.get("invokeai_metadata", None)
-    if isinstance(metadata_raw, str):
-        _metadata = metadata_raw
-    else:
-        ApiDependencies.invoker.services.logger.debug("Failed to parse metadata for uploaded image")
-        pass
-
-    # attempt to parse workflow from image
-    workflow_raw = pil_image.info.get("invokeai_workflow", None)
-    if isinstance(workflow_raw, str):
-        _workflow = workflow_raw
-    else:
-        ApiDependencies.invoker.services.logger.debug("Failed to parse workflow for uploaded image")
-        pass
-
-    # attempt to extract graph from image
-    graph_raw = pil_image.info.get("invokeai_graph", None)
-    if isinstance(graph_raw, str):
-        _graph = graph_raw
-    else:
-        ApiDependencies.invoker.services.logger.debug("Failed to parse graph for uploaded image")
-        pass
+    extracted_metadata = extract_metadata_from_image(
+        pil_image=pil_image,
+        invokeai_metadata_override=metadata,
+        invokeai_workflow_override=None,
+        invokeai_graph_override=None,
+        logger=ApiDependencies.invoker.services.logger,
+    )

     try:
         image_dto = ApiDependencies.invoker.services.images.create(
@@ -99,9 +81,9 @@ async def upload_image(
             image_category=image_category,
             session_id=session_id,
             board_id=board_id,
-            metadata=_metadata,
-            workflow=_workflow,
-            graph=_graph,
+            metadata=extracted_metadata.invokeai_metadata,
+            workflow=extracted_metadata.invokeai_workflow,
+            graph=extracted_metadata.invokeai_graph,
             is_intermediate=is_intermediate,
         )

@@ -114,6 +96,22 @@ async def upload_image(
         raise HTTPException(status_code=500, detail="Failed to create image")


+class ImageUploadEntry(BaseModel):
+    image_dto: ImageDTO = Body(description="The image DTO")
+    presigned_url: str = Body(description="The URL to get the presigned URL for the image upload")
+
+
+@images_router.post("/", operation_id="create_image_upload_entry")
+async def create_image_upload_entry(
+    width: int = Body(description="The width of the image"),
+    height: int = Body(description="The height of the image"),
+    board_id: Optional[str] = Body(default=None, description="The board to add this image to, if any"),
+) -> ImageUploadEntry:
+    """Uploads an image from a URL, not implemented"""
+
+    raise HTTPException(status_code=501, detail="Not implemented")
+
+
 @images_router.delete("/i/{image_name}", operation_id="delete_image")
 async def delete_image(
     image_name: str = Path(description="The name of the image to delete"),
@@ -28,12 +28,10 @@ from invokeai.app.services.model_records import (
     UnknownModelException,
 )
 from invokeai.app.util.suppress_output import SuppressOutput
+from invokeai.backend.model_manager import BaseModelType, ModelFormat, ModelType
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
-    BaseModelType,
     MainCheckpointConfig,
-    ModelFormat,
-    ModelType,
 )
 from invokeai.backend.model_manager.load.model_cache.cache_stats import CacheStats
 from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch
@@ -1,6 +1,10 @@
+import io
+import traceback
 from typing import Optional

-from fastapi import APIRouter, Body, HTTPException, Path, Query
+from fastapi import APIRouter, Body, File, HTTPException, Path, Query, UploadFile
+from fastapi.responses import FileResponse
+from PIL import Image

 from invokeai.app.api.dependencies import ApiDependencies
 from invokeai.app.services.shared.pagination import PaginatedResults
@@ -10,11 +14,14 @@ from invokeai.app.services.workflow_records.workflow_records_common import (
     WorkflowCategory,
     WorkflowNotFoundError,
     WorkflowRecordDTO,
-    WorkflowRecordListItemDTO,
+    WorkflowRecordListItemWithThumbnailDTO,
     WorkflowRecordOrderBy,
+    WorkflowRecordWithThumbnailDTO,
     WorkflowWithoutID,
 )
+from invokeai.app.services.workflow_thumbnails.workflow_thumbnails_common import WorkflowThumbnailFileNotFoundException

+IMAGE_MAX_AGE = 31536000
 workflows_router = APIRouter(prefix="/v1/workflows", tags=["workflows"])


@@ -22,15 +29,17 @@ workflows_router = APIRouter(prefix="/v1/workflows", tags=["workflows"])
     "/i/{workflow_id}",
     operation_id="get_workflow",
     responses={
-        200: {"model": WorkflowRecordDTO},
+        200: {"model": WorkflowRecordWithThumbnailDTO},
     },
 )
 async def get_workflow(
     workflow_id: str = Path(description="The workflow to get"),
-) -> WorkflowRecordDTO:
+) -> WorkflowRecordWithThumbnailDTO:
     """Gets a workflow"""
     try:
-        return ApiDependencies.invoker.services.workflow_records.get(workflow_id)
+        thumbnail_url = ApiDependencies.invoker.services.workflow_thumbnails.get_url(workflow_id)
+        workflow = ApiDependencies.invoker.services.workflow_records.get(workflow_id)
+        return WorkflowRecordWithThumbnailDTO(thumbnail_url=thumbnail_url, **workflow.model_dump())
     except WorkflowNotFoundError:
         raise HTTPException(status_code=404, detail="Workflow not found")

@@ -57,6 +66,11 @@ async def delete_workflow(
     workflow_id: str = Path(description="The workflow to delete"),
 ) -> None:
     """Deletes a workflow"""
+    try:
+        ApiDependencies.invoker.services.workflow_thumbnails.delete(workflow_id)
+    except WorkflowThumbnailFileNotFoundException:
+        # It's OK if the workflow has no thumbnail file. We can still delete the workflow.
+        pass
     ApiDependencies.invoker.services.workflow_records.delete(workflow_id)


@@ -78,7 +92,7 @@ async def create_workflow(
     "/",
     operation_id="list_workflows",
     responses={
-        200: {"model": PaginatedResults[WorkflowRecordListItemDTO]},
+        200: {"model": PaginatedResults[WorkflowRecordListItemWithThumbnailDTO]},
     },
 )
 async def list_workflows(
@@ -88,10 +102,158 @@ async def list_workflows(
         default=WorkflowRecordOrderBy.Name, description="The attribute to order by"
     ),
     direction: SQLiteDirection = Query(default=SQLiteDirection.Ascending, description="The direction to order by"),
-    category: WorkflowCategory = Query(default=WorkflowCategory.User, description="The category of workflow to get"),
+    categories: Optional[list[WorkflowCategory]] = Query(default=None, description="The categories of workflow to get"),
+    tags: Optional[list[str]] = Query(default=None, description="The tags of workflow to get"),
     query: Optional[str] = Query(default=None, description="The text to query by (matches name and description)"),
-) -> PaginatedResults[WorkflowRecordListItemDTO]:
+    has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
+) -> PaginatedResults[WorkflowRecordListItemWithThumbnailDTO]:
     """Gets a page of workflows"""
-    return ApiDependencies.invoker.services.workflow_records.get_many(
-        order_by=order_by, direction=direction, page=page, per_page=per_page, query=query, category=category
+    workflows_with_thumbnails: list[WorkflowRecordListItemWithThumbnailDTO] = []
+    workflows = ApiDependencies.invoker.services.workflow_records.get_many(
+        order_by=order_by,
+        direction=direction,
+        page=page,
+        per_page=per_page,
+        query=query,
+        categories=categories,
+        tags=tags,
+        has_been_opened=has_been_opened,
     )
+    for workflow in workflows.items:
+        workflows_with_thumbnails.append(
+            WorkflowRecordListItemWithThumbnailDTO(
+                thumbnail_url=ApiDependencies.invoker.services.workflow_thumbnails.get_url(workflow.workflow_id),
+                **workflow.model_dump(),
+            )
+        )
+    return PaginatedResults[WorkflowRecordListItemWithThumbnailDTO](
+        items=workflows_with_thumbnails,
+        total=workflows.total,
+        page=workflows.page,
+        pages=workflows.pages,
+        per_page=workflows.per_page,
+    )
+
+
+@workflows_router.put(
+    "/i/{workflow_id}/thumbnail",
+    operation_id="set_workflow_thumbnail",
+    responses={
+        200: {"model": WorkflowRecordDTO},
+    },
+)
+async def set_workflow_thumbnail(
+    workflow_id: str = Path(description="The workflow to update"),
+    image: UploadFile = File(description="The image file to upload"),
+):
+    """Sets a workflow's thumbnail image"""
+    try:
+        ApiDependencies.invoker.services.workflow_records.get(workflow_id)
+    except WorkflowNotFoundError:
+        raise HTTPException(status_code=404, detail="Workflow not found")
+
+    if not image.content_type or not image.content_type.startswith("image"):
+        raise HTTPException(status_code=415, detail="Not an image")
+
+    contents = await image.read()
+    try:
+        pil_image = Image.open(io.BytesIO(contents))
+
+    except Exception:
+        ApiDependencies.invoker.services.logger.error(traceback.format_exc())
+        raise HTTPException(status_code=415, detail="Failed to read image")
+
+    try:
+        ApiDependencies.invoker.services.workflow_thumbnails.save(workflow_id, pil_image)
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@workflows_router.delete(
+    "/i/{workflow_id}/thumbnail",
+    operation_id="delete_workflow_thumbnail",
+    responses={
+        200: {"model": WorkflowRecordDTO},
+    },
+)
+async def delete_workflow_thumbnail(
+    workflow_id: str = Path(description="The workflow to update"),
+):
+    """Removes a workflow's thumbnail image"""
+    try:
+        ApiDependencies.invoker.services.workflow_records.get(workflow_id)
+    except WorkflowNotFoundError:
+        raise HTTPException(status_code=404, detail="Workflow not found")
+
+    try:
+        ApiDependencies.invoker.services.workflow_thumbnails.delete(workflow_id)
+    except ValueError as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@workflows_router.get(
+    "/i/{workflow_id}/thumbnail",
+    operation_id="get_workflow_thumbnail",
+    responses={
+        200: {
+            "description": "The workflow thumbnail was fetched successfully",
+        },
+        400: {"description": "Bad request"},
+        404: {"description": "The workflow thumbnail could not be found"},
+    },
+    status_code=200,
+)
+async def get_workflow_thumbnail(
+    workflow_id: str = Path(description="The id of the workflow thumbnail to get"),
+) -> FileResponse:
+    """Gets a workflow's thumbnail image"""

+    try:
+        path = ApiDependencies.invoker.services.workflow_thumbnails.get_path(workflow_id)
+
+        response = FileResponse(
+            path,
+            media_type="image/png",
+            filename=workflow_id + ".png",
+            content_disposition_type="inline",
+        )
+        response.headers["Cache-Control"] = f"max-age={IMAGE_MAX_AGE}"
+        return response
+    except Exception:
+        raise HTTPException(status_code=404)
+
+
+@workflows_router.get("/counts_by_tag", operation_id="get_counts_by_tag")
+async def get_counts_by_tag(
+    tags: list[str] = Query(description="The tags to get counts for"),
+    categories: Optional[list[WorkflowCategory]] = Query(default=None, description="The categories to include"),
+    has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
+) -> dict[str, int]:
+    """Counts workflows by tag"""
+
+    return ApiDependencies.invoker.services.workflow_records.counts_by_tag(
+        tags=tags, categories=categories, has_been_opened=has_been_opened
+    )
+
+
+@workflows_router.get("/counts_by_category", operation_id="counts_by_category")
+async def counts_by_category(
+    categories: list[WorkflowCategory] = Query(description="The categories to include"),
+    has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
+) -> dict[str, int]:
+    """Counts workflows by category"""
+
+    return ApiDependencies.invoker.services.workflow_records.counts_by_category(
+        categories=categories, has_been_opened=has_been_opened
+    )
+
+
+@workflows_router.put(
+    "/i/{workflow_id}/opened_at",
+    operation_id="update_opened_at",
+)
+async def update_opened_at(
+    workflow_id: str = Path(description="The workflow to update"),
+) -> None:
+    """Updates the opened_at field of a workflow"""
+    ApiDependencies.invoker.services.workflow_records.update_opened_at(workflow_id)
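To round out the new thumbnail routes, a hedged client sketch. The base URL, /api prefix, and the example workflow id are assumptions; the paths and the multipart field name "image" come from the routes above:

    import requests

    base = "http://127.0.0.1:9090/api/v1/workflows"  # assumed base URL
    workflow_id = "some-workflow-id"  # hypothetical

    # Set the thumbnail
    with open("thumb.png", "rb") as f:
        requests.put(f"{base}/i/{workflow_id}/thumbnail", files={"image": ("thumb.png", f, "image/png")})

    # Fetch it back; served inline as PNG with a one-year Cache-Control header
    png_bytes = requests.get(f"{base}/i/{workflow_id}/thumbnail").content

    # Remove it
    requests.delete(f"{base}/i/{workflow_id}/thumbnail")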
@@ -8,6 +8,7 @@ import sys
 import warnings
 from abc import ABC, abstractmethod
 from enum import Enum
+from functools import lru_cache
 from inspect import signature
 from typing import (
     TYPE_CHECKING,
@@ -27,7 +28,6 @@ import semver
 from pydantic import BaseModel, ConfigDict, Field, TypeAdapter, create_model
 from pydantic.fields import FieldInfo
 from pydantic_core import PydanticUndefined
-from typing_extensions import TypeAliasType

 from invokeai.app.invocations.fields import (
     FieldKind,
@@ -100,37 +100,6 @@ class BaseInvocationOutput(BaseModel):
     All invocation outputs must use the `@invocation_output` decorator to provide their unique type.
     """

-    _output_classes: ClassVar[set[BaseInvocationOutput]] = set()
-    _typeadapter: ClassVar[Optional[TypeAdapter[Any]]] = None
-    _typeadapter_needs_update: ClassVar[bool] = False
-
-    @classmethod
-    def register_output(cls, output: BaseInvocationOutput) -> None:
-        """Registers an invocation output."""
-        cls._output_classes.add(output)
-        cls._typeadapter_needs_update = True
-
-    @classmethod
-    def get_outputs(cls) -> Iterable[BaseInvocationOutput]:
-        """Gets all invocation outputs."""
-        return cls._output_classes
-
-    @classmethod
-    def get_typeadapter(cls) -> TypeAdapter[Any]:
-        """Gets a pydantc TypeAdapter for the union of all invocation output types."""
-        if not cls._typeadapter or cls._typeadapter_needs_update:
-            AnyInvocationOutput = TypeAliasType(
-                "AnyInvocationOutput", Annotated[Union[tuple(cls._output_classes)], Field(discriminator="type")]
-            )
-            cls._typeadapter = TypeAdapter(AnyInvocationOutput)
-            cls._typeadapter_needs_update = False
-        return cls._typeadapter
-
-    @classmethod
-    def get_output_types(cls) -> Iterable[str]:
-        """Gets all invocation output types."""
-        return (i.get_type() for i in BaseInvocationOutput.get_outputs())
-
     @staticmethod
     def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseInvocationOutput]) -> None:
         """Adds various UI-facing attributes to the invocation output's OpenAPI schema."""
@@ -173,76 +142,16 @@ class BaseInvocation(ABC, BaseModel):
     All invocations must use the `@invocation` decorator to provide their unique type.
     """

-    _invocation_classes: ClassVar[set[BaseInvocation]] = set()
-    _typeadapter: ClassVar[Optional[TypeAdapter[Any]]] = None
-    _typeadapter_needs_update: ClassVar[bool] = False
-
     @classmethod
     def get_type(cls) -> str:
         """Gets the invocation's type, as provided by the `@invocation` decorator."""
         return cls.model_fields["type"].default

-    @classmethod
-    def register_invocation(cls, invocation: BaseInvocation) -> None:
-        """Registers an invocation."""
-        cls._invocation_classes.add(invocation)
-        cls._typeadapter_needs_update = True
-
-    @classmethod
-    def get_typeadapter(cls) -> TypeAdapter[Any]:
-        """Gets a pydantc TypeAdapter for the union of all invocation types."""
-        if not cls._typeadapter or cls._typeadapter_needs_update:
-            AnyInvocation = TypeAliasType(
-                "AnyInvocation", Annotated[Union[tuple(cls.get_invocations())], Field(discriminator="type")]
-            )
-            cls._typeadapter = TypeAdapter(AnyInvocation)
-            cls._typeadapter_needs_update = False
-        return cls._typeadapter
-
-    @classmethod
-    def invalidate_typeadapter(cls) -> None:
-        """Invalidates the typeadapter, forcing it to be rebuilt on next access. If the invocation allowlist or
-        denylist is changed, this should be called to ensure the typeadapter is updated and validation respects
-        the updated allowlist and denylist."""
-        cls._typeadapter_needs_update = True
-
-    @classmethod
-    def get_invocations(cls) -> Iterable[BaseInvocation]:
-        """Gets all invocations, respecting the allowlist and denylist."""
-        app_config = get_config()
-        allowed_invocations: set[BaseInvocation] = set()
-        for sc in cls._invocation_classes:
-            invocation_type = sc.get_type()
-            is_in_allowlist = (
-                invocation_type in app_config.allow_nodes if isinstance(app_config.allow_nodes, list) else True
-            )
-            is_in_denylist = (
-                invocation_type in app_config.deny_nodes if isinstance(app_config.deny_nodes, list) else False
-            )
-            if is_in_allowlist and not is_in_denylist:
-                allowed_invocations.add(sc)
-        return allowed_invocations
-
-    @classmethod
-    def get_invocations_map(cls) -> dict[str, BaseInvocation]:
-        """Gets a map of all invocation types to their invocation classes."""
-        return {i.get_type(): i for i in BaseInvocation.get_invocations()}
-
-    @classmethod
-    def get_invocation_types(cls) -> Iterable[str]:
-        """Gets all invocation types."""
-        return (i.get_type() for i in BaseInvocation.get_invocations())
-
     @classmethod
     def get_output_annotation(cls) -> BaseInvocationOutput:
         """Gets the invocation's output annotation (i.e. the return annotation of its `invoke()` method)."""
         return signature(cls.invoke).return_annotation

-    @classmethod
-    def get_invocation_for_type(cls, invocation_type: str) -> BaseInvocation | None:
-        """Gets the invocation class for a given invocation type."""
-        return cls.get_invocations_map().get(invocation_type)
-
     @staticmethod
     def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseInvocation]) -> None:
         """Adds various UI-facing attributes to the invocation's OpenAPI schema."""
@@ -340,6 +249,105 @@ class BaseInvocation(ABC, BaseModel):
 TBaseInvocation = TypeVar("TBaseInvocation", bound=BaseInvocation)


+class InvocationRegistry:
+    _invocation_classes: ClassVar[set[type[BaseInvocation]]] = set()
+    _output_classes: ClassVar[set[type[BaseInvocationOutput]]] = set()
+
+    @classmethod
+    def register_invocation(cls, invocation: type[BaseInvocation]) -> None:
+        """Registers an invocation."""
+        cls._invocation_classes.add(invocation)
+        cls.invalidate_invocation_typeadapter()
+
+    @classmethod
+    @lru_cache(maxsize=1)
+    def get_invocation_typeadapter(cls) -> TypeAdapter[Any]:
+        """Gets a pydantic TypeAdapter for the union of all invocation types.
+
+        This is used to parse serialized invocations into the correct invocation class.
+
+        This method is cached to avoid rebuilding the TypeAdapter on every access. If the invocation allowlist or
+        denylist is changed, the cache should be cleared to ensure the TypeAdapter is updated and validation respects
+        the updated allowlist and denylist.
+
+        @see https://docs.pydantic.dev/latest/concepts/type_adapter/
+        """
+        return TypeAdapter(Annotated[Union[tuple(cls.get_invocation_classes())], Field(discriminator="type")])
+
+    @classmethod
+    def invalidate_invocation_typeadapter(cls) -> None:
+        """Invalidates the cached invocation type adapter."""
+        cls.get_invocation_typeadapter.cache_clear()
+
+    @classmethod
+    def get_invocation_classes(cls) -> Iterable[type[BaseInvocation]]:
+        """Gets all invocations, respecting the allowlist and denylist."""
+        app_config = get_config()
+        allowed_invocations: set[type[BaseInvocation]] = set()
+        for sc in cls._invocation_classes:
+            invocation_type = sc.get_type()
+            is_in_allowlist = (
+                invocation_type in app_config.allow_nodes if isinstance(app_config.allow_nodes, list) else True
+            )
+            is_in_denylist = (
+                invocation_type in app_config.deny_nodes if isinstance(app_config.deny_nodes, list) else False
+            )
+            if is_in_allowlist and not is_in_denylist:
+                allowed_invocations.add(sc)
+        return allowed_invocations
+
+    @classmethod
+    def get_invocations_map(cls) -> dict[str, type[BaseInvocation]]:
+        """Gets a map of all invocation types to their invocation classes."""
+        return {i.get_type(): i for i in cls.get_invocation_classes()}
+
+    @classmethod
+    def get_invocation_types(cls) -> Iterable[str]:
+        """Gets all invocation types."""
+        return (i.get_type() for i in cls.get_invocation_classes())
+
+    @classmethod
+    def get_invocation_for_type(cls, invocation_type: str) -> type[BaseInvocation] | None:
+        """Gets the invocation class for a given invocation type."""
+        return cls.get_invocations_map().get(invocation_type)
+
+    @classmethod
+    def register_output(cls, output: "type[TBaseInvocationOutput]") -> None:
+        """Registers an invocation output."""
+        cls._output_classes.add(output)
+        cls.invalidate_output_typeadapter()
+
+    @classmethod
+    def get_output_classes(cls) -> Iterable[type[BaseInvocationOutput]]:
+        """Gets all invocation outputs."""
+        return cls._output_classes
+
+    @classmethod
+    @lru_cache(maxsize=1)
+    def get_output_typeadapter(cls) -> TypeAdapter[Any]:
+        """Gets a pydantic TypeAdapter for the union of all invocation output types.
+
+        This is used to parse serialized invocation outputs into the correct invocation output class.
+
+        This method is cached to avoid rebuilding the TypeAdapter on every access. If the invocation allowlist or
+        denylist is changed, the cache should be cleared to ensure the TypeAdapter is updated and validation respects
+        the updated allowlist and denylist.
+
+        @see https://docs.pydantic.dev/latest/concepts/type_adapter/
+        """
+        return TypeAdapter(Annotated[Union[tuple(cls._output_classes)], Field(discriminator="type")])
+
+    @classmethod
+    def invalidate_output_typeadapter(cls) -> None:
+        """Invalidates the cached invocation output type adapter."""
+        cls.get_output_typeadapter.cache_clear()
+
+    @classmethod
+    def get_output_types(cls) -> Iterable[str]:
+        """Gets all invocation output types."""
+        return (i.get_type() for i in cls.get_output_classes())
+
+
 RESERVED_NODE_ATTRIBUTE_FIELD_NAMES = {
     "id",
     "is_intermediate",
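A hedged sketch of how a node author interacts with the new registry — the @invocation decorator (updated in the hunks below) performs the registration; the node itself is hypothetical:

    from invokeai.app.invocations.baseinvocation import BaseInvocation, InvocationRegistry, invocation
    from invokeai.app.invocations.fields import InputField
    from invokeai.app.invocations.primitives import StringOutput

    @invocation("shout", title="Shout", tags=["example"], category="string", version="1.0.0")
    class ShoutInvocation(BaseInvocation):
        """Hypothetical node: upper-cases a string."""

        text: str = InputField(default="", description="Text to shout")

        def invoke(self, context) -> StringOutput:
            return StringOutput(value=self.text.upper())

    # Registration cleared the lru_cache'd TypeAdapter, so the next call rebuilds
    # the discriminated union with the new node included.
    assert InvocationRegistry.get_invocation_for_type("shout") is ShoutInvocation
    adapter = InvocationRegistry.get_invocation_typeadapter()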
@@ -417,7 +425,7 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None

         ui_type = field.json_schema_extra.get("ui_type", None)
         if isinstance(ui_type, str) and ui_type.startswith("DEPRECATED_"):
-            logger.warn(f"\"UIType.{ui_type.split('_')[-1]}\" is deprecated, ignoring")
+            logger.warn(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
             field.json_schema_extra.pop("ui_type")
     return None

@@ -453,8 +461,8 @@ def invocation(
         node_pack = cls.__module__.split(".")[0]

         # Handle the case where an existing node is being clobbered by the one we are registering
-        if invocation_type in BaseInvocation.get_invocation_types():
-            clobbered_invocation = BaseInvocation.get_invocation_for_type(invocation_type)
+        if invocation_type in InvocationRegistry.get_invocation_types():
+            clobbered_invocation = InvocationRegistry.get_invocation_for_type(invocation_type)
             # This should always be true - we just checked if the invocation type was in the set
             assert clobbered_invocation is not None

@@ -539,8 +547,7 @@ def invocation(
         )
         cls.__doc__ = docstring

-        # TODO: how to type this correctly? it's typed as ModelMetaclass, a private class in pydantic
-        BaseInvocation.register_invocation(cls)  # type: ignore
+        InvocationRegistry.register_invocation(cls)

         return cls

@@ -565,7 +572,7 @@ def invocation_output(
         if re.compile(r"^\S+$").match(output_type) is None:
             raise ValueError(f'"output_type" must consist of non-whitespace characters, got "{output_type}"')

-        if output_type in BaseInvocationOutput.get_output_types():
+        if output_type in InvocationRegistry.get_output_types():
             raise ValueError(f'Invocation type "{output_type}" already exists')

         validate_fields(cls.model_fields, output_type)

@@ -586,7 +593,7 @@ def invocation_output(
         )
         cls.__doc__ = docstring

-        BaseInvocationOutput.register_output(cls)  # type: ignore # TODO: how to type this correctly?
+        InvocationRegistry.register_output(cls)

         return cls

@@ -40,10 +40,10 @@ from invokeai.backend.util.devices import TorchDevice

 @invocation(
     "compel",
-    title="Prompt",
+    title="Prompt - SD1.5",
     tags=["prompt", "compel"],
     category="conditioning",
-    version="1.2.0",
+    version="1.2.1",
 )
 class CompelInvocation(BaseInvocation):
     """Parse prompt using compel package to conditioning."""

@@ -233,10 +233,10 @@ class SDXLPromptInvocationBase:

 @invocation(
     "sdxl_compel_prompt",
-    title="SDXL Prompt",
+    title="Prompt - SDXL",
     tags=["sdxl", "compel", "prompt"],
     category="conditioning",
-    version="1.2.0",
+    version="1.2.1",
 )
 class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
     """Parse prompt using compel package to conditioning."""

@@ -327,10 +327,10 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):

 @invocation(
     "sdxl_refiner_compel_prompt",
-    title="SDXL Refiner Prompt",
+    title="Prompt - SDXL Refiner",
     tags=["sdxl", "compel", "prompt"],
     category="conditioning",
-    version="1.1.1",
+    version="1.1.2",
 )
 class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
     """Parse prompt using compel package to conditioning."""

@@ -376,10 +376,10 @@ class CLIPSkipInvocationOutput(BaseInvocationOutput):

 @invocation(
     "clip_skip",
-    title="CLIP Skip",
+    title="Apply CLIP Skip - SD1.5, SDXL",
    tags=["clipskip", "clip", "skip"],
     category="conditioning",
-    version="1.1.0",
+    version="1.1.1",
 )
 class CLIPSkipInvocation(BaseInvocation):
     """Skip layers in clip text_encoder model."""

@@ -513,7 +513,7 @@ def log_tokenization_for_text(
             usedTokens += 1

     if usedTokens > 0:
-        print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')
+        print(f"\n>> [TOKENLOG] Tokens {display_label or ''} ({usedTokens}):")
         print(f"{tokenized}\x1b[0m")

     if discarded != "":
@@ -87,7 +87,7 @@ class ControlOutput(BaseInvocationOutput):
     control: ControlField = OutputField(description=FieldDescriptions.control)


-@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.2")
+@invocation("controlnet", title="ControlNet - SD1.5, SDXL", tags=["controlnet"], category="controlnet", version="1.1.3")
 class ControlNetInvocation(BaseInvocation):
     """Collects ControlNet info to pass to other nodes"""

@@ -19,7 +19,8 @@ from invokeai.app.invocations.image_to_latents import ImageToLatentsInvocation
 from invokeai.app.invocations.model import UNetField, VAEField
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.model_manager import LoadedModel
-from invokeai.backend.model_manager.config import MainConfigBase, ModelVariantType
+from invokeai.backend.model_manager.config import MainConfigBase
+from invokeai.backend.model_manager.taxonomy import ModelVariantType
 from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor


@@ -1,64 +0,0 @@
-"""
-Invoke-managed custom node loader. See README.md for more information.
-"""
-
-import sys
-import traceback
-from importlib.util import module_from_spec, spec_from_file_location
-from pathlib import Path
-
-from invokeai.backend.util.logging import InvokeAILogger
-
-logger = InvokeAILogger.get_logger()
-loaded_packs: list[str] = []
-failed_packs: list[str] = []
-
-custom_nodes_dir = Path(__file__).parent
-
-for d in custom_nodes_dir.iterdir():
-    # skip files
-    if not d.is_dir():
-        continue
-
-    # skip hidden directories
-    if d.name.startswith("_") or d.name.startswith("."):
-        continue
-
-    # skip directories without an `__init__.py`
-    init = d / "__init__.py"
-    if not init.exists():
-        continue
-
-    module_name = init.parent.stem
-
-    # skip if already imported
-    if module_name in globals():
-        continue
-
-    # load the module, appending adding a suffix to identify it as a custom node pack
-    spec = spec_from_file_location(module_name, init.absolute())
-
-    if spec is None or spec.loader is None:
-        logger.warn(f"Could not load {init}")
-        continue
-
-    logger.info(f"Loading node pack {module_name}")
-
-    try:
-        module = module_from_spec(spec)
-        sys.modules[spec.name] = module
-        spec.loader.exec_module(module)
-
-        loaded_packs.append(module_name)
-    except Exception:
-        failed_packs.append(module_name)
-        full_error = traceback.format_exc()
-        logger.error(f"Failed to load node pack {module_name} (may have partially loaded):\n{full_error}")
-
-    del init, module_name
-
-loaded_count = len(loaded_packs)
-if loaded_count > 0:
-    logger.info(
-        f"Loaded {loaded_count} node pack{'s' if loaded_count != 1 else ''} from {custom_nodes_dir}: {', '.join(loaded_packs)}"
-    )
@@ -39,8 +39,8 @@ from invokeai.app.invocations.t2i_adapter import T2IAdapterField
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.controlnet_utils import prepare_control_image
 from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
-from invokeai.backend.model_manager import BaseModelType, ModelVariantType
 from invokeai.backend.model_manager.config import AnyModelConfig
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelVariantType
 from invokeai.backend.model_patcher import ModelPatcher
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
@@ -127,10 +127,10 @@ def get_scheduler(

 @invocation(
     "denoise_latents",
-    title="Denoise Latents",
+    title="Denoise - SD1.5, SDXL",
     tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
     category="latents",
-    version="1.5.3",
+    version="1.5.4",
 )
 class DenoiseLatentsInvocation(BaseInvocation):
     """Denoises noisy latents to decodable images"""
@@ -57,6 +57,9 @@ class UIType(str, Enum, metaclass=MetaEnum):
     CLIPGEmbedModel = "CLIPGEmbedModelField"
     SpandrelImageToImageModel = "SpandrelImageToImageModelField"
     ControlLoRAModel = "ControlLoRAModelField"
+    SigLipModel = "SigLipModelField"
+    FluxReduxModel = "FluxReduxModelField"
+    LlavaOnevisionModel = "LLaVAModelField"
     # endregion

     # region Misc Field Types
@@ -152,6 +155,7 @@ class FieldDescriptions:
     sdxl_refiner_model = "SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load"
     onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
     spandrel_image_to_image_model = "Image-to-Image model"
+    vllm_model = "VLLM model"
     lora_weight = "The weight at which the LoRA is applied to each model"
     compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor"
     raw_prompt = "Raw prompt text (no parsing)"
@@ -201,6 +205,9 @@ class FieldDescriptions:
     freeu_b1 = "Scaling factor for stage 1 to amplify the contributions of backbone features."
     freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features."
     instantx_control_mode = "The control mode for InstantX ControlNet union models. Ignored for other ControlNet models. The standard mapping is: canny (0), tile (1), depth (2), blur (3), pose (4), gray (5), low quality (6). Negative values will be treated as 'None'."
+    flux_redux_conditioning = "FLUX Redux conditioning tensor"
+    vllm_model = "The VLLM model to use"
+    flux_fill_conditioning = "FLUX Fill conditioning tensor"


 class ImageField(BaseModel):
@@ -259,6 +266,24 @@ class FluxConditioningField(BaseModel):
     )


+class FluxReduxConditioningField(BaseModel):
+    """A FLUX Redux conditioning tensor primitive value"""
+
+    conditioning: TensorField = Field(description="The Redux image conditioning tensor.")
+    mask: Optional[TensorField] = Field(
+        default=None,
+        description="The mask associated with this conditioning tensor. Excluded regions should be set to False, "
+        "included regions should be set to True.",
+    )
+
+
+class FluxFillConditioningField(BaseModel):
+    """A FLUX Fill conditioning field."""
+
+    image: ImageField = Field(description="The FLUX Fill reference image.")
+    mask: TensorField = Field(description="The FLUX Fill inpaint mask.")
+
+
 class SD3ConditioningField(BaseModel):
     """A conditioning tensor primitive value"""

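For orientation, a hedged sketch of constructing the two new conditioning fields. The tensor_name/image_name identifier fields are assumptions based on how TensorField and ImageField are used elsewhere in the codebase:

    from invokeai.app.invocations.fields import (
        FluxFillConditioningField,
        FluxReduxConditioningField,
        ImageField,
        TensorField,
    )

    redux = FluxReduxConditioningField(
        conditioning=TensorField(tensor_name="redux_cond"),  # assumed identifier field
        mask=None,  # optional region mask: True = included, False = excluded
    )
    fill = FluxFillConditioningField(
        image=ImageField(image_name="reference.png"),  # assumed identifier field
        mask=TensorField(tensor_name="inpaint_mask"),
    )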
@@ -1,7 +1,6 @@
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )
@@ -21,11 +20,10 @@ class FluxControlLoRALoaderOutput(BaseInvocationOutput):

 @invocation(
     "flux_control_lora_loader",
-    title="Flux Control LoRA",
+    title="Control LoRA - FLUX",
     tags=["lora", "model", "flux"],
     category="model",
-    version="1.1.0",
+    version="1.1.1",
-    classification=Classification.Prototype,
 )
 class FluxControlLoRALoaderInvocation(BaseInvocation):
     """LoRA model and Image to use with FLUX transformer generation."""
@@ -3,7 +3,6 @@ from pydantic import BaseModel, Field, field_validator, model_validator
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )
@@ -52,7 +51,6 @@ class FluxControlNetOutput(BaseInvocationOutput):
     tags=["controlnet", "flux"],
     category="controlnet",
     version="1.0.0",
-    classification=Classification.Prototype,
 )
 class FluxControlNetInvocation(BaseInvocation):
     """Collect FLUX ControlNet info to pass to other nodes."""
@@ -10,11 +10,13 @@ from PIL import Image
 from torchvision.transforms.functional import resize as tv_resize
 from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import (
     DenoiseMaskField,
     FieldDescriptions,
     FluxConditioningField,
+    FluxFillConditioningField,
+    FluxReduxConditioningField,
     ImageField,
     Input,
     InputField,
@@ -46,8 +48,8 @@ from invokeai.backend.flux.sampling_utils import (
     pack,
     unpack,
 )
-from invokeai.backend.flux.text_conditioning import FluxTextConditioning
-from invokeai.backend.model_manager.config import ModelFormat
+from invokeai.backend.flux.text_conditioning import FluxReduxConditioning, FluxTextConditioning
+from invokeai.backend.model_manager.taxonomy import ModelFormat, ModelVariantType
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
@@ -61,8 +63,7 @@ from invokeai.backend.util.devices import TorchDevice
     title="FLUX Denoise",
     tags=["image", "flux"],
     category="image",
-    version="3.2.2",
+    version="3.3.0",
-    classification=Classification.Prototype,
 )
 class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Run denoising process with a FLUX transformer model."""
@@ -103,6 +104,16 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         description="Negative conditioning tensor. Can be None if cfg_scale is 1.0.",
         input=Input.Connection,
     )
+    redux_conditioning: FluxReduxConditioningField | list[FluxReduxConditioningField] | None = InputField(
+        default=None,
+        description="FLUX Redux conditioning tensor.",
+        input=Input.Connection,
+    )
+    fill_conditioning: FluxFillConditioningField | None = InputField(
+        default=None,
+        description="FLUX Fill conditioning.",
+        input=Input.Connection,
+    )
     cfg_scale: float | list[float] = InputField(default=1.0, description=FieldDescriptions.cfg_scale, title="CFG Scale")
     cfg_scale_start_step: int = InputField(
         default=0,
@@ -190,11 +201,23 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
|||||||
dtype=inference_dtype,
|
dtype=inference_dtype,
|
||||||
device=TorchDevice.choose_torch_device(),
|
device=TorchDevice.choose_torch_device(),
|
||||||
)
|
)
|
||||||
|
redux_conditionings: list[FluxReduxConditioning] = self._load_redux_conditioning(
|
||||||
|
context=context,
|
||||||
|
redux_cond_field=self.redux_conditioning,
|
||||||
|
packed_height=packed_h,
|
||||||
|
packed_width=packed_w,
|
||||||
|
device=TorchDevice.choose_torch_device(),
|
||||||
|
dtype=inference_dtype,
|
||||||
|
)
|
||||||
pos_regional_prompting_extension = RegionalPromptingExtension.from_text_conditioning(
|
pos_regional_prompting_extension = RegionalPromptingExtension.from_text_conditioning(
|
||||||
pos_text_conditionings, img_seq_len=packed_h * packed_w
|
text_conditioning=pos_text_conditionings,
|
||||||
|
redux_conditioning=redux_conditionings,
|
||||||
|
img_seq_len=packed_h * packed_w,
|
||||||
)
|
)
|
||||||
neg_regional_prompting_extension = (
|
neg_regional_prompting_extension = (
|
||||||
RegionalPromptingExtension.from_text_conditioning(neg_text_conditionings, img_seq_len=packed_h * packed_w)
|
RegionalPromptingExtension.from_text_conditioning(
|
||||||
|
text_conditioning=neg_text_conditionings, redux_conditioning=[], img_seq_len=packed_h * packed_w
|
||||||
|
)
|
||||||
if neg_text_conditionings
|
if neg_text_conditionings
|
||||||
else None
|
else None
|
||||||
)
|
)
|
||||||
@@ -243,8 +266,19 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
|||||||
if is_schnell and self.control_lora:
|
if is_schnell and self.control_lora:
|
||||||
raise ValueError("Control LoRAs cannot be used with FLUX Schnell")
|
raise ValueError("Control LoRAs cannot be used with FLUX Schnell")
|
||||||
|
|
||||||
# Prepare the extra image conditioning tensor if a FLUX structural control image is provided.
|
# Prepare the extra image conditioning tensor (img_cond) for either FLUX structural control or FLUX Fill.
|
||||||
img_cond = self._prep_structural_control_img_cond(context)
|
img_cond: torch.Tensor | None = None
|
||||||
|
is_flux_fill = transformer_config.variant == ModelVariantType.Inpaint # type: ignore
|
||||||
|
if is_flux_fill:
|
||||||
|
img_cond = self._prep_flux_fill_img_cond(
|
||||||
|
context, device=TorchDevice.choose_torch_device(), dtype=inference_dtype
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
if self.fill_conditioning is not None:
|
||||||
|
raise ValueError("fill_conditioning was provided, but the model is not a FLUX Fill model.")
|
||||||
|
|
||||||
|
if self.control_lora is not None:
|
||||||
|
img_cond = self._prep_structural_control_img_cond(context)
|
||||||
|
|
||||||
inpaint_mask = self._prep_inpaint_mask(context, x)
|
inpaint_mask = self._prep_inpaint_mask(context, x)
|
||||||
|
|
||||||
@@ -253,7 +287,6 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
|||||||
# Pack all latent tensors.
|
# Pack all latent tensors.
|
||||||
init_latents = pack(init_latents) if init_latents is not None else None
|
init_latents = pack(init_latents) if init_latents is not None else None
|
||||||
inpaint_mask = pack(inpaint_mask) if inpaint_mask is not None else None
|
inpaint_mask = pack(inpaint_mask) if inpaint_mask is not None else None
|
||||||
img_cond = pack(img_cond) if img_cond is not None else None
|
|
||||||
noise = pack(noise)
|
noise = pack(noise)
|
||||||
x = pack(x)
|
x = pack(x)
|
||||||
|
|
||||||
@@ -400,6 +433,42 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
|||||||
|
|
||||||
return text_conditionings
|
return text_conditionings
|
||||||
|
|
||||||
|
def _load_redux_conditioning(
|
||||||
|
self,
|
||||||
|
context: InvocationContext,
|
||||||
|
redux_cond_field: FluxReduxConditioningField | list[FluxReduxConditioningField] | None,
|
||||||
|
packed_height: int,
|
||||||
|
packed_width: int,
|
||||||
|
device: torch.device,
|
||||||
|
dtype: torch.dtype,
|
||||||
|
) -> list[FluxReduxConditioning]:
|
||||||
|
# Normalize to a list of FluxReduxConditioningFields.
|
||||||
|
if redux_cond_field is None:
|
||||||
|
return []
|
||||||
|
|
||||||
|
redux_cond_list = (
|
||||||
|
[redux_cond_field] if isinstance(redux_cond_field, FluxReduxConditioningField) else redux_cond_field
|
||||||
|
)
|
||||||
|
|
||||||
|
redux_conditionings: list[FluxReduxConditioning] = []
|
||||||
|
for redux_cond_field in redux_cond_list:
|
||||||
|
# Load the Redux conditioning tensor.
|
||||||
|
redux_cond_data = context.tensors.load(redux_cond_field.conditioning.tensor_name)
|
||||||
|
redux_cond_data.to(device=device, dtype=dtype)
|
||||||
|
|
||||||
|
# Load the mask, if provided.
|
||||||
|
mask: Optional[torch.Tensor] = None
|
||||||
|
if redux_cond_field.mask is not None:
|
||||||
|
mask = context.tensors.load(redux_cond_field.mask.tensor_name)
|
||||||
|
mask = mask.to(device=device)
|
||||||
|
mask = RegionalPromptingExtension.preprocess_regional_prompt_mask(
|
||||||
|
mask, packed_height, packed_width, dtype, device
|
||||||
|
)
|
||||||
|
|
||||||
|
redux_conditionings.append(FluxReduxConditioning(redux_embeddings=redux_cond_data, mask=mask))
|
||||||
|
|
||||||
|
return redux_conditionings
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def prep_cfg_scale(
|
def prep_cfg_scale(
|
||||||
cls, cfg_scale: float | list[float], timesteps: list[float], cfg_scale_start_step: int, cfg_scale_end_step: int
|
cls, cfg_scale: float | list[float], timesteps: list[float], cfg_scale_start_step: int, cfg_scale_end_step: int
|
||||||
@@ -610,7 +679,70 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
|||||||
img_cond = einops.rearrange(img_cond, "h w c -> 1 c h w")
|
img_cond = einops.rearrange(img_cond, "h w c -> 1 c h w")
|
||||||
|
|
||||||
vae_info = context.models.load(self.controlnet_vae.vae)
|
vae_info = context.models.load(self.controlnet_vae.vae)
|
||||||
return FluxVaeEncodeInvocation.vae_encode(vae_info=vae_info, image_tensor=img_cond)
|
img_cond = FluxVaeEncodeInvocation.vae_encode(vae_info=vae_info, image_tensor=img_cond)
|
||||||
|
|
||||||
|
return pack(img_cond)
|
||||||
|
|
||||||
|
def _prep_flux_fill_img_cond(
|
||||||
|
self, context: InvocationContext, device: torch.device, dtype: torch.dtype
|
||||||
|
) -> torch.Tensor:
|
||||||
|
"""Prepare the FLUX Fill conditioning. This method should be called iff the model is a FLUX Fill model.
|
||||||
|
|
||||||
|
This logic is based on:
|
||||||
|
https://github.com/black-forest-labs/flux/blob/716724eb276d94397be99710a0a54d352664e23b/src/flux/sampling.py#L107-L157
|
||||||
|
"""
|
||||||
|
# Validate inputs.
|
||||||
|
if self.fill_conditioning is None:
|
||||||
|
raise ValueError("A FLUX Fill model is being used without fill_conditioning.")
|
||||||
|
# TODO(ryand): We should probable rename controlnet_vae. It's used for more than just ControlNets.
|
||||||
|
if self.controlnet_vae is None:
|
||||||
|
raise ValueError("A FLUX Fill model is being used without controlnet_vae.")
|
||||||
|
if self.control_lora is not None:
|
||||||
|
raise ValueError(
|
||||||
|
"A FLUX Fill model is being used, but a control_lora was provided. Control LoRAs are not compatible with FLUX Fill models."
|
||||||
|
)
|
||||||
|
|
||||||
|
# Log input warnings related to FLUX Fill usage.
|
||||||
|
if self.denoise_mask is not None:
|
||||||
|
context.logger.warning(
|
||||||
|
"Both fill_conditioning and a denoise_mask were provided. You probably meant to use one or the other."
|
||||||
|
)
|
||||||
|
if self.guidance < 25.0:
|
||||||
|
context.logger.warning("A guidance value of ~30.0 is recommended for FLUX Fill models.")
|
||||||
|
|
||||||
|
# Load the conditioning image and resize it to the target image size.
|
||||||
|
cond_img = context.images.get_pil(self.fill_conditioning.image.image_name, mode="RGB")
|
||||||
|
cond_img = cond_img.resize((self.width, self.height), Image.Resampling.BICUBIC)
|
||||||
|
cond_img = np.array(cond_img)
|
||||||
|
cond_img = torch.from_numpy(cond_img).float() / 127.5 - 1.0
|
||||||
|
cond_img = einops.rearrange(cond_img, "h w c -> 1 c h w")
|
||||||
|
cond_img = cond_img.to(device=device, dtype=dtype)
|
||||||
|
|
||||||
|
# Load the mask and resize it to the target image size.
|
||||||
|
mask = context.tensors.load(self.fill_conditioning.mask.tensor_name)
|
||||||
|
# We expect mask to be a bool tensor with shape [1, H, W].
|
||||||
|
assert mask.dtype == torch.bool
|
||||||
|
assert mask.dim() == 3
|
||||||
|
assert mask.shape[0] == 1
|
||||||
|
mask = tv_resize(mask, size=[self.height, self.width], interpolation=tv_transforms.InterpolationMode.NEAREST)
|
||||||
|
mask = mask.to(device=device, dtype=dtype)
|
||||||
|
mask = einops.rearrange(mask, "1 h w -> 1 1 h w")
|
||||||
|
|
||||||
|
# Prepare image conditioning.
|
||||||
|
cond_img = cond_img * (1 - mask)
|
||||||
|
vae_info = context.models.load(self.controlnet_vae.vae)
|
||||||
|
cond_img = FluxVaeEncodeInvocation.vae_encode(vae_info=vae_info, image_tensor=cond_img)
|
||||||
|
cond_img = pack(cond_img)
|
||||||
|
|
||||||
|
# Prepare mask conditioning.
|
||||||
|
mask = mask[:, 0, :, :]
|
||||||
|
# Rearrange mask to a 16-channel representation that matches the shape of the VAE-encoded latent space.
|
||||||
|
mask = einops.rearrange(mask, "b (h ph) (w pw) -> b (ph pw) h w", ph=8, pw=8)
|
||||||
|
mask = pack(mask)
|
||||||
|
|
||||||
|
# Merge image and mask conditioning.
|
||||||
|
img_cond = torch.cat((cond_img, mask), dim=-1)
|
||||||
|
return img_cond
|
||||||
|
|
||||||
def _normalize_ip_adapter_fields(self) -> list[IPAdapterField]:
|
def _normalize_ip_adapter_fields(self) -> list[IPAdapterField]:
|
||||||
if self.ip_adapter is None:
|
if self.ip_adapter is None:
|
||||||
|
|||||||
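Note: a quick way to sanity-check the channel math in `_prep_flux_fill_img_cond` above. Each packed latent token carries 64 image values (16 VAE channels times a 2x2 patch), and the rearranged mask carries 256 (an 8x8 pixel block per latent cell, packed 2x2 again). The sketch below is standalone and illustrative; its local `pack` mirrors the 2x2 patchification of the FLUX sampling utils, which is an assumption rather than the imported function.

    import einops
    import torch

    def pack(x: torch.Tensor) -> torch.Tensor:
        # Fold 2x2 latent patches into the channel dim: (b, c, h, w) -> (b, h*w/4, c*4).
        return einops.rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)

    b, height, width = 1, 1024, 1024
    latents = torch.zeros(b, 16, height // 8, width // 8)  # VAE-encoded conditioning image
    mask = torch.zeros(b, 1, height, width)                # pixel-space inpaint mask

    # Move each 8x8 pixel block of the mask into channels at latent resolution.
    mask = einops.rearrange(mask[:, 0, :, :], "b (h ph) (w pw) -> b (ph pw) h w", ph=8, pw=8)

    img_cond = torch.cat((pack(latents), pack(mask)), dim=-1)
    print(img_cond.shape)  # torch.Size([1, 4096, 320]): 64 image + 256 mask values per token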
invokeai/app/invocations/flux_fill.py (new file, 46 lines)
@@ -0,0 +1,46 @@
+from invokeai.app.invocations.baseinvocation import (
+    BaseInvocation,
+    BaseInvocationOutput,
+    Classification,
+    invocation,
+    invocation_output,
+)
+from invokeai.app.invocations.fields import (
+    FieldDescriptions,
+    FluxFillConditioningField,
+    InputField,
+    OutputField,
+    TensorField,
+)
+from invokeai.app.invocations.primitives import ImageField
+from invokeai.app.services.shared.invocation_context import InvocationContext
+
+
+@invocation_output("flux_fill_output")
+class FluxFillOutput(BaseInvocationOutput):
+    """The conditioning output of a FLUX Fill invocation."""
+
+    fill_cond: FluxFillConditioningField = OutputField(
+        description=FieldDescriptions.flux_redux_conditioning, title="Conditioning"
+    )
+
+
+@invocation(
+    "flux_fill",
+    title="FLUX Fill Conditioning",
+    tags=["inpaint"],
+    category="inpaint",
+    version="1.0.0",
+    classification=Classification.Beta,
+)
+class FluxFillInvocation(BaseInvocation):
+    """Prepare the FLUX Fill conditioning data."""
+
+    image: ImageField = InputField(description="The FLUX Fill reference image.")
+    mask: TensorField = InputField(
+        description="The bool inpainting mask. Excluded regions should be set to "
+        "False, included regions should be set to True.",
+    )
+
+    def invoke(self, context: InvocationContext) -> FluxFillOutput:
+        return FluxFillOutput(fill_cond=FluxFillConditioningField(image=self.image, mask=self.mask))
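Note: downstream, `_prep_flux_fill_img_cond` asserts that the mask wired into this node is a bool tensor of shape [1, H, W], with True marking the included (inpainted) region. A minimal sketch of building such a tensor from a grayscale mask image (the file name and threshold are illustrative assumptions):

    import numpy as np
    import torch
    from PIL import Image

    pil_mask = Image.open("mask.png").convert("L")     # hypothetical input file
    mask = torch.from_numpy(np.array(pil_mask) > 127)  # threshold to bool: True = inpaint
    mask = mask.unsqueeze(0)                           # [1, H, W]
    assert mask.dtype == torch.bool and mask.dim() == 3 and mask.shape[0] == 1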
@@ -4,7 +4,7 @@ from typing import List, Literal, Union
 from pydantic import field_validator, model_validator
 from typing_extensions import Self
 
-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import InputField, UIType
 from invokeai.app.invocations.ip_adapter import (
     CLIP_VISION_MODEL_MAP,
@@ -28,7 +28,6 @@ from invokeai.backend.model_manager.config import (
     tags=["ip_adapter", "control"],
     category="ip_adapter",
     version="1.0.0",
-    classification=Classification.Prototype,
 )
 class FluxIPAdapterInvocation(BaseInvocation):
     """Collects FLUX IP-Adapter info to pass to other nodes."""

@@ -3,14 +3,13 @@ from typing import Optional
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )
 from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
 from invokeai.app.invocations.model import CLIPField, LoRAField, ModelIdentifierField, T5EncoderField, TransformerField
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager.config import BaseModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType
 
 
 @invocation_output("flux_lora_loader_output")
@@ -28,11 +27,10 @@ class FluxLoRALoaderOutput(BaseInvocationOutput):
 
 @invocation(
     "flux_lora_loader",
-    title="FLUX LoRA",
+    title="Apply LoRA - FLUX",
     tags=["lora", "model", "flux"],
     category="model",
-    version="1.2.0",
-    classification=Classification.Prototype,
+    version="1.2.1",
 )
 class FluxLoRALoaderInvocation(BaseInvocation):
     """Apply a LoRA model to a FLUX transformer and/or text encoder."""
@@ -107,11 +105,10 @@ class FluxLoRALoaderInvocation(BaseInvocation):
 
 @invocation(
     "flux_lora_collection_loader",
-    title="FLUX LoRA Collection Loader",
+    title="Apply LoRA Collection - FLUX",
     tags=["lora", "model", "flux"],
     category="model",
-    version="1.3.0",
-    classification=Classification.Prototype,
+    version="1.3.1",
 )
 class FLUXLoRACollectionLoader(BaseInvocation):
     """Applies a collection of LoRAs to a FLUX transformer."""

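Note: several hunks in this compare repeat one mechanical change: enum-like model types move out of `invokeai.backend.model_manager.config` into a new `taxonomy` module, with call sites otherwise untouched. Collecting the names visible in these hunks (inferred from this diff only, not a complete inventory of the module):

    from invokeai.backend.model_manager.taxonomy import (
        BaseModelType,
        ModelFormat,
        ModelType,
        ModelVariantType,
        SubModelType,
    )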
@@ -3,7 +3,6 @@ from typing import Literal
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )
@@ -17,8 +16,8 @@ from invokeai.app.util.t5_model_identifier import (
 from invokeai.backend.flux.util import max_seq_lengths
 from invokeai.backend.model_manager.config import (
     CheckpointConfigBase,
-    SubModelType,
 )
+from invokeai.backend.model_manager.taxonomy import SubModelType
 
 
 @invocation_output("flux_model_loader_output")
@@ -37,11 +36,10 @@ class FluxModelLoaderOutput(BaseInvocationOutput):
 
 @invocation(
     "flux_model_loader",
-    title="Flux Main Model",
+    title="Main Model - FLUX",
     tags=["model", "flux"],
     category="model",
-    version="1.0.5",
-    classification=Classification.Prototype,
+    version="1.0.6",
 )
 class FluxModelLoaderInvocation(BaseInvocation):
     """Loads a flux base model, outputting its submodels."""

invokeai/app/invocations/flux_redux.py (new file, 120 lines)
@@ -0,0 +1,120 @@
+from typing import Optional
+
+import torch
+from PIL import Image
+
+from invokeai.app.invocations.baseinvocation import (
+    BaseInvocation,
+    BaseInvocationOutput,
+    Classification,
+    invocation,
+    invocation_output,
+)
+from invokeai.app.invocations.fields import (
+    FieldDescriptions,
+    FluxReduxConditioningField,
+    InputField,
+    OutputField,
+    TensorField,
+    UIType,
+)
+from invokeai.app.invocations.model import ModelIdentifierField
+from invokeai.app.invocations.primitives import ImageField
+from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
+from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.flux.redux.flux_redux_model import FluxReduxModel
+from invokeai.backend.model_manager import BaseModelType, ModelType
+from invokeai.backend.model_manager.config import AnyModelConfig
+from invokeai.backend.model_manager.starter_models import siglip
+from invokeai.backend.sig_lip.sig_lip_pipeline import SigLipPipeline
+from invokeai.backend.util.devices import TorchDevice
+
+
+@invocation_output("flux_redux_output")
+class FluxReduxOutput(BaseInvocationOutput):
+    """The conditioning output of a FLUX Redux invocation."""
+
+    redux_cond: FluxReduxConditioningField = OutputField(
+        description=FieldDescriptions.flux_redux_conditioning, title="Conditioning"
+    )
+
+
+@invocation(
+    "flux_redux",
+    title="FLUX Redux",
+    tags=["ip_adapter", "control"],
+    category="ip_adapter",
+    version="2.0.0",
+    classification=Classification.Beta,
+)
+class FluxReduxInvocation(BaseInvocation):
+    """Runs a FLUX Redux model to generate a conditioning tensor."""
+
+    image: ImageField = InputField(description="The FLUX Redux image prompt.")
+    mask: Optional[TensorField] = InputField(
+        default=None,
+        description="The bool mask associated with this FLUX Redux image prompt. Excluded regions should be set to "
+        "False, included regions should be set to True.",
+    )
+    redux_model: ModelIdentifierField = InputField(
+        description="The FLUX Redux model to use.",
+        title="FLUX Redux Model",
+        ui_type=UIType.FluxReduxModel,
+    )
+
+    def invoke(self, context: InvocationContext) -> FluxReduxOutput:
+        image = context.images.get_pil(self.image.image_name, "RGB")
+
+        encoded_x = self._siglip_encode(context, image)
+        redux_conditioning = self._flux_redux_encode(context, encoded_x)
+
+        tensor_name = context.tensors.save(redux_conditioning)
+        return FluxReduxOutput(
+            redux_cond=FluxReduxConditioningField(conditioning=TensorField(tensor_name=tensor_name), mask=self.mask)
+        )
+
+    @torch.no_grad()
+    def _siglip_encode(self, context: InvocationContext, image: Image.Image) -> torch.Tensor:
+        siglip_model_config = self._get_siglip_model(context)
+        with context.models.load(siglip_model_config.key).model_on_device() as (_, siglip_pipeline):
+            assert isinstance(siglip_pipeline, SigLipPipeline)
+            return siglip_pipeline.encode_image(
+                x=image, device=TorchDevice.choose_torch_device(), dtype=TorchDevice.choose_torch_dtype()
+            )
+
+    @torch.no_grad()
+    def _flux_redux_encode(self, context: InvocationContext, encoded_x: torch.Tensor) -> torch.Tensor:
+        with context.models.load(self.redux_model).model_on_device() as (_, flux_redux):
+            assert isinstance(flux_redux, FluxReduxModel)
+            dtype = next(flux_redux.parameters()).dtype
+            encoded_x = encoded_x.to(dtype=dtype)
+            return flux_redux(encoded_x)
+
+    def _get_siglip_model(self, context: InvocationContext) -> AnyModelConfig:
+        siglip_models = context.models.search_by_attrs(name=siglip.name, base=BaseModelType.Any, type=ModelType.SigLIP)
+
+        if not len(siglip_models) > 0:
+            context.logger.warning(
+                f"The SigLIP model required by FLUX Redux ({siglip.name}) is not installed. Downloading and installing now. This may take a while."
+            )
+
+            # TODO(psyche): Can the probe reliably determine the type of the model? Just hardcoding it bc I don't want to experiment now
+            config_overrides = ModelRecordChanges(name=siglip.name, type=ModelType.SigLIP)
+
+            # Queue the job
+            job = context._services.model_manager.install.heuristic_import(siglip.source, config=config_overrides)
+
+            # Wait for up to 10 minutes - model is ~3.5GB
+            context._services.model_manager.install.wait_for_job(job, timeout=600)
+
+            siglip_models = context.models.search_by_attrs(
+                name=siglip.name,
+                base=BaseModelType.Any,
+                type=ModelType.SigLIP,
+            )
+
+            if len(siglip_models) == 0:
+                context.logger.error("Error while fetching SigLIP for FLUX Redux")
+            assert len(siglip_models) == 1
+
+        return siglip_models[0]
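Note: for orientation, the `FluxReduxModel` loaded above turns SigLIP image tokens into extra T5-space prompt tokens that are appended to the text conditioning. A conceptual sketch follows; the two-linear/SiLU structure and the 1152-to-4096 dimensions are assumptions based on the published FLUX Redux projector, not code from this repository.

    import torch
    import torch.nn as nn

    class ReduxProjectorSketch(nn.Module):
        def __init__(self, siglip_dim: int = 1152, t5_dim: int = 4096):
            super().__init__()
            self.up = nn.Linear(siglip_dim, t5_dim * 3)
            self.down = nn.Linear(t5_dim * 3, t5_dim)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # x: [batch, n_image_tokens, siglip_dim] -> [batch, n_image_tokens, t5_dim]
            return self.down(nn.functional.silu(self.up(x)))

The projected tokens ride along with the T5 text tokens, which is consistent with the denoise node above folding Redux conditioning into the regional prompting extension rather than treating it as a separate adapter.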
@@ -4,7 +4,7 @@ from typing import Iterator, Literal, Optional, Tuple
 import torch
 from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer, T5TokenizerFast
 
-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     FluxConditioningField,
@@ -17,7 +17,7 @@ from invokeai.app.invocations.model import CLIPField, T5EncoderField
 from invokeai.app.invocations.primitives import FluxConditioningOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.flux.modules.conditioner import HFEncoder
-from invokeai.backend.model_manager.config import ModelFormat
+from invokeai.backend.model_manager import ModelFormat
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX, FLUX_LORA_T5_PREFIX
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
@@ -26,11 +26,10 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Condit
 
 @invocation(
     "flux_text_encoder",
-    title="FLUX Text Encoding",
+    title="Prompt - FLUX",
     tags=["prompt", "conditioning", "flux"],
     category="conditioning",
-    version="1.1.1",
-    classification=Classification.Prototype,
+    version="1.1.2",
 )
 class FluxTextEncoderInvocation(BaseInvocation):
     """Encodes and preps a prompt for a flux image."""

@@ -22,10 +22,10 @@ from invokeai.backend.util.devices import TorchDevice
 
 @invocation(
     "flux_vae_decode",
-    title="FLUX Latents to Image",
+    title="Latents to Image - FLUX",
     tags=["latents", "image", "vae", "l2i", "flux"],
     category="latents",
-    version="1.0.1",
+    version="1.0.2",
 )
 class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Generates an image from latents."""

@@ -19,10 +19,10 @@ from invokeai.backend.util.devices import TorchDevice
 
 @invocation(
     "flux_vae_encode",
-    title="FLUX Image to Latents",
+    title="Image to Latents - FLUX",
     tags=["latents", "image", "vae", "i2l", "flux"],
     category="latents",
-    version="1.0.0",
+    version="1.0.1",
 )
 class FluxVaeEncodeInvocation(BaseInvocation):
     """Encodes an image into latents."""

@@ -6,7 +6,7 @@ from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField
 from invokeai.app.invocations.model import UNetField
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager.config import BaseModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType
 
 
 @invocation_output("ideal_size_output")
@@ -19,9 +19,9 @@ class IdealSizeOutput(BaseInvocationOutput):
 
 @invocation(
     "ideal_size",
-    title="Ideal Size",
+    title="Ideal Size - SD1.5, SDXL",
     tags=["latents", "math", "ideal_size"],
-    version="1.0.4",
+    version="1.0.5",
 )
 class IdealSizeInvocation(BaseInvocation):
     """Calculates the ideal size for generation to avoid duplication"""

@@ -355,7 +355,6 @@ class ImageBlurInvocation(BaseInvocation, WithMetadata, WithBoard):
     tags=["image", "unsharp_mask"],
     category="image",
     version="1.2.2",
-    classification=Classification.Beta,
 )
 class UnsharpMaskInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Applies an unsharp mask filter to an image"""
@@ -1051,7 +1050,7 @@ class MaskFromIDInvocation(BaseInvocation, WithMetadata, WithBoard):
     tags=["image", "mask", "id"],
     category="image",
     version="1.0.0",
-    classification=Classification.Internal,
+    classification=Classification.Deprecated,
 )
 class CanvasV2MaskAndCropInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Handles Canvas V2 image output masking and cropping"""
@@ -1089,6 +1088,131 @@ class CanvasV2MaskAndCropInvocation(BaseInvocation, WithMetadata, WithBoard):
         return ImageOutput.build(image_dto)
 
 
+@invocation(
+    "expand_mask_with_fade", title="Expand Mask with Fade", tags=["image", "mask"], category="image", version="1.0.1"
+)
+class ExpandMaskWithFadeInvocation(BaseInvocation, WithMetadata, WithBoard):
+    """Expands a mask with a fade effect. The mask uses black to indicate areas to keep from the generated image and white for areas to discard.
+    The mask is thresholded to create a binary mask, and then a distance transform is applied to create a fade effect.
+    The fade size is specified in pixels, and the mask is expanded by that amount. The result is a mask with a smooth transition from black to white.
+    If the fade size is 0, the mask is returned as-is.
+    """
+
+    mask: ImageField = InputField(description="The mask to expand")
+    threshold: int = InputField(default=0, ge=0, le=255, description="The threshold for the binary mask (0-255)")
+    fade_size_px: int = InputField(default=32, ge=0, description="The size of the fade in pixels")
+
+    def invoke(self, context: InvocationContext) -> ImageOutput:
+        pil_mask = context.images.get_pil(self.mask.image_name, mode="L")
+
+        if self.fade_size_px == 0:
+            # If the fade size is 0, just return the mask as-is.
+            image_dto = context.images.save(image=pil_mask, image_category=ImageCategory.MASK)
+            return ImageOutput.build(image_dto)
+
+        np_mask = numpy.array(pil_mask)
+
+        # Threshold the mask to create a binary mask - 0 for black, 255 for white
+        # If we don't threshold we can get some weird artifacts
+        np_mask = numpy.where(np_mask > self.threshold, 255, 0).astype(numpy.uint8)
+
+        # Create a mask for the black region (1 where black, 0 otherwise)
+        black_mask = (np_mask == 0).astype(numpy.uint8)
+
+        # Invert the black region
+        bg_mask = 1 - black_mask
+
+        # Create a distance transform of the inverted mask
+        dist = cv2.distanceTransform(bg_mask, cv2.DIST_L2, 5)
+
+        # Normalize distances so that pixels < fade_size_px become a linear gradient (0 to 1)
+        d_norm = numpy.clip(dist / self.fade_size_px, 0, 1)
+
+        # Control points: x values (normalized distance) and corresponding fade pct y values.
+
+        # There are some magic numbers here that are used to create a smooth transition:
+        # - The first point is at 0% of fade size from edge of mask (meaning the edge of the mask), and is 0% fade (black)
+        # - The second point is 1px from the edge of the mask and also has 0% fade, effectively expanding the mask
+        #   by 1px. This fixes an issue where artifacts can occur at the edge of the mask
+        # - The third point is at 20% of the fade size from the edge of the mask and has 20% fade
+        # - The fourth point is at 80% of the fade size from the edge of the mask and has 90% fade
+        # - The last point is at 100% of the fade size from the edge of the mask and has 100% fade (white)
+
+        # x values: 0 = mask edge, 1 = fade_size_px from edge
+        x_control = numpy.array([0.0, 1.0 / self.fade_size_px, 0.2, 0.8, 1.0])
+        # y values: 0 = black, 1 = white
+        y_control = numpy.array([0.0, 0.0, 0.2, 0.9, 1.0])
+
+        # Fit a cubic polynomial that smoothly passes through the control points
+        coeffs = numpy.polyfit(x_control, y_control, 3)
+        poly = numpy.poly1d(coeffs)
+
+        # Evaluate the polynomial
+        feather = poly(d_norm)
+
+        # The polynomial fit isn't perfect. Points beyond the fade distance are likely to be slightly less than 1.0,
+        # even though the control points indicate that they should be exactly 1.0. This is due to the nature of the
+        # polynomial fit, which is a best approximation of the control points but not an exact match.
+
+        # When this occurs, the area outside the mask and fade-out will not be 100% transparent. For example, it may
+        # have an alpha value of 1 instead of 0. So we must force pixels at or beyond the fade distance to exactly 1.0.
+
+        # Force pixels at or beyond the fade distance to exactly 1.0
+        feather = numpy.where(d_norm >= 1.0, 1.0, feather)
+
+        # Clip any other values to ensure they're in the valid range [0,1]
+        feather = numpy.clip(feather, 0, 1)
+
+        # Build final image.
+        np_result = numpy.where(black_mask == 1, 0, (feather * 255).astype(numpy.uint8))
+
+        # Convert back to PIL, grayscale
+        pil_result = Image.fromarray(np_result.astype(numpy.uint8), mode="L")
+
+        image_dto = context.images.save(image=pil_result, image_category=ImageCategory.MASK)
+
+        return ImageOutput.build(image_dto)
+
+
+@invocation(
+    "apply_mask_to_image",
+    title="Apply Mask to Image",
+    tags=["image", "mask", "blend"],
+    category="image",
+    version="1.0.0",
+)
+class ApplyMaskToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
+    """
+    Extracts a region from a generated image using a mask and blends it seamlessly onto a source image.
+    The mask uses black to indicate areas to keep from the generated image and white for areas to discard.
+    """
+
+    image: ImageField = InputField(description="The image from which to extract the masked region")
+    mask: ImageField = InputField(description="The mask defining the region (black=keep, white=discard)")
+    invert_mask: bool = InputField(
+        default=False,
+        description="Whether to invert the mask before applying it",
+    )
+
+    def invoke(self, context: InvocationContext) -> ImageOutput:
+        # Load images
+        image = context.images.get_pil(self.image.image_name, mode="RGBA")
+        mask = context.images.get_pil(self.mask.image_name, mode="L")
+
+        if self.invert_mask:
+            # Invert the mask if requested
+            mask = ImageOps.invert(mask.copy())
+
+        # Combine the mask as the alpha channel of the image
+        r, g, b, _ = image.split()  # Split the image into RGB and alpha channels
+        result_image = Image.merge("RGBA", (r, g, b, mask))  # Use the mask as the new alpha channel
+
+        # Save the resulting image
+        image_dto = context.images.save(image=result_image)
+
+        return ImageOutput.build(image_dto)
+
+
 @invocation(
     "img_noise",
     title="Add Image Noise",
@@ -1159,7 +1283,6 @@ class ImageNoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
     category="image",
     version="1.0.0",
     tags=["image", "crop"],
-    classification=Classification.Beta,
 )
 class CropImageToBoundingBoxInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Crop an image to the given bounding box. If the bounding box is omitted, the image is cropped to the non-transparent pixels."""
@@ -1186,7 +1309,6 @@ class CropImageToBoundingBoxInvocation(BaseInvocation, WithMetadata, WithBoard):
     category="image",
     version="1.0.0",
     tags=["image", "crop"],
-    classification=Classification.Beta,
 )
 class PasteImageIntoBoundingBoxInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Paste the source image into the target image at the given bounding box.

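Note: the control-point polynomial in `ExpandMaskWithFadeInvocation` can be checked in isolation. This standalone snippet (values illustrative) fits and evaluates the same cubic and applies the same clamping as the node:

    import numpy as np

    fade_size_px = 32
    x_control = np.array([0.0, 1.0 / fade_size_px, 0.2, 0.8, 1.0])  # distance from mask edge, normalized
    y_control = np.array([0.0, 0.0, 0.2, 0.9, 1.0])                 # fade amount: 0 = black, 1 = white

    poly = np.poly1d(np.polyfit(x_control, y_control, 3))

    d = np.linspace(0.0, 1.2, 7)  # sample points, including beyond the fade distance
    fade = np.clip(np.where(d >= 1.0, 1.0, poly(d)), 0.0, 1.0)
    print(np.round(fade, 3))      # ramps from ~0 at the edge to exactly 1.0 at and past the fade distance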
@@ -31,10 +31,10 @@ from invokeai.backend.util.devices import TorchDevice
 
 @invocation(
     "i2l",
-    title="Image to Latents",
+    title="Image to Latents - SD1.5, SDXL",
     tags=["latents", "image", "vae", "i2l"],
     category="latents",
-    version="1.1.0",
+    version="1.1.1",
 )
 class ImageToLatentsInvocation(BaseInvocation):
     """Encodes an image into latents."""

@@ -13,10 +13,8 @@ from invokeai.app.services.model_records.model_records_base import ModelRecordCh
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
-    BaseModelType,
     IPAdapterCheckpointConfig,
     IPAdapterInvokeAIConfig,
-    ModelType,
 )
 from invokeai.backend.model_manager.starter_models import (
     StarterModel,
@@ -24,6 +22,7 @@ from invokeai.backend.model_manager.starter_models import (
     ip_adapter_sd_image_encoder,
     ip_adapter_sdxl_image_encoder,
 )
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
 
 
 class IPAdapterField(BaseModel):
@@ -69,7 +68,13 @@ CLIP_VISION_MODEL_MAP: dict[Literal["ViT-L", "ViT-H", "ViT-G"], StarterModel] =
 }
 
 
-@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.5.0")
+@invocation(
+    "ip_adapter",
+    title="IP-Adapter - SD1.5, SDXL",
+    tags=["ip_adapter", "control"],
+    category="ip_adapter",
+    version="1.5.1",
+)
 class IPAdapterInvocation(BaseInvocation):
     """Collects IP-Adapter info to pass to other nodes."""
 
@@ -31,10 +31,10 @@ from invokeai.backend.util.devices import TorchDevice
 
 @invocation(
     "l2i",
-    title="Latents to Image",
+    title="Latents to Image - SD1.5, SDXL",
     tags=["latents", "image", "vae", "l2i"],
     category="latents",
-    version="1.3.1",
+    version="1.3.2",
 )
 class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Generates an image from latents."""

invokeai/app/invocations/llava_onevision_vllm.py (new file, 67 lines)
@@ -0,0 +1,67 @@
+from typing import Any
+
+import torch
+from PIL.Image import Image
+from pydantic import field_validator
+
+from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, UIComponent, UIType
+from invokeai.app.invocations.model import ModelIdentifierField
+from invokeai.app.invocations.primitives import StringOutput
+from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.llava_onevision_model import LlavaOnevisionModel
+from invokeai.backend.util.devices import TorchDevice
+
+
+@invocation(
+    "llava_onevision_vllm",
+    title="LLaVA OneVision VLLM",
+    tags=["vllm"],
+    category="vllm",
+    version="1.0.0",
+    classification=Classification.Beta,
+)
+class LlavaOnevisionVllmInvocation(BaseInvocation):
+    """Run a LLaVA OneVision VLLM model."""
+
+    images: list[ImageField] | ImageField | None = InputField(default=None, max_length=3, description="Input image.")
+    prompt: str = InputField(
+        default="",
+        description="Input text prompt.",
+        ui_component=UIComponent.Textarea,
+    )
+    vllm_model: ModelIdentifierField = InputField(
+        title="LLaVA Model Type",
+        description=FieldDescriptions.vllm_model,
+        ui_type=UIType.LlavaOnevisionModel,
+    )
+
+    @field_validator("images", mode="before")
+    def listify_images(cls, v: Any) -> list:
+        if v is None:
+            return v
+        if not isinstance(v, list):
+            return [v]
+        return v
+
+    def _get_images(self, context: InvocationContext) -> list[Image]:
+        if self.images is None:
+            return []
+
+        image_fields = self.images if isinstance(self.images, list) else [self.images]
+        return [context.images.get_pil(image_field.image_name, "RGB") for image_field in image_fields]
+
+    @torch.no_grad()
+    def invoke(self, context: InvocationContext) -> StringOutput:
+        images = self._get_images(context)
+
+        with context.models.load(self.vllm_model) as vllm_model:
+            assert isinstance(vllm_model, LlavaOnevisionModel)
+            output = vllm_model.run(
+                prompt=self.prompt,
+                images=images,
+                device=TorchDevice.choose_torch_device(),
+                dtype=TorchDevice.choose_torch_dtype(),
+            )
+
+        return StringOutput(value=output)
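Note: the `images` field above accepts a single `ImageField` or a list thanks to a pydantic `mode="before"` validator. A minimal standalone sketch of the same pattern (model and field names here are illustrative; pydantic v2 API):

    from typing import Any

    from pydantic import BaseModel, field_validator

    class PromptSketch(BaseModel):
        images: list[str] | str | None = None

        @field_validator("images", mode="before")
        def listify_images(cls, v: Any) -> Any:
            # Wrap a bare value in a list; pass None and lists through unchanged.
            if v is None or isinstance(v, list):
                return v
            return [v]

    print(PromptSketch(images="a.png").images)  # ['a.png']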
@@ -1,40 +1,83 @@
+import logging
 import shutil
 import sys
+import traceback
 from importlib.util import module_from_spec, spec_from_file_location
 from pathlib import Path
 
 
-def load_custom_nodes(custom_nodes_path: Path):
+def load_custom_nodes(custom_nodes_path: Path, logger: logging.Logger):
     """
     Loads all custom nodes from the custom_nodes_path directory.
 
-    This function copies a custom __init__.py file to the custom_nodes_path directory, effectively turning it into a
-    python module.
+    If custom_nodes_path does not exist, it creates it.
 
-    The custom __init__.py file itself imports all the custom node packs as python modules from the custom_nodes_path
-    directory.
+    It also copies the custom_nodes/README.md file to the custom_nodes_path directory. Because this file may change,
+    it is _always_ copied to the custom_nodes_path directory.
 
-    Then,the custom __init__.py file is programmatically imported using importlib. As it executes, it imports all the
-    custom node packs as python modules.
+    Then, it crawls the custom_nodes_path directory and imports all top-level directories as python modules.
+
+    If the directory does not contain an __init__.py file or starts with an `_` or `.`, it is skipped.
     """
 
+    # create the custom nodes directory if it does not exist
     custom_nodes_path.mkdir(parents=True, exist_ok=True)
 
-    custom_nodes_init_path = str(custom_nodes_path / "__init__.py")
-    custom_nodes_readme_path = str(custom_nodes_path / "README.md")
-
-    # copy our custom nodes __init__.py to the custom nodes directory
-    shutil.copy(Path(__file__).parent / "custom_nodes/init.py", custom_nodes_init_path)
-    shutil.copy(Path(__file__).parent / "custom_nodes/README.md", custom_nodes_readme_path)
-
-    # set the same permissions as the destination directory, in case our source is read-only,
-    # so that the files are user-writable
-    for p in custom_nodes_path.glob("**/*"):
-        p.chmod(custom_nodes_path.stat().st_mode)
+    # Copy the README file to the custom nodes directory
+    source_custom_nodes_readme_path = Path(__file__).parent / "custom_nodes/README.md"
+    target_custom_nodes_readme_path = Path(custom_nodes_path) / "README.md"
+
+    # copy our custom nodes README to the custom nodes directory
+    shutil.copy(source_custom_nodes_readme_path, target_custom_nodes_readme_path)
+
+    loaded_packs: list[str] = []
+    failed_packs: list[str] = []
 
     # Import custom nodes, see https://docs.python.org/3/library/importlib.html#importing-programmatically
-    spec = spec_from_file_location("custom_nodes", custom_nodes_init_path)
-    if spec is None or spec.loader is None:
-        raise RuntimeError(f"Could not load custom nodes from {custom_nodes_init_path}")
-    module = module_from_spec(spec)
-    sys.modules[spec.name] = module
-    spec.loader.exec_module(module)
+    for d in custom_nodes_path.iterdir():
+        # skip files
+        if not d.is_dir():
+            continue
+
+        # skip hidden directories
+        if d.name.startswith("_") or d.name.startswith("."):
+            continue
+
+        # skip directories without an `__init__.py`
+        init = d / "__init__.py"
+        if not init.exists():
+            continue
+
+        module_name = init.parent.stem
+
+        # skip if already imported
+        if module_name in globals():
+            continue
+
+        # load the module
+        spec = spec_from_file_location(module_name, init.absolute())
+
+        if spec is None or spec.loader is None:
+            logger.warning(f"Could not load {init}")
+            continue
+
+        logger.info(f"Loading node pack {module_name}")
+
+        try:
+            module = module_from_spec(spec)
+            sys.modules[spec.name] = module
+            spec.loader.exec_module(module)
+
+            loaded_packs.append(module_name)
+        except Exception:
+            failed_packs.append(module_name)
+            full_error = traceback.format_exc()
+            logger.error(f"Failed to load node pack {module_name} (may have partially loaded):\n{full_error}")
+
+        del init, module_name
+
+    loaded_count = len(loaded_packs)
+    if loaded_count > 0:
+        logger.info(
+            f"Loaded {loaded_count} node pack{'s' if loaded_count != 1 else ''} from {custom_nodes_path}: {', '.join(loaded_packs)}"
+        )
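Note: with the rewritten loader, a node pack is just a top-level directory under the custom nodes path that contains an `__init__.py`. A hypothetical layout the crawler would pick up (names illustrative):

    <custom_nodes_path>/
        my_nodes/
            __init__.py     # imported as module "my_nodes"
            cool_node.py
        _wip_pack/          # skipped: name starts with an underscore
        notes.txt           # skipped: not a directory

    # my_nodes/__init__.py (illustrative): re-export the pack's invocations
    from .cool_node import CoolInvocation  # noqa: F401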
@@ -4,7 +4,6 @@ from PIL import Image
 
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
-    Classification,
     InvocationContext,
     invocation,
 )
@@ -58,7 +57,6 @@ class RectangleMaskInvocation(BaseInvocation, WithMetadata):
     tags=["conditioning"],
     category="conditioning",
     version="1.0.0",
-    classification=Classification.Beta,
 )
 class AlphaMaskToTensorInvocation(BaseInvocation):
     """Convert a mask image to a tensor. Opaque regions are 1 and transparent regions are 0."""
@@ -67,7 +65,7 @@ class AlphaMaskToTensorInvocation(BaseInvocation):
     invert: bool = InputField(default=False, description="Whether to invert the mask.")
 
     def invoke(self, context: InvocationContext) -> MaskOutput:
-        image = context.images.get_pil(self.image.image_name)
+        image = context.images.get_pil(self.image.image_name, mode="RGBA")
         mask = torch.zeros((1, image.height, image.width), dtype=torch.bool)
         if self.invert:
             mask[0] = torch.tensor(np.array(image)[:, :, 3] == 0, dtype=torch.bool)
@@ -87,7 +85,6 @@ class AlphaMaskToTensorInvocation(BaseInvocation):
     tags=["conditioning"],
     category="conditioning",
     version="1.1.0",
-    classification=Classification.Beta,
 )
 class InvertTensorMaskInvocation(BaseInvocation):
     """Inverts a tensor mask."""
@@ -234,7 +231,6 @@ WHITE = ColorField(r=255, g=255, b=255, a=255)
     tags=["mask"],
     category="mask",
     version="1.0.0",
-    classification=Classification.Beta,
 )
 class GetMaskBoundingBoxInvocation(BaseInvocation):
     """Gets the bounding box of the given mask image."""

@@ -43,7 +43,7 @@ from invokeai.app.invocations.primitives import BooleanOutput, FloatOutput, Inte
 from invokeai.app.invocations.scheduler import SchedulerOutput
 from invokeai.app.invocations.t2i_adapter import T2IAdapterField, T2IAdapterInvocation
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager.config import ModelType, SubModelType
+from invokeai.backend.model_manager.taxonomy import ModelType, SubModelType
 from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES
 from invokeai.version import __version__
 
@@ -610,10 +610,10 @@ class LatentsMetaOutput(LatentsOutput, MetadataOutput):
 
 @invocation(
     "denoise_latents_meta",
-    title="Denoise Latents + metadata",
+    title=f"{DenoiseLatentsInvocation.UIConfig.title} + Metadata",
     tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
     category="latents",
-    version="1.1.0",
+    version="1.1.1",
 )
 class DenoiseLatentsMetaInvocation(DenoiseLatentsInvocation, WithMetadata):
     def invoke(self, context: InvocationContext) -> LatentsMetaOutput:
@@ -675,10 +675,10 @@ class DenoiseLatentsMetaInvocation(DenoiseLatentsInvocation, WithMetadata):
 
 @invocation(
     "flux_denoise_meta",
-    title="Flux Denoise + metadata",
+    title=f"{FluxDenoiseInvocation.UIConfig.title} + Metadata",
     tags=["flux", "latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
     category="latents",
-    version="1.0.0",
+    version="1.0.1",
 )
 class FluxDenoiseLatentsMetaInvocation(FluxDenoiseInvocation, WithMetadata):
     """Run denoising process with a FLUX transformer model + metadata."""

@@ -6,7 +6,6 @@ from pydantic import BaseModel, Field
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )
@@ -15,10 +14,8 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.shared.models import FreeUConfig
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
-    BaseModelType,
-    ModelType,
-    SubModelType,
 )
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType


 class ModelIdentifierField(BaseModel):
@@ -122,11 +119,10 @@ class ModelIdentifierOutput(BaseInvocationOutput):

 @invocation(
     "model_identifier",
-    title="Model identifier",
+    title="Any Model",
     tags=["model"],
     category="model",
-    version="1.0.0",
-    classification=Classification.Prototype,
+    version="1.0.1",
 )
 class ModelIdentifierInvocation(BaseInvocation):
     """Selects any model, outputting it its identifier. Be careful with this one! The identifier will be accepted as
@@ -144,10 +140,10 @@ class ModelIdentifierInvocation(BaseInvocation):

 @invocation(
     "main_model_loader",
-    title="Main Model",
+    title="Main Model - SD1.5",
     tags=["model"],
     category="model",
-    version="1.0.3",
+    version="1.0.4",
 )
 class MainModelLoaderInvocation(BaseInvocation):
     """Loads a main model, outputting its submodels."""
@@ -181,7 +177,7 @@ class LoRALoaderOutput(BaseInvocationOutput):
     clip: Optional[CLIPField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP")


-@invocation("lora_loader", title="LoRA", tags=["model"], category="model", version="1.0.3")
+@invocation("lora_loader", title="Apply LoRA - SD1.5", tags=["model"], category="model", version="1.0.4")
 class LoRALoaderInvocation(BaseInvocation):
     """Apply selected lora to unet and text_encoder."""

@@ -244,7 +240,7 @@ class LoRASelectorOutput(BaseInvocationOutput):
     lora: LoRAField = OutputField(description="LoRA model and weight", title="LoRA")


-@invocation("lora_selector", title="LoRA Selector", tags=["model"], category="model", version="1.0.1")
+@invocation("lora_selector", title="Select LoRA", tags=["model"], category="model", version="1.0.3")
 class LoRASelectorInvocation(BaseInvocation):
     """Selects a LoRA model and weight."""

@@ -257,7 +253,9 @@ class LoRASelectorInvocation(BaseInvocation):
         return LoRASelectorOutput(lora=LoRAField(lora=self.lora, weight=self.weight))


-@invocation("lora_collection_loader", title="LoRA Collection Loader", tags=["model"], category="model", version="1.1.0")
+@invocation(
+    "lora_collection_loader", title="Apply LoRA Collection - SD1.5", tags=["model"], category="model", version="1.1.2"
+)
 class LoRACollectionLoader(BaseInvocation):
     """Applies a collection of LoRAs to the provided UNet and CLIP models."""

@@ -320,10 +318,10 @@ class SDXLLoRALoaderOutput(BaseInvocationOutput):

 @invocation(
     "sdxl_lora_loader",
-    title="SDXL LoRA",
+    title="Apply LoRA - SDXL",
     tags=["lora", "model"],
     category="model",
-    version="1.0.3",
+    version="1.0.5",
 )
 class SDXLLoRALoaderInvocation(BaseInvocation):
     """Apply selected lora to unet and text_encoder."""
@@ -400,10 +398,10 @@ class SDXLLoRALoaderInvocation(BaseInvocation):

 @invocation(
     "sdxl_lora_collection_loader",
-    title="SDXL LoRA Collection Loader",
+    title="Apply LoRA Collection - SDXL",
     tags=["model"],
     category="model",
-    version="1.1.0",
+    version="1.1.2",
 )
 class SDXLLoRACollectionLoader(BaseInvocation):
     """Applies a collection of SDXL LoRAs to the provided UNet and CLIP models."""
@@ -469,7 +467,9 @@ class SDXLLoRACollectionLoader(BaseInvocation):
         return output


-@invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.3")
+@invocation(
+    "vae_loader", title="VAE Model - SD1.5, SDXL, SD3, FLUX", tags=["vae", "model"], category="model", version="1.0.4"
+)
 class VAELoaderInvocation(BaseInvocation):
     """Loads a VAE model, outputting a VaeLoaderOutput"""

@@ -496,10 +496,10 @@ class SeamlessModeOutput(BaseInvocationOutput):

 @invocation(
     "seamless",
-    title="Seamless",
+    title="Apply Seamless - SD1.5, SDXL",
     tags=["seamless", "model"],
     category="model",
-    version="1.0.1",
+    version="1.0.2",
 )
 class SeamlessModeInvocation(BaseInvocation):
     """Applies the seamless transformation to the Model UNet and VAE."""
@@ -539,7 +539,7 @@ class SeamlessModeInvocation(BaseInvocation):
         return SeamlessModeOutput(unet=unet, vae=vae)


-@invocation("freeu", title="FreeU", tags=["freeu"], category="unet", version="1.0.1")
+@invocation("freeu", title="Apply FreeU - SD1.5, SDXL", tags=["freeu"], category="unet", version="1.0.2")
 class FreeUInvocation(BaseInvocation):
     """
     Applies FreeU to the UNet. Suggested values (b1/b2/s1/s2):
@@ -72,10 +72,10 @@ class NoiseOutput(BaseInvocationOutput):

 @invocation(
     "noise",
-    title="Noise",
+    title="Create Latent Noise",
     tags=["latents", "noise"],
     category="latents",
-    version="1.0.2",
+    version="1.0.3",
 )
 class NoiseInvocation(BaseInvocation):
     """Generates latent noise."""
@@ -6,7 +6,7 @@ from diffusers.models.transformers.transformer_sd3 import SD3Transformer2DModel
 from torchvision.transforms.functional import resize as tv_resize
 from tqdm import tqdm

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     DenoiseMaskField,
@@ -23,7 +23,7 @@ from invokeai.app.invocations.primitives import LatentsOutput
 from invokeai.app.invocations.sd3_text_encoder import SD3_T5_MAX_SEQ_LEN
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.flux.sampling_utils import clip_timestep_schedule_fractional
-from invokeai.backend.model_manager.config import BaseModelType
+from invokeai.backend.model_manager import BaseModelType
 from invokeai.backend.sd3.extensions.inpaint_extension import InpaintExtension
 from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import SD3ConditioningInfo
@@ -32,11 +32,10 @@ from invokeai.backend.util.devices import TorchDevice

 @invocation(
     "sd3_denoise",
-    title="SD3 Denoise",
+    title="Denoise - SD3",
     tags=["image", "sd3"],
     category="image",
-    version="1.1.0",
-    classification=Classification.Prototype,
+    version="1.1.1",
 )
 class SD3DenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Run denoising process with a SD3 model."""
@@ -2,7 +2,7 @@ import einops
 import torch
 from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     ImageField,
@@ -21,11 +21,10 @@ from invokeai.backend.util.devices import TorchDevice

 @invocation(
     "sd3_i2l",
-    title="SD3 Image to Latents",
+    title="Image to Latents - SD3",
     tags=["image", "latents", "vae", "i2l", "sd3"],
     category="image",
-    version="1.0.0",
-    classification=Classification.Prototype,
+    version="1.0.1",
 )
 class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Generates latents from an image."""
@@ -24,10 +24,10 @@ from invokeai.backend.util.devices import TorchDevice

 @invocation(
     "sd3_l2i",
-    title="SD3 Latents to Image",
+    title="Latents to Image - SD3",
     tags=["latents", "image", "vae", "l2i", "sd3"],
     category="latents",
-    version="1.3.1",
+    version="1.3.2",
 )
 class SD3LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Generates an image from latents."""
@@ -3,7 +3,6 @@ from typing import Optional
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )
@@ -14,7 +13,7 @@ from invokeai.app.util.t5_model_identifier import (
     preprocess_t5_encoder_model_identifier,
     preprocess_t5_tokenizer_model_identifier,
 )
-from invokeai.backend.model_manager.config import SubModelType
+from invokeai.backend.model_manager.taxonomy import SubModelType


 @invocation_output("sd3_model_loader_output")
@@ -30,11 +29,10 @@ class Sd3ModelLoaderOutput(BaseInvocationOutput):

 @invocation(
     "sd3_model_loader",
-    title="SD3 Main Model",
+    title="Main Model - SD3",
     tags=["model", "sd3"],
     category="model",
-    version="1.0.0",
-    classification=Classification.Prototype,
+    version="1.0.1",
 )
 class Sd3ModelLoaderInvocation(BaseInvocation):
     """Loads a SD3 base model, outputting its submodels."""
@@ -11,12 +11,12 @@ from transformers import (
     T5TokenizerFast,
 )

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
 from invokeai.app.invocations.model import CLIPField, T5EncoderField
 from invokeai.app.invocations.primitives import SD3ConditioningOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager.config import ModelFormat
+from invokeai.backend.model_manager.taxonomy import ModelFormat
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
@@ -29,11 +29,10 @@ SD3_T5_MAX_SEQ_LEN = 256

 @invocation(
     "sd3_text_encoder",
-    title="SD3 Text Encoding",
+    title="Prompt - SD3",
     tags=["prompt", "conditioning", "sd3"],
     category="conditioning",
-    version="1.0.0",
-    classification=Classification.Prototype,
+    version="1.0.1",
 )
 class Sd3TextEncoderInvocation(BaseInvocation):
     """Encodes and preps a prompt for a SD3 image."""
@@ -2,7 +2,7 @@ from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocati
 from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, UIType
 from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, UNetField, VAEField
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager import SubModelType
+from invokeai.backend.model_manager.taxonomy import SubModelType


 @invocation_output("sdxl_model_loader_output")
@@ -24,7 +24,7 @@ class SDXLRefinerModelLoaderOutput(BaseInvocationOutput):
     vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")


-@invocation("sdxl_model_loader", title="SDXL Main Model", tags=["model", "sdxl"], category="model", version="1.0.3")
+@invocation("sdxl_model_loader", title="Main Model - SDXL", tags=["model", "sdxl"], category="model", version="1.0.4")
 class SDXLModelLoaderInvocation(BaseInvocation):
     """Loads an sdxl base model, outputting its submodels."""

@@ -58,10 +58,10 @@ class SDXLModelLoaderInvocation(BaseInvocation):

 @invocation(
     "sdxl_refiner_model_loader",
-    title="SDXL Refiner Model",
+    title="Refiner Model - SDXL",
     tags=["model", "sdxl", "refiner"],
     category="model",
-    version="1.0.3",
+    version="1.0.4",
 )
 class SDXLRefinerModelLoaderInvocation(BaseInvocation):
     """Loads an sdxl refiner model, outputting its submodels."""
@@ -185,9 +185,9 @@ class SegmentAnythingInvocation(BaseInvocation):
             # Find the largest mask.
             return [max(masks, key=lambda x: float(x.sum()))]
         elif self.mask_filter == "highest_box_score":
-            assert (
-                bounding_boxes is not None
-            ), "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
+            assert bounding_boxes is not None, (
+                "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
+            )
             assert len(masks) == len(bounding_boxes)
             # Find the index of the bounding box with the highest score.
             # Note that we fallback to -1.0 if the score is None. This is mainly to satisfy the type checker. In most
@@ -45,7 +45,11 @@ class T2IAdapterOutput(BaseInvocationOutput):


 @invocation(
-    "t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.3"
+    "t2i_adapter",
+    title="T2I-Adapter - SD1.5, SDXL",
+    tags=["t2i_adapter", "control"],
+    category="t2i_adapter",
+    version="1.0.4",
 )
 class T2IAdapterInvocation(BaseInvocation):
     """Collects T2I-Adapter info to pass to other nodes."""
@@ -7,7 +7,7 @@ from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
 from diffusers.schedulers.scheduling_utils import SchedulerMixin
 from pydantic import field_validator

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.controlnet_image_processors import ControlField
 from invokeai.app.invocations.denoise_latents import DenoiseLatentsInvocation, get_scheduler
@@ -53,11 +53,10 @@ def crop_controlnet_data(control_data: ControlNetData, latent_region: TBLR) -> C

 @invocation(
     "tiled_multi_diffusion_denoise_latents",
-    title="Tiled Multi-Diffusion Denoise Latents",
+    title="Tiled Multi-Diffusion Denoise - SD1.5, SDXL",
     tags=["upscale", "denoise"],
     category="latents",
-    classification=Classification.Beta,
-    version="1.0.0",
+    version="1.0.1",
 )
 class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
     """Tiled Multi-Diffusion denoising.
@@ -7,7 +7,6 @@ from pydantic import BaseModel
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )
@@ -40,7 +39,6 @@ class CalculateImageTilesOutput(BaseInvocationOutput):
     tags=["tiles"],
     category="tiles",
     version="1.0.1",
-    classification=Classification.Beta,
 )
 class CalculateImageTilesInvocation(BaseInvocation):
     """Calculate the coordinates and overlaps of tiles that cover a target image shape."""
@@ -74,7 +72,6 @@ class CalculateImageTilesInvocation(BaseInvocation):
     tags=["tiles"],
     category="tiles",
     version="1.1.1",
-    classification=Classification.Beta,
 )
 class CalculateImageTilesEvenSplitInvocation(BaseInvocation):
     """Calculate the coordinates and overlaps of tiles that cover a target image shape."""
@@ -117,7 +114,6 @@ class CalculateImageTilesEvenSplitInvocation(BaseInvocation):
     tags=["tiles"],
     category="tiles",
     version="1.0.1",
-    classification=Classification.Beta,
 )
 class CalculateImageTilesMinimumOverlapInvocation(BaseInvocation):
     """Calculate the coordinates and overlaps of tiles that cover a target image shape."""
@@ -168,7 +164,6 @@ class TileToPropertiesOutput(BaseInvocationOutput):
     tags=["tiles"],
     category="tiles",
     version="1.0.1",
-    classification=Classification.Beta,
 )
 class TileToPropertiesInvocation(BaseInvocation):
     """Split a Tile into its individual properties."""
@@ -201,7 +196,6 @@ class PairTileImageOutput(BaseInvocationOutput):
     tags=["tiles"],
     category="tiles",
     version="1.0.1",
-    classification=Classification.Beta,
 )
 class PairTileImageInvocation(BaseInvocation):
     """Pair an image with its tile properties."""
@@ -230,7 +224,6 @@ BLEND_MODES = Literal["Linear", "Seam"]
     tags=["tiles"],
     category="tiles",
     version="1.1.1",
-    classification=Classification.Beta,
 )
 class MergeTilesToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Merge multiple tile images into a single image."""
@@ -41,16 +41,15 @@ def run_app() -> None:
     )

     # Find an open port, and modify the config accordingly.
-    orig_config_port = app_config.port
-    app_config.port = find_open_port(app_config.port)
-    if orig_config_port != app_config.port:
+    first_open_port = find_open_port(app_config.port)
+    if app_config.port != first_open_port:
+        orig_config_port = app_config.port
+        app_config.port = first_open_port
         logger.warning(f"Port {orig_config_port} is already in use. Using port {app_config.port}.")

     # Miscellaneous startup tasks.
     apply_monkeypatches()
     register_mime_types()
-    if app_config.dev_reload:
-        enable_dev_reload()
     check_cudnn(logger)

     # Initialize the app and event loop.
@@ -59,7 +58,12 @@ def run_app() -> None:
     # Load custom nodes. This must be done after importing the Graph class, which itself imports all modules from the
     # invocations module. The ordering here is implicit, but important - we want to load custom nodes after all the
     # core nodes have been imported so that we can catch when a custom node clobbers a core node.
-    load_custom_nodes(custom_nodes_path=app_config.custom_nodes_path)
+    load_custom_nodes(custom_nodes_path=app_config.custom_nodes_path, logger=logger)

+    if app_config.dev_reload:
+        # load_custom_nodes seems to bypass jurrigged's import sniffer, so be sure to call it *after* they're already
+        # imported.
+        enable_dev_reload(custom_nodes_path=app_config.custom_nodes_path)
+
     # Start the server.
     config = uvicorn.Config(
@@ -72,6 +72,7 @@ class InvokeAIAppConfig(BaseSettings):
         outputs_dir: Path to directory for outputs.
         custom_nodes_dir: Path to directory for custom nodes.
         style_presets_dir: Path to directory for style presets.
+        workflow_thumbnails_dir: Path to directory for workflow thumbnails.
         log_handlers: Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>".
         log_format: Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.<br>Valid values: `plain`, `color`, `syslog`, `legacy`
         log_level: Emit logging messages at this level or higher.<br>Valid values: `debug`, `info`, `warning`, `error`, `critical`
@@ -142,6 +143,7 @@ class InvokeAIAppConfig(BaseSettings):
     outputs_dir: Path = Field(default=Path("outputs"), description="Path to directory for outputs.")
     custom_nodes_dir: Path = Field(default=Path("nodes"), description="Path to directory for custom nodes.")
     style_presets_dir: Path = Field(default=Path("style_presets"), description="Path to directory for style presets.")
+    workflow_thumbnails_dir: Path = Field(default=Path("workflow_thumbnails"), description="Path to directory for workflow thumbnails.")

     # LOGGING
     log_handlers: list[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>".')
@@ -304,6 +306,11 @@ class InvokeAIAppConfig(BaseSettings):
         """Path to the style presets directory, resolved to an absolute path.."""
         return self._resolve(self.style_presets_dir)

+    @property
+    def workflow_thumbnails_path(self) -> Path:
+        """Path to the workflow thumbnails directory, resolved to an absolute path.."""
+        return self._resolve(self.workflow_thumbnails_dir)
+
     @property
     def convert_cache_path(self) -> Path:
         """Path to the converted cache models directory, resolved to an absolute path.."""
@@ -476,9 +483,9 @@ def load_and_migrate_config(config_path: Path) -> InvokeAIAppConfig:
     try:
         # Meta is not included in the model fields, so we need to validate it separately
         config = InvokeAIAppConfig.model_validate(loaded_config_dict)
-        assert (
-            config.schema_version == CONFIG_SCHEMA_VERSION
-        ), f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
+        assert config.schema_version == CONFIG_SCHEMA_VERSION, (
+            f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
+        )
         return config
     except Exception as e:
         raise RuntimeError(f"Failed to load config file {config_path}: {e}") from e
@@ -44,7 +44,8 @@ if TYPE_CHECKING:
         SessionQueueItem,
         SessionQueueStatus,
     )
-    from invokeai.backend.model_manager.config import AnyModelConfig, SubModelType
+    from invokeai.backend.model_manager import SubModelType
+    from invokeai.backend.model_manager.config import AnyModelConfig


 class EventServiceBase:
@@ -16,7 +16,8 @@ from invokeai.app.services.session_queue.session_queue_common import (
 )
 from invokeai.app.services.shared.graph import AnyInvocation, AnyInvocationOutput
 from invokeai.app.util.misc import get_timestamp
-from invokeai.backend.model_manager.config import AnyModelConfig, SubModelType
+from invokeai.backend.model_manager import SubModelType
+from invokeai.backend.model_manager.config import AnyModelConfig

 if TYPE_CHECKING:
     from invokeai.app.services.download.download_base import DownloadJob
@@ -32,6 +32,7 @@ if TYPE_CHECKING:
     from invokeai.app.services.session_queue.session_queue_base import SessionQueueBase
     from invokeai.app.services.urls.urls_base import UrlServiceBase
     from invokeai.app.services.workflow_records.workflow_records_base import WorkflowRecordsStorageBase
+    from invokeai.app.services.workflow_thumbnails.workflow_thumbnails_base import WorkflowThumbnailServiceBase
     from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData


@@ -65,6 +66,7 @@ class InvocationServices:
         conditioning: "ObjectSerializerBase[ConditioningFieldData]",
         style_preset_records: "StylePresetRecordsStorageBase",
         style_preset_image_files: "StylePresetImageFileStorageBase",
+        workflow_thumbnails: "WorkflowThumbnailServiceBase",
     ):
         self.board_images = board_images
         self.board_image_records = board_image_records
@@ -91,3 +93,4 @@ class InvocationServices:
         self.conditioning = conditioning
         self.style_preset_records = style_preset_records
         self.style_preset_image_files = style_preset_image_files
+        self.workflow_thumbnails = workflow_thumbnails
@@ -10,9 +10,9 @@ from typing_extensions import Annotated

 from invokeai.app.services.download import DownloadJob, MultiFileDownloadJob
 from invokeai.app.services.model_records import ModelRecordChanges
-from invokeai.backend.model_manager import AnyModelConfig, ModelRepoVariant
-from invokeai.backend.model_manager.config import ModelSourceType
+from invokeai.backend.model_manager.config import AnyModelConfig
 from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata
+from invokeai.backend.model_manager.taxonomy import ModelRepoVariant, ModelSourceType


 class InstallStatus(str, Enum):
@@ -38,9 +38,9 @@ from invokeai.backend.model_manager.config import (
     AnyModelConfig,
     CheckpointConfigBase,
     InvalidModelConfigException,
-    ModelRepoVariant,
-    ModelSourceType,
+    ModelConfigBase,
 )
+from invokeai.backend.model_manager.legacy_probe import ModelProbe
 from invokeai.backend.model_manager.metadata import (
     AnyModelRepoMetadata,
     HuggingFaceMetadataFetch,
@@ -49,8 +49,8 @@ from invokeai.backend.model_manager.metadata import (
     RemoteModelFile,
 )
 from invokeai.backend.model_manager.metadata.metadata_base import HuggingFaceMetadata
-from invokeai.backend.model_manager.probe import ModelProbe
 from invokeai.backend.model_manager.search import ModelSearch
+from invokeai.backend.model_manager.taxonomy import ModelRepoVariant, ModelSourceType
 from invokeai.backend.util import InvokeAILogger
 from invokeai.backend.util.catch_sigint import catch_sigint
 from invokeai.backend.util.devices import TorchDevice
@@ -182,9 +182,7 @@ class ModelInstallService(ModelInstallServiceBase):
     ) -> str:  # noqa D102
         model_path = Path(model_path)
         config = config or ModelRecordChanges()
-        info: AnyModelConfig = ModelProbe.probe(
-            Path(model_path), config.model_dump(), hash_algo=self._app_config.hashing_algorithm
-        )  # type: ignore
+        info: AnyModelConfig = self._probe(Path(model_path), config)  # type: ignore

         if preferred_name := config.name:
             preferred_name = Path(preferred_name).with_suffix(model_path.suffix)
@@ -644,12 +642,22 @@ class ModelInstallService(ModelInstallServiceBase):
             move(old_path, new_path)
         return new_path

+    def _probe(self, model_path: Path, config: Optional[ModelRecordChanges] = None):
+        config = config or ModelRecordChanges()
+        hash_algo = self._app_config.hashing_algorithm
+        fields = config.model_dump()
+
+        try:
+            return ModelConfigBase.classify(model_path=model_path, hash_algo=hash_algo, **fields)
+        except InvalidModelConfigException:
+            return ModelProbe.probe(model_path=model_path, fields=fields, hash_algo=hash_algo)  # type: ignore
+
     def _register(
         self, model_path: Path, config: Optional[ModelRecordChanges] = None, info: Optional[AnyModelConfig] = None
     ) -> str:
         config = config or ModelRecordChanges()

-        info = info or ModelProbe.probe(model_path, config.model_dump(), hash_algo=self._app_config.hashing_algorithm)  # type: ignore
+        info = info or self._probe(model_path, config)

         model_path = model_path.resolve()

@@ -5,9 +5,10 @@ from abc import ABC, abstractmethod
 from pathlib import Path
 from typing import Callable, Optional

-from invokeai.backend.model_manager import AnyModel, AnyModelConfig, SubModelType
+from invokeai.backend.model_manager.config import AnyModelConfig
 from invokeai.backend.model_manager.load import LoadedModel, LoadedModelWithoutConfig
 from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
+from invokeai.backend.model_manager.taxonomy import AnyModel, SubModelType


 class ModelLoadServiceBase(ABC):
@@ -11,7 +11,7 @@ from torch import load as torch_load
 from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.app.services.invoker import Invoker
 from invokeai.app.services.model_load.model_load_base import ModelLoadServiceBase
-from invokeai.backend.model_manager import AnyModel, AnyModelConfig, SubModelType
+from invokeai.backend.model_manager.config import AnyModelConfig
 from invokeai.backend.model_manager.load import (
     LoadedModel,
     LoadedModelWithoutConfig,
@@ -20,6 +20,7 @@ from invokeai.backend.model_manager.load import (
 )
 from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
 from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.taxonomy import AnyModel, SubModelType
 from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.logging import InvokeAILogger

@@ -85,8 +86,11 @@ class ModelLoadService(ModelLoadServiceBase):

         def torch_load_file(checkpoint: Path) -> AnyModel:
             scan_result = scan_file_path(checkpoint)
-            if scan_result.infected_files != 0 or scan_result.scan_err:
-                raise Exception("The model at {checkpoint} is potentially infected by malware. Aborting load.")
+            if scan_result.infected_files != 0:
+                raise Exception(f"The model at {checkpoint} is potentially infected by malware. Aborting load.")
+            if scan_result.scan_err:
+                raise Exception(f"Error scanning model at {checkpoint} for malware. Aborting load.")
+
             result = torch_load(checkpoint, map_location="cpu")
             return result

@@ -1,16 +1,12 @@
 """Initialization file for model manager service."""

 from invokeai.app.services.model_manager.model_manager_default import ModelManagerService, ModelManagerServiceBase
-from invokeai.backend.model_manager import AnyModel, AnyModelConfig, BaseModelType, ModelType, SubModelType
+from invokeai.backend.model_manager import AnyModelConfig
 from invokeai.backend.model_manager.load import LoadedModel

 __all__ = [
     "ModelManagerServiceBase",
     "ModelManagerService",
-    "AnyModel",
     "AnyModelConfig",
-    "BaseModelType",
-    "ModelType",
-    "SubModelType",
     "LoadedModel",
 ]
@@ -14,10 +14,12 @@ from invokeai.app.services.shared.pagination import PaginatedResults
 from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
-    BaseModelType,
-    ClipVariantType,
     ControlAdapterDefaultSettings,
     MainModelDefaultSettings,
+)
+from invokeai.backend.model_manager.taxonomy import (
+    BaseModelType,
+    ClipVariantType,
     ModelFormat,
     ModelSourceType,
     ModelType,
@@ -60,11 +60,9 @@ from invokeai.app.services.shared.pagination import PaginatedResults
 from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
-    BaseModelType,
     ModelConfigFactory,
-    ModelFormat,
-    ModelType,
 )
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType


 class ModelRecordServiceSQL(ModelRecordServiceBase):
@@ -21,10 +21,16 @@ class ObjectSerializerDisk(ObjectSerializerBase[T]):
     """Disk-backed storage for arbitrary python objects. Serialization is handled by `torch.save` and `torch.load`.

     :param output_dir: The folder where the serialized objects will be stored
+    :param safe_globals: A list of types to be added to the safe globals for torch serialization
     :param ephemeral: If True, objects will be stored in a temporary directory inside the given output_dir and cleaned up on exit
     """

-    def __init__(self, output_dir: Path, ephemeral: bool = False):
+    def __init__(
+        self,
+        output_dir: Path,
+        safe_globals: list[type],
+        ephemeral: bool = False,
+    ) -> None:
         super().__init__()
         self._ephemeral = ephemeral
         self._base_output_dir = output_dir
@@ -42,6 +48,8 @@ class ObjectSerializerDisk(ObjectSerializerBase[T]):
         self._output_dir = Path(self._tempdir.name) if self._tempdir else self._base_output_dir
         self.__obj_class_name: Optional[str] = None

+        torch.serialization.add_safe_globals(safe_globals) if safe_globals else None
+
     def load(self, name: str) -> T:
         file_path = self._get_path(name)
         try:
@@ -570,7 +570,10 @@ ValueToInsertTuple: TypeAlias = tuple[
     str | None,  # destination (optional)
     int | None,  # retried_from_item_id (optional, this is always None for new items)
 ]
-"""A type alias for the tuple of values to insert into the session queue table."""
+"""A type alias for the tuple of values to insert into the session queue table.
+
+**If you change this, be sure to update the `enqueue_batch` and `retry_items_by_id` methods in the session queue service!**
+"""


 def prepare_values_to_insert(
@@ -27,6 +27,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
     SessionQueueItemDTO,
     SessionQueueItemNotFoundError,
     SessionQueueStatus,
+    ValueToInsertTuple,
     calc_session_count,
     prepare_values_to_insert,
 )
@@ -689,7 +690,7 @@ class SqliteSessionQueue(SessionQueueBase):
         """Retries the given queue items"""
         try:
             cursor = self._conn.cursor()
-            values_to_insert: list[tuple] = []
+            values_to_insert: list[ValueToInsertTuple] = []
             retried_item_ids: list[int] = []

             for item_id in item_ids:
@@ -715,16 +716,16 @@ class SqliteSessionQueue(SessionQueueBase):
                     else queue_item.item_id
                 )

-                value_to_insert = (
+                value_to_insert: ValueToInsertTuple = (
                     queue_item.queue_id,
-                    queue_item.batch_id,
-                    queue_item.destination,
-                    field_values_json,
-                    queue_item.origin,
-                    queue_item.priority,
-                    workflow_json,
                     cloned_session_json,
                     cloned_session.id,
+                    queue_item.batch_id,
+                    field_values_json,
+                    queue_item.priority,
+                    workflow_json,
+                    queue_item.origin,
+                    queue_item.destination,
                     retried_from_item_id,
                 )
                 values_to_insert.append(value_to_insert)
@@ -21,6 +21,7 @@ from invokeai.app.invocations import * # noqa: F401 F403
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
+    InvocationRegistry,
     invocation,
     invocation_output,
 )
@@ -283,7 +284,7 @@ class AnyInvocation(BaseInvocation):
     @classmethod
     def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
         def validate_invocation(v: Any) -> "AnyInvocation":
-            return BaseInvocation.get_typeadapter().validate_python(v)
+            return InvocationRegistry.get_invocation_typeadapter().validate_python(v)

         return core_schema.no_info_plain_validator_function(validate_invocation)

@@ -294,7 +295,7 @@ class AnyInvocation(BaseInvocation):
         # Nodes are too powerful, we have to make our own OpenAPI schema manually
         # No but really, because the schema is dynamic depending on loaded nodes, we need to generate it manually
         oneOf: list[dict[str, str]] = []
-        names = [i.__name__ for i in BaseInvocation.get_invocations()]
+        names = [i.__name__ for i in InvocationRegistry.get_invocation_classes()]
         for name in sorted(names):
             oneOf.append({"$ref": f"#/components/schemas/{name}"})
         return {"oneOf": oneOf}
@@ -304,7 +305,7 @@ class AnyInvocationOutput(BaseInvocationOutput):
     @classmethod
     def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler):
         def validate_invocation_output(v: Any) -> "AnyInvocationOutput":
-            return BaseInvocationOutput.get_typeadapter().validate_python(v)
+            return InvocationRegistry.get_output_typeadapter().validate_python(v)

         return core_schema.no_info_plain_validator_function(validate_invocation_output)

@@ -316,7 +317,7 @@ class AnyInvocationOutput(BaseInvocationOutput):
         # No but really, because the schema is dynamic depending on loaded nodes, we need to generate it manually

         oneOf: list[dict[str, str]] = []
-        names = [i.__name__ for i in BaseInvocationOutput.get_outputs()]
+        names = [i.__name__ for i in InvocationRegistry.get_output_classes()]
         for name in sorted(names):
             oneOf.append({"$ref": f"#/components/schemas/{name}"})
         return {"oneOf": oneOf}
@@ -20,14 +20,10 @@ from invokeai.app.services.session_processor.session_processor_common import Pro
 from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
 from invokeai.app.util.step_callback import flux_step_callback, stable_diffusion_step_callback
 from invokeai.backend.model_manager.config import (
-    AnyModel,
     AnyModelConfig,
-    BaseModelType,
-    ModelFormat,
-    ModelType,
-    SubModelType,
 )
 from invokeai.backend.model_manager.load.load_base import LoadedModel, LoadedModelWithoutConfig
+from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType
 from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData

@@ -19,6 +19,8 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_13 import
 from invokeai.app.services.shared.sqlite_migrator.migrations.migration_14 import build_migration_14
 from invokeai.app.services.shared.sqlite_migrator.migrations.migration_15 import build_migration_15
 from invokeai.app.services.shared.sqlite_migrator.migrations.migration_16 import build_migration_16
+from invokeai.app.services.shared.sqlite_migrator.migrations.migration_17 import build_migration_17
+from invokeai.app.services.shared.sqlite_migrator.migrations.migration_18 import build_migration_18
 from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator


@@ -55,6 +57,8 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
     migrator.register_migration(build_migration_14())
     migrator.register_migration(build_migration_15())
     migrator.register_migration(build_migration_16())
+    migrator.register_migration(build_migration_17())
+    migrator.register_migration(build_migration_18())
     migrator.run_migrations()

     return db
@@ -0,0 +1,35 @@
+import sqlite3
+
+from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
+
+
+class Migration17Callback:
+    def __call__(self, cursor: sqlite3.Cursor) -> None:
+        self._add_workflows_tags_col(cursor)
+
+    def _add_workflows_tags_col(self, cursor: sqlite3.Cursor) -> None:
+        """
+        - Adds `tags` column to the workflow_library table. It is a generated column that extracts the tags from the
+          workflow JSON.
+        """
+
+        cursor.execute(
+            "ALTER TABLE workflow_library ADD COLUMN tags TEXT GENERATED ALWAYS AS (json_extract(workflow, '$.tags')) VIRTUAL;"
+        )
+
+
+def build_migration_17() -> Migration:
+    """
+    Build the migration from database version 16 to 17.
+
+    This migration does the following:
+    - Adds `tags` column to the workflow_library table. It is a generated column that extracts the tags from the
+      workflow JSON.
+    """
+    migration_17 = Migration(
+        from_version=16,
+        to_version=17,
+        callback=Migration17Callback(),
+    )
+
+    return migration_17
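The new `tags` column is VIRTUAL, so SQLite computes it on read and existing rows pick it up without any rewrite. A quick standalone check of the behavior — the table shape is simplified here, and this assumes a SQLite build with JSON1 and generated-column support (3.31+):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE workflow_library (workflow TEXT);")
# The same statement Migration17Callback runs
conn.execute(
    "ALTER TABLE workflow_library ADD COLUMN tags TEXT "
    "GENERATED ALWAYS AS (json_extract(workflow, '$.tags')) VIRTUAL;"
)
conn.execute("""INSERT INTO workflow_library (workflow) VALUES ('{"tags": "sdxl, upscaling"}');""")
# The generated column exposes the JSON field directly
print(conn.execute("SELECT tags FROM workflow_library;").fetchone())  # ('sdxl, upscaling',)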
@@ -0,0 +1,47 @@
+import sqlite3
+
+from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
+
+
+class Migration18Callback:
+    def __call__(self, cursor: sqlite3.Cursor) -> None:
+        self._make_workflow_opened_at_nullable(cursor)
+
+    def _make_workflow_opened_at_nullable(self, cursor: sqlite3.Cursor) -> None:
+        """
+        Make the `opened_at` column nullable in the `workflow_library` table. This is accomplished by:
+        - Dropping the existing `idx_workflow_library_opened_at` index (must be done before dropping the column)
+        - Dropping the existing `opened_at` column
+        - Adding a new nullable column `opened_at` (no data migration needed, all values will be NULL)
+        - Adding a new `idx_workflow_library_opened_at` index on the `opened_at` column
+        """
+        # SQLite cannot alter a column in place; drop the index first, because it references the column
+        cursor.execute("DROP INDEX IF EXISTS idx_workflow_library_opened_at;")
+        # Drop the existing non-nullable column
+        cursor.execute("ALTER TABLE workflow_library DROP COLUMN opened_at;")
+        # Add new nullable column - all values will be NULL - no migration of data needed
+        cursor.execute("ALTER TABLE workflow_library ADD COLUMN opened_at DATETIME;")
+        # Create new index on the new column
+        cursor.execute(
+            "CREATE INDEX idx_workflow_library_opened_at ON workflow_library(opened_at);",
+        )
+
+
+def build_migration_18() -> Migration:
+    """
+    Build the migration from database version 17 to 18.
+
+    This migration does the following:
+    - Make the `opened_at` column nullable in the `workflow_library` table. This is accomplished by:
+        - Dropping the existing `idx_workflow_library_opened_at` index (must be done before dropping the column)
+        - Dropping the existing `opened_at` column
+        - Adding a new nullable column `opened_at` (no data migration needed, all values will be NULL)
+        - Adding a new `idx_workflow_library_opened_at` index on the `opened_at` column
+    """
+    migration_18 = Migration(
+        from_version=17,
+        to_version=18,
+        callback=Migration18Callback(),
+    )
+
+    return migration_18
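Because SQLite's ALTER TABLE cannot change a column's nullability in place, the migration drops and re-adds the column along with its index. The sequence can be exercised against a throwaway database — the schema here is simplified, and ALTER TABLE ... DROP COLUMN requires SQLite 3.35+:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE workflow_library "
    "(workflow TEXT, opened_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP);"
)
conn.execute("CREATE INDEX idx_workflow_library_opened_at ON workflow_library(opened_at);")

# The same statements Migration18Callback runs
cursor = conn.cursor()
cursor.execute("DROP INDEX IF EXISTS idx_workflow_library_opened_at;")
cursor.execute("ALTER TABLE workflow_library DROP COLUMN opened_at;")
cursor.execute("ALTER TABLE workflow_library ADD COLUMN opened_at DATETIME;")
cursor.execute("CREATE INDEX idx_workflow_library_opened_at ON workflow_library(opened_at);")

# The rebuilt column is nullable, so rows inserted without opened_at get NULL
conn.execute("INSERT INTO workflow_library (workflow) VALUES ('{}');")
print(conn.execute("SELECT opened_at FROM workflow_library;").fetchone())  # (None,)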
@@ -18,3 +18,8 @@ class UrlServiceBase(ABC):
     def get_style_preset_image_url(self, style_preset_id: str) -> str:
        """Gets the URL for a style preset image"""
        pass
+
+    @abstractmethod
+    def get_workflow_thumbnail_url(self, workflow_id: str) -> str:
+        """Gets the URL for a workflow thumbnail"""
+        pass
@@ -22,3 +22,6 @@ class LocalUrlService(UrlServiceBase):

     def get_style_preset_image_url(self, style_preset_id: str) -> str:
         return f"{self._base_url}/style_presets/i/{style_preset_id}/image"
+
+    def get_workflow_thumbnail_url(self, workflow_id: str) -> str:
+        return f"{self._base_url}/workflows/i/{workflow_id}/thumbnail"
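The new thumbnail route mirrors the existing style-preset one. As a standalone sketch of the URL shape only — this is not the InvokeAI service class, whose constructor is not shown in this excerpt:

def workflow_thumbnail_url(base_url: str, workflow_id: str) -> str:
    # Same shape as LocalUrlService.get_workflow_thumbnail_url above
    return f"{base_url}/workflows/i/{workflow_id}/thumbnail"

print(workflow_thumbnail_url("http://127.0.0.1:9090/api/v1", "wf-123"))
# http://127.0.0.1:9090/api/v1/workflows/i/wf-123/thumbnail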
@@ -1,10 +1,11 @@
 {
-  "name": "ESRGAN Upscaling with Canny ControlNet",
+  "id": "default_686bb1d0-d086-4c70-9fa3-2f600b922023",
+  "name": "Upscaler - SD1.5, ESRGAN",
   "author": "InvokeAI",
-  "description": "Sample workflow for using Upscaling with ControlNet with SD1.5",
+  "description": "Sample workflow for using ESRGAN to upscale with ControlNet with SD1.5",
   "version": "2.1.0",
   "contact": "invoke@invoke.ai",
-  "tags": "upscale, controlnet, default",
+  "tags": "sd1.5, upscaling, control",
   "notes": "",
   "exposedFields": [
     {
@@ -184,14 +185,7 @@
       },
       "control_model": {
         "name": "control_model",
-        "label": "Control Model (select Canny)",
-        "value": {
-          "key": "a7b9c76f-4bc5-42aa-b918-c1c458a5bb24",
-          "hash": "blake3:260c7f8e10aefea9868cfc68d89970e91033bd37132b14b903e70ee05ebf530e",
-          "name": "sd-controlnet-canny",
-          "base": "sd-1",
-          "type": "controlnet"
-        }
+        "label": "Control Model (select Canny)"
       },
       "control_weight": {
         "name": "control_weight",
@@ -294,14 +288,7 @@
       "inputs": {
         "model": {
           "name": "model",
-          "label": "",
-          "value": {
-            "key": "5cd43ca0-dd0a-418d-9f7e-35b2b9d5e106",
-            "hash": "blake3:6987f323017f597213cc3264250edf57056d21a40a0a85d83a1a33a7d44dc41a",
-            "name": "Deliberate_v5",
-            "base": "sd-1",
-            "type": "main"
-          }
+          "label": ""
         }
       },
       "isOpen": true,
@@ -848,4 +835,4 @@
         "targetHandle": "image_resolution"
       }
     ]
 }
@@ -1,10 +1,11 @@
 {
-  "name": "FLUX Image to Image",
+  "id": "default_cbf0e034-7b54-4b2c-b670-3b1e2e4b4a88",
+  "name": "Image to Image - FLUX",
   "author": "InvokeAI",
   "description": "A simple image-to-image workflow using a FLUX dev model. ",
   "version": "1.1.0",
   "contact": "",
-  "tags": "image2image, flux, image-to-image",
+  "tags": "flux, image to image",
   "notes": "Prerequisite model downloads: T5 Encoder, CLIP-L Encoder, and FLUX VAE. Quantized and un-quantized versions can be found in the starter models tab within your Model Manager. We recommend using FLUX dev models for image-to-image workflows. The image-to-image performance with FLUX schnell models is poor.",
   "exposedFields": [
     {
@@ -200,36 +201,15 @@
       },
       "t5_encoder_model": {
         "name": "t5_encoder_model",
-        "label": "",
-        "value": {
-          "key": "d18d5575-96b6-4da3-b3d8-eb58308d6705",
-          "hash": "random:f2f9ed74acdfb4bf6fec200e780f6c25f8dd8764a35e65d425d606912fdf573a",
-          "name": "t5_bnb_int8_quantized_encoder",
-          "base": "any",
-          "type": "t5_encoder"
-        }
+        "label": ""
       },
       "clip_embed_model": {
         "name": "clip_embed_model",
-        "label": "",
-        "value": {
-          "key": "5a19d7e5-8d98-43cd-8a81-87515e4b3b4e",
-          "hash": "random:4bd08514c08fb6ff04088db9aeb45def3c488e8b5fd09a35f2cc4f2dc346f99f",
-          "name": "clip-vit-large-patch14",
-          "base": "any",
-          "type": "clip_embed"
-        }
+        "label": ""
       },
       "vae_model": {
         "name": "vae_model",
-        "label": "",
-        "value": {
-          "key": "9172beab-5c1d-43f0-b2f0-6e0b956710d9",
-          "hash": "random:c54dde288e5fa2e6137f1c92e9d611f598049e6f16e360207b6d96c9f5a67ba0",
-          "name": "FLUX.1-schnell_ae",
-          "base": "flux",
-          "type": "vae"
-        }
+        "label": ""
       }
     }
   },
@@ -1,10 +1,11 @@
 {
-  "name": "Face Detailer with IP-Adapter & Canny (See Note in Details)",
+  "id": "default_dec5a2e9-f59c-40d9-8869-a056751d79b8",
+  "name": "Face Detailer - SD1.5",
   "author": "kosmoskatten",
   "description": "A workflow to add detail to and improve faces. This workflow is most effective when used with a model that creates realistic outputs. ",
   "version": "2.1.0",
   "contact": "invoke@invoke.ai",
-  "tags": "face detailer, IP-Adapter, Canny",
+  "tags": "sd1.5, reference image, control",
   "notes": "Set this image as the blur mask: https://i.imgur.com/Gxi61zP.png",
   "exposedFields": [
     {
@@ -135,14 +136,7 @@
       },
       "control_model": {
         "name": "control_model",
-        "label": "Control Model (select canny)",
-        "value": {
-          "key": "5bdaacf7-a7a3-4fb8-b394-cc0ffbb8941d",
-          "hash": "blake3:260c7f8e10aefea9868cfc68d89970e91033bd37132b14b903e70ee05ebf530e",
-          "name": "sd-controlnet-canny",
-          "base": "sd-1",
-          "type": "controlnet"
-        }
+        "label": "Control Model (select canny)"
       },
       "control_weight": {
         "name": "control_weight",
@@ -196,14 +190,7 @@
       },
       "ip_adapter_model": {
         "name": "ip_adapter_model",
-        "label": "IP-Adapter Model (select IP Adapter Face)",
-        "value": {
-          "key": "1cc210bb-4d0a-4312-b36c-b5d46c43768e",
-          "hash": "blake3:3d669dffa7471b357b4df088b99ffb6bf4d4383d5e0ef1de5ec1c89728a3d5a5",
-          "name": "ip_adapter_sd15",
-          "base": "sd-1",
-          "type": "ip_adapter"
-        }
+        "label": "IP-Adapter Model (select IP Adapter Face)"
       },
       "clip_vision_model": {
         "name": "clip_vision_model",
@@ -1445,4 +1432,4 @@
         "targetHandle": "vae"
       }
     ]
 }
@@ -1,10 +1,11 @@
 {
-  "name": "FLUX Text to Image",
+  "id": "default_444fe292-896b-44fd-bfc6-c0b5d220fffc",
+  "name": "Text to Image - FLUX",
   "author": "InvokeAI",
   "description": "A simple text-to-image workflow using FLUX dev or schnell models.",
   "version": "1.1.0",
   "contact": "",
-  "tags": "text2image, flux",
+  "tags": "flux, text to image",
   "notes": "Prerequisite model downloads: T5 Encoder, CLIP-L Encoder, and FLUX VAE. Quantized and un-quantized versions can be found in the starter models tab within your Model Manager. We recommend 4 steps for FLUX schnell models and 30 steps for FLUX dev models.",
   "exposedFields": [
     {
@@ -168,36 +169,15 @@
       },
       "t5_encoder_model": {
         "name": "t5_encoder_model",
-        "label": "",
-        "value": {
-          "key": "d18d5575-96b6-4da3-b3d8-eb58308d6705",
-          "hash": "random:f2f9ed74acdfb4bf6fec200e780f6c25f8dd8764a35e65d425d606912fdf573a",
-          "name": "t5_bnb_int8_quantized_encoder",
-          "base": "any",
-          "type": "t5_encoder"
-        }
+        "label": ""
       },
       "clip_embed_model": {
         "name": "clip_embed_model",
-        "label": "",
-        "value": {
-          "key": "5a19d7e5-8d98-43cd-8a81-87515e4b3b4e",
-          "hash": "random:4bd08514c08fb6ff04088db9aeb45def3c488e8b5fd09a35f2cc4f2dc346f99f",
-          "name": "clip-vit-large-patch14",
-          "base": "any",
-          "type": "clip_embed"
-        }
+        "label": ""
      },
       "vae_model": {
         "name": "vae_model",
-        "label": "",
-        "value": {
-          "key": "9172beab-5c1d-43f0-b2f0-6e0b956710d9",
-          "hash": "random:c54dde288e5fa2e6137f1c92e9d611f598049e6f16e360207b6d96c9f5a67ba0",
-          "name": "FLUX.1-schnell_ae",
-          "base": "flux",
-          "type": "vae"
-        }
+        "label": ""
       }
     }
   },
@@ -1,10 +1,11 @@
 {
-  "name": "Multi ControlNet (Canny & Depth)",
+  "id": "default_2d05e719-a6b9-4e64-9310-b875d3b2f9d2",
+  "name": "Text to Image - SD1.5, Control",
   "author": "InvokeAI",
   "description": "A sample workflow using canny & depth ControlNets to guide the generation process. ",
   "version": "2.1.0",
   "contact": "invoke@invoke.ai",
-  "tags": "ControlNet, canny, depth",
+  "tags": "sd1.5, control, text to image",
   "notes": "",
   "exposedFields": [
     {
@@ -216,14 +217,7 @@
       },
       "control_model": {
         "name": "control_model",
-        "label": "Control Model (select canny)",
-        "value": {
-          "key": "5bdaacf7-a7a3-4fb8-b394-cc0ffbb8941d",
-          "hash": "blake3:260c7f8e10aefea9868cfc68d89970e91033bd37132b14b903e70ee05ebf530e",
-          "name": "sd-controlnet-canny",
-          "base": "sd-1",
-          "type": "controlnet"
-        }
+        "label": "Control Model (select canny)"
       },
       "control_weight": {
         "name": "control_weight",
@@ -370,14 +364,7 @@
       },
       "control_model": {
         "name": "control_model",
-        "label": "Control Model (select depth)",
-        "value": {
-          "key": "87e8855c-671f-4c9e-bbbb-8ed47ccb4aac",
-          "hash": "blake3:2550bf22a53942dfa28ab2fed9d10d80851112531f44d977168992edf9d0534c",
-          "name": "control_v11f1p_sd15_depth",
-          "base": "sd-1",
-          "type": "controlnet"
-        }
+        "label": "Control Model (select depth)"
       },
       "control_weight": {
         "name": "control_weight",
@@ -1014,4 +1001,4 @@
         "targetHandle": "image_resolution"
       }
     ]
 }
@@ -1,10 +1,11 @@
 {
-  "name": "MultiDiffusion SD1.5",
+  "id": "default_f96e794f-eb3e-4d01-a960-9b4e43402bcf",
+  "name": "Upscaler - SD1.5, MultiDiffusion",
   "author": "Invoke",
   "description": "A workflow to upscale an input image with tiled upscaling, using SD1.5 based models.",
   "version": "1.0.0",
   "contact": "invoke@invoke.ai",
-  "tags": "tiled, upscaling, sdxl",
+  "tags": "sd1.5, upscaling",
   "notes": "",
   "exposedFields": [
     {
@@ -52,7 +53,6 @@
     "version": "3.0.0",
     "category": "default"
   },
-  "id": "e5b5fb01-8906-463a-963a-402dbc42f79b",
   "nodes": [
     {
       "id": "33fe76a0-5efd-4482-a7f0-e2abf1223dc2",
@@ -135,14 +135,7 @@
       "inputs": {
         "model": {
           "name": "model",
-          "label": "",
-          "value": {
-            "key": "e7b402e5-62e5-4acb-8c39-bee6bdb758ab",
-            "hash": "c8659e796168d076368256b57edbc1b48d6dafc1712f1bb37cc57c7c06889a6b",
-            "name": "526mix",
-            "base": "sd-1",
-            "type": "main"
-          }
+          "label": ""
         }
       }
     },
@@ -384,21 +377,11 @@
       },
       "image": {
         "name": "image",
-        "label": "Image to Upscale",
-        "value": {
-          "image_name": "ee7009f7-a35d-488b-a2a6-21237ef5ae05.png"
-        }
+        "label": "Image to Upscale"
       },
       "image_to_image_model": {
         "name": "image_to_image_model",
-        "label": "",
-        "value": {
-          "key": "38bb1a29-8ede-42ba-b77f-64b3478896eb",
-          "hash": "blake3:e52fdbee46a484ebe9b3b20ea0aac0a35a453ab6d0d353da00acfd35ce7a91ed",
-          "name": "4xNomosWebPhoto_esrgan",
-          "base": "sdxl",
-          "type": "spandrel_image_to_image"
-        }
+        "label": ""
       },
       "tile_size": {
         "name": "tile_size",
@@ -437,14 +420,7 @@
       "inputs": {
         "model": {
           "name": "model",
-          "label": "ControlNet Model - Choose a Tile ControlNet",
-          "value": {
-            "key": "20645e4d-ef97-4c5a-9243-b834a3483925",
-            "hash": "f0812e13758f91baf4e54b7dbb707b70642937d3b2098cd2b94cc36d3eba308e",
-            "name": "tile",
-            "base": "sd-1",
-            "type": "controlnet"
-          }
+          "label": "ControlNet Model - Choose a Tile ControlNet"
        }
      },
@@ -1427,4 +1403,4 @@
         "targetHandle": "noise"
       }
     ]
 }
@@ -1,10 +1,11 @@
 {
-  "name": "MultiDiffusion SDXL",
+  "id": "default_35658541-6d41-4a20-8ec5-4bf2561faed0",
+  "name": "Upscaler - SDXL, MultiDiffusion",
   "author": "Invoke",
   "description": "A workflow to upscale an input image with tiled upscaling, using SDXL based models.",
   "version": "1.1.0",
   "contact": "invoke@invoke.ai",
-  "tags": "tiled, upscaling, sdxl",
+  "tags": "sdxl, upscaling",
   "notes": "",
   "exposedFields": [
     {
@@ -56,7 +57,6 @@
     "version": "3.0.0",
     "category": "default"
   },
-  "id": "dd607062-9e1b-48b9-89ad-9762cdfbb8f4",
   "nodes": [
     {
       "id": "71a116e1-c631-48b3-923d-acea4753b887",
@@ -341,14 +341,7 @@
       "inputs": {
         "model": {
           "name": "model",
-          "label": "ControlNet Model - Choose a Tile ControlNet",
-          "value": {
-            "key": "74f4651f-0ace-4b7b-b616-e98360257797",
-            "hash": "blake3:167a5b84583aaed3e5c8d660b45830e82e1c602743c689d3c27773c6c8b85b4a",
-            "name": "controlnet-tile-sdxl-1.0",
-            "base": "sdxl",
-            "type": "controlnet"
-          }
+          "label": "ControlNet Model - Choose a Tile ControlNet"
        }
      }
     },
@@ -801,14 +794,7 @@
       "inputs": {
         "vae_model": {
           "name": "vae_model",
-          "label": "",
-          "value": {
-            "key": "ff926845-090e-4d46-b81e-30289ee47474",
-            "hash": "9705ab1c31fa96b308734214fb7571a958621c7a9247eed82b7d277145f8d9fa",
-            "name": "VAEFix",
-            "base": "sdxl",
-            "type": "vae"
-          }
+          "label": ""
        }
      }
     },
@@ -832,14 +818,7 @@
       "inputs": {
         "model": {
           "name": "model",
-          "label": "SDXL Model",
-          "value": {
-            "key": "ab191f73-68d2-492c-8aec-b438a8cf0f45",
-            "hash": "blake3:2d50e940627e3bf555f015280ec0976d5c1fa100f7bc94e95ffbfc770e98b6fe",
-            "name": "CustomXLv7",
-            "base": "sdxl",
-            "type": "main"
-          }
+          "label": "SDXL Model"
        }
      }
     },
@@ -1642,4 +1621,4 @@
         "targetHandle": "noise"
       }
     ]
 }
@@ -1,10 +1,11 @@
 {
-  "name": "Prompt from File",
+  "id": "default_d7a1c60f-ca2f-4f90-9e33-75a826ca6d8f",
+  "name": "Text to Image - SD1.5, Prompt from File",
   "author": "InvokeAI",
   "description": "Sample workflow using Prompt from File node",
   "version": "2.1.0",
   "contact": "invoke@invoke.ai",
-  "tags": "text2image, prompt from file, default",
+  "tags": "sd1.5, text to image",
   "notes": "",
   "exposedFields": [
     {
@@ -512,4 +513,4 @@
         "targetHandle": "vae"
       }
     ]
 }
@@ -3,12 +3,14 @@
 Workflows placed in this directory will be synced to the `workflow_library` as
 _default workflows_ on app startup.

+- Default workflows must have an id that starts with "default\_". The ID must be retained when the workflow is updated. You may need to do this manually.
 - Default workflows are not editable by users. If they are loaded and saved,
   they will save as a copy of the default workflow.
 - Default workflows must have the `meta.category` property set to `"default"`.
   An exception will be raised during sync if this is not set correctly.
 - Default workflows appear on the "Default Workflows" tab of the Workflow
   Library.
+- Default workflows should not reference any resources that are user-created or installed. That includes images and models. For example, if a default workflow references Juggernaut as an SDXL model, when a user loads the workflow, even if they have a version of Juggernaut installed, it will have a different UUID. They may see a warning. So, it's best to ship default workflows without any references to these types of resources.

 After adding or updating default workflows, you **must** start the app up and
 load them to ensure:
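Putting the README's rules together with the workflow diffs above, the top of a default workflow file now carries a header like the following. This is an illustrative Python rendering of the JSON structure; the UUID is made up, but the "default_" prefix and the meta.category value are the parts the sync enforces:

# Illustrative header for a default workflow JSON file (expressed as a Python dict)
default_workflow = {
    "id": "default_00000000-0000-0000-0000-000000000000",  # must keep the "default_" prefix across updates
    "name": "My Workflow - SD1.5",
    "tags": "sd1.5, text to image",  # surfaced via the generated tags column from migration 17
    "meta": {"version": "3.0.0", "category": "default"},  # sync raises if category is not "default"
    # ...nodes and edges omitted; no user-installed models or images should be referenced
}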
@@ -1,382 +1,375 @@
 {
-  "name": "SD3.5 Text to Image",
+  "id": "default_dbe46d95-22aa-43fb-9c16-94400d0ce2fd",
+  "name": "Text to Image - SD3.5",
   "author": "InvokeAI",
   "description": "Sample text to image workflow for Stable Diffusion 3.5",
   "version": "1.0.0",
   "contact": "invoke@invoke.ai",
-  "tags": "text2image, SD3.5, default",
+  "tags": "SD3.5, text to image",
   "notes": "",
   "exposedFields": [
     {
       "nodeId": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
       "fieldName": "model"
     },
     {
       "nodeId": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
       "fieldName": "prompt"
     }
   ],
   "meta": {
     "version": "3.0.0",
     "category": "default"
   },
-  "id": "e3a51d6b-8208-4d6d-b187-fcfe8b32934c",
   "nodes": [
     {
       "id": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
       "type": "invocation",
       "data": {
         "id": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
         "type": "sd3_model_loader",
         "version": "1.0.0",
         "label": "",
         "notes": "",
         "isOpen": true,
         "isIntermediate": true,
         "useCache": true,
         "nodePack": "invokeai",
         "inputs": {
           "model": {
             "name": "model",
-            "label": "",
-            "value": {
-              "key": "f7b20be9-92a8-4cfb-bca4-6c3b5535c10b",
-              "hash": "placeholder",
-              "name": "stable-diffusion-3.5-medium",
-              "base": "sd-3",
-              "type": "main"
-            }
+            "label": ""
           },
           "t5_encoder_model": {
             "name": "t5_encoder_model",
             "label": ""
           },
           "clip_l_model": {
             "name": "clip_l_model",
             "label": ""
           },
           "clip_g_model": {
             "name": "clip_g_model",
             "label": ""
           },
           "vae_model": {
             "name": "vae_model",
             "label": ""
           }
         }
       },
       "position": {
         "x": -55.58689609637031,
         "y": -111.53602444662268
       }
     },
     {
       "id": "f7e394ac-6394-4096-abcb-de0d346506b3",
       "type": "invocation",
       "data": {
         "id": "f7e394ac-6394-4096-abcb-de0d346506b3",
         "type": "rand_int",
         "version": "1.0.1",
         "label": "",
         "notes": "",
         "isOpen": true,
         "isIntermediate": true,
         "useCache": false,
         "nodePack": "invokeai",
         "inputs": {
           "low": {
             "name": "low",
             "label": "",
             "value": 0
           },
           "high": {
             "name": "high",
             "label": "",
             "value": 2147483647
           }
         }
       },
       "position": {
         "x": 470.45870147220353,
         "y": 350.3141781644303
       }
     },
     {
       "id": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
       "type": "invocation",
       "data": {
         "id": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
         "type": "sd3_l2i",
         "version": "1.3.0",
         "label": "",
         "notes": "",
         "isOpen": true,
         "isIntermediate": false,
         "useCache": true,
         "nodePack": "invokeai",
         "inputs": {
           "board": {
             "name": "board",
             "label": ""
           },
           "metadata": {
             "name": "metadata",
             "label": ""
           },
           "latents": {
             "name": "latents",
             "label": ""
           },
           "vae": {
             "name": "vae",
             "label": ""
           }
         }
       },
       "position": {
         "x": 1192.3097009334897,
         "y": -366.0994675072209
       }
     },
     {
       "id": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
       "type": "invocation",
       "data": {
         "id": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
         "type": "sd3_text_encoder",
         "version": "1.0.0",
         "label": "",
         "notes": "",
         "isOpen": true,
         "isIntermediate": true,
         "useCache": true,
         "nodePack": "invokeai",
         "inputs": {
           "clip_l": {
             "name": "clip_l",
             "label": ""
           },
           "clip_g": {
             "name": "clip_g",
             "label": ""
           },
           "t5_encoder": {
             "name": "t5_encoder",
             "label": ""
           },
           "prompt": {
             "name": "prompt",
             "label": "",
             "value": ""
           }
         }
       },
       "position": {
         "x": 408.16054647924784,
         "y": 65.06415352118786
       }
     },
     {
       "id": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
       "type": "invocation",
       "data": {
         "id": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
         "type": "sd3_text_encoder",
         "version": "1.0.0",
         "label": "",
         "notes": "",
         "isOpen": true,
         "isIntermediate": true,
         "useCache": true,
         "nodePack": "invokeai",
         "inputs": {
           "clip_l": {
             "name": "clip_l",
             "label": ""
           },
           "clip_g": {
             "name": "clip_g",
             "label": ""
           },
           "t5_encoder": {
             "name": "t5_encoder",
             "label": ""
           },
           "prompt": {
             "name": "prompt",
             "label": "",
             "value": ""
           }
         }
       },
       "position": {
         "x": 378.9283412440941,
         "y": -302.65777497352553
       }
     },
     {
       "id": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
       "type": "invocation",
       "data": {
         "id": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
         "type": "sd3_denoise",
         "version": "1.0.0",
         "label": "",
         "notes": "",
         "isOpen": true,
         "isIntermediate": true,
         "useCache": true,
         "nodePack": "invokeai",
         "inputs": {
           "board": {
             "name": "board",
             "label": ""
           },
           "metadata": {
             "name": "metadata",
             "label": ""
           },
           "transformer": {
             "name": "transformer",
             "label": ""
           },
           "positive_conditioning": {
             "name": "positive_conditioning",
             "label": ""
           },
           "negative_conditioning": {
             "name": "negative_conditioning",
             "label": ""
           },
           "cfg_scale": {
             "name": "cfg_scale",
             "label": "",
             "value": 3.5
           },
           "width": {
             "name": "width",
             "label": "",
             "value": 1024
           },
           "height": {
             "name": "height",
             "label": "",
             "value": 1024
           },
           "steps": {
             "name": "steps",
             "label": "",
             "value": 30
           },
           "seed": {
             "name": "seed",
             "label": "",
             "value": 0
           }
         }
       },
       "position": {
         "x": 813.7814762740603,
         "y": -142.20529727605867
       }
     }
   ],
   "edges": [
     {
       "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cvae-9eb72af0-dd9e-4ec5-ad87-d65e3c01f48bvae",
       "type": "default",
       "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
       "target": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
       "sourceHandle": "vae",
       "targetHandle": "vae"
     },
     {
       "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4ct5_encoder-3b4f7f27-cfc0-4373-a009-99c5290d0cd6t5_encoder",
       "type": "default",
       "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
       "target": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
       "sourceHandle": "t5_encoder",
       "targetHandle": "t5_encoder"
     },
     {
       "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4ct5_encoder-e17d34e7-6ed1-493c-9a85-4fcd291cb084t5_encoder",
       "type": "default",
       "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
       "target": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
       "sourceHandle": "t5_encoder",
       "targetHandle": "t5_encoder"
     },
     {
       "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_g-3b4f7f27-cfc0-4373-a009-99c5290d0cd6clip_g",
       "type": "default",
       "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
       "target": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
       "sourceHandle": "clip_g",
       "targetHandle": "clip_g"
     },
     {
       "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_g-e17d34e7-6ed1-493c-9a85-4fcd291cb084clip_g",
       "type": "default",
       "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
       "target": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
       "sourceHandle": "clip_g",
       "targetHandle": "clip_g"
     },
     {
       "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_l-3b4f7f27-cfc0-4373-a009-99c5290d0cd6clip_l",
       "type": "default",
       "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
       "target": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
       "sourceHandle": "clip_l",
       "targetHandle": "clip_l"
     },
     {
       "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_l-e17d34e7-6ed1-493c-9a85-4fcd291cb084clip_l",
       "type": "default",
       "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
       "target": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
       "sourceHandle": "clip_l",
       "targetHandle": "clip_l"
     },
     {
       "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4ctransformer-c7539f7b-7ac5-49b9-93eb-87ede611409ftransformer",
       "type": "default",
       "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
       "target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
       "sourceHandle": "transformer",
       "targetHandle": "transformer"
     },
     {
       "id": "reactflow__edge-f7e394ac-6394-4096-abcb-de0d346506b3value-c7539f7b-7ac5-49b9-93eb-87ede611409fseed",
       "type": "default",
       "source": "f7e394ac-6394-4096-abcb-de0d346506b3",
       "target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
       "sourceHandle": "value",
       "targetHandle": "seed"
     },
     {
       "id": "reactflow__edge-c7539f7b-7ac5-49b9-93eb-87ede611409flatents-9eb72af0-dd9e-4ec5-ad87-d65e3c01f48blatents",
       "type": "default",
       "source": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
       "target": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
       "sourceHandle": "latents",
       "targetHandle": "latents"
     },
     {
       "id": "reactflow__edge-e17d34e7-6ed1-493c-9a85-4fcd291cb084conditioning-c7539f7b-7ac5-49b9-93eb-87ede611409fpositive_conditioning",
       "type": "default",
       "source": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
       "target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
       "sourceHandle": "conditioning",
       "targetHandle": "positive_conditioning"
     },
     {
       "id": "reactflow__edge-3b4f7f27-cfc0-4373-a009-99c5290d0cd6conditioning-c7539f7b-7ac5-49b9-93eb-87ede611409fnegative_conditioning",
       "type": "default",
       "source": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
       "target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
       "sourceHandle": "conditioning",
       "targetHandle": "negative_conditioning"
     }
   ]
 }
@@ -1,10 +1,11 @@
 {
+  "id": "default_7dde3e36-d78f-4152-9eea-00ef9c8124ed",
   "name": "Text to Image - SD1.5",
   "author": "InvokeAI",
   "description": "Sample text to image workflow for Stable Diffusion 1.5/2",
   "version": "2.1.0",
   "contact": "invoke@invoke.ai",
-  "tags": "text2image, SD1.5, SD2, default",
+  "tags": "SD1.5, text to image",
   "notes": "",
   "exposedFields": [
     {
@@ -416,4 +417,4 @@
         "targetHandle": "vae"
       }
     ]
 }
@@ -1,10 +1,11 @@
 {
+  "id": "default_5e8b008d-c697-45d0-8883-085a954c6ace",
   "name": "Text to Image - SDXL",
   "author": "InvokeAI",
   "description": "Sample text to image workflow for SDXL",
   "version": "2.1.0",
   "contact": "invoke@invoke.ai",
-  "tags": "text2image, SDXL, default",
+  "tags": "SDXL, text to image",
   "notes": "",
   "exposedFields": [
     {
@@ -45,14 +46,7 @@
       "inputs": {
         "vae_model": {
           "name": "vae_model",
-          "label": "VAE (use the FP16 model)",
-          "value": {
-            "key": "f20f9e5c-1bce-4c46-a84d-34ebfa7df069",
-            "hash": "blake3:9705ab1c31fa96b308734214fb7571a958621c7a9247eed82b7d277145f8d9fa",
-            "name": "sdxl-vae-fp16-fix",
-            "base": "sdxl",
-            "type": "vae"
-          }
+          "label": "VAE (use the FP16 model)"
        }
      },
      "isOpen": true,
@@ -202,14 +196,7 @@
       "inputs": {
         "model": {
           "name": "model",
-          "label": "",
-          "value": {
-            "key": "4a63b226-e8ff-4da4-854e-0b9f04b562ba",
-            "hash": "blake3:d279309ea6e5ee6e8fd52504275865cc280dac71cbf528c5b07c98b888bddaba",
-            "name": "dreamshaper-xl-v2-turbo",
-            "base": "sdxl",
-            "type": "main"
-          }
+          "label": ""
        }
      },
      "isOpen": true,
@@ -714,4 +701,4 @@
         "targetHandle": "style"
       }
     ]
 }
@@ -1,10 +1,11 @@
 {
-  "name": "Text to Image with LoRA",
+  "id": "default_e71d153c-2089-43c7-bd2c-f61f37d4c1c1",
+  "name": "Text to Image - SD1.5, LoRA",
   "author": "InvokeAI",
   "description": "Simple text to image workflow with a LoRA",
   "version": "2.1.0",
   "contact": "invoke@invoke.ai",
-  "tags": "text to image, lora, default",
+  "tags": "sd1.5, text to image, lora",
   "notes": "",
   "exposedFields": [
     {
@@ -1,10 +1,11 @@
 {
-  "name": "Tiled Upscaling (Beta)",
+  "id": "default_43b0d7f7-6a12-4dcf-a5a4-50c940cbee29",
+  "name": "Upscaler - SD1.5, Tiled",
   "author": "Invoke",
   "description": "A workflow to upscale an input image with tiled upscaling. ",
   "version": "2.1.0",
   "contact": "invoke@invoke.ai",
-  "tags": "tiled, upscaling, sd1.5",
+  "tags": "sd1.5, upscaling",
   "notes": "",
   "exposedFields": [
     {
@@ -85,14 +86,7 @@
       },
       "ip_adapter_model": {
         "name": "ip_adapter_model",
-        "label": "IP-Adapter Model (select ip_adapter_sd15)",
-        "value": {
-          "key": "1cc210bb-4d0a-4312-b36c-b5d46c43768e",
-          "hash": "blake3:3d669dffa7471b357b4df088b99ffb6bf4d4383d5e0ef1de5ec1c89728a3d5a5",
-          "name": "ip_adapter_sd15",
-          "base": "sd-1",
-          "type": "ip_adapter"
-        }
+        "label": "IP-Adapter Model (select ip_adapter_sd15)"
       },
       "clip_vision_model": {
         "name": "clip_vision_model",
@@ -200,14 +194,7 @@
       },
       "control_model": {
         "name": "control_model",
-        "label": "Control Model (select contro_v11f1e_sd15_tile)",
-        "value": {
-          "key": "773843c8-db1f-4502-8f65-59782efa7960",
-          "hash": "blake3:f0812e13758f91baf4e54b7dbb707b70642937d3b2098cd2b94cc36d3eba308e",
-          "name": "control_v11f1e_sd15_tile",
-          "base": "sd-1",
-          "type": "controlnet"
-        }
+        "label": "Control Model (select control_v11f1e_sd15_tile)"
       },
       "control_weight": {
         "name": "control_weight",
@@ -1815,4 +1802,4 @@
         "targetHandle": "unet"
       }
     ]
 }
@@ -41,10 +41,36 @@ class WorkflowRecordsStorageBase(ABC):
         self,
         order_by: WorkflowRecordOrderBy,
         direction: SQLiteDirection,
-        category: WorkflowCategory,
+        categories: Optional[list[WorkflowCategory]],
         page: int,
         per_page: Optional[int],
         query: Optional[str],
+        tags: Optional[list[str]],
+        has_been_opened: Optional[bool],
     ) -> PaginatedResults[WorkflowRecordListItemDTO]:
         """Gets many workflows."""
         pass

+    @abstractmethod
+    def counts_by_category(
+        self,
+        categories: list[WorkflowCategory],
+        has_been_opened: Optional[bool] = None,
+    ) -> dict[str, int]:
+        """Gets a dictionary of counts for each of the provided categories."""
+        pass
+
+    @abstractmethod
+    def counts_by_tag(
+        self,
+        tags: list[str],
+        categories: Optional[list[WorkflowCategory]] = None,
+        has_been_opened: Optional[bool] = None,
+    ) -> dict[str, int]:
+        """Gets a dictionary of counts for each of the provided tags."""
+        pass
+
+    @abstractmethod
+    def update_opened_at(self, workflow_id: str) -> None:
+        """Open a workflow."""
+        pass
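Taken together, the interface changes above replace the single required `category` filter with optional `categories`, `tags`, and `has_been_opened` filters, and add two count helpers suited to building a library sidebar. A minimal caller-side sketch of the new surface follows; `storage` stands for any concrete implementation, and the `SQLiteDirection` import path is an assumption (the `workflow_records_common` path appears verbatim in the SQLite storage diff below).

```python
# Sketch only: `storage` is assumed to be any concrete WorkflowRecordsStorageBase.
# The SQLiteDirection import path is a guess; the workflow_records_common path
# appears in the SQLite storage diff below.
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
from invokeai.app.services.workflow_records.workflow_records_common import (
    WorkflowCategory,
    WorkflowRecordOrderBy,
)


def list_unopened_user_workflows(storage, tags: list[str] | None = None):
    """Page through user workflows that have never been opened, optionally narrowed by tag."""
    return storage.get_many(
        order_by=WorkflowRecordOrderBy.Name,
        direction=SQLiteDirection.Ascending,
        categories=[WorkflowCategory.User],
        page=0,
        per_page=20,
        query=None,
        tags=tags,
        has_been_opened=False,
    )


def library_sidebar_counts(storage):
    """The count helpers return plain dicts keyed by tag or category value."""
    tag_counts = storage.counts_by_tag(tags=["upscaling", "sd1.5"])  # e.g. {"upscaling": 3, "sd1.5": 7}
    category_counts = storage.counts_by_category(
        categories=[WorkflowCategory.User, WorkflowCategory.Default]
    )  # e.g. {"user": 12, "default": 20}
    return tag_counts, category_counts
```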
@@ -1,6 +1,6 @@
 import datetime
 from enum import Enum
-from typing import Any, Union
+from typing import Any, Optional, Union

 import semver
 from pydantic import BaseModel, ConfigDict, Field, JsonValue, TypeAdapter, field_validator
@@ -36,9 +36,7 @@ class WorkflowCategory(str, Enum, metaclass=MetaEnum):

 class WorkflowMeta(BaseModel):
     version: str = Field(description="The version of the workflow schema.")
-    category: WorkflowCategory = Field(
-        default=WorkflowCategory.User, description="The category of the workflow (user or default)."
-    )
+    category: WorkflowCategory = Field(description="The category of the workflow (user or default).")

     @field_validator("version")
     def validate_version(cls, version: str):
@@ -100,7 +98,9 @@ class WorkflowRecordDTOBase(BaseModel):
     name: str = Field(description="The name of the workflow.")
     created_at: Union[datetime.datetime, str] = Field(description="The created timestamp of the workflow.")
     updated_at: Union[datetime.datetime, str] = Field(description="The updated timestamp of the workflow.")
-    opened_at: Union[datetime.datetime, str] = Field(description="The opened timestamp of the workflow.")
+    opened_at: Optional[Union[datetime.datetime, str]] = Field(
+        default=None, description="The opened timestamp of the workflow."
+    )


 class WorkflowRecordDTO(WorkflowRecordDTOBase):
@@ -118,6 +118,15 @@ WorkflowRecordDTOValidator = TypeAdapter(WorkflowRecordDTO)
 class WorkflowRecordListItemDTO(WorkflowRecordDTOBase):
     description: str = Field(description="The description of the workflow.")
     category: WorkflowCategory = Field(description="The description of the workflow.")
+    tags: str = Field(description="The tags of the workflow.")


 WorkflowRecordListItemDTOValidator = TypeAdapter(WorkflowRecordListItemDTO)
+
+
+class WorkflowRecordWithThumbnailDTO(WorkflowRecordDTO):
+    thumbnail_url: str | None = Field(default=None, description="The URL of the workflow thumbnail.")
+
+
+class WorkflowRecordListItemWithThumbnailDTO(WorkflowRecordListItemDTO):
+    thumbnail_url: str | None = Field(default=None, description="The URL of the workflow thumbnail.")
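One practical effect of the DTO changes: `opened_at` is now optional with a `None` default, so "never opened" is representable, and list items carry the raw `tags` string. A hypothetical construction with made-up values, to illustrate (field names come from the diff; `workflow_id` is assumed to be defined on the base DTO, which is only partially visible in this excerpt):

```python
from invokeai.app.services.workflow_records.workflow_records_common import (
    WorkflowCategory,
    WorkflowRecordListItemDTO,
)

# All values are illustrative; only the field names come from the diff.
item = WorkflowRecordListItemDTO(
    workflow_id="8e5c1e52-0000-0000-0000-000000000000",  # assumed base-DTO field
    name="Tiled Upscaling",
    created_at="2025-01-01 00:00:00",
    updated_at="2025-01-02 00:00:00",
    # opened_at is omitted: it now defaults to None for workflows that
    # have never been opened (previously it was a required field).
    description="A workflow to upscale an input image with tiled upscaling.",
    category=WorkflowCategory.User,
    tags="sd1.5, upscaling",  # stored as a single free-form string, not a list
)
assert item.opened_at is None  # "never opened" is now representable
```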
@@ -14,11 +14,13 @@ from invokeai.app.services.workflow_records.workflow_records_common import (
     WorkflowRecordListItemDTO,
     WorkflowRecordListItemDTOValidator,
     WorkflowRecordOrderBy,
+    WorkflowValidator,
     WorkflowWithoutID,
-    WorkflowWithoutIDValidator,
 )
 from invokeai.app.util.misc import uuid_string

+SQL_TIME_FORMAT = "%Y-%m-%d %H:%M:%f"
+

 class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
     def __init__(self, db: SqliteDatabase) -> None:
@@ -32,15 +34,6 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
     def get(self, workflow_id: str) -> WorkflowRecordDTO:
         """Gets a workflow by ID. Updates the opened_at column."""
         cursor = self._conn.cursor()
-        cursor.execute(
-            """--sql
-            UPDATE workflow_library
-            SET opened_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
-            WHERE workflow_id = ?;
-            """,
-            (workflow_id,),
-        )
-        self._conn.commit()
         cursor.execute(
             """--sql
             SELECT workflow_id, workflow, name, created_at, updated_at, opened_at
@@ -55,9 +48,10 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
         return WorkflowRecordDTO.from_dict(dict(row))

     def create(self, workflow: WorkflowWithoutID) -> WorkflowRecordDTO:
+        if workflow.meta.category is WorkflowCategory.Default:
+            raise ValueError("Default workflows cannot be created via this method")
+
         try:
-            # Only user workflows may be created by this method
-            assert workflow.meta.category is WorkflowCategory.User
             workflow_with_id = Workflow(**workflow.model_dump(), id=uuid_string())
             cursor = self._conn.cursor()
             cursor.execute(
@@ -77,6 +71,9 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
         return self.get(workflow_with_id.id)

     def update(self, workflow: Workflow) -> WorkflowRecordDTO:
+        if workflow.meta.category is WorkflowCategory.Default:
+            raise ValueError("Default workflows cannot be updated")
+
         try:
             cursor = self._conn.cursor()
             cursor.execute(
@@ -94,6 +91,9 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
         return self.get(workflow.id)

     def delete(self, workflow_id: str) -> None:
+        if self.get(workflow_id).workflow.meta.category is WorkflowCategory.Default:
+            raise ValueError("Default workflows cannot be deleted")
+
         try:
             cursor = self._conn.cursor()
             cursor.execute(
@@ -113,45 +113,108 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
         self,
         order_by: WorkflowRecordOrderBy,
         direction: SQLiteDirection,
-        category: WorkflowCategory,
+        categories: Optional[list[WorkflowCategory]],
         page: int = 0,
         per_page: Optional[int] = None,
         query: Optional[str] = None,
+        tags: Optional[list[str]] = None,
+        has_been_opened: Optional[bool] = None,
     ) -> PaginatedResults[WorkflowRecordListItemDTO]:
         # sanitize!
         assert order_by in WorkflowRecordOrderBy
         assert direction in SQLiteDirection
-        assert category in WorkflowCategory
-        count_query = "SELECT COUNT(*) FROM workflow_library WHERE category = ?"
-        main_query = """
-            SELECT
-                workflow_id,
-                category,
-                name,
-                description,
-                created_at,
-                updated_at,
-                opened_at
-            FROM workflow_library
-            WHERE category = ?
-            """
-        main_params: list[int | str] = [category.value]
-        count_params: list[int | str] = [category.value]
+
+        # We will construct the query dynamically based on the query params
+
+        # The main query to get the workflows / counts
+        main_query = """
+            SELECT
+                workflow_id,
+                category,
+                name,
+                description,
+                created_at,
+                updated_at,
+                opened_at,
+                tags
+            FROM workflow_library
+            """
+        count_query = "SELECT COUNT(*) FROM workflow_library"
+
+        # Start with an empty list of conditions and params
+        conditions: list[str] = []
+        params: list[str | int] = []
+
+        if categories:
+            # Categories is a list of WorkflowCategory enum values, and a single string in the DB
+
+            # Ensure all categories are valid (is this necessary?)
+            assert all(c in WorkflowCategory for c in categories)
+
+            # Construct a placeholder string for the number of categories
+            placeholders = ", ".join("?" for _ in categories)
+
+            # Construct the condition string & params
+            category_condition = f"category IN ({placeholders})"
+            category_params = [category.value for category in categories]
+
+            conditions.append(category_condition)
+            params.extend(category_params)
+
+        if tags:
+            # Tags is a list of strings, and a single string in the DB
+            # The string in the DB has no guaranteed format
+
+            # Construct a list of conditions for each tag
+            tags_conditions = ["tags LIKE ?" for _ in tags]
+            tags_conditions_joined = " OR ".join(tags_conditions)
+            tags_condition = f"({tags_conditions_joined})"
+
+            # And the params for the tags, case-insensitive
+            tags_params = [f"%{t.strip()}%" for t in tags]
+
+            conditions.append(tags_condition)
+            params.extend(tags_params)
+
+        if has_been_opened:
+            conditions.append("opened_at IS NOT NULL")
+        elif has_been_opened is False:
+            conditions.append("opened_at IS NULL")
+
+        # Ignore whitespace in the query
         stripped_query = query.strip() if query else None
         if stripped_query:
+            # Construct a wildcard query for the name, description, and tags
             wildcard_query = "%" + stripped_query + "%"
-            main_query += " AND name LIKE ? OR description LIKE ? "
-            count_query += " AND name LIKE ? OR description LIKE ?;"
-            main_params.extend([wildcard_query, wildcard_query])
-            count_params.extend([wildcard_query, wildcard_query])
+            query_condition = "(name LIKE ? OR description LIKE ? OR tags LIKE ?)"
+
+            conditions.append(query_condition)
+            params.extend([wildcard_query, wildcard_query, wildcard_query])
+
+        if conditions:
+            # If there are conditions, add a WHERE clause and then join the conditions
+            main_query += " WHERE "
+            count_query += " WHERE "
+
+            all_conditions = " AND ".join(conditions)
+            main_query += all_conditions
+            count_query += all_conditions
+
+        # After this point, the query and params differ for the main query and the count query
+        main_params = params.copy()
+        count_params = params.copy()
+
+        # Main query also gets ORDER BY and LIMIT/OFFSET
         main_query += f" ORDER BY {order_by.value} {direction.value}"
+
         if per_page:
             main_query += " LIMIT ? OFFSET ?"
             main_params.extend([per_page, page * per_page])
+
+        # Put a ring on it
+        main_query += ";"
+        count_query += ";"
+
         cursor = self._conn.cursor()
         cursor.execute(main_query, main_params)
         rows = cursor.fetchall()
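The heart of the `get_many` rewrite is that each filter is accumulated as a condition string plus its bound parameters, and the joined conditions are stamped onto both the row query and the count query, so pagination totals always agree with the filtered rows. A distilled, hypothetical version of that pattern (not the actual method; table and column names are taken from the diff):

```python
# Standalone sketch of the condition-building pattern used in get_many above.
def build_queries(
    categories: list[str] | None,
    tags: list[str] | None,
    query: str | None,
    has_been_opened: bool | None,
) -> tuple[str, str, list[str]]:
    main = "SELECT workflow_id, name, tags FROM workflow_library"
    count = "SELECT COUNT(*) FROM workflow_library"
    conditions: list[str] = []
    params: list[str] = []

    if categories:
        # one placeholder per category, bound positionally
        conditions.append(f"category IN ({', '.join('?' for _ in categories)})")
        params.extend(categories)
    if tags:
        # one LIKE per tag, OR'd together; LIKE is case-insensitive for ASCII in SQLite
        conditions.append("(" + " OR ".join("tags LIKE ?" for _ in tags) + ")")
        params.extend(f"%{t.strip()}%" for t in tags)
    if query and query.strip():
        wildcard = f"%{query.strip()}%"
        conditions.append("(name LIKE ? OR description LIKE ? OR tags LIKE ?)")
        params.extend([wildcard, wildcard, wildcard])
    if has_been_opened is True:
        conditions.append("opened_at IS NOT NULL")
    elif has_been_opened is False:
        conditions.append("opened_at IS NULL")

    if conditions:
        where = " WHERE " + " AND ".join(conditions)
        main += where
        count += where
    # Both queries share the same parameter list; only the main query goes on
    # to receive ORDER BY / LIMIT / OFFSET in the real method.
    return main, count, params
```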
@@ -173,6 +236,122 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
             total=total,
         )

+    def counts_by_tag(
+        self,
+        tags: list[str],
+        categories: Optional[list[WorkflowCategory]] = None,
+        has_been_opened: Optional[bool] = None,
+    ) -> dict[str, int]:
+        if not tags:
+            return {}
+
+        cursor = self._conn.cursor()
+        result: dict[str, int] = {}
+        # Base conditions for categories and selected tags
+        base_conditions: list[str] = []
+        base_params: list[str | int] = []
+
+        # Add category conditions
+        if categories:
+            assert all(c in WorkflowCategory for c in categories)
+            placeholders = ", ".join("?" for _ in categories)
+            base_conditions.append(f"category IN ({placeholders})")
+            base_params.extend([category.value for category in categories])
+
+        if has_been_opened:
+            base_conditions.append("opened_at IS NOT NULL")
+        elif has_been_opened is False:
+            base_conditions.append("opened_at IS NULL")
+
+        # For each tag to count, run a separate query
+        for tag in tags:
+            # Start with the base conditions
+            conditions = base_conditions.copy()
+            params = base_params.copy()
+
+            # Add this specific tag condition
+            conditions.append("tags LIKE ?")
+            params.append(f"%{tag.strip()}%")
+
+            # Construct the full query
+            stmt = """--sql
+                SELECT COUNT(*)
+                FROM workflow_library
+                """
+
+            if conditions:
+                stmt += " WHERE " + " AND ".join(conditions)
+
+            cursor.execute(stmt, params)
+            count = cursor.fetchone()[0]
+            result[tag] = count
+
+        return result
+
+    def counts_by_category(
+        self,
+        categories: list[WorkflowCategory],
+        has_been_opened: Optional[bool] = None,
+    ) -> dict[str, int]:
+        cursor = self._conn.cursor()
+        result: dict[str, int] = {}
+        # Base conditions for categories
+        base_conditions: list[str] = []
+        base_params: list[str | int] = []
+
+        # Add category conditions
+        if categories:
+            assert all(c in WorkflowCategory for c in categories)
+            placeholders = ", ".join("?" for _ in categories)
+            base_conditions.append(f"category IN ({placeholders})")
+            base_params.extend([category.value for category in categories])
+
+        if has_been_opened:
+            base_conditions.append("opened_at IS NOT NULL")
+        elif has_been_opened is False:
+            base_conditions.append("opened_at IS NULL")
+
+        # For each category to count, run a separate query
+        for category in categories:
+            # Start with the base conditions
+            conditions = base_conditions.copy()
+            params = base_params.copy()
+
+            # Add this specific category condition
+            conditions.append("category = ?")
+            params.append(category.value)
+
+            # Construct the full query
+            stmt = """--sql
+                SELECT COUNT(*)
+                FROM workflow_library
+                """
+
+            if conditions:
+                stmt += " WHERE " + " AND ".join(conditions)
+
+            cursor.execute(stmt, params)
+            count = cursor.fetchone()[0]
+            result[category.value] = count
+
+        return result
+
+    def update_opened_at(self, workflow_id: str) -> None:
+        try:
+            cursor = self._conn.cursor()
+            cursor.execute(
+                f"""--sql
+                UPDATE workflow_library
+                SET opened_at = STRFTIME('{SQL_TIME_FORMAT}', 'NOW')
+                WHERE workflow_id = ?;
+                """,
+                (workflow_id,),
+            )
+            self._conn.commit()
+        except Exception:
+            self._conn.rollback()
+            raise
+
     def _sync_default_workflows(self) -> None:
         """Syncs default workflows to the database. Internal use only."""

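Note how the `opened_at` bookkeeping moved: an earlier hunk strips the UPDATE out of `get`, and the new `update_opened_at` method carries it instead, so reads are side-effect free and marking a workflow as opened is an explicit call. A hypothetical caller wiring the two together (names come from the diffs above; the helper itself is illustrative):

```python
def open_workflow(storage, workflow_id: str):
    record = storage.get(workflow_id)      # fetching no longer bumps opened_at
    storage.update_opened_at(workflow_id)  # explicit mark-as-opened; commits internally
    return record
```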
@@ -187,27 +366,68 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
         """

         try:
-            workflows: list[Workflow] = []
+            cursor = self._conn.cursor()
+            workflows_from_file: list[Workflow] = []
+            workflows_to_update: list[Workflow] = []
+            workflows_to_add: list[Workflow] = []
             workflows_dir = Path(__file__).parent / Path("default_workflows")
             workflow_paths = workflows_dir.glob("*.json")
             for path in workflow_paths:
                 bytes_ = path.read_bytes()
-                workflow_without_id = WorkflowWithoutIDValidator.validate_json(bytes_)
-                workflow = Workflow(**workflow_without_id.model_dump(), id=uuid_string())
-                workflows.append(workflow)
-            # Only default workflows may be managed by this method
-            assert all(w.meta.category is WorkflowCategory.Default for w in workflows)
-            cursor = self._conn.cursor()
-            cursor.execute(
-                """--sql
-                DELETE FROM workflow_library
-                WHERE category = 'default';
-                """
-            )
-            for w in workflows:
+                workflow_from_file = WorkflowValidator.validate_json(bytes_)
+
+                assert workflow_from_file.id.startswith("default_"), (
+                    f'Invalid default workflow ID (must start with "default_"): {workflow_from_file.id}'
+                )
+
+                assert workflow_from_file.meta.category is WorkflowCategory.Default, (
+                    f"Invalid default workflow category: {workflow_from_file.meta.category}"
+                )
+
+                workflows_from_file.append(workflow_from_file)
+
+                try:
+                    workflow_from_db = self.get(workflow_from_file.id).workflow
+                    if workflow_from_file != workflow_from_db:
+                        self._invoker.services.logger.debug(
+                            f"Updating library workflow {workflow_from_file.name} ({workflow_from_file.id})"
+                        )
+                        workflows_to_update.append(workflow_from_file)
+                    continue
+                except WorkflowNotFoundError:
+                    self._invoker.services.logger.debug(
+                        f"Adding missing default workflow {workflow_from_file.name} ({workflow_from_file.id})"
+                    )
+                    workflows_to_add.append(workflow_from_file)
+                    continue
+
+            library_workflows_from_db = self.get_many(
+                order_by=WorkflowRecordOrderBy.Name,
+                direction=SQLiteDirection.Ascending,
+                categories=[WorkflowCategory.Default],
+            ).items
+
+            workflows_from_file_ids = [w.id for w in workflows_from_file]
+
+            for w in library_workflows_from_db:
+                if w.workflow_id not in workflows_from_file_ids:
+                    self._invoker.services.logger.debug(
+                        f"Deleting obsolete default workflow {w.name} ({w.workflow_id})"
+                    )
+                    # We cannot use the `delete` method here, as it only deletes non-default workflows
+                    cursor.execute(
+                        """--sql
+                        DELETE from workflow_library
+                        WHERE workflow_id = ?;
+                        """,
+                        (w.workflow_id,),
+                    )
+
+            for w in workflows_to_add:
+                # We cannot use the `create` method here, as it only creates non-default workflows
                 cursor.execute(
                     """--sql
-                    INSERT OR REPLACE INTO workflow_library (
+                    INSERT INTO workflow_library (
                         workflow_id,
                         workflow
                     )
@@ -215,6 +435,18 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
                     """,
                     (w.id, w.model_dump_json()),
                 )
+
+            for w in workflows_to_update:
+                # We cannot use the `update` method here, as it only updates non-default workflows
+                cursor.execute(
+                    """--sql
+                    UPDATE workflow_library
+                    SET workflow = ?
+                    WHERE workflow_id = ?;
+                    """,
+                    (w.model_dump_json(), w.id),
+                )
+
             self._conn.commit()
         except Exception:
             self._conn.rollback()
@@ -0,0 +1,28 @@
+from abc import ABC, abstractmethod
+from pathlib import Path
+
+from PIL import Image
+
+
+class WorkflowThumbnailServiceBase(ABC):
+    """Base class for workflow thumbnail services"""
+
+    @abstractmethod
+    def get_path(self, workflow_id: str, with_hash: bool = True) -> Path:
+        """Gets the path to a workflow thumbnail"""
+        pass
+
+    @abstractmethod
+    def get_url(self, workflow_id: str, with_hash: bool = True) -> str | None:
+        """Gets the URL of a workflow thumbnail"""
+        pass
+
+    @abstractmethod
+    def save(self, workflow_id: str, image: Image.Image) -> None:
+        """Saves a workflow thumbnail"""
+        pass
+
+    @abstractmethod
+    def delete(self, workflow_id: str) -> None:
+        """Deletes a workflow thumbnail"""
+        pass
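The diff above only introduces the abstract thumbnail interface; the concrete service is not shown in this excerpt. A minimal on-disk sketch of what an implementation might look like, where the directory layout, PNG format, route, and mtime-based cache-buster are all assumptions for illustration rather than InvokeAI's actual implementation:

```python
from pathlib import Path

from PIL import Image


class DiskWorkflowThumbnailService:  # would subclass WorkflowThumbnailServiceBase
    """Hypothetical disk-backed thumbnail store; all specifics are assumptions."""

    def __init__(self, thumbnails_dir: Path) -> None:
        self._dir = thumbnails_dir
        self._dir.mkdir(parents=True, exist_ok=True)

    def get_path(self, workflow_id: str, with_hash: bool = True) -> Path:
        return self._dir / f"{workflow_id}.png"

    def get_url(self, workflow_id: str, with_hash: bool = True) -> str | None:
        path = self.get_path(workflow_id)
        if not path.exists():
            return None
        url = f"/api/v1/workflows/i/{workflow_id}/thumbnail"  # hypothetical route
        if with_hash:
            # cache-buster derived from mtime so clients refetch updated thumbnails
            url += f"?{int(path.stat().st_mtime)}"
        return url

    def save(self, workflow_id: str, image: Image.Image) -> None:
        image.thumbnail((256, 256))  # bound the size; the exact limit is a guess
        image.save(self.get_path(workflow_id))

    def delete(self, workflow_id: str) -> None:
        self.get_path(workflow_id).unlink(missing_ok=True)
```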
Some files were not shown because too many files have changed in this diff.