mirror of
https://github.com/invoke-ai/InvokeAI.git
synced 2026-01-15 13:47:55 -05:00
Compare commits
538 Commits
v2.2.4
...
development
@@ -1,26 +1,12 @@
*
!backend
!configs
!environments-and-requirements
!frontend
!binary_installer
!installer
!ldm
!main.py
!scripts
!server
!static
!setup.py
!docker-build
!docs
docker-build/Dockerfile

# Guard against pulling in any models that might exist in the directory tree
**/*.pt*

# unignore configs, but only ignore the custom models.yaml, in case it exists
!configs
configs/models.yaml

# unignore environment dirs/files, but ignore the environment.yml file or symlink in case it exists
!environment*
environment.yml

**/__pycache__
87  .github/workflows/build-cloud-img.yml  vendored
@@ -1,87 +0,0 @@
name: Build and push cloud image
on:
  workflow_dispatch:
  push:
    branches:
      - main
    tags:
      - v*
  # we will NOT push the image on pull requests, only test buildability.
  pull_request:
    branches:
      - main

permissions:
  contents: read
  packages: write

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  docker:
    strategy:
      fail-fast: false
      matrix:
        arch:
          - x86_64
          # requires resolving a patchmatch issue
          # - aarch64
    runs-on: ubuntu-latest
    name: ${{ matrix.arch }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
        if: matrix.arch == 'aarch64'

      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          # see https://github.com/docker/metadata-action
          # will push the following tags:
          # :edge
          # :main (+ any other branches enabled in the workflow)
          # :<tag>
          # :1.2.3 (for semver tags)
          # :1.2 (for semver tags)
          # :<sha>
          tags: |
            type=edge,branch=main
            type=ref,event=branch
            type=ref,event=tag
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=sha
          # suffix image tags with architecture
          flavor: |
            latest=auto
            suffix=-${{ matrix.arch }},latest=true

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      # do not login to container registry on PRs
      - if: github.event_name != 'pull_request'
        name: Docker login
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push cloud image
        uses: docker/build-push-action@v3
        with:
          context: .
          file: docker-build/Dockerfile.cloud
          platforms: Linux/${{ matrix.arch }}
          # do not push the image on PRs
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
9  .github/workflows/build-container.yml  vendored
@@ -5,12 +5,17 @@ on:
  push:
    branches:
      - 'main'
      - 'development'
      - 'update-dockerfile'

jobs:
  docker:
    strategy:
      fail-fast: false
      matrix:
        arch:
          - x86_64
          - aarch64
        pip-requirements:
          - requirements-lin-amd.txt
          - requirements-lin-cuda.txt
@@ -32,7 +37,7 @@ jobs:
        with:
          context: .
          file: docker-build/Dockerfile
          platforms: linux/amd64,linux/arm64
          platforms: Linux/${{ matrix.arch }}
          push: false
          tags: ${{ env.dockertag }}:${{ matrix.pip-requirements }}
          tags: ${{ env.dockertag }}:${{ matrix.pip-requirements }}-${{ matrix.arch }}
          build-args: pip_requirements=${{ matrix.pip-requirements }}
37  .github/workflows/test-invoke-conda.yml  vendored
@@ -4,6 +4,7 @@ on:
    branches:
      - 'main'
      - 'development'
      - 'fix-gh-actions-fork'
  pull_request:
    branches:
      - 'main'
@@ -19,28 +20,16 @@ jobs:
        - environment-lin-amd.yml
        - environment-lin-cuda.yml
        - environment-mac.yml
        - environment-win-cuda.yml
        include:
          - environment-yaml: environment-lin-amd.yml
            os: ubuntu-22.04
            curl-command: curl
            github-env: $GITHUB_ENV
            os: ubuntu-latest
            default-shell: bash -l {0}
          - environment-yaml: environment-lin-cuda.yml
            os: ubuntu-22.04
            curl-command: curl
            github-env: $GITHUB_ENV
            os: ubuntu-latest
            default-shell: bash -l {0}
          - environment-yaml: environment-mac.yml
            os: macos-12
            curl-command: curl
            github-env: $GITHUB_ENV
            default-shell: bash -l {0}
          - environment-yaml: environment-win-cuda.yml
            os: windows-2022
            curl-command: curl.exe
            github-env: $env:GITHUB_ENV
            default-shell: pwsh
          - stable-diffusion-model: stable-diffusion-1.5
            stable-diffusion-model-url: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
            stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1
@@ -83,15 +72,15 @@ jobs:

      - name: set test prompt to main branch validation
        if: ${{ github.ref == 'refs/heads/main' }}
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> $GITHUB_ENV

      - name: set test prompt to development branch validation
        if: ${{ github.ref == 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }}
        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> $GITHUB_ENV

      - name: set test prompt to Pull Request validation
        if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> $GITHUB_ENV

      - name: Use Cached Stable Diffusion Model
        id: cache-sd-model
@@ -107,20 +96,22 @@ jobs:
        if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
        run: |
          mkdir -p "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}"
          ${{ matrix.curl-command }} -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" -L ${{ matrix.stable-diffusion-model-url }}
          curl \
            -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
            -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" \
            -L ${{ matrix.stable-diffusion-model-url }}

      - name: run configure_invokeai.py
        id: run-preload-models
        run: |
          python scripts/configure_invokeai.py --no-interactive --yes

      - name: cat invokeai.init
      - name: cat ~/.invokeai
        id: cat-invokeai
        run: cat ${{ env.INVOKEAI_ROOT }}/invokeai.init
        run: cat ~/.invokeai

      - name: Run the tests
        id: run-tests
        if: matrix.os != 'windows-2022'
        run: |
          time python scripts/invoke.py \
            --no-patchmatch \
@@ -132,13 +123,11 @@ jobs:

      - name: export conda env
        id: export-conda-env
        if: matrix.os != 'windows-2022'
        run: |
          mkdir -p outputs/img-samples
          conda env export --name ${{ env.CONDA_ENV_NAME }} > ${{ env.INVOKEAI_ROOT }}/outputs/environment-${{ runner.os }}-${{ runner.arch }}.yml
          conda env export --name ${{ env.CONDA_ENV_NAME }} > outputs/img-samples/environment-${{ runner.os }}-${{ runner.arch }}.yml

      - name: Archive results
        if: matrix.os != 'windows-2022'
        id: archive-results
        uses: actions/upload-artifact@v3
        with:
70  .github/workflows/test-invoke-pip.yml  vendored
@@ -19,50 +19,35 @@ jobs:
        - requirements-lin-cuda.txt
        - requirements-lin-amd.txt
        - requirements-mac-mps-cpu.txt
        - requirements-win-colab-cuda.txt
      python-version:
        # - '3.9'
        - '3.10'
      include:
        - requirements-file: requirements-lin-cuda.txt
          os: ubuntu-22.04
          curl-command: curl
          github-env: $GITHUB_ENV
          os: ubuntu-latest
          default-shell: bash -l {0}
        - requirements-file: requirements-lin-amd.txt
          os: ubuntu-22.04
          curl-command: curl
          github-env: $GITHUB_ENV
          os: ubuntu-latest
          default-shell: bash -l {0}
        - requirements-file: requirements-mac-mps-cpu.txt
          os: macOS-12
          curl-command: curl
          github-env: $GITHUB_ENV
        - requirements-file: requirements-win-colab-cuda.txt
          os: windows-2022
          curl-command: curl.exe
          github-env: $env:GITHUB_ENV
          default-shell: bash -l {0}
        - stable-diffusion-model: stable-diffusion-1.5
          stable-diffusion-model-url: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
          stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1
          stable-diffusion-model-dl-name: v1-5-pruned-emaonly.ckpt
    name: ${{ matrix.requirements-file }} on ${{ matrix.python-version }}
    runs-on: ${{ matrix.os }}
    defaults:
      run:
        shell: ${{ matrix.default-shell }}
    env:
      INVOKEAI_ROOT: '${{ github.workspace }}/invokeai'
    steps:
      - name: Checkout sources
        id: checkout-sources
        uses: actions/checkout@v3

      - name: set INVOKEAI_ROOT Windows
        if: matrix.os == 'windows-2022'
        run: |
          echo "INVOKEAI_ROOT=${{ github.workspace }}\invokeai" >> ${{ matrix.github-env }}
          echo "INVOKEAI_OUTDIR=${{ github.workspace }}\invokeai\outputs" >> ${{ matrix.github-env }}

      - name: set INVOKEAI_ROOT others
        if: matrix.os != 'windows-2022'
        run: |
          echo "INVOKEAI_ROOT=${{ github.workspace }}/invokeai" >> ${{ matrix.github-env }}
          echo "INVOKEAI_OUTDIR=${{ github.workspace }}/invokeai/outputs" >> ${{ matrix.github-env }}

      - name: create models.yaml from example
        run: |
          mkdir -p ${{ env.INVOKEAI_ROOT }}/configs
@@ -70,15 +55,15 @@ jobs:

      - name: set test prompt to main branch validation
        if: ${{ github.ref == 'refs/heads/main' }}
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> $GITHUB_ENV

      - name: set test prompt to development branch validation
        if: ${{ github.ref == 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }}
        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> $GITHUB_ENV

      - name: set test prompt to Pull Request validation
        if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> $GITHUB_ENV

      - name: create requirements.txt
        run: cp 'environments-and-requirements/${{ matrix.requirements-file }}' '${{ matrix.requirements-file }}'
@@ -87,14 +72,14 @@ jobs:
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          # cache: 'pip'
          # cache-dependency-path: ${{ matrix.requirements-file }}
          cache: 'pip'
          cache-dependency-path: ${{ matrix.requirements-file }}

      # - name: install dependencies
      #   run: ${{ env.pythonLocation }}/bin/pip install --upgrade pip setuptools wheel

      - name: install requirements
        run: pip3 install -r '${{ matrix.requirements-file }}'
        run: ${{ env.pythonLocation }}/bin/pip install -r '${{ matrix.requirements-file }}'

      - name: Use Cached Stable Diffusion Model
        id: cache-sd-model
@@ -110,20 +95,33 @@ jobs:
        if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
        run: |
          mkdir -p "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}"
          ${{ matrix.curl-command }} -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" -L ${{ matrix.stable-diffusion-model-url }}
          curl \
            -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
            -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" \
            -L ${{ matrix.stable-diffusion-model-url }}

      - name: run configure_invokeai.py
        id: run-preload-models
        run: python3 scripts/configure_invokeai.py --no-interactive --yes
        run: |
          ${{ env.pythonLocation }}/bin/python scripts/configure_invokeai.py --no-interactive --yes

      - name: cat ~/.invokeai
        id: cat-invokeai
        run: cat ~/.invokeai

      - name: Run the tests
        id: run-tests
        if: matrix.os != 'windows-2022'
        run: python3 scripts/invoke.py --no-patchmatch --no-nsfw_checker --model ${{ matrix.stable-diffusion-model }} --from_file ${{ env.TEST_PROMPTS }} --root="${{ env.INVOKEAI_ROOT }}" --outdir="${{ env.INVOKEAI_OUTDIR }}"
        run: |
          time ${{ env.pythonLocation }}/bin/python scripts/invoke.py \
            --no-patchmatch \
            --no-nsfw_checker \
            --model ${{ matrix.stable-diffusion-model }} \
            --from_file ${{ env.TEST_PROMPTS }} \
            --root="${{ env.INVOKEAI_ROOT }}" \
            --outdir="${{ env.INVOKEAI_ROOT }}/outputs"

      - name: Archive results
        id: archive-results
        if: matrix.os != 'windows-2022'
        uses: actions/upload-artifact@v3
        with:
          name: results_${{ matrix.requirements-file }}_${{ matrix.python-version }}
12  .gitignore  vendored
@@ -6,7 +6,6 @@ models/ldm/stable-diffusion-v1/model.ckpt
# ignore user models config
configs/models.user.yaml
config/models.user.yml
invokeai.init

# ignore the Anaconda/Miniconda installer used while building Docker image
anaconda.sh
@@ -223,11 +222,12 @@ environment.yml
requirements.txt

# source installer files
installer/*zip
installer/install.bat
installer/install.sh
installer/update.bat
installer/update.sh
source_installer/*zip
source_installer/invokeAI
install.bat
install.sh
update.bat
update.sh

# this may be present if the user created a venv
invokeai
82  README.md
@@ -1,9 +1,11 @@
<div align="center">

![project logo](docs/assets/invoke_ai_banner.png)

# InvokeAI: A Stable Diffusion Toolkit

_Formerly known as lstein/stable-diffusion_

![project logo](docs/assets/logo.png)

[![discord badge]][discord link]

[![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]
@@ -36,33 +38,18 @@ This is a fork of
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion),
the open source text-to-image generator. It provides a streamlined
process with various new features and options to aid the image
generation process. It runs on Windows, macOS and Linux machines, with
generation process. It runs on Windows, Mac and Linux machines, with
GPU cards with as little as 4 GB of RAM. It provides both a polished
Web interface (see below), and an easy-to-use command-line interface.

**Quick links**: [[How to Install](#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]

_Note: InvokeAI is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
requests. Be sure to use the provided templates. They will help us diagnose issues faster._

# Getting Started with InvokeAI

For full installation and upgrade instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)

1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.2.3)
2. Download the .zip file for your OS (Windows/macOS/Linux).
3. Unzip the file.
4. If you are on Windows, double-click on the `install.bat` script. On macOS, open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press return. On Linux, run `install.sh`.
5. Wait a while, until it is done.
6. The folder where you ran the installer from will now be filled with lots of files. If you are on Windows, double-click on the `invoke.bat` file. On macOS, open a Terminal window, drag `invoke.sh` from the folder into the Terminal, and press return. On Linux, run `invoke.sh`
7. Press 2 to open the "browser-based UI", press enter/return, wait a minute or two for Stable Diffusion to start up, then open your browser and go to http://localhost:9090.
8. Type `banana sushi` in the box on the top left and click `Invoke`:
**Quick links**: [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]

<div align="center"><img src="docs/assets/invoke-web-server-1.png" width=640></div>

_Note: This fork is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
requests. Be sure to use the provided templates. They will help aid diagnose issues faster._

## Table of Contents

@@ -82,13 +69,10 @@ This fork is supported across Linux, Windows and Macintosh. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver). For full installation and upgrade
instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)

### Hardware Requirements

InvokeAI is supported across Linux, Windows and macOS. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver).
#### System

You wil need one of the following:
@@ -96,10 +80,6 @@ You wil need one of the following:
- An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- An Apple computer with an M1 chip.

We do not recommend the GTX 1650 or 1660 series video cards. They are
unable to run in half-precision mode and do not have sufficient VRAM
to render 512x512 images.

#### Memory

- At least 12 GB Main Memory RAM.
@@ -117,12 +97,11 @@ Similarly, specify full-precision mode on Apple M1 hardware.

Precision is auto configured based on the device. If however you encounter
errors like 'expected type Float but found Half' or 'not implemented for Half'
you can try starting `invoke.py` with the `--precision=float32` flag to your initialization command
you can try starting `invoke.py` with the `--precision=float32` flag:

```bash
(invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32
```
Or by updating your InvokeAI configuration file with this argument.

### Features

@@ -151,7 +130,39 @@ Or by updating your InvokeAI configuration file with this argument.

### Latest Changes

For our latest changes, view our [Release Notes](https://github.com/invoke-ai/InvokeAI/releases)
- v2.0.1 (13 October 2022)
  - fix noisy images at high step count when using k* samplers
  - dream.py script now calls invoke.py module directly rather than
    via a new python process (which could break the environment)

- v2.0.0 (9 October 2022)

  - `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
    for backward compatibility.
  - Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
  - Support for <a href="https://invoke-ai.github.io/InvokeAI/features/INPAINTING/">inpainting</a> and <a href="https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/">outpainting</a>
  - img2img runs on all k* samplers
  - Support for <a href="https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts">negative prompts</a>
  - Support for CodeFormer face reconstruction
  - Support for Textual Inversion on Macintoshes
  - Support in both WebGUI and CLI for <a href="https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/">post-processing of previously-generated images</a>
    using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
    and "embiggen" upscaling. See the `!fix` command.
  - New `--hires` option on `invoke>` line allows <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/#txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
  - New `--perlin` and `--threshold` options allow you to add and control variation
    during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>
  - Extensive metadata now written into PNG files, allowing reliable regeneration of images
    and tweaking of previous settings.
  - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
  - Improved <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/">command-line completion behavior</a>.
    New commands added:
    - List command-line history with `!history`
    - Search command-line history with `!search`
    - Clear history with `!clear`
  - Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
    configure. To switch away from auto use the new flag like `--precision=float32`.

For older changelogs, please visit the **[CHANGELOG](https://invoke-ai.github.io/InvokeAI/CHANGELOG#v114-11-september-2022)**.

### Troubleshooting

@@ -161,9 +172,8 @@ problems and other issues.
# Contributing

Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
cleanup, testing, or code reviews, is very much encouraged to do so.

To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.
cleanup, testing, or code reviews, is very much encouraged to do so. To join, just raise your hand on the InvokeAI
Discord server or discussion board.

If you are unfamiliar with how
to contribute to GitHub projects, here is a
@@ -18,11 +18,9 @@ from PIL.Image import Image as ImageType
from uuid import uuid4
from threading import Event

from ldm.generate import Generate
from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
from ldm.invoke.conditioning import get_tokens_for_prompt, get_prompt_structure
from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
from ldm.invoke.prompt_parser import split_weighted_subprompts, Blend
from ldm.invoke.prompt_parser import split_weighted_subprompts
from ldm.invoke.generator.inpaint import infill_methods

from backend.modules.parameters import parameters_to_command
@@ -41,7 +39,7 @@ if not os.path.isabs(args.outdir):


class InvokeAIWebServer:
    def __init__(self, generate: Generate, gfpgan, codeformer, esrgan) -> None:
    def __init__(self, generate, gfpgan, codeformer, esrgan) -> None:
        self.host = args.host
        self.port = args.port

@@ -245,16 +243,14 @@ class InvokeAIWebServer:

    def find_frontend(self):
        my_dir = os.path.dirname(__file__)
        # LS: setup.py seems to put the frontend in different places on different systems, so
        # this is fragile and needs to be replaced with a better way of finding the front end.
        for candidate in (os.path.join(my_dir,'..','frontend','dist'), # pip install -e .
                          os.path.join(my_dir,'../../../../frontend','dist'), # pip install . (Linux, Mac)
                          os.path.join(my_dir,'../../../frontend','dist'), # pip install . (Windows)
        for candidate in (os.path.join(my_dir,'..','frontend','dist'), # pip install -e .
                          os.path.join(my_dir,'../../../../frontend','dist') # pip install .
                          ):
            if os.path.exists(candidate):
                return candidate
        assert "Frontend files cannot be found. Cannot continue"


    def setup_app(self):
        self.result_url = "outputs/"
        self.init_image_url = "outputs/init-images/"
@@ -779,10 +775,10 @@ class InvokeAIWebServer:
        ).convert("RGBA")

        """
        The outpaint image and mask are pre-cropped by the UI, so the bounding box we pass
        The outpaint image and mask are pre-cropped by the UI, so the bounding box we pass
        to the generator should be:
        {
            "x": 0,
            "x": 0,
            "y": 0,
            "width": original_bounding_box["width"],
            "height": original_bounding_box["height"]
@@ -802,7 +798,7 @@ class InvokeAIWebServer:
        )

        """
        Apply the mask to the init image, creating a "mask" image with
        Apply the mask to the init image, creating a "mask" image with
        transparency where inpainting should occur. This is the kind of
        mask that prompt2image() needs.
        """
@@ -908,13 +904,16 @@ class InvokeAIWebServer:
            },
        )


        if generation_parameters["progress_latents"]:
            image = self.generate.sample_to_lowres_estimated_image(sample)
            (width, height) = image.size
            width *= 8
            height *= 8
            img_base64 = image_to_dataURL(image)
            buffered = io.BytesIO()
            image.save(buffered, format="PNG")
            img_base64 = "data:image/png;base64," + base64.b64encode(
                buffered.getvalue()
            ).decode("UTF-8")
            self.socketio.emit(
                "intermediateResult",
                {
@@ -932,7 +931,7 @@ class InvokeAIWebServer:
            self.socketio.emit("progressUpdate", progress.to_formatted_dict())
            eventlet.sleep(0)

        def image_done(image, seed, first_seed, attention_maps_image=None):
        def image_done(image, seed, first_seed):
            if self.canceled.is_set():
                raise CanceledException

@@ -1094,12 +1093,6 @@ class InvokeAIWebServer:
            self.socketio.emit("progressUpdate", progress.to_formatted_dict())
            eventlet.sleep(0)

            parsed_prompt, _ = get_prompt_structure(generation_parameters["prompt"])
            tokens = None if type(parsed_prompt) is Blend else \
                get_tokens_for_prompt(self.generate.model, parsed_prompt)
            attention_maps_image_base64_url = None if attention_maps_image is None \
                else image_to_dataURL(attention_maps_image)

            self.socketio.emit(
                "generationResult",
                {
@@ -1112,8 +1105,6 @@ class InvokeAIWebServer:
                    "height": height,
                    "boundingBox": original_bounding_box,
                    "generationMode": generation_parameters["generation_mode"],
                    "attentionMaps": attention_maps_image_base64_url,
                    "tokens": tokens,
                },
            )
            eventlet.sleep(0)
@@ -1125,7 +1116,7 @@ class InvokeAIWebServer:
            self.generate.prompt2image(
                **generation_parameters,
                step_callback=image_progress,
                image_callback=image_done
                image_callback=image_done,
            )

        except KeyboardInterrupt:
@@ -1572,19 +1563,6 @@ def dataURL_to_image(dataURL: str) -> ImageType:
    )
    return image

"""
Converts an image into a base64 image dataURL.
"""

def image_to_dataURL(image: ImageType) -> str:
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    image_base64 = "data:image/png;base64," + base64.b64encode(
        buffered.getvalue()
    ).decode("UTF-8")
    return image_base64


"""
Converts a base64 image dataURL into bytes.
Binary file not shown.
@@ -1,17 +0,0 @@
InvokeAI

Project homepage: https://github.com/invoke-ai/InvokeAI

Installation on Windows:
NOTE: You might need to enable Windows Long Paths. If you're not sure,
then you almost certainly need to. Simply double-click the 'WinLongPathsEnabled.reg'
file. Note that you will need to have admin privileges in order to
do this.

Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).

Installation on Linux and Mac:
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).

After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh'
file (on Linux/Mac) to start InvokeAI.
@@ -25,5 +25,3 @@ inpainting-1.5:
    config: configs/stable-diffusion/v1-inpainting-inference.yaml
    vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
    description: RunwayML SD 1.5 model optimized for inpainting
    width: 512
    height: 512
@@ -32,7 +32,7 @@ model:
        placeholder_strings: ["*"]
        initializer_words: ['sculpture']
        per_image_tokens: false
        num_vectors_per_token: 1
        num_vectors_per_token: 8
        progressive_words: False

    unet_config:
@@ -1,4 +1,4 @@
FROM python:3.10-slim AS builder
FROM ubuntu:22.10

# use bash
SHELL [ "/bin/bash", "-c" ]
@@ -7,42 +7,28 @@ SHELL [ "/bin/bash", "-c" ]
RUN apt-get update \
  && apt-get install -y \
    --no-install-recommends \
    gcc=4:10.2.* \
    libgl1-mesa-glx=20.3.* \
    libglib2.0-0=2.66.* \
    python3-dev=3.9.* \
    build-essential \
    gcc \
    git \
    libgl1-mesa-glx \
    libglib2.0-0 \
    pip \
    python3 \
    python3-dev \
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/*

# set workdir, PATH and copy sources
WORKDIR /usr/src/app
ENV PATH /usr/src/app/.venv/bin:$PATH
# set workdir and copy sources
WORKDIR /invokeai
ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt
COPY . ./environments-and-requirements/${PIP_REQUIREMENTS} ./

# install requirements
RUN python3 -m venv .venv \
  && pip install \
    --no-cache-dir \
    -r ${PIP_REQUIREMENTS}

FROM python:3.10-slim AS runtime

# Install necesarry packages
RUN apt-get update \
  && apt-get install -y \
    --no-install-recommends \
    libgl1-mesa-glx=20.3.* \
    libglib2.0-0=2.66.* \
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/*

WORKDIR /usr/src/app
COPY --from=builder /usr/src/app .
# install requirements and link outputs folder
RUN pip install \
  --no-cache-dir \
  -r ${PIP_REQUIREMENTS}

# set Environment, Entrypoint and default CMD
ENV INVOKEAI_ROOT /data
ENV PATH=/usr/src/app/.venv/bin:$PATH

ENTRYPOINT [ "python3", "scripts/invoke.py" ]
ENTRYPOINT [ "python3", "scripts/invoke.py", "--outdir=/data/outputs" ]
CMD [ "--web", "--host=0.0.0.0" ]
@@ -1,86 +0,0 @@
#######################
#### Builder stage ####

FROM library/ubuntu:22.04 AS builder

ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt update && apt-get install -y \
        git \
        libglib2.0-0 \
        libgl1-mesa-glx \
        python3-venv \
        python3-pip \
        build-essential \
        python3-opencv \
        libopencv-dev

# This is needed for patchmatch support
RUN cd /usr/lib/x86_64-linux-gnu/pkgconfig/ &&\
    ln -sf opencv4.pc opencv.pc

ARG WORKDIR=/invokeai
WORKDIR ${WORKDIR}

ENV VIRTUAL_ENV=${WORKDIR}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

RUN --mount=type=cache,target=/root/.cache/pip \
    python3 -m venv ${VIRTUAL_ENV} &&\
    pip install --extra-index-url https://download.pytorch.org/whl/cu116 \
        torch==1.12.0+cu116 \
        torchvision==0.13.0+cu116 &&\
    pip install -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch

COPY . .
RUN --mount=type=cache,target=/root/.cache/pip \
    cp environments-and-requirements/requirements-lin-cuda.txt requirements.txt && \
    pip install -r requirements.txt &&\
    pip install -e .


#######################
#### Runtime stage ####

FROM library/ubuntu:22.04 as runtime

ARG DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt update && apt install -y --no-install-recommends \
        git \
        curl \
        ncdu \
        iotop \
        bzip2 \
        libglib2.0-0 \
        libgl1-mesa-glx \
        python3-venv \
        python3-pip \
        build-essential \
        python3-opencv \
        libopencv-dev &&\
    apt-get clean && apt-get autoclean

ARG WORKDIR=/invokeai
WORKDIR ${WORKDIR}

ENV INVOKEAI_ROOT=/mnt/invokeai
ENV VIRTUAL_ENV=${WORKDIR}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

COPY --from=builder ${WORKDIR} ${WORKDIR}
COPY --from=builder /usr/lib/x86_64-linux-gnu/pkgconfig /usr/lib/x86_64-linux-gnu/pkgconfig

# build patchmatch
RUN python -c "from patchmatch import patch_match"

## workaround for non-existent initfile when runtime directory is mounted; see #1613
RUN touch /root/.invokeai

ENTRYPOINT ["bash"]

CMD ["-c", "python3 scripts/invoke.py --web --host 0.0.0.0"]
@@ -1,44 +0,0 @@
# Directory in the container where the INVOKEAI_ROOT (runtime dir) will be mounted
INVOKEAI_ROOT=/mnt/invokeai
# Host directory to contain the runtime dir. Will be mounted at INVOKEAI_ROOT path in the container
HOST_MOUNT_PATH=${HOME}/invokeai

IMAGE=local/invokeai:latest

USER=$(shell id -u)
GROUP=$(shell id -g)

# All downloaded models, config, etc will end up in ${HOST_MOUNT_PATH} on the host.
# This is consistent with the expected non-Docker behaviour.
# Contents can be moved to a persistent storage and used to prime the cache on another host.

build:
	DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..

configure:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		${IMAGE} -c "python scripts/configure_invokeai.py"

# Run the container with the runtime dir mounted and the web server exposed on port 9090
web:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		-p 9090:9090 \
		${IMAGE} -c "python scripts/invoke.py --web --host 0.0.0.0"

# Run the cli with the runtime dir mounted
cli:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		${IMAGE} -c "python scripts/invoke.py"

# Run the container with the runtime dir mounted and open a bash shell
shell:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} ${IMAGE} --

.PHONY: build configure web cli shell
@@ -12,13 +12,13 @@ pip_requirements=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
dockerfile=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}

# print the settings
echo -e "You are using these values:\n"
echo -e "Dockerfile:\t ${dockerfile}"
echo -e "requirements:\t ${pip_requirements}"
echo -e "volumename:\t ${volumename}"
echo -e "arch:\t\t ${arch}"
echo -e "platform:\t ${platform}"
echo -e "invokeai_tag:\t ${invokeai_tag}\n"
echo "You are using these values:"
echo -e "Dockerfile:\t\t ${dockerfile}"
echo -e "requirements:\t\t ${pip_requirements}"
echo -e "volumename:\t\t ${volumename}"
echo -e "arch:\t\t\t ${arch}"
echo -e "platform:\t\t ${platform}"
echo -e "invokeai_tag:\t\t ${invokeai_tag}\n"

if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then
  echo "Volume already exists"
@@ -3,10 +3,6 @@ set -e
source ./docker-build/env.sh || echo "please run from repository root" || exit 1

echo -e "You are using these values:\n"
echo -e "volumename:\t ${volumename}"
echo -e "invokeai_tag:\t ${invokeai_tag}\n"

docker run \
  --interactive \
  --tty \
@@ -171,12 +171,12 @@ title: Changelog
- Integrate sd-v1-5 model into test matrix (easily expandable), remove
  unecesarry caches by @mauwii in
  https://github.com/invoke-ai/InvokeAI/pull/1293
- add --no-interactive to configure_invokeai step by @mauwii in
- add --no-interactive to preload_models step by @mauwii in
  https://github.com/invoke-ai/InvokeAI/pull/1302
- 1-click installer and updater. Uses micromamba to install git and conda into a
  contained environment (if necessary) before running the normal installation
  script by @cmdr2 in https://github.com/invoke-ai/InvokeAI/pull/1253
- configure_invokeai.py script downloads the weight files by @lstein in
- preload_models.py script downloads the weight files by @lstein in
  https://github.com/invoke-ai/InvokeAI/pull/1290

## v2.0.1 <small>(13 October 2022)</small>
@@ -130,34 +130,20 @@ file should contain the startup options as you would type them on the
|
||||
command line (`--steps=10 --grid`), one argument per line, or a
|
||||
mixture of both using any of the accepted command switch formats:
|
||||
|
||||
!!! example "my unmodified initialization file"
|
||||
!!! example ""
|
||||
|
||||
```bash title="~/.invokeai" linenums="1"
|
||||
# InvokeAI initialization file
|
||||
# This is the InvokeAI initialization file, which contains command-line default values.
|
||||
# Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
|
||||
# or renaming it and then running configure_invokeai.py again.
|
||||
|
||||
# The --root option below points to the folder in which InvokeAI stores its models, configs and outputs.
|
||||
--root="/Users/mauwii/invokeai"
|
||||
|
||||
# the --outdir option controls the default location of image files.
|
||||
--outdir="/Users/mauwii/invokeai/outputs"
|
||||
|
||||
# You may place other frequently-used startup commands here, one or more per line.
|
||||
# Examples:
|
||||
# --web --host=0.0.0.0
|
||||
# --steps=20
|
||||
# -Ak_euler_a -C10.0
|
||||
```bash
|
||||
--web
|
||||
--steps=28
|
||||
--grid
|
||||
-f 0.6 -C 11.0 -A k_euler_a
|
||||
```
|
||||
|
||||
!!! note
|
||||
|
||||
The initialization file only accepts the command line arguments.
|
||||
There are additional arguments that you can provide on the `invoke>` command
|
||||
line (such as `-n` or `--iterations`) that cannot be entered into this file.
|
||||
Also be alert for empty blank lines at the end of the file, which will cause
|
||||
an arguments error at startup time.
|
||||
Note that the initialization file only accepts the command line arguments.
|
||||
There are additional arguments that you can provide on the `invoke>` command
|
||||
line (such as `-n` or `--iterations`) that cannot be entered into this file.
|
||||
Also be alert for empty blank lines at the end of the file, which will cause
|
||||
an arguments error at startup time.
|
||||
|
||||
## List of prompt arguments
|
||||
|
||||
@@ -209,17 +195,15 @@ Here are the invoke> command that apply to txt2img:
|
||||
| `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for now to use this. |
|
||||
| `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory |
|
||||
|
||||
!!! note
|
||||
Note that the width and height of the image must be multiples of 64. You can
|
||||
provide different values, but they will be rounded down to the nearest multiple
|
||||
of 64.
|
||||
|
||||
the width and height of the image must be multiples of 64. You can
|
||||
provide different values, but they will be rounded down to the nearest multiple
|
||||
of 64.
|
||||
### This is an example of img2img:
|
||||
|
||||
!!! example "This is a example of img2img"
|
||||
|
||||
```bash
|
||||
invoke> waterfall and rainbow -I./vacation-photo.png -W640 -H480 --fit
|
||||
```
|
||||
```
|
||||
invoke> waterfall and rainbow -I./vacation-photo.png -W640 -H480 --fit
|
||||
```
|
||||
|
||||
This will modify the indicated vacation photograph by making it more like the
|
||||
prompt. Results will vary greatly depending on what is in the image. We also ask
|
||||
@@ -269,7 +253,7 @@ description of the part of the image to replace. For example, if you have an
|
||||
image of a breakfast plate with a bagel, toast and scrambled eggs, you can
|
||||
selectively mask the bagel and replace it with a piece of cake this way:
|
||||
|
||||
```bash
|
||||
```
|
||||
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel
|
||||
```
|
||||
|
||||
@@ -281,7 +265,7 @@ are getting too much or too little masking you can adjust the threshold down (to
|
||||
get more mask), or up (to get less). In this example, by passing `-tm` a higher
|
||||
value, we are insisting on a more stringent classification.
|
||||
|
||||
```bash
|
||||
```
|
||||
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel 0.6
|
||||
```
|
||||
|
||||
@@ -291,16 +275,16 @@ You can load and use hundreds of community-contributed Textual
|
||||
Inversion models just by typing the appropriate trigger phrase. Please
|
||||
see [Concepts Library](CONCEPTS.md) for more details.
|
||||
|
||||
## Other Commands
|
||||
# Other Commands
|
||||
|
||||
The CLI offers a number of commands that begin with "!".
|
||||
|
||||
### Postprocessing images
|
||||
## Postprocessing images
|
||||
|
||||
To postprocess a file using face restoration or upscaling, use the `!fix`
|
||||
command.
|
||||
|
||||
#### `!fix`
|
||||
### `!fix`
|
||||
|
||||
This command runs a post-processor on a previously-generated image. It takes a
|
||||
PNG filename or path and applies your choice of the `-U`, `-G`, or `--embiggen`
|
||||
@@ -327,19 +311,19 @@ Some examples:
|
||||
[1] outputs/img-samples/000017.4829112.gfpgan-00.png: !fix "outputs/img-samples/0000045.4829112.png" -s 50 -S -W 512 -H 512 -C 7.5 -A k_lms -G 0.8
|
||||
```
|
||||
|
||||
#### `!mask`
|
||||
### !mask
|
||||
|
||||
This command takes an image, a text prompt, and uses the `clipseg` algorithm to
|
||||
automatically generate a mask of the area that matches the text prompt. It is
|
||||
useful for debugging the text masking process prior to inpainting with the
|
||||
`--text_mask` argument. See [INPAINTING.md] for details.
|
||||
|
||||
### Model selection and importation

The CLI allows you to add new models on the fly, as well as to switch among them
rapidly without leaving the script.
#### `!models`

This prints out a list of the models defined in `config/models.yaml`. The active
model is bold-faced. For example:

<pre>
laion400m            not loaded  <no description>
waifu-diffusion      not loaded  Waifu Diffusion v1.3
</pre>
#### `!switch <model>`

This quickly switches from one model to another without leaving the CLI script.
`invoke.py` uses a memory caching system; once a model has been loaded,
switching back and forth is quick:

<pre>
invoke> !switch waifu-diffusion
| Making attention of type 'vanilla' with 512 in_channels
| Using faster float16 precision
>> Model loaded in 18.24s
>> Max VRAM used to load the model: 2.17G
>> Current VRAM usage: 2.17G
>> Setting Sampler to k_lms
</pre>

Afterward, `!models` lists the previously-loaded model as cached:

<pre>
laion400m            not loaded  <no description>
waifu-diffusion          cached  Waifu Diffusion v1.3
</pre>
#### `!import_model <path/to/model/weights>`

This command imports a new model weights file into InvokeAI, makes it available
for image generation within the script, and writes out the configuration for the
model into `config/models.yaml` so that it is remembered in later sessions.
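An invocation might look like this (the checkpoint path is hypothetical); the script then walks you through an interactive import wizard whose output resembles the following:

```bash
invoke> !import_model models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
```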
<pre>
OK to import [n]? <b>y</b>
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
| Making attention of type 'vanilla' with 512 in_channels
| Using faster float16 precision
invoke>
</pre>
#### `!edit_model <name_of_model>`

The `!edit_model` command can be used to modify a model that is already defined
in `config/models.yaml`. Call it with the short name of the model you wish to
modify.
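For example, using one of the model names shown by `!models` above:

```bash
invoke> !edit_model waifu-diffusion
```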
### History processing

The CLI provides a series of convenient commands for reviewing previous actions,
retrieving them, modifying them, and re-running them.

#### `!history`

The invoke script keeps track of all the commands you issue during a session,
allowing you to re-run them. On Mac and Linux systems, it also writes the
command history out to disk. `!history` prints a numbered list of the commands
issued during the session (Windows), or the most recent 1000 commands
(Mac|Linux). You can then repeat a command by using the command `!NNN`, where
"NNN" is the history line number. For example:
!!! example ""
|
||||
```bash
|
||||
invoke> !history
|
||||
...
|
||||
[14] happy woman sitting under tree wearing broad hat and flowing garment
|
||||
[15] beautiful woman sitting under tree wearing broad hat and flowing garment
|
||||
[18] beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6
|
||||
[20] watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
||||
[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
||||
...
|
||||
invoke> !20
|
||||
invoke> watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
||||
```
|
||||
|
||||
```bash
|
||||
invoke> !history
|
||||
...
|
||||
[14] happy woman sitting under tree wearing broad hat and flowing garment
|
||||
[15] beautiful woman sitting under tree wearing broad hat and flowing garment
|
||||
[18] beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6
|
||||
[20] watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
||||
[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
||||
...
|
||||
invoke> !20
|
||||
invoke> watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
||||
```
|
||||
|
||||
#### `!fetch`

This command retrieves the generation parameters from a previously generated
image and either loads them into the command line (Linux|Mac), or prints them
out (Windows). Given a folder with image png files, and the wildcard \*.png, it
will retrieve the dream commands used to generate the images and save them to a
file `commands.txt` for further processing.
!!! example "load the generation command for a single png file"
|
||||
This example loads the generation command for a single png file:
|
||||
|
||||
```bash
|
||||
invoke> !fetch 0000015.8929913.png
|
||||
# the script returns the next line, ready for editing and running:
|
||||
invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
|
||||
```
|
||||
```bash
|
||||
invoke> !fetch 0000015.8929913.png
|
||||
# the script returns the next line, ready for editing and running:
|
||||
invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
|
||||
```
|
||||
|
||||
!!! example "fetch the generation commands from a batch of files and store them into `selected.txt`"
|
||||
This one fetches the generation commands from a batch of files and stores them
|
||||
into `selected.txt`:
|
||||
|
||||
```bash
|
||||
invoke> !fetch outputs\selected-imgs\*.png selected.txt
|
||||
```
|
||||
```bash
|
||||
invoke> !fetch outputs\selected-imgs\*.png selected.txt
|
||||
```
|
||||
|
||||
#### `!replay`

This command replays a text file generated by `!fetch` or created manually.

!!! example

    ```bash
    invoke> !replay outputs\selected-imgs\selected.txt
    ```

!!! note

    These commands may behave unexpectedly if given a PNG file that was
    not generated by InvokeAI.
#### `!search <search string>`

This is similar to `!history` but it only returns lines that contain
`search string`. For example:

```bash
invoke> !search surreal
[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
```
#### `!clear`

This clears the search history from memory and disk. Be advised that this
operation is irreversible and does not issue any warnings!
---
title: Concepts Library
---

# :material-library-shelves: The Hugging Face Concepts Library and Importing Textual Inversion files
## Using Textual Inversion Files

Textual inversion (TI) files are small models that customize the output of
Stable Diffusion image generation. They can augment SD with specialized subjects
and artistic styles. They are also known as "embeds" in the machine learning
world.
Each TI file introduces one or more vocabulary terms to the SD model. These are
known in InvokeAI as "triggers." Triggers are often, but not always, denoted
using angle brackets as in "<trigger-phrase>". The two most common types of
TI files that you'll encounter are `.pt` and `.bin` files, which are produced by
different TI training packages. InvokeAI supports both formats, but its
[built-in TI training system](TEXTUAL_INVERSION.md) produces `.pt`.
The [Hugging Face company](https://huggingface.co/sd-concepts-library) has
amassed a large library of >800 community-contributed TI files covering a
broad range of subjects and styles. InvokeAI has built-in support for this
library, which downloads and merges TI files automatically upon request. You can
also install your own or others' TI files by placing them in a designated
directory.
### An Example

Here are a few examples to illustrate how it works. All these images were
generated using the command-line client and the Stable Diffusion 1.5 model:
| Japanese gardener | Japanese gardener <ghibli-face> | Japanese gardener <hoi4-leaders> | Japanese gardener <cartoona-animals> |
| :--------------------------------: | :-----------------------------------: | :------------------------------------: | :----------------------------------------: |
|  |  |  |  |
You can also combine styles and concepts:

<figure markdown>
  
  <figcaption>A portrait of <alf> in <cartoona-animal> style</figcaption>
</figure>
## Using a Hugging Face Concept

Hugging Face TI concepts are downloaded and installed automatically as you
require them. This requires your machine to be connected to the Internet. To
find out what each concept is for, you can browse the
[Hugging Face concepts library](https://huggingface.co/sd-concepts-library) and
look at examples of what each concept produces.
When you have an idea of a concept you wish to try, go to the command-line
client (CLI) and type a "<" character and the beginning of the Hugging Face
concept name you wish to load. Press the Tab key, and the CLI will show you all
matching concepts. You can also type "<" and Tab to get a listing of all ~800
concepts, but be prepared to scroll up to see them all! If there is more than
one match you can continue to type and Tab until the concept is completed.

For example, if you type "<x" and Tab, you'll be prompted with the completions:
```
<xatu2> <xatu> <xbh> <xi> <xidiversity> <xioboma> <xuna> <xyz>
```

Now type "id" and press Tab. It will be autocompleted to "<xidiversity>"
because this is a unique match.

Finish your prompt and generate as usual. You may include multiple concept
terms in the prompt.
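For instance, a complete prompt using this concept might look like the following (an illustrative command; any installed trigger works the same way):

```bash
# the <xidiversity> trigger pulls in the downloaded concept at generation time
invoke> a portrait of a dancer <xidiversity> -s50 -W512 -H512
```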
If you have never used this concept before, you will see a message that the TI
model is being downloaded and installed. After this, the concept will be saved
locally (in the `models/sd-concepts-library` directory) for future use.

Several steps happen during downloading and installation, including a scan of
the file for malicious code. Should any errors occur, you will be warned and the
concept will fail to load. Generation will then continue treating the trigger
term as a normal string of characters (e.g. as literal "<ghibli-face>").

Currently auto-installation of concepts is a feature only available on the
command-line client. Support for the WebUI is a work in progress.
## Installing your Own TI Files

You may install any number of `.pt` and `.bin` files simply by copying them into
the `embeddings` directory of the InvokeAI runtime directory (usually `invokeai`
in your home directory). You may create subdirectories in order to organize the
files in any way you wish. Be careful not to overwrite one file with another.
For example, TI files generated by the Hugging Face toolkit share the name
`learned_embedding.bin`. You can use subdirectories to keep them distinct.
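A minimal sketch of such a layout (the subdirectory names are hypothetical):

```bash
# keep identically-named embedding files apart, one subdirectory per concept
mkdir -p ~/invokeai/embeddings/my-style ~/invokeai/embeddings/my-pet
cp style/learned_embedding.bin ~/invokeai/embeddings/my-style/
cp pet/learned_embedding.bin   ~/invokeai/embeddings/my-pet/
```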
At startup time, InvokeAI will scan the `embeddings` directory and load any TI
files it finds there. At startup you will see a message similar to this one:

```bash
>> Current embedding manager terms: *, <HOI4-Leader>, <princess-knight>
```

Note the `*` trigger term. This is a placeholder term that many early TI
tutorials taught people to use rather than a more descriptive term.
Unfortunately, if you have multiple TI files that all use this term, only the
first one loaded will be triggered by use of the term.
To avoid this problem, you can use the `merge_embeddings.py` script to merge two
or more TI files together. If it encounters a collision of terms, the script
will prompt you to select new terms that do not collide. See
[Textual Inversion](TEXTUAL_INVERSION.md) for details.

## Further Reading
stable diffusion to build the prompt on top of the image you provide, preserving
the original's basic shape and layout. To use it, provide the `--init_img`
option as shown here:

!!! example ""

    ```commandline
    tree on a hill with a river, nature photograph, national geographic -I./test-pictures/tree-and-river-sketch.png -f 0.85
    ```
This will take the original image shown here and generate a new image based on
it, as shown below:

| original image | generated image |
| :------------: | :-------------: |
| { width=320 } | { width=320 } |
The `--init_img` (`-I`) option gives the path to the seed picture. `--strength`
(`-f`) controls how much the original will be modified, ranging from `0.0` (keep
the original intact) to `1.0` (ignore the original completely).
from a prompt. If the step count is 10, then the "latent space" (Stable
Diffusion's internal representation of the image) for the prompt "fire" with
seed `1592514025` develops something like this:

!!! example ""

    ```bash
    invoke> "fire" -s10 -W384 -H384 -S1592514025
    ```

<figure markdown>
  { width=720 }
</figure>
Put simply: starting from a frame of fuzz/static, SD finds details in each frame
that it thinks look like "fire" and brings them a little bit more into focus,
step by step. In img2img mode, your starting image is instead inserted
into the sequence at the appropriate point, with just the right amount of noise.
### A concrete example

!!! example "I want SD to draw a fire based on this hand-drawn image"

    { align=left }

Let's only do 10 steps, to make it easier to see what's happening. If strength
is `0.7`, this is what the internal steps the algorithm has to take will look
like:

<figure markdown>
  
</figure>
With strength `0.4`, the steps look more like this:

<figure markdown>
  
</figure>

Notice how much more fuzzy the starting image is for strength `0.7` compared to
`0.4`, and notice also how much longer the sequence is with `0.7`:
A number of caveats:

3. Your results will be _much_ better if you use the `inpaint-1.5` model
   released by runwayML and installed by default by `scripts/configure_invokeai.py`.
   This model was trained specifically to harmoniously fill in image gaps. The
   standard model will work as well, but you may notice color discontinuities at
   the border.
should "just work" without further intervention. Simply pass the `--upscale`
option on the command line, or use the popup in the Web GUI.

**GFPGAN** requires a series of downloadable model files to work. These are
loaded when you run `scripts/configure_invokeai.py`. If GFPGAN is failing with an
error, please run the following from the InvokeAI directory:

```bash
python scripts/configure_invokeai.py
```

If you do not run this script in advance, the GFPGAN module will attempt to
download the model files the first time you try to perform facial
reconstruction.
## Usage

You will now have access to two new prompt arguments.

### Upscaling

`-U : <upscaling_factor> <upscaling_strength>`
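For instance, to upscale a render 2x at 60% strength, one might write (an illustrative command; the prompt and values are arbitrary):

```bash
# upscale the result 2x, applying ESRGAN at 0.6 strength
invoke> a sunset over the ocean -s50 -U 2 0.6
```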
This repo also allows you to perform face restoration using
[CodeFormer](https://github.com/sczhou/CodeFormer).

In order to set up CodeFormer to work, you need to download the models like with
GFPGAN. You can do this either by running `configure_invokeai.py` or by manually
downloading the
[model file](https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth)
and saving it to `ldm/invoke/restoration/codeformer/weights` folder.
You can use the `-ft` prompt argument to swap between CodeFormer and the default
GFPGAN. The above mentioned `-G` prompt argument will allow you to control the
strength of the restoration effect.

### CodeFormer Usage

The following command will perform face restoration with CodeFormer instead of
the default GFPGAN.
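The command itself was elided here; a sketch consistent with the flags described above (the prompt and values are illustrative):

```bash
# restore faces with CodeFormer at 0.8 strength
invoke> portrait of an elderly sailor -s50 -G 0.8 -ft codeformer
```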
A new file named `000044.2945021133.fixed.png` will be created in the output
directory. Note that the `!fix` command does not replace the original file,
unlike the behavior at generate time.

## How to disable

If, for some reason, you do not wish to load the GFPGAN and/or ESRGAN libraries,
you can disable them on the invoke.py command line with the `--no_restore` and
---
title: Overview
---

Here you can find the documentation for different features.
Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.

This fork is supported across Linux, Windows and Macintosh. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver).

First time users, please see [Automated
Installer](installation/INSTALL_AUTOMATED.md) for a walkthrough of
getting InvokeAI up and running on your system. For alternative
installation and upgrade instructions, please see: [InvokeAI
Installation Overview](installation/).

Linux users who wish to make use of the PyPatchMatch inpainting
functions will need to perform a bit of extra work to enable this
module. Instructions can be found at [Installing
PyPatchMatch](installation/INSTALL_PATCHMATCH.md).
## :fontawesome-solid-computer: Hardware Requirements

You will need one of the following:

- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux only)
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
We do **not recommend** the following video cards due to issues with
their running in half-precision mode and having insufficient VRAM to
render 512x512 images in full-precision mode:

- NVIDIA 10xx series cards such as the 1080ti
- GTX 1650 series cards
- GTX 1660 series cards

### :fontawesome-solid-memory: Memory

- At least 12 GB Main Memory RAM.

### :fontawesome-regular-hard-drive: Disk

- At least 18 GB of free disk space for the machine learning model, Python, and
  all its dependencies.
!!! info

    If you have an Nvidia 10xx series card (e.g. the 1080ti), please run the invoke script in
    full-precision mode as shown below.

    Similarly, specify full-precision mode on Apple M1 hardware.

    Precision is auto configured based on the device. If however you encounter errors like
    `expected type Float but found Half` or `not implemented for Half` you can try starting
    `invoke.py` with the `--precision=float32` flag:
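    A restored sketch of the elided command (only the flag itself is documented above):

    ```bash
    python scripts/invoke.py --precision=float32
    ```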
- [The InvokeAI Web Interface](features/WEB.md)
- [WebGUI hotkey reference guide](features/WEBUIHOTKEYS.md)
- [WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
<!-- separator -->
- [The Command Line Interface](features/CLI.md)
- [Image2Image](features/IMG2IMG.md)
- [Inpainting](features/INPAINTING.md)
- [Prompt Engineering](features/PROMPTS.md)
<!-- separator -->
- Miscellaneous
    - [NSFW Checker](features/NSFW.md)
    - [Embiggen upscaling](features/EMBIGGEN.md)
    - [Other](features/OTHER.md)
- You can now load
  [multiple models and switch among them quickly](https://docs.google.com/presentation/d/1WywGA1rny7bpFh7CLSdTr4nNpVKdlUeT0Bj0jCsILyU/edit?usp=sharing)
  without leaving the CLI.
- The installation process (via `scripts/configure_invokeai.py`) now lets you select
  among several popular
  [Stable Diffusion models](https://invoke-ai.github.io/InvokeAI/installation/INSTALLING_MODELS/)
  and downloads and installs them on your behalf.
---
title: build binary installers
---

# :simple-buildkite: How to build "binary" installers (InvokeAI-mac/windows/linux_on_*.zip)

## 1. Ensure `installers/requirements.in` is correct

Make sure the file is correct and up to date on the branch to be installed.
## <a name="step-2"></a> 2. Run `pip-compile` on each platform.

On each target platform, in the branch that is to be installed, and
inside the InvokeAI git root folder, run the following commands:

```commandline
conda activate invokeai # or however you activate python
pip install pip-tools
pip-compile --allow-unsafe --generate-hashes --output-file=binary_installer/<reqsfile>.txt binary_installer/requirements.in
```

where `<reqsfile>.txt` is whichever of

```commandline
py3.10-darwin-arm64-mps-reqs.txt
py3.10-darwin-x86_64-reqs.txt
py3.10-linux-x86_64-cuda-reqs.txt
py3.10-windows-x86_64-cuda-reqs.txt
```

matches the current OS and architecture.

> There is no way to cross-compile these. They must be done on a system matching the target OS and arch.

## <a name="step-3"></a> 3. Set github repository and branch

Once all reqs files have been collected and committed **to the branch
to be installed**, edit `binary_installer/install.sh.in` and `binary_installer/install.bat.in` so that `RELEASE_URL`
and `RELEASE_SOURCEBALL` point to the github repo and branch that is
to be installed.
For example, to install the `main` branch of `InvokeAI`, they should be
set as follows:

`install.sh.in`:

```commandline
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
```

`install.bat.in`:

```commandline
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
```

Or, to install the `damians-cool-feature` branch of `damian0815`, set them
as follows:

`install.sh.in`:

```commandline
RELEASE_URL=https://github.com/damian0815/InvokeAI
RELEASE_SOURCEBALL=/archive/refs/heads/damians-cool-feature.tar.gz
```

`install.bat.in`:

```commandline
set RELEASE_URL=https://github.com/damian0815/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/damians-cool-feature.tar.gz
```
The branch and repo specified here **must** contain the correct reqs
files. The installer zip files **do not** contain requirements files;
they are pulled from the specified branch during the installation
process.

## 4. Create zip files.

cd into the `installers/` folder and run
`./create_installers.sh`. This will create
`InvokeAI-mac_on_<branch>.zip`,
`InvokeAI-windows_on_<branch>.zip` and
`InvokeAI-linux_on_<branch>.zip`. These files can be distributed to end users.

These zips will continue to function as installers for all future
pushes to those branches, as long as necessary changes to
`requirements.in` are propagated in a timely manner to the
`py3.10-*-reqs.txt` files using pip-compile as outlined in [step
2](#step-2).

To actually install, users should unzip the appropriate zip file into an empty
folder and run `install.sh` on macOS/Linux or `install.bat` on
Windows.
unofficial Stable Diffusion models and where they can be obtained.

There are three ways to install weights files:

1. During InvokeAI installation, the `configure_invokeai.py` script can download
   them for you.

2. You can use the command-line interface (CLI) to import, configure and modify
   new models files.

3. You can download the files manually and add the appropriate entries to
   `models.yaml`.
### Installation via `configure_invokeai.py`

This is the most automatic way. Run `scripts/configure_invokeai.py` from the
console. It will ask you to select which models to download and lead you through
the steps of setting up a Hugging Face account if you haven't done so already.

To start, run `python scripts/configure_invokeai.py` from within the InvokeAI
directory.
the command-line client's `!import_model` command.

Type a bit of the path name and hit ++tab++ in order to get a choice of
possible completions.

!!! tip "on Windows, you can drag model files onto the command-line"

    Once you have typed in `!import_model `, you can drag the model `.ckpt` file
    onto the command-line to insert the model path. This way, you don't need to
    type it or copy/paste.

4. Follow the wizard's instructions to complete installation as shown in the
   example here:
Each entry in `models.yaml`, such as `arabian-nights-1.0:`, has the following fields:

| field | description |
| :---- | :---------- |
| arabian-nights-1.0 | This is the name of the model that you will refer to from within the CLI and the WebGUI when you need to load and use the model. |
| description | Any description that you want to add to the model to remind you what it is. |
| weights | Relative path to the .ckpt weights file for this model. |
| config | This is the confusingly-named configuration file for the model itself. Use `./configs/stable-diffusion/v1-inference.yaml` unless the model happens to need a custom configuration, in which case the place you downloaded it from will tell you what to use instead. For example, the runwayML custom inpainting model requires the file `configs/stable-diffusion/v1-inpainting-inference.yaml`. This is already included in the InvokeAI distribution and is configured automatically for you by the `configure_invokeai.py` script. |
| vae | If you want to add a VAE file to the model, then enter its path here. |
| width, height | This is the width and height of the images used to train the model. Currently they are always 512 and 512. |
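Putting those fields together, a `models.yaml` stanza might look like this (a sketch with illustrative values; only the fields described above are real):

```yaml
arabian-nights-1.0:
  description: A model trained on the Arabian Nights
  weights: ./models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
  config: ./configs/stable-diffusion/v1-inference.yaml
  width: 512
  height: 512
```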
---
title: InvokeAI Automated Installation
---

# InvokeAI Automated Installation

## Introduction

The automated installer is a shell script that attempts to automate every step
needed to install and run InvokeAI on a stock computer running recent versions
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
version of InvokeAI with the option to upgrade to experimental versions later.
## Walk through

1. Make sure that your system meets the
   [hardware requirements](../index.md#hardware-requirements) and has the
   appropriate GPU drivers installed. In particular, if you are a Linux user
   with an AMD GPU installed, you may need to install the
   [ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

    !!! info "Required Space"

        Installation requires roughly 18G of free disk space to load the libraries and
        recommended model weights files.
2. Check that your system has an up-to-date Python installed. To do this, open
   up a command-line window ("Terminal" on Linux and Macintosh, "Command" or
   "Powershell" on Windows) and type `python --version`. If Python is
   installed, it will print out the version number. If it is version `3.9.1` or
   higher, you meet requirements.

    !!! warning "If you see an older version, or get a command not found error"

        Go to [Python Downloads](https://www.python.org/downloads/) and
        download the appropriate installer package for your platform. We recommend
        [Version 3.10.9](https://www.python.org/downloads/release/python-3109/),
        which has been extensively tested with InvokeAI.
!!! warning "At this time we do not recommend Python 3.11"
|
||||
|
||||
=== "Windows users"
|
||||
|
||||
- During the Python configuration process,
|
||||
Please look out for a checkbox to add Python to your PATH
|
||||
and select it. If the install script complains that it can't
|
||||
find python, then open the Python installer again and choose
|
||||
"Modify" existing installation.
|
||||
|
||||
- There is a slight possibility that you will encountered
|
||||
DLL load errors at the very end of the installation process. This is caused
|
||||
by not having up to date Visual C++ redistributable libraries. If this
|
||||
happens to you, you can install the C++ libraries from this site:
|
||||
https://learn.microsoft.com/en-us/cpp/windows/deploying-native-desktop-applications-visual-cpp?view=msvc-170
|
||||
|
||||
=== "Mac users"
|
||||
|
||||
- After installing Python, you may need to run the
|
||||
following command from the Terminal in order to install the Web
|
||||
certificates needed to download model data from https sites. If
|
||||
you see lots of CERTIFICATE ERRORS during the last part of the
|
||||
install, this is the problem, and you can fix it with this command:
|
||||
|
||||
`/Applications/Python\ 3.10/Install\ Certificates.command`
|
||||
|
||||
- You may need to install the Xcode command line tools. These
|
||||
are a set of tools that are needed to run certain applications in a
|
||||
Terminal, including InvokeAI. This package is provided directly by Apple.
|
||||
|
||||
- To install, open a terminal window and run `xcode-select
|
||||
--install`. You will get a macOS system popup guiding you through the
|
||||
install. If you already have them installed, you will instead see some
|
||||
output in the Terminal advising you that the tools are already installed.
|
||||
|
||||
- More information can be found here:
|
||||
https://www.freecodecamp.org/news/install-xcode-command-line-tools/
|
||||
|
||||
=== "Linux users"
|
||||
|
||||
- See [Installing Python in Ubuntu](#installing-python-in-ubuntu) for some
|
||||
platform-specific tips.
|
||||
|
||||
3. The source installer is distributed in ZIP files. Go to the
   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
   look for a series of files named:

    - [InvokeAI-installer-2.2.4-mac.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-mac.zip)
    - [InvokeAI-installer-2.2.4-windows.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-windows.zip)
    - [InvokeAI-installer-2.2.4-linux.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-linux.zip)

    Download the one that is appropriate for your operating system.
4. Unpack the zip file into a convenient directory. This will create a new
   directory named "InvokeAI-Installer". This example shows how this would look
   using the `unzip` command-line tool, but you may use any graphical or
   command-line Zip extractor:

    ```cmd
    C:\Documents\Linco> unzip InvokeAI-installer-2.2.4-windows.zip
    Archive: C:\Linco\Downloads\InvokeAI-installer-2.2.4-windows.zip
      creating: InvokeAI-Installer\
     inflating: InvokeAI-Installer\install.bat
     inflating: InvokeAI-Installer\readme.txt
    ...
    ```

    After successful installation, you can delete the `InvokeAI-Installer`
    directory.
5. **Windows only** Please double-click on the file WinLongPathsEnabled.reg and
   accept the dialog box that asks you if you wish to modify your registry.
   This activates long filename support on your system and will prevent
   mysterious errors during installation.

6. If you are using a desktop GUI, double-click the installer file. It will be
   named `install.bat` on Windows systems and `install.sh` on Linux and
   Macintosh systems.

    On Windows systems you will probably get an "Untrusted Publisher" warning.
    Click on "More Info" and select "Run Anyway." You trust us, right?
7. Alternatively, from the command line, run the shell script or .bat file:

    ```cmd
    C:\Documents\Linco> cd InvokeAI-Installer
    C:\Documents\Linco\InvokeAI-Installer> install.bat
    ```

8. The script will ask you to choose where to install InvokeAI. Select a
   directory with at least 18G of free space for a full install. InvokeAI and
   all its support files will be installed into a new directory named
   `invokeai` located at the location you specify.

    - The default is to install the `invokeai` directory in your home directory,
      usually `C:\Users\YourName\invokeai` on Windows systems,
      `/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
      on Macintoshes, where "YourName" is your login name.

    - The script uses tab autocompletion to suggest directory path completions.
      Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
      to suggest completions.
9. Sit back and let the install script work. It will install the third-party
   libraries needed by InvokeAI, then download the current InvokeAI release and
   install it.

    Be aware that some of the library download and install steps take a long
    time. In particular, the `pytorch` package is quite large and often appears
    to get "stuck" at 99.9%. Have patience and the installation step will
    eventually resume. However, there are occasions when the library install
    does legitimately get stuck. If you have been waiting for more than ten
    minutes and nothing is happening, you can interrupt the script with ^C. You
    may restart it and it will pick up where it left off.
10. After installation completes, the installer will launch a script called
    `configure_invokeai.py`, which will guide you through the first-time process
    of selecting one or more Stable Diffusion model weights files, downloading
    and configuring them. We provide a list of popular models that InvokeAI
    performs well with. However, you can add more weight files later on using
    the command-line client or the Web UI. See
    [Installing Models](INSTALLING_MODELS.md) for details.

    Note that the main Stable Diffusion weights file is protected by a license
    agreement that you must agree to in order to use. The script will list the
    steps you need to take to create an account on the official site that hosts
    the weights files, accept the agreement, and provide an access token that
    allows InvokeAI to legally download and install the weights files.

    If you have already downloaded the weights file(s) for another Stable
    Diffusion distribution, you may skip this step (by selecting "skip" when
    prompted) and configure InvokeAI to use the previously-downloaded files. The
    process for this is described in [Installing Models](INSTALLING_MODELS.md).
11. The script will now exit and you'll be ready to generate some images. Look
    for the directory `invokeai` installed in the location you chose at the
    beginning of the install session. Look for a shell script named `invoke.sh`
    (Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
    it or typing its name at the command-line:

    ```cmd
    C:\Documents\Linco> cd invokeai
    C:\Documents\Linco\invokeai> invoke.bat
    ```

    - The `invoke.bat` (`invoke.sh`) script will give you the choice of starting
      (1) the command-line interface, or (2) the web GUI. If you start the
      latter, you can load the user interface by pointing your browser at
      http://localhost:9090.

    - The script also offers you a third option labeled "open the developer
      console". If you choose this option, you will be dropped into a
      command-line interface in which you can run python commands directly,
      access developer tools, and launch InvokeAI with customized options.
12. You can launch InvokeAI with several different command-line arguments that
    customize its behavior. For example, you can change the location of the
    image output directory, or select your favorite sampler. See the
    [Command-Line Interface](../features/CLI.md) for a full list of the options.

    - To set defaults that will take effect every time you launch InvokeAI,
      use a text editor (e.g. Notepad) to edit the file
      `invokeai\invokeai.init`. It contains a variety of examples that you can
      follow to add and modify launch options.
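      A sketch of what `invokeai.init` might contain (the options shown are
      illustrative and hypothetical; consult the installed file for the real
      examples):

      ```
      # hypothetical sample; one launch option per line
      --web --host=0.0.0.0
      --outdir=D:\invoke-output
      --sampler=k_lms
      ```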
!!! warning "The `invokeai` directory contains the `invoke` application, its
|
||||
configuration files, the model weight files, and outputs of image generation.
|
||||
Once InvokeAI is installed, do not move or remove this directory."
|
||||
|
||||
## Troubleshooting

### _Package dependency conflicts_

If you have previously installed InvokeAI or another Stable Diffusion package,
the installer may occasionally pick up outdated libraries and either the
installer or `invoke` will fail with complaints about library conflicts. You can
address this by entering the `invokeai` directory and running `update.sh`, which
will bring InvokeAI up to date with the latest libraries.
### ldm from pypi

!!! warning

    Some users have tried to correct dependency problems by installing
    the `ldm` package from PyPi.org. Unfortunately this is an unrelated package that
    has nothing to do with the 'latent diffusion model' used by InvokeAI. Installing
    ldm will make matters worse. If you've installed ldm, uninstall it with
    `pip uninstall ldm`.
### Corrupted configuration file

Everything seems to install ok, but `invoke` complains of a corrupted
configuration file and goes back into the configuration process (asking you to
download models, etc), but this doesn't fix the problem.

This issue is often caused by a misconfigured configuration directive in the
`invokeai\invokeai.init` initialization file that contains startup settings. The
easiest way to fix the problem is to move the file out of the way and re-run
`configure_invokeai.py`. Enter the developer's console (option 3 of the launcher
script) and run this command:

```cmd
configure_invokeai.py --root=.
```

Note the dot (.) after `--root`. It is part of the command.
_If none of these maneuvers fixes the problem_ then please report the problem to
the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
assistance.

### Other problems

If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
make a request for help on the "bugs-and-support" channel of our
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
organization, but typically somebody will be available to help you within 24
hours, and often much sooner.
## Updating to newer versions

This distribution is changing rapidly, and we add new features on a daily basis.
To update to the latest released version (recommended), run the `update.sh`
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
release and re-run the `configure_invokeai` script to download any updated
models files that may be needed. You can also use this to add additional models
that you did not select at installation time.

You can now close the developer console and run `invoke` as before. If you get
complaints about missing models, then you may need to do the additional step of
running `configure_invokeai.py`. This happens relatively infrequently. To do
this, simply open up the developer's console again and type
`python scripts/configure_invokeai.py`.

You may also use the `update` script to install any selected version of
InvokeAI. From https://github.com/invoke-ai/InvokeAI, navigate to the zip file
link of the version you wish to install. You can find the zip links by going to
one of the release pages and looking for the **Assets** section at the
bottom. Alternatively, you can browse "branches" and "tags" at the top of the
big code directory on the InvokeAI welcome page. When you find the version you
want to install, go to the green "<> Code" button at the top, and copy the
"Download ZIP" link.

Now run `update.sh` (or `update.bat`) with the URL of the desired InvokeAI
version as its argument. For example, this will install the old 2.2.0 release:

```cmd
update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.0.zip
```
## Installing Python in Ubuntu

For reasons that are not entirely clear, installing the correct version of
Python can be a bit of a challenge on Ubuntu, Linux Mint, and other
Ubuntu-derived distributions.

In particular, Ubuntu version 20.04 LTS comes with an old version of Python,
does not come with the PIP package manager installed, and to make matters worse,
the `python` command points to Python2, not Python3.

Here is the quick recipe for bringing your system up to date:

```
sudo apt update
sudo apt install python3.9
sudo apt install python3-pip
cd /usr/bin
sudo ln -sf python3.9 python3
sudo ln -sf python3 python
```

You can still access older versions of Python by calling `python2`, `python3.8`,
etc.
!!! warning "For end users"

    We highly recommend installing InvokeAI locally using [these instructions](index.md).

!!! tip "For developers"

    For general use, install locally to leverage your machine's GPU.

!!! tip "For running on a cloud instance/service"

    Check out the [Running InvokeAI in the cloud with Docker](#running-invokeai-in-the-cloud-with-docker) section below.
## Why containers?

They provide a flexible, reliable way to build and deploy InvokeAI.

For development purposes it's fine. Once you're done with development tasks on your
laptop you can build for the target platform and architecture and deploy to
another environment with NVIDIA GPUs on-premises or in the cloud.
## Installation in a Linux container (desktop)

### Prerequisites

```Shell
./docker-build/run.sh "banana sushi" -Ak_lms -S42 -s10
```

This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps.

Find out more about available CLI-Parameters at [features/CLI.md](../../features/CLI/#arguments)

---
## Running InvokeAI in the cloud with Docker

We offer an optimized Ubuntu-based image that has been well-tested in cloud deployments. Note: it also works well locally on Linux x86_64 systems with an Nvidia GPU. It *may* also work on Windows under WSL2 and on Intel Mac (not tested).

An advantage of this method is that it does not need any local setup or additional dependencies.

See the `docker-build/Dockerfile.cloud` file to familiarize yourself with the image's content.

### Prerequisites

- a `docker` runtime
- `make` (optional but helps for convenience)
- Huggingface token to download models, or an existing InvokeAI runtime directory from a previous installation

Neither local Python nor any dependencies are required. If you don't have `make` (part of `build-essentials` on Ubuntu), or do not wish to install it, the commands from the `docker-build/Makefile` are readily adaptable to be executed directly.
### Building and running the image locally

1. Clone this repo and `cd docker-build`
1. `make build` - this will build the image. (This does *not* require a GPU-capable system).
1. _(skip this step if you already have a complete InvokeAI runtime directory)_
    - `make configure` (This does *not* require a GPU-capable system)
    - this will create a local cache of models and configs (a.k.a the _runtime dir_)
    - enter your Huggingface token when prompted
1. `make web`
1. Open the `http://localhost:9090` URL in your browser, and enjoy the banana sushi!

To use InvokeAI on the CLI, run `make cli`. To open a Bash shell in the container for arbitrary advanced use, `make shell`.
#### Building and running without `make`

(Feel free to adapt paths such as `${HOME}/invokeai` to your liking, and modify the CLI arguments as necessary).

!!! example "Build the image and configure the runtime directory"

    ```Shell
    cd docker-build

    DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..

    docker run --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/configure_invokeai.py"
    ```

!!! example "Run the web server"

    ```Shell
    docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai -p9090:9090 local/invokeai:latest
    ```

    Access the Web UI at http://localhost:9090

!!! example "Run the InvokeAI interactive CLI"

    ```Shell
    docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/invoke.py"
    ```
### Running the image in the cloud
|
||||
|
||||
This image works anywhere you can run a container with a mounted Docker volume. You may either build this image on a cloud instance, or build and push it to your Docker registry. To manually run this on a cloud instance (such as AWS EC2, GCP or Azure VM):
|
||||
|
||||
1. build this image either in the cloud (you'll need to pull the repo), or locally
|
||||
1. `docker tag` it as `your-registry/invokeai` and push to your registry (i.e. Dockerhub)
|
||||
1. `docker pull` it on your cloud instance
|
||||
1. configure the runtime directory as per above example, using `docker run ... configure_invokeai.py` script
|
||||
1. use either one of the `docker run` commands above, substituting the image name for your own image.
|
||||
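The following is a minimal sketch of that flow. The registry and image names are placeholders; substitute your own, and adapt the mount path as needed:

```Shell
docker tag local/invokeai:latest your-registry/invokeai:latest
docker push your-registry/invokeai:latest

# on the cloud instance:
docker pull your-registry/invokeai:latest
docker run --rm -it -v ${HOME}/invokeai:/mnt/invokeai your-registry/invokeai:latest -c "python scripts/configure_invokeai.py"
docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai -p9090:9090 your-registry/invokeai:latest
```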
To run this on Runpod, please refer to the following Runpod template: https://www.runpod.io/console/gpu-secure-cloud?template=vm19ukkycf (you need a Runpod subscription). When launching the template, feel free to set the image to pull your own build.

The template's `README` provides ample detail, but at a high level, the process is as follows (the two Docker commands involved are sketched after the list):

1. create a pod using this Docker image
1. ensure the pod has an `INVOKEAI_ROOT=<path_to_your_persistent_volume>` environment variable, and that it corresponds to the path of your pod's persistent volume mount
1. run the pod with `sleep infinity` as the Docker command
1. use Runpod's basic SSH to connect to the pod, and run the `python scripts/configure_invokeai.py` script
1. stop the pod, and change the Docker command to `python scripts/invoke.py --web --host 0.0.0.0`
1. run the pod again, connect to your pod on HTTP port 9090, and enjoy the banana sushi!
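For reference, these are the two Docker commands used across those steps; the rest of the configuration happens in the Runpod UI:

```Shell
# step 3: run the pod with this command so it stays alive while you configure it
sleep infinity

# step 5: after configuring, change the pod's Docker command to serve the web UI
python scripts/invoke.py --web --host 0.0.0.0
```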
Running on other cloud providers such as Vast.ai will likely work in a similar fashion.

---

!!! warning "Deprecated"

    From here on you will find the previous Docker docs, which will still
@@ -218,12 +135,12 @@ Running on other cloud providers such as Vast.ai will likely work in a similar f

If you're on a **Linux container** the `invoke` script is **automatically
started** and the output dir set to the Docker volume you created earlier.

If you're **directly on macOS follow these startup instructions**.
With the Conda environment activated (`conda activate ldm`), run the interactive
interface that combines the functionality of the original scripts `txt2img` and
`img2img`:
Use the more accurate but VRAM-intensive full precision math because
half-precision requires autocast and won't work.
By default the images are saved in `outputs/img-samples/`.

```Shell
@@ -240,8 +157,8 @@ invoke> q

### Text to Image

For quick (but bad) image results test with 5 steps (default 50) and 1 sample
image. This will let you know that everything is set up correctly.
Then increase steps to 100 or more for good (but slower) results.
The prompt can be in quotes or not.

```Shell
@@ -255,8 +172,8 @@ You'll need to experiment to see if face restoration is making it better or
worse for your specific prompt.

If you're on a container the output is set to the Docker volume. You can copy it
wherever you want.
You can download it from the Docker Desktop app, Volumes, my-vol, data.
Or you can copy it from your Mac terminal. Keep in mind `docker cp` can't expand
`*.png` so you'll need to specify the image file name.
@@ -1,8 +1,8 @@
---
-title: InvokeAI Binary Installer
+title: InvokeAI Installer
---

-The InvokeAI binary installer is a shell script that will install InvokeAI onto a stock
+The InvokeAI installer is a shell script that will install InvokeAI onto a stock
computer running recent versions of Linux, MacOSX or Windows. It will leave you
with a version that runs a stable version of InvokeAI. When a new version of
InvokeAI is released, you will download and reinstall the new version.
@@ -36,7 +36,7 @@ recommended model weights files.

1. Download the
   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest) of
-   InvokeAI's installer for your platform. Look for a file named `InvokeAI-binary-<your platform>.zip`
+   InvokeAI's installer for your platform

2. Place the downloaded package someplace where you have plenty of HDD space,
   and have full permissions (i.e. `~/` on Lin/Mac; your home folder on Windows)
@@ -2,10 +2,12 @@
title: Running InvokeAI on Google Colab using a Jupyter Notebook
---

+# THIS NEEDS TO BE FLESHED OUT

## Introduction

We have a [Jupyter
-notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)
+notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb)
with cell-by-cell installation steps. It will download the code in
this repo as one of the steps, so instead of cloning this repo, simply
download the notebook from the link above and load it up in VSCode
@@ -14,19 +16,12 @@ start running the cells one-by-one.

!!! Note "you will need NVIDIA drivers, Python 3.10, and Git installed beforehand"

-## Running Online On Google Colaboratory
-[](https://colab.research.google.com/github/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)
+## Walkthrough

-## Running Locally (Cloning)
+## Updating to newer versions

-1. Install the Jupyter Notebook python library (one-time):
-       pip install jupyter
+### Updating the stable version

-2. Clone the InvokeAI repository:
-       git clone https://github.com/invoke-ai/InvokeAI.git
-       cd invoke-ai
-3. Create a virtual environment using conda:
-       conda create -n invoke jupyter
-4. Activate the environment and start the Jupyter notebook:
-       conda activate invoke
-       jupyter notebook
+### Updating to the development version

## Troubleshooting
@@ -8,7 +8,7 @@ title: Manual Installation

!!! warning "This is for advanced Users"

-    who are already experienced with using conda or pip
+    who are already expirienced with using conda or pip

## Introduction
@@ -155,10 +155,10 @@ command-line completion.
   process for this is described in [here](INSTALLING_MODELS.md).

   ```bash
-   python scripts/configure_invokeai.py
+   python scripts/preload_models.py
   ```

-   The script `configure_invokeai.py` will interactively guide you through the
+   The script `preload_models.py` will interactively guide you through the
   process of downloading and installing the weights files needed for InvokeAI.
   Note that the main Stable Diffusion weights file is protected by a license
   agreement that you have to agree to. The script will list the steps you need
@@ -220,7 +220,7 @@ greatest version, launch the Anaconda window, enter `InvokeAI` and type:

```bash
git pull
conda env update
-python scripts/configure_invokeai.py --no-interactive #optional
+python scripts/preload_models.py --no-interactive #optional
```

This will bring your local copy into sync with the remote one. The last step may
@@ -359,7 +359,7 @@ brew install llvm

If brew config has Clang installed, update to the latest llvm and try creating the environment again.

-#### `configure_invokeai.py` or `invoke.py` crashes at an early stage
+#### `preload_models.py` or `invoke.py` crashes at an early stage

This is usually due to an incomplete or corrupted Conda install. Make sure you
have linked to the correct environment file and run `conda update` again.
@@ -1,115 +0,0 @@
---
title: Installing PyPatchMatch
---

# :octicons-paintbrush-16: Installing PyPatchMatch

pypatchmatch is a Python module for inpainting images. It is not
needed to run InvokeAI, but it greatly improves the quality of
inpainting and outpainting and is recommended.

Unfortunately, it is a C++ optimized module and installation
can be somewhat challenging. This guide leads you through the steps.

## Windows

You're in luck! On Windows platforms PyPatchMatch will install
automatically with no extra intervention.

## Macintosh

PyPatchMatch is not currently supported, but the team is working on
it.

## Linux

Prior to installing PyPatchMatch, you need to take the following
steps:

### Debian Based Distros

1. Install the `build-essential` tools:

    ```
    sudo apt update
    sudo apt install build-essential
    ```

2. Install `opencv`:

    ```
    sudo apt install python3-opencv libopencv-dev
    ```

3. Fix the naming of the `opencv` package configuration file:

    ```
    cd /usr/lib/x86_64-linux-gnu/pkgconfig/
    ln -sf opencv4.pc opencv.pc
    ```

4. Activate the environment you use for invokeai, either with
   `conda` or with a virtual environment.

5. Do a "develop" install of pypatchmatch:

    ```
    pip install "git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch"
    ```

6. Confirm that pypatchmatch is installed.
   At the command-line prompt enter `python`, and
   then at the `>>>` line type `from patchmatch import patch_match`:
   It should look like the following:

    ```
    Python 3.9.5 (default, Nov 23 2021, 15:27:38)
    [GCC 9.3.0] on linux
    Type "help", "copyright", "credits" or "license" for more information.
    >>> from patchmatch import patch_match
    Compiling and loading c extensions from "/home/lstein/Projects/InvokeAI/.invokeai-env/src/pypatchmatch/patchmatch".
    rm -rf build/obj libpatchmatch.so
    mkdir: created directory 'build/obj'
    mkdir: created directory 'build/obj/csrc/'
    [dep] csrc/masked_image.cpp ...
    [dep] csrc/nnf.cpp ...
    [dep] csrc/inpaint.cpp ...
    [dep] csrc/pyinterface.cpp ...
    [CC] csrc/pyinterface.cpp ...
    [CC] csrc/inpaint.cpp ...
    [CC] csrc/nnf.cpp ...
    [CC] csrc/masked_image.cpp ...
    [link] libpatchmatch.so ...
    ```

### Arch Based Distros

1. Install the `base-devel` package:

    ```
    sudo pacman -Syu
    sudo pacman -S --needed base-devel
    ```

2. Install `opencv`:

    ```
    sudo pacman -S opencv
    ```

    or for CUDA support

    ```
    sudo pacman -S opencv-cuda
    ```

3. Fix the naming of the `opencv` package configuration file:

    ```
    cd /usr/lib/pkgconfig/
    ln -sf opencv4.pc opencv.pc
    ```

**Next, follow steps 4-6 from the Debian section above.**

If you see no errors, then you're ready to go!
@@ -10,6 +10,7 @@ The source installer is a shell script that attempts to automate every step
needed to install and run InvokeAI on a stock computer running recent versions
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
version of InvokeAI with the option to upgrade to experimental versions later.
+It is not as foolproof as the [InvokeAI installer](INSTALL_INVOKE.md)

Before you begin, make sure that you meet the
[hardware requirements](index.md#Hardware_Requirements) and have the appropriate
@@ -29,9 +30,9 @@ off the process.
   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
   look for a series of files named:

-   - [invokeAI-src-installer-2.2.3-mac.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/invokeAI-src-installer-2.2.3-mac.zip)
-   - [invokeAI-src-installer-2.2.3-windows.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/invokeAI-src-installer-2.2.3-windows.zip)
-   - [invokeAI-src-installer-2.2.3-linux.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/invokeAI-src-installer-2.2.3-linux.zip)
+   - invokeAI-src-installer-mac.zip
+   - invokeAI-src-installer-windows.zip
+   - invokeAI-src-installer-linux.zip

   Download the one that is appropriate for your operating system.
@@ -50,44 +51,23 @@ off the process.
      inflating: invokeAI\readme.txt
   ```

-3. If you are a macOS user, you may need to install the Xcode command line tools.
-   These are a set of tools that are needed to run certain applications in a Terminal,
-   including InvokeAI. This package is provided directly by Apple.
-
-   To install, open a terminal window and run `xcode-select --install`. You will get
-   a macOS system popup guiding you through the install. If you already have them
-   installed, you will instead see some output in the Terminal advising you that the
-   tools are already installed.
-
-   More information can be found here:
-   https://www.freecodecamp.org/news/install-xcode-command-line-tools/
-
-4. If you are using a desktop GUI, double-click the installer file. It will be
+3. If you are using a desktop GUI, double-click the installer file. It will be
   named `install.bat` on Windows systems and `install.sh` on Linux and
   Macintosh systems.

-5. Alternatively, from the command line, run the shell script or .bat file:
+4. Alternatively, form the command line, run the shell script or .bat file:

   ```cmd
   C:\Documents\Linco> cd invokeAI
   C:\Documents\Linco\invokeAI> install.bat
   ```

-6. Sit back and let the install script work. It will install various binary
+5. Sit back and let the install script work. It will install various binary
   requirements including Conda, Git and Python, then download the current
   InvokeAI code and install it along with its dependencies.

-   Be aware that some of the library download and install steps take a long time.
-   In particular, the `pytorch` package is quite large and often appears to get
-   "stuck" at 99.9%. Similarly, the `pip installing requirements` step may
-   appear to hang. Have patience and the installation step will eventually
-   resume. However, there are occasions when the library install does
-   legitimately get stuck. If you have been waiting for more than ten minutes
-   and nothing is happening, you can interrupt the script with ^C. You may restart
-   it and it will pick up where it left off.
-
-7. After installation completes, the installer will launch a script called
-   `configure_invokeai.py`, which will guide you through the first-time process of
+6. After installation completes, the installer will launch a script called
+   `preload_models.py`, which will guide you through the first-time process of
   selecting one or more Stable Diffusion model weights files, downloading and
   configuring them.
@@ -102,7 +82,7 @@ off the process.
   prompted) and configure InvokeAI to use the previously-downloaded files. The
   process for this is described in [Installing Models](INSTALLING_MODELS.md).

-8. The script will now exit and you'll be ready to generate some images. The
+7. The script will now exit and you'll be ready to generate some images. The
   invokeAI directory will contain numerous files. Look for a shell script
   named `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows). Launch the script
   by double-clicking it or typing its name at the command-line:
@@ -130,71 +110,6 @@ python scripts/invoke.py --web --max_load_models=3 \

These options are described in detail in the
[Command-Line Interface](../features/CLI.md) documentation.

## Troubleshooting

_Package dependency conflicts._ If you have previously installed
InvokeAI or another Stable Diffusion package, the installer may
occasionally pick up outdated libraries and either the installer or
`invoke` will fail with complaints about library conflicts. There are
two steps you can take to clear this problem. Both of these are done
from within the "developer's console", which you can get to by
launching `invoke.sh` (or `invoke.bat`) and selecting launch option #3:

1. Remove the previous `invokeai` environment completely. From within
   the developer's console, give the command `conda env remove -n invokeai`.
   This will delete previous files installed by `invoke`.

   Then exit from the developer's console and launch the script
   `update.sh` (or `update.bat`). This will download the most recent
   InvokeAI (including bug fixes) and reinstall the environment.
   You should then be able to run `invoke.sh`/`invoke.bat`.

2. If this doesn't work, you can try cleaning your system's conda
   cache. This is slightly more extreme, but won't interfere with
   any other python-based programs installed on your computer.
   From the developer's console, run the command `conda clean -a`
   and answer "yes" to all prompts.

   After this is done, run `update.sh` and try again as before. A
   condensed sketch of both steps appears below.
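The two recovery steps above, condensed into a sketch (run from the developer's console, i.e. launch option 3 of `invoke.sh`/`invoke.bat`; use `update.bat` on Windows):

```bash
# step 1: remove the stale environment, then reinstall via the updater
conda env remove -n invokeai
./update.sh

# step 2, only if step 1 didn't help: clear conda's package caches
conda clean -a   # answer "yes" to all prompts
./update.sh
```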
_"Corrupted configuration file."__ Everything seems to install ok, but
|
||||
`invoke` complains of a corrupted configuration file and goes calls
|
||||
`configure_invokeai.py` to fix, but this doesn't fix the problem.
|
||||
|
||||
This issue is often caused by a misconfigured configuration directive
|
||||
in the `.invokeai` initialization file that contains startup settings.
|
||||
This can be corrected by fixing the offending line.
|
||||
|
||||
First find `.invokeai`. It is a small text file located in your home
|
||||
directory, `~/.invokeai` on Mac and Linux systems, and `C:\Users\*your
|
||||
name*\.invokeai` on Windows systems. Open it with a text editor
|
||||
(e.g. Notepad on Windows, TextEdit on Macs, or `nano` on Linux)
|
||||
and look for the lines starting with `--root` and `--outdir`.
|
||||
|
||||
An example is here:
|
||||
|
||||
```cmd
|
||||
--root="/home/lstein/invokeai"
|
||||
--outdir="/home/lstein/invokeai/outputs"
|
||||
```
|
||||
|
||||
There should not be whitespace before or after the directory paths,
|
||||
and the paths should not end with slashes:
|
||||
|
||||
```cmd
|
||||
--root="/home/lstein/invokeai " # wrong! no whitespace here
|
||||
--root="/home\lstein\invokeai\" # wrong! shouldn't end in a slash
|
||||
```
|
||||
|
||||
Fix the problem with your text editor and save as a **plain text**
|
||||
file. This should clear the issue.
|
||||
|
||||
_If none of these maneuvers fixes the problem_ then please report the
|
||||
problem to the [InvokeAI
|
||||
Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
|
||||
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive assistance.
|
||||
|
||||
## Updating to newer versions
|
||||
|
||||
This section describes how to update InvokeAI to new versions of the software.
|
||||
@@ -204,15 +119,31 @@ This section describes how to update InvokeAI to new versions of the software.

This distribution is changing rapidly, and we add new features on a daily basis.
To update to the latest released version (recommended), run the `update.sh`
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
-release and re-run the `configure_invokeai` script to download any updated models
+release and re-run the `preload_models` script to download any updated models
files that may be needed. You can also use this to add additional models that
you did not select at installation time.

### Updating to the development version

There may be times that there is a feature in the `development` branch of
InvokeAI that you'd like to take advantage of. Or perhaps there is a branch that
corrects an annoying bug. To do this, you will use the developer's console.

From within the invokeAI directory, run the command `invoke.sh` (Linux/Mac) or
`invoke.bat` (Windows) and select option (3) to open the developer's console.
Then run the following commands to get the `development` branch:

```bash
git checkout development
git pull
conda env update
```

You can now close the developer console and run `invoke` as before. If you get
complaints about missing models, then you may need to do the additional step of
-running `configure_invokeai.py`. This happens relatively infrequently. To do this,
+running `preload_models.py`. This happens relatively infrequently. To do this,
simply open up the developer's console again and type
-`python scripts/configure_invokeai.py`.
+`python scripts/preload_models.py`.

## Troubleshooting
@@ -5,30 +5,58 @@ title: Overview

We offer several ways to install InvokeAI, each one suited to your
experience and preferences.

-1. [Automated Installer](INSTALL_AUTOMATED.md)
+1. [InvokeAI installer](INSTALL_INVOKE.md)

-   This is a script that will install all of InvokeAI's essential
-   third party libraries and InvokeAI itself. It includes access to a
-   "developer console" which will help us debug problems with you and
-   give you access to experimental features.
+   This is an installer script that installs InvokeAI and all the
+   third party libraries it depends on. When a new version of
+   InvokeAI is released, you will download and reinstall the new
+   version.

-2. [Manual Installation](INSTALL_MANUAL.md)
+   This installer is designed for people who want the system to "just
+   work", don't have an interest in tinkering with it, and do not
+   care about upgrading to unreleased experimental features.
+
+   **Important Caveats**
+   - This script does not support AMD GPUs. For Linux AMD support,
+     please use the manual or source code installer methods.
+   - This script has difficulty on some Macintosh machines
+     that have previously been used for Python development due to
+     conflicting development tools versions. Mac developers may wish
+     to try the source code installer or one of the manual methods instead.
+
+2. [Source code installer](INSTALL_SOURCE.md)
+
+   This is a script that will install InvokeAI and all its essential
+   third party libraries. In contrast to the previous installer, it
+   includes access to a "developer console" which will allow you to
+   access experimental features on the development branch.
+
+   This method is recommended for individuals who wish to stay
+   on the cutting edge of InvokeAI development and are not afraid
+   of occasional breakage.
+
+3. [Manual Installation](INSTALL_MANUAL.md)

   In this method you will manually run the commands needed to install
   InvokeAI and its dependencies. We offer two recipes: one suited to
   those who prefer the `conda` tool, and one suited to those who prefer
-   `pip` and Python virtual environments. In our hands the pip install
-   is faster and more reliable, but your mileage may vary.
+   `pip` and Python virtual environments.

   This method is recommended for users who have previously used `conda`
   or `pip` in the past, developers, and anyone who wishes to remain on
   the cutting edge of future InvokeAI development and is willing to put
   up with occasional glitches and breakage.

-3. [Docker Installation](INSTALL_DOCKER.md)
+4. [Docker Installation](INSTALL_DOCKER.md)

   We also offer a method for creating Docker containers containing
   InvokeAI and its dependencies. This method is recommended for
   individuals with experience with Docker containers and understand
   the pluses and minuses of a container-based install.

+5. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md)
+
+   This method is suitable for running InvokeAI on a Google Colab
+   account. It is recommended for individuals who have previously
+   worked on the Colab and are comfortable with the Jupyter notebook
+   environment.
@@ -69,7 +69,7 @@ title: Manual Installation, Linux
   machine-learning models:

   ```bash
-   (invokeai) ~/InvokeAI$ python3 scripts/configure_invokeai.py
+   (invokeai) ~/InvokeAI$ python3 scripts/preload_models.py
   ```

   !!! note
@@ -79,7 +79,7 @@ title: Manual Installation, Linux
      and obtaining an access token for downloading. It will then download and
      install the weights files for you.

-      Please look [here](../INSTALL_MANUAL.md) for a manual process for doing
+      Please look [here](INSTALLING_MODELS.md) for a manual process for doing
      the same thing.

7. Start generating images!
@@ -112,7 +112,7 @@ title: Manual Installation, Linux
   To use an alternative model you may invoke the `!switch` command in
   the CLI, or pass `--model <model_name>` during `invoke.py` launch for
   either the CLI or the Web UI. See [Command Line
-   Client](../../features/CLI.md#model-selection-and-importation). The
+   Client](../features/CLI.md#model-selection-and-importation). The
   model names are defined in `configs/models.yaml`.

8. Subsequently, to relaunch the script, be sure to run "conda activate
@@ -111,7 +111,7 @@ will do our best to help.

!!! todo "Download the model weight files"

-    The `configure_invokeai.py` script downloads and installs the model weight
+    The `preload_models.py` script downloads and installs the model weight
    files for you. It will lead you through the process of getting a Hugging Face
    account, accepting the Stable Diffusion model weight license agreement, and
    creating a download token:
@@ -119,7 +119,7 @@ will do our best to help.

    ```bash
    # This will take some time, depending on the speed of your internet connection
    # and will consume about 10GB of space
-    python scripts/configure_invokeai.py
+    python scripts/preload_models.py
    ```

!!! todo "Run InvokeAI!"
@@ -150,7 +150,7 @@ will do our best to help.
    To use an alternative model you may invoke the `!switch` command in
    the CLI, or pass `--model <model_name>` during `invoke.py` launch for
    either the CLI or the Web UI. See [Command Line
-    Client](../../features/CLI.md#model-selection-and-importation). The
+    Client](../features/CLI.md#model-selection-and-importation). The
    model names are defined in `configs/models.yaml`.

---
@@ -220,8 +220,8 @@ There are several causes of these errors:
   with "(invokeai)" then you activated it. If it begins with "(base)" or
   something else you haven't.

-2. You might've run `./scripts/configure_invokeai.py` or `./scripts/invoke.py`
-   instead of `python ./scripts/configure_invokeai.py` or
+2. You might've run `./scripts/preload_models.py` or `./scripts/invoke.py`
+   instead of `python ./scripts/preload_models.py` or
   `python ./scripts/invoke.py`. The cause of this error is long so it's below.

<!-- I could not find out where the error is, otherwise would have marked it as a footnote -->
@@ -359,7 +359,7 @@ python ./scripts/txt2img.py \
### OSError: Can't load tokenizer for 'openai/clip-vit-large-patch14'

```bash
-python scripts/configure_invokeai.py
+python scripts/preload_models.py
```

---
@@ -7,7 +7,7 @@ title: Manual Installation, Windows

## **Notebook install (semi-automated)**

We have a
-[Jupyter notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)
+[Jupyter notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb)
with cell-by-cell installation steps. It will download the code in this repo as
one of the steps, so instead of cloning this repo, simply download the notebook
from the link above and load it up in VSCode (with the appropriate extensions
@@ -65,7 +65,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan

7. Load the big stable diffusion weights files and a couple of smaller machine-learning models:

   ```bash
-   python scripts/configure_invokeai.py
+   python scripts/preload_models.py
   ```

   !!! note
@@ -75,7 +75,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan
      obtaining an access token for downloading. It will then download and install the
      weights files for you.

-      Please look [here](../INSTALL_MANUAL.md) for a manual process for doing the
+      Please look [here](INSTALLING_MODELS.md) for a manual process for doing the
      same thing.

8. Start generating images!
@@ -108,7 +108,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan
   To use an alternative model you may invoke the `!switch` command in
   the CLI, or pass `--model <model_name>` during `invoke.py` launch for
   either the CLI or the Web UI. See [Command Line
-   Client](../../features/CLI.md#model-selection-and-importation). The
+   Client](../features/CLI.md#model-selection-and-importation). The
   model names are defined in `configs/models.yaml`.

9. Subsequently, to relaunch the script, first activate the Anaconda
@@ -15,16 +15,16 @@ We thank them for all of their time and hard work.

## **Current core team**

-* @lstein (Lincoln Stein) - Co-maintainer
-* @blessedcoolant - Co-maintainer
-* @hipsterusername (Kent Keirsey) - Product Manager
-* @psychedelicious - Web Team Leader
-* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
-* @damian0815 - Attention Systems and Gameplay Engineer
-* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
-* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
-* @tildebyte - general gadfly and resident (self-appointed) know-it-all
-* @keturn - Lead for Diffusers port
+* lstein (Lincoln Stein) - Co-maintainer
+* blessedcoolant - Co-maintainer
+* hipsterusername (Kent Keirsey) - Product Manager
+* psychedelicious - Web Team Leader
+* Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
+* damian0815 - Attention Systems and Gameplay Engineer
+* mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
+* Netsvetaev (Artur Netsvetaev) - UI/UX Developer
+* tildebyte - general gadfly and resident (self-appointed) know-it-all
+* keturn - Lead for Diffusers port

## **Contributions by**
@@ -42,5 +42,5 @@ dependencies:
   - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
   - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
   - git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
-  - git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
+  - -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
   - -e .
@@ -44,5 +44,5 @@ dependencies:
   - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
   - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
   - git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
-  - git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
+  - -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
   - -e .
@@ -43,5 +43,5 @@ dependencies:
   - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
   - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
   - git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
-  - git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
+  - -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
   - -e .
@@ -59,7 +59,7 @@ dependencies:
   - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
   - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
   - git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
-  - git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
+  - -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
   - -e .
 variables:
   PYTORCH_ENABLE_MPS_FALLBACK: 1
@@ -13,6 +13,7 @@ dependencies:
   - cudatoolkit=11.6
   - pip:
     - albumentations==0.4.3
+    - basicsr==1.4.1
     - dependency_injector==4.40.0
     - diffusers==0.6.0
     - einops==0.3.0
@@ -43,5 +44,5 @@ dependencies:
   - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
   - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
   - git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.1#egg=gfpgan
-  - git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
+  - -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
   - -e .
@@ -1,16 +1,15 @@
 # pip will resolve the version which matches torch
 albumentations
 dependency_injector==4.40.0
-diffusers==0.10.*
+diffusers
 einops
 eventlet
 facexlib
 flask==2.1.3
 flask_cors==3.0.10
 flask_socketio==5.3.0
-flaskwebgui==1.0.3
+flaskwebgui==0.3.7
 getpass_asterisk
-gfpgan==1.3.8
 huggingface-hub
 imageio
 imageio-ffmpeg
@@ -18,7 +17,6 @@ kornia
 numpy
 omegaconf
 opencv-python
-picklescan
 pillow
 pip>=22
 pudb
@@ -33,8 +31,11 @@ taming-transformers-rom1504
 test-tube>=0.7.5
 torch-fidelity
 torchmetrics
-transformers==4.25.*
-https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip#egg=k-diffusion
-https://github.com/invoke-ai/PyPatchMatch/archive/refs/tags/0.1.4.zip#egg=pypatchmatch
-https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip#egg=clip
-https://github.com/invoke-ai/clipseg/archive/relaxed-python-requirement.zip#egg=clipseg
+transformers==4.21.*
+picklescan
+git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.1#egg=gfpgan ; platform_system == 'Windows'
+git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan ; platform_system != 'Windows'
+git+https://github.com/openai/CLIP.git@main#egg=clip
+git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
+git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
+git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
@@ -1,5 +1,2 @@
---extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
 -r environments-and-requirements/requirements-base.txt
-torch
-torchvision
 -e .
@@ -1,6 +1,7 @@
 -r environments-and-requirements/requirements-base.txt
 # Get hardware-appropriate torch/torchvision
 --extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
+basicsr==1.4.1
 torch==1.12.1
 torchvision==0.13.1
 -e .
48   frontend/dist/assets/index-legacy-8e84772c.js (vendored): file diff suppressed because one or more lines are too long
1    frontend/dist/assets/index.81f1c71c.css (vendored): file diff suppressed because one or more lines are too long
1    frontend/dist/assets/index.c609c0c8.css (vendored, new file): file diff suppressed because one or more lines are too long
623  frontend/dist/assets/index.d864890e.js (vendored): file diff suppressed because one or more lines are too long
623  frontend/dist/assets/index.faf4c870.js (vendored, new file): file diff suppressed because one or more lines are too long
1    frontend/dist/assets/polyfills.1ff60148.js (vendored): file diff suppressed because one or more lines are too long
11   frontend/dist/index.html (vendored)
@@ -2,24 +2,17 @@
 <html lang="en">

   <head>
-    <script type="module" crossorigin src="./assets/polyfills.1ff60148.js"></script>

     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>InvokeAI - A Stable Diffusion Toolkit</title>
     <link rel="shortcut icon" type="icon" href="./assets/favicon.0d253ced.ico" />
-    <script type="module" crossorigin src="./assets/index.d864890e.js"></script>
-    <link rel="stylesheet" href="./assets/index.81f1c71c.css">
-    <script type="module">try{import.meta.url;import("_").catch(()=>1);}catch(e){}window.__vite_is_modern_browser=true;</script>
-    <script type="module">!function(){if(window.__vite_is_modern_browser)return;console.warn("vite: loading legacy build because dynamic import or import.meta.url is unsupported, syntax error above should be ignored");var e=document.getElementById("vite-legacy-polyfill"),n=document.createElement("script");n.src=e.src,n.onload=function(){System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))},document.body.appendChild(n)}();</script>
+    <script type="module" crossorigin src="./assets/index.faf4c870.js"></script>
+    <link rel="stylesheet" href="./assets/index.c609c0c8.css">
   </head>

   <body>
     <div id="root"></div>

-    <script nomodule>!function(){var e=document,t=e.createElement("script");if(!("noModule"in t)&&"onbeforeload"in t){var n=!1;e.addEventListener("beforeload",(function(e){if(e.target===t)n=!0;else if(!e.target.hasAttribute("nomodule")||!n)return;e.preventDefault()}),!0),t.type="module",t.src=".",e.head.appendChild(t),t.remove()}}();</script>
-    <script nomodule crossorigin id="vite-legacy-polyfill" src="./assets/polyfills-legacy-dde3a68a.js"></script>
-    <script nomodule crossorigin id="vite-legacy-entry" data-src="./assets/index-legacy-8e84772c.js">System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))</script>
   </body>

 </html>
@@ -53,7 +53,6 @@
     "@types/react-transition-group": "^4.4.5",
     "@typescript-eslint/eslint-plugin": "^5.36.2",
     "@typescript-eslint/parser": "^5.36.2",
-    "@vitejs/plugin-legacy": "^3.0.1",
     "@vitejs/plugin-react": "^2.0.1",
     "eslint": "^8.23.0",
     "eslint-plugin-prettier": "^4.2.1",
@@ -61,7 +60,6 @@
     "patch-package": "^6.5.0",
     "postinstall-postinstall": "^2.1.0",
     "sass": "^1.55.0",
-    "terser": "^5.16.1",
     "tsc-watch": "^5.0.3",
     "typescript": "^4.6.4",
     "vite": "^3.0.7",
@@ -42,6 +42,7 @@ const makeSocketIOEmitters = (
       options: optionsState,
       system: systemState,
       canvas: canvasState,
+      gallery: galleryState,
     } = state;

     const frontendToBackendParametersConfig: FrontendToBackendParametersConfig =
@@ -54,6 +55,13 @@ const makeSocketIOEmitters = (

       dispatch(generationRequested());

+      if (!['txt2img', 'img2img'].includes(generationMode)) {
+        if (!galleryState.currentImage?.url) return;
+
+        frontendToBackendParametersConfig.imageToProcessUrl =
+          galleryState.currentImage.url;
+      }
+
       const { generationParameters, esrganParameters, facetoolParameters } =
         frontendToBackendParameters(frontendToBackendParametersConfig);
@@ -30,7 +30,13 @@ export const frontendToBackendParameters = (
 ): { [key: string]: any } => {
   const canvasBaseLayer = getCanvasBaseLayer();

-  const { generationMode, optionsState, canvasState, systemState } = config;
+  const {
+    generationMode,
+    optionsState,
+    canvasState,
+    systemState,
+    imageToProcessUrl,
+  } = config;

   const {
     cfgScale,
@@ -158,6 +164,7 @@

     generationParameters.fit = false;

+    generationParameters.init_img = imageToProcessUrl;
     generationParameters.strength = img2imgStrength;

     generationParameters.invert_mask = shouldPreserveMaskedArea;
@@ -62,7 +62,7 @@ const PromptInput = () => {
       <Textarea
         id="prompt"
         name="prompt"
-        placeholder="Type prompt here. [negative tokens], (upweight)++, (downweight)--, swap and blend are available (see docs)"
+        placeholder="I'm dreaming of..."
         size={'lg'}
         value={prompt}
         onChange={handleChangePrompt}
@@ -1,159 +0,0 @@
@media (max-width: 600px) {
  #root{
    .app-content{
      padding: 5px;
      .site-header {
        position: fixed;
        display: flex;
        height: 100px;
        z-index: 1;
        .site-header-left-side{
          position: absolute;
          display: flex;
          min-width: 145px;
          float: left;
          padding-left: 0;
        }
        .site-header-right-side{
          display: grid;
          grid-template-columns: 1fr 1fr 1fr 1fr 1fr 1fr;
          grid-template-rows: 25px 25px 25px;
          grid-template-areas: 'logoSpace logoSpace logoSpace sampler sampler sampler'
                               'status status status status status status'
                               'btn1 btn2 btn3 btn4 btn5 btn6';
          row-gap: 15px;
          .chakra-popover__popper{
            grid-area: logoSpace;
          }
          > :nth-child(1).chakra-text{
            grid-area: status;
            width: 100%;
            display: flex;
            justify-content: center;
          }
          > :nth-child(2){
            grid-area: sampler;
            display: flex;
            justify-content: center;
            align-items: center;
            select{
              width: 185px;
              margin-top: 10px;
            }
            .chakra-select__icon-wrapper{
              right:10px;
              svg{
                margin-top: 10px;
              }
            }
          }
          > :nth-child(3){
            grid-area: btn1;
          }
          > :nth-child(4){
            grid-area: btn2;
          }
          > :nth-child(6){
            grid-area: btn3;
          }
          > :nth-child(7){
            grid-area: btn4;
          }
          > :nth-child(8){
            grid-area: btn5;
          }
          > :nth-child(9){
            grid-area: btn6;
          }
        }
      }
      .app-tabs{
        position: fixed;
        display: flex;
        flex-direction: column;
        row-gap: 15px;
        max-width: 100%;
        overflow: hidden;
        margin-top: 120px;
        .app-tabs-list{
          display: flex;
          justify-content: space-between;
        }
        .app-tabs-panels{
          overflow: hidden;
          overflow-y: scroll;
          .workarea-main{
            display: grid;
            grid-template-areas: 'workarea'
                                 'options'
                                 'gallery';
            row-gap: 15px;
            .options-panel-wrapper{
              grid-area: options;
              width: 100%;
              max-width: 100%;
              height: inherit;
              overflow: inherit;
              padding: 0 10px;
              .main-options-row{
                max-width: 100%;
              }
              .advanced-settings-item{
                max-width: 100%;
              }
            }
            .workarea-children-wrapper{
              grid-area: workarea;
              .workarea-split-view{
                display: flex;
                flex-direction: column;
              }
              .current-image-options{
                column-gap: 3px;
              }
              .text-to-image-area{
                padding: 0;
              }
              .current-image-preview {
                height: 430px;
              }

              // image 2 image
              .image-upload-button {
                row-gap: 10px;
                padding: 5px;
                svg {
                  width: 2rem;
                  height: 2rem;
                  margin-top: 10px;
                }
              }

              // Canvas Painting
              .inpainting-settings{
                display: flex;
                flex-wrap: wrap;
                row-gap: 10px;
              }
              .inpainting-canvas-area{
                .konvajs-content{
                  height: 400px !important;
                }
              }
            }
            .image-gallery-wrapper{
              grid-area: gallery;
              min-height: 400px;
              .image-gallery-popup{
                width: 100% !important;
                max-width: 100% !important;
              }
            }
          }
        }
      }
    }
  }
}
@@ -1,4 +1,3 @@
 @forward './Shared';
 @forward './Buttons';
 @forward './Variables';
-@forward './Responsive';
@@ -2,20 +2,12 @@ import { defineConfig } from 'vite';
 import react from '@vitejs/plugin-react';
 import eslint from 'vite-plugin-eslint';
 import tsconfigPaths from 'vite-tsconfig-paths';
-import legacy from '@vitejs/plugin-legacy';

 // https://vitejs.dev/config/
 export default defineConfig(({ mode }) => {
   const common = {
     base: '',
-    plugins: [
-      react(),
-      eslint(),
-      tsconfigPaths(),
-      legacy({
-        modernPolyfills: ['es.array.find-last'],
-      }),
-    ],
+    plugins: [react(), eslint(), tsconfigPaths()],
     server: {
       // Proxy HTTP requests to the flask server
       proxy: {
@@ -43,11 +35,7 @@ export default defineConfig(({ mode }) => {
       },
     },
     build: {
-      /**
-       * We need to polyfill for Array.prototype.findLast(); the polyfill plugin above
-       * overrides any target specified here.
-       */
-      // target: 'esnext',
+      target: 'esnext',
       chunkSizeWarningLimit: 1500, // we don't really care about chunk size
     },
   };
@@ -213,11 +213,6 @@
   dependencies:
     regenerator-runtime "^0.13.10"

-"@babel/standalone@^7.20.6":
-  version "7.20.6"
-  resolved "https://registry.yarnpkg.com/@babel/standalone/-/standalone-7.20.6.tgz#7deb7ad244176414c3cbde020aad0607afdbe2fe"
-  integrity sha512-u5at/CbBLETf7kx2LOY4XdhseD79Y099WZKAOMXeT8qvd9OSR515my2UNBBLY4qIht/Qi9KySeQHQwQwxJN4Sw==
-
 "@babel/template@^7.18.10":
   version "7.18.10"
   resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.18.10.tgz#6f9134835970d1dbf0835c0d100c9f38de0c5e71"
@@ -1209,7 +1204,7 @@
     "@jridgewell/set-array" "^1.0.0"
     "@jridgewell/sourcemap-codec" "^1.4.10"

-"@jridgewell/gen-mapping@^0.3.0", "@jridgewell/gen-mapping@^0.3.2":
+"@jridgewell/gen-mapping@^0.3.2":
   version "0.3.2"
   resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz#c1aedc61e853f2bb9f5dfe6d4442d3b565b253b9"
   integrity sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==
@@ -1228,15 +1223,7 @@
   resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72"
   integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==

-"@jridgewell/source-map@^0.3.2":
-  version "0.3.2"
-  resolved "https://registry.yarnpkg.com/@jridgewell/source-map/-/source-map-0.3.2.tgz#f45351aaed4527a298512ec72f81040c998580fb"
-  integrity sha512-m7O9o2uR8k2ObDysZYzdfhb08VuEml5oWGiosa1VdaPZ/A6QyPkAJuwN0Q1lhULOf6B7MtQmHENS743hWtCrgw==
-  dependencies:
-    "@jridgewell/gen-mapping" "^0.3.0"
-    "@jridgewell/trace-mapping" "^0.3.9"
-
-"@jridgewell/sourcemap-codec@1.4.14", "@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.13":
+"@jridgewell/sourcemap-codec@1.4.14", "@jridgewell/sourcemap-codec@^1.4.10":
   version "1.4.14"
   resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz#add4c98d341472a289190b424efbdb096991bb24"
   integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==
@@ -1851,17 +1838,6 @@
     "@typescript-eslint/types" "5.44.0"
     eslint-visitor-keys "^3.3.0"

-"@vitejs/plugin-legacy@^3.0.1":
-  version "3.0.1"
-  resolved "https://registry.yarnpkg.com/@vitejs/plugin-legacy/-/plugin-legacy-3.0.1.tgz#bccc0eaf15a64e1854313acebec879854e413deb"
-  integrity sha512-XCtEjxoR3rmy000ujYRBp5kggWqzHz9+F20/yIMUWOzbvu0+KW1e14Fvb8h7SpNn+bfjGW1RiAs1Vrgb7Js+iQ==
-  dependencies:
-    "@babel/standalone" "^7.20.6"
-    core-js "^3.26.1"
-    magic-string "^0.27.0"
-    regenerator-runtime "^0.13.11"
-    systemjs "^6.13.0"
-
 "@vitejs/plugin-react@^2.0.1":
   version "2.2.0"
   resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-2.2.0.tgz#1b9f63b8b6bc3f56258d20cd19b33f5cc761ce6e"
@@ -1903,7 +1879,7 @@ acorn-jsx@^5.3.2:
   resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.3.2.tgz#7ed5bb55908b3b2f1bc55c6af1653bada7f07937"
   integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==

-acorn@^8.5.0, acorn@^8.8.0:
+acorn@^8.8.0:
   version "8.8.1"
   resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.8.1.tgz#0a3f9cbecc4ec3bea6f0a80b66ae8dd2da250b73"
   integrity sha512-7zFpHzhnqYKrkYdUjF1HI1bzd0VygEGX8lFk4k5zVMqHEoES+P+7TKI+EvLO9WVMJ8eekdO0aDEK044xTXwPPA==
@@ -2026,11 +2002,6 @@ browserslist@^4.21.3:
     node-releases "^2.0.6"
     update-browserslist-db "^1.0.9"

-buffer-from@^1.0.0:
-  version "1.1.2"
-  resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5"
-  integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==
-
 callsites@^3.0.0:
   version "3.1.0"
   resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73"
@@ -2102,11 +2073,6 @@ color-name@~1.1.4:
   resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2"
   integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==

-commander@^2.20.0:
-  version "2.20.3"
-  resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33"
-  integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==
-
 commander@^4.0.0:
   version "4.1.1"
   resolved "https://registry.yarnpkg.com/commander/-/commander-4.1.1.tgz#9fd602bd936294e9e9ef46a3f4d6964044b18068"
@@ -2139,11 +2105,6 @@ copy-to-clipboard@3.3.1:
   dependencies:
     toggle-selection "^1.0.6"

-core-js@^3.26.1:
-  version "3.26.1"
-  resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.26.1.tgz#7a9816dabd9ee846c1c0fe0e8fcad68f3709134e"
-  integrity sha512-21491RRQVzUn0GGM9Z1Jrpr6PNPxPi+Za8OM9q4tksTSnlbXXGKK1nXNg/QvwFYettXvSX6zWKCtHHfjN4puyA==
-
 cors@~2.8.5:
   version "2.8.5"
   resolved "https://registry.yarnpkg.com/cors/-/cors-2.8.5.tgz#eac11da51592dd86b9f06f6e7ac293b3df875d29"
@@ -3091,13 +3052,6 @@ magic-string@^0.26.7:
   dependencies:
     sourcemap-codec "^1.4.8"

-magic-string@^0.27.0:
-  version "0.27.0"
-  resolved "https://registry.yarnpkg.com/magic-string/-/magic-string-0.27.0.tgz#e4a3413b4bab6d98d2becffd48b4a257effdbbf3"
-  integrity sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==
-  dependencies:
-    "@jridgewell/sourcemap-codec" "^1.4.13"
-
 map-stream@~0.1.0:
   version "0.1.0"
   resolved "https://registry.yarnpkg.com/map-stream/-/map-stream-0.1.0.tgz#e56aa94c4c8055a16404a0674b78f215f7c8e194"
@@ -3601,7 +3555,7 @@ redux@^4.2.0:
   dependencies:
     "@babel/runtime" "^7.9.2"

-regenerator-runtime@^0.13.10, regenerator-runtime@^0.13.11:
+regenerator-runtime@^0.13.10:
   version "0.13.11"
   resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz#f6dca3e7ceec20590d07ada785636a90cdca17f9"
   integrity sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==
@@ -3770,24 +3724,11 @@ socket.io@^4.5.2:
   resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.0.2.tgz#adbc361d9c62df380125e7f161f71c826f1e490c"
   integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==

-source-map-support@~0.5.20:
-  version "0.5.21"
-  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.21.tgz#04fe7c7f9e1ed2d662233c28cb2b35b9f63f6e4f"
-  integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==
-  dependencies:
-    buffer-from "^1.0.0"
-    source-map "^0.6.0"
-
 source-map@^0.5.7:
   version "0.5.7"
   resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc"
   integrity sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==

-source-map@^0.6.0:
-  version "0.6.1"
-  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263"
-  integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==
-
 sourcemap-codec@^1.4.8:
   version "1.4.8"
   resolved "https://registry.yarnpkg.com/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz#ea804bd94857402e6992d05a38ef1ae35a9ab4c4"
@@ -3873,21 +3814,6 @@ supports-preserve-symlinks-flag@^1.0.0:
   resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09"
   integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==

-systemjs@^6.13.0:
-  version "6.13.0"
-  resolved "https://registry.yarnpkg.com/systemjs/-/systemjs-6.13.0.tgz#7b28e74b44352e1650e8652499f42de724c3fc7f"
-  integrity sha512-P3cgh2bpaPvAO2NE3uRp/n6hmk4xPX4DQf+UzTlCAycssKdqhp6hjw+ENWe+aUS7TogKRFtptMosTSFeC6R55g==
-
-terser@^5.16.1:
-  version "5.16.1"
-  resolved "https://registry.yarnpkg.com/terser/-/terser-5.16.1.tgz#5af3bc3d0f24241c7fb2024199d5c461a1075880"
-  integrity sha512-xvQfyfA1ayT0qdK47zskQgRZeWLoOQ8JQ6mIgRGVNwZKdQMU+5FkCBjmv4QjcrTzyZquRw2FVtlJSRUmMKQslw==
-  dependencies:
-    "@jridgewell/source-map" "^0.3.2"
-    acorn "^8.5.0"
-    commander "^2.20.0"
-    source-map-support "~0.5.20"
-
 text-table@^0.2.0:
   version "0.2.0"
   resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4"
@@ -1,48 +0,0 @@
#!/bin/bash

cd "$(dirname "$0")"

VERSION=$(grep ^VERSION ../setup.py | awk '{ print $3 }' | sed "s/'//g" )

echo "Be certain that you're in the 'installer' directory before continuing."
read -p "Press any key to continue, or CTRL-C to exit..."

echo Building installer zip fles for InvokeAI v$VERSION

# get rid of any old ones
rm *.zip

rm -rf InvokeAI-Installer
mkdir InvokeAI-Installer

cp -pr ../environments-and-requirements templates readme.txt InvokeAI-Installer/
mkdir InvokeAI-Installer/templates/rootdir

cp -pr ../configs InvokeAI-Installer/templates/rootdir/

mkdir InvokeAI-Installer/templates/rootdir/{outputs,embeddings,models}

cp install.sh.in InvokeAI-Installer/install.sh
chmod a+rx InvokeAI-Installer/install.sh

zip -r InvokeAI-installer-$VERSION-linux.zip InvokeAI-Installer
zip -r InvokeAI-installer-$VERSION-mac.zip InvokeAI-Installer

# now do the windows installer
rm InvokeAI-Installer/install.sh
cp install.bat.in InvokeAI-Installer/install.bat
cp WinLongPathsEnabled.reg InvokeAI-Installer/

# this gets rid of the "-e ." at the end of the windows requirements file
# because it is easier to do it now than in the .bat install script
egrep -v '^-e .' InvokeAI-Installer/environments-and-requirements/requirements-win-colab-cuda.txt >requirements.txt
mv requirements.txt InvokeAI-Installer/environments-and-requirements/requirements-win-colab-cuda.txt
zip -r InvokeAI-installer-$VERSION-windows.zip InvokeAI-Installer

# clean up
rm -rf InvokeAI-Installer

exit 0
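Note: the egrep step in the deleted script above filters the editable-install line
("-e .") out of the Windows requirements file before zipping. A minimal sketch of
what that filter keeps and drops (standalone bash, not part of the diff):

  # '^-e .' matches lines beginning with "-e " such as "-e ."
  printf -- '-e .\nnumpy==1.23.4\n' | egrep -v '^-e .'
  # output: numpy==1.23.4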
29  installer/create_installers.sh  (Executable file)
@@ -0,0 +1,29 @@
#!/usr/bin/env bash

set -euo pipefail
IFS=$'\n\t'

echo "Be certain that you're in the 'installer' directory before continuing."
read -p "Press any key to continue, or CTRL-C to exit..."

# make the installer zip for linux and mac
rm -rf InvokeAI
mkdir -p InvokeAI
cp install.sh InvokeAI
cp readme.txt InvokeAI

zip -r InvokeAI-linux.zip InvokeAI
zip -r InvokeAI-mac.zip InvokeAI

# make the installer zip for windows
rm -rf InvokeAI
mkdir -p InvokeAI
cp install.bat InvokeAI
cp readme.txt InvokeAI
cp WinLongPathsEnabled.reg InvokeAI

zip -r InvokeAI-windows.zip InvokeAI

rm -rf InvokeAI

echo "The installer zips are ready for distribution."
@@ -10,21 +10,21 @@

@rem This enables a user to install this project without manually installing git or Python

@rem change to the script's directory
PUSHD "%~dp0"

set "no_cache_dir=--no-cache-dir"
if "%1" == "use-cache" (
    set "no_cache_dir="
)

echo ***** Installing InvokeAI.. *****
echo "USING development BRANCH. REMEMBER TO CHANGE TO main BEFORE RELEASE"
@rem Config
set INSTALL_ENV_DIR=%cd%\installer_files\env
@rem https://mamba.readthedocs.io/en/latest/installation.html
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
#set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
# RELEASE_SOURCEBALL=/archive/refs/heads/test-installer.tar.gz
RELEASE_SOURCEBALL=/archive/refs/heads/development.tar.gz
set PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
set PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-x86_64-pc-windows-msvc-shared-install_only.tar.gz

@@ -127,7 +127,7 @@ if %errorlevel% neq 0 goto err_exit
echo ***** Updated pip and wheel *****

set err_msg=----- requirements file copy failed -----
copy binary_installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
copy installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
if %errorlevel% neq 0 goto err_exit

set err_msg=----- main pip install failed -----

@@ -140,11 +140,11 @@ set err_msg=----- InvokeAI setup failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -e .
if %errorlevel% neq 0 goto err_exit

copy binary_installer\invoke.bat.in .\invoke.bat
copy installer\invoke.bat .\invoke.bat
echo ***** Installed invoke launcher script ******

@rem more cleanup
rd /s /q binary_installer installer_files
rd /s /q installer installer_files

@rem preload the models
call .venv\Scripts\python scripts\configure_invokeai.py
@@ -1,215 +0,0 @@
@echo off
setlocal EnableExtensions EnableDelayedExpansion

@rem This script requires the user to install Python 3.9 or higher. All other
@rem requirements are downloaded as needed.

@rem change to the script's directory
PUSHD "%~dp0"

set "no_cache_dir=--no-cache-dir"
if "%1" == "use-cache" (
    set "no_cache_dir="
)

@rem Config
@rem this should be changed to the tagged release!
set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
@rem set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
set INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
set TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
set PYTHON_URL=https://www.python.org/downloads/windows/
set MINIMUM_PYTHON_VERSION=3.9.0
set PYTHON_URL=https://www.python.org/downloads/release/python-3109/

set err_msg=An error has occurred and the script could not continue.

@rem --------------------------- Intro -------------------------------
echo This script will install InvokeAI and its dependencies. Before you start,
echo please make sure to do the following:
echo 1. Install python 3.9 or higher.
echo 2. Double-click on the file WinLongPathsEnabled.reg in order to
echo    enable long path support on your system.
echo 3. Some users have found they need to install the Visual C++ core
echo    libraries or else they experience DLL loading problems at the end of the install.
echo    Visual C++ is very likely already installed on your system, but if you get DLL
echo    issues, please download and install the libraries by going to:
echo    https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170
echo.
echo See %INSTRUCTIONS% for more details.
echo.
pause

@rem ---------------------------- check Python version ---------------
echo ***** Checking and Updating Python *****

call python --version >.tmp1 2>.tmp2
if %errorlevel% == 1 (
    set err_msg=Please install Python 3.9 or higher. See %INSTRUCTIONS% for details.
    goto err_exit
)

for /f "tokens=2" %%i in (.tmp1) do set python_version=%%i
if "%python_version%" == "" (
    set err_msg=No python was detected on your system. Please install Python version %MINIMUM_PYTHON_VERSION% or higher. We recommend Python 3.10.9 from %PYTHON_URL%
    goto err_exit
)

call :compareVersions %MINIMUM_PYTHON_VERSION% %python_version%
if %errorlevel% == 1 (
    set err_msg=Your version of Python is too low. You need at least %MINIMUM_PYTHON_VERSION% but you have %python_version%. We recommend Python 3.10.9 from %PYTHON_URL%
    goto err_exit
)

@rem Cleanup
del /q .tmp1 .tmp2

echo Updating PIP...
call python -m pip install --no-warn-script-location -q --upgrade pip

@rem --------------------- Get the requirements file ------------
echo.
echo Setting up requirements file for your system.
copy /y environments-and-requirements\requirements-win-colab-cuda.txt .\requirements.txt

@rem --------------------- Get the root directory for installation ------------
set rootdir=""
set response=""
set selection=""
:pick_rootdir
if %rootdir% neq "" goto :done
set /p selection=Select the path to install InvokeAI's directory into [%UserProfile%]:
if %selection% == "" set selection=%UserProfile%
set dest=%selection%\invokeai
if exist %dest% (
    set response=y
    set /p response=The directory %dest% exists. Do you wish to resume install from a previous attempt? [Y/n]:
    if !response! == "" set response=y
    if /I !response! == y (set rootdir=%dest%) else (goto :pick_rootdir)
) else (
    set rootdir=!dest!
)
set response=y
set /p response="You have chosen to install InvokeAI into %rootdir%. OK? [Y/n]: "
if !response! == "" set response=y
if /I !response! neq y set rootdir=""
goto :pick_rootdir
:done

@rem ---------------------- Initialize the runtime directory ---------------------
echo.
echo *** Creating Runtime Directory %rootdir% ***
if not exist %rootdir% mkdir %rootdir%
@rem for unknown reasons the mkdir works but returns an error code
if not exist %rootdir% (
    set err_msg=Could not create the directory %rootdir%. Please check the directory's permissions and try again.
    goto :err_exit
)
echo Successful.

@rem --------------------------- Create and populate .venv ---------------------------
echo.
echo ** Creating Virtual Environment for InvokeAI **
call python -mvenv %rootdir%\.venv
if %errorlevel% neq 0 (
    set err_msg=Could not create virtual environment %rootdir%\.venv. Please check the directory's permissions and try again.
    goto :err_exit
)
echo Successful.

echo.
echo *** Installing InvokeAI Requirements ***
call %rootdir%\.venv\Scripts\activate.bat
copy environments-and-requirements\requirements-win-colab-cuda.txt .\requirements.txt
call python -mpip install -r requirements.txt
if %errorlevel% neq 0 (
    set err_msg=Installation of requirements failed. See above for errors and check %TROUBLESHOOTING% for potential solutions.
    goto :err_exit
)
echo Installation successful.

echo.
echo *** Installing InvokeAI Modules and Executables ***
call python -mpip install %INVOKE_AI_SRC%
if %errorlevel% neq 0 (
    set err_msg=Installation of InvokeAI failed. See above for errors and check %TROUBLESHOOTING% for potential solutions.
    goto :err_exit
)
echo Installation successful.

@rem --------------------------- Set up the root directory ---------------------------
xcopy /E /Y .\templates\rootdir %rootdir%
PUSHD "%rootdir%"
call .venv\Scripts\python .venv\Scripts\configure_invokeai.py --root="%rootdir%"
if %errorlevel% neq 0 (
    set err_msg=Configuration failed. See above for error messages and check %TROUBLESHOOTING% for potential solutions.
    goto :err_exit
)
POPD
copy .\templates\invoke.bat.in %rootdir%\invoke.bat
copy .\templates\update.bat.in %rootdir%\update.bat

@rem so that update.bat works
mkdir %rootdir%\environments-and-requirements
xcopy /I /Y .\environments-and-requirements %rootdir%\environments-and-requirements
copy .\requirements.txt %rootdir%\requirements.txt

echo.
echo ***** Finished configuration *****
echo All done. Execute the file %rootdir%\invoke.bat to start InvokeAI.
pause
deactivate
exit

@rem ------------------------ Subroutines ---------------
@rem routine to do comparison of semantic version numbers
@rem found at https://stackoverflow.com/questions/15807762/compare-version-numbers-in-batch-file
:compareVersions
::
:: Compares two version numbers and returns the result in the ERRORLEVEL
::
:: Returns 1 if version1 > version2
::         0 if version1 = version2
::        -1 if version1 < version2
::
:: The nodes must be delimited by . or , or -
::
:: Nodes are normally strictly numeric, without a 0 prefix. A letter suffix
:: is treated as a separate node
::
setlocal enableDelayedExpansion
set "v1=%~1"
set "v2=%~2"
call :divideLetters v1
call :divideLetters v2
:loop
call :parseNode "%v1%" n1 v1
call :parseNode "%v2%" n2 v2
if %n1% gtr %n2% exit /b 1
if %n1% lss %n2% exit /b -1
if not defined v1 if not defined v2 exit /b 0
if not defined v1 exit /b -1
if not defined v2 exit /b 1
goto :loop


:parseNode version nodeVar remainderVar
for /f "tokens=1* delims=.,-" %%A in ("%~1") do (
    set "%~2=%%A"
    set "%~3=%%B"
)
exit /b


:divideLetters versionVar
for %%C in (a b c d e f g h i j k l m n o p q r s t u v w x y z) do set "%~1=!%~1:%%C=.%%C!"
exit /b

:err_exit
echo %err_msg%
echo The installer will exit now.
pause
exit /b
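Note: the :compareVersions subroutine in the deleted script above compares dotted
version strings node by node. An equivalent-in-spirit bash sketch (hypothetical
helper, assuming GNU "sort -V"; not part of the diff):

  # Returns success (0) when $1 >= $2 in version order.
  version_ge() {
      # sort -V orders version strings per dotted node; if the smaller
      # of the two sorts first and equals $2, then $1 >= $2.
      [ "$(printf '%s\n' "$1" "$2" | sort -V | head -n1)" = "$2" ]
  }

  version_ge "3.10.9" "3.9.0" && echo "3.10.9 >= 3.9.0"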
@@ -1,9 +1,5 @@
#!/usr/bin/env bash

# ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"

set -euo pipefail
IFS=$'\n\t'
@@ -26,8 +22,6 @@ function _err_exit {

# This enables a user to install this project without manually installing git or Python

echo -e "\n***** Installing InvokeAI into $(pwd)... *****\n"

export no_cache_dir="--no-cache-dir"
if [ $# -ge 1 ]; then
    if [ "$1" = "use-cache" ]; then

@@ -35,6 +29,10 @@ if [ $# -ge 1 ]; then
    fi
fi

echo "$no_cache_dir"

echo -e "\n***** Installing InvokeAI... *****\n"


OS_NAME=$(uname -s)
case "${OS_NAME}" in
@@ -82,17 +80,19 @@ if [ "$OS_NAME" == "darwin" ] && [ "$OS_ARCH" == "arm64" ]; then
|
||||
fi
|
||||
|
||||
# config
|
||||
echo "USING development BRANCH. REMEMBER TO CHANGE TO main BEFORE RELEASE"
|
||||
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
|
||||
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${MAMBA_OS_NAME}-${MAMBA_ARCH}/latest"
|
||||
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
|
||||
RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
|
||||
# RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
|
||||
# RELEASE_SOURCEBALL=/archive/refs/heads/test-installer.tar.gz
|
||||
RELEASE_SOURCEBALL=/archive/refs/heads/development.tar.gz
|
||||
PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
|
||||
if [ "$OS_NAME" == "darwin" ]; then
|
||||
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-apple-darwin-install_only.tar.gz
|
||||
elif [ "$OS_NAME" == "linux" ]; then
|
||||
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-unknown-linux-gnu-install_only.tar.gz
|
||||
fi
|
||||
echo "INSTALLING $RELEASE_SOURCEBALL FROM $RELEASE_URL"
|
||||
|
||||
PACKAGES_TO_INSTALL=""
|
||||
|
||||
@@ -192,33 +192,32 @@ echo -e "We're running under"
|
||||
_err_exit $? _err_msg
|
||||
|
||||
_err_msg="\n----- pip update failed -----\n"
|
||||
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location --upgrade pip
|
||||
.venv/bin/python3 -m pip install "$no_cache_dir" --no-warn-script-location --upgrade pip wheel
|
||||
_err_exit $? _err_msg
|
||||
|
||||
echo -e "\n***** Updated pip *****\n"
|
||||
echo -e "\n***** Updated pip and wheel *****\n"
|
||||
|
||||
_err_msg="\n----- requirements file copy failed -----\n"
|
||||
cp binary_installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
|
||||
cp installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
|
||||
_err_exit $? _err_msg
|
||||
|
||||
_err_msg="\n----- main pip install failed -----\n"
|
||||
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -r requirements.txt
|
||||
.venv/bin/python3 -m pip install "$no_cache_dir" --no-warn-script-location -r requirements.txt
|
||||
_err_exit $? _err_msg
|
||||
|
||||
echo -e "\n***** Installed Python dependencies *****\n"
|
||||
|
||||
_err_msg="\n----- InvokeAI setup failed -----\n"
|
||||
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -e .
|
||||
.venv/bin/python3 -m pip install "$no_cache_dir" --no-warn-script-location -e .
|
||||
_err_exit $? _err_msg
|
||||
|
||||
echo -e "\n***** Installed InvokeAI *****\n"
|
||||
|
||||
cp binary_installer/invoke.sh.in ./invoke.sh
|
||||
chmod a+rx ./invoke.sh
|
||||
cp installer/invoke.sh .
|
||||
echo -e "\n***** Installed invoke launcher script ******\n"
|
||||
|
||||
# more cleanup
|
||||
rm -rf binary_installer/ installer_files/
|
||||
rm -rf installer/ installer_files/
|
||||
|
||||
# preload the models
|
||||
.venv/bin/python3 scripts/configure_invokeai.py
|
||||
@@ -228,8 +227,6 @@ deactivate
|
||||
|
||||
echo -e "\n***** Finished downloading models *****\n"
|
||||
|
||||
echo "All done! Run the command"
|
||||
echo " $scriptdir/invoke.sh"
|
||||
echo "to start InvokeAI."
|
||||
echo "All done! Run the command './invoke.sh' to start InvokeAI."
|
||||
read -p "Press any key to exit..."
|
||||
exit
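Note on the quoting change above ($no_cache_dir -> "$no_cache_dir"): quoting an
empty variable passes a literal empty-string argument, while an unquoted empty
variable disappears entirely. A standalone bash illustration (not part of the diff):

  count_args() { echo "$# argument(s)"; }

  flag=""
  count_args $flag              # unquoted empty var vanishes: 0 argument(s)
  count_args "$flag"            # quoted empty var is a real argument: 1 argument(s)
  count_args ${flag:+"$flag"}   # ${var:+word} drops empty values safely: 0 argument(s)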
@@ -1,217 +0,0 @@
#!/usr/bin/env bash

# ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"

# make sure we are not already in a venv
# (don't need to check status)
deactivate >/dev/null 2>&1

# this should be changed to the tagged release!
INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
# INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
MINIMUM_PYTHON_VERSION=3.9.0

set -euo pipefail
IFS=$'\n\t'

function _err_exit {
    if test "$1" -ne 0
    then
        echo -e "Error code $1; Error caught was '$2'"
        if [ "$OS_NAME" == "osx" ]; then
            echo "Something went wrong while installing InvokeAI and/or its requirements."
            echo "You may need to use the Xcode command line tools to proceed. See step number 3 of"
            echo "https://invoke-ai.github.io/InvokeAI/INSTALL_SOURCE#walk_through for"
            echo "installation instructions and then run this script again."
        else
            echo "Something went wrong while installing InvokeAI and/or its requirements."
            echo "See https://invoke-ai.github.io/InvokeAI/INSTALL_SOURCE#troubleshooting for troubleshooting"
            echo "tips, or visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
            echo "installation methods"
        fi
        read -p "Press any key to exit..."
        exit
    fi
}

function readinput() {
    local CLEAN_ARGS=""
    while [[ $# -gt 0 ]]; do
        local i="$1"
        case "$i" in
            "-i")
                if read -i "default" 2>/dev/null <<< "test"; then
                    CLEAN_ARGS="$CLEAN_ARGS -i \"$2\""
                fi
                shift
                shift
                ;;
            "-p")
                CLEAN_ARGS="$CLEAN_ARGS -p \"$2\""
                shift
                shift
                ;;
            *)
                CLEAN_ARGS="$CLEAN_ARGS $1"
                shift
                ;;
        esac
    done
    eval read $CLEAN_ARGS
}


function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }

echo "InvokeAI simple installer..."
echo ""
echo "Some of the installation steps take a long time to run. Please be patient."
echo "If the script appears to hang for more than 10 minutes, please interrupt with control-C and retry."
read -n 1 -s -r -p "<Press any key to start the install>"
echo ""

OS_NAME=$(uname -s)
case "${OS_NAME}" in
    Linux*) OS_NAME="linux";;
    Darwin*) OS_NAME="osx";;
    *) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
esac

OS_ARCH=$(uname -m)
case "${OS_ARCH}" in
    x86_64*) OS_ARCH="64";;
    arm64*) OS_ARCH="arm64";;
    *) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
esac

echo "Installing for $OS_NAME-$OS_ARCH"
# confirm that python is installed and is up to date

PYTHON=""
for candidate in python3.10 python3.9 python3 python python3.11 ; do
    if ppath=`which $candidate`; then
        python_version=$($ppath -V | awk '{ print $2 }')
        if [ $(version $python_version) -ge $(version "$MINIMUM_PYTHON_VERSION") ]; then
            PYTHON=$ppath
            echo Python $python_version found at $PYTHON
            break
        fi
    fi
done

if [ -z "$PYTHON" ]; then
    echo "A suitable Python interpreter could not be found"
    echo "Please install Python 3.9 or higher before running this script. See instructions at $INSTRUCTIONS for help."
    read -p "Press any key to exit"
    exit -1
fi

if [ "$OS_NAME" == "osx" ]; then
    xcode_path=$(xcode-select --print-path)
    _err_exit $? "xcode_path command not found"
    export CPPFLAGS="-I$xcode_path/Library/Frameworks/Python3.framework/Versions/Current/Headers"
    echo "Will compile wheels with CPPFLAGS=$CPPFLAGS"
fi

ROOTDIR=""
while [ "$ROOTDIR" == "" ]
do
    echo
    readinput -e -p "Select your preferred location for the 'invokeai' directory [$HOME]: " -i $HOME input
    ROOTDIR=${input:=$HOME}/invokeai
    read -e -p "InvokeAI will be installed into $ROOTDIR. OK? [y]: " input
    RESPONSE=${input:='y'}
    if [ "$RESPONSE" == 'y' ]; then
        if [ -e $ROOTDIR ]; then
            echo
            read -e -p "Directory $ROOTDIR already exists. Do you want to resume an interrupted install? [y]: " input
            RESPONSE=${input:='y'}
            if [ "$RESPONSE" != 'y' ]; then
                ROOTDIR=""
            fi
        else
            mkdir -p $ROOTDIR
            if [ $? -ne 0 ]; then
                echo "Could not create $ROOTDIR. Try again with a different install location."
                ROOTDIR=""
            fi
        fi
    else
        ROOTDIR=""
    fi
done

#--------------------------------------------------------------------------------
echo
echo "** Creating Virtual Environment for InvokeAI **"

$PYTHON -mpip install --upgrade pip
$PYTHON -mvenv $ROOTDIR/.venv
_err_exit $? "Python failed to create virtual environment $ROOTDIR/.venv. Please see $TROUBLESHOOTING for help."

#--------------------------------------------------------------------------------
echo
echo "** Activating Virtual Environment for InvokeAI **"

source $ROOTDIR/.venv/bin/activate
_err_exit $? "Failed to activate virtual evironment $ROOTDIR/.venv. Please see $TROUBLESHOOTING for help."

PYTHON=$ROOTDIR/.venv/bin/python

#--------------------------------------------------------------------------------
echo
echo "*** Installing InvokeAI Dependencies ***"

if [ "$OS_NAME" == "osx" ]; then
    echo "macOS detected. Installing MPS and CPU support."
    egrep -v '^-e .' environments-and-requirements/requirements-mac-mps-cpu.txt >requirements.txt
else
    if (lsmod | grep amdgpu) &>/dev/null ; then
        echo "Linux system with AMD GPU driver detected. Installing ROCm and CPU support"
        egrep -v '^-e .' environments-and-requirements/requirements-lin-amd.txt >requirements.txt
    else
        echo "Linux system detected. Installing CUDA and CPU support."
        egrep -v '^-e .' environments-and-requirements/requirements-lin-cuda.txt >requirements.txt
    fi
fi

$PYTHON -mpip install -r requirements.txt
_err_exit $? "Failed to install InvokeAI's dependencies."

#--------------------------------------------------------------------------------
echo
echo "*** Installing InvokeAI Modules and Executables ***"
$PYTHON -mpip install $INVOKE_AI_SRC
_err_exit $? "Installation of InvokeAI failed."

#--------------------------------------------------------------------------------
echo " *** Setting Up Root Directory $ROOTDIR *** "
cp -pr templates/rootdir/* $ROOTDIR/
cp templates/invoke.sh.in $ROOTDIR/invoke.sh
chmod a+rx $ROOTDIR/invoke.sh
cp templates/update.sh.in $ROOTDIR/update.sh
chmod a+rx $ROOTDIR/update.sh

# This allows the updater to work!
cp -pr environments-and-requirements requirements.txt $ROOTDIR/

#--------------------------------------------------------------------------------
echo
echo "*** Confguring InvokeAI ***"
pushd $ROOTDIR
./.venv/bin/configure_invokeai.py --root=$ROOTDIR
_err_exit $? "Initial configuration failed. Please see above error messages and $TROUBLESHOOTING for help."

#--------------------------------------------------------------------------------
popd
cp templates/invoke.sh.in $ROOTDIR/invoke.sh
chmod a+rx $ROOTDIR/invoke.sh

cp templates/update.sh.in $ROOTDIR/update.sh
chmod a+rx $ROOTDIR/update.sh

echo "You may now run InvokeAI by entering the directory $ROOTDIR and running invoke.sh"
@@ -1,6 +1,5 @@
@echo off

PUSHD "%~dp0"
call .venv\Scripts\activate.bat

echo Do you want to generate images using the

@@ -11,10 +10,10 @@ echo 3. open the developer console
set /p choice="Please enter 1, 2 or 3: "
if /i "%choice%" == "1" (
    echo Starting the InvokeAI command-line.
    .venv\Scripts\python scripts\invoke.py %*
    .venv\Scripts\python scripts\invoke.py
) else if /i "%choice%" == "2" (
    echo Starting the InvokeAI browser-based UI.
    .venv\Scripts\python scripts\invoke.py --web %*
    .venv\Scripts\python scripts\invoke.py --web
) else if /i "%choice%" == "3" (
    echo Developer Console
    echo Python command is:
9  binary_installer/invoke.sh.in → installer/invoke.sh  (Normal file → Executable file)
@@ -4,11 +4,6 @@ set -eu

. .venv/bin/activate

# set required env var for torch on mac MPS
if [ "$(uname -s)" == "Darwin" ]; then
    export PYTORCH_ENABLE_MPS_FALLBACK=1
fi

echo "Do you want to generate images using the"
echo "1. command-line"
echo "2. browser-based UI"

@@ -20,11 +15,11 @@ read choice
case $choice in
    1)
        printf "\nStarting the InvokeAI command-line..\n";
        .venv/bin/python scripts/invoke.py $*;
        .venv/bin/python scripts/invoke.py;
        ;;
    2)
        printf "\nStarting the InvokeAI browser-based UI..\n";
        .venv/bin/python scripts/invoke.py --web $*;
        .venv/bin/python scripts/invoke.py --web;
        ;;
    3)
        printf "\nDeveloper Console:\n";
(File diff suppressed because it is too large)
@@ -4,7 +4,7 @@
#
# pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-darwin-x86_64-cpu-reqs.txt installer/requirements.in
#
--extra-index-url https://download.pytorch.org/whl/cu116
--extra-index-url https://download.pytorch.org/whl/torch_stable.html
--trusted-host https

absl-py==1.3.0 \

@@ -987,6 +987,7 @@ numpy==1.23.4 \
    #   pandas
    #   pyarrow
    #   pydeck
    #   pypatchmatch
    #   pytorch-lightning
    #   pywavelets
    #   qudida

@@ -1159,6 +1160,7 @@ pillow==9.3.0 \
    #   imageio
    #   k-diffusion
    #   matplotlib
    #   pypatchmatch
    #   realesrgan
    #   scikit-image
    #   streamlit

@@ -1294,6 +1296,9 @@ pyparsing==3.0.9 \
    # via
    #   matplotlib
    #   packaging
pypatchmatch @ https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip \
    --hash=sha256:4ad6ec95379e7d122d494ff76633cc7cf9b71330d5efda147fceba81e3dc6cd2
    # via -r installer/requirements.in
pyreadline3==3.4.1 \
    --hash=sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae \
    --hash=sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb
@@ -1826,27 +1831,27 @@ toolz==0.12.0 \
    --hash=sha256:2059bd4148deb1884bb0eb770a3cde70e7f954cfbbdc2285f1f2de01fd21eb6f \
    --hash=sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194
    # via altair
torch==1.12.0 ; platform_system == "Darwin" \
    --hash=sha256:0399746f83b4541bcb5b219a18dbe8cade760aba1c660d2748a38c6dc338ebc7 \
    --hash=sha256:0986685f2ec8b7c4d3593e8cfe96be85d462943f1a8f54112fc48d4d9fbbe903 \
    --hash=sha256:13c7cca6b2ea3704d775444f02af53c5f072d145247e17b8cd7813ac57869f03 \
    --hash=sha256:201abf43a99bb4980cc827dd4b38ac28f35e4dddac7832718be3d5479cafd2c1 \
    --hash=sha256:2143d5fe192fd908b70b494349de5b1ac02854a8a902bd5f47d13d85b410e430 \
    --hash=sha256:2568f011dddeb5990d8698cc375d237f14568ffa8489854e3b94113b4b6b7c8b \
    --hash=sha256:3322d33a06e440d715bb214334bd41314c94632d9a2f07d22006bf21da3a2be4 \
    --hash=sha256:349ea3ba0c0e789e0507876c023181f13b35307aebc2e771efd0e045b8e03e84 \
    --hash=sha256:44a3804e9bb189574f5d02ccc2dc6e32e26a81b3e095463b7067b786048c6072 \
    --hash=sha256:5ed69d5af232c5c3287d44cef998880dadcc9721cd020e9ae02f42e56b79c2e4 \
    --hash=sha256:60d06ee2abfa85f10582d205404d52889d69bcbb71f7e211cfc37e3957ac19ca \
    --hash=sha256:63341f96840a223f277e498d2737b39da30d9f57c7a1ef88857b920096317739 \
    --hash=sha256:72207b8733523388c49d43ffcc4416d1d8cd64c40f7826332e714605ace9b1d2 \
    --hash=sha256:7ddb167827170c4e3ff6a27157414a00b9fef93dea175da04caf92a0619b7aee \
    --hash=sha256:844f1db41173b53fe40c44b3e04fcca23a6ce00ac328b7099f2800e611766845 \
    --hash=sha256:a1325c9c28823af497cbf443369bddac9ac59f67f1e600f8ab9b754958e55b76 \
    --hash=sha256:abbdc5483359b9495dc76e3bd7911ccd2ddc57706c117f8316832e31590af871 \
    --hash=sha256:c0313438bc36448ffd209f5fb4e5f325b3af158cdf61c8829b8ddaf128c57816 \
    --hash=sha256:e3e8348edca3e3cee5a67a2b452b85c57712efe1cc3ffdb87c128b3dde54534e \
    --hash=sha256:fb47291596677570246d723ee6abbcbac07eeba89d8f83de31e3954f21f44879
torch==1.12.1 ; platform_system == "Darwin" \
    --hash=sha256:03e31c37711db2cd201e02de5826de875529e45a55631d317aadce2f1ed45aa8 \
    --hash=sha256:0b44601ec56f7dd44ad8afc00846051162ef9c26a8579dda0a02194327f2d55e \
    --hash=sha256:42e115dab26f60c29e298559dbec88444175528b729ae994ec4c65d56fe267dd \
    --hash=sha256:42f639501928caabb9d1d55ddd17f07cd694de146686c24489ab8c615c2871f2 \
    --hash=sha256:4e1b9c14cf13fd2ab8d769529050629a0e68a6fc5cb8e84b4a3cc1dd8c4fe541 \
    --hash=sha256:68104e4715a55c4bb29a85c6a8d57d820e0757da363be1ba680fa8cc5be17b52 \
    --hash=sha256:69fe2cae7c39ccadd65a123793d30e0db881f1c1927945519c5c17323131437e \
    --hash=sha256:6cf6f54b43c0c30335428195589bd00e764a6d27f3b9ba637aaa8c11aaf93073 \
    --hash=sha256:743784ccea0dc8f2a3fe6a536bec8c4763bd82c1352f314937cb4008d4805de1 \
    --hash=sha256:8a34a2fbbaa07c921e1b203f59d3d6e00ed379f2b384445773bd14e328a5b6c8 \
    --hash=sha256:976c3f997cea38ee91a0dd3c3a42322785414748d1761ef926b789dfa97c6134 \
    --hash=sha256:9b356aea223772cd754edb4d9ecf2a025909b8615a7668ac7d5130f86e7ec421 \
    --hash=sha256:9c038662db894a23e49e385df13d47b2a777ffd56d9bcd5b832593fab0a7e286 \
    --hash=sha256:a8320ba9ad87e80ca5a6a016e46ada4d1ba0c54626e135d99b2129a4541c509d \
    --hash=sha256:b5dbcca369800ce99ba7ae6dee3466607a66958afca3b740690d88168752abcf \
    --hash=sha256:bfec2843daa654f04fda23ba823af03e7b6f7650a873cdb726752d0e3718dada \
    --hash=sha256:cd26d8c5640c3a28c526d41ccdca14cf1cbca0d0f2e14e8263a7ac17194ab1d2 \
    --hash=sha256:e9c8f4a311ac29fc7e8e955cfb7733deb5dbe1bdaabf5d4af2765695824b7e0d \
    --hash=sha256:f00c721f489089dc6364a01fd84906348fe02243d0af737f944fddb36003400d \
    --hash=sha256:f3b52a634e62821e747e872084ab32fbcb01b7fa7dbb7471b6218279f02a178a
    # via
    #   -r installer/requirements.in
    #   accelerate
@@ -1877,26 +1882,26 @@ torchmetrics==0.10.2 \
    --hash=sha256:43757d82266969906fc74b6e80766fcb2a0d52d6c3d09e3b7c98cf3b733fd20c \
    --hash=sha256:daa29d96bff5cff04d80eec5b9f5076993d6ac9c2d2163e88b6b31f8d38f7c25
    # via pytorch-lightning
torchvision==0.13.0 ; platform_system == "Darwin" \
    --hash=sha256:01e9e7b2e7724e66561e8d98f900985d80191e977c5c0b3f33ed31800ba0210c \
    --hash=sha256:0e28740bd5695076f7c449af650fc474d6566722d446461c2ceebf9c9599b37f \
    --hash=sha256:1b703701f0b99f307ad925b1abda2b3d5bdbf30643ff02102b6aeeb8840ae278 \
    --hash=sha256:1e2049f1207631d42d743205f663f1d2235796565be3f18b0339d479626faf30 \
    --hash=sha256:253eb0c67bf88cef4a79ec69058c3e94f9fde28b9e3699ad1afc0b3ed50f8075 \
    --hash=sha256:42d95ab197d090efc5669fec02fbc603d05c859e50ca2c60180d1a113aa9b3e2 \
    --hash=sha256:5c31e9b3004142dbfdf32adc4cf2d4fd709b820833e9786f839ae3a91ff65ef0 \
    --hash=sha256:61d5093a50b7923a4e5bf9e0271001c29e01abec2348b7dd93370a0a9d15836c \
    --hash=sha256:667cac55afb13cda7d362466e7eba3119e529b210e55507d231bead09aca5e1f \
    --hash=sha256:6c4c35428c758adc485ff8f239b5ed68c1b6c26efa261a52e431cab0f7f22aec \
    --hash=sha256:83a4d9d50787d1e886c94486b63b15978391f6cf1892fce6a93132c09b14e128 \
    --hash=sha256:a20662c11dc14fd4eff102ceb946a7ee80b9f98303bb52435cc903f2c4c1fe10 \
    --hash=sha256:acb72a40e5dc0cd454d28514dbdd589a5057afd9bb5c785b87a54718b999bfa1 \
    --hash=sha256:ad458146aca15f652f9b0c227bebd5403602c7341f15f68f20ec119fa8e8f4a5 \
    --hash=sha256:ada295dbfe55017b02acfab960a997387f5addbadd28ee5e575e24f692992ce4 \
    --hash=sha256:b620a43df4131ad09f5761c415a016a9ea95aaf8ec8c91d030fb59bad591094a \
    --hash=sha256:b7a2c9aebc7ef265777fe7e82577364288d98cf6b8cf0a63bb2621df78a7af1a \
    --hash=sha256:c2278a189663087bb8e65915062aa7a25b8f8e5a3cfaa5879fe277e23e4bbf40 \
    --hash=sha256:df16abf31e7a5fce8db1f781bf1e4f20c8bc730c7c3f657e946cc5820c04e465
torchvision==0.13.1 ; platform_system == "Darwin" \
    --hash=sha256:0298bae3b09ac361866088434008d82b99d6458fe8888c8df90720ef4b347d44 \
    --hash=sha256:08f592ea61836ebeceb5c97f4d7a813b9d7dc651bbf7ce4401563ccfae6a21fc \
    --hash=sha256:099874088df104d54d8008f2a28539ca0117b512daed8bf3c2bbfa2b7ccb187a \
    --hash=sha256:0e77706cc90462653620e336bb90daf03d7bf1b88c3a9a3037df8d111823a56e \
    --hash=sha256:19286a733c69dcbd417b86793df807bd227db5786ed787c17297741a9b0d0fc7 \
    --hash=sha256:3567fb3def829229ec217c1e38f08c5128ff7fb65854cac17ebac358ff7aa309 \
    --hash=sha256:4d8bf321c4380854ef04613935fdd415dce29d1088a7ff99e06e113f0efe9203 \
    --hash=sha256:5e631241bee3661de64f83616656224af2e3512eb2580da7c08e08b8c965a8ac \
    --hash=sha256:7552e80fa222252b8b217a951c85e172a710ea4cad0ae0c06fbb67addece7871 \
    --hash=sha256:7cb789ceefe6dcd0dc8eeda37bfc45efb7cf34770eac9533861d51ca508eb5b3 \
    --hash=sha256:83e9e2457f23110fd53b0177e1bc621518d6ea2108f570e853b768ce36b7c679 \
    --hash=sha256:87c137f343197769a51333076e66bfcd576301d2cd8614b06657187c71b06c4f \
    --hash=sha256:899eec0b9f3b99b96d6f85b9aa58c002db41c672437677b553015b9135b3be7e \
    --hash=sha256:8e4d02e4d8a203e0c09c10dfb478214c224d080d31efc0dbf36d9c4051f7f3c6 \
    --hash=sha256:b167934a5943242da7b1e59318f911d2d253feeca0d13ad5d832b58eed943401 \
    --hash=sha256:c5ed609c8bc88c575226400b2232e0309094477c82af38952e0373edef0003fd \
    --hash=sha256:e9a563894f9fa40692e24d1aa58c3ef040450017cfed3598ff9637f404f3fe3b \
    --hash=sha256:ef5fe3ec1848123cd0ec74c07658192b3147dcd38e507308c790d5943e87b88c \
    --hash=sha256:f230a1a40ed70d51e463ce43df243ec520902f8725de2502e485efc5eea9d864
    # via
    #   -r installer/requirements.in
    #   basicsr
@@ -1,10 +1,9 @@
#
# This file is autogenerated by pip-compile with Python 3.9
# by the following command:
# This file is autogenerated by pip-compile with python 3.9
# To update, run:
#
# pip-compile --allow-unsafe --generate-hashes --output-file=binary_installer/py3.10-linux-x86_64-cuda-reqs.txt binary_installer/requirements.in
# pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-linux-x86_64-cuda-reqs.txt installer/requirements.in
#
--extra-index-url https://download.pytorch.org/whl/torch_stable.html
--extra-index-url https://download.pytorch.org/whl/cu116
--trusted-host https
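Note: these pinned requirements files are produced by the pip-compile command shown
in the header above. Because they are generated with --generate-hashes, pip enforces
hash checking when installing from them, verifying every downloaded artifact. A
sketch (assuming pip-tools is installed; not part of the diff):

  pip-compile --allow-unsafe --generate-hashes \
      --output-file=installer/py3.10-linux-x86_64-cuda-reqs.txt installer/requirements.in

  # pip enables hash verification automatically once the requirements carry hashes:
  pip install -r installer/py3.10-linux-x86_64-cuda-reqs.txt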
@@ -18,7 +17,7 @@ accelerate==0.14.0 \
    --hash=sha256:31c5bcc40564ef849b5bc1c4424a43ccaf9e26413b7df89c2e36bf81f070fd44 \
    --hash=sha256:b15d562c0889d0cf441b01faa025dfc29b163d061b6cc7d489c2c83b0a55ffab
    # via
    #   -r binary_installer/requirements.in
    #   -r installer/requirements.in
    #   k-diffusion
addict==2.4.0 \
    --hash=sha256:249bb56bbfd3cdc2a004ea0ff4c2b6ddc84d53bc2194761636eb314d5cfa5dfc \

@@ -120,7 +119,7 @@ aiosignal==1.2.0 \
albumentations==1.3.0 \
    --hash=sha256:294165d87d03bc8323e484927f0a5c1a3c64b0e7b9c32a979582a6c93c363bdf \
    --hash=sha256:be1af36832c8893314f2a5550e8ac19801e04770734c1b70fa3c996b41f37bed
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
altair==4.2.0 \
    --hash=sha256:0c724848ae53410c13fa28be2b3b9a9dcb7b5caa1a70f7f217bd663bb419935a \
    --hash=sha256:d87d9372e63b48cd96b2a6415f0cf9457f50162ab79dc7a31cd7e024dd840026

@@ -151,10 +150,6 @@ blinker==1.5 \
    --hash=sha256:1eb563df6fdbc39eeddc177d953203f99f097e9bf0e2b8f9f3cf18b6ca425e36 \
    --hash=sha256:923e5e2f69c155f2cc42dafbbd70e16e3fde24d2d4aa2ab72fbe386238892462
    # via streamlit
boltons==21.0.0 \
    --hash=sha256:65e70a79a731a7fe6e98592ecfb5ccf2115873d01dbc576079874629e5c90f13 \
    --hash=sha256:b9bb7b58b2b420bbe11a6025fdef6d3e5edc9f76a42fb467afe7ca212ef9948b
    # via torchsde
cachetools==5.2.0 \
    --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \
    --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db

@@ -188,11 +183,11 @@ click==8.1.3 \
clip @ https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip \
    --hash=sha256:b5842c25da441d6c581b53a5c60e0c2127ebafe0f746f8e15561a006c6c3be6a
    # via
    #   -r binary_installer/requirements.in
    #   -r installer/requirements.in
    #   clipseg
clipseg @ https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip \
    --hash=sha256:14f43ed42f90be3fe57f06de483cb8be0f67f87a6f62a011339d45a39f4b4189
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
commonmark==0.9.1 \
    --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \
    --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9

@@ -279,7 +274,7 @@ decorator==5.1.1 \
diffusers==0.7.2 \
    --hash=sha256:4a5f8b3a5fbd936bba7d459611cb35ec62875030367be32b232f9e19543e25a9 \
    --hash=sha256:fb814ffd150cc6f470380b8c6a521181a77beb2f44134d2aad2e4cd8aa2ced0e
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
dnspython==2.2.1 \
    --hash=sha256:0f7569a4a6ff151958b64304071d370daa3243d15941a7beedf0c9fe5105603e \
    --hash=sha256:a851e51367fb93e9e1361732c1d60dab63eff98712e503ea7d92e6eccb109b4f

@@ -299,7 +294,7 @@ entrypoints==0.4 \
eventlet==0.33.1 \
    --hash=sha256:a085922698e5029f820cf311a648ac324d73cec0e4792877609d978a4b5bbf31 \
    --hash=sha256:afbe17f06a58491e9aebd7a4a03e70b0b63fd4cf76d8307bae07f280479b1515
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
facexlib==0.2.5 \
    --hash=sha256:31e20cc4ed5d63562d380e4564bae14ac0d5d1899a079bad87621e13564567e4 \
    --hash=sha256:cc7ceb56c5424319c47223cf75eef6828c34c66082707c6eb35b95d39779f02d

@@ -325,15 +320,15 @@ flask==2.2.2 \
flask-cors==3.0.10 \
    --hash=sha256:74efc975af1194fc7891ff5cd85b0f7478be4f7f59fe158102e91abb72bb4438 \
    --hash=sha256:b60839393f3b84a0f3746f6cdca56c1ad7426aa738b70d6c61375857823181de
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
flask-socketio==5.3.1 \
    --hash=sha256:fd0ed0fc1341671d92d5f5b2f5503916deb7aa7e2940e6636cfa2c087c828bf9 \
    --hash=sha256:ff0c721f20bff1e2cfba77948727a8db48f187e89a72fe50c34478ce6efb3353
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
flaskwebgui==0.3.7 \
    --hash=sha256:4a69955308eaa8bb256ba04a994dc8f58a48dcd6f9599694ab1bcd9f43d88a5d \
    --hash=sha256:535974ce2672dcc74787c254de24cceed4101be75d96952dae82014dd57f061e
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
fonttools==4.38.0 \
    --hash=sha256:2bb244009f9bf3fa100fc3ead6aeb99febe5985fa20afbfbaa2f8946c2fbdaf1 \
    --hash=sha256:820466f43c8be8c3009aef8b87e785014133508f0de64ec469e4efb643ae54fb

@@ -417,11 +412,11 @@ future==0.18.2 \
getpass-asterisk==1.0.1 \
    --hash=sha256:20d45cafda0066d761961e0919728526baf7bb5151fbf48a7d5ea4034127d857 \
    --hash=sha256:7cc357a924cf62fa4e15b73cb4e5e30685c9084e464ffdc3fd9000a2b54ea9e9
    # via -r binary_installer/requirements.in
gfpgan @ https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system == "Linux" or platform_system == "Darwin" \
    --hash=sha256:4155907b8b7db3686324554df7007eedd245cdf8656c21da9d9a3f44bef2fcaa
    # via -r installer/requirements.in
gfpgan @ https://github.com/TencentARC/GFPGAN/archive/2eac2033893ca7f427f4035d80fe95b92649ac56.zip \
    --hash=sha256:79e6d71c8f1df7c7ccb0ac6b9a2ccb615ad5cde818c8b6f285a8711c05aebf85
    # via
    #   -r binary_installer/requirements.in
    #   -r installer/requirements.in
    #   realesrgan
gitdb==4.0.9 \
    --hash=sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd \

@@ -582,7 +577,7 @@ imageio-ffmpeg==0.4.7 \
    --hash=sha256:7a08838f97f363e37ca41821b864fd3fdc99ab1fe2421040c78eb5f56a9e723e \
    --hash=sha256:8e724d12dfe83e2a6eb39619e820243ca96c81c47c2648e66e05f7ee24e14312 \
    --hash=sha256:fc60686ef03c2d0f842901b206223c30051a6a120384458761390104470846fd
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
importlib-metadata==5.0.0 \
    --hash=sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab \
    --hash=sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43

@@ -615,9 +610,9 @@ jsonschema==4.17.0 \
    # via
    #   altair
    #   jsonmerge
k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip \
    --hash=sha256:8eac5cdc08736e6d61908a1b2948f2b2f62691b01dc1aab978bddb3451af0d66
    # via -r binary_installer/requirements.in
k-diffusion @ https://github.com/invoke-ai/k-diffusion/archive/7f16b2c33411f26b3eae78d10648d625cb0c1095.zip \
    --hash=sha256:c3f2c84036aa98c3abf4552fafab04df5ca472aa639982795e05bb1db43ce5e4
    # via -r installer/requirements.in
kiwisolver==1.4.4 \
    --hash=sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b \
    --hash=sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166 \
@@ -1010,7 +1005,6 @@ numpy==1.23.4 \
    #   tifffile
    #   torch-fidelity
    #   torchmetrics
    #   torchsde
    #   torchvision
    #   transformers
oauthlib==3.2.2 \

@@ -1097,7 +1091,7 @@ pathtools==0.1.2 \
picklescan==0.0.5 \
    --hash=sha256:368cf1b9a075bc1b6460ad82b694f260532b836c82f99d13846cd36e1bbe7f9a \
    --hash=sha256:57153eca04d5df5009f2cdd595aef261b8a6f27e03046a1c84f672aa6869c592
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
pillow==9.3.0 \
    --hash=sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040 \
    --hash=sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8 \

@@ -1306,11 +1300,11 @@ pyparsing==3.0.9 \
    #   packaging
pypatchmatch @ https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip \
    --hash=sha256:4ad6ec95379e7d122d494ff76633cc7cf9b71330d5efda147fceba81e3dc6cd2
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
pyreadline3==3.4.1 \
    --hash=sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae \
    --hash=sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
pyrsistent==0.19.2 \
    --hash=sha256:055ab45d5911d7cae397dc418808d8802fb95262751872c841c170b0dbf51eed \
    --hash=sha256:111156137b2e71f3a9936baf27cb322e8024dac3dc54ec7fb9f0bcf3249e68bb \

@@ -1447,7 +1441,7 @@ qudida==0.0.4 \
realesrgan==0.3.0 \
    --hash=sha256:0d36da96ab9f447071606e91f502ccdfb08f80cc82ee4f8caf720c7745ccec7e \
    --hash=sha256:59336c16c30dd5130eff350dd27424acb9b7281d18a6810130e265606c9a6088
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
regex==2022.10.31 \
    --hash=sha256:052b670fafbe30966bbe5d025e90b2a491f85dfe5b2583a163b5e60a85a321ad \
    --hash=sha256:0653d012b3bf45f194e5e6a41df9258811ac8fc395579fa82958a8b76286bea4 \

@@ -1662,7 +1656,6 @@ scipy==1.9.3 \
    #   scikit-learn
    #   torch-fidelity
    #   torchdiffeq
    #   torchsde
semver==2.13.0 \
    --hash=sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4 \
    --hash=sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f

@@ -1670,7 +1663,7 @@ semver==2.13.0 \
send2trash==1.8.0 \
    --hash=sha256:d2c24762fd3759860a0aff155e45871447ea58d2be6bdd39b5c8f966a0c99c2d \
    --hash=sha256:f20eaadfdb517eaca5ce077640cb261c7d2698385a6a0f072a4a5447fd49fa08
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
sentry-sdk==1.10.1 \
    --hash=sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad \
    --hash=sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691

@@ -1761,11 +1754,11 @@ smmap==5.0.0 \
streamlit==1.14.0 \
    --hash=sha256:62556d873567e1b3427bcd118a57ee6946619f363bd6bba38df2d1f8225ecba0 \
    --hash=sha256:e078b8143d150ba721bdb9194218e311c5fe1d6d4156473a2dea6cc848a6c9fc
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
taming-transformers-rom1504==0.0.6 \
    --hash=sha256:051b5804c58caa247bcd51d17ddb525b4d5f892a29d42dc460f40e3e9e34e5d8 \
    --hash=sha256:73fe5fc1108accee4236ee6976e0987ab236afad0af06cb9f037641a908d2c32
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
tb-nightly==2.11.0a20221106 \
    --hash=sha256:8940457ee42db92f01da8bcdbbea1a476735eda559dde5976f5728919960af4a
    # via

@@ -1790,7 +1783,7 @@ tensorboard-plugin-wit==1.8.1 \
    #   tensorboard
test-tube==0.7.5 \
    --hash=sha256:1379c33eb8cde3e9b36610f87da0f16c2e06496b1cfebac473df4e7be2faa124
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
threadpoolctl==3.1.0 \
    --hash=sha256:8b99adda265feb6773280df41eece7b2e6561b772d21ffd52e372f999024907b \
    --hash=sha256:a335baacfaa4400ae1f0d8e3a58d6674d2f8828e3716bb2802c44955ad391380

@@ -1850,7 +1843,7 @@ torch==1.12.0+cu116 ; platform_system == "Linux" or platform_system == "Windows" \
    --hash=sha256:aa43d7b54b86f723f17c5c44df1078c59a6149fc4d42fbef08aafab9d61451c9 \
    --hash=sha256:f772be831447dd01ebd26cbedf619e668d1b269d69bf6b4ff46b1378362bff26
    # via
    #   -r binary_installer/requirements.in
    #   -r installer/requirements.in
    #   accelerate
    #   basicsr
    #   clean-fid

@@ -1866,12 +1859,11 @@ torch==1.12.0+cu116 ; platform_system == "Linux" or platform_system == "Windows" \
    #   torch-fidelity
    #   torchdiffeq
    #   torchmetrics
    #   torchsde
    #   torchvision
torch-fidelity==0.3.0 \
    --hash=sha256:3d3e33db98919759cc4f3f24cb27e1e74bdc7c905d90a780630e4e1c18492b66 \
    --hash=sha256:d01284825595feb7dc3eae3dc9a0d8ced02be764813a3483f109bc142b52a1d3
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
torchdiffeq==0.2.3 \
    --hash=sha256:b5b01ec1294a2d8d5f77e567bf17c5de1237c0573cb94deefa88326f0e18c338 \
    --hash=sha256:fe75f434b9090ac0c27702e02bed21472b0f87035be6581f51edc5d4013ea31a

@@ -1880,10 +1872,6 @@ torchmetrics==0.10.2 \
    --hash=sha256:43757d82266969906fc74b6e80766fcb2a0d52d6c3d09e3b7c98cf3b733fd20c \
    --hash=sha256:daa29d96bff5cff04d80eec5b9f5076993d6ac9c2d2163e88b6b31f8d38f7c25
    # via pytorch-lightning
torchsde==0.2.5 \
    --hash=sha256:222be9e15610d37a4b5a71cfa0c442178f9fd9ca02f6522a3e11c370b3d0906b \
    --hash=sha256:4c34373a94a357bdf60bbfee00c850f3563d634491555820b900c9a4f7eff300
    # via k-diffusion
torchvision==0.13.0+cu116 ; platform_system == "Linux" or platform_system == "Windows" \
    --hash=sha256:1696feadf1921c8fa1549bad774221293298288ebedaa14e44bc3e57e964a369 \
    --hash=sha256:572544b108eaf12638f3dca0f496a453c4b8d8256bcc8333d5355df641c0380c \

@@ -1894,7 +1882,7 @@ torchvision==0.13.0+cu116 ; platform_system == "Linux" or platform_system == "Windows" \
    --hash=sha256:cb6bf0117b8f4b601baeae54e8a6bb5c4942b054835ba997f438ddcb7adcfb90 \
    --hash=sha256:d1a3c124645e3460b3e50b54eb89a2575a5036bfa618f15dc4f5d635c716069d
    # via
    #   -r binary_installer/requirements.in
    #   -r installer/requirements.in
    #   basicsr
    #   clean-fid
    #   clip

@@ -1933,13 +1921,10 @@ tqdm==4.64.1 \
    #   taming-transformers-rom1504
    #   torch-fidelity
    #   transformers
trampoline==0.1.2 \
    --hash=sha256:36cc9a4ff9811843d177fc0e0740efbd7da39eadfe6e50c9e2937cbc06d899d9
    # via torchsde
transformers==4.24.0 \
    --hash=sha256:486f353a8e594002e48be0e2aba723d96eda839e63bfe274702a4b5eda85559b \
    --hash=sha256:b7ab50039ef9bf817eff14ab974f306fd20a72350bdc9df3a858fd009419322e
    # via -r binary_installer/requirements.in
    # via -r installer/requirements.in
typing-extensions==4.4.0 \
    --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \
    --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e
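Note: the ' ; platform_system == ... ' suffixes above are PEP 508 environment
markers; pip evaluates them against the installing machine, so a single pinned
file can select CUDA wheels on Linux/Windows and plain wheels on macOS. To
inspect the value pip compares against (assuming the "packaging" library is
installed; not part of the diff):

  python -c "from packaging.markers import default_environment; print(default_environment()['platform_system'])"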
@@ -4,7 +4,6 @@
#
# pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-windows-x86_64-cuda-reqs.txt installer/requirements.in
#
--extra-index-url https://download.pytorch.org/whl/torch_stable.html
--extra-index-url https://download.pytorch.org/whl/cu116
--trusted-host https
@@ -151,10 +150,6 @@ blinker==1.5 \
    --hash=sha256:1eb563df6fdbc39eeddc177d953203f99f097e9bf0e2b8f9f3cf18b6ca425e36 \
    --hash=sha256:923e5e2f69c155f2cc42dafbbd70e16e3fde24d2d4aa2ab72fbe386238892462
    # via streamlit
boltons==21.0.0 \
    --hash=sha256:65e70a79a731a7fe6e98592ecfb5ccf2115873d01dbc576079874629e5c90f13 \
    --hash=sha256:b9bb7b58b2b420bbe11a6025fdef6d3e5edc9f76a42fb467afe7ca212ef9948b
    # via torchsde
cachetools==5.2.0 \
    --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \
    --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db

@@ -619,8 +614,8 @@ jsonschema==4.17.0 \
    # via
    #   altair
    #   jsonmerge
k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip \
    --hash=sha256:8eac5cdc08736e6d61908a1b2948f2b2f62691b01dc1aab978bddb3451af0d66
k-diffusion @ https://github.com/invoke-ai/k-diffusion/archive/7f16b2c33411f26b3eae78d10648d625cb0c1095.zip \
    --hash=sha256:c3f2c84036aa98c3abf4552fafab04df5ca472aa639982795e05bb1db43ce5e4
    # via -r installer/requirements.in
kiwisolver==1.4.4 \
    --hash=sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b \

@@ -1014,7 +1009,6 @@ numpy==1.23.4 \
    #   tifffile
    #   torch-fidelity
    #   torchmetrics
    #   torchsde
    #   torchvision
    #   transformers
oauthlib==3.2.2 \

@@ -1666,7 +1660,6 @@ scipy==1.9.3 \
    #   scikit-learn
    #   torch-fidelity
    #   torchdiffeq
    #   torchsde
semver==2.13.0 \
    --hash=sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4 \
    --hash=sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f

@@ -1870,7 +1863,6 @@ torch==1.12.0+cu116 ; platform_system == "Linux" or platform_system == "Windows" \
    #   torch-fidelity
    #   torchdiffeq
    #   torchmetrics
    #   torchsde
    #   torchvision
torch-fidelity==0.3.0 \
    --hash=sha256:3d3e33db98919759cc4f3f24cb27e1e74bdc7c905d90a780630e4e1c18492b66 \

@@ -1884,10 +1876,6 @@ torchmetrics==0.10.2 \
    --hash=sha256:43757d82266969906fc74b6e80766fcb2a0d52d6c3d09e3b7c98cf3b733fd20c \
    --hash=sha256:daa29d96bff5cff04d80eec5b9f5076993d6ac9c2d2163e88b6b31f8d38f7c25
    # via pytorch-lightning
torchsde==0.2.5 \
    --hash=sha256:222be9e15610d37a4b5a71cfa0c442178f9fd9ca02f6522a3e11c370b3d0906b \
    --hash=sha256:4c34373a94a357bdf60bbfee00c850f3563d634491555820b900c9a4f7eff300
    # via k-diffusion
torchvision==0.13.0+cu116 ; platform_system == "Linux" or platform_system == "Windows" \
    --hash=sha256:1696feadf1921c8fa1549bad774221293298288ebedaa14e44bc3e57e964a369 \
    --hash=sha256:572544b108eaf12638f3dca0f496a453c4b8d8256bcc8333d5355df641c0380c \

@@ -1937,9 +1925,6 @@ tqdm==4.64.1 \
    #   taming-transformers-rom1504
    #   torch-fidelity
    #   transformers
trampoline==0.1.2 \
    --hash=sha256:36cc9a4ff9811843d177fc0e0740efbd7da39eadfe6e50c9e2937cbc06d899d9
    # via torchsde
transformers==4.24.0 \
    --hash=sha256:486f353a8e594002e48be0e2aba723d96eda839e63bfe274702a4b5eda85559b \
    --hash=sha256:b7ab50039ef9bf817eff14ab974f306fd20a72350bdc9df3a858fd009419322e
@@ -2,51 +2,16 @@ InvokeAI

Project homepage: https://github.com/invoke-ai/InvokeAI

Preparations:
Installation on Windows:
NOTE: You might need to enable Windows Long Paths. If you're not sure,
then you almost certainly need to. Simply double-click the 'WinLongPathsEnabled.reg'
file. Note that you will need to have admin privileges in order to
do this.

You will need to install Python 3.9 or higher for this installer
to work. Instructions are given here:
https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).

Before you start the installer, please open up your system's command
line window (Terminal or Command) and type the commands:
Installation on Linux and Mac:
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).

python --version

If all is well, it will print "Python 3.X.X", where the version number
is at least 3.9.1

If this works, check the version of the Python package manager, pip:

pip --version

You should get a message that indicates that the pip package
installer was derived from Python 3.9 or 3.10. For example:
"pip 22.3.1 from /usr/bin/pip (python 3.9)"

Long Paths on Windows:

If you are on Windows, you will need to enable Windows Long Paths to
run InvokeAI successfully. If you're not sure what this is, you
almost certainly need to do this.

Simply double-click the "WinLongPathsEnabled.reg" file located in
this directory, and approve the Windows warnings. Note that you will
need to have admin privileges in order to do this.

Launching the installer:

Windows: double-click the 'install.bat' file (while keeping it inside
the InvokeAI-Installer folder).

Linux and Mac: Please open the terminal application and run
'./install.sh' (while keeping it inside the InvokeAI-Installer
folder).

The installer will create a directory named "invokeai" in the folder
of your choice. This directory contains everything you need to run
invokeai. Once InvokeAI is up and running, you may delete the
InvokeAI-Installer folder at your convenience.

For more information, please see
https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh'
file (on Linux/Mac) to start InvokeAI.
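
The interpreter check the instructions describe can also be scripted. A
minimal sketch (not part of the installer itself) that enforces the same
Python 3.9 floor:

import sys

# refuse to continue on interpreters older than 3.9
if sys.version_info < (3, 9):
    raise SystemExit(f"Python 3.9+ is required, found {sys.version.split()[0]}")
print(f"OK: Python {sys.version.split()[0]}")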
@@ -1,6 +1,5 @@
--prefer-binary
--extra-index-url https://download.pytorch.org/whl/torch_stable.html
--extra-index-url https://download.pytorch.org/whl/cu116
--trusted-host https://download.pytorch.org
accelerate~=0.14
albumentations
@@ -8,7 +7,7 @@ diffusers
eventlet
flask_cors
flask_socketio
flaskwebgui==1.0.3
flaskwebgui
getpass_asterisk
imageio-ffmpeg
pyreadline3
@@ -26,7 +25,6 @@ transformers
picklescan
https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip
https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip
https://github.com/invoke-ai/GFPGAN/archive/3f5d2397361199bc4a91c08bb7d80f04d7805615.zip ; platform_system=='Windows'
https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system=='Linux' or platform_system=='Darwin'
https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip
https://github.com/TencentARC/GFPGAN/archive/2eac2033893ca7f427f4035d80fe95b92649ac56.zip
https://github.com/invoke-ai/k-diffusion/archive/7f16b2c33411f26b3eae78d10648d625cb0c1095.zip
https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip
@@ -1,37 +0,0 @@
@echo off

PUSHD "%~dp0"
setlocal

call .venv\Scripts\activate.bat
set INVOKEAI_ROOT=.

echo Do you want to generate images using the
echo 1. command-line
echo 2. browser-based UI
echo 3. open the developer console
set /P restore="Please enter 1, 2 or 3: "
IF /I "%restore%" == "1" (
    echo Starting the InvokeAI command-line...
    python .venv\Scripts\invoke.py %*
) ELSE IF /I "%restore%" == "2" (
    echo Starting the InvokeAI browser-based UI...
    python .venv\Scripts\invoke.py --web %*
) ELSE IF /I "%restore%" == "3" (
    echo Developer Console
    echo Python command is:
    where python
    echo Python version is:
    python --version
    echo *************************
    echo You are now in the system shell, with the local InvokeAI Python virtual environment activated,
    echo so that you can troubleshoot this InvokeAI installation as necessary.
    echo *************************
    echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
    call cmd /k
) ELSE (
    echo Invalid selection
    pause
    exit /b
)
endlocal
@@ -1,52 +0,0 @@
@echo off
setlocal EnableExtensions EnableDelayedExpansion

PUSHD "%~dp0"

set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
set arg=%1
if "%arg%" neq "" (
    if "%arg:~0,4%" neq "http" (
        echo Usage: update.bat ^<release URL^>.zip
        echo Updates InvokeAI to use the indicated version of the code base.
        echo Find the zip file for the release you want, and pass it as the argument.
        echo For example: update.bat https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
        echo.
        echo If no argument is provided, the most recent development version will be installed, equivalent to
        echo update.bat https://github.com/invoke-ai/InvokeAI/archive/main.zip
        exit /b
    ) else (
        set INVOKE_AI_SRC=%arg%
    )
)

call .venv\Scripts\activate.bat

echo This script will update InvokeAI and all its dependencies to !INVOKE_AI_SRC!.
echo If you do not want to do this, press control-C now!
pause

call pip install -r requirements.txt
if %errorlevel% neq 0 (
    echo Installation of requirements failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
    exit /b
)

call pip install !INVOKE_AI_SRC!
if %errorlevel% neq 0 (
    echo Installation of InvokeAI failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
    exit /b
)

call .venv\Scripts\python .venv\Scripts\configure_invokeai.py --root="%rootdir%"

if %errorlevel% neq 0 (
    echo Configuration of InvokeAI failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
    exit /b
)

echo "Press any key to continue"
pause
endlocal
@@ -1,52 +0,0 @@
#!/bin/bash

set -eu

if [ $# -ge 1 ] && [ "${1:0:4}" != "http" ]; then
    echo "Usage: update.sh <release URL>.zip"
    echo "Updates InvokeAI to use the indicated version of the code base."
    echo "Find the zip file for the release you want, and pass it as the argument."
    echo "For example: update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.3.zip"
    echo ""
    echo "If no argument is provided, the most recent development version will be installed, equivalent to"
    echo "update.sh https://github.com/invoke-ai/InvokeAI/archive/main.zip"
    exit -1
fi

INVOKE_AI_SRC=${1:-https://github.com/invoke-ai/InvokeAI/archive/main.zip}

# ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"

function _err_exit {
    if test "$1" -ne 0
    then
        echo "Something went wrong while installing InvokeAI and/or its requirements."
        echo "Update cannot continue. Please report this error to https://github.com/invoke-ai/InvokeAI/issues"
        echo -e "Error code $1; Error caught was '$2'"
        read -p "Press any key to exit..."
        exit
    fi
}

echo This script will update InvokeAI and all its dependencies from $INVOKE_AI_SRC.
echo If you do not want to do this, press control-C now!
read -p "Press any key to continue, or CTRL-C to exit..."

. .venv/bin/activate

pip install -r requirements.txt
_err_exit $? "The pip program failed to install InvokeAI's requirements."

pip install $INVOKE_AI_SRC
_err_exit $? "The pip program failed to install InvokeAI."

python .venv/bin/configure_invoke.py
_err_exit $? "The configure script failed to run successfully."
@@ -20,8 +20,6 @@ import cv2
import skimage

from omegaconf import OmegaConf

import ldm.invoke.conditioning
from ldm.invoke.generator.base import downsampling
from PIL import Image, ImageOps
from torch import nn
@@ -42,7 +40,7 @@ from ldm.invoke.model_cache import ModelCache
from ldm.invoke.seamless import configure_model_padding
from ldm.invoke.txt2mask import Txt2Mask, SegmentedGrayscale
from ldm.invoke.concepts_lib import Concepts


def fix_func(orig):
    if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
        def new_func(*args, **kw):
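
The hunk above is cut off mid-definition. The monkey-patch pattern it belongs
to looks roughly like the sketch below; everything past the visible lines is
an assumption. The idea: on Apple MPS, wrap selected torch factory functions
so tensors are created on CPU (where the op is implemented) and then moved to
the requested device.

import torch

def fix_func(orig):
    if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
        def new_func(*args, **kw):
            device = kw.pop('device', 'mps')   # remember the requested device
            kw['device'] = 'cpu'               # create the tensor on CPU instead
            return orig(*args, **kw).to(device)
        return new_func
    return orig

torch.rand = fix_func(torch.rand)  # illustrative application, not from the diff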
@@ -131,6 +129,7 @@ gr = Generate(

"""


class Generate:
    """Generate class
    Stores default values for multiple configuration items
@@ -236,7 +235,7 @@ class Generate:
        except Exception:
            print('** An error was encountered while installing the safety checker:')
            print(traceback.format_exc())

    def prompt2png(self, prompt, outdir, **kwargs):
        """
        Takes a prompt and an output directory, writes out the requested number
@@ -330,7 +329,7 @@ class Generate:
            infill_method = infill_methods[0], # The infill method to use
            force_outpaint: bool = False,
            enable_image_debugging = False,

            **args,
    ):  # eat up additional cruft
        """
@@ -373,7 +372,7 @@ class Generate:
        def process_image(image,seed):
            image.save(f'images/{seed}.png')

        The code used to save images to a directory can be found in ldm/invoke/pngwriter.py.
        It contains code to create the requested output directory, select a unique informative
        name for each image, and write the prompt into the PNG metadata.
        """
@@ -453,15 +452,10 @@ class Generate:
        init_image = None
        mask_image = None

        if self.free_gpu_mem and self.model.cond_stage_model.device != self.model.device:
            self.model.cond_stage_model.device = self.model.device
            self.model.cond_stage_model.to(self.model.device)

        try:
            uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
                prompt, model =self.model,
                skip_normalize_legacy_blend=skip_normalize,
                skip_normalize=skip_normalize,
                log_tokens =self.log_tokenization
            )

@@ -595,7 +589,7 @@ class Generate:
        seed = opt.seed or args.seed
        if seed is None or seed < 0:
            seed = random.randrange(0, np.iinfo(np.uint32).max)

        prompt = opt.prompt or args.prompt or ''
        print(f'>> using seed {seed} and prompt "{prompt}" for {image_path}')

@@ -613,8 +607,8 @@ class Generate:
        # todo: cross-attention control
        uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
            prompt, model =self.model,
            skip_normalize_legacy_blend=opt.skip_normalize,
            log_tokens =ldm.invoke.conditioning.log_tokenization
            skip_normalize=opt.skip_normalize,
            log_tokens =opt.log_tokenization
        )

        if tool in ('gfpgan','codeformer','upscale'):
@@ -647,7 +641,7 @@ class Generate:

        opt.seed = seed
        opt.prompt = prompt

        if len(extend_instructions) > 0:
            restorer = Outcrop(image,self,)
            return restorer.process (
@@ -689,7 +683,7 @@ class Generate:
                image_callback = callback,
                prefix = prefix
            )

        elif tool is None:
            print('* please provide at least one postprocessing option, such as -G or -U')
            return None
@@ -712,13 +706,13 @@ class Generate:

        if embiggen is not None:
            return self._make_embiggen()

        if inpainting_model_in_use:
            return self._make_omnibus()

        if ((init_image is not None) and (mask_image is not None)) or force_outpaint:
            return self._make_inpaint()

        if init_image is not None:
            return self._make_img2img()

@@ -749,7 +743,7 @@ class Generate:
        if self._has_transparency(image):
            self._transparency_check_and_warning(image, mask, force_outpaint)
            init_mask = self._create_init_mask(image, width, height, fit=fit)

        if (image.width * image.height) > (self.width * self.height) and self.size_matters:
            print(">> This input is larger than your defaults. If you run out of memory, please use a smaller image.")
            self.size_matters = False
@@ -763,9 +757,9 @@ class Generate:
        elif text_mask:
            init_mask = self._txt2mask(image, text_mask, width, height, fit=fit)

        if init_mask and invert_mask:
        if invert_mask:
            init_mask = ImageOps.invert(init_mask)

        return init_image,init_mask

    # lots o' repeated code here! Turn into a make_func()
@@ -824,7 +818,7 @@ class Generate:
        self.set_model(self.model_name)

    def set_model(self,model_name):
        """
        Given the name of a model defined in models.yaml, will load and initialize it
        and return the model object. Previously-used models will be cached.
        """
@@ -836,7 +830,7 @@ class Generate:
        if not cache.valid_model(model_name):
            print(f'** "{model_name}" is not a known model name. Please check your models.yaml file')
            return self.model

        cache.print_vram_usage()

        # have to get rid of all references to model in order
@@ -845,7 +839,7 @@ class Generate:
        self.sampler = None
        self.generators = {}
        gc.collect()

        model_data = cache.get_model(model_name)
        if model_data is None: # restore previous
            model_data = cache.get_model(self.model_name)
@@ -858,7 +852,7 @@ class Generate:

        # uncache generators so they pick up new models
        self.generators = {}

        seed_everything(random.randrange(0, np.iinfo(np.uint32).max))
        if self.embedding_path is not None:
            self.model.embedding_manager.load(
@@ -907,7 +901,7 @@ class Generate:
        image_callback = None,
        prefix = None,
    ):

        for r in image_list:
            image, seed = r
            try:
@@ -917,7 +911,7 @@ class Generate:
                if self.gfpgan is None:
                    print('>> GFPGAN not found. Face restoration is disabled.')
                else:
                    image = self.gfpgan.process(image, strength, seed)
            if facetool == 'codeformer':
                if self.codeformer is None:
                    print('>> CodeFormer not found. Face restoration is disabled.')
@@ -8,7 +8,6 @@ import time
import traceback
import yaml

from ldm.generate import Generate
from ldm.invoke.globals import Globals
from ldm.invoke.prompt_parser import PromptParser
from ldm.invoke.readline import get_completer, Completer
@@ -28,7 +27,7 @@ def main():
    """Initialize command-line parsers and the diffusion model"""
    global infile
    print('* Initializing, be patient...')

    opt = Args()
    args = opt.parse_args()
    if not args:
@@ -46,8 +45,9 @@ def main():
        args.max_loaded_models = 1

    # alert - setting globals here
    Globals.root = os.path.expanduser(args.root_dir or os.environ.get('INVOKEAI_ROOT') or os.path.abspath('.'))
    Globals.try_patchmatch = args.patchmatch

    print(f'>> InvokeAI runtime directory is "{Globals.root}"')

    # loading here to avoid long delays on startup
@@ -68,8 +68,6 @@ def main():
    if opt.embeddings:
        if not os.path.isabs(opt.embedding_path):
            embedding_path = os.path.normpath(os.path.join(Globals.root,opt.embedding_path))
        else:
            embedding_path = opt.embedding_path
    else:
        embedding_path = None

@@ -281,7 +279,7 @@ def main_loop(gen, opt):
        prefix = file_writer.unique_prefix()
        step_callback = make_step_callback(gen, opt, prefix) if opt.save_intermediates > 0 else None

        def image_writer(image, seed, upscaled=False, first_seed=None, use_prefix=None, prompt_in=None, attention_maps_image=None):
        def image_writer(image, seed, upscaled=False, first_seed=None, use_prefix=None):
            # note the seed is the seed of the current image
            # the first_seed is the original seed that noise is added to
            # when the -v switch is used to generate variations
@@ -310,7 +308,7 @@ def main_loop(gen, opt):
            if use_prefix is not None:
                prefix = use_prefix
            postprocessed = upscaled if upscaled else operation=='postprocess'
            opt.prompt = gen.concept_lib().replace_triggers_with_concepts(opt.prompt or prompt_in) # to avoid the problem of non-unique concept triggers
            opt.prompt = gen.concept_lib().replace_triggers_with_concepts(opt.prompt) # to avoid the problem of non-unique concept triggers
            filename, formatted_dream_prompt = prepare_image_metadata(
                opt,
                prefix,
@@ -341,8 +339,8 @@ def main_loop(gen, opt):
                    filename,
                    tool,
                    formatted_dream_prompt,
                )

            )

            if (not postprocessed) or opt.save_original:
                # only append to results if we didn't overwrite an earlier output
                results.append([path, formatted_dream_prompt])
@@ -432,7 +430,7 @@ def do_command(command:str, gen, opt:Args, completer) -> tuple:
        add_embedding_terms(gen, completer)
        completer.add_history(command)
        operation = None

    elif command.startswith('!models'):
        gen.model_cache.print_models()
        completer.add_history(command)
@@ -533,7 +531,7 @@ def add_weights_to_config(model_path:str, gen, opt, completer):

    completer.complete_extensions(('.yaml','.yml'))
    completer.linebuffer = 'configs/stable-diffusion/v1-inference.yaml'

    done = False
    while not done:
        new_config['config'] = input('Configuration file for this model: ')
@@ -564,7 +562,7 @@ def add_weights_to_config(model_path:str, gen, opt, completer):
            print('** Please enter a valid integer between 64 and 2048')

    make_default = input('Make this the default model? [n] ') in ('y','Y')

    if write_config_file(opt.conf, gen, model_name, new_config, make_default=make_default):
        completer.add_model(model_name)

@@ -577,14 +575,14 @@ def del_config(model_name:str, gen, opt, completer):
    gen.model_cache.commit(opt.conf)
    print(f'** {model_name} deleted')
    completer.del_model(model_name)

def edit_config(model_name:str, gen, opt, completer):
    config = gen.model_cache.config

    if model_name not in config:
        print(f'** Unknown model {model_name}')
        return

    print(f'\n>> Editing model {model_name} from configuration file {opt.conf}')

    conf = config[model_name]
@@ -597,10 +595,10 @@ def edit_config(model_name:str, gen, opt, completer):
    make_default = input('Make this the default model? [n] ') in ('y','Y')
    completer.complete_extensions(None)
    write_config_file(opt.conf, gen, model_name, new_config, clobber=True, make_default=make_default)

def write_config_file(conf_path, gen, model_name, new_config, clobber=False, make_default=False):
    current_model = gen.model_name

    op = 'modify' if clobber else 'import'
    print('\n>> New configuration:')
    if make_default:
@@ -623,7 +621,7 @@ def write_config_file(conf_path, gen, model_name, new_config, clobber=False, mak
        gen.model_cache.set_default_model(model_name)

    gen.model_cache.commit(conf_path)

    do_switch = input('Keep model loaded? [y]')
    if len(do_switch)==0 or do_switch[0] in ('y','Y'):
        pass
@@ -653,7 +651,7 @@ def do_postprocess (gen, opt, callback):
        opt.prompt = opt.new_prompt
    else:
        opt.prompt = None

    if os.path.dirname(file_path) == '': #basename given
        file_path = os.path.join(opt.outdir,file_path)

@@ -718,7 +716,7 @@ def add_postprocessing_to_metadata(opt,original_file,new_file,tool,command):
    )
    meta['image']['postprocessing'] = pp
    write_metadata(new_file,meta)

def prepare_image_metadata(
    opt,
    prefix,
@@ -789,28 +787,28 @@ def get_next_command(infile=None) -> str: # command string
        print(f'#{command}')
    return command

def invoke_ai_web_server_loop(gen: Generate, gfpgan, codeformer, esrgan):
def invoke_ai_web_server_loop(gen, gfpgan, codeformer, esrgan):
    print('\n* --web was specified, starting web server...')
    from backend.invoke_ai_web_server import InvokeAIWebServer
    # Change working directory to the stable-diffusion directory
    os.chdir(
        os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    )

    invoke_ai_web_server = InvokeAIWebServer(generate=gen, gfpgan=gfpgan, codeformer=codeformer, esrgan=esrgan)

    try:
        invoke_ai_web_server.run()
    except KeyboardInterrupt:
        pass

def add_embedding_terms(gen,completer):
    '''
    Called after setting the model, updates the autocompleter with
    any terms loaded by the embedding manager.
    '''
    completer.add_embedding_terms(gen.model.embedding_manager.list_terms())

def split_variations(variations_string) -> list:
    # shotgun parsing, woo
    parts = []
@@ -867,7 +865,7 @@ def make_step_callback(gen, opt, prefix):
        image = gen.sample_to_image(img)
        image.save(filename,'PNG')
    return callback

def retrieve_dream_command(opt,command,completer):
    '''
    Given a full or partial path to a previously-generated image file,
@@ -875,7 +873,7 @@ def retrieve_dream_command(opt,command,completer):
    and pop it into the readline buffer (linux, Mac), or print out a comment
    for cut-and-paste (windows)

    Given a wildcard path to a folder with image png files,
    will retrieve and format the dream command used to generate the images,
    and save them to a file commands.txt for further processing
    '''
@@ -911,7 +909,7 @@ def write_commands(opt, file_path:str, outfilepath:str):
    except ValueError:
        print(f'## "{basename}": unacceptable pattern')
        return

    commands = []
    cmd = None
    for path in paths:
@@ -940,7 +938,7 @@ def emergency_model_reconfigure():
    print(' After reconfiguration is done, please relaunch invoke.py. ')
    print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
    print('configure_invokeai is launching....\n')

    sys.argv = ['configure_invokeai','--interactive']
    import configure_invokeai
    configure_invokeai.main()
@@ -1 +0,0 @@
__version__='2.2.4'
@@ -119,7 +119,7 @@ PRECISION_CHOICES = [

# is there a way to pick this up during git commits?
APP_ID = 'invoke-ai/InvokeAI'
APP_VERSION = 'v2.2.4'
APP_VERSION = 'v2.2.0'

class ArgFormatter(argparse.RawTextHelpFormatter):
    # use defined argument order to display usage
@@ -172,20 +172,14 @@ class Args(object):
        '''Parse the shell switches and store.'''
        try:
            sysargs = sys.argv[1:]
            # pre-parse to get the root directory; ignore the rest
            switches = self._arg_parser.parse_args(sysargs)
            Globals.root = os.path.abspath(switches.root_dir or Globals.root)

            # now use root directory to find the init file
            initfile = os.path.expanduser(os.path.join(Globals.root,Globals.initfile))
            legacyinit = os.path.expanduser('~/.invokeai')
            initfile = os.path.expanduser(Globals.initfile)
            if os.path.exists(initfile):
                print(f'>> Initialization file {initfile} found. Loading...')
                sysargs.insert(0,f'@{initfile}')
            elif os.path.exists(legacyinit):
                print(f'>> WARNING: Old initialization file found at {legacyinit}. This location is deprecated. Please move it to {Globals.root}/invokeai.init.')
                sysargs.insert(0,f'@{legacyinit}')

            else:
                from ldm.invoke.CLI import emergency_model_reconfigure
                emergency_model_reconfigure()
                sys.exit(-1)
            self._arg_switches = self._arg_parser.parse_args(sysargs)
            return self._arg_switches
        except Exception as e:
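
The '@' insertion above relies on argparse's file-prefix feature. The sketch
below shows the mechanism in isolation; the file name and switch are
illustrative, not taken from InvokeAI:

import argparse

# fromfile_prefix_chars makes argparse expand '@<path>' into the arguments
# read from that file, one argument per line
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument('--steps', type=int, default=10)

# Given a file 'demo.init' containing the line "--steps=50", prepending
# '@demo.init' loads it as a default, and later switches still win:
#   args = parser.parse_args(['@demo.init', '--steps', '30'])  # steps == 30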
@@ -417,7 +411,7 @@ class Args(object):
        model_group.add_argument(
            '--root_dir',
            default=None,
            help='Path to directory containing "models", "outputs" and "configs". If not present will read from environment variable INVOKEAI_ROOT. Defaults to ~/invokeai.',
            help='Path to directory containing "models", "outputs" and "configs". If not present will try to read from ~/.invokeai and then from environment variable INVOKEAI_ROOT. Defaults to the current directory as a last resort.',
        )
        model_group.add_argument(
            '--config',
@@ -36,7 +36,7 @@ class Concepts(object):
            models = self.hf_api.list_models(filter=ModelFilter(model_name='sd-concepts-library/'))
            self.concept_list = [a.id.split('/')[1] for a in models]
        except Exception as e:
            print(f' ** WARNING: Hugging Face textual inversion concepts libraries could not be loaded. The error was {str(e)}.')
            print(' ** WARNING: Hugging Face textual inversion concepts libraries could not be loaded. The error was {str(e)}.')
            print(' ** You may load .bin and .pt file(s) manually using the --embedding_directory argument.')
        return self.concept_list
@@ -7,46 +7,20 @@ get_uc_and_c_and_ec() get the conditioned and unconditioned latent, an

'''
import re
from difflib import SequenceMatcher
from typing import Union

import torch

from .prompt_parser import PromptParser, Blend, FlattenedPrompt, \
    CrossAttentionControlledFragment, CrossAttentionControlSubstitute, Fragment
    CrossAttentionControlledFragment, CrossAttentionControlSubstitute, Fragment, log_tokenization
from ..models.diffusion import cross_attention_control
from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
from ..modules.encoders.modules import WeightedFrozenCLIPEmbedder


def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False):
    prompt, negative_prompt = get_prompt_structure(prompt_string,
                                                   skip_normalize_legacy_blend=skip_normalize_legacy_blend)
    conditioning = _get_conditioning_for_prompt(prompt, negative_prompt, model, log_tokens)
def get_uc_and_c_and_ec(prompt_string_uncleaned, model, log_tokens=False, skip_normalize=False):

    return conditioning


def get_prompt_structure(prompt_string, skip_normalize_legacy_blend: bool = False) -> (
        Union[FlattenedPrompt, Blend], FlattenedPrompt):
    """
    parse the passed-in prompt string and return tuple (positive_prompt, negative_prompt)
    """
    prompt, negative_prompt = _parse_prompt_string(prompt_string,
                                                   skip_normalize_legacy_blend=skip_normalize_legacy_blend)
    return prompt, negative_prompt


def get_tokens_for_prompt(model, parsed_prompt: FlattenedPrompt) -> [str]:
    text_fragments = [x.text if type(x) is Fragment else
                      (" ".join([f.text for f in x.original]) if type(x) is CrossAttentionControlSubstitute else
                       str(x))
                      for x in parsed_prompt.children]
    text = " ".join(text_fragments)
    tokens = model.cond_stage_model.tokenizer.tokenize(text)
    return tokens


def _parse_prompt_string(prompt_string_uncleaned, skip_normalize_legacy_blend=False) -> Union[FlattenedPrompt, Blend]:
    # Extract Unconditioned Words From Prompt
    unconditioned_words = ''
    unconditional_regex = r'\[(.*?)\]'
@@ -65,7 +39,7 @@ def _parse_prompt_string(prompt_string_uncleaned, skip_normalize_legacy_blend=Fa
    pp = PromptParser()

    parsed_prompt: Union[FlattenedPrompt, Blend] = None
    legacy_blend: Blend = pp.parse_legacy_blend(prompt_string_cleaned, skip_normalize_legacy_blend)
    legacy_blend: Blend = pp.parse_legacy_blend(prompt_string_cleaned)
    if legacy_blend is not None:
        parsed_prompt = legacy_blend
    else:
@@ -73,150 +47,118 @@ def _parse_prompt_string(prompt_string_uncleaned, skip_normalize_legacy_blend=Fa
        parsed_prompt = pp.parse_conjunction(prompt_string_cleaned).prompts[0]

    parsed_negative_prompt: FlattenedPrompt = pp.parse_conjunction(unconditioned_words).prompts[0]
    return parsed_prompt, parsed_negative_prompt


def _get_conditioning_for_prompt(parsed_prompt: Union[Blend, FlattenedPrompt], parsed_negative_prompt: FlattenedPrompt,
                                 model, log_tokens=False) \
        -> tuple[torch.Tensor, torch.Tensor, InvokeAIDiffuserComponent.ExtraConditioningInfo]:
    """
    Process prompt structure and tokens, and return (conditioning, unconditioning, extra_conditioning_info)
    """

    if log_tokens:
        print(f">> Parsed prompt to {parsed_prompt}")
        print(f">> Parsed negative prompt to {parsed_negative_prompt}")

    conditioning = None
    cac_args: cross_attention_control.Arguments = None
    cac_args:cross_attention_control.Arguments = None

    if type(parsed_prompt) is Blend:
        conditioning = _get_conditioning_for_blend(model, parsed_prompt, log_tokens)
    elif type(parsed_prompt) is FlattenedPrompt:
        if parsed_prompt.wants_cross_attention_control:
            conditioning, cac_args = _get_conditioning_for_cross_attention_control(model, parsed_prompt, log_tokens)

        else:
            conditioning, _ = _get_embeddings_and_tokens_for_prompt(model,
                                                                    parsed_prompt,
                                                                    log_tokens=log_tokens,
                                                                    log_display_label="(prompt)")
        blend: Blend = parsed_prompt
        embeddings_to_blend = None
        for i,flattened_prompt in enumerate(blend.prompts):
            this_embedding, _ = build_embeddings_and_tokens_for_flattened_prompt(model,
                                                                                 flattened_prompt,
                                                                                 log_tokens=log_tokens,
                                                                                 log_display_label=f"(blend part {i+1}, weight={blend.weights[i]})" )
            embeddings_to_blend = this_embedding if embeddings_to_blend is None else torch.cat(
                (embeddings_to_blend, this_embedding))
        conditioning = WeightedFrozenCLIPEmbedder.apply_embedding_weights(embeddings_to_blend.unsqueeze(0),
                                                                          blend.weights,
                                                                          normalize=blend.normalize_weights)
    else:
        raise ValueError(f"parsed_prompt is '{type(parsed_prompt)}' which is not a supported prompt type")
    flattened_prompt: FlattenedPrompt = parsed_prompt
    wants_cross_attention_control = type(flattened_prompt) is not Blend \
        and any([issubclass(type(x), CrossAttentionControlledFragment) for x in flattened_prompt.children])
    if wants_cross_attention_control:
        original_prompt = FlattenedPrompt()
        edited_prompt = FlattenedPrompt()
        # for name, a0, a1, b0, b1 in edit_opcodes: only name == 'equal' is currently parsed
        original_token_count = 0
        edited_token_count = 0
        edit_opcodes = []
        edit_options = []
        for fragment in flattened_prompt.children:
            if type(fragment) is CrossAttentionControlSubstitute:
                original_prompt.append(fragment.original)
                edited_prompt.append(fragment.edited)

    unconditioning, _ = _get_embeddings_and_tokens_for_prompt(model,
                                                              parsed_negative_prompt,
                                                              log_tokens=log_tokens,
                                                              log_display_label="(unconditioning)")
                to_replace_token_count = get_tokens_length(model, fragment.original)
                replacement_token_count = get_tokens_length(model, fragment.edited)
                edit_opcodes.append(('replace',
                                     original_token_count, original_token_count + to_replace_token_count,
                                     edited_token_count, edited_token_count + replacement_token_count
                                     ))
                original_token_count += to_replace_token_count
                edited_token_count += replacement_token_count
                edit_options.append(fragment.options)
            #elif type(fragment) is CrossAttentionControlAppend:
            #    edited_prompt.append(fragment.fragment)
            else:
                # regular fragment
                original_prompt.append(fragment)
                edited_prompt.append(fragment)

                count = get_tokens_length(model, [fragment])
                edit_opcodes.append(('equal', original_token_count, original_token_count+count, edited_token_count, edited_token_count+count))
                edit_options.append(None)
                original_token_count += count
                edited_token_count += count
        original_embeddings, original_tokens = build_embeddings_and_tokens_for_flattened_prompt(model,
                                                                                                original_prompt,
                                                                                                log_tokens=log_tokens,
                                                                                                log_display_label="(.swap originals)")
        # naïvely building a single edited_embeddings like this disregards the effects of changing the absolute location of
        # subsequent tokens when there is >1 edit and earlier edits change the total token count.
        # eg "a cat.swap(smiling dog, s_start=0.5) eating a hotdog.swap(pizza)" - when the 'pizza' edit is active but the
        # 'cat' edit is not, the 'pizza' feature vector will nevertheless be affected by the introduction of the extra
        # token 'smiling' in the inactive 'cat' edit.
        # todo: build multiple edited_embeddings, one for each edit, and pass just the edited fragments through to the CrossAttentionControl functions
        edited_embeddings, edited_tokens = build_embeddings_and_tokens_for_flattened_prompt(model,
                                                                                            edited_prompt,
                                                                                            log_tokens=log_tokens,
                                                                                            log_display_label="(.swap replacements)")

        conditioning = original_embeddings
        edited_conditioning = edited_embeddings
        #print('>> got edit_opcodes', edit_opcodes, 'options', edit_options)
        cac_args = cross_attention_control.Arguments(
            edited_conditioning = edited_conditioning,
            edit_opcodes = edit_opcodes,
            edit_options = edit_options
        )
    else:
        conditioning, _ = build_embeddings_and_tokens_for_flattened_prompt(model,
                                                                           flattened_prompt,
                                                                           log_tokens=log_tokens,
                                                                           log_display_label="(prompt)")

    unconditioning, _ = build_embeddings_and_tokens_for_flattened_prompt(model,
                                                                         parsed_negative_prompt,
                                                                         log_tokens=log_tokens,
                                                                         log_display_label="(unconditioning)")
    if isinstance(conditioning, dict):
        # hybrid conditioning is in play
        unconditioning, conditioning = _flatten_hybrid_conditioning(unconditioning, conditioning)
        unconditioning, conditioning = flatten_hybrid_conditioning(unconditioning, conditioning)
        if cac_args is not None:
            print(
                ">> Hybrid conditioning cannot currently be combined with cross attention control. Cross attention control will be ignored.")
            print(">> Hybrid conditioning cannot currently be combined with cross attention control. Cross attention control will be ignored.")
            cac_args = None

    eos_token_index = 1
    if type(parsed_prompt) is not Blend:
        tokens = get_tokens_for_prompt(model, parsed_prompt)
        eos_token_index = len(tokens)+1
    return (
        unconditioning, conditioning, InvokeAIDiffuserComponent.ExtraConditioningInfo(
            tokens_count_including_eos_bos=eos_token_index + 1,
            cross_attention_control_args=cac_args
        )
    )


def _get_conditioning_for_cross_attention_control(model, prompt: FlattenedPrompt, log_tokens: bool = True):
    original_prompt = FlattenedPrompt()
    edited_prompt = FlattenedPrompt()
    # for name, a0, a1, b0, b1 in edit_opcodes: only name == 'equal' is currently parsed
    original_token_count = 0
    edited_token_count = 0
    edit_options = []
    edit_opcodes = []
    # beginning of sequence
    edit_opcodes.append(
        ('equal', original_token_count, original_token_count + 1, edited_token_count, edited_token_count + 1))
    edit_options.append(None)
    original_token_count += 1
    edited_token_count += 1
    for fragment in prompt.children:
        if type(fragment) is CrossAttentionControlSubstitute:
            original_prompt.append(fragment.original)
            edited_prompt.append(fragment.edited)
def build_token_edit_opcodes(original_tokens, edited_tokens):
    original_tokens = original_tokens.cpu().numpy()[0]
    edited_tokens = edited_tokens.cpu().numpy()[0]

            to_replace_token_count = _get_tokens_length(model, fragment.original)
            replacement_token_count = _get_tokens_length(model, fragment.edited)
            edit_opcodes.append(('replace',
                                 original_token_count, original_token_count + to_replace_token_count,
                                 edited_token_count, edited_token_count + replacement_token_count
                                 ))
            original_token_count += to_replace_token_count
            edited_token_count += replacement_token_count
            edit_options.append(fragment.options)
        # elif type(fragment) is CrossAttentionControlAppend:
        #     edited_prompt.append(fragment.fragment)
        else:
            # regular fragment
            original_prompt.append(fragment)
            edited_prompt.append(fragment)
    return SequenceMatcher(None, original_tokens, edited_tokens).get_opcodes()

            count = _get_tokens_length(model, [fragment])
            edit_opcodes.append(('equal', original_token_count, original_token_count + count, edited_token_count,
                                 edited_token_count + count))
            edit_options.append(None)
            original_token_count += count
            edited_token_count += count
    # end of sequence
    edit_opcodes.append(
        ('equal', original_token_count, original_token_count + 1, edited_token_count, edited_token_count + 1))
    edit_options.append(None)
    original_token_count += 1
    edited_token_count += 1
    original_embeddings, original_tokens = _get_embeddings_and_tokens_for_prompt(model,
                                                                                 original_prompt,
                                                                                 log_tokens=log_tokens,
                                                                                 log_display_label="(.swap originals)")
    # naïvely building a single edited_embeddings like this disregards the effects of changing the absolute location of
    # subsequent tokens when there is >1 edit and earlier edits change the total token count.
    # eg "a cat.swap(smiling dog, s_start=0.5) eating a hotdog.swap(pizza)" - when the 'pizza' edit is active but the
    # 'cat' edit is not, the 'pizza' feature vector will nevertheless be affected by the introduction of the extra
    # token 'smiling' in the inactive 'cat' edit.
    # todo: build multiple edited_embeddings, one for each edit, and pass just the edited fragments through to the CrossAttentionControl functions
    edited_embeddings, edited_tokens = _get_embeddings_and_tokens_for_prompt(model,
                                                                             edited_prompt,
                                                                             log_tokens=log_tokens,
                                                                             log_display_label="(.swap replacements)")
    conditioning = original_embeddings
    edited_conditioning = edited_embeddings
    # print('>> got edit_opcodes', edit_opcodes, 'options', edit_options)
    cac_args = cross_attention_control.Arguments(
        edited_conditioning=edited_conditioning,
        edit_opcodes=edit_opcodes,
        edit_options=edit_options
    )
    return conditioning, cac_args
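
The opcode format both versions build here - ('replace' | 'equal', a0, a1,
b0, b1) tuples over token positions - is exactly what
difflib.SequenceMatcher produces, as build_token_edit_opcodes above shows. A
self-contained sketch with made-up token ids:

from difflib import SequenceMatcher

original_tokens = [49406, 320, 2368, 49407]   # hypothetical ids for "a cat"
edited_tokens   = [49406, 320, 1929, 49407]   # hypothetical ids for "a dog"

opcodes = SequenceMatcher(None, original_tokens, edited_tokens).get_opcodes()
print(opcodes)
# [('equal', 0, 2, 0, 2), ('replace', 2, 3, 2, 3), ('equal', 3, 4, 3, 4)]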

def _get_conditioning_for_blend(model, blend: Blend, log_tokens: bool = False):
    embeddings_to_blend = None
    for i, flattened_prompt in enumerate(blend.prompts):
        this_embedding, _ = _get_embeddings_and_tokens_for_prompt(model,
                                                                  flattened_prompt,
                                                                  log_tokens=log_tokens,
                                                                  log_display_label=f"(blend part {i + 1}, weight={blend.weights[i]})")
        embeddings_to_blend = this_embedding if embeddings_to_blend is None else torch.cat(
            (embeddings_to_blend, this_embedding))
    conditioning = WeightedFrozenCLIPEmbedder.apply_embedding_weights(embeddings_to_blend.unsqueeze(0),
                                                                      blend.weights,
                                                                      normalize=blend.normalize_weights)
    return conditioning
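
Numerically, the blend amounts to a weighted combination of the per-prompt
embedding tensors. A rough sketch of the normalize=True case; the tensor
shapes are illustrative and apply_embedding_weights may differ in detail:

import torch

embeddings = torch.randn(2, 77, 768)      # two prompts' embeddings, stacked
weights = torch.tensor([1.0, 3.0])
weights = weights / weights.sum()          # normalized to sum to 1
blended = (embeddings * weights.view(-1, 1, 1)).sum(dim=0, keepdim=True)
print(blended.shape)                       # torch.Size([1, 77, 768])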

def _get_embeddings_and_tokens_for_prompt(model, flattened_prompt: FlattenedPrompt, log_tokens: bool = False,
                                          log_display_label: str = None):
def build_embeddings_and_tokens_for_flattened_prompt(model, flattened_prompt: FlattenedPrompt, log_tokens: bool=False, log_display_label: str=None):
    if type(flattened_prompt) is not FlattenedPrompt:
        raise Exception(f"embeddings can only be made from FlattenedPrompts, got {type(flattened_prompt)} instead")
    fragments = [x.text for x in flattened_prompt.children]
@@ -228,14 +170,12 @@ def _get_embeddings_and_tokens_for_prompt(model, flattened_prompt: FlattenedProm

    return embeddings, tokens


def _get_tokens_length(model, fragments: list[Fragment]):
def get_tokens_length(model, fragments: list[Fragment]):
    fragment_texts = [x.text for x in fragments]
    tokens = model.cond_stage_model.get_tokens(fragment_texts, include_start_and_end_markers=False)
    return sum([len(x) for x in tokens])


def _flatten_hybrid_conditioning(uncond, cond):
def flatten_hybrid_conditioning(uncond, cond):
    '''
    This handles the choice between a conditional conditioning
    that is a tensor (used by cross attention) vs one that has additional
@@ -254,29 +194,4 @@ def _flatten_hybrid_conditioning(uncond, cond):
        cond_flattened[k] = torch.cat([uncond[k], cond[k]])
    return uncond, cond_flattened


def log_tokenization(text, model, display_label=None):
    """ shows how the prompt is tokenized
    # usually tokens have '</w>' to indicate end-of-word,
    # but for readability it has been replaced with ' '
    """

    tokens = model.cond_stage_model.tokenizer.tokenize(text)
    tokenized = ""
    discarded = ""
    usedTokens = 0
    totalTokens = len(tokens)
    for i in range(0, totalTokens):
        token = tokens[i].replace('</w>', ' ')
        # alternate color
        s = (usedTokens % 6) + 1
        if i < model.cond_stage_model.max_length:
            tokenized = tokenized + f"\x1b[0;3{s};40m{token}"
            usedTokens += 1
        else:  # over max token length
            discarded = discarded + f"\x1b[0;3{s};40m{token}"
    print(f"\n>> Tokens {display_label or ''} ({usedTokens}):\n{tokenized}\x1b[0m")
    if discarded != "":
        print(
            f">> Tokens Discarded ({totalTokens - usedTokens}):\n{discarded}\x1b[0m"
        )
@@ -14,7 +14,6 @@ import cv2 as cv
from einops import rearrange, repeat
from pytorch_lightning import seed_everything
from ldm.invoke.devices import choose_autocast
from ldm.models.diffusion.cross_attention_map_saving import AttentionMapSaver
from ldm.util import rand_perlin_2d

downsampling = 8
@@ -52,12 +51,9 @@ class Generator():
    def generate(self,prompt,init_image,width,height,sampler, iterations=1,seed=None,
                 image_callback=None, step_callback=None, threshold=0.0, perlin=0.0,
                 safety_checker:dict=None,
                 attention_maps_callback = None,
                 **kwargs):
        scope = choose_autocast(self.precision)
        self.safety_checker = safety_checker
        attention_maps_images = []
        attention_maps_callback = lambda saver: attention_maps_images.append(saver.get_stacked_maps_image())
        make_image = self.get_make_image(
            prompt,
            sampler = sampler,
@@ -67,7 +63,6 @@ class Generator():
            step_callback = step_callback,
            threshold = threshold,
            perlin = perlin,
            attention_maps_callback = attention_maps_callback,
            **kwargs
        )
        results = []
@@ -103,13 +98,12 @@ class Generator():
                results.append([image, seed])

                if image_callback is not None:
                    attention_maps_image = None if len(attention_maps_images)==0 else attention_maps_images[-1]
                    image_callback(image, seed, first_seed=first_seed, attention_maps_image=attention_maps_image)
                    image_callback(image, seed, first_seed=first_seed)

                seed = self.new_seed()

        return results

    def sample_to_image(self,samples)->Image.Image:
        """
        Given samples returned from a sampler, converts
@@ -172,12 +166,12 @@ class Generator():
            blurred_init_mask = pil_init_mask

        multiplied_blurred_init_mask = ImageChops.multiply(blurred_init_mask, self.pil_image.split()[-1])

        # Paste original on color-corrected generation (using blurred mask)
        matched_result.paste(init_image, (0,0), mask = multiplied_blurred_init_mask)
        return matched_result

    def sample_to_lowres_estimated_image(self,samples):
        # originally adapted from code by @erucipe and @keturn here:
@@ -225,11 +219,11 @@ class Generator():
        (txt2img) or from the latent image (img2img, inpaint)
        """
        raise NotImplementedError("get_noise() must be implemented in a descendent class")

    def get_perlin_noise(self,width,height):
        fixdevice = 'cpu' if (self.model.device.type == 'mps') else self.model.device
        return torch.stack([rand_perlin_2d((height, width), (8, 8), device = self.model.device).to(fixdevice) for _ in range(self.latent_channels)], dim=0).to(self.model.device)

    def new_seed(self):
        self.seed = random.randrange(0, np.iinfo(np.uint32).max)
        return self.seed
@@ -331,4 +325,4 @@ class Generator():
        os.makedirs(dirname, exist_ok=True)
        image.save(filepath,'PNG')
@@ -38,7 +38,7 @@ class Embiggen(Generator):
            image = make_image()
            results.append([image, seed])
            if image_callback is not None:
                image_callback(image, seed, prompt_in=prompt)
                image_callback(image, seed)
            seed = self.new_seed()
        return results
@@ -48,10 +48,6 @@ class Img2Img(Generator):
                torch.tensor([t_enc]).to(self.model.device),
                noise=x_T
            )

            if self.free_gpu_mem and self.model.model.device != self.model.device:
                self.model.model.to(self.model.device)

            # decode it
            samples = sampler.decode(
                z_enc,
@@ -65,9 +61,6 @@ class Img2Img(Generator):
                all_timesteps_count = steps
            )

            if self.free_gpu_mem:
                self.model.model.to("cpu")

            return self.sample_to_image(samples)

        return make_image
@@ -94,4 +87,4 @@ class Img2Img(Generator):
        image = torch.from_numpy(image)
        if normalize:
            image = 2.0 * image - 1.0
        return image.to(self.model.device)
@@ -27,7 +27,7 @@ if Globals.try_patchmatch:
        print('>> Patchmatch initialized')
        infill_methods.append('patchmatch')
    else:
        print('>> Patchmatch not loaded (nonfatal)')
        print('>> Patchmatch not loaded, please see https://github.com/invoke-ai/InvokeAI/blob/patchmatch-install-docs/docs/installation/INSTALL_PATCHMATCH.md')
else:
    print('>> Patchmatch loading disabled')
@@ -6,7 +6,6 @@ import torch
import numpy as np
from ldm.invoke.generator.base import Generator
from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
import gc


class Txt2Img(Generator):
@@ -15,9 +14,7 @@ class Txt2Img(Generator):

    @torch.no_grad()
    def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta,
                       conditioning,width,height,step_callback=None,threshold=0.0,perlin=0.0,
                       attention_maps_callback=None,
                       **kwargs):
                       conditioning,width,height,step_callback=None,threshold=0.0,perlin=0.0,**kwargs):
        """
        Returns a function returning an image derived from the prompt and the initial image
        Return value depends on the seed at the time you call it
@@ -36,7 +33,7 @@ class Txt2Img(Generator):

            if self.free_gpu_mem and self.model.model.device != self.model.device:
                self.model.model.to(self.model.device)

            sampler.make_schedule(ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False)

            samples, _ = sampler.sample(
@@ -52,15 +49,10 @@ class Txt2Img(Generator):
                eta = ddim_eta,
                img_callback = step_callback,
                threshold = threshold,
                attention_maps_callback = attention_maps_callback,
            )

            if self.free_gpu_mem:
                self.model.model.to('cpu')
                self.model.cond_stage_model.device = 'cpu'
                self.model.cond_stage_model.to('cpu')
                gc.collect()
                torch.cuda.empty_cache()
                self.model.model.to("cpu")

            return self.sample_to_image(samples)
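
Both variants above implement a low-VRAM mode; the newer one frees memory more
aggressively after sampling. A standalone sketch of that pattern, with
attribute names following the diff and the surrounding model object assumed:

import gc
import torch

def offload_to_cpu(model):
    # move the large submodules off the GPU ...
    model.model.to('cpu')
    model.cond_stage_model.device = 'cpu'
    model.cond_stage_model.to('cpu')
    # ... then drop dangling references and release cached CUDA memory
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()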
@@ -100,11 +100,7 @@ class Txt2Img2Img(Generator):
            )

            if self.free_gpu_mem:
                self.model.model.to('cpu')
                self.model.cond_stage_model.device = 'cpu'
                self.model.cond_stage_model.to('cpu')
                gc.collect()
                torch.cuda.empty_cache()
                self.model.model.to("cpu")

            return self.sample_to_image(samples)

@@ -146,7 +142,7 @@ class Txt2Img2Img(Generator):
                **kwargs
            )
            return result[0][0]

        if sampler.uses_inpainting_model():
            return inpaint_make_image
        else:
@@ -5,9 +5,7 @@ otherwise have to be passed through long and complex call chains.
It defines a Namespace object named "Globals" that contains
the attributes:

  - root           - the root directory under which "models" and "outputs" can be found
  - initfile       - path to the initialization file
  - try_patchmatch - option to globally disable loading of 'patchmatch' module
  - root - the root directory under which "models" and "outputs" can be found
'''

import os
@@ -16,10 +14,10 @@ from argparse import Namespace
Globals = Namespace()

# This is usually overwritten by the command line and/or environment variables
Globals.root = os.path.abspath(os.environ.get('INVOKEAI_ROOT') or os.path.expanduser('~/invokeai'))
Globals.root = '.'

# Where to look for the initialization file
Globals.initfile = 'invokeai.init'
Globals.initfile = os.path.expanduser('~/.invokeai')

# Awkward workaround to disable attempted loading of pypatchmatch
# which is causing CI tests to error out.
@@ -227,9 +227,7 @@ class ModelCache(object):
            model_hash = self._cached_sha256(weights,weight_bytes)
            sd = torch.load(io.BytesIO(weight_bytes), map_location='cpu')
            del weight_bytes
            # merged models from auto11 merge board are flat for some reason
            if 'state_dict' in sd:
                sd = sd['state_dict']
            model = instantiate_from_config(omega_config.model)
            model.load_state_dict(sd, strict=False)
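
The conditional lookup above exists because some merged checkpoints store the
weights at the top level rather than under a 'state_dict' key. A minimal
sketch of the same loading logic, with an illustrative file path:

import io
import torch

weight_bytes = open('model.ckpt', 'rb').read()        # hypothetical checkpoint
sd = torch.load(io.BytesIO(weight_bytes), map_location='cpu')
if 'state_dict' in sd:
    sd = sd['state_dict']                             # nested layout
print(len(sd), 'tensors loaded')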
@@ -57,13 +57,8 @@ def retrieve_metadata(img_path):
    metadata stored there, as a dict
    '''
    im = Image.open(img_path)
    if hasattr(im, 'text'):
        md = im.text.get('sd-metadata', '{}')
        dream_prompt = im.text.get('Dream', '')
    else:
        # When trying to retrieve metadata from images without a 'text' payload, such as JPG images.
        md = '{}'
        dream_prompt = ''
    md = im.text.get('sd-metadata', '{}')
    dream_prompt = im.text.get('Dream', '')
    return {'sd-metadata': json.loads(md), 'Dream': dream_prompt}

def write_metadata(img_path:str, meta:dict):
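
The 'text' attribute guarded for above is the PNG text-chunk mapping exposed
by Pillow; JPEGs carry no such payload, hence the hasattr check. A
self-contained sketch of the round trip, with an invented file name and
payload:

from PIL import Image
from PIL.PngImagePlugin import PngInfo

info = PngInfo()
info.add_text('sd-metadata', '{"seed": 42}')        # hypothetical payload
Image.new('RGB', (8, 8)).save('demo.png', pnginfo=info)

im = Image.open('demo.png')
print(im.text.get('sd-metadata', '{}'))             # -> {"seed": 42}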
@@ -3,7 +3,7 @@ from typing import Union, Optional
import re
import pyparsing as pp
'''
This module parses prompt strings and produces tree-like structures that can be used to generate and control the conditioning tensors.
weighted subprompts.

Useful class exports:
@@ -69,12 +69,6 @@ class FlattenedPrompt():
        return len(self.children) == 0 or \
            (len(self.children) == 1 and len(self.children[0].text) == 0)

    @property
    def wants_cross_attention_control(self):
        return any(
            [issubclass(type(x), CrossAttentionControlledFragment) for x in self.children]
        )

    def __repr__(self):
        return f"FlattenedPrompt:{self.children}"
    def __eq__(self, other):
@@ -246,12 +240,6 @@ class Blend():
        self.weights = weights
        self.normalize_weights = normalize_weights

    @property
    def wants_cross_attention_control(self):
        # blends cannot cross-attention control
        return False

    def __repr__(self):
        return f"Blend:{self.prompts} | weights {' ' if self.normalize_weights else '(non-normalized) '}{self.weights}"
    def __eq__(self, other):
@@ -289,8 +277,8 @@ class PromptParser():

        return self.flatten(root[0])

    def parse_legacy_blend(self, text: str, skip_normalize: bool) -> Optional[Blend]:
        weighted_subprompts = split_weighted_subprompts(text, skip_normalize=skip_normalize)
    def parse_legacy_blend(self, text: str) -> Optional[Blend]:
        weighted_subprompts = split_weighted_subprompts(text, skip_normalize=False)
        if len(weighted_subprompts) <= 1:
            return None
        strings = [x[0] for x in weighted_subprompts]
@@ -299,7 +287,7 @@ class PromptParser():
        parsed_conjunctions = [self.parse_conjunction(x) for x in strings]
        flattened_prompts = [x.prompts[0] for x in parsed_conjunctions]

        return Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)
        return Blend(prompts=flattened_prompts, weights=weights, normalize_weights=True)

    def flatten(self, root: Conjunction, verbose = False) -> Conjunction:
@@ -653,3 +641,27 @@ def split_weighted_subprompts(text, skip_normalize=False)->list:
        return [(x[0], equal_weight) for x in parsed_prompts]
    return [(x[0], x[1] / weight_sum) for x in parsed_prompts]
|
||||
|
||||
|
||||
# shows how the prompt is tokenized
|
||||
# usually tokens have '</w>' to indicate end-of-word,
|
||||
# but for readability it has been replaced with ' '
|
||||
def log_tokenization(text, model, display_label=None):
|
||||
tokens = model.cond_stage_model.tokenizer.tokenize(text)
|
||||
tokenized = ""
|
||||
discarded = ""
|
||||
usedTokens = 0
|
||||
totalTokens = len(tokens)
|
||||
for i in range(0, totalTokens):
|
||||
token = tokens[i].replace('</w>', ' ')
|
||||
# alternate color
|
||||
s = (usedTokens % 6) + 1
|
||||
if i < model.cond_stage_model.max_length:
|
||||
tokenized = tokenized + f"\x1b[0;3{s};40m{token}"
|
||||
usedTokens += 1
|
||||
else: # over max token length
|
||||
discarded = discarded + f"\x1b[0;3{s};40m{token}"
|
||||
print(f"\n>> Tokens {display_label or ''} ({usedTokens}):\n{tokenized}\x1b[0m")
|
||||
if discarded != "":
|
||||
print(
|
||||
f">> Tokens Discarded ({totalTokens-usedTokens}):\n{discarded}\x1b[0m"
|
||||
)
|
||||
|
||||
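Note: the added log_tokenization prints each token in a cycling ANSI colour; the escape \x1b[0;3{s};40m selects foreground colour 31-36 on a black background, and \x1b[0m resets. A standalone sketch of just the colour cycling, needing no model:

def show_colored_tokens(tokens: list[str]) -> str:
    out = ""
    for i, token in enumerate(tokens):
        s = (i % 6) + 1  # cycle through ANSI foreground colours 31..36
        out += f"\x1b[0;3{s};40m{token}"
    return out + "\x1b[0m"  # reset terminal attributes at the end

print(show_colored_tokens(['a ', 'photo ', 'of ']))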
@@ -53,6 +53,7 @@ COMMANDS = (
     '--codeformer_fidelity','-cf',
     '--upscale','-U',
     '-save_orig','--save_original',
     '--skip_normalize','-x',
     '--log_tokenization','-t',
     '--hires_fix',
     '--inpaint_replace','-r',
@@ -100,8 +101,7 @@ class Completer(object):
         self.linebuffer = None
         self.auto_history_active = True
         self.extensions = None
-        self.concepts = None
-        self.embedding_terms = set()
+        self.concepts = Concepts().list_concepts()
         return

     def complete(self, text, state):
@@ -116,19 +116,19 @@ class Completer(object):
         # extensions defined, so go directly into path completion mode
         if self.extensions is not None:
             self.matches = self._path_completions(text, state, self.extensions)

         # looking for an image file
         elif re.search(path_regexp,buffer):
             do_shortcut = re.search('^'+'|'.join(IMG_FILE_COMMANDS),buffer)
             self.matches = self._path_completions(text, state, IMG_EXTENSIONS,shortcut_ok=do_shortcut)

         # looking for a seed
         elif re.search('(-S\s*|--seed[=\s])\d*$',buffer):
             self.matches= self._seed_completions(text,state)

         elif re.search('<[\w-]*$',buffer):
             self.matches= self._concept_completions(text,state)

         # looking for a model
         elif re.match('^'+'|'.join(MODEL_COMMANDS),buffer):
             self.matches= self._model_completions(text, state)
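Note: complete() dispatches on the current readline buffer with regexes, one branch per completion kind. A toy sketch (not part of the diff) of the same pattern with the standard-library readline module, available on POSIX builds of Python; the option list is a stand-in, not the tuple above:

import readline

OPTIONS = ['--steps', '--seed', '--upscale']  # illustrative stand-in

def complete(text, state):
    # readline calls this repeatedly with state = 0, 1, 2, ... until None is returned
    matches = [opt for opt in OPTIONS if opt.startswith(text)]
    return matches[state] if state < len(matches) else None

readline.set_completer(complete)
readline.parse_and_bind('tab: complete')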
@@ -226,7 +226,7 @@ class Completer(object):
         if h_len < 1:
             print('<empty history>')
             return

         for i in range(0,h_len):
             line = self.get_history_item(i+1)
             if match and match not in line:
@@ -270,21 +270,16 @@ class Completer(object):
         return matches

     def add_embedding_terms(self, terms:list[str]):
-        self.embedding_terms = set(terms)
-        if self.concepts:
-            self.embedding_terms.update(self.concepts)
+        self.concepts = Concepts().list_concepts()
+        self.concepts.extend(terms)

     def _concept_completions(self, text, state):
-        if self.concepts is None:
-            self.concepts = set(Concepts().list_concepts())
-            self.embedding_terms.update(self.concepts)
-
         partial = text[1:] # this removes the leading '<'
         if len(partial) == 0:
-            return list(self.embedding_terms) # whole dump - think if user wants this!
+            return self.concepts # whole dump - think if user wants this!

         matches = list()
-        for concept in self.embedding_terms:
+        for concept in self.concepts:
             if concept.startswith(partial):
                 matches.append(f'<{concept}>')
         matches.sort()
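Note: _concept_completions matches whatever follows a '<' against the known concept names and re-wraps hits in angle brackets. A self-contained sketch (names illustrative):

def concept_completions(text: str, concepts: list[str]) -> list[str]:
    partial = text[1:]  # strip the leading '<'
    if len(partial) == 0:
        return sorted(concepts)  # whole dump
    return sorted(f'<{c}>' for c in concepts if c.startswith(partial))

print(concept_completions('<lo', ['low-poly', 'sketch', 'lofi']))
# ['<lofi>', '<low-poly>']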
@@ -366,7 +361,7 @@ class DummyCompleter(Completer):
     def __init__(self,options):
         super().__init__(options)
         self.history = list()

     def add_history(self,line):
         self.history.append(line)

@@ -421,11 +416,7 @@ def get_completer(opt:Args, models=[])->Completer:
     readline.parse_and_bind('set skip-completed-text on')
     readline.parse_and_bind('set show-all-if-ambiguous on')

-    outdir = os.path.expanduser(opt.outdir)
-    if os.path.isabs(outdir):
-        histfile = os.path.join(outdir,'.invoke_history')
-    else:
-        histfile = os.path.join(Globals.root, outdir, '.invoke_history')
+    histfile = os.path.join(os.path.expanduser(opt.outdir), '.invoke_history')
     try:
         readline.read_history_file(histfile)
         readline.set_history_length(1000)
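Note: the removed branch anchored relative outdir values under Globals.root instead of the current working directory. A sketch of the two behaviours, with a hypothetical root path:

import os

def resolve_histfile(outdir: str, root: str = '~/invokeai') -> str:
    outdir = os.path.expanduser(outdir)
    if os.path.isabs(outdir):
        return os.path.join(outdir, '.invoke_history')
    # relative outdirs are anchored under the InvokeAI root
    return os.path.join(os.path.expanduser(root), outdir, '.invoke_history')

print(resolve_histfile('/tmp/outputs'))  # /tmp/outputs/.invoke_history
print(resolve_histfile('outputs'))       # <expanded root>/outputs/.invoke_history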
@@ -1,14 +1,12 @@
 import enum
 import math
-from typing import Optional, Callable
+from typing import Optional

-import psutil
 import torch
 from torch import nn

 # adapted from bloc97's CrossAttentionControl colab
 # https://github.com/bloc97/CrossAttentionControl


 class Arguments:
     def __init__(self, edited_conditioning: torch.Tensor, edit_opcodes: list[tuple], edit_options: dict):
         """
@@ -65,13 +63,9 @@ class Context:
         self.clear_requests(cleanup=True)

     def register_cross_attention_modules(self, model):
-        for name,module in get_cross_attention_modules(model, CrossAttentionType.SELF):
-            if name in self.self_cross_attention_module_identifiers:
-                assert False, f"name {name} cannot appear more than once"
+        for name,module in get_attention_modules(model, CrossAttentionType.SELF):
             self.self_cross_attention_module_identifiers.append(name)
-        for name,module in get_cross_attention_modules(model, CrossAttentionType.TOKENS):
-            if name in self.tokens_cross_attention_module_identifiers:
-                assert False, f"name {name} cannot appear more than once"
+        for name,module in get_attention_modules(model, CrossAttentionType.TOKENS):
             self.tokens_cross_attention_module_identifiers.append(name)

     def request_save_attention_maps(self, cross_attention_type: CrossAttentionType):
@@ -172,135 +166,6 @@ class Context:
             map_dict[offset] = slice.to('cpu')


-
-class InvokeAICrossAttentionMixin:
-    """
-    Enable InvokeAI-flavoured CrossAttention calculation, which does aggressive low-memory slicing and calls
-    through both to an attention_slice_wrangler and a slicing_strategy_getter for custom attention map wrangling
-    and dynamic slicing strategy selection.
-    """
-    def __init__(self):
-        self.mem_total_gb = psutil.virtual_memory().total // (1 << 30)
-        self.attention_slice_wrangler = None
-        self.slicing_strategy_getter = None
-        self.attention_slice_calculated_callback = None
-
-    def set_attention_slice_wrangler(self, wrangler: Optional[Callable[[nn.Module, torch.Tensor, int, int, int], torch.Tensor]]):
-        '''
-        Set a custom attention calculator to be called when attention is calculated
-        :param wrangler: Callback, with args (module, suggested_attention_slice, dim, offset, slice_size),
-            which returns either the suggested_attention_slice or an adjusted equivalent.
-            `module` is the current CrossAttention module for which the callback is being invoked.
-            `suggested_attention_slice` is the default-calculated attention slice
-            `dim` is -1 if the attention map has not been sliced, or 0 or 1 for dimension-0 or dimension-1 slicing.
-                If `dim` is >= 0, `offset` and `slice_size` specify the slice start and length.
-
-        Pass None to use the default attention calculation.
-        :return:
-        '''
-        self.attention_slice_wrangler = wrangler
-
-    def set_slicing_strategy_getter(self, getter: Optional[Callable[[nn.Module], tuple[int,int]]]):
-        self.slicing_strategy_getter = getter
-
-    def set_attention_slice_calculated_callback(self, callback: Optional[Callable[[torch.Tensor], None]]):
-        self.attention_slice_calculated_callback = callback
-
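Note: the docstring above fixes the wrangler contract: it receives the default-computed attention slice plus slicing coordinates and returns a (possibly adjusted) slice. A hedged sketch of a callback matching that signature; the token index is purely illustrative, and set_attention_slice_wrangler exists only on modules patched with the mixin being removed here:

import torch

def boost_token_wrangler(module, suggested_attention_slice: torch.Tensor,
                         dim: int, offset: int, slice_size: int) -> torch.Tensor:
    # Token scores live in the last dimension of the slice; dim of -1/None
    # means the map was not sliced. Boosting column 5 is illustrative only.
    attention = suggested_attention_slice.clone()
    attention[..., 5] *= 1.5
    return attention

# module.set_attention_slice_wrangler(boost_token_wrangler)  # on a patched module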
-    def einsum_lowest_level(self, query, key, value, dim, offset, slice_size):
-        # calculate attention scores
-        #attention_scores = torch.einsum('b i d, b j d -> b i j', q, k)
-        attention_scores = torch.baddbmm(
-            torch.empty(query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device),
-            query,
-            key.transpose(-1, -2),
-            beta=0,
-            alpha=self.scale,
-        )
-
-        # calculate attention slice by taking the best scores for each latent pixel
-        default_attention_slice = attention_scores.softmax(dim=-1, dtype=attention_scores.dtype)
-        attention_slice_wrangler = self.attention_slice_wrangler
-        if attention_slice_wrangler is not None:
-            attention_slice = attention_slice_wrangler(self, default_attention_slice, dim, offset, slice_size)
-        else:
-            attention_slice = default_attention_slice
-
-        if self.attention_slice_calculated_callback is not None:
-            self.attention_slice_calculated_callback(attention_slice, dim, offset, slice_size)
-
-        hidden_states = torch.bmm(attention_slice, value)
-        return hidden_states
-
-    def einsum_op_slice_dim0(self, q, k, v, slice_size):
-        r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
-        for i in range(0, q.shape[0], slice_size):
-            end = i + slice_size
-            r[i:end] = self.einsum_lowest_level(q[i:end], k[i:end], v[i:end], dim=0, offset=i, slice_size=slice_size)
-        return r
-
-    def einsum_op_slice_dim1(self, q, k, v, slice_size):
-        r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
-        for i in range(0, q.shape[1], slice_size):
-            end = i + slice_size
-            r[:, i:end] = self.einsum_lowest_level(q[:, i:end], k, v, dim=1, offset=i, slice_size=slice_size)
-        return r
-
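Note: dim-0 slicing splits the batch/heads axis and dim-1 slicing splits the query axis; both reproduce full attention exactly, because the softmax runs only over the key axis, so every query row is independent. A small self-contained check of that equivalence (shapes arbitrary, not taken from the diff):

import torch

def attention(q, k, v, scale):
    scores = torch.baddbmm(
        torch.empty(q.shape[0], q.shape[1], k.shape[1], dtype=q.dtype),
        q, k.transpose(-1, -2), beta=0, alpha=scale)
    return torch.bmm(scores.softmax(dim=-1), v)

q, k, v = torch.randn(8, 64, 40), torch.randn(8, 77, 40), torch.randn(8, 77, 40)
scale = 40 ** -0.5
full = attention(q, k, v, scale)
# dim-1 style slicing: compute attention for 16 query rows at a time
sliced = torch.cat([attention(q[:, i:i + 16], k, v, scale) for i in range(0, 64, 16)], dim=1)
assert torch.allclose(full, sliced, atol=1e-6)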
-    def einsum_op_mps_v1(self, q, k, v):
-        if q.shape[1] <= 4096:  # (512x512) max q.shape[1]: 4096
-            return self.einsum_lowest_level(q, k, v, None, None, None)
-        else:
-            slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1]))
-            return self.einsum_op_slice_dim1(q, k, v, slice_size)
-
-    def einsum_op_mps_v2(self, q, k, v):
-        if self.mem_total_gb > 8 and q.shape[1] <= 4096:
-            return self.einsum_lowest_level(q, k, v, None, None, None)
-        else:
-            return self.einsum_op_slice_dim0(q, k, v, 1)
-
-    def einsum_op_tensor_mem(self, q, k, v, max_tensor_mb):
-        size_mb = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() // (1 << 20)
-        if size_mb <= max_tensor_mb:
-            return self.einsum_lowest_level(q, k, v, None, None, None)
-        div = 1 << int((size_mb - 1) / max_tensor_mb).bit_length()
-        if div <= q.shape[0]:
-            return self.einsum_op_slice_dim0(q, k, v, q.shape[0] // div)
-        return self.einsum_op_slice_dim1(q, k, v, max(q.shape[1] // div, 1))
-
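Note: einsum_op_tensor_mem sizes the intermediate score tensor in MB, then picks a power-of-two divisor large enough to fit the budget. A worked example with illustrative shapes, following the arithmetic above:

# q of shape [16, 4096, 40] in float32 against 4096 keys:
batch_heads, q_len, k_len, bytes_per_el = 16, 4096, 4096, 4
size_mb = batch_heads * q_len * k_len * bytes_per_el // (1 << 20)   # 1024 MB
max_tensor_mb = 100
div = 1 << int((size_mb - 1) / max_tensor_mb).bit_length()          # 16
print(size_mb, div, batch_heads // div)  # 1024 16 1 -> dim-0 slices of size 1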
-    def einsum_op_cuda(self, q, k, v):
-        # check if we already have a slicing strategy (this should only happen during cross-attention controlled generation)
-        slicing_strategy_getter = self.slicing_strategy_getter
-        if slicing_strategy_getter is not None:
-            (dim, slice_size) = slicing_strategy_getter(self)
-            if dim is not None:
-                # print("using saved slicing strategy with dim", dim, "slice size", slice_size)
-                if dim == 0:
-                    return self.einsum_op_slice_dim0(q, k, v, slice_size)
-                elif dim == 1:
-                    return self.einsum_op_slice_dim1(q, k, v, slice_size)
-
-        # fallback for when there is no saved strategy, or the saved strategy does not slice
-        mem_free_total = get_mem_free_total(q.device)
-        # Divide by a factor of safety as there's copying and fragmentation
-        return self.einsum_op_tensor_mem(q, k, v, mem_free_total / 3.3 / (1 << 20))
-
-    def get_invokeai_attention_mem_efficient(self, q, k, v):
-        if q.device.type == 'cuda':
-            #print("in get_attention_mem_efficient with q shape", q.shape, ", k shape", k.shape, ", free memory is", get_mem_free_total(q.device))
-            return self.einsum_op_cuda(q, k, v)
-
-        if q.device.type == 'mps' or q.device.type == 'cpu':
-            if self.mem_total_gb >= 32:
-                return self.einsum_op_mps_v1(q, k, v)
-            return self.einsum_op_mps_v2(q, k, v)
-
-        # Smaller slices are faster due to L2/L3/SLC caches.
-        # Tested on i7 with 8MB L3 cache.
-        return self.einsum_op_tensor_mem(q, k, v, 32)
-

 def remove_cross_attention_control(model):
     remove_attention_function(model)

@@ -322,7 +187,7 @@ def setup_cross_attention_control(model, context: Context):
     # mask=1 means use base prompt attention, mask=0 means use edited prompt attention
     mask = torch.zeros(max_length)
     indices_target = torch.arange(max_length, dtype=torch.long)
-    indices = torch.arange(max_length, dtype=torch.long)
+    indices = torch.zeros(max_length, dtype=torch.long)
     for name, a0, a1, b0, b1 in context.arguments.edit_opcodes:
         if b0 < max_length:
             if name == "equal":  # or (name == "replace" and a1 - a0 == b1 - b0):
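Note: the (name, a0, a1, b0, b1) tuples above follow difflib.SequenceMatcher's opcode convention; "equal" spans are where the mask stays 1 and the base prompt's attention is reused. A hedged sketch of deriving such opcodes for two token sequences (token lists illustrative):

from difflib import SequenceMatcher

base_tokens = ['a', 'cat', 'sitting', 'on', 'a', 'car']
edited_tokens = ['a', 'smiling', 'dog', 'sitting', 'on', 'a', 'car']
for name, a0, a1, b0, b1 in SequenceMatcher(None, base_tokens, edited_tokens).get_opcodes():
    # 'equal' spans keep base-prompt attention; others use the edited prompt
    print(name, base_tokens[a0:a1], '->', edited_tokens[b0:b1])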
@@ -336,23 +201,10 @@ def setup_cross_attention_control(model, context: Context):
     inject_attention_function(model, context)


-def get_cross_attention_modules(model, which: CrossAttentionType) -> list[tuple[str, InvokeAICrossAttentionMixin]]:
-    cross_attention_class: type = InvokeAICrossAttentionMixin
-    # cross_attention_class: type = InvokeAIDiffusersCrossAttention
+def get_attention_modules(model, which: CrossAttentionType):
     which_attn = "attn1" if which is CrossAttentionType.SELF else "attn2"
-    attention_module_tuples = [(name,module) for name, module in model.named_modules() if
-                               isinstance(module, cross_attention_class) and which_attn in name]
-    cross_attention_modules_in_model_count = len(attention_module_tuples)
-    expected_count = 16
-    if cross_attention_modules_in_model_count != expected_count:
-        # non-fatal error but .swap() won't work.
-        print(f"Error! CrossAttentionControl found an unexpected number of {cross_attention_class} modules in the model " +
-              f"(expected {expected_count}, found {cross_attention_modules_in_model_count}). Either monkey-patching failed " +
-              f"or some assumption has changed about the structure of the model itself. Please fix the monkey-patching, " +
-              f"and/or update the {expected_count} above to an appropriate number, and/or find and inform someone who knows " +
-              f"what it means. This error is non-fatal, but it is likely that .swap() and attention map display will not " +
-              f"work properly until it is fixed.")
-    return attention_module_tuples
+    return [(name,module) for name, module in model.named_modules() if
+            type(module).__name__ == "CrossAttention" and which_attn in name]


 def inject_attention_function(unet, context: Context):
@@ -392,52 +244,19 @@ def inject_attention_function(unet, context: Context):

         return attention_slice

-    cross_attention_modules = get_cross_attention_modules(unet, CrossAttentionType.TOKENS) + get_cross_attention_modules(unet, CrossAttentionType.SELF)
-    for identifier, module in cross_attention_modules:
-        module.identifier = identifier
-        try:
+    for name, module in unet.named_modules():
+        module_name = type(module).__name__
+        if module_name == "CrossAttention":
+            module.identifier = name
             module.set_attention_slice_wrangler(attention_slice_wrangler)
-            module.set_slicing_strategy_getter(
-                lambda module: context.get_slicing_strategy(identifier)
-            )
-        except AttributeError as e:
-            if is_attribute_error_about(e, 'set_attention_slice_wrangler'):
-                print(f"TODO: implement set_attention_slice_wrangler for {type(module)}")  # TODO
-            else:
-                raise
+            module.set_slicing_strategy_getter(lambda module, module_identifier=name: \
+                context.get_slicing_strategy(module_identifier))

 def remove_attention_function(unet):
-    cross_attention_modules = get_cross_attention_modules(unet, CrossAttentionType.TOKENS) + get_cross_attention_modules(unet, CrossAttentionType.SELF)
-    for identifier, module in cross_attention_modules:
-        try:
-            # clear wrangler callback
+    # clear wrangler callback
+    for name, module in unet.named_modules():
+        module_name = type(module).__name__
+        if module_name == "CrossAttention":
             module.set_attention_slice_wrangler(None)
             module.set_slicing_strategy_getter(None)
-        except AttributeError as e:
-            if is_attribute_error_about(e, 'set_attention_slice_wrangler'):
-                print(f"TODO: implement set_attention_slice_wrangler for {type(module)}")
-            else:
-                raise


-def is_attribute_error_about(error: AttributeError, attribute: str):
-    if hasattr(error, 'name'):  # Python 3.10
-        return error.name == attribute
-    else:  # Python 3.9
-        return attribute in str(error)


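Note: the removed helper papers over a language difference: AttributeError gained a .name attribute in Python 3.10, so 3.9 has to fall back to string matching. A quick demonstration:

try:
    object().no_such_method
except AttributeError as e:
    # Python >= 3.10 exposes the missing attribute's name directly
    print(getattr(e, 'name', None))    # 'no_such_method' on 3.10+, None on 3.9
    print('no_such_method' in str(e))  # True on both versions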
-def get_mem_free_total(device):
-    # only on cuda
-    if not torch.cuda.is_available():
-        return None
-    stats = torch.cuda.memory_stats(device)
-    mem_active = stats['active_bytes.all.current']
-    mem_reserved = stats['reserved_bytes.all.current']
-    mem_free_cuda, _ = torch.cuda.mem_get_info(device)
-    mem_free_torch = mem_reserved - mem_active
-    mem_free_total = mem_free_cuda + mem_free_torch
-    return mem_free_total


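Note: the removed helper adds driver-level free memory to memory PyTorch has reserved but not allocated; einsum_op_cuda above then converts that to MB and divides by a 3.3 safety factor. A usage sketch (requires a CUDA device; the printed label is illustrative):

import torch

if torch.cuda.is_available():
    device = torch.device('cuda:0')
    stats = torch.cuda.memory_stats(device)
    mem_free_cuda, _ = torch.cuda.mem_get_info(device)
    mem_free_torch = stats['reserved_bytes.all.current'] - stats['active_bytes.all.current']
    budget_mb = (mem_free_cuda + mem_free_torch) / 3.3 / (1 << 20)
    print(f'attention tensor budget: {budget_mb:.0f} MB')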
@@ -1,95 +0,0 @@
-import math
-
-import PIL
-import torch
-from torchvision.transforms.functional import resize as tv_resize, InterpolationMode
-
-from ldm.models.diffusion.cross_attention_control import get_cross_attention_modules, CrossAttentionType
-
-
-class AttentionMapSaver():
-
-    def __init__(self, token_ids: range, latents_shape: torch.Size):
-        self.token_ids = token_ids
-        self.latents_shape = latents_shape
-        #self.collated_maps = #torch.zeros([len(token_ids), latents_shape[0], latents_shape[1]])
-        self.collated_maps = {}
-
-    def clear_maps(self):
-        self.collated_maps = {}
-
-    def add_attention_maps(self, maps: torch.Tensor, key: str):
-        """
-        Accumulate the given attention maps and store by summing with existing maps at the passed-in key (if any).
-        :param maps: Attention maps to store. Expected shape [A, (H*W), N] where A is attention heads count, H and W are the map size (fixed per-key) and N is the number of tokens (typically 77).
-        :param key: Storage key. If a map already exists for this key it will be summed with the incoming data. In this case the maps sizes (H and W) should match.
-        :return: None
-        """
-        key_and_size = f'{key}_{maps.shape[1]}'
-
-        # extract desired tokens
-        maps = maps[:, :, self.token_ids]
-
-        # merge attention heads to a single map per token
-        maps = torch.sum(maps, 0)
-
-        # store
-        if key_and_size not in self.collated_maps:
-            self.collated_maps[key_and_size] = torch.zeros_like(maps, device='cpu')
-        self.collated_maps[key_and_size] += maps.cpu()
-
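Note: per the docstring, maps arrive as [heads, H*W, tokens]; heads are summed and results accumulate per storage key. A hedged usage sketch with dummy shapes, assuming the module is still importable (this diff removes it) and with an illustrative key name:

import torch
from ldm.models.diffusion.cross_attention_map_saving import AttentionMapSaver  # pre-removal path

# dummy data: 8 heads, a 24x24 map flattened to 576, 77 tokens
saver = AttentionMapSaver(token_ids=range(1, 6), latents_shape=torch.Size([24, 24]))
maps = torch.rand(8, 576, 77)
saver.add_attention_maps(maps, key='up_blocks.1')  # key naming is illustrative
saver.add_attention_maps(maps, key='up_blocks.1')  # same key: summed into the stored map
print(saver.collated_maps['up_blocks.1_576'].shape)  # torch.Size([576, 5])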
-    def write_maps_to_disk(self, path: str):
-        pil_image = self.get_stacked_maps_image()
-        pil_image.save(path, 'PNG')
-
-    def get_stacked_maps_image(self) -> PIL.Image:
-        """
-        Scale all collected attention maps to the same size, blend them together and return as an image.
-        :return: An image containing a vertical stack of blended attention maps, one for each requested token.
-        """
-        num_tokens = len(self.token_ids)
-        if num_tokens == 0:
-            return None
-
-        latents_height = self.latents_shape[0]
-        latents_width = self.latents_shape[1]
-
-        merged = None
-
-        for key, maps in self.collated_maps.items():
-
-            # maps has shape [(H*W), N] for N tokens
-            # but we want [N, H, W]
-            this_scale_factor = math.sqrt(maps.shape[0] / (latents_width * latents_height))
-            this_maps_height = int(float(latents_height) * this_scale_factor)
-            this_maps_width = int(float(latents_width) * this_scale_factor)
-            # and we need to do some dimension juggling
-            maps = torch.reshape(torch.swapdims(maps, 0, 1), [num_tokens, this_maps_height, this_maps_width])
-
-            # scale to output size if necessary
-            if this_scale_factor != 1:
-                maps = tv_resize(maps, [latents_height, latents_width], InterpolationMode.BICUBIC)
-
-            # normalize
-            maps_min = torch.min(maps)
-            maps_range = torch.max(maps) - maps_min
-            #print(f"map {key} size {[this_maps_width, this_maps_height]} range {[maps_min, maps_min + maps_range]}")
-            maps_normalized = (maps - maps_min) / maps_range
-            # expand to (-0.1, 1.1) and clamp
-            maps_normalized_expanded = maps_normalized * 1.1 - 0.05
-            maps_normalized_expanded_clamped = torch.clamp(maps_normalized_expanded, 0, 1)
-
-            # merge together, producing a vertical stack
-            maps_stacked = torch.reshape(maps_normalized_expanded_clamped, [num_tokens * latents_height, latents_width])
-
-            if merged is None:
-                merged = maps_stacked
-            else:
-                # screen blend
-                merged = 1 - (1 - maps_stacked)*(1 - merged)
-
-        if merged is None:
-            return None
-
-        merged_bytes = merged.mul(0xff).byte()
-        return PIL.Image.fromarray(merged_bytes.numpy(), mode='L')
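Note: the per-key stacks are combined with a screen blend, merged = 1 - (1 - a)(1 - b), which brightens where either input is bright, never exceeds 1, and is commutative. A numeric check:

import torch

a = torch.tensor([0.0, 0.5, 1.0])
b = torch.tensor([0.5, 0.5, 0.5])
screen = 1 - (1 - a) * (1 - b)
print(screen)  # tensor([0.5000, 0.7500, 1.0000])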
@@ -4,7 +4,6 @@ import k_diffusion as K
 import torch
 from torch import nn

-from .cross_attention_map_saving import AttentionMapSaver
 from .sampler import Sampler
 from .shared_invokeai_diffusion import InvokeAIDiffuserComponent

@@ -37,7 +36,6 @@ class CFGDenoiser(nn.Module):
         self.invokeai_diffuser = InvokeAIDiffuserComponent(model,
                                                            model_forward_callback=lambda x, sigma, cond: self.inner_model(x, sigma, cond=cond))

     def prepare_to_sample(self, t_enc, **kwargs):

         extra_conditioning_info = kwargs.get('extra_conditioning_info', None)
@@ -108,12 +106,12 @@ class KSampler(Sampler):
         else:
             print(f'>> Ksampler using karras noise schedule (steps < {self.karras_max})')
             self.sigmas = self.karras_sigmas

     # ALERT: We are completely overriding the sample() method in the base class, which
     # means that inpainting will not work. To get this to work we need to be able to
     # modify the inner loop of k_heun, k_lms, etc, as is done in an ugly way
     # in the lstein/k-diffusion branch.

     @torch.no_grad()
     def decode(
         self,
@@ -147,7 +145,7 @@ class KSampler(Sampler):
     @torch.no_grad()
     def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
         return x0

     # Most of these arguments are ignored and are only present for compatibility with
     # other samplers
     @torch.no_grad()
@@ -160,7 +158,6 @@ class KSampler(Sampler):
         callback=None,
         normals_sequence=None,
         img_callback=None,
-        attention_maps_callback=None,
         quantize_x0=False,
         eta=0.0,
         mask=None,
@@ -174,7 +171,7 @@ class KSampler(Sampler):
         log_every_t=100,
         unconditional_guidance_scale=1.0,
         unconditional_conditioning=None,
-        extra_conditioning_info: InvokeAIDiffuserComponent.ExtraConditioningInfo=None,
+        extra_conditioning_info=None,
         threshold = 0,
         perlin = 0,
         # this has to come in the same format as the conditioning, e.g. as encoded tokens, ...
@@ -207,15 +204,6 @@ class KSampler(Sampler):

         model_wrap_cfg = CFGDenoiser(self.model, threshold=threshold, warmup=max(0.8*S,S-10))
         model_wrap_cfg.prepare_to_sample(S, extra_conditioning_info=extra_conditioning_info)
-
-        # setup attention maps saving. checks for None are because there are multiple code paths to get here.
-        attention_maps_saver = None
-        if attention_maps_callback is not None and extra_conditioning_info is not None:
-            eos_token_index = extra_conditioning_info.tokens_count_including_eos_bos - 1
-            attention_map_token_ids = range(1, eos_token_index)
-            attention_maps_saver = AttentionMapSaver(token_ids = attention_map_token_ids, latents_shape=x.shape[-2:])
-            model_wrap_cfg.invokeai_diffuser.setup_attention_map_saving(attention_maps_saver)

         extra_args = {
             'cond': conditioning,
             'uncond': unconditional_conditioning,
@@ -229,8 +217,6 @@ class KSampler(Sampler):
             ),
             None,
         )
-        if attention_maps_saver is not None:
-            attention_maps_callback(attention_maps_saver)
         return sampling_result

     # this code will support inpainting if and when ksampler API modified or
@@ -262,7 +248,7 @@ class KSampler(Sampler):
         # terrible, confusing names here
         steps = self.ddim_num_steps
         t_enc = self.t_enc

         # sigmas is a full `steps` entries long, but t_enc might
         # be less. We start in the middle of the sigma array
         # and work our way to the end after t_enc steps.
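Note: the comment describes the img2img-style trick of entering the sigma schedule part-way through. A hedged sketch of that indexing, assuming a full schedule of `steps` sigmas (the real code slices self.sigmas similarly):

steps, t_enc = 50, 30
sigmas = list(range(steps + 1))            # stand-in for a real noise schedule
window = sigmas[-t_enc - 1:]               # final t_enc steps, plus the ending sigma
print(len(window), window[0], window[-1])  # 31 20 50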
@@ -294,7 +280,7 @@ class KSampler(Sampler):
             return x_T + x
         else:
             return x

     def prepare_to_sample(self,t_enc,**kwargs):
         self.t_enc = t_enc
         self.model_wrap = None

Some files were not shown because too many files have changed in this diff.