Mirror of https://github.com/invoke-ai/InvokeAI.git, synced 2026-01-21 13:48:24 -05:00
Compare commits
447 Commits
.github/workflows/build-container.yml (vendored; 8 changed lines)

@@ -45,6 +45,9 @@ jobs:
     steps:
       - name: Free up more disk space on the runner
         # https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
+        # the /mnt dir has 70GBs of free space
+        # /dev/sda1 74G 28K 70G 1% /mnt
+        # According to some online posts the /mnt is not always there, so checking before setting docker to use it
         run: |
           echo "----- Free space before cleanup"
           df -h
@@ -52,6 +55,11 @@ jobs:
           sudo rm -rf "$AGENT_TOOLSDIRECTORY"
           sudo swapoff /mnt/swapfile
           sudo rm -rf /mnt/swapfile
+          if [ -d /mnt ]; then
+            sudo chmod -R 777 /mnt
+            echo '{"data-root": "/mnt/docker-root"}' | sudo tee /etc/docker/daemon.json
+            sudo systemctl restart docker
+          fi
           echo "----- Free space after cleanup"
           df -h
.github/workflows/lfs-checks.yml (vendored; new file, 30 lines)

@@ -0,0 +1,30 @@
# Checks that large files and LFS-tracked files are properly checked in with pointer format.
# Uses https://github.com/ppremk/lfs-warning to detect LFS issues.

name: 'lfs checks'

on:
  push:
    branches:
      - 'main'
  pull_request:
    types:
      - 'ready_for_review'
      - 'opened'
      - 'synchronize'
  merge_group:
  workflow_dispatch:

jobs:
  lfs-check:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    permissions:
      # Required to label and comment on the PRs
      pull-requests: write
    steps:
      - name: checkout
        uses: actions/checkout@v4

      - name: check lfs files
        uses: ppremk/lfs-warning@v3.3
@@ -265,7 +265,7 @@ If the key is unrecognized, this call raises an

 #### exists(key) -> AnyModelConfig

-Returns True if a model with the given key exists in the databsae.
+Returns True if a model with the given key exists in the database.

 #### search_by_path(path) -> AnyModelConfig

@@ -718,7 +718,7 @@ When downloading remote models is implemented, additional
 configuration information, such as list of trigger terms, will be
 retrieved from the HuggingFace and Civitai model repositories.

-The probed values can be overriden by providing a dictionary in the
+The probed values can be overridden by providing a dictionary in the
 optional `config` argument passed to `import_model()`. You may provide
 overriding values for any of the model's configuration
 attributes. Here is an example of setting the

@@ -841,7 +841,7 @@ variable.

 #### installer.start(invoker)

-The `start` method is called by the API intialization routines when
+The `start` method is called by the API initialization routines when
 the API starts up. Its effect is to call `sync_to_config()` to
 synchronize the model record store database with what's currently on
 disk.
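To make the override mechanism concrete, here is a minimal sketch of passing a `config` dictionary to `import_model()`. It assumes `installer` is an already-constructed model installer instance; the path and attribute values are illustrative, not taken from the docs above.

```python
# Sketch only: `installer` is assumed to be an existing model-install service.
# The config dict overrides values that would otherwise be probed from the model.
job = installer.import_model(
    source="/tmp/models/my_model.safetensors",  # hypothetical local path
    config={
        "name": "my_model",                   # override the probed model name
        "description": "Manually imported",   # override the probed description
    },
)
```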
@@ -16,7 +16,7 @@ We thank [all contributors](https://github.com/invoke-ai/InvokeAI/graphs/contrib
 - @psychedelicious (Spencer Mabrito) - Web Team Leader
 - @joshistoast (Josh Corbett) - Web Development
 - @cheerio (Mary Rogers) - Lead Engineer & Web App Development
-- @ebr (Eugene Brodsky) - Cloud/DevOps/Sofware engineer; your friendly neighbourhood cluster-autoscaler
+- @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
 - @sunija - Standalone version
 - @brandon (Brandon Rising) - Platform, Infrastructure, Backend Systems
 - @ryanjdick (Ryan Dick) - Machine Learning & Training
@@ -33,30 +33,45 @@ Hardware requirements vary significantly depending on model and image output siz

 More detail on system requirements can be found [here](./requirements.md).

-## Step 2: Download
+## Step 2: Download and Set Up the Launcher

-Download the most recent launcher for your operating system:
+The Launcher manages your Invoke install. Follow these instructions to download and set up the Launcher.

-- [Download for Windows](https://download.invoke.ai/Invoke%20Community%20Edition.exe)
-- [Download for macOS](https://download.invoke.ai/Invoke%20Community%20Edition.dmg)
-- [Download for Linux](https://download.invoke.ai/Invoke%20Community%20Edition.AppImage)
+!!! info "Instructions for each OS"

-## Step 3: Install or Update
+    === "Windows"

-Run the launcher you just downloaded, click **Install** and follow the instructions to get set up.
+        - [Download for Windows](https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition.Setup.latest.exe)
+        - Run the `EXE` to install the Launcher and start it.
+        - A desktop shortcut will be created; use this to run the Launcher in the future.
+        - You can delete the `EXE` file you downloaded.
+
+    === "macOS"
+
+        - [Download for macOS](https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition-latest-arm64.dmg)
+        - Open the `DMG` and drag the app into `Applications`.
+        - Run the Launcher using its entry in `Applications`.
+        - You can delete the `DMG` file you downloaded.
+
+    === "Linux"
+
+        - [Download for Linux](https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition-latest.AppImage)
+        - You may need to edit the `AppImage` file properties and make it executable.
+        - Optionally move the file to a location that does not require admin privileges and add a desktop shortcut for it.
+        - Run the Launcher by double-clicking the `AppImage` or the shortcut you made.
+
+## Step 3: Install Invoke
+
+Run the Launcher you just set up if you haven't already. Click **Install** and follow the instructions to install (or update) Invoke.
+
+If you have an existing Invoke installation, you can select it and let the launcher manage the install. You'll be able to update or launch the installation.

-!!! warning "Problem running the launcher on macOS"
+!!! tip "Updating"

-    macOS may not allow you to run the launcher. We are working to resolve this by signing the launcher executable. Until that is done, you can manually flag the launcher as safe:
+    The Launcher will check for updates for itself _and_ Invoke.

-    - Open the **Invoke Community Edition.dmg** file.
-    - Drag the launcher to **Applications**.
-    - Open a terminal.
-    - Run `xattr -d 'com.apple.quarantine' /Applications/Invoke\ Community\ Edition.app`.
-
-    You should now be able to run the launcher.
+    - When the Launcher detects an update is available for itself, you'll get a small popup window. Click through this and the Launcher will update itself.
+    - When the Launcher detects an update for Invoke, you'll see a small green alert in the Launcher. Click that and follow the instructions to update Invoke.

 ## Step 4: Launch
@@ -41,7 +41,7 @@ Nodes have a "Use Cache" option in their footer. This allows for performance imp

 There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see Examples).

-### Noise
+### Create Latent Noise

 An initial noise tensor is necessary for the latent diffusion process. As a result, the Denoising node requires a noise node input.
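For intuition, here is a minimal sketch of creating such a latent noise tensor with PyTorch. The 4-channel, 1/8-resolution shape is the standard SD-style convention and is an illustrative assumption, not this node's exact implementation.

```python
import torch

# Hypothetical SD-style latent shape: 4 channels at 1/8 the output resolution.
height, width, scale = 512, 512, 8
generator = torch.Generator(device="cpu").manual_seed(42)  # seed for reproducible noise
noise = torch.randn(
    (1, 4, height // scale, width // scale),
    generator=generator,
    dtype=torch.float32,
)
print(noise.shape)  # torch.Size([1, 4, 64, 64])
```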
invokeai/app/api/routers/board_videos.py (new file, 39 lines)

@@ -0,0 +1,39 @@
from fastapi import Body, HTTPException
from fastapi.routing import APIRouter

from invokeai.app.services.videos_common import AddVideosToBoardResult, RemoveVideosFromBoardResult

board_videos_router = APIRouter(prefix="/v1/board_videos", tags=["boards"])


@board_videos_router.post(
    "/batch",
    operation_id="add_videos_to_board",
    responses={
        201: {"description": "Videos were added to board successfully"},
    },
    status_code=201,
    response_model=AddVideosToBoardResult,
)
async def add_videos_to_board(
    board_id: str = Body(description="The id of the board to add to"),
    video_ids: list[str] = Body(description="The ids of the videos to add", embed=True),
) -> AddVideosToBoardResult:
    """Adds a list of videos to a board"""
    raise HTTPException(status_code=501, detail="Not implemented")


@board_videos_router.post(
    "/batch/delete",
    operation_id="remove_videos_from_board",
    responses={
        201: {"description": "Videos were removed from board successfully"},
    },
    status_code=201,
    response_model=RemoveVideosFromBoardResult,
)
async def remove_videos_from_board(
    video_ids: list[str] = Body(description="The ids of the videos to remove", embed=True),
) -> RemoveVideosFromBoardResult:
    """Removes a list of videos from their board, if they had one"""
    raise HTTPException(status_code=501, detail="Not implemented")
invokeai/app/api/routers/videos.py (new file, 119 lines)

@@ -0,0 +1,119 @@
from typing import Optional

from fastapi import Body, HTTPException, Path, Query
from fastapi.routing import APIRouter

from invokeai.app.services.shared.pagination import OffsetPaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
from invokeai.app.services.videos_common import (
    DeleteVideosResult,
    StarredVideosResult,
    UnstarredVideosResult,
    VideoDTO,
    VideoIdsResult,
    VideoRecordChanges,
)

videos_router = APIRouter(prefix="/v1/videos", tags=["videos"])


@videos_router.patch(
    "/i/{video_id}",
    operation_id="update_video",
    response_model=VideoDTO,
)
async def update_video(
    video_id: str = Path(description="The id of the video to update"),
    video_changes: VideoRecordChanges = Body(description="The changes to apply to the video"),
) -> VideoDTO:
    """Updates a video"""
    raise HTTPException(status_code=501, detail="Not implemented")


@videos_router.get(
    "/i/{video_id}",
    operation_id="get_video_dto",
    response_model=VideoDTO,
)
async def get_video_dto(
    video_id: str = Path(description="The id of the video to get"),
) -> VideoDTO:
    """Gets a video's DTO"""
    raise HTTPException(status_code=501, detail="Not implemented")


@videos_router.post("/delete", operation_id="delete_videos_from_list", response_model=DeleteVideosResult)
async def delete_videos_from_list(
    video_ids: list[str] = Body(description="The list of ids of videos to delete", embed=True),
) -> DeleteVideosResult:
    raise HTTPException(status_code=501, detail="Not implemented")


@videos_router.post("/star", operation_id="star_videos_in_list", response_model=StarredVideosResult)
async def star_videos_in_list(
    video_ids: list[str] = Body(description="The list of ids of videos to star", embed=True),
) -> StarredVideosResult:
    raise HTTPException(status_code=501, detail="Not implemented")


@videos_router.post("/unstar", operation_id="unstar_videos_in_list", response_model=UnstarredVideosResult)
async def unstar_videos_in_list(
    video_ids: list[str] = Body(description="The list of ids of videos to unstar", embed=True),
) -> UnstarredVideosResult:
    raise HTTPException(status_code=501, detail="Not implemented")


@videos_router.delete("/uncategorized", operation_id="delete_uncategorized_videos", response_model=DeleteVideosResult)
async def delete_uncategorized_videos() -> DeleteVideosResult:
    """Deletes all videos that are uncategorized"""
    raise HTTPException(status_code=501, detail="Not implemented")


@videos_router.get("/", operation_id="list_video_dtos", response_model=OffsetPaginatedResults[VideoDTO])
async def list_video_dtos(
    is_intermediate: Optional[bool] = Query(default=None, description="Whether to list intermediate videos."),
    board_id: Optional[str] = Query(
        default=None,
        description="The board id to filter by. Use 'none' to find videos without a board.",
    ),
    offset: int = Query(default=0, description="The page offset"),
    limit: int = Query(default=10, description="The number of videos per page"),
    order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"),
    starred_first: bool = Query(default=True, description="Whether to sort by starred videos first"),
    search_term: Optional[str] = Query(default=None, description="The term to search for"),
) -> OffsetPaginatedResults[VideoDTO]:
    """Lists video DTOs"""
    raise HTTPException(status_code=501, detail="Not implemented")


@videos_router.get("/ids", operation_id="get_video_ids")
async def get_video_ids(
    is_intermediate: Optional[bool] = Query(default=None, description="Whether to list intermediate videos."),
    board_id: Optional[str] = Query(
        default=None,
        description="The board id to filter by. Use 'none' to find videos without a board.",
    ),
    order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"),
    starred_first: bool = Query(default=True, description="Whether to sort by starred videos first"),
    search_term: Optional[str] = Query(default=None, description="The term to search for"),
) -> VideoIdsResult:
    """Gets ordered list of video ids with metadata for optimistic updates"""
    raise HTTPException(status_code=501, detail="Not implemented")


@videos_router.post(
    "/videos_by_ids",
    operation_id="get_videos_by_ids",
    responses={200: {"model": list[VideoDTO]}},
)
async def get_videos_by_ids(
    video_ids: list[str] = Body(embed=True, description="Object containing list of video ids to fetch DTOs for"),
) -> list[VideoDTO]:
    """Gets video DTOs for the specified video ids. Maintains order of input ids."""
    raise HTTPException(status_code=501, detail="Not implemented")
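A hedged sketch of how a client could call these routes once they are implemented; today each one returns HTTP 501. The server URL assumes InvokeAI's default local port, and the routers are mounted under `/api` (see the `api_app` hunks below).

```python
import requests

BASE = "http://127.0.0.1:9090/api/v1/videos"  # assumed default local server

# List the first page of video DTOs for videos without a board.
resp = requests.get(BASE + "/", params={"board_id": "none", "offset": 0, "limit": 10})
print(resp.status_code)  # 501 until the endpoint is implemented

# Star two videos; `embed=True` body params expect a JSON object keyed by field name.
resp = requests.post(BASE + "/star", json={"video_ids": ["vid-1", "vid-2"]})
print(resp.status_code)
```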
@@ -18,6 +18,7 @@ from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles
 from invokeai.app.api.routers import (
     app_info,
     board_images,
+    board_videos,
     boards,
     client_state,
     download_queue,
@@ -27,6 +28,7 @@ from invokeai.app.api.routers import (
     session_queue,
     style_presets,
     utilities,
+    videos,
     workflows,
 )
 from invokeai.app.api.sockets import SocketIO
@@ -125,8 +127,10 @@ app.include_router(utilities.utilities_router, prefix="/api")
 app.include_router(model_manager.model_manager_router, prefix="/api")
 app.include_router(download_queue.download_queue_router, prefix="/api")
 app.include_router(images.images_router, prefix="/api")
+app.include_router(videos.videos_router, prefix="/api")
 app.include_router(boards.boards_router, prefix="/api")
 app.include_router(board_images.board_images_router, prefix="/api")
+app.include_router(board_videos.board_videos_router, prefix="/api")
 app.include_router(model_relationships.model_relationships_router, prefix="/api")
 app.include_router(app_info.app_router, prefix="/api")
 app.include_router(session_queue.session_queue_router, prefix="/api")
@@ -17,6 +17,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.model_manager.load.load_base import LoadedModel
 from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_cogview4

 # TODO(ryand): This is effectively a copy of SD3ImageToLatentsInvocation and a subset of ImageToLatentsInvocation. We
 # should refactor to avoid this duplication.
@@ -38,7 +39,11 @@ class CogView4ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):

     @staticmethod
     def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
-        with vae_info as vae:
+        assert isinstance(vae_info.model, AutoencoderKL)
+        estimated_working_memory = estimate_vae_working_memory_cogview4(
+            operation="encode", image_tensor=image_tensor, vae=vae_info.model
+        )
+        with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
             assert isinstance(vae, AutoencoderKL)

             vae.disable_tiling()
@@ -62,6 +67,8 @@ class CogView4ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
         image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")

         vae_info = context.models.load(self.vae.vae)
+        assert isinstance(vae_info.model, AutoencoderKL)
+
         latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)

         latents = latents.to("cpu")
@@ -6,7 +6,6 @@ from einops import rearrange
 from PIL import Image

 from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
-from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     Input,
@@ -20,6 +19,7 @@ from invokeai.app.invocations.primitives import ImageOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_cogview4

 # TODO(ryand): This is effectively a copy of SD3LatentsToImageInvocation and a subset of LatentsToImageInvocation. We
 # should refactor to avoid this duplication.
@@ -39,22 +39,15 @@ class CogView4LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     latents: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
     vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection)

-    def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoencoderKL) -> int:
-        """Estimate the working memory required by the invocation in bytes."""
-        out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
-        out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
-        element_size = next(vae.parameters()).element_size()
-        scaling_constant = 2200  # Determined experimentally.
-        working_memory = out_h * out_w * element_size * scaling_constant
-        return int(working_memory)
-
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> ImageOutput:
         latents = context.tensors.load(self.latents.latents_name)

         vae_info = context.models.load(self.vae.vae)
         assert isinstance(vae_info.model, (AutoencoderKL))
-        estimated_working_memory = self._estimate_working_memory(latents, vae_info.model)
+        estimated_working_memory = estimate_vae_working_memory_cogview4(
+            operation="decode", image_tensor=latents, vae=vae_info.model
+        )
         with (
             SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes),
             vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae),
@@ -64,12 +64,16 @@ class UIType(str, Enum, metaclass=MetaEnum):
     Imagen3Model = "Imagen3ModelField"
     Imagen4Model = "Imagen4ModelField"
     ChatGPT4oModel = "ChatGPT4oModelField"
     Gemini2_5Model = "Gemini2_5ModelField"
     FluxKontextModel = "FluxKontextModelField"
+    Veo3Model = "Veo3ModelField"
+    RunwayModel = "RunwayModelField"
     # endregion

     # region Misc Field Types
     Scheduler = "SchedulerField"
     Any = "AnyField"
+    Video = "VideoField"
     # endregion

     # region Internal Field Types
@@ -224,6 +228,12 @@ class ImageField(BaseModel):
     image_name: str = Field(description="The name of the image")


+class VideoField(BaseModel):
+    """A video primitive field"""
+
+    video_id: str = Field(description="The id of the video")
+
+
 class BoardField(BaseModel):
     """A board primitive field"""
@@ -328,6 +328,21 @@ class FluxDenoiseInvocation(BaseInvocation):
             cfg_scale_end_step=self.cfg_scale_end_step,
         )

+        kontext_extension = None
+        if self.kontext_conditioning:
+            if not self.controlnet_vae:
+                raise ValueError("A VAE (e.g., controlnet_vae) must be provided to use Kontext conditioning.")
+
+            kontext_extension = KontextExtension(
+                context=context,
+                kontext_conditioning=self.kontext_conditioning
+                if isinstance(self.kontext_conditioning, list)
+                else [self.kontext_conditioning],
+                vae_field=self.controlnet_vae,
+                device=TorchDevice.choose_torch_device(),
+                dtype=inference_dtype,
+            )
+
         with ExitStack() as exit_stack:
             # Prepare ControlNet extensions.
             # Note: We do this before loading the transformer model to minimize peak memory (see implementation).
@@ -385,21 +400,6 @@ class FluxDenoiseInvocation(BaseInvocation):
                 dtype=inference_dtype,
             )

-            kontext_extension = None
-            if self.kontext_conditioning:
-                if not self.controlnet_vae:
-                    raise ValueError("A VAE (e.g., controlnet_vae) must be provided to use Kontext conditioning.")
-
-                kontext_extension = KontextExtension(
-                    context=context,
-                    kontext_conditioning=self.kontext_conditioning
-                    if isinstance(self.kontext_conditioning, list)
-                    else [self.kontext_conditioning],
-                    vae_field=self.controlnet_vae,
-                    device=TorchDevice.choose_torch_device(),
-                    dtype=inference_dtype,
-                )
-
             # Prepare Kontext conditioning if provided
             img_cond_seq = None
             img_cond_seq_ids = None
@@ -3,7 +3,6 @@ from einops import rearrange
 from PIL import Image

 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     Input,
@@ -18,6 +17,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.flux.modules.autoencoder import AutoEncoder
 from invokeai.backend.model_manager.load.load_base import LoadedModel
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_flux


 @invocation(
@@ -39,17 +39,11 @@ class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
         input=Input.Connection,
     )

-    def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoEncoder) -> int:
-        """Estimate the working memory required by the invocation in bytes."""
-        out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
-        out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
-        element_size = next(vae.parameters()).element_size()
-        scaling_constant = 2200  # Determined experimentally.
-        working_memory = out_h * out_w * element_size * scaling_constant
-        return int(working_memory)
-
     def _vae_decode(self, vae_info: LoadedModel, latents: torch.Tensor) -> Image.Image:
-        estimated_working_memory = self._estimate_working_memory(latents, vae_info.model)
+        assert isinstance(vae_info.model, AutoEncoder)
+        estimated_working_memory = estimate_vae_working_memory_flux(
+            operation="decode", image_tensor=latents, vae=vae_info.model
+        )
         with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
             assert isinstance(vae, AutoEncoder)
             vae_dtype = next(iter(vae.parameters())).dtype
@@ -15,6 +15,7 @@ from invokeai.backend.flux.modules.autoencoder import AutoEncoder
 from invokeai.backend.model_manager import LoadedModel
 from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_flux


 @invocation(
@@ -41,8 +42,12 @@ class FluxVaeEncodeInvocation(BaseInvocation):
         # TODO(ryand): Write a util function for generating random tensors that is consistent across devices / dtypes.
         # There's a starting point in get_noise(...), but it needs to be extracted and generalized. This function
         # should be used for VAE encode sampling.
+        assert isinstance(vae_info.model, AutoEncoder)
+        estimated_working_memory = estimate_vae_working_memory_flux(
+            operation="encode", image_tensor=image_tensor, vae=vae_info.model
+        )
         generator = torch.Generator(device=TorchDevice.choose_torch_device()).manual_seed(0)
-        with vae_info as vae:
+        with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
             assert isinstance(vae, AutoEncoder)
             vae_dtype = next(iter(vae.parameters())).dtype
             image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)
@@ -27,6 +27,7 @@ from invokeai.backend.model_manager import LoadedModel
 from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
 from invokeai.backend.stable_diffusion.vae_tiling import patch_vae_tiling_params
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd15_sdxl


 @invocation(
@@ -52,11 +53,24 @@ class ImageToLatentsInvocation(BaseInvocation):
     tile_size: int = InputField(default=0, multiple_of=8, description=FieldDescriptions.vae_tile_size)
     fp32: bool = InputField(default=False, description=FieldDescriptions.fp32)

-    @staticmethod
+    @classmethod
     def vae_encode(
-        vae_info: LoadedModel, upcast: bool, tiled: bool, image_tensor: torch.Tensor, tile_size: int = 0
+        cls,
+        vae_info: LoadedModel,
+        upcast: bool,
+        tiled: bool,
+        image_tensor: torch.Tensor,
+        tile_size: int = 0,
     ) -> torch.Tensor:
-        with vae_info as vae:
+        assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))
+        estimated_working_memory = estimate_vae_working_memory_sd15_sdxl(
+            operation="encode",
+            image_tensor=image_tensor,
+            vae=vae_info.model,
+            tile_size=tile_size if tiled else None,
+            fp32=upcast,
+        )
+        with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
             assert isinstance(vae, (AutoencoderKL, AutoencoderTiny))
             orig_dtype = vae.dtype
             if upcast:
@@ -113,6 +127,7 @@ class ImageToLatentsInvocation(BaseInvocation):
         image = context.images.get_pil(self.image.image_name)

         vae_info = context.models.load(self.vae.vae)
+        assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))

         image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
         if image_tensor.dim() == 3:
@@ -120,7 +135,11 @@ class ImageToLatentsInvocation(BaseInvocation):

         context.util.signal_progress("Running VAE encoder")
         latents = self.vae_encode(
-            vae_info=vae_info, upcast=self.fp32, tiled=self.tiled, image_tensor=image_tensor, tile_size=self.tile_size
+            vae_info=vae_info,
+            upcast=self.fp32,
+            tiled=self.tiled or context.config.get().force_tiled_decode,
+            image_tensor=image_tensor,
+            tile_size=self.tile_size,
         )

         latents = latents.to("cpu")
@@ -27,6 +27,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
 from invokeai.backend.stable_diffusion.vae_tiling import patch_vae_tiling_params
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd15_sdxl


 @invocation(
@@ -53,39 +54,6 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     tile_size: int = InputField(default=0, multiple_of=8, description=FieldDescriptions.vae_tile_size)
     fp32: bool = InputField(default=False, description=FieldDescriptions.fp32)

-    def _estimate_working_memory(
-        self, latents: torch.Tensor, use_tiling: bool, vae: AutoencoderKL | AutoencoderTiny
-    ) -> int:
-        """Estimate the working memory required by the invocation in bytes."""
-        # It was found experimentally that the peak working memory scales linearly with the number of pixels and the
-        # element size (precision). This estimate is accurate for both SD1 and SDXL.
-        element_size = 4 if self.fp32 else 2
-        scaling_constant = 2200  # Determined experimentally.
-
-        if use_tiling:
-            tile_size = self.tile_size
-            if tile_size == 0:
-                tile_size = vae.tile_sample_min_size
-                assert isinstance(tile_size, int)
-            out_h = tile_size
-            out_w = tile_size
-            working_memory = out_h * out_w * element_size * scaling_constant
-
-            # We add 25% to the working memory estimate when tiling is enabled to account for factors like tile overlap
-            # and number of tiles. We could make this more precise in the future, but this should be good enough for
-            # most use cases.
-            working_memory = working_memory * 1.25
-        else:
-            out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
-            out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
-            working_memory = out_h * out_w * element_size * scaling_constant
-
-        if self.fp32:
-            # If we are running in FP32, then we should account for the likely increase in model size (~250MB).
-            working_memory += 250 * 2**20
-
-        return int(working_memory)
-
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> ImageOutput:
         latents = context.tensors.load(self.latents.latents_name)
@@ -94,8 +62,13 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):

         vae_info = context.models.load(self.vae.vae)
         assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))

-        estimated_working_memory = self._estimate_working_memory(latents, use_tiling, vae_info.model)
+        estimated_working_memory = estimate_vae_working_memory_sd15_sdxl(
+            operation="decode",
+            image_tensor=latents,
+            vae=vae_info.model,
+            tile_size=self.tile_size if use_tiling else None,
+            fp32=self.fp32,
+        )
         with (
             SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes),
             vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae),
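The removed per-invocation `_estimate_working_memory` methods are consolidated into shared helpers in `invokeai.backend.util.vae_working_memory`, whose body is not part of this diff. As a minimal sketch, the SD1.5/SDXL helper could look like the following, reconstructed from the removed method above: the signature matches the new call sites, but the internals are an assumption.

```python
import torch
from diffusers import AutoencoderKL, AutoencoderTiny

LATENT_SCALE_FACTOR = 8  # SD1/SDXL latents are 1/8 the pixel resolution


def estimate_vae_working_memory_sd15_sdxl(
    operation: str,  # "encode" or "decode"
    image_tensor: torch.Tensor,  # image (encode) or latents (decode)
    vae: AutoencoderKL | AutoencoderTiny,
    tile_size: int | None,
    fp32: bool,
) -> int:
    """Sketch: estimate peak VAE working memory in bytes (logic from the removed method)."""
    element_size = 4 if fp32 else 2
    scaling_constant = 2200  # Determined experimentally.

    if tile_size is not None:
        if tile_size == 0:
            tile_size = vae.tile_sample_min_size
        working_memory = tile_size * tile_size * element_size * scaling_constant
        # Add 25% when tiling to account for tile overlap and tile count.
        working_memory *= 1.25
    elif operation == "decode":
        out_h = LATENT_SCALE_FACTOR * image_tensor.shape[-2]
        out_w = LATENT_SCALE_FACTOR * image_tensor.shape[-1]
        working_memory = out_h * out_w * element_size * scaling_constant
    else:  # "encode": the tensor is already at pixel resolution
        working_memory = image_tensor.shape[-2] * image_tensor.shape[-1] * element_size * scaling_constant

    if fp32:
        # Account for the likely increase in model size when upcasting (~250 MB).
        working_memory += 250 * 2**20

    return int(working_memory)
```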
@@ -27,6 +27,7 @@ from invokeai.app.invocations.fields import (
     SD3ConditioningField,
     TensorField,
     UIComponent,
+    VideoField,
 )
 from invokeai.app.services.images.images_common import ImageDTO
 from invokeai.app.services.shared.invocation_context import InvocationContext
@@ -287,6 +288,30 @@ class ImageCollectionInvocation(BaseInvocation):
         return ImageCollectionOutput(collection=self.collection)


 # endregion

+# region Video
+
+
+@invocation_output("video_output")
+class VideoOutput(BaseInvocationOutput):
+    """Base class for nodes that output a video"""
+
+    video: VideoField = OutputField(description="The output video")
+    width: int = OutputField(description="The width of the video in pixels")
+    height: int = OutputField(description="The height of the video in pixels")
+    duration_seconds: float = OutputField(description="The duration of the video in seconds")
+
+    @classmethod
+    def build(cls, video_id: str, width: int, height: int, duration_seconds: float) -> "VideoOutput":
+        return cls(
+            video=VideoField(video_id=video_id),
+            width=width,
+            height=height,
+            duration_seconds=duration_seconds,
+        )
+
+
+# endregion
+
 # region DenoiseMask
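For orientation, a sketch of how a video-producing node might use the new builder. The node body is hypothetical; only `VideoOutput` and `VideoField` come from the diff above.

```python
# Hypothetical invoke() body for a node that produced a video elsewhere.
def invoke(self, context) -> VideoOutput:
    video_id = "vid-123"  # would come from the video backend, not hard-coded
    return VideoOutput.build(video_id=video_id, width=1280, height=720, duration_seconds=5.0)
```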
@@ -17,6 +17,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.model_manager.load.load_base import LoadedModel
 from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd3


 @invocation(
@@ -34,7 +35,11 @@ class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):

     @staticmethod
     def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
-        with vae_info as vae:
+        assert isinstance(vae_info.model, AutoencoderKL)
+        estimated_working_memory = estimate_vae_working_memory_sd3(
+            operation="encode", image_tensor=image_tensor, vae=vae_info.model
+        )
+        with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
             assert isinstance(vae, AutoencoderKL)

             vae.disable_tiling()
@@ -58,6 +63,8 @@ class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
         image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")

         vae_info = context.models.load(self.vae.vae)
+        assert isinstance(vae_info.model, AutoencoderKL)
+
         latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)

         latents = latents.to("cpu")
@@ -6,7 +6,6 @@ from einops import rearrange
 from PIL import Image

 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     Input,
@@ -20,6 +19,7 @@ from invokeai.app.invocations.primitives import ImageOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd3


 @invocation(
@@ -41,22 +41,15 @@ class SD3LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
         input=Input.Connection,
     )

-    def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoencoderKL) -> int:
-        """Estimate the working memory required by the invocation in bytes."""
-        out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
-        out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
-        element_size = next(vae.parameters()).element_size()
-        scaling_constant = 2200  # Determined experimentally.
-        working_memory = out_h * out_w * element_size * scaling_constant
-        return int(working_memory)
-
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> ImageOutput:
         latents = context.tensors.load(self.latents.latents_name)

         vae_info = context.models.load(self.vae.vae)
         assert isinstance(vae_info.model, (AutoencoderKL))
-        estimated_working_memory = self._estimate_working_memory(latents, vae_info.model)
+        estimated_working_memory = estimate_vae_working_memory_sd3(
+            operation="decode", image_tensor=latents, vae=vae_info.model
+        )
         with (
             SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes),
             vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae),
@@ -49,3 +49,11 @@ class BoardImageRecordStorageBase(ABC):
     ) -> int:
         """Gets the number of images for a board."""
         pass
+
+    @abstractmethod
+    def get_asset_count_for_board(
+        self,
+        board_id: str,
+    ) -> int:
+        """Gets the number of assets for a board."""
+        pass
@@ -3,6 +3,8 @@ from typing import Optional, cast

 from invokeai.app.services.board_image_records.board_image_records_base import BoardImageRecordStorageBase
 from invokeai.app.services.image_records.image_records_common import (
+    ASSETS_CATEGORIES,
+    IMAGE_CATEGORIES,
     ImageCategory,
     ImageRecord,
     deserialize_image_record,
@@ -151,15 +153,38 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):

     def get_image_count_for_board(self, board_id: str) -> int:
         with self._db.transaction() as cursor:
+            # Convert the enum values to unique list of strings
+            category_strings = [c.value for c in set(IMAGE_CATEGORIES)]
+            # Create the correct length of placeholders
+            placeholders = ",".join("?" * len(category_strings))
             cursor.execute(
-                """--sql
+                f"""--sql
                 SELECT COUNT(*)
                 FROM board_images
                 INNER JOIN images ON board_images.image_name = images.image_name
-                WHERE images.is_intermediate = FALSE
+                WHERE images.is_intermediate = FALSE AND images.image_category IN ( {placeholders} )
                 AND board_images.board_id = ?;
                 """,
-                (board_id,),
+                (*category_strings, board_id),
             )
             count = cast(int, cursor.fetchone()[0])
             return count

+    def get_asset_count_for_board(self, board_id: str) -> int:
+        with self._db.transaction() as cursor:
+            # Convert the enum values to unique list of strings
+            category_strings = [c.value for c in set(ASSETS_CATEGORIES)]
+            # Create the correct length of placeholders
+            placeholders = ",".join("?" * len(category_strings))
+            cursor.execute(
+                f"""--sql
+                SELECT COUNT(*)
+                FROM board_images
+                INNER JOIN images ON board_images.image_name = images.image_name
+                WHERE images.is_intermediate = FALSE AND images.image_category IN ( {placeholders} )
+                AND board_images.board_id = ?;
+                """,
+                (*category_strings, board_id),
+            )
+            count = cast(int, cursor.fetchone()[0])
+            return count
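The pattern in these two methods generalizes: SQLite parameter binding cannot expand a Python list, so the query interpolates one `?` per element and passes the values alongside the other parameters. A self-contained demonstration with the stdlib `sqlite3` module (table and values are illustrative):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE images (image_name TEXT, image_category TEXT)")
conn.executemany("INSERT INTO images VALUES (?, ?)", [("a.png", "general"), ("b.png", "mask")])

categories = ["control", "mask", "user", "other"]
placeholders = ",".join("?" * len(categories))  # -> "?,?,?,?"
count = conn.execute(
    f"SELECT COUNT(*) FROM images WHERE image_category IN ({placeholders})",
    categories,
).fetchone()[0]
print(count)  # 1 (only b.png is in an asset category)
```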
@@ -12,12 +12,20 @@ class BoardDTO(BoardRecord):
     """The URL of the thumbnail of the most recent image in the board."""
     image_count: int = Field(description="The number of images in the board.")
     """The number of images in the board."""
+    asset_count: int = Field(description="The number of assets in the board.")
+    """The number of assets in the board."""
+    video_count: int = Field(description="The number of videos in the board.")
+    """The number of videos in the board."""


-def board_record_to_dto(board_record: BoardRecord, cover_image_name: Optional[str], image_count: int) -> BoardDTO:
+def board_record_to_dto(
+    board_record: BoardRecord, cover_image_name: Optional[str], image_count: int, asset_count: int, video_count: int
+) -> BoardDTO:
     """Converts a board record to a board DTO."""
     return BoardDTO(
         **board_record.model_dump(exclude={"cover_image_name"}),
         cover_image_name=cover_image_name,
         image_count=image_count,
+        asset_count=asset_count,
+        video_count=video_count,
     )
@@ -17,7 +17,7 @@ class BoardService(BoardServiceABC):
         board_name: str,
     ) -> BoardDTO:
         board_record = self.__invoker.services.board_records.save(board_name)
-        return board_record_to_dto(board_record, None, 0)
+        return board_record_to_dto(board_record, None, 0, 0, 0)

     def get_dto(self, board_id: str) -> BoardDTO:
         board_record = self.__invoker.services.board_records.get(board_id)
@@ -27,7 +27,9 @@ class BoardService(BoardServiceABC):
         else:
             cover_image_name = None
         image_count = self.__invoker.services.board_image_records.get_image_count_for_board(board_id)
-        return board_record_to_dto(board_record, cover_image_name, image_count)
+        asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(board_id)
+        video_count = 0  # noop for OSS
+        return board_record_to_dto(board_record, cover_image_name, image_count, asset_count, video_count)

     def update(
         self,
@@ -42,7 +44,9 @@ class BoardService(BoardServiceABC):
             cover_image_name = None

         image_count = self.__invoker.services.board_image_records.get_image_count_for_board(board_id)
-        return board_record_to_dto(board_record, cover_image_name, image_count)
+        asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(board_id)
+        video_count = 0  # noop for OSS
+        return board_record_to_dto(board_record, cover_image_name, image_count, asset_count, video_count)

     def delete(self, board_id: str) -> None:
         self.__invoker.services.board_records.delete(board_id)
@@ -67,7 +71,9 @@ class BoardService(BoardServiceABC):
             cover_image_name = None

         image_count = self.__invoker.services.board_image_records.get_image_count_for_board(r.board_id)
-        board_dtos.append(board_record_to_dto(r, cover_image_name, image_count))
+        asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(r.board_id)
+        video_count = 0  # noop for OSS
+        board_dtos.append(board_record_to_dto(r, cover_image_name, image_count, asset_count, video_count))

         return OffsetPaginatedResults[BoardDTO](items=board_dtos, offset=offset, limit=limit, total=len(board_dtos))

@@ -84,6 +90,8 @@ class BoardService(BoardServiceABC):
             cover_image_name = None

         image_count = self.__invoker.services.board_image_records.get_image_count_for_board(r.board_id)
-        board_dtos.append(board_record_to_dto(r, cover_image_name, image_count))
+        asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(r.board_id)
+        video_count = 0  # noop for OSS
+        board_dtos.append(board_record_to_dto(r, cover_image_name, image_count, asset_count, video_count))

         return board_dtos
@@ -58,6 +58,15 @@ class ImageCategory(str, Enum, metaclass=MetaEnum):
     """OTHER: The image is some other type of image with a specialized purpose. To be used by external nodes."""


+IMAGE_CATEGORIES: list[ImageCategory] = [ImageCategory.GENERAL]
+ASSETS_CATEGORIES: list[ImageCategory] = [
+    ImageCategory.CONTROL,
+    ImageCategory.MASK,
+    ImageCategory.USER,
+    ImageCategory.OTHER,
+]
+
+
 class InvalidImageCategoryException(ValueError):
     """Raised when a provided value is not a valid ImageCategory.
@@ -186,8 +186,9 @@ class ModelInstallService(ModelInstallServiceBase):
         info: AnyModelConfig = self._probe(Path(model_path), config)  # type: ignore

         if preferred_name := config.name:
-            # Careful! Don't use pathlib.Path(...).with_suffix - it can will strip everything after the first dot.
-            preferred_name = f"{preferred_name}{model_path.suffix}"
+            if Path(model_path).is_file():
+                # Careful! Don't use pathlib.Path(...).with_suffix - it can will strip everything after the first dot.
+                preferred_name = f"{preferred_name}{model_path.suffix}"

         dest_path = (
             self.app_config.models_path / info.base.value / info.type.value / (preferred_name or model_path.name)
@@ -622,16 +623,13 @@ class ModelInstallService(ModelInstallServiceBase):
         if old_path == new_path:
             return old_path

+        if new_path.exists():
+            raise FileExistsError(f"Cannot move {old_path} to {new_path}: destination already exists")
+
         new_path.parent.mkdir(parents=True, exist_ok=True)

-        # if path already exists then we jigger the name to make it unique
-        counter: int = 1
-        while new_path.exists():
-            path = new_path.with_stem(new_path.stem + f"_{counter:02d}")
-            if not path.exists():
-                new_path = path
-            counter += 1
         move(old_path, new_path)

         return new_path

     def _probe(self, model_path: Path, config: Optional[ModelRecordChanges] = None):
@@ -15,6 +15,7 @@ from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
     ControlAdapterDefaultSettings,
+    LoraModelDefaultSettings,
     MainModelDefaultSettings,
 )
 from invokeai.backend.model_manager.taxonomy import (
@@ -83,8 +84,8 @@ class ModelRecordChanges(BaseModelExcludeNull):
     file_size: Optional[int] = Field(description="Size of model file", default=None)
     format: Optional[str] = Field(description="format of model file", default=None)
     trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
-    default_settings: Optional[MainModelDefaultSettings | ControlAdapterDefaultSettings] = Field(
-        description="Default settings for this model", default=None
-    )
+    default_settings: Optional[MainModelDefaultSettings | LoraModelDefaultSettings | ControlAdapterDefaultSettings] = (
+        Field(description="Default settings for this model", default=None)
+    )

     # Checkpoint-specific changes
179
invokeai/app/services/videos_common.py
Normal file
179
invokeai/app/services/videos_common.py
Normal file
@@ -0,0 +1,179 @@
|
||||
import datetime
from typing import Optional, Union

from pydantic import BaseModel, Field, StrictBool, StrictStr

from invokeai.app.util.misc import get_iso_timestamp
from invokeai.app.util.model_exclude_null import BaseModelExcludeNull

VIDEO_DTO_COLS = ", ".join(
    [
        "videos." + c
        for c in [
            "video_id",
            "width",
            "height",
            "session_id",
            "node_id",
            "is_intermediate",
            "created_at",
            "updated_at",
            "deleted_at",
            "starred",
        ]
    ]
)


class VideoRecord(BaseModelExcludeNull):
    """Deserialized video record without metadata."""

    video_id: str = Field(description="The unique id of the video.")
    """The unique id of the video."""
    width: int = Field(description="The width of the video in px.")
    """The actual width of the video in px. This may be different from the width in metadata."""
    height: int = Field(description="The height of the video in px.")
    """The actual height of the video in px. This may be different from the height in metadata."""
    created_at: Union[datetime.datetime, str] = Field(description="The created timestamp of the video.")
    """The created timestamp of the video."""
    updated_at: Union[datetime.datetime, str] = Field(description="The updated timestamp of the video.")
    """The updated timestamp of the video."""
    deleted_at: Optional[Union[datetime.datetime, str]] = Field(
        default=None, description="The deleted timestamp of the video."
    )
    """The deleted timestamp of the video."""
    is_intermediate: bool = Field(description="Whether this is an intermediate video.")
    """Whether this is an intermediate video."""
    session_id: Optional[str] = Field(
        default=None,
        description="The session ID that generated this video, if it is a generated video.",
    )
    """The session ID that generated this video, if it is a generated video."""
    node_id: Optional[str] = Field(
        default=None,
        description="The node ID that generated this video, if it is a generated video.",
    )
    """The node ID that generated this video, if it is a generated video."""
    starred: bool = Field(description="Whether this video is starred.")
    """Whether this video is starred."""


class VideoRecordChanges(BaseModelExcludeNull):
    """A set of changes to apply to a video record.

    Only limited changes are valid:
    - `session_id`: change the session associated with a video
    - `is_intermediate`: change the video's `is_intermediate` flag
    - `starred`: change whether the video is starred
    """

    session_id: Optional[StrictStr] = Field(
        default=None,
        description="The video's new session ID.",
    )
    """The video's new session ID."""
    is_intermediate: Optional[StrictBool] = Field(default=None, description="The video's new `is_intermediate` flag.")
    """The video's new `is_intermediate` flag."""
    starred: Optional[StrictBool] = Field(default=None, description="The video's new `starred` state")
    """The video's new `starred` state."""


def deserialize_video_record(video_dict: dict) -> VideoRecord:
    """Deserializes a video record."""

    # Retrieve all the values, setting "reasonable" defaults if they are not present.
    video_id = video_dict.get("video_id", "unknown")
    width = video_dict.get("width", 0)
    height = video_dict.get("height", 0)
    session_id = video_dict.get("session_id", None)
    node_id = video_dict.get("node_id", None)
    created_at = video_dict.get("created_at", get_iso_timestamp())
    updated_at = video_dict.get("updated_at", get_iso_timestamp())
    deleted_at = video_dict.get("deleted_at", get_iso_timestamp())
    is_intermediate = video_dict.get("is_intermediate", False)
    starred = video_dict.get("starred", False)

    return VideoRecord(
        video_id=video_id,
        width=width,
        height=height,
        session_id=session_id,
        node_id=node_id,
        created_at=created_at,
        updated_at=updated_at,
        deleted_at=deleted_at,
        is_intermediate=is_intermediate,
        starred=starred,
    )


class VideoCollectionCounts(BaseModel):
    starred_count: int = Field(description="The number of starred videos in the collection.")
    unstarred_count: int = Field(description="The number of unstarred videos in the collection.")


class VideoIdsResult(BaseModel):
    """Response containing ordered video ids with metadata for optimistic updates."""

    video_ids: list[str] = Field(description="Ordered list of video ids")
    starred_count: int = Field(description="Number of starred videos (when starred_first=True)")
    total_count: int = Field(description="Total number of videos matching the query")


class VideoUrlsDTO(BaseModelExcludeNull):
    """The URLs for a video and its thumbnail."""

    video_id: str = Field(description="The unique id of the video.")
    """The unique id of the video."""
    video_url: str = Field(description="The URL of the video.")
    """The URL of the video."""
    thumbnail_url: str = Field(description="The URL of the video's thumbnail.")
    """The URL of the video's thumbnail."""


class VideoDTO(VideoRecord, VideoUrlsDTO):
    """Deserialized video record, enriched for the frontend."""

    board_id: Optional[str] = Field(
        default=None, description="The id of the board the video belongs to, if one exists."
    )
    """The id of the board the video belongs to, if one exists."""


def video_record_to_dto(
    video_record: VideoRecord,
    video_url: str,
    thumbnail_url: str,
    board_id: Optional[str],
) -> VideoDTO:
    """Converts a video record to a video DTO."""
    return VideoDTO(
        **video_record.model_dump(),
        video_url=video_url,
        thumbnail_url=thumbnail_url,
        board_id=board_id,
    )


class ResultWithAffectedBoards(BaseModel):
    affected_boards: list[str] = Field(description="The ids of boards affected by the delete operation")


class DeleteVideosResult(ResultWithAffectedBoards):
    deleted_videos: list[str] = Field(description="The ids of the videos that were deleted")


class StarredVideosResult(ResultWithAffectedBoards):
    starred_videos: list[str] = Field(description="The ids of the videos that were starred")


class UnstarredVideosResult(ResultWithAffectedBoards):
    unstarred_videos: list[str] = Field(description="The ids of the videos that were unstarred")


class AddVideosToBoardResult(ResultWithAffectedBoards):
    added_videos: list[str] = Field(description="The video ids that were added to the board")


class RemoveVideosFromBoardResult(ResultWithAffectedBoards):
    removed_videos: list[str] = Field(description="The video ids that were removed from their board")
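A quick usage sketch for deserialize_video_record (not part of the diff; the row values are hypothetical). Missing keys fall back to the "reasonable" defaults above:

    row = {
        "video_id": "vid_123",
        "width": 1280,
        "height": 720,
        "session_id": "sess_abc",
        "created_at": "2025-01-01T00:00:00Z",
        "updated_at": "2025-01-01T00:00:00Z",
        "starred": True,
    }

    record = deserialize_video_record(row)
    assert record.video_id == "vid_123"
    assert record.is_intermediate is False  # absent key -> default False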
@@ -106,8 +106,8 @@ class KontextExtension:

         # Track cumulative dimensions for spatial tiling
         # These track the running extent of the virtual canvas in latent space
-        h = 0  # Running height extent
-        w = 0  # Running width extent
+        canvas_h = 0  # Running canvas height
+        canvas_w = 0  # Running canvas width

         vae_info = self._context.models.load(self._vae_field.vae)

@@ -131,12 +131,20 @@ class KontextExtension:

         # Continue with VAE encoding
         # Don't sample from the distribution for reference images - use the mean (matching ComfyUI)
-        with vae_info as vae:
+        # Estimate working memory for encode operation (50% of decode memory requirements)
+        img_h = image_tensor.shape[-2]
+        img_w = image_tensor.shape[-1]
+        element_size = next(vae_info.model.parameters()).element_size()
+        scaling_constant = 1100  # 50% of decode scaling constant (2200)
+        estimated_working_memory = int(img_h * img_w * element_size * scaling_constant)
+
+        with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
             assert isinstance(vae, AutoEncoder)
             vae_dtype = next(iter(vae.parameters())).dtype
             image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)
             # Use sample=False to get the distribution mean without noise
             kontext_latents_unpacked = vae.encode(image_tensor, sample=False)
             TorchDevice.empty_cache()

         # Extract tensor dimensions
         batch_size, _, latent_height, latent_width = kontext_latents_unpacked.shape

@@ -154,21 +162,33 @@ class KontextExtension:
             kontext_latents_packed = pack(kontext_latents_unpacked).to(self._device, self._dtype)

             # Determine spatial offsets for this reference image
+            # - Compare the potential new canvas dimensions if we add the image vertically vs horizontally
+            # - Choose the placement that results in a more square-like canvas
             h_offset = 0
             w_offset = 0

             if idx > 0:  # First image starts at (0, 0)
-                # Check which placement would result in better canvas dimensions
-                # If adding to height would make the canvas taller than wide, tile horizontally
-                # Otherwise, tile vertically
-                if latent_height + h > latent_width + w:
+                # Calculate potential canvas dimensions for each tiling option
+                # Option 1: Tile vertically (below existing content)
+                potential_h_vertical = canvas_h + latent_height
+
+                # Option 2: Tile horizontally (to the right of existing content)
+                potential_w_horizontal = canvas_w + latent_width
+
+                # Choose arrangement that minimizes the maximum dimension
+                # This keeps the canvas closer to square, optimizing attention computation
+                if potential_h_vertical > potential_w_horizontal:
                     # Tile horizontally (to the right of existing images)
-                    w_offset = w
+                    w_offset = canvas_w
+                    canvas_w = canvas_w + latent_width
+                    canvas_h = max(canvas_h, latent_height)
                 else:
                     # Tile vertically (below existing images)
-                    h_offset = h
+                    h_offset = canvas_h
+                    canvas_h = canvas_h + latent_height
+                    canvas_w = max(canvas_w, latent_width)
+            else:
+                # First image - just set canvas dimensions
+                canvas_h = latent_height
+                canvas_w = latent_width

             # Generate IDs with both index offset and spatial offsets
             kontext_ids = generate_img_ids_with_offset(

@@ -182,11 +202,6 @@ class KontextExtension:
                 w_offset=w_offset,
             )

-            # Update cumulative dimensions
-            # Track the maximum extent of the virtual canvas after placing this image
-            h = max(h, latent_height + h_offset)
-            w = max(w, latent_width + w_offset)
-
             all_latents.append(kontext_latents_packed)
             all_ids.append(kontext_ids)
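To make the placement rule above concrete, here is a small standalone sketch of the same "keep the canvas square-ish" decision, outside the extension (function and values are illustrative, not part of the diff):

    def choose_placement(canvas_h: int, canvas_w: int, tile_h: int, tile_w: int):
        """Return (h_offset, w_offset, new_canvas_h, new_canvas_w), mirroring the rule in the patch."""
        if canvas_h + tile_h > canvas_w + tile_w:
            # Growing vertically would make the canvas taller than growing
            # horizontally would make it wide, so tile to the right instead.
            return 0, canvas_w, max(canvas_h, tile_h), canvas_w + tile_w
        # Otherwise tile below the existing content.
        return canvas_h, 0, canvas_h + tile_h, max(canvas_w, tile_w)

    # Three equal 64x64 latent tiles: the second goes below, the third goes to
    # the right, ending with a square 128x128 virtual canvas.
    canvas_h = canvas_w = 0
    for tile_h, tile_w in [(64, 64), (64, 64), (64, 64)]:
        if canvas_h == 0 and canvas_w == 0:
            h_off, w_off, canvas_h, canvas_w = 0, 0, tile_h, tile_w
        else:
            h_off, w_off, canvas_h, canvas_w = choose_placement(canvas_h, canvas_w, tile_h, tile_w)
        print(h_off, w_off, canvas_h, canvas_w)  # (0,0,64,64) -> (64,0,128,64) -> (0,64,128,128)

A square canvas keeps the token sequence for attention as short as possible for a given total area, which is the design rationale stated in the comments.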
invokeai/backend/image_util/imwatermark/vendor.py  (new file, 304 lines)
@@ -0,0 +1,304 @@
# This file is vendored from https://github.com/ShieldMnt/invisible-watermark
#
# `invisible-watermark` is MIT licensed as of August 23, 2025, when the code was copied into this repo.
#
# Why we vendored it in:
# `invisible-watermark` has a dependency on `opencv-python`, which conflicts with Invoke's dependency on
# `opencv-contrib-python`. It's easier to copy the code over than complicate the installation process by
# requiring an extra post-install step of removing `opencv-python` and installing `opencv-contrib-python`.

import struct
import uuid
import base64

import cv2
import numpy as np
import pywt


class WatermarkEncoder(object):
    def __init__(self, content=b""):
        seq = np.array([n for n in content], dtype=np.uint8)
        self._watermarks = list(np.unpackbits(seq))
        self._wmLen = len(self._watermarks)
        self._wmType = "bytes"

    def set_by_ipv4(self, addr):
        bits = []
        ips = addr.split(".")
        for ip in ips:
            bits += list(np.unpackbits(np.array([ip % 255], dtype=np.uint8)))
        self._watermarks = bits
        self._wmLen = len(self._watermarks)
        self._wmType = "ipv4"
        assert self._wmLen == 32

    def set_by_uuid(self, uid):
        u = uuid.UUID(uid)
        self._wmType = "uuid"
        seq = np.array([n for n in u.bytes], dtype=np.uint8)
        self._watermarks = list(np.unpackbits(seq))
        self._wmLen = len(self._watermarks)

    def set_by_bytes(self, content):
        self._wmType = "bytes"
        seq = np.array([n for n in content], dtype=np.uint8)
        self._watermarks = list(np.unpackbits(seq))
        self._wmLen = len(self._watermarks)

    def set_by_b16(self, b16):
        content = base64.b16decode(b16)
        self.set_by_bytes(content)
        self._wmType = "b16"

    def set_by_bits(self, bits=[]):
        self._watermarks = [int(bit) % 2 for bit in bits]
        self._wmLen = len(self._watermarks)
        self._wmType = "bits"

    def set_watermark(self, wmType="bytes", content=""):
        if wmType == "ipv4":
            self.set_by_ipv4(content)
        elif wmType == "uuid":
            self.set_by_uuid(content)
        elif wmType == "bits":
            self.set_by_bits(content)
        elif wmType == "bytes":
            self.set_by_bytes(content)
        elif wmType == "b16":
            self.set_by_b16(content)
        else:
            raise NameError("%s is not supported" % wmType)

    def get_length(self):
        return self._wmLen

    # @classmethod
    # def loadModel(cls):
    #     RivaWatermark.loadModel()

    def encode(self, cv2Image, method="dwtDct", **configs):
        (r, c, channels) = cv2Image.shape
        if r * c < 256 * 256:
            raise RuntimeError("image too small, should be larger than 256x256")

        if method == "dwtDct":
            embed = EmbedMaxDct(self._watermarks, wmLen=self._wmLen, **configs)
            return embed.encode(cv2Image)
        # elif method == 'dwtDctSvd':
        #     embed = EmbedDwtDctSvd(self._watermarks, wmLen=self._wmLen, **configs)
        #     return embed.encode(cv2Image)
        # elif method == 'rivaGan':
        #     embed = RivaWatermark(self._watermarks, self._wmLen)
        #     return embed.encode(cv2Image)
        else:
            raise NameError("%s is not supported" % method)


class WatermarkDecoder(object):
    def __init__(self, wm_type="bytes", length=0):
        self._wmType = wm_type
        if wm_type == "ipv4":
            self._wmLen = 32
        elif wm_type == "uuid":
            self._wmLen = 128
        elif wm_type == "bytes":
            self._wmLen = length
        elif wm_type == "bits":
            self._wmLen = length
        elif wm_type == "b16":
            self._wmLen = length
        else:
            raise NameError("%s is unsupported" % wm_type)

    def reconstruct_ipv4(self, bits):
        ips = [str(ip) for ip in list(np.packbits(bits))]
        return ".".join(ips)

    def reconstruct_uuid(self, bits):
        nums = np.packbits(bits)
        bstr = b""
        for i in range(16):
            bstr += struct.pack(">B", nums[i])

        return str(uuid.UUID(bytes=bstr))

    def reconstruct_bits(self, bits):
        # return ''.join([str(b) for b in bits])
        return bits

    def reconstruct_b16(self, bits):
        bstr = self.reconstruct_bytes(bits)
        return base64.b16encode(bstr)

    def reconstruct_bytes(self, bits):
        nums = np.packbits(bits)
        bstr = b""
        for i in range(self._wmLen // 8):
            bstr += struct.pack(">B", nums[i])
        return bstr

    def reconstruct(self, bits):
        if len(bits) != self._wmLen:
            raise RuntimeError("bits are not matched with watermark length")

        if self._wmType == "ipv4":
            return self.reconstruct_ipv4(bits)
        elif self._wmType == "uuid":
            return self.reconstruct_uuid(bits)
        elif self._wmType == "bits":
            return self.reconstruct_bits(bits)
        elif self._wmType == "b16":
            return self.reconstruct_b16(bits)
        else:
            return self.reconstruct_bytes(bits)

    def decode(self, cv2Image, method="dwtDct", **configs):
        (r, c, channels) = cv2Image.shape
        if r * c < 256 * 256:
            raise RuntimeError("image too small, should be larger than 256x256")

        bits = []
        if method == "dwtDct":
            embed = EmbedMaxDct(watermarks=[], wmLen=self._wmLen, **configs)
            bits = embed.decode(cv2Image)
        # elif method == 'dwtDctSvd':
        #     embed = EmbedDwtDctSvd(watermarks=[], wmLen=self._wmLen, **configs)
        #     bits = embed.decode(cv2Image)
        # elif method == 'rivaGan':
        #     embed = RivaWatermark(watermarks=[], wmLen=self._wmLen, **configs)
        #     bits = embed.decode(cv2Image)
        else:
            raise NameError("%s is not supported" % method)
        return self.reconstruct(bits)

    # @classmethod
    # def loadModel(cls):
    #     RivaWatermark.loadModel()


class EmbedMaxDct(object):
    def __init__(self, watermarks=[], wmLen=8, scales=[0, 36, 36], block=4):
        self._watermarks = watermarks
        self._wmLen = wmLen
        self._scales = scales
        self._block = block

    def encode(self, bgr):
        (row, col, channels) = bgr.shape

        yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)

        for channel in range(2):
            if self._scales[channel] <= 0:
                continue

            ca1, (h1, v1, d1) = pywt.dwt2(yuv[: row // 4 * 4, : col // 4 * 4, channel], "haar")
            self.encode_frame(ca1, self._scales[channel])

            yuv[: row // 4 * 4, : col // 4 * 4, channel] = pywt.idwt2((ca1, (v1, h1, d1)), "haar")

        bgr_encoded = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)
        return bgr_encoded

    def decode(self, bgr):
        (row, col, channels) = bgr.shape

        yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)

        scores = [[] for i in range(self._wmLen)]
        for channel in range(2):
            if self._scales[channel] <= 0:
                continue

            ca1, (h1, v1, d1) = pywt.dwt2(yuv[: row // 4 * 4, : col // 4 * 4, channel], "haar")

            scores = self.decode_frame(ca1, self._scales[channel], scores)

        avgScores = list(map(lambda l: np.array(l).mean(), scores))

        bits = np.array(avgScores) * 255 > 127
        return bits

    def decode_frame(self, frame, scale, scores):
        (row, col) = frame.shape
        num = 0

        for i in range(row // self._block):
            for j in range(col // self._block):
                block = frame[
                    i * self._block : i * self._block + self._block, j * self._block : j * self._block + self._block
                ]

                score = self.infer_dct_matrix(block, scale)
                # score = self.infer_dct_svd(block, scale)
                wmBit = num % self._wmLen
                scores[wmBit].append(score)
                num = num + 1

        return scores

    def diffuse_dct_svd(self, block, wmBit, scale):
        u, s, v = np.linalg.svd(cv2.dct(block))

        s[0] = (s[0] // scale + 0.25 + 0.5 * wmBit) * scale
        return cv2.idct(np.dot(u, np.dot(np.diag(s), v)))

    def infer_dct_svd(self, block, scale):
        u, s, v = np.linalg.svd(cv2.dct(block))

        score = 0
        score = int((s[0] % scale) > scale * 0.5)
        return score
        if score >= 0.5:
            return 1.0
        else:
            return 0.0

    def diffuse_dct_matrix(self, block, wmBit, scale):
        pos = np.argmax(abs(block.flatten()[1:])) + 1
        i, j = pos // self._block, pos % self._block
        val = block[i][j]
        if val >= 0.0:
            block[i][j] = (val // scale + 0.25 + 0.5 * wmBit) * scale
        else:
            val = abs(val)
            block[i][j] = -1.0 * (val // scale + 0.25 + 0.5 * wmBit) * scale
        return block

    def infer_dct_matrix(self, block, scale):
        pos = np.argmax(abs(block.flatten()[1:])) + 1
        i, j = pos // self._block, pos % self._block

        val = block[i][j]
        if val < 0:
            val = abs(val)

        if (val % scale) > 0.5 * scale:
            return 1
        else:
            return 0

    def encode_frame(self, frame, scale):
        """
        frame is a matrix (M, N)

        we get K (watermark bits size) blocks (self._block x self._block)

        For i-th block, we encode watermark[i] bit into it
        """
        (row, col) = frame.shape
        num = 0
        for i in range(row // self._block):
            for j in range(col // self._block):
                block = frame[
                    i * self._block : i * self._block + self._block, j * self._block : j * self._block + self._block
                ]
                wmBit = self._watermarks[(num % self._wmLen)]

                diffusedBlock = self.diffuse_dct_matrix(block, wmBit, scale)
                # diffusedBlock = self.diffuse_dct_svd(block, wmBit, scale)
                frame[
                    i * self._block : i * self._block + self._block, j * self._block : j * self._block + self._block
                ] = diffusedBlock

                num = num + 1
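A hedged roundtrip sketch for the vendored classes above (not part of the diff). The embedding survives moderate processing, but the YUV conversion rounds pixel values, so exact recovery is typical rather than guaranteed; the payload and image are placeholders:

    import numpy as np

    from invokeai.backend.image_util.imwatermark.vendor import WatermarkDecoder, WatermarkEncoder

    # A flat mid-gray BGR test frame; real use would pass an image from cv2.imread.
    # The encoder requires at least 256x256 pixels.
    image = np.full((512, 512, 3), 128, dtype=np.uint8)

    encoder = WatermarkEncoder()
    encoder.set_watermark("bytes", b"invoke")
    marked = encoder.encode(image, method="dwtDct")

    decoder = WatermarkDecoder("bytes", length=len(b"invoke") * 8)
    recovered = decoder.decode(marked, method="dwtDct")
    print(recovered)  # b"invoke" if the watermark survived color-space rounding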
@@ -6,13 +6,10 @@ configuration variable, that allows the watermarking to be supressed.

 import cv2
 import numpy as np
-from imwatermark import WatermarkEncoder
 from PIL import Image

 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config.config_default import get_config
-
-config = get_config()
+from invokeai.backend.image_util.imwatermark.vendor import WatermarkEncoder


 class InvisibleWatermark:
@@ -90,6 +90,11 @@ class MainModelDefaultSettings(BaseModel):
     model_config = ConfigDict(extra="forbid")


+class LoraModelDefaultSettings(BaseModel):
+    weight: float | None = Field(default=None, ge=-1, le=2, description="Default weight for this model")
+    model_config = ConfigDict(extra="forbid")
+
+
 class ControlAdapterDefaultSettings(BaseModel):
     # This could be narrowed to controlnet processor nodes, but they change. Leaving this a string is safer.
     preprocessor: str | None

@@ -287,6 +292,9 @@ class LoRAConfigBase(ABC, BaseModel):

     type: Literal[ModelType.LoRA] = ModelType.LoRA
     trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
+    default_settings: Optional[LoraModelDefaultSettings] = Field(
+        description="Default settings for this model", default=None
+    )

     @classmethod
     def flux_lora_format(cls, mod: ModelOnDisk):

@@ -492,6 +500,15 @@ class MainConfigBase(ABC, BaseModel):
     variant: AnyVariant = ModelVariantType.Normal


+class VideoConfigBase(ABC, BaseModel):
+    type: Literal[ModelType.Video] = ModelType.Video
+    trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
+    default_settings: Optional[MainModelDefaultSettings] = Field(
+        description="Default settings for this model", default=None
+    )
+    variant: AnyVariant = ModelVariantType.Normal
+
+
 class MainCheckpointConfig(CheckpointConfigBase, MainConfigBase, LegacyProbeMixin, ModelConfigBase):
     """Model config for main checkpoint models."""
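The new LoraModelDefaultSettings model bounds the weight to [-1, 2] and forbids unknown fields. A brief validation sketch (not part of the diff):

    from pydantic import ValidationError

    settings = LoraModelDefaultSettings(weight=0.75)  # ok: within [-1, 2]

    try:
        LoraModelDefaultSettings(weight=5.0)          # rejected by le=2
    except ValidationError as err:
        print(err.errors()[0]["type"])                # "less_than_equal"

    try:
        LoraModelDefaultSettings(weight=0.5, cfg_scale=7.0)  # unknown field
    except ValidationError as err:
        print(err.errors()[0]["type"])                # "extra_forbidden"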
@@ -649,6 +666,21 @@ class ApiModelConfig(MainConfigBase, ModelConfigBase):
         raise NotImplementedError("API models are not parsed from disk.")


+class VideoApiModelConfig(VideoConfigBase, ModelConfigBase):
+    """Model config for API-based video models."""
+
+    format: Literal[ModelFormat.Api] = ModelFormat.Api
+
+    @classmethod
+    def matches(cls, mod: ModelOnDisk) -> bool:
+        # API models are not stored on disk, so we can't match them.
+        return False
+
+    @classmethod
+    def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
+        raise NotImplementedError("API models are not parsed from disk.")
+
+
 def get_model_discriminator_value(v: Any) -> str:
     """
     Computes the discriminator value for a model config.

@@ -718,12 +750,13 @@ AnyModelConfig = Annotated[
         Annotated[FluxReduxConfig, FluxReduxConfig.get_tag()],
         Annotated[LlavaOnevisionConfig, LlavaOnevisionConfig.get_tag()],
         Annotated[ApiModelConfig, ApiModelConfig.get_tag()],
+        Annotated[VideoApiModelConfig, VideoApiModelConfig.get_tag()],
     ],
     Discriminator(get_model_discriminator_value),
 ]

 AnyModelConfigValidator = TypeAdapter(AnyModelConfig)
-AnyDefaultSettings: TypeAlias = Union[MainModelDefaultSettings, ControlAdapterDefaultSettings]
+AnyDefaultSettings: TypeAlias = Union[MainModelDefaultSettings, LoraModelDefaultSettings, ControlAdapterDefaultSettings]


 class ModelConfigFactory:
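For context on how the callable-discriminator union above resolves raw data into the right config class, here is a self-contained pydantic v2 sketch. The classes, tags, and fields are simplified stand-ins, not InvokeAI's actual configs:

    from typing import Annotated, Any, Literal, Union

    from pydantic import BaseModel, Discriminator, Tag, TypeAdapter


    class ImageApiConfig(BaseModel):
        type: Literal["main"] = "main"
        format: Literal["api"] = "api"
        name: str


    class VideoApiConfig(BaseModel):
        type: Literal["video"] = "video"
        format: Literal["api"] = "api"
        name: str


    def get_discriminator_value(v: Any) -> str:
        # Combine the fields that uniquely identify a config class into one tag,
        # analogous to what get_model_discriminator_value does for the real configs.
        if isinstance(v, dict):
            return f"{v['type']}.{v['format']}"
        return f"{v.type}.{v.format}"


    AnyConfig = Annotated[
        Union[
            Annotated[ImageApiConfig, Tag("main.api")],
            Annotated[VideoApiConfig, Tag("video.api")],
        ],
        Discriminator(get_discriminator_value),
    ]

    adapter = TypeAdapter(AnyConfig)
    config = adapter.validate_python({"type": "video", "format": "api", "name": "Veo 3"})
    assert isinstance(config, VideoApiConfig)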
@@ -23,6 +23,7 @@ from invokeai.backend.model_manager.config import (
     AnyModelConfig,
     ControlAdapterDefaultSettings,
     InvalidModelConfigException,
+    LoraModelDefaultSettings,
     MainModelDefaultSettings,
     ModelConfigFactory,
     SubmodelDefinition,

@@ -217,6 +218,8 @@ class ModelProbe(object):
         if not fields["default_settings"]:
             if fields["type"] in {ModelType.ControlNet, ModelType.T2IAdapter, ModelType.ControlLoRa}:
                 fields["default_settings"] = get_default_settings_control_adapters(fields["name"])
+            if fields["type"] in {ModelType.LoRA}:
+                fields["default_settings"] = get_default_settings_lora()
             elif fields["type"] is ModelType.Main:
                 fields["default_settings"] = get_default_settings_main(fields["base"])

@@ -543,6 +546,10 @@ def get_default_settings_control_adapters(model_name: str) -> Optional[ControlAdapterDefaultSettings]:
     return None


+def get_default_settings_lora() -> LoraModelDefaultSettings:
+    return LoraModelDefaultSettings()
+
+
 def get_default_settings_main(model_base: BaseModelType) -> Optional[MainModelDefaultSettings]:
     if model_base is BaseModelType.StableDiffusion1 or model_base is BaseModelType.StableDiffusion2:
         return MainModelDefaultSettings(width=512, height=512)
@@ -28,8 +28,11 @@ class BaseModelType(str, Enum):
     CogView4 = "cogview4"
     Imagen3 = "imagen3"
     Imagen4 = "imagen4"
     Gemini2_5 = "gemini-2.5"
     ChatGPT4o = "chatgpt-4o"
     FluxKontext = "flux-kontext"
+    Veo3 = "veo3"
+    Runway = "runway"


 class ModelType(str, Enum):

@@ -51,6 +54,7 @@ class ModelType(str, Enum):
     SigLIP = "siglip"
     FluxRedux = "flux_redux"
     LlavaOnevision = "llava_onevision"
+    Video = "video"


 class SubModelType(str, Enum):
@@ -18,16 +18,25 @@ def is_state_dict_likely_in_flux_diffusers_format(state_dict: Dict[str, torch.Tensor]) -> bool:
     # First, check that all keys end in "lora_A.weight" or "lora_B.weight" (i.e. are in PEFT format).
     all_keys_in_peft_format = all(k.endswith(("lora_A.weight", "lora_B.weight")) for k in state_dict.keys())

-    # Next, check that this is likely a FLUX model by spot-checking a few keys.
-    expected_keys = [
+    # Check if keys use transformer prefix
+    transformer_prefix_keys = [
         "transformer.single_transformer_blocks.0.attn.to_q.lora_A.weight",
         "transformer.single_transformer_blocks.0.attn.to_q.lora_B.weight",
         "transformer.transformer_blocks.0.attn.add_q_proj.lora_A.weight",
         "transformer.transformer_blocks.0.attn.add_q_proj.lora_B.weight",
     ]
-    all_expected_keys_present = all(k in state_dict for k in expected_keys)
+    transformer_keys_present = all(k in state_dict for k in transformer_prefix_keys)

-    return all_keys_in_peft_format and all_expected_keys_present
+    # Check if keys use base_model.model prefix
+    base_model_prefix_keys = [
+        "base_model.model.single_transformer_blocks.0.attn.to_q.lora_A.weight",
+        "base_model.model.single_transformer_blocks.0.attn.to_q.lora_B.weight",
+        "base_model.model.transformer_blocks.0.attn.add_q_proj.lora_A.weight",
+        "base_model.model.transformer_blocks.0.attn.add_q_proj.lora_B.weight",
+    ]
+    base_model_keys_present = all(k in state_dict for k in base_model_prefix_keys)
+
+    return all_keys_in_peft_format and (transformer_keys_present or base_model_keys_present)


 def lora_model_from_flux_diffusers_state_dict(

@@ -49,8 +58,16 @@ def lora_layers_from_flux_diffusers_grouped_state_dict(
     https://github.com/huggingface/diffusers/blob/55ac421f7bb12fd00ccbef727be4dc2f3f920abb/scripts/convert_flux_to_diffusers.py
     """

-    # Remove the "transformer." prefix from all keys.
-    grouped_state_dict = {k.replace("transformer.", ""): v for k, v in grouped_state_dict.items()}
+    # Determine which prefix is used and remove it from all keys.
+    # Check if any key starts with "base_model.model." prefix
+    has_base_model_prefix = any(k.startswith("base_model.model.") for k in grouped_state_dict.keys())
+
+    if has_base_model_prefix:
+        # Remove the "base_model.model." prefix from all keys.
+        grouped_state_dict = {k.replace("base_model.model.", ""): v for k, v in grouped_state_dict.items()}
+    else:
+        # Remove the "transformer." prefix from all keys.
+        grouped_state_dict = {k.replace("transformer.", ""): v for k, v in grouped_state_dict.items()}

     # Constants for FLUX.1
     num_double_layers = 19
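A toy check of the widened detection logic, assuming the patched function above is in scope (the tensor shapes are arbitrary placeholders):

    import torch

    def fake_sd(prefix: str) -> dict:
        # Minimal PEFT-style state dict using the given key prefix.
        keys = [
            f"{prefix}single_transformer_blocks.0.attn.to_q.lora_A.weight",
            f"{prefix}single_transformer_blocks.0.attn.to_q.lora_B.weight",
            f"{prefix}transformer_blocks.0.attn.add_q_proj.lora_A.weight",
            f"{prefix}transformer_blocks.0.attn.add_q_proj.lora_B.weight",
        ]
        return {k: torch.zeros(1, 1) for k in keys}

    assert is_state_dict_likely_in_flux_diffusers_format(fake_sd("transformer."))
    assert is_state_dict_likely_in_flux_diffusers_format(fake_sd("base_model.model."))
    assert not is_state_dict_likely_in_flux_diffusers_format({"foo.weight": torch.zeros(1)})

Before this change only the "transformer." prefix was accepted; LoRAs exported with the "base_model.model." prefix (as some PEFT tooling produces) were not recognized.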
@@ -20,7 +20,7 @@ def main():
         "/data/invokeai/models/.download_cache/https__huggingface.co_black-forest-labs_flux.1-schnell_resolve_main_flux1-schnell.safetensors/flux1-schnell.safetensors"
     )

-    with log_time("Intialize FLUX transformer on meta device"):
+    with log_time("Initialize FLUX transformer on meta device"):
         # TODO(ryand): Determine if this is a schnell model or a dev model and load the appropriate config.
         p = params["flux-schnell"]

@@ -33,7 +33,7 @@ def main():
     )

     # inference_dtype = torch.bfloat16
-    with log_time("Intialize FLUX transformer on meta device"):
+    with log_time("Initialize FLUX transformer on meta device"):
         # TODO(ryand): Determine if this is a schnell model or a dev model and load the appropriate config.
         p = params["flux-schnell"]

@@ -27,7 +27,7 @@ def main():
     """
     model_path = Path("/data/misc/text_encoder_2")

-    with log_time("Intialize T5 on meta device"):
+    with log_time("Initialize T5 on meta device"):
         model_config = AutoConfig.from_pretrained(model_path)
         with accelerate.init_empty_weights():
             model = AutoModelForTextEncoding.from_config(model_config)
invokeai/backend/util/vae_working_memory.py  (new file, 117 lines)
@@ -0,0 +1,117 @@
from typing import Literal

import torch
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny

from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.backend.flux.modules.autoencoder import AutoEncoder


def estimate_vae_working_memory_sd15_sdxl(
    operation: Literal["encode", "decode"],
    image_tensor: torch.Tensor,
    vae: AutoencoderKL | AutoencoderTiny,
    tile_size: int | None,
    fp32: bool,
) -> int:
    """Estimate the working memory required to encode or decode the given tensor."""
    # It was found experimentally that the peak working memory scales linearly with the number of pixels and the
    # element size (precision). This estimate is accurate for both SD1 and SDXL.
    element_size = 4 if fp32 else 2

    # This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
    # Encoding uses ~45% the working memory as decoding.
    scaling_constant = 2200 if operation == "decode" else 1100

    latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1

    if tile_size is not None:
        if tile_size == 0:
            tile_size = vae.tile_sample_min_size
        assert isinstance(tile_size, int)
        h = tile_size
        w = tile_size
        working_memory = h * w * element_size * scaling_constant

        # We add 25% to the working memory estimate when tiling is enabled to account for factors like tile overlap
        # and number of tiles. We could make this more precise in the future, but this should be good enough for
        # most use cases.
        working_memory = working_memory * 1.25
    else:
        h = latent_scale_factor_for_operation * image_tensor.shape[-2]
        w = latent_scale_factor_for_operation * image_tensor.shape[-1]
        working_memory = h * w * element_size * scaling_constant

    if fp32:
        # If we are running in FP32, then we should account for the likely increase in model size (~250MB).
        working_memory += 250 * 2**20

    print(f"estimate_vae_working_memory_sd15_sdxl: {int(working_memory)}")

    return int(working_memory)


def estimate_vae_working_memory_cogview4(
    operation: Literal["encode", "decode"], image_tensor: torch.Tensor, vae: AutoencoderKL
) -> int:
    """Estimate the working memory required by the invocation in bytes."""
    latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1

    h = latent_scale_factor_for_operation * image_tensor.shape[-2]
    w = latent_scale_factor_for_operation * image_tensor.shape[-1]
    element_size = next(vae.parameters()).element_size()

    # This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
    # Encoding uses ~45% the working memory as decoding.
    scaling_constant = 2200 if operation == "decode" else 1100
    working_memory = h * w * element_size * scaling_constant

    print(f"estimate_vae_working_memory_cogview4: {int(working_memory)}")

    return int(working_memory)


def estimate_vae_working_memory_flux(
    operation: Literal["encode", "decode"], image_tensor: torch.Tensor, vae: AutoEncoder
) -> int:
    """Estimate the working memory required by the invocation in bytes."""
    latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1

    out_h = latent_scale_factor_for_operation * image_tensor.shape[-2]
    out_w = latent_scale_factor_for_operation * image_tensor.shape[-1]
    element_size = next(vae.parameters()).element_size()

    # This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
    # Encoding uses ~45% the working memory as decoding.
    scaling_constant = 2200 if operation == "decode" else 1100

    working_memory = out_h * out_w * element_size * scaling_constant

    print(f"estimate_vae_working_memory_flux: {int(working_memory)}")

    return int(working_memory)


def estimate_vae_working_memory_sd3(
    operation: Literal["encode", "decode"], image_tensor: torch.Tensor, vae: AutoencoderKL
) -> int:
    """Estimate the working memory required by the invocation in bytes."""
    # Encode operations use approximately 50% of the memory required for decode operations

    latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1

    h = latent_scale_factor_for_operation * image_tensor.shape[-2]
    w = latent_scale_factor_for_operation * image_tensor.shape[-1]
    element_size = next(vae.parameters()).element_size()

    # This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
    # Encoding uses ~45% the working memory as decoding.
    scaling_constant = 2200 if operation == "decode" else 1100

    working_memory = h * w * element_size * scaling_constant

    print(f"estimate_vae_working_memory_sd3: {int(working_memory)}")

    return int(working_memory)
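To give the formula above a sense of scale, here is the arithmetic for a 1024x1024 SDXL decode at fp16 with tiling disabled (values follow estimate_vae_working_memory_sd15_sdxl; the image size is just an example):

    latent_h, latent_w = 128, 128      # 1024 / LATENT_SCALE_FACTOR (8)
    h = 8 * latent_h                   # decode scales latents back up: 1024
    w = 8 * latent_w
    element_size = 2                   # fp16
    scaling_constant = 2200            # decode constant from the file above

    working_memory = h * w * element_size * scaling_constant
    print(working_memory / 2**30)      # ~4.3 GiB of estimated working memory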
@@ -56,7 +56,7 @@
     "chakra-react-select": "^4.9.2",
     "cmdk": "^1.1.1",
     "compare-versions": "^6.1.1",
-    "dockview": "^4.4.1",
+    "dockview": "^4.7.1",
     "es-toolkit": "^1.39.7",
     "filesize": "^10.1.6",
     "fracturedjsonjs": "^4.1.0",

@@ -69,6 +69,7 @@
     "linkify-react": "^4.3.1",
     "linkifyjs": "^4.3.1",
     "lru-cache": "^11.1.0",
+    "media-chrome": "^4.13.0",
     "mtwist": "^1.0.2",
     "nanoid": "^5.1.5",
     "nanostores": "^1.0.1",

@@ -87,6 +88,7 @@
     "react-hotkeys-hook": "4.5.0",
     "react-i18next": "^15.5.3",
     "react-icons": "^5.5.0",
+    "react-player": "^3.3.1",
     "react-redux": "9.2.0",
     "react-resizable-panels": "^3.0.3",
     "react-textarea-autosize": "^8.5.9",
invokeai/frontend/web/pnpm-lock.yaml  (generated, 381 lines changed)
@@ -60,8 +60,8 @@ importers:
       specifier: ^6.1.1
       version: 6.1.1
     dockview:
-      specifier: ^4.4.1
-      version: 4.4.1(react@18.3.1)
+      specifier: ^4.7.1
+      version: 4.7.1(react@18.3.1)
     es-toolkit:
       specifier: ^1.39.7
       version: 1.39.7

@@ -98,6 +98,9 @@ importers:
     lru-cache:
       specifier: ^11.1.0
       version: 11.1.0
+    media-chrome:
+      specifier: ^4.13.0
+      version: 4.13.0(react@18.3.1)
     mtwist:
       specifier: ^1.0.2
       version: 1.0.2

@@ -152,6 +155,9 @@ importers:
     react-icons:
       specifier: ^5.5.0
       version: 5.5.0(react@18.3.1)
+    react-player:
+      specifier: ^3.3.1
+      version: 3.3.1(@types/react-dom@18.3.7(@types/react@18.3.23))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
     react-redux:
       specifier: 9.2.0
       version: 9.2.0(@types/react@18.3.23)(react@18.3.1)(redux@5.0.1)

@@ -414,8 +420,8 @@ packages:
    resolution: {integrity: sha512-vbavdySgbTTrmFE+EsiqUTzlOr5bzlnJtUv9PynGCAKvfQqjIXbvFdumPM/GxMDfyuGMJaJAU6TO4zc1Jf1i8Q==}
    engines: {node: '>=6.9.0'}

-  '@babel/runtime@7.28.2':
-    resolution: {integrity: sha512-KHp2IflsnGywDjBWDkR9iEqiWSpc8GIi0lgTT3mOElT0PP1tG26P4tmFI2YvAdzgq9RGyoHZQEIEdZy6Ec5xCA==}
+  '@babel/runtime@7.28.3':
+    resolution: {integrity: sha512-9uIQ10o0WGdpP6GDhXcdOJPJuDgFtIDtN/9+ArJQ2NAfAmiuhTQdzkaTGR33v43GYS2UrSA0eX2pPPHoFVvpxA==}
     engines: {node: '>=6.9.0'}

   '@babel/template@7.27.2':

@@ -937,6 +943,31 @@ packages:
   '@microsoft/tsdoc@0.15.1':
     resolution: {integrity: sha512-4aErSrCR/On/e5G2hDP0wjooqDdauzEbIq8hIkIe5pXV0rtWJZvdCEKL0ykZxex+IxIwBp0eGeV48hQN07dXtw==}

+  '@mux/mux-data-google-ima@0.2.8':
+    resolution: {integrity: sha512-0ZEkHdcZ6bS8QtcjFcoJeZxJTpX7qRIledf4q1trMWPznugvtajCjCM2kieK/pzkZj1JM6liDRFs1PJSfVUs2A==}
+
+  '@mux/mux-player-react@3.5.3':
+    resolution: {integrity: sha512-f0McZbIXYDkzecFwhhkf0JgEInPnsOClgBqBhkdhRlLRdrAzMATib+D3Di3rPkRHNH7rc/WWORvSxgJz6m6zkA==}
+    peerDependencies:
+      '@types/react': ^17.0.0 || ^17.0.0-0 || ^18 || ^18.0.0-0 || ^19 || ^19.0.0-0
+      '@types/react-dom': '*'
+      react: ^17.0.2 || ^17.0.0-0 || ^18 || ^18.0.0-0 || ^19 || ^19.0.0-0
+      react-dom: ^17.0.2 || ^17.0.2-0 || ^18 || ^18.0.0-0 || ^19 || ^19.0.0-0
+    peerDependenciesMeta:
+      '@types/react':
+        optional: true
+      '@types/react-dom':
+        optional: true
+
+  '@mux/mux-player@3.5.3':
+    resolution: {integrity: sha512-uXKFXbdtioAi+clSVfD60Rw4r7OvA62u2jV6aar9loW9qMsmKv8LU+8uaIaWQjyAORp6E0S37GOVjo72T6O2eQ==}
+
+  '@mux/mux-video@0.26.1':
+    resolution: {integrity: sha512-gkMdBAgNlB4+krANZHkQFzYWjWeNsJz69y1/hnPtmNQnpvW+O7oc71OffcZrbblyibSxWMQ6MQpYmBVjXlp6sA==}
+
+  '@mux/playback-core@0.30.1':
+    resolution: {integrity: sha512-rnO1NE9xHDyzbAkmE6ygJYcD7cyyMt7xXqWTykxlceaoSXLjUqgp42HDio7Lcidto4x/O4FIa7ztjV2aCBCXgQ==}
+
   '@nanostores/react@0.7.3':
     resolution: {integrity: sha512-/XuLAMENRu/Q71biW4AZ4qmU070vkZgiQ28gaTSNRPm2SZF5zGAR81zPE1MaMB4SeOp6ZTst92NBaG75XSspNg==}
     engines: {node: ^18.0.0 || >=20.0.0}

@@ -1453,6 +1484,9 @@ packages:
       typescript:
         optional: true

+  '@svta/common-media-library@0.12.4':
+    resolution: {integrity: sha512-9EuOoaNmz7JrfGwjsrD9SxF9otU5TNMnbLu1yU4BeLK0W5cDxVXXR58Z89q9u2AnHjIctscjMTYdlqQ1gojTuw==}
+
   '@swc/core-darwin-arm64@1.12.9':
     resolution: {integrity: sha512-GACFEp4nD6V+TZNR2JwbMZRHB+Yyvp14FrcmB6UCUYmhuNWjkxi+CLnEvdbuiKyQYv0zA+TRpCHZ+whEs6gwfA==}
     engines: {node: '>=10'}

@@ -1707,6 +1741,12 @@ packages:
     resolution: {integrity: sha512-YzfhzcTnZVPiLfP/oeKtDp2evwvHLMe0LOy7oe+hb9KKIumLNohYS9Hgp1ifwpu42YWxhZE8yieggz6JpqO/1w==}
     engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}

+  '@vercel/edge@1.2.2':
+    resolution: {integrity: sha512-1+y+f6rk0Yc9ss9bRDgz/gdpLimwoRteKHhrcgHvEpjbP1nyT3ByqEMWm2BTcpIO5UtDmIFXc8zdq4LR190PDA==}
+
+  '@vimeo/player@2.29.0':
+    resolution: {integrity: sha512-9JjvjeqUndb9otCCFd0/+2ESsLk7VkDE6sxOBy9iy2ukezuQbplVRi+g9g59yAurKofbmTi/KcKxBGO/22zWRw==}
+
   '@vitejs/plugin-react-swc@3.10.2':
     resolution: {integrity: sha512-xD3Rdvrt5LgANug7WekBn1KhcvLn1H3jNBfJRL3reeOIua/WnZOEV5qi5qIBq5T8R0jUDmRtxuvk4bPhzGHDWw==}
     peerDependencies:

@@ -1959,6 +1999,15 @@ packages:
   base64-js@1.5.1:
     resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==}

+  bcp-47-match@2.0.3:
+    resolution: {integrity: sha512-JtTezzbAibu8G0R9op9zb3vcWZd9JF6M0xOYGPn0fNCd7wOpRB1mU2mH9T8gaBGbAAyIIVgB2G7xG0GP98zMAQ==}
+
+  bcp-47-normalize@2.3.0:
+    resolution: {integrity: sha512-8I/wfzqQvttUFz7HVJgIZ7+dj3vUaIyIxYXaTRP1YWoSDfzt6TUmxaKZeuXR62qBmYr+nvuWINFRl6pZ5DlN4Q==}
+
+  bcp-47@2.1.0:
+    resolution: {integrity: sha512-9IIS3UPrvIa1Ej+lVDdDwO7zLehjqsaByECw0bu2RRGP73jALm6FYbzI5gWbgHLvNdkvfXB5YrSbocZdOS0c0w==}
+
   better-opn@3.0.2:
     resolution: {integrity: sha512-aVNobHnJqLiUelTaHat9DZ1qM2w0C0Eym4LPI/3JxOnSokGVdsl1T1kN7TFvsEAD8G47A6VKQ0TVHqbBnYMJlQ==}
     engines: {node: '>=12.0.0'}

@@ -2014,6 +2063,14 @@ packages:
   caniuse-lite@1.0.30001727:
     resolution: {integrity: sha512-pB68nIHmbN6L/4C6MH1DokyR3bYqFwjaSs/sWDHGj4CTcFtQUQMuJftVwWkXq7mNWOybD3KhUv3oWHoGxgP14Q==}

+  castable-video@1.1.10:
+    resolution: {integrity: sha512-/T1I0A4VG769wTEZ8gWuy1Crn9saAfRTd1UYTb8xbOPlN78+zOi/1nU2dD5koNkfE5VWvgabkIqrGKmyNXOjSQ==}
+
+  ce-la-react@0.3.1:
+    resolution: {integrity: sha512-g0YwpZDPIwTwFumGTzNHcgJA6VhFfFCJkSNdUdC04br2UfU+56JDrJrJva3FZ7MToB4NDHAFBiPE/PZdNl1mQA==}
+    peerDependencies:
+      react: '>=17.0.0'
+
   chai@5.2.0:
     resolution: {integrity: sha512-mCuXncKXk5iCLhfhwTc0izo0gtEmpz5CtG2y8GiOINBlMVS6v8TMRc5TaLWKS6692m9+dVVfzgeVxR5UxWHTYw==}
     engines: {node: '>=12'}

@@ -2064,12 +2121,18 @@ packages:
     resolution: {integrity: sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==}
     engines: {node: '>=0.8'}

+  cloudflare-video-element@1.3.3:
+    resolution: {integrity: sha512-qrHzwLmUhisoIuEoKc7iBbdzBNj2Pi7ThHslU/9U/6PY9DEvo4mh/U+w7OVuzXT9ks7ZXfARvDBfPAaMGF/hIg==}
+
   cmdk@1.1.1:
     resolution: {integrity: sha512-Vsv7kFaXm+ptHDMZ7izaRsP70GgrW9NBNGswt9OZaVBLlE0SNpDq8eu/VGXyF9r7M0azK3Wy7OlYXsuyYLFzHg==}
     peerDependencies:
       react: ^18 || ^19 || ^19.0.0-rc
       react-dom: ^18 || ^19 || ^19.0.0-rc

+  codem-isoboxer@0.3.10:
+    resolution: {integrity: sha512-eNk3TRV+xQMJ1PEj0FQGY8KD4m0GPxT487XJ+Iftm7mVa9WpPFDMWqPt+46buiP5j5Wzqe5oMIhqBcAeKfygSA==}
+
   color-convert@2.0.1:
     resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==}
     engines: {node: '>=7.0.0'}

@@ -2139,6 +2202,9 @@ packages:
   csstype@3.1.3:
     resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==}

+  custom-media-element@1.4.5:
+    resolution: {integrity: sha512-cjrsQufETwxjvwZbYbKBCJNvmQ2++G9AvT45zDi7NXL9k2PdVcs2h0jQz96J6G4TMKRCcEsoJ+QTgQD00Igtjw==}
+
   d3-color@3.1.0:
     resolution: {integrity: sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==}
     engines: {node: '>=12'}

@@ -2177,6 +2243,12 @@ packages:
     resolution: {integrity: sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==}
     engines: {node: '>=12'}

+  dash-video-element@0.1.6:
+    resolution: {integrity: sha512-4gHShaQjcFv6diX5EzB6qAdUGKlIUGGZY8J8yp2pQkWqR0jX4c6plYy0cFraN7mr0DZINe8ujDN1fssDYxJjcg==}
+
+  dashjs@5.0.3:
+    resolution: {integrity: sha512-TXndNnCUjFjF2nYBxDVba+hWRpVkadkQ8flLp7kHkem+5+wZTfRShJCnVkPUosmjS0YPE9fVNLbYPJxHBeQZvA==}
+
   data-view-buffer@1.0.2:
     resolution: {integrity: sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ==}
     engines: {node: '>= 0.4'}

@@ -2254,11 +2326,11 @@ packages:
   discontinuous-range@1.0.0:
     resolution: {integrity: sha512-c68LpLbO+7kP/b1Hr1qs8/BJ09F5khZGTxqxZuhzxpmwJKOgRFHJWIb9/KmqnqHhLdO55aOxFH/EGBvUQbL/RQ==}

-  dockview-core@4.4.1:
-    resolution: {integrity: sha512-pDQPlVfDYDuN3zSebVUMVn2x21bpYPGD1ybGYrKJMI1KDkSQSqy57FJRJXi7yEnkcrmBUF0xEEo4d0Yi3j2vGA==}
+  dockview-core@4.7.1:
+    resolution: {integrity: sha512-Tia3vYHtqACMZTiZv86yQOabwKj5KrBhQqlSr7qXV0qmmRSZ8dNbaU63LIHYFprST7JgHupIm9JVES+OhqMoTQ==}

-  dockview@4.4.1:
-    resolution: {integrity: sha512-XEAwl+VYVZGkBd3hprF6kRLspWSF/hydbRHuV3KEg3BHev1i5xc+H+Kjp+u5DHTQ97klFAATl5MntNoVXQeg0w==}
+  dockview@4.7.1:
+    resolution: {integrity: sha512-DgMzSKNjDvZzIQjFfAV6I6EDkqe40Sjz1Qgyf88KG4U1Kgp/bIIEDSLpz65BsW5ZD9Qi3y18TCISYTgsNvU9TA==}
     peerDependencies:
       react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0

@@ -2751,9 +2823,18 @@ packages:
     resolution: {integrity: sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==}
     hasBin: true

+  hls-video-element@1.5.6:
+    resolution: {integrity: sha512-KPdvSR+oBJPiCVb+m6pd2mn3rJEjNbaK8pGhSkxFI2pmyvZIeTVQrPbEO9PT/juwXHwhvCoKJnNxAuFwJG9H5A==}
+
+  hls.js@1.6.9:
+    resolution: {integrity: sha512-q7qPrri6GRwjcNd7EkFCmhiJ6PBIxeUsdxKbquBkQZpg9jAnp6zSAeN9eEWFlOB09J8JfzAQGoXL5ZEAltjO9g==}
+
   hoist-non-react-statics@3.3.2:
     resolution: {integrity: sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==}

+  html-entities@2.6.0:
+    resolution: {integrity: sha512-kig+rMn/QOVRvr7c86gQ8lWXq+Hkv6CbAH1hLu+RG338StTpE8Z0b44SDVaqVu7HGKf27frdmUYEs9hTUX/cLQ==}
+
   html-escaper@2.0.2:
     resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==}

@@ -2792,6 +2873,9 @@ packages:
     resolution: {integrity: sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==}
     engines: {node: '>= 4'}

+  immediate@3.0.6:
+    resolution: {integrity: sha512-XXOFtyqDjNDAQxVfYxuF7g9Il/IbWmmlQg2MYKOH8ExIT1qg6xc4zyS3HaEEATgs1btfzxq15ciUiY7gjSXRGQ==}
+
   immer@10.1.1:
     resolution: {integrity: sha512-s2MPrmjovJcoMaHtx6K11Ra7oD05NT97w1IC5zpMkT6Atjr7H8LjaDd81iIxUYpMKSRRNMJE703M1Fhr/TctHw==}

@@ -2803,6 +2887,9 @@ packages:
     resolution: {integrity: sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==}
     engines: {node: '>=8'}

+  imsc@1.1.5:
+    resolution: {integrity: sha512-V8je+CGkcvGhgl2C1GlhqFFiUOIEdwXbXLiu1Fcubvvbo+g9inauqT3l0pNYXGoLPBj3jxtZz9t+wCopMkwadQ==}
+
   imurmurhash@0.1.4:
     resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==}
     engines: {node: '>=0.8.19'}

@@ -2825,6 +2912,12 @@ packages:
     resolution: {integrity: sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==}
     engines: {node: '>= 0.4'}

+  is-alphabetical@2.0.1:
+    resolution: {integrity: sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==}
+
+  is-alphanumerical@2.0.1:
+    resolution: {integrity: sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==}
+
   is-array-buffer@3.0.5:
     resolution: {integrity: sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==}
     engines: {node: '>= 0.4'}

@@ -2860,6 +2953,9 @@ packages:
     resolution: {integrity: sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==}
     engines: {node: '>= 0.4'}

+  is-decimal@2.0.1:
+    resolution: {integrity: sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==}
+
   is-docker@2.2.1:
     resolution: {integrity: sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==}
     engines: {node: '>=8'}

@@ -3064,6 +3160,9 @@ packages:
     resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==}
     engines: {node: '>= 0.8.0'}

+  lie@3.1.1:
+    resolution: {integrity: sha512-RiNhHysUjhrDQntfYSfY4MU24coXXdEOgw9WGcKHNeEwffDYbF//u87M1EWaMGzuFoSbqW0C9C6lEEhDOAswfw==}
+
   lines-and-columns@1.2.4:
     resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==}

@@ -3088,6 +3187,9 @@ packages:
     resolution: {integrity: sha512-WunYko2W1NcdfAFpuLUoucsgULmgDBRkdxHxWQ7mK0cQqwPiy8E1enjuRBrhLtZkB5iScJ1XIPdhVEFK8aOLSg==}
     engines: {node: '>=14'}

+  localforage@1.10.0:
+    resolution: {integrity: sha512-14/H1aX7hzBBmmh7sGPd+AOMkkIrHM3Z1PAyGgZigA1H1p5O5ANnMyWzvpAETtG68/dC4pC0ncy3+PPGzXZHPg==}
+
   locate-path@6.0.0:
     resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==}
     engines: {node: '>=10'}

@@ -3154,6 +3256,15 @@ packages:
   mdn-data@2.0.14:
     resolution: {integrity: sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==}

+  media-chrome@4.11.1:
+    resolution: {integrity: sha512-+2niDc4qOwlpFAjwxg1OaizK/zKV6y7QqGm4nBFEVlSaG0ZBgOmfc4IXAPiirZqAlZGaFFUaMqCl1SpGU0/naA==}
+
+  media-chrome@4.13.0:
+    resolution: {integrity: sha512-DfX/Hwxjae/tEHjr1tVnV/6XDFHriMXI1ev8Ji4Z/YwXnqMhNfRtvNsMjefnQK5pkMS/9hC+jmdS+VDWZfsSIw==}
+
+  media-tracks@0.3.3:
+    resolution: {integrity: sha512-9P2FuUHnZZ3iji+2RQk7Zkh5AmZTnOG5fODACnjhCVveX1McY3jmCRHofIEI+yTBqplz7LXy48c7fQ3Uigp88w==}
+
   memoize-one@6.0.0:
     resolution: {integrity: sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==}

@@ -3219,6 +3330,12 @@ packages:
   muggle-string@0.4.1:
     resolution: {integrity: sha512-VNTrAak/KhO2i8dqqnqnAHOa3cYBwXEZe9h+D5h/1ZqFSTEFHdM65lR7RoIqq3tBBYavsOXV84NoHXZ0AkPyqQ==}

+  mux-embed@5.11.0:
+    resolution: {integrity: sha512-uczzXVraqMRmyYmpGh2zthTmBKvvc5D5yaVKQRgGhFOnF7E4nkhqNkdkQc4C0WTPzdqdPl5OtCelNWMF4tg5RQ==}
+
+  mux-embed@5.9.0:
+    resolution: {integrity: sha512-wmunL3uoPhma/tWy8PrDPZkvJpXvSFBwbD3KkC4PG8Ztjfb1X3hRJwGUAQyRz7z99b/ovLm2UTTitrkvStjH4w==}
+
   nano-css@5.6.2:
     resolution: {integrity: sha512-+6bHaC8dSDGALM1HJjOHVXpuastdu2xFoZlC77Jh4cg+33Zcgm+Gxd+1xsnpZK14eyHObSp82+ll5y3SX75liw==}
     peerDependencies:

@@ -3243,6 +3360,9 @@ packages:
     resolution: {integrity: sha512-kNZ9xnoJYKg/AfxjrVL4SS0fKX++4awQReGqWnwTRHxeHGZ1FJFVgTqr/eMrNQdp0Tz7M7tG/TDaX8QfHDwVCw==}
     engines: {node: ^20.0.0 || >=22.0.0}

+  native-promise-only@0.8.1:
+    resolution: {integrity: sha512-zkVhZUA3y8mbz652WrL5x0fB0ehrBkulWT3TomAQ9iDtyXZvzKeEA6GPxAItBYeNYl5yngKRX612qHOhvMkDeg==}
+
   natural-compare@1.4.0:
     resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==}

@@ -3433,6 +3553,9 @@ packages:
   pkg-types@2.2.0:
     resolution: {integrity: sha512-2SM/GZGAEkPp3KWORxQZns4M+WSeXbC2HEvmOIJe3Cmiv6ieAJvdVhDldtHqM5J1Y7MrR1XhkBT/rMlhh9FdqQ==}

+  player.style@0.1.9:
+    resolution: {integrity: sha512-aFmIhHMrnAP8YliFYFMnRw+5AlHqBvnqWy4vHGo2kFxlC+XjmTXqgg62qSxlE8ubAY83c0ViEZGYglSJi6mGCA==}
+
   pluralize@8.0.0:
     resolution: {integrity: sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==}
     engines: {node: '>=4'}

@@ -3575,6 +3698,13 @@ packages:
   react-is@17.0.2:
     resolution: {integrity: sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==}

+  react-player@3.3.1:
+    resolution: {integrity: sha512-wE/xLloneXZ1keelFCaNeIFVNUp4/7YoUjfHjwF945aQzsbDKiIB0LQuCchGL+la0Y1IybxnR0R6Cm3AiqInMw==}
+    peerDependencies:
+      '@types/react': ^17.0.0 || ^18 || ^19
+      react: ^17.0.2 || ^18 || ^19
+      react-dom: ^17.0.2 || ^18 || ^19
+
   react-redux@9.2.0:
     resolution: {integrity: sha512-ROY9fvHhwOD9ySfrF0wmvu//bKCQ6AeZZq1nJNtbDC+kk5DuSuNX/n6YWYF/SYy7bSba4D4FSz8DJeKY/S/r+g==}
     peerDependencies:

@@ -3809,6 +3939,9 @@ packages:
     resolution: {integrity: sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==}
     engines: {node: '>=10'}

+  sax@1.2.1:
+    resolution: {integrity: sha512-8I2a3LovHTOpm7NV5yOyO8IHqgVsfK4+UuySrXU8YXkSRX7k6hCV9b3HrkKCr3nMpgj+0bmocaJJWpvp1oc7ZA==}
+
   scheduler@0.23.2:
     resolution: {integrity: sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==}

@@ -3931,6 +4064,9 @@ packages:
     resolution: {integrity: sha512-qxQJTx2ryR0Dw0ITYyekNQWpz6f8dGd7vffGNflQQ3Iqj9NJ6qiZ7ELpZsJ/QBhIVAiDfXdag3+Gp8RvWa62AA==}
     engines: {node: '>=12'}

+  spotify-audio-element@1.0.2:
+    resolution: {integrity: sha512-YEovyyeJTJMzdSVqFw/Fx19e1gdcD4bmZZ/fWS0Ji58KTpvAT2rophgK87ocqpy6eJNSmIHikhgbRjGWumgZew==}
+
   sprintf-js@1.0.3:
     resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==}

@@ -4039,6 +4175,9 @@ packages:
   stylis@4.3.6:
     resolution: {integrity: sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==}

+  super-media-element@1.4.2:
+    resolution: {integrity: sha512-9pP/CVNp4NF2MNlRzLwQkjiTgKKe9WYXrLh9+8QokWmMxz+zt2mf1utkWLco26IuA3AfVcTb//qtlTIjY3VHxA==}
+
   supports-color@10.0.0:
     resolution: {integrity: sha512-HRVVSbCCMbj7/kdWF9Q+bbckjBHLtHMEoJWlkmYzzdwhYMkjkOwubLM6t7NbWKjgKamGDrWL1++KrjUO1t9oAQ==}
     engines: {node: '>=18'}

@@ -4063,6 +4202,9 @@ packages:
     resolution: {integrity: sha512-dTEWWNu6JmeVXY0ZYoPuH5cRIwc0MeGbJwah9KUNYSJwommQpCzTySTpEe8Gs1J23aeWEuAobe4Ag7EHVt/LOg==}
     engines: {node: '>=10'}

+  tiktok-video-element@0.1.0:
+    resolution: {integrity: sha512-PVWUlpDdQ/LPXi7x4/furfD7Xh1L72CgkGCaMsZBIjvxucMGm1DDPJdM9IhWBFfo6tuR4cYVO/v596r6GG/lvQ==}
+
   tiny-invariant@1.3.3:
     resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==}

@@ -4148,6 +4290,9 @@ packages:
   tslib@2.8.1:
     resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==}

+  twitch-video-element@0.1.2:
+    resolution: {integrity: sha512-/up4KiWiTYiav+CUo+/DbV8JhP4COwEKSo8h1H/Zft/5NzZ/ZiIQ48h7erFKvwzalN0GfkEGGIfwIzAO0h7FHQ==}
+
   type-check@0.4.0:
     resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==}
     engines: {node: '>= 0.8.0'}

@@ -4182,6 +4327,10 @@ packages:
     engines: {node: '>=14.17'}
     hasBin: true

+  ua-parser-js@1.0.40:
+    resolution: {integrity: sha512-z6PJ8Lml+v3ichVojCiB8toQJBuwR42ySM4ezjXIqXK3M0HczmKQ3LF4rhU55PfD99KEEXQG6yb7iOMyvYuHew==}
+    hasBin: true
+
   ufo@1.6.1:
     resolution: {integrity: sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA==}

@@ -4286,6 +4435,9 @@ packages:
     resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==}
     hasBin: true

+  vimeo-video-element@1.5.3:
+    resolution: {integrity: sha512-OQWyGS9nTouMqfRJyvmAm/n6IRhZ7x3EfPAef+Q+inGBeHa3SylDbtyeB/rEBd4B/T/lcYBW3rjaD9W2DRYkiQ==}
+
   vite-node@3.2.4:
     resolution: {integrity: sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==}
     engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0}

@@ -4401,6 +4553,10 @@ packages:
   wcwidth@1.0.1:
     resolution: {integrity: sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==}

+  weakmap-polyfill@2.0.4:
+    resolution: {integrity: sha512-ZzxBf288iALJseijWelmECm/1x7ZwQn3sMYIkDr2VvZp7r6SEKuT8D0O9Wiq6L9Nl5mazrOMcmiZE/2NCenaxw==}
+    engines: {node: '>=8.10.0'}
+
   webidl-conversions@3.0.1:
     resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==}

@@ -4436,6 +4592,9 @@ packages:
     engines: {node: '>=8'}
     hasBin: true

+  wistia-video-element@1.3.3:
+    resolution: {integrity: sha512-ZVC8HH8uV3mQGcSz10MACLDalao/0YdVverNN4GNFsOXiumfqSiZnRVc8WZEywgVckBkR7+yerQYESYPDzvTfQ==}
+
   word-wrap@1.2.5:
     resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==}
     engines: {node: '>=0.10.0'}

@@ -4509,6 +4668,9 @@ packages:
     resolution: {integrity: sha512-AyeEbWOu/TAXdxlV9wmGcR0+yh2j3vYPGOECcIj2S7MkrLyC7ne+oye2BKTItt0ii2PHk4cDy+95+LshzbXnGg==}
     engines: {node: '>=12.20'}

+  youtube-video-element@1.6.1:
+    resolution: {integrity: sha512-FDRgXlPxpe1bh6HlhL6GfJVcvVNaZKCcLEZ90X1G3Iu+z2g2cIhm2OWj9abPZq1Zqit6SY7Gwh13H9g7acoBnQ==}
+
   zod-validation-error@3.5.3:
     resolution: {integrity: sha512-OT5Y8lbUadqVZCsnyFaTQ4/O2mys4tj7PqhdbBCp7McPwvIEKfPtdA6QfPeFQK2/Rz5LgwmAXRJTugBNBi0btw==}
     engines: {node: '>=18.0.0'}

@@ -4640,7 +4802,7 @@ snapshots:

   '@babel/runtime@7.27.6': {}

-  '@babel/runtime@7.28.2': {}
+  '@babel/runtime@7.28.3': {}

   '@babel/template@7.27.2':
     dependencies:

@@ -5235,6 +5397,43 @@ snapshots:
|
||||
|
||||
'@microsoft/tsdoc@0.15.1': {}
|
||||
|
||||
'@mux/mux-data-google-ima@0.2.8':
|
||||
dependencies:
|
||||
mux-embed: 5.9.0
|
||||
|
||||
'@mux/mux-player-react@3.5.3(@types/react-dom@18.3.7(@types/react@18.3.23))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
|
||||
dependencies:
|
||||
'@mux/mux-player': 3.5.3(react@18.3.1)
|
||||
'@mux/playback-core': 0.30.1
|
||||
prop-types: 15.8.1
|
||||
react: 18.3.1
|
||||
react-dom: 18.3.1(react@18.3.1)
|
||||
optionalDependencies:
|
||||
'@types/react': 18.3.23
|
||||
'@types/react-dom': 18.3.7(@types/react@18.3.23)
|
||||
|
||||
'@mux/mux-player@3.5.3(react@18.3.1)':
|
||||
dependencies:
|
||||
'@mux/mux-video': 0.26.1
|
||||
'@mux/playback-core': 0.30.1
|
||||
media-chrome: 4.11.1(react@18.3.1)
|
||||
player.style: 0.1.9(react@18.3.1)
|
||||
transitivePeerDependencies:
|
||||
- react
|
||||
|
||||
'@mux/mux-video@0.26.1':
|
||||
dependencies:
|
||||
'@mux/mux-data-google-ima': 0.2.8
|
||||
'@mux/playback-core': 0.30.1
|
||||
castable-video: 1.1.10
|
||||
custom-media-element: 1.4.5
|
||||
media-tracks: 0.3.3
|
||||
|
||||
'@mux/playback-core@0.30.1':
|
||||
dependencies:
|
||||
hls.js: 1.6.9
|
||||
mux-embed: 5.11.0
|
||||
|
||||
'@nanostores/react@0.7.3(nanostores@0.11.4)(react@18.3.1)':
|
||||
dependencies:
|
||||
nanostores: 0.11.4
|
||||
@@ -5690,6 +5889,8 @@ snapshots:
|
||||
optionalDependencies:
|
||||
typescript: 5.8.3
|
||||
|
||||
'@svta/common-media-library@0.12.4': {}
|
||||
|
||||
'@swc/core-darwin-arm64@1.12.9':
|
||||
optional: true
|
||||
|
||||
@@ -5745,7 +5946,7 @@ snapshots:
|
||||
'@testing-library/dom@10.4.0':
|
||||
dependencies:
|
||||
'@babel/code-frame': 7.27.1
|
||||
'@babel/runtime': 7.28.2
|
||||
'@babel/runtime': 7.28.3
|
||||
'@types/aria-query': 5.0.4
|
||||
aria-query: 5.3.0
|
||||
chalk: 4.1.2
|
||||
@@ -5971,6 +6172,13 @@ snapshots:
|
||||
'@typescript-eslint/types': 8.37.0
|
||||
eslint-visitor-keys: 4.2.1
|
||||
|
||||
'@vercel/edge@1.2.2': {}
|
||||
|
||||
'@vimeo/player@2.29.0':
|
||||
dependencies:
|
||||
native-promise-only: 0.8.1
|
||||
weakmap-polyfill: 2.0.4
|
||||
|
||||
'@vitejs/plugin-react-swc@3.10.2(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2))':
|
||||
dependencies:
|
||||
'@rolldown/pluginutils': 1.0.0-beta.11
|
||||
@@ -6304,6 +6512,19 @@ snapshots:
|
||||
|
||||
base64-js@1.5.1: {}
|
||||
|
||||
bcp-47-match@2.0.3: {}
|
||||
|
||||
bcp-47-normalize@2.3.0:
|
||||
dependencies:
|
||||
bcp-47: 2.1.0
|
||||
bcp-47-match: 2.0.3
|
||||
|
||||
bcp-47@2.1.0:
|
||||
dependencies:
|
||||
is-alphabetical: 2.0.1
|
||||
is-alphanumerical: 2.0.1
|
||||
is-decimal: 2.0.1
|
||||
|
||||
better-opn@3.0.2:
|
||||
dependencies:
|
||||
open: 8.4.2
|
||||
@@ -6366,6 +6587,14 @@ snapshots:
|
||||
|
||||
caniuse-lite@1.0.30001727: {}
|
||||
|
||||
castable-video@1.1.10:
|
||||
dependencies:
|
||||
custom-media-element: 1.4.5
|
||||
|
||||
ce-la-react@0.3.1(react@18.3.1):
|
||||
dependencies:
|
||||
react: 18.3.1
|
||||
|
||||
chai@5.2.0:
|
||||
dependencies:
|
||||
assertion-error: 2.0.1
|
||||
@@ -6423,6 +6652,8 @@ snapshots:
|
||||
|
||||
clone@1.0.4: {}
|
||||
|
||||
cloudflare-video-element@1.3.3: {}
|
||||
|
||||
cmdk@1.1.1(@types/react-dom@18.3.7(@types/react@18.3.23))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
|
||||
dependencies:
|
||||
'@radix-ui/react-compose-refs': 1.1.2(@types/react@18.3.23)(react@18.3.1)
|
||||
@@ -6435,6 +6666,8 @@ snapshots:
|
||||
- '@types/react'
|
||||
- '@types/react-dom'
|
||||
|
||||
codem-isoboxer@0.3.10: {}
|
||||
|
||||
color-convert@2.0.1:
|
||||
dependencies:
|
||||
color-name: 1.1.4
|
||||
@@ -6510,6 +6743,8 @@ snapshots:
|
||||
|
||||
csstype@3.1.3: {}
|
||||
|
||||
custom-media-element@1.4.5: {}
|
||||
|
||||
d3-color@3.1.0: {}
|
||||
|
||||
d3-dispatch@3.0.1: {}
|
||||
@@ -6546,6 +6781,24 @@ snapshots:
|
||||
d3-selection: 3.0.0
|
||||
d3-transition: 3.0.1(d3-selection@3.0.0)
|
||||
|
||||
dash-video-element@0.1.6:
|
||||
dependencies:
|
||||
custom-media-element: 1.4.5
|
||||
dashjs: 5.0.3
|
||||
|
||||
dashjs@5.0.3:
|
||||
dependencies:
|
||||
'@svta/common-media-library': 0.12.4
|
||||
bcp-47-match: 2.0.3
|
||||
bcp-47-normalize: 2.3.0
|
||||
codem-isoboxer: 0.3.10
|
||||
fast-deep-equal: 3.1.3
|
||||
html-entities: 2.6.0
|
||||
imsc: 1.1.5
|
||||
localforage: 1.10.0
|
||||
path-browserify: 1.0.1
|
||||
ua-parser-js: 1.0.40
|
||||
|
||||
data-view-buffer@1.0.2:
|
||||
dependencies:
|
||||
call-bound: 1.0.4
|
||||
@@ -6610,11 +6863,11 @@ snapshots:
|
||||
|
||||
discontinuous-range@1.0.0: {}
|
||||
|
||||
dockview-core@4.4.1: {}
|
||||
dockview-core@4.7.1: {}
|
||||
|
||||
dockview@4.4.1(react@18.3.1):
|
||||
dockview@4.7.1(react@18.3.1):
|
||||
dependencies:
|
||||
dockview-core: 4.4.1
|
||||
dockview-core: 4.7.1
|
||||
react: 18.3.1
|
||||
|
||||
doctrine@2.1.0:
|
||||
@@ -7244,10 +7497,20 @@ snapshots:
|
||||
|
||||
he@1.2.0: {}
|
||||
|
||||
hls-video-element@1.5.6:
|
||||
dependencies:
|
||||
custom-media-element: 1.4.5
|
||||
hls.js: 1.6.9
|
||||
media-tracks: 0.3.3
|
||||
|
||||
hls.js@1.6.9: {}
|
||||
|
||||
hoist-non-react-statics@3.3.2:
|
||||
dependencies:
|
||||
react-is: 16.13.1
|
||||
|
||||
html-entities@2.6.0: {}
|
||||
|
||||
html-escaper@2.0.2: {}
|
||||
|
||||
html-parse-stringify@3.0.1:
|
||||
@@ -7283,6 +7546,8 @@ snapshots:
|
||||
|
||||
ignore@7.0.5: {}
|
||||
|
||||
immediate@3.0.6: {}
|
||||
|
||||
immer@10.1.1: {}
|
||||
|
||||
import-fresh@3.3.1:
|
||||
@@ -7292,6 +7557,10 @@ snapshots:
|
||||
|
||||
import-lazy@4.0.0: {}
|
||||
|
||||
imsc@1.1.5:
|
||||
dependencies:
|
||||
sax: 1.2.1
|
||||
|
||||
imurmurhash@0.1.4: {}
|
||||
|
||||
indent-string@4.0.0: {}
|
||||
@@ -7310,6 +7579,13 @@ snapshots:
|
||||
hasown: 2.0.2
|
||||
side-channel: 1.1.0
|
||||
|
||||
is-alphabetical@2.0.1: {}
|
||||
|
||||
is-alphanumerical@2.0.1:
|
||||
dependencies:
|
||||
is-alphabetical: 2.0.1
|
||||
is-decimal: 2.0.1
|
||||
|
||||
is-array-buffer@3.0.5:
|
||||
dependencies:
|
||||
call-bind: 1.0.8
|
||||
@@ -7352,6 +7628,8 @@ snapshots:
|
||||
call-bound: 1.0.4
|
||||
has-tostringtag: 1.0.2
|
||||
|
||||
is-decimal@2.0.1: {}
|
||||
|
||||
is-docker@2.2.1: {}
|
||||
|
||||
is-extglob@2.1.1: {}
|
||||
@@ -7553,6 +7831,10 @@ snapshots:
|
||||
prelude-ls: 1.2.1
|
||||
type-check: 0.4.0
|
||||
|
||||
lie@3.1.1:
|
||||
dependencies:
|
||||
immediate: 3.0.6
|
||||
|
||||
lines-and-columns@1.2.4: {}
|
||||
|
||||
linkify-react@4.3.1(linkifyjs@4.3.1)(react@18.3.1):
|
||||
@@ -7575,6 +7857,10 @@ snapshots:
|
||||
pkg-types: 2.2.0
|
||||
quansync: 0.2.10
|
||||
|
||||
localforage@1.10.0:
|
||||
dependencies:
|
||||
lie: 3.1.1
|
||||
|
||||
locate-path@6.0.0:
|
||||
dependencies:
|
||||
p-locate: 5.0.0
|
||||
@@ -7634,6 +7920,21 @@ snapshots:
|
||||
|
||||
mdn-data@2.0.14: {}
|
||||
|
||||
media-chrome@4.11.1(react@18.3.1):
|
||||
dependencies:
|
||||
'@vercel/edge': 1.2.2
|
||||
ce-la-react: 0.3.1(react@18.3.1)
|
||||
transitivePeerDependencies:
|
||||
- react
|
||||
|
||||
media-chrome@4.13.0(react@18.3.1):
|
||||
dependencies:
|
||||
ce-la-react: 0.3.1(react@18.3.1)
|
||||
transitivePeerDependencies:
|
||||
- react
|
||||
|
||||
media-tracks@0.3.3: {}
|
||||
|
||||
memoize-one@6.0.0: {}
|
||||
|
||||
merge2@1.4.1: {}
|
||||
@@ -7690,6 +7991,10 @@ snapshots:
|
||||
|
||||
muggle-string@0.4.1: {}
|
||||
|
||||
mux-embed@5.11.0: {}
|
||||
|
||||
mux-embed@5.9.0: {}
|
||||
|
||||
nano-css@5.6.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
|
||||
dependencies:
|
||||
'@jridgewell/sourcemap-codec': 1.5.4
|
||||
@@ -7711,6 +8016,8 @@ snapshots:
|
||||
|
||||
nanostores@1.0.1: {}
|
||||
|
||||
native-promise-only@0.8.1: {}
|
||||
|
||||
natural-compare@1.4.0: {}
|
||||
|
||||
nearley@2.20.1:
|
||||
@@ -7929,6 +8236,12 @@ snapshots:
|
||||
exsolve: 1.0.7
|
||||
pathe: 2.0.3
|
||||
|
||||
player.style@0.1.9(react@18.3.1):
|
||||
dependencies:
|
||||
media-chrome: 4.11.1(react@18.3.1)
|
||||
transitivePeerDependencies:
|
||||
- react
|
||||
|
||||
pluralize@8.0.0: {}
|
||||
|
||||
possible-typed-array-names@1.1.0: {}
|
||||
@@ -8066,6 +8379,24 @@ snapshots:
|
||||
|
||||
react-is@17.0.2: {}
|
||||
|
||||
react-player@3.3.1(@types/react-dom@18.3.7(@types/react@18.3.23))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
|
||||
dependencies:
|
||||
'@mux/mux-player-react': 3.5.3(@types/react-dom@18.3.7(@types/react@18.3.23))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
|
||||
'@types/react': 18.3.23
|
||||
cloudflare-video-element: 1.3.3
|
||||
dash-video-element: 0.1.6
|
||||
hls-video-element: 1.5.6
|
||||
react: 18.3.1
|
||||
react-dom: 18.3.1(react@18.3.1)
|
||||
spotify-audio-element: 1.0.2
|
||||
tiktok-video-element: 0.1.0
|
||||
twitch-video-element: 0.1.2
|
||||
vimeo-video-element: 1.5.3
|
||||
wistia-video-element: 1.3.3
|
||||
youtube-video-element: 1.6.1
|
||||
transitivePeerDependencies:
|
||||
- '@types/react-dom'
|
||||
|
||||
react-redux@9.2.0(@types/react@18.3.23)(react@18.3.1)(redux@5.0.1):
|
||||
dependencies:
|
||||
'@types/use-sync-external-store': 0.0.6
|
||||
@@ -8360,6 +8691,8 @@ snapshots:
|
||||
|
||||
safe-stable-stringify@2.5.0: {}
|
||||
|
||||
sax@1.2.1: {}
|
||||
|
||||
scheduler@0.23.2:
|
||||
dependencies:
|
||||
loose-envify: 1.4.0
|
||||
@@ -8484,6 +8817,8 @@ snapshots:
|
||||
|
||||
split-on-first@3.0.0: {}
|
||||
|
||||
spotify-audio-element@1.0.2: {}
|
||||
|
||||
sprintf-js@1.0.3: {}
|
||||
|
||||
stable-hash@0.0.6: {}
|
||||
@@ -8627,6 +8962,8 @@ snapshots:
|
||||
|
||||
stylis@4.3.6: {}
|
||||
|
||||
super-media-element@1.4.2: {}
|
||||
|
||||
supports-color@10.0.0: {}
|
||||
|
||||
supports-color@7.2.0:
|
||||
@@ -8647,6 +8984,8 @@ snapshots:
|
||||
|
||||
throttle-debounce@3.0.1: {}
|
||||
|
||||
tiktok-video-element@0.1.0: {}
|
||||
|
||||
tiny-invariant@1.3.3: {}
|
||||
|
||||
tinybench@2.9.0: {}
|
||||
@@ -8709,6 +9048,8 @@ snapshots:
|
||||
|
||||
tslib@2.8.1: {}
|
||||
|
||||
twitch-video-element@0.1.2: {}
|
||||
|
||||
type-check@0.4.0:
|
||||
dependencies:
|
||||
prelude-ls: 1.2.1
|
||||
@@ -8752,6 +9093,8 @@ snapshots:
|
||||
|
||||
typescript@5.8.3: {}
|
||||
|
||||
ua-parser-js@1.0.40: {}
|
||||
|
||||
ufo@1.6.1: {}
|
||||
|
||||
unbox-primitive@1.1.0:
|
||||
@@ -8834,6 +9177,10 @@ snapshots:
|
||||
|
||||
uuid@11.1.0: {}
|
||||
|
||||
vimeo-video-element@1.5.3:
|
||||
dependencies:
|
||||
'@vimeo/player': 2.29.0
|
||||
|
||||
vite-node@3.2.4(@types/node@22.16.0)(jiti@2.4.2):
|
||||
dependencies:
|
||||
cac: 6.7.14
|
||||
@@ -8962,6 +9309,8 @@ snapshots:
|
||||
dependencies:
|
||||
defaults: 1.0.4
|
||||
|
||||
weakmap-polyfill@2.0.4: {}
|
||||
|
||||
webidl-conversions@3.0.1: {}
|
||||
|
||||
webpack-virtual-modules@0.6.2: {}
|
||||
@@ -9021,6 +9370,10 @@ snapshots:
|
||||
siginfo: 2.0.0
|
||||
stackback: 0.0.2
|
||||
|
||||
wistia-video-element@1.3.3:
|
||||
dependencies:
|
||||
super-media-element: 1.4.2
|
||||
|
||||
word-wrap@1.2.5: {}
|
||||
|
||||
wrap-ansi@7.0.0:
|
||||
@@ -9067,6 +9420,8 @@ snapshots:
|
||||
|
||||
yocto-queue@1.2.1: {}
|
||||
|
||||
youtube-video-element@1.6.1: {}
|
||||
|
||||
zod-validation-error@3.5.3(zod@3.25.76):
|
||||
dependencies:
|
||||
zod: 3.25.76
|
||||
|
||||
@@ -210,10 +210,6 @@
      "title": "Bbox Werkzeug",
      "desc": "Bbox Werkzeug auswählen."
    },
    "setFillToWhite": {
      "title": "Farbe auf Weiß einstellen",
      "desc": "Setzt die aktuelle Werkzeugfarbe auf weiß."
    },
    "title": "Leinwand",
    "selectBrushTool": {
      "title": "Pinselwerkzeug",
@@ -38,10 +38,13 @@
    "deletedImagesCannotBeRestored": "Deleted images cannot be restored.",
    "hideBoards": "Hide Boards",
    "loading": "Loading...",
    "locateInGalery": "Locate in Gallery",
    "menuItemAutoAdd": "Auto-add to this Board",
    "move": "Move",
    "movingImagesToBoard_one": "Moving {{count}} image to board:",
    "movingImagesToBoard_other": "Moving {{count}} images to board:",
    "movingVideosToBoard_one": "Moving {{count}} video to board:",
    "movingVideosToBoard_other": "Moving {{count}} videos to board:",
    "myBoard": "My Board",
    "noBoards": "No {{boardType}} Boards",
    "noMatching": "No matching Boards",
@@ -58,6 +61,8 @@
    "imagesWithCount_other": "{{count}} images",
    "assetsWithCount_one": "{{count}} asset",
    "assetsWithCount_other": "{{count}} assets",
    "videosWithCount_one": "{{count}} video",
    "videosWithCount_other": "{{count}} videos",
    "updateBoardError": "Error updating board"
  },
  "accordions": {
@@ -114,6 +119,9 @@
    "t2iAdapter": "T2I Adapter",
    "positivePrompt": "Positive Prompt",
    "negativePrompt": "Negative Prompt",
    "removeNegativePrompt": "Remove Negative Prompt",
    "addNegativePrompt": "Add Negative Prompt",
    "selectYourModel": "Select Your Model",
    "discordLabel": "Discord",
    "dontAskMeAgain": "Don't ask me again",
    "dontShowMeThese": "Don't show me these",
@@ -357,6 +365,9 @@
    "deleteImage_one": "Delete Image",
    "deleteImage_other": "Delete {{count}} Images",
    "deleteImagePermanent": "Deleted images cannot be restored.",
    "deleteVideo_one": "Delete Video",
    "deleteVideo_other": "Delete {{count}} Videos",
    "deleteVideoPermanent": "Deleted videos cannot be restored.",
    "displayBoardSearch": "Board Search",
    "displaySearch": "Image Search",
    "download": "Download",
@@ -376,9 +387,10 @@
    "sortDirection": "Sort Direction",
    "showStarredImagesFirst": "Show Starred Images First",
    "noImageSelected": "No Image Selected",
    "noVideoSelected": "No Video Selected",
    "noImagesInGallery": "No Images to Display",
    "starImage": "Star Image",
    "unstarImage": "Unstar Image",
    "starImage": "Star",
    "unstarImage": "Unstar",
    "unableToLoad": "Unable to load Gallery",
    "deleteSelection": "Delete Selection",
    "downloadSelection": "Download Selection",
@@ -407,7 +419,9 @@
    "openViewer": "Open Viewer",
    "closeViewer": "Close Viewer",
    "move": "Move",
    "useForPromptGeneration": "Use for Prompt Generation"
    "useForPromptGeneration": "Use for Prompt Generation",
    "videos": "Videos",
    "videosTab": "Videos you've created and saved within Invoke."
  },
  "hotkeys": {
    "hotkeys": "Hotkeys",
@@ -452,6 +466,10 @@
        "title": "Select the Queue Tab",
        "desc": "Selects the Queue tab."
      },
      "selectVideoTab": {
        "title": "Select the Video Tab",
        "desc": "Selects the Video tab."
      },
      "focusPrompt": {
        "title": "Focus Prompt",
        "desc": "Move cursor focus to the positive prompt."
@@ -478,6 +496,9 @@
        "key": "1"
      }
    },
    "video": {
      "title": "Video"
    },
    "canvas": {
      "title": "Canvas",
      "selectBrushTool": {
@@ -568,9 +589,13 @@
        "title": "Prev Layer",
        "desc": "Select the previous layer in the list."
      },
      "setFillToWhite": {
        "title": "Set Color to White",
        "desc": "Set the current tool color to white."
      "setFillColorsToDefault": {
        "title": "Set Colors to Default",
        "desc": "Set the current tool colors to default."
      },
      "toggleFillColor": {
        "title": "Toggle Fill Color",
        "desc": "Toggle the current tool fill color."
      },
      "filterSelected": {
        "title": "Filter",
@@ -618,6 +643,10 @@
        "title": "Fit Bbox To Masks",
        "desc": "Automatically adjust the generation bounding box to fit visible inpaint masks"
      },
      "toggleBbox": {
        "title": "Toggle Bbox Visibility",
        "desc": "Hide or show the generation bounding box"
      },
      "applySegmentAnything": {
        "title": "Apply Segment Anything",
        "desc": "Apply the current Segment Anything mask.",
@@ -763,20 +792,26 @@
      }
    }
  },
  "lora": {
    "weight": "Weight"
  },
  "metadata": {
    "allPrompts": "All Prompts",
    "cfgScale": "CFG scale",
    "cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
    "clipSkip": "$t(parameters.clipSkip)",
    "createdBy": "Created By",
    "generationMode": "Generation Mode",
    "guidance": "Guidance",
    "height": "Height",
    "imageDetails": "Image Details",
    "videoDetails": "Video Details",
    "imageDimensions": "Image Dimensions",
    "metadata": "Metadata",
    "model": "Model",
    "negativePrompt": "Negative Prompt",
    "noImageDetails": "No image details found",
    "noVideoDetails": "No video details found",
    "noMetaData": "No metadata found",
    "noRecallParameters": "No parameters to recall found",
    "parameterSet": "Parameter {{parameter}} set",
@@ -794,7 +829,11 @@
    "vae": "VAE",
    "width": "Width",
    "workflow": "Workflow",
    "canvasV2Metadata": "Canvas Layers"
    "canvasV2Metadata": "Canvas Layers",
    "videoModel": "Model",
    "videoDuration": "Duration",
    "videoAspectRatio": "Aspect Ratio",
    "videoResolution": "Resolution"
  },
  "modelManager": {
    "active": "active",
@@ -869,6 +908,9 @@
    "install": "Install",
    "installAll": "Install All",
    "installRepo": "Install Repo",
    "installBundle": "Install Bundle",
    "installBundleMsg1": "Are you sure you want to install the {{bundleName}} bundle?",
    "installBundleMsg2": "This bundle will install the following {{count}} models:",
    "ipAdapters": "IP Adapters",
    "learnMoreAboutSupportedModels": "Learn more about the models we support",
    "load": "Load",
@@ -1177,6 +1219,7 @@
  },
  "parameters": {
    "aspect": "Aspect",
    "duration": "Duration",
    "lockAspectRatio": "Lock Aspect Ratio",
    "swapDimensions": "Swap Dimensions",
    "setToOptimalSize": "Optimize size for model",
@@ -1201,9 +1244,14 @@
    "height": "Height",
    "imageFit": "Fit Initial Image To Output Size",
    "images": "Images",
    "images_withCount_one": "Image",
    "images_withCount_other": "Images",
    "videos_withCount_one": "Video",
    "videos_withCount_other": "Videos",
    "infillMethod": "Infill Method",
    "infillColorValue": "Fill Color",
    "info": "Info",
    "startingFrameImage": "Start Frame",
    "invoke": {
      "addingImagesTo": "Adding images to",
      "modelDisabledForTrial": "Generating with {{modelName}} is not available on trial accounts. Visit your account settings to upgrade.",
@@ -1227,6 +1275,7 @@
      "batchNodeCollectionSizeMismatchNoGroupId": "Batch group collection size mismatch",
      "batchNodeCollectionSizeMismatch": "Collection size mismatch on Batch {{batchGroupId}}",
      "noModelSelected": "No model selected",
      "noStartingFrameImage": "No starting frame image",
      "noT5EncoderModelSelected": "No T5 Encoder model selected for FLUX generation",
      "noFLUXVAEModelSelected": "No VAE model selected for FLUX generation",
      "noCLIPEmbedModelSelected": "No CLIP Embed model selected for FLUX generation",
@@ -1239,7 +1288,7 @@
      "modelIncompatibleScaledBboxWidth": "Scaled bbox width is {{width}} but {{model}} requires multiple of {{multiple}}",
      "modelIncompatibleScaledBboxHeight": "Scaled bbox height is {{height}} but {{model}} requires multiple of {{multiple}}",
      "fluxModelMultipleControlLoRAs": "Can only use 1 Control LoRA at a time",
      "fluxKontextMultipleReferenceImages": "Can only use 1 Reference Image at a time with FLUX Kontext via BFL API",
      "incompatibleLoRAs": "Incompatible LoRA(s) added",
      "canvasIsFiltering": "Canvas is busy (filtering)",
      "canvasIsTransforming": "Canvas is busy (transforming)",
      "canvasIsRasterizing": "Canvas is busy (rasterizing)",
@@ -1249,7 +1298,8 @@
      "noNodesInGraph": "No nodes in graph",
      "systemDisconnected": "System disconnected",
      "promptExpansionPending": "Prompt expansion in progress",
      "promptExpansionResultPending": "Please accept or discard your prompt expansion result"
      "promptExpansionResultPending": "Please accept or discard your prompt expansion result",
      "videoIsDisabled": "Video generation is not enabled for {{accountType}} accounts."
    },
    "maskBlur": "Mask Blur",
    "negativePromptPlaceholder": "Negative Prompt",
@@ -1267,9 +1317,11 @@
    "seamlessXAxis": "Seamless X Axis",
    "seamlessYAxis": "Seamless Y Axis",
    "seed": "Seed",
    "videoActions": "Video Actions",
    "imageActions": "Image Actions",
    "sendToCanvas": "Send To Canvas",
    "sendToUpscale": "Send To Upscale",
    "sendToVideo": "Send To Video",
    "showOptionsPanel": "Show Side Panel (O or T)",
    "shuffle": "Shuffle Seed",
    "steps": "Steps",
@@ -1281,16 +1333,19 @@
    "postProcessing": "Post-Processing (Shift + U)",
    "processImage": "Process Image",
    "upscaling": "Upscaling",
    "video": "Video",
    "useAll": "Use All",
    "useSize": "Use Size",
    "useCpuNoise": "Use CPU Noise",
    "remixImage": "Remix Image",
    "usePrompt": "Use Prompt",
    "useSeed": "Use Seed",
    "useClipSkip": "Use CLIP Skip",
    "width": "Width",
    "gaussianBlur": "Gaussian Blur",
    "boxBlur": "Box Blur",
    "staged": "Staged",
    "resolution": "Resolution",
    "modelDisabledForTrial": "Generating with {{modelName}} is not available on trial accounts. Visit your <LinkComponent>account settings</LinkComponent> to upgrade."
  },
  "dynamicPrompts": {
@@ -1368,8 +1423,8 @@
    "addedToBoard": "Added to board {{name}}'s assets",
    "addedToUncategorized": "Added to board $t(boards.uncategorized)'s assets",
    "baseModelChanged": "Base Model Changed",
    "baseModelChangedCleared_one": "Cleared or disabled {{count}} incompatible submodel",
    "baseModelChangedCleared_other": "Cleared or disabled {{count}} incompatible submodels",
    "baseModelChangedCleared_one": "Updated, cleared or disabled {{count}} incompatible submodel",
    "baseModelChangedCleared_other": "Updated, cleared or disabled {{count}} incompatible submodels",
    "canceled": "Processing Canceled",
    "connected": "Connected to Server",
    "imageCopied": "Image Copied",
@@ -1937,8 +1992,11 @@
    "zoomToNode": "Zoom to Node",
    "nodeFieldTooltip": "To add a node field, click the small plus sign button on the field in the Workflow Editor, or drag the field by its name into the form.",
    "addToForm": "Add to Form",
    "removeFromForm": "Remove from Form",
    "label": "Label",
    "showDescription": "Show Description",
    "showShuffle": "Show Shuffle",
    "shuffle": "Shuffle",
    "component": "Component",
    "numberInput": "Number Input",
    "singleLine": "Single Line",
@@ -2180,7 +2238,8 @@
      "rgReferenceImagesNotSupported": "regional Reference Images not supported for selected base model",
      "rgAutoNegativeNotSupported": "Auto-Negative not supported for selected base model",
      "rgNoRegion": "no region drawn",
      "fluxFillIncompatibleWithControlLoRA": "Control LoRA is not compatible with FLUX Fill"
      "fluxFillIncompatibleWithControlLoRA": "Control LoRA is not compatible with FLUX Fill",
      "bboxHidden": "Bounding box is hidden (shift+o to toggle)"
    },
    "errors": {
      "unableToFindImage": "Unable to find image",
@@ -2216,6 +2275,8 @@
    },
    "fill": {
      "fillColor": "Fill Color",
      "bgFillColor": "Background Color",
      "fgFillColor": "Foreground Color",
      "fillStyle": "Fill Style",
      "solid": "Solid",
      "grid": "Grid",
@@ -2548,19 +2609,30 @@
      "queue": "Queue",
      "upscaling": "Upscaling",
      "upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
      "video": "Video",
      "gallery": "Gallery"
    },
    "panels": {
      "launchpad": "Launchpad",
      "workflowEditor": "Workflow Editor",
      "imageViewer": "Image Viewer",
      "canvas": "Canvas"
      "imageViewer": "Viewer",
      "canvas": "Canvas",
      "video": "Video"
    },
    "launchpad": {
      "workflowsTitle": "Go deep with Workflows.",
      "upscalingTitle": "Upscale and add detail.",
      "canvasTitle": "Edit and refine on Canvas.",
      "generateTitle": "Generate images from text prompts.",
      "videoTitle": "Generate videos from text prompts.",
      "video": {
        "startingFrameCalloutTitle": "Add a Starting Frame",
        "startingFrameCalloutDesc": "Add an image to control the first frame of your video."
      },
      "addStartingFrame": {
        "title": "Add a Starting Frame",
        "description": "Add an image to control the first frame of your video."
      },
      "modelGuideText": "Want to learn what prompts work best for each model?",
      "modelGuideLink": "Check out our Model Guide.",
      "createNewWorkflowFromScratch": "Create a new Workflow from scratch",
@@ -2635,6 +2707,10 @@
      }
    }
  },
  "video": {
    "noVideoSelected": "No video selected",
    "selectFromGallery": "Select a video from the gallery to play"
  },
  "system": {
    "enableLogging": "Enable Logging",
    "logLevel": {
@@ -2672,8 +2748,8 @@
  "whatsNew": {
    "whatsNewInInvoke": "What's New in Invoke",
    "items": [
      "Studio state is saved to the server, allowing you to continue your work on any device.",
      "Support for multiple reference images for FLUX Kontext (local model only)."
      "Canvas: Color Picker does not sample alpha, bbox respects aspect ratio lock when resizing shuffle button for number fields in Workflow Builder, hide pixel dimension sliders when using a model that doesn't support them",
      "Workflows: Add a Shuffle button to number input fields"
    ],
    "readReleaseNotes": "Read Release Notes",
    "watchRecentReleaseVideos": "Watch Recent Release Videos",
@@ -591,10 +591,6 @@
      "title": "Ajustar capas al lienzo",
      "desc": "Escala y posiciona la vista para que se ajuste a todas las capas visibles."
    },
    "setFillToWhite": {
      "title": "Establecer color en blanco",
      "desc": "Establece el color actual de la herramienta en blanco."
    },
    "resetSelected": {
      "title": "Restablecer capa",
      "desc": "Restablecer la capa seleccionada. Solo se aplica a Máscara de retoque y Guía regional."
@@ -901,10 +901,6 @@
      "desc": "Définit le zoom de la toile à 400 %.",
      "title": "Zoomer à 400 %"
    },
    "setFillToWhite": {
      "title": "Définir la couleur sur blanc",
      "desc": "Définir la couleur de l'outil actuel sur blanc."
    },
    "transformSelected": {
      "title": "Transformer",
      "desc": "Transforme la couche sélectionnée."
@@ -128,7 +128,10 @@
    "search": "Cerca",
    "clear": "Cancella",
    "compactView": "Vista compatta",
    "fullView": "Vista completa"
    "fullView": "Vista completa",
    "removeNegativePrompt": "Rimuovi prompt negativo",
    "addNegativePrompt": "Aggiungi prompt negativo",
    "selectYourModel": "Seleziona il modello"
  },
  "gallery": {
    "galleryImageSize": "Dimensione dell'immagine",
@@ -151,9 +154,9 @@
    "deleteSelection": "Elimina la selezione",
    "image": "immagine",
    "drop": "Rilascia",
    "unstarImage": "Rimuovi contrassegno immagine",
    "unstarImage": "Rimuovi contrassegno",
    "dropOrUpload": "Rilascia o carica",
    "starImage": "Contrassegna l'immagine",
    "starImage": "Contrassegna",
    "dropToUpload": "$t(gallery.drop) per aggiornare",
    "bulkDownloadRequested": "Preparazione del download",
    "bulkDownloadRequestedDesc": "La tua richiesta di download è in preparazione. L'operazione potrebbe richiedere alcuni istanti.",
@@ -198,7 +201,14 @@
    "imagesSettings": "Impostazioni Immagini Galleria",
    "assets": "Risorse",
    "images": "Immagini",
    "useForPromptGeneration": "Usa per generare il prompt"
    "useForPromptGeneration": "Usa per generare il prompt",
    "deleteVideo_one": "Elimina video",
    "deleteVideo_many": "Elimina {{count}} video",
    "deleteVideo_other": "Elimina {{count}} video",
    "deleteVideoPermanent": "I video eliminati non possono essere ripristinati.",
    "noVideoSelected": "Nessun video selezionato",
    "videos": "Video",
    "videosTab": "Video creati e salvati in Invoke."
  },
  "hotkeys": {
    "searchHotkeys": "Cerca tasti di scelta rapida",
@@ -264,6 +274,10 @@
      "selectGenerateTab": {
        "title": "Seleziona la scheda Genera",
        "desc": "Seleziona la scheda Genera."
      },
      "selectVideoTab": {
        "title": "Seleziona la scheda Video",
        "desc": "Seleziona la scheda Video."
      }
    },
    "hotkeys": "Tasti di scelta rapida",
@@ -344,10 +358,6 @@
        "title": "Livello precedente",
        "desc": "Seleziona il livello precedente nell'elenco."
      },
      "setFillToWhite": {
        "title": "Imposta il colore su bianco",
        "desc": "Imposta il colore dello strumento corrente su bianco."
      },
      "title": "Tela",
      "selectMoveTool": {
        "title": "Strumento Sposta",
@@ -410,6 +420,14 @@
      "cancelSegmentAnything": {
        "title": "Annulla Segment Anything",
        "desc": "Annulla l'operazione Segment Anything corrente."
      },
      "fitBboxToLayers": {
        "title": "Adatta il riquadro di delimitazione ai livelli",
        "desc": "Regola automaticamente il riquadro di delimitazione della generazione per adattarlo ai livelli visibili"
      },
      "toggleBbox": {
        "title": "Attiva/disattiva la visibilità del riquadro di delimitazione",
        "desc": "Nascondi o mostra il riquadro di delimitazione della generazione"
      }
    },
    "workflows": {
@@ -544,6 +562,9 @@
        "desc": "Aggiungi/Rimuovi contrassegno all'immagine selezionata.",
        "title": "Aggiungi / Rimuovi contrassegno immagine"
      }
    },
    "video": {
      "title": "Video"
    }
  },
  "modelManager": {
@@ -711,7 +732,10 @@
      "bundleDescription": "Ogni pacchetto include modelli essenziali per ogni famiglia di modelli e modelli base selezionati per iniziare.",
      "browseAll": "Oppure scopri tutti i modelli disponibili:"
    },
    "launchpadTab": "Rampa di lancio"
    "launchpadTab": "Rampa di lancio",
    "installBundle": "Installa pacchetto",
    "installBundleMsg1": "Vuoi davvero installare il pacchetto {{bundleName}}?",
    "installBundleMsg2": "Questo pacchetto installerà i seguenti {{count}} modelli:"
  },
  "parameters": {
    "images": "Immagini",
@@ -798,9 +822,11 @@
      "modelIncompatibleScaledBboxWidth": "La larghezza scalata del riquadro è {{width}} ma {{model}} richiede multipli di {{multiple}}",
      "modelIncompatibleScaledBboxHeight": "L'altezza scalata del riquadro è {{height}} ma {{model}} richiede multipli di {{multiple}}",
      "modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Accedi alle impostazioni del tuo account per effettuare l'upgrade.",
      "fluxKontextMultipleReferenceImages": "È possibile utilizzare solo 1 immagine di riferimento alla volta con FLUX Kontext tramite BFL API",
      "promptExpansionResultPending": "Accetta o ignora il risultato dell'espansione del prompt",
      "promptExpansionPending": "Espansione del prompt in corso"
      "promptExpansionPending": "Espansione del prompt in corso",
      "noStartingFrameImage": "Nessuna immagine del fotogramma iniziale",
      "videoIsDisabled": "La generazione di video non è abilitata per gli account {{accountType}}.",
      "incompatibleLoRAs": "Aggiunti LoRA incompatibili"
    },
    "useCpuNoise": "Usa la CPU per generare rumore",
    "iterations": "Iterazioni",
@@ -828,7 +854,20 @@
    "coherenceMinDenoise": "Min rid. rumore",
    "recallMetadata": "Richiama i metadati",
    "disabledNoRasterContent": "Disabilitato (nessun contenuto Raster)",
    "modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Visita le <LinkComponent>impostazioni account</LinkComponent> per effettuare l'upgrade."
    "modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Visita le <LinkComponent>impostazioni account</LinkComponent> per effettuare l'upgrade.",
    "useClipSkip": "Usa CLIP Skip",
    "duration": "Durata",
    "images_withCount_one": "Immagine",
    "images_withCount_many": "Immagini",
    "images_withCount_other": "Immagini",
    "videos_withCount_one": "Video",
    "videos_withCount_many": "Video",
    "videos_withCount_other": "Video",
    "startingFrameImage": "Fotogramma iniziale",
    "videoActions": "Azioni video",
    "sendToVideo": "Invia al Video",
    "video": "Video",
    "resolution": "Risoluzione"
  },
  "settings": {
    "models": "Modelli",
@@ -881,8 +920,8 @@
    "parameterSet": "Parametro richiamato",
    "parameterNotSet": "Parametro non richiamato",
    "problemCopyingImage": "Impossibile copiare l'immagine",
    "baseModelChangedCleared_one": "Cancellato o disabilitato {{count}} sottomodello incompatibile",
    "baseModelChangedCleared_many": "Cancellati o disabilitati {{count}} sottomodelli incompatibili",
    "baseModelChangedCleared_one": "Aggiornato, cancellato o disabilitato {{count}} sottomodello incompatibile",
    "baseModelChangedCleared_many": "Aggiornati, cancellati o disabilitati {{count}} sottomodelli incompatibili",
    "baseModelChangedCleared_other": "Cancellati o disabilitati {{count}} sottomodelli incompatibili",
    "loadedWithWarnings": "Flusso di lavoro caricato con avvisi",
    "imageUploaded": "Immagine caricata",
@@ -1227,7 +1266,14 @@
    "updateBoardError": "Errore durante l'aggiornamento della bacheca",
    "uncategorizedImages": "Immagini non categorizzate",
    "deleteAllUncategorizedImages": "Elimina tutte le immagini non categorizzate",
    "deletedImagesCannotBeRestored": "Le immagini eliminate non possono essere ripristinate."
    "deletedImagesCannotBeRestored": "Le immagini eliminate non possono essere ripristinate.",
    "locateInGalery": "Trova nella Galleria",
    "movingVideosToBoard_one": "Spostamento di {{count}} video sulla bacheca:",
    "movingVideosToBoard_many": "Spostamento di {{count}} video sulla bacheca:",
    "movingVideosToBoard_other": "Spostamento di {{count}} video sulla bacheca:",
    "videosWithCount_one": "{{count}} video",
    "videosWithCount_many": "{{count}} video",
    "videosWithCount_other": "{{count}} video"
  },
  "queue": {
    "queueFront": "Aggiungi all'inizio della coda",
@@ -1850,7 +1896,13 @@
    "guidance": "Guida",
    "seamlessXAxis": "Asse X senza giunte",
    "seamlessYAxis": "Asse Y senza giunte",
    "vae": "VAE"
    "vae": "VAE",
    "videoDetails": "Dettagli video",
    "noVideoDetails": "Nessun dettaglio video trovato",
    "videoModel": "Modello",
    "videoDuration": "Durata",
    "videoAspectRatio": "Proporzioni",
    "videoResolution": "Risoluzione"
  },
  "hrf": {
    "enableHrf": "Abilita Correzione Alta Risoluzione",
@@ -1974,7 +2026,10 @@
    "publishInProgress": "Pubblicazione in corso",
    "selectingOutputNode": "Selezione del nodo di uscita",
    "selectingOutputNodeDesc": "Fare clic su un nodo per selezionarlo come nodo di uscita del flusso di lavoro.",
    "errorWorkflowHasUnpublishableNodes": "Il flusso di lavoro ha nodi di estrazione lotto, generatore o metadati"
    "errorWorkflowHasUnpublishableNodes": "Il flusso di lavoro ha nodi di estrazione lotto, generatore o metadati",
    "showShuffle": "Mostra Mescola",
    "shuffle": "Mescola",
    "removeFromForm": "Rimuovi dal modulo"
  },
  "loadMore": "Carica altro",
  "searchPlaceholder": "Cerca per nome, descrizione o etichetta",
@@ -2455,7 +2510,8 @@
    "ipAdapterIncompatibleBaseModel": "modello base dell'immagine di riferimento incompatibile",
    "ipAdapterNoImageSelected": "nessuna immagine di riferimento selezionata",
    "rgAutoNegativeNotSupported": "Auto-Negativo non supportato per il modello base selezionato",
    "fluxFillIncompatibleWithControlLoRA": "Il controllo LoRA non è compatibile con FLUX Fill"
    "fluxFillIncompatibleWithControlLoRA": "Il controllo LoRA non è compatibile con FLUX Fill",
    "bboxHidden": "Il riquadro di delimitazione è nascosto (Shift+o per attivarlo)"
  },
  "pasteTo": "Incolla su",
  "pasteToBboxDesc": "Nuovo livello (nel riquadro di delimitazione)",
@@ -2510,7 +2566,8 @@
    "upscaling": "Amplia",
    "upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
    "gallery": "Galleria",
    "generate": "Genera"
    "generate": "Genera",
    "video": "Video"
  },
  "launchpad": {
    "workflowsTitle": "Approfondisci i flussi di lavoro.",
@@ -2588,13 +2645,23 @@
    "generate": {
      "canvasCalloutTitle": "Vuoi avere più controllo, modificare e affinare le tue immagini?",
      "canvasCalloutLink": "Per ulteriori funzionalità, vai su Tela."
    },
    "videoTitle": "Genera video da prompt testuale.",
    "video": {
      "startingFrameCalloutTitle": "Aggiungi un fotogramma iniziale",
      "startingFrameCalloutDesc": "Aggiungi un'immagine per controllare il primo fotogramma del tuo video."
    },
    "addStartingFrame": {
      "title": "Aggiungi un fotogramma iniziale",
      "description": "Aggiungi un'immagine per controllare il primo fotogramma del tuo video."
    }
  },
  "panels": {
    "launchpad": "Rampa di lancio",
    "workflowEditor": "Editor del flusso di lavoro",
    "imageViewer": "Visualizzatore immagini",
    "canvas": "Tela"
    "imageViewer": "Visualizzatore",
    "canvas": "Tela",
    "video": "Video"
  }
  },
  "upscaling": {
@@ -2685,8 +2752,8 @@
    "watchRecentReleaseVideos": "Guarda i video su questa versione",
    "watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
    "items": [
      "Lo stato dello studio viene salvato sul server, consentendoti di continuare a lavorare su qualsiasi dispositivo.",
      "Supporto per più immagini di riferimento per FLUX Kontext (solo modello locale)."
      "Tela: Color Picker non campiona l'alfa, il riquadro di delimitazione rispetta il blocco delle proporzioni quando si ridimensiona il pulsante Mescola per i campi numerici nel generatore di flusso di lavoro, nasconde i cursori delle dimensioni dei pixel quando si utilizza un modello che non li supporta",
      "Flussi di lavoro: aggiunto un pulsante Mescola ai campi di input numerici"
    ]
  },
  "system": {
@@ -2735,5 +2802,9 @@
    "clear": "Cancella la cache del modello",
    "clearSucceeded": "Cache del modello cancellata",
    "clearFailed": "Problema durante la cancellazione della cache del modello"
  },
  "video": {
    "noVideoSelected": "Nessun video selezionato",
    "selectFromGallery": "Seleziona un video dalla galleria per riprodurlo"
  }
}
@@ -325,10 +325,6 @@
      "desc": "リスト内の前のレイヤーを選択します。",
      "title": "前のレイヤー"
    },
    "setFillToWhite": {
      "title": "ツール色を白に設定",
      "desc": "現在のツールの色を白色に設定します。"
    },
    "selectViewTool": {
      "title": "表示ツール",
      "desc": "表示ツールを選択します。"
@@ -755,7 +751,6 @@
    "noFLUXVAEModelSelected": "FLUX生成にVAEモデルが選択されていません",
    "noT5EncoderModelSelected": "FLUX生成にT5エンコーダモデルが選択されていません",
    "modelDisabledForTrial": "{{modelName}} を使用した生成はトライアルアカウントではご利用いただけません.アカウント設定にアクセスしてアップグレードしてください。",
    "fluxKontextMultipleReferenceImages": "Flux Kontext では一度に 1 つの参照画像しか使用できません",
    "promptExpansionPending": "プロンプト拡張が進行中",
    "promptExpansionResultPending": "プロンプト拡張結果を受け入れるか破棄してください"
  },
@@ -285,10 +285,6 @@
      "title": "Next Layer",
      "desc": "Select the next layer in the list."
    },
    "setFillToWhite": {
      "title": "Set Color to White",
      "desc": "Set the current tool color to white."
    },
    "applyFilter": {
      "title": "Apply Filter",
      "desc": "Apply the pending filter to the selected layer."
@@ -55,7 +55,8 @@
    "assetsWithCount_other": "{{count}} tài nguyên",
    "uncategorizedImages": "Ảnh Chưa Sắp Xếp",
    "deleteAllUncategorizedImages": "Xoá Tất Cả Ảnh Chưa Sắp Xếp",
    "deletedImagesCannotBeRestored": "Ảnh đã xoá không thể phục hồi lại."
    "deletedImagesCannotBeRestored": "Ảnh đã xoá không thể phục hồi lại.",
    "locateInGalery": "Vị Trí Ở Thư Viện Ảnh"
  },
  "gallery": {
    "swapImages": "Đổi Hình Ảnh",
@@ -252,7 +253,10 @@
    "clear": "Dọn Dẹp",
    "compactView": "Chế Độ Xem Gọn",
    "fullView": "Chế Độ Xem Đầy Đủ",
    "options_withCount_other": "{{count}} thiết lập"
    "options_withCount_other": "{{count}} thiết lập",
    "removeNegativePrompt": "Xóa Lệnh Tiêu Cực",
    "addNegativePrompt": "Thêm Lệnh Tiêu Cực",
    "selectYourModel": "Chọn Model"
  },
  "prompt": {
    "addPromptTrigger": "Thêm Trigger Cho Lệnh",
@@ -356,10 +360,6 @@
      "desc": "Phóng to canvas lên 800%.",
      "title": "Phóng To Vào 800%"
    },
    "setFillToWhite": {
      "title": "Chỉnh Màu Sang Trắng",
      "desc": "Chỉnh màu hiện tại sang màu trắng."
    },
    "transformSelected": {
      "title": "Biến Đổi",
      "desc": "Biến đổi layer được chọn."
@@ -492,6 +492,14 @@
      "title": "Huỷ Segment Anything",
      "desc": "Huỷ hoạt động Segment Anything hiện tại.",
      "key": "esc"
    },
    "fitBboxToLayers": {
      "title": "Xếp Vừa Hộp Giới Hạn Vào Layer",
      "desc": "Tự động điểu chỉnh hộp giới hạn tạo sinh vừa vặn vào layer nhìn thấy được"
    },
    "toggleBbox": {
      "title": "Bật/Tắt Hiển Thị Hộp Giới Hạn",
      "desc": "Ẩn hoặc hiện hộp giới hạn tạo sinh"
    }
  },
  "workflows": {
@@ -865,7 +873,10 @@
        "stableDiffusion15": "Stable Diffusion 1.5",
        "sdxl": "SDXL",
        "fluxDev": "FLUX.1 dev"
      }
    },
    "installBundle": "Tải Xuống Gói",
    "installBundleMsg1": "Bạn có chắc chắn muốn tải xuống gói {{bundleName}}?",
    "installBundleMsg2": "Gói này sẽ tải xuống {{count}} model sau đây:"
  },
  "metadata": {
    "guidance": "Hướng Dẫn",
@@ -898,7 +909,8 @@
    "recallParameters": "Gợi Nhớ Tham Số",
    "scheduler": "Scheduler",
    "noMetaData": "Không tìm thấy metadata",
    "imageDimensions": "Kích Thước Ảnh"
    "imageDimensions": "Kích Thước Ảnh",
    "clipSkip": "$t(parameters.clipSkip)"
  },
  "accordions": {
    "generation": {
@@ -1641,7 +1653,6 @@
      "modelIncompatibleScaledBboxHeight": "Chiều dài hộp giới hạn theo tỉ lệ là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
      "modelIncompatibleScaledBboxWidth": "Chiều rộng hộp giới hạn theo tỉ lệ là {{width}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
      "modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần thiết lập tài khoản để nâng cấp.",
      "fluxKontextMultipleReferenceImages": "Chỉ có thể dùng 1 Ảnh Mẫu cùng lúc với LUX Kontext thông qua BFL API",
      "promptExpansionPending": "Trong quá trình mở rộng lệnh",
      "promptExpansionResultPending": "Hãy chấp thuận hoặc huỷ bỏ kết quả mở rộng lệnh của bạn"
    },
@@ -1707,7 +1718,8 @@
    "upscaling": "Upscale",
    "tileSize": "Kích Thước Khối",
    "disabledNoRasterContent": "Đã Tắt (Không Có Nội Dung Dạng Raster)",
    "modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần <LinkComponent>thiết lập tài khoản</LinkComponent> để nâng cấp."
    "modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần <LinkComponent>thiết lập tài khoản</LinkComponent> để nâng cấp.",
    "useClipSkip": "Dùng CLIP Skip"
  },
  "dynamicPrompts": {
    "seedBehaviour": {
@@ -2198,7 +2210,8 @@
      "rgReferenceImagesNotSupported": "Ảnh Mẫu Khu Vực không được hỗ trợ cho model cơ sở được chọn",
      "rgAutoNegativeNotSupported": "Tự Động Đảo Chiều không được hỗ trợ cho model cơ sở được chọn",
      "rgNoRegion": "không có khu vực được vẽ",
      "fluxFillIncompatibleWithControlLoRA": "LoRA Điều Khiển Được không tương tích với FLUX Fill"
      "fluxFillIncompatibleWithControlLoRA": "LoRA Điều Khiển Được không tương tích với FLUX Fill",
      "bboxHidden": "Hộp giới hạn đang ẩn (shift+o để bật/tắt)"
    },
    "pasteTo": "Dán Vào",
    "pasteToAssets": "Tài Nguyên",
@@ -2622,7 +2635,10 @@
      "publishingValidationRunInProgress": "Quá trình kiểm tra tính hợp lệ đang diễn ra.",
      "selectingOutputNodeDesc": "Bấm vào node để biến nó thành node đầu ra của workflow.",
      "selectingOutputNode": "Chọn node đầu ra",
      "errorWorkflowHasUnpublishableNodes": "Workflow có lô node, node sản sinh, hoặc node tách metadata"
      "errorWorkflowHasUnpublishableNodes": "Workflow có lô node, node sản sinh, hoặc node tách metadata",
      "removeFromForm": "Xóa Khỏi Vùng Nhập",
      "showShuffle": "Hiện Xáo Trộn",
      "shuffle": "Xáo Trộn"
    },
    "yourWorkflows": "Workflow Của Bạn",
    "browseWorkflows": "Khám Phá Workflow",
@@ -2679,8 +2695,8 @@
    "watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
    "watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
    "items": [
      "Trạng thái Studio được lưu vào server, giúp bạn tiếp tục công việc ở mọi thiết bị.",
      "Hỗ trợ nhiều ảnh mẫu cho FLUX KONTEXT (chỉ cho model trên máy)."
      "Misc QoL: Bật/Tắt hiển thị hộp giới hạn, đánh dấu node bị lỗi, chặn lỗi thêm node vào vùng nhập nhiều lần, khả năng đọc lại metadata của CLIP Skip",
      "Giảm lượng tiêu thụ VRAM cho các ảnh mẫu Kontext và mã hóa VAE"
    ]
  },
  "upsell": {
@@ -310,10 +310,6 @@
      "title": "移动工具",
      "desc": "选择移动工具。"
    },
    "setFillToWhite": {
      "title": "将颜色设置为白色",
      "desc": "将当前工具的颜色设置为白色。"
    },
    "cancelTransform": {
      "desc": "取消待处理的变换。",
      "title": "取消变换"
@@ -2,12 +2,12 @@ import { useAppSelector } from 'app/store/storeHooks';
import { useIsRegionFocused } from 'common/hooks/focus';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { useLoadWorkflow } from 'features/gallery/hooks/useLoadWorkflow';
import { useRecallAll } from 'features/gallery/hooks/useRecallAll';
import { useRecallAll } from 'features/gallery/hooks/useRecallAllImageMetadata';
import { useRecallDimensions } from 'features/gallery/hooks/useRecallDimensions';
import { useRecallPrompts } from 'features/gallery/hooks/useRecallPrompts';
import { useRecallRemix } from 'features/gallery/hooks/useRecallRemix';
import { useRecallSeed } from 'features/gallery/hooks/useRecallSeed';
import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
import { selectLastSelectedItem } from 'features/gallery/store/gallerySelectors';
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
import { memo } from 'react';
import { useImageDTO } from 'services/api/endpoints/images';
@@ -15,8 +15,8 @@ import type { ImageDTO } from 'services/api/types';

export const GlobalImageHotkeys = memo(() => {
  useAssertSingleton('GlobalImageHotkeys');
  const imageName = useAppSelector(selectLastSelectedImage);
  const imageDTO = useImageDTO(imageName);
  const lastSelectedItem = useAppSelector(selectLastSelectedItem);
  const imageDTO = useImageDTO(lastSelectedItem?.type === 'image' ? lastSelectedItem.id : null);

  if (!imageDTO) {
    return null;
@@ -3,10 +3,12 @@ import ChangeBoardModal from 'features/changeBoardModal/components/ChangeBoardMo
import { CanvasPasteModal } from 'features/controlLayers/components/CanvasPasteModal';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { DeleteImageModal } from 'features/deleteImageModal/components/DeleteImageModal';
import { DeleteVideoModal } from 'features/deleteVideoModal/components/DeleteVideoModal';
import { FullscreenDropzone } from 'features/dnd/FullscreenDropzone';
import { DynamicPromptsModal } from 'features/dynamicPrompts/components/DynamicPromptsPreviewModal';
import DeleteBoardModal from 'features/gallery/components/Boards/DeleteBoardModal';
import { ImageContextMenu } from 'features/gallery/components/ImageContextMenu/ImageContextMenu';
import { ImageContextMenu } from 'features/gallery/components/ContextMenu/ImageContextMenu';
import { VideoContextMenu } from 'features/gallery/components/ContextMenu/VideoContextMenu';
import { ShareWorkflowModal } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/ShareWorkflowModal';
import { WorkflowLibraryModal } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryModal';
import { CancelAllExceptCurrentQueueItemConfirmationAlertDialog } from 'features/queue/components/CancelAllExceptCurrentQueueItemConfirmationAlertDialog';
@@ -31,6 +33,7 @@ export const GlobalModalIsolator = memo(() => {
  return (
    <>
      <DeleteImageModal />
      <DeleteVideoModal />
      <ChangeBoardModal />
      <DynamicPromptsModal />
      <StylePresetModal />
@@ -47,6 +50,7 @@ export const GlobalModalIsolator = memo(() => {
      <DeleteBoardModal />
      <GlobalImageHotkeys />
      <ImageContextMenu />
      <VideoContextMenu />
      <FullscreenDropzone />
      <VideosModal />
      <SaveWorkflowAsDialog />
@@ -1,16 +1,14 @@
import 'i18n';

import type { Middleware } from '@reduxjs/toolkit';
import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
import type { InvokeAIUIProps } from 'app/components/types';
import { $didStudioInit } from 'app/hooks/useStudioInitAction';
import type { LoggingOverrides } from 'app/logging/logger';
import { $loggingOverrides, configureLogging } from 'app/logging/logger';
import { addStorageListeners } from 'app/store/enhancers/reduxRemember/driver';
import { $accountSettingsLink } from 'app/store/nanostores/accountSettingsLink';
import { $accountTypeText } from 'app/store/nanostores/accountTypeText';
import { $authToken } from 'app/store/nanostores/authToken';
import { $baseUrl } from 'app/store/nanostores/baseUrl';
import { $customNavComponent } from 'app/store/nanostores/customNavComponent';
import type { CustomStarUi } from 'app/store/nanostores/customStarUI';
import { $customStarUI } from 'app/store/nanostores/customStarUI';
import { $isDebugging } from 'app/store/nanostores/isDebugging';
import { $logo } from 'app/store/nanostores/logo';
@@ -20,11 +18,10 @@ import { $projectId, $projectName, $projectUrl } from 'app/store/nanostores/proj
import { $queueId, DEFAULT_QUEUE_ID } from 'app/store/nanostores/queueId';
import { $store } from 'app/store/nanostores/store';
import { $toastMap } from 'app/store/nanostores/toastMap';
import { $videoUpsellComponent } from 'app/store/nanostores/videoUpsellComponent';
import { $whatsNew } from 'app/store/nanostores/whatsNew';
import { createStore } from 'app/store/store';
import type { PartialAppConfig } from 'app/types/invokeai';
import Loading from 'common/components/Loading/Loading';
import type { WorkflowSortOption, WorkflowTagCategory } from 'features/nodes/store/workflowLibrarySlice';
import {
$workflowLibraryCategoriesOptions,
$workflowLibrarySortOptions,
@@ -33,47 +30,13 @@ import {
DEFAULT_WORKFLOW_LIBRARY_SORT_OPTIONS,
DEFAULT_WORKFLOW_LIBRARY_TAG_CATEGORIES,
} from 'features/nodes/store/workflowLibrarySlice';
import type { WorkflowCategory } from 'features/nodes/types/workflow';
import type { ToastConfig } from 'features/toast/toast';
import type { PropsWithChildren, ReactNode } from 'react';
import React, { lazy, memo, useEffect, useLayoutEffect, useState } from 'react';
import { Provider } from 'react-redux';
import { addMiddleware, resetMiddlewares } from 'redux-dynamic-middlewares';
import { $socketOptions } from 'services/events/stores';
import type { ManagerOptions, SocketOptions } from 'socket.io-client';

const App = lazy(() => import('./App'));

interface Props extends PropsWithChildren {
apiUrl?: string;
openAPISchemaUrl?: string;
token?: string;
config?: PartialAppConfig;
customNavComponent?: ReactNode;
accountSettingsLink?: string;
middleware?: Middleware[];
projectId?: string;
projectName?: string;
projectUrl?: string;
queueId?: string;
studioInitAction?: StudioInitAction;
customStarUi?: CustomStarUi;
socketOptions?: Partial<ManagerOptions & SocketOptions>;
isDebugging?: boolean;
logo?: ReactNode;
toastMap?: Record<string, ToastConfig>;
whatsNew?: ReactNode[];
workflowCategories?: WorkflowCategory[];
workflowTagCategories?: WorkflowTagCategory[];
workflowSortOptions?: WorkflowSortOption[];
loggingOverrides?: LoggingOverrides;
/**
* If provided, overrides in-app navigation to the model manager
*/
onClickGoToModelManager?: () => void;
storagePersistThrottle?: number;
}

const InvokeAIUI = ({
apiUrl,
openAPISchemaUrl,
@@ -92,14 +55,16 @@ const InvokeAIUI = ({
isDebugging = false,
logo,
toastMap,
accountTypeText,
videoUpsellComponent,
workflowCategories,
workflowTagCategories,
workflowSortOptions,
loggingOverrides,
onClickGoToModelManager,
whatsNew,
storagePersistThrottle = 2000,
}: Props) => {
storagePersistDebounce = 300,
}: InvokeAIUIProps) => {
const [store, setStore] = useState<ReturnType<typeof createStore> | undefined>(undefined);
const [didRehydrate, setDidRehydrate] = useState(false);

@@ -180,6 +145,26 @@ const InvokeAIUI = ({
};
}, [customStarUi]);

useEffect(() => {
if (accountTypeText) {
$accountTypeText.set(accountTypeText);
}

return () => {
$accountTypeText.set('');
};
}, [accountTypeText]);

useEffect(() => {
if (videoUpsellComponent) {
$videoUpsellComponent.set(videoUpsellComponent);
}

return () => {
$videoUpsellComponent.set(undefined);
};
}, [videoUpsellComponent]);

useEffect(() => {
if (customNavComponent) {
$customNavComponent.set(customNavComponent);
@@ -318,7 +303,7 @@ const InvokeAIUI = ({
const onRehydrated = () => {
setDidRehydrate(true);
};
const store = createStore({ persist: true, persistThrottle: storagePersistThrottle, onRehydrated });
const store = createStore({ persist: true, persistDebounce: storagePersistDebounce, onRehydrated });
setStore(store);
$store.set(store);
if (import.meta.env.MODE === 'development') {
@@ -333,7 +318,7 @@ const InvokeAIUI = ({
window.$store = undefined;
}
};
}, [storagePersistThrottle]);
}, [storagePersistDebounce]);

if (!store || !didRehydrate) {
return <Loading />;
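Aside: this file's diff swaps throttled persistence (persistThrottle, default 2000 ms) for debounced persistence (persistDebounce, default 300 ms). A throttled persister writes at most once per interval even while edits keep arriving; a debounced persister waits for a quiet period and then writes once. A minimal sketch of the distinction, using generic helpers for illustration rather than the redux-remember internals:

// Illustrative helpers only; not the redux-remember implementation.
const throttle = (fn: () => void, ms: number) => {
  let last = 0;
  return () => {
    const now = Date.now();
    if (now - last >= ms) {
      last = now;
      fn(); // fires during a burst of changes, at most once per `ms`
    }
  };
};

const debounce = (fn: () => void, ms: number) => {
  let timer: ReturnType<typeof setTimeout> | undefined;
  return () => {
    clearTimeout(timer);
    timer = setTimeout(fn, ms); // fires once, `ms` after the burst of changes ends
  };
};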
invokeai/frontend/web/src/app/components/types.ts (new file, 43 lines)
@@ -0,0 +1,43 @@
import type { Middleware } from '@reduxjs/toolkit';
import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
import type { LoggingOverrides } from 'app/logging/logger';
import type { CustomStarUi } from 'app/store/nanostores/customStarUI';
import type { PartialAppConfig } from 'app/types/invokeai';
import type { SocketOptions } from 'dgram';
import type { WorkflowSortOption, WorkflowTagCategory } from 'features/nodes/store/workflowLibrarySlice';
import type { WorkflowCategory } from 'features/nodes/types/workflow';
import type { ToastConfig } from 'features/toast/toast';
import type { PropsWithChildren, ReactNode } from 'react';
import type { ManagerOptions } from 'socket.io-client';

export interface InvokeAIUIProps extends PropsWithChildren {
apiUrl?: string;
openAPISchemaUrl?: string;
token?: string;
config?: PartialAppConfig;
customNavComponent?: ReactNode;
accountSettingsLink?: string;
middleware?: Middleware[];
projectId?: string;
projectName?: string;
projectUrl?: string;
queueId?: string;
studioInitAction?: StudioInitAction;
customStarUi?: CustomStarUi;
socketOptions?: Partial<ManagerOptions & SocketOptions>;
isDebugging?: boolean;
logo?: ReactNode;
toastMap?: Record<string, ToastConfig>;
accountTypeText?: string;
videoUpsellComponent?: ReactNode;
whatsNew?: ReactNode[];
workflowCategories?: WorkflowCategory[];
workflowTagCategories?: WorkflowTagCategory[];
workflowSortOptions?: WorkflowSortOption[];
loggingOverrides?: LoggingOverrides;
/**
* If provided, overrides in-app navigation to the model manager
*/
onClickGoToModelManager?: () => void;
storagePersistDebounce?: number;
}
@@ -42,6 +42,7 @@ type StudioDestinationAction = _StudioInitAction<
| 'canvas'
| 'workflows'
| 'upscaling'
| 'video'
| 'viewAllWorkflows'
| 'viewAllWorkflowsRecommended'
| 'viewAllStylePresets';
@@ -118,7 +119,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
const metadata = getImageMetadataResult.value;
store.dispatch(canvasReset());
// This shows a toast
await MetadataUtils.recallAll(metadata, store);
await MetadataUtils.recallAllImageMetadata(metadata, store);
},
[store, t]
);
@@ -177,6 +178,10 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
// Go to the upscaling tab
navigationApi.switchToTab('upscaling');
break;
case 'video':
// Go to the video tab
await navigationApi.focusPanel('video', LAUNCHPAD_PANEL_ID);
break;
case 'viewAllWorkflows':
// Go to the workflows tab and open the workflow library modal
navigationApi.switchToTab('workflows');

@@ -26,6 +26,7 @@ export const zLogNamespace = z.enum([
'system',
'queue',
'workflows',
'video',
]);
export type LogNamespace = z.infer<typeof zLogNamespace>;

@@ -1,7 +1,7 @@
import { createAction } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/store';
import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
import { imageSelected } from 'features/gallery/store/gallerySlice';
import { selectLastSelectedItem } from 'features/gallery/store/gallerySelectors';
import { itemSelected } from 'features/gallery/store/gallerySlice';
import { imagesApi } from 'services/api/endpoints/images';

export const appStarted = createAction('app/appStarted');
@@ -18,11 +18,13 @@ export const addAppStartedListener = (startAppListening: AppStartListening) => {
const firstImageLoad = await take(imagesApi.endpoints.getImageNames.matchFulfilled);
if (firstImageLoad !== null) {
const [{ payload }] = firstImageLoad;
const selectedImage = selectLastSelectedImage(getState());
const selectedImage = selectLastSelectedItem(getState());
if (selectedImage) {
return;
}
dispatch(imageSelected(payload.image_names.at(0) ?? null));
if (payload.image_names[0]) {
dispatch(itemSelected({ type: 'image', id: payload.image_names[0] }));
}
}
},
});

@@ -1,8 +1,14 @@
import { isAnyOf } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/store';
import { selectGetImageNamesQueryArgs, selectSelectedBoardId } from 'features/gallery/store/gallerySelectors';
import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice';
import {
selectGalleryView,
selectGetImageNamesQueryArgs,
selectGetVideoIdsQueryArgs,
selectSelectedBoardId,
} from 'features/gallery/store/gallerySelectors';
import { boardIdSelected, galleryViewChanged, itemSelected } from 'features/gallery/store/gallerySlice';
import { imagesApi } from 'services/api/endpoints/images';
import { videosApi } from 'services/api/endpoints/videos';

export const addBoardIdSelectedListener = (startAppListening: AppStartListening) => {
startAppListening({
@@ -11,35 +17,65 @@ export const addBoardIdSelectedListener = (startAppListening: AppStartListening)
// Cancel any in-progress instances of this listener, we don't want to select an image from a previous board
cancelActiveListeners();

if (boardIdSelected.match(action) && action.payload.selectedImageName) {
// This action already has a selected image name, we trust it is valid
if (boardIdSelected.match(action) && action.payload.select) {
// This action already has a resource selection - skip the below auto-selection logic
return;
}

const state = getState();

const board_id = selectSelectedBoardId(state);
const view = selectGalleryView(state);

const queryArgs = { ...selectGetImageNamesQueryArgs(state), board_id };
if (view === 'images' || view === 'assets') {
const queryArgs = { ...selectGetImageNamesQueryArgs(state), board_id };
// wait until the board has some images - maybe it already has some from a previous fetch
// must use getState() to ensure we do not have stale state
const isSuccess = await condition(
() => imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).isSuccess,
5000
);

// wait until the board has some images - maybe it already has some from a previous fetch
// must use getState() to ensure we do not have stale state
const isSuccess = await condition(
() => imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).isSuccess,
5000
);
if (!isSuccess) {
dispatch(itemSelected(null));
return;
}

if (!isSuccess) {
dispatch(imageSelected(null));
return;
// the board was just changed - we can select the first image
const imageNames = imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).data?.image_names;

const imageToSelect = imageNames && imageNames.length > 0 ? imageNames[0] : null;

if (imageToSelect) {
dispatch(itemSelected({ type: 'image', id: imageToSelect }));
} else {
dispatch(itemSelected(null));
}
} else {
const queryArgs = { ...selectGetVideoIdsQueryArgs(state), board_id };
// wait until the board has some images - maybe it already has some from a previous fetch
// must use getState() to ensure we do not have stale state
const isSuccess = await condition(
() => videosApi.endpoints.getVideoIds.select(queryArgs)(getState()).isSuccess,
5000
);

if (!isSuccess) {
dispatch(itemSelected(null));
return;
}

// the board was just changed - we can select the first image
const videoIds = videosApi.endpoints.getVideoIds.select(queryArgs)(getState()).data?.video_ids;

const videoToSelect = videoIds && videoIds.length > 0 ? videoIds[0] : null;

if (videoToSelect) {
dispatch(itemSelected({ type: 'video', id: videoToSelect }));
} else {
dispatch(itemSelected(null));
}
}

// the board was just changed - we can select the first image
const imageNames = imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).data?.image_names;

const imageToSelect = imageNames?.at(0) ?? null;

dispatch(imageSelected(imageToSelect));
},
});
};

@@ -2,7 +2,7 @@ import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/store';
import { bboxSyncedToOptimalDimension, rgRefImageModelChanged } from 'features/controlLayers/store/canvasSlice';
import { buildSelectIsStaging, selectCanvasSessionId } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
import { loraIsEnabledChanged } from 'features/controlLayers/store/lorasSlice';
import { modelChanged, syncedToOptimalDimension, vaeSelected } from 'features/controlLayers/store/paramsSlice';
import { refImageModelChanged, selectReferenceImageEntities } from 'features/controlLayers/store/refImagesSlice';
import {
@@ -12,6 +12,7 @@ import {
} from 'features/controlLayers/store/selectors';
import { getEntityIdentifier } from 'features/controlLayers/store/types';
import { modelSelected } from 'features/parameters/store/actions';
import { SUPPORTS_REF_IMAGES_BASE_MODELS } from 'features/parameters/types/constants';
import { zParameterModel } from 'features/parameters/types/parameterSchemas';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
@@ -22,6 +23,7 @@ import {
isFluxKontextApiModelConfig,
isFluxKontextModelConfig,
isFluxReduxModelConfig,
isGemini2_5ModelConfig,
} from 'services/api/types';

const log = logger('models');
@@ -44,13 +46,13 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =

if (didBaseModelChange) {
// we may need to reset some incompatible submodels
let modelsCleared = 0;
let modelsUpdatedDisabledOrCleared = 0;

// handle incompatible loras
state.loras.loras.forEach((lora) => {
if (lora.model.base !== newBase) {
dispatch(loraDeleted({ id: lora.id }));
modelsCleared += 1;
dispatch(loraIsEnabledChanged({ id: lora.id, isEnabled: false }));
modelsUpdatedDisabledOrCleared += 1;
}
});

@@ -58,52 +60,57 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
const { vae } = state.params;
if (vae && vae.base !== newBase) {
dispatch(vaeSelected(null));
modelsCleared += 1;
modelsUpdatedDisabledOrCleared += 1;
}

// Handle incompatible reference image models - switch to first compatible model, with some smart logic
// to choose the best available model based on the new main model.
const allRefImageModels = selectGlobalRefImageModels(state).filter(({ base }) => base === newBase);
if (SUPPORTS_REF_IMAGES_BASE_MODELS.includes(newModel.base)) {
// Handle incompatible reference image models - switch to first compatible model, with some smart logic
// to choose the best available model based on the new main model.
const allRefImageModels = selectGlobalRefImageModels(state).filter(({ base }) => base === newBase);

let newGlobalRefImageModel = null;
let newGlobalRefImageModel = null;

// Certain models require the ref image model to be the same as the main model - others just need a matching
// base. Helper to grab the first exact match or the first available model if no exact match is found.
const exactMatchOrFirst = <T extends AnyModelConfig>(candidates: T[]): T | null =>
candidates.find(({ key }) => key === newModel.key) ?? candidates[0] ?? null;
// Certain models require the ref image model to be the same as the main model - others just need a matching
// base. Helper to grab the first exact match or the first available model if no exact match is found.
const exactMatchOrFirst = <T extends AnyModelConfig>(candidates: T[]): T | null =>
candidates.find(({ key }) => key === newModel.key) ?? candidates[0] ?? null;

// The only way we can differentiate between FLUX and FLUX Kontext is to check for "kontext" in the name
if (newModel.base === 'flux' && newModel.name.toLowerCase().includes('kontext')) {
const fluxKontextDevModels = allRefImageModels.filter(isFluxKontextModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(fluxKontextDevModels);
} else if (newModel.base === 'chatgpt-4o') {
const chatGPT4oModels = allRefImageModels.filter(isChatGPT4oModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(chatGPT4oModels);
} else if (newModel.base === 'flux-kontext') {
const fluxKontextApiModels = allRefImageModels.filter(isFluxKontextApiModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(fluxKontextApiModels);
} else if (newModel.base === 'flux') {
const fluxReduxModels = allRefImageModels.filter(isFluxReduxModelConfig);
newGlobalRefImageModel = fluxReduxModels[0] ?? null;
} else {
newGlobalRefImageModel = allRefImageModels[0] ?? null;
}
// The only way we can differentiate between FLUX and FLUX Kontext is to check for "kontext" in the name
if (newModel.base === 'flux' && newModel.name.toLowerCase().includes('kontext')) {
const fluxKontextDevModels = allRefImageModels.filter(isFluxKontextModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(fluxKontextDevModels);
} else if (newModel.base === 'chatgpt-4o') {
const chatGPT4oModels = allRefImageModels.filter(isChatGPT4oModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(chatGPT4oModels);
} else if (newModel.base === 'gemini-2.5') {
const gemini2_5Models = allRefImageModels.filter(isGemini2_5ModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(gemini2_5Models);
} else if (newModel.base === 'flux-kontext') {
const fluxKontextApiModels = allRefImageModels.filter(isFluxKontextApiModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(fluxKontextApiModels);
} else if (newModel.base === 'flux') {
const fluxReduxModels = allRefImageModels.filter(isFluxReduxModelConfig);
newGlobalRefImageModel = fluxReduxModels[0] ?? null;
} else {
newGlobalRefImageModel = allRefImageModels[0] ?? null;
}

// All ref image entities are updated to use the same new model
const refImageEntities = selectReferenceImageEntities(state);
for (const entity of refImageEntities) {
const shouldUpdateModel =
(entity.config.model && entity.config.model.base !== newBase) ||
(!entity.config.model && newGlobalRefImageModel);
// All ref image entities are updated to use the same new model
const refImageEntities = selectReferenceImageEntities(state);
for (const entity of refImageEntities) {
const shouldUpdateModel =
(entity.config.model && entity.config.model.base !== newBase) ||
(!entity.config.model && newGlobalRefImageModel);

if (shouldUpdateModel) {
dispatch(
refImageModelChanged({
id: entity.id,
modelConfig: newGlobalRefImageModel,
})
);
modelsCleared += 1;
if (shouldUpdateModel) {
dispatch(
refImageModelChanged({
id: entity.id,
modelConfig: newGlobalRefImageModel,
})
);
modelsUpdatedDisabledOrCleared += 1;
}
}
}

@@ -128,17 +135,17 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
modelConfig: newRegionalRefImageModel,
})
);
modelsCleared += 1;
modelsUpdatedDisabledOrCleared += 1;
}
}
}

if (modelsCleared > 0) {
if (modelsUpdatedDisabledOrCleared > 0) {
toast({
id: 'BASE_MODEL_CHANGED',
title: t('toast.baseModelChanged'),
description: t('toast.baseModelChangedCleared', {
count: modelsCleared,
count: modelsUpdatedDisabledOrCleared,
}),
status: 'warning',
});

@@ -13,12 +13,14 @@ import {
import { refImageModelChanged, selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { getEntityIdentifier, isFLUXReduxConfig, isIPAdapterConfig } from 'features/controlLayers/store/types';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { modelSelected } from 'features/parameters/store/actions';
import {
postProcessingModelChanged,
tileControlnetModelChanged,
upscaleModelChanged,
} from 'features/parameters/store/upscaleSlice';
import { videoModelChanged } from 'features/parameters/store/videoSlice';
import {
zParameterCLIPEmbedModel,
zParameterSpandrelImageToImageModel,
@@ -41,6 +43,7 @@ import {
isRefinerMainModelModelConfig,
isSpandrelImageToImageModelConfig,
isT5EncoderModelConfig,
isVideoModelConfig,
} from 'services/api/types';
import type { JsonObject } from 'type-fest';

@@ -81,6 +84,7 @@ export const addModelsLoadedListener = (startAppListening: AppStartListening) =>
handleCLIPEmbedModels(models, state, dispatch, log);
handleFLUXVAEModels(models, state, dispatch, log);
handleFLUXReduxModels(models, state, dispatch, log);
handleVideoModels(models, state, dispatch, log);
},
});
};
@@ -193,6 +197,22 @@ const handleLoRAModels: ModelHandler = (models, state, dispatch, log) => {
});
};

const handleVideoModels: ModelHandler = (models, state, dispatch, log) => {
const videoModels = models.filter(isVideoModelConfig);
const selectedVideoModel = state.video.videoModel;

if (selectedVideoModel && videoModels.some((m) => m.key === selectedVideoModel.key)) {
return;
}

const firstModel = videoModels[0] || null;
if (firstModel) {
log.debug({ firstModel }, 'No video model selected, selecting first available video model');
dispatch(videoModelChanged({ videoModel: zModelIdentifierField.parse(firstModel) }));
return;
}
};

const handleControlAdapterModels: ModelHandler = (models, state, dispatch, log) => {
const caModels = models.filter(isControlLayerModelConfig);
selectCanvasSlice(state).controlLayers.entities.forEach((entity) => {

@@ -0,0 +1,3 @@
import { atom } from 'nanostores';

export const $accountTypeText = atom<string>('');
@@ -1,6 +1,11 @@
import { atom } from 'nanostores';
import { atom, computed } from 'nanostores';

/**
* The user's auth token.
*/
export const $authToken = atom<string | undefined>();

/**
* The crossOrigin value to use for all image loading. Depends on whether the user is authenticated.
*/
export const $crossOrigin = computed($authToken, (token) => (token ? 'use-credentials' : 'anonymous'));
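The new $crossOrigin store is computed from $authToken, so anything observing it updates automatically when the token is set or cleared. A hypothetical consumer (the component name, markup, and import path are illustrative, not part of the diff) might look like:

// Hypothetical consumer of the derived store - illustrative only.
import { useStore } from '@nanostores/react';
import { $crossOrigin } from 'app/store/nanostores/authToken';

export const GalleryThumbnail = ({ src }: { src: string }) => {
  // 'use-credentials' while a token is set, 'anonymous' otherwise
  const crossOrigin = useStore($crossOrigin);
  return <img src={src} crossOrigin={crossOrigin} alt="" />;
};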
@@ -0,0 +1,4 @@
import { atom } from 'nanostores';
import type { ReactNode } from 'react';

export const $videoUpsellComponent = atom<ReactNode | undefined>(undefined);
@@ -33,6 +33,7 @@ import { nodesSliceConfig } from 'features/nodes/store/nodesSlice';
import { workflowLibrarySliceConfig } from 'features/nodes/store/workflowLibrarySlice';
import { workflowSettingsSliceConfig } from 'features/nodes/store/workflowSettingsSlice';
import { upscaleSliceConfig } from 'features/parameters/store/upscaleSlice';
import { videoSliceConfig } from 'features/parameters/store/videoSlice';
import { queueSliceConfig } from 'features/queue/store/queueSlice';
import { stylePresetSliceConfig } from 'features/stylePresets/store/stylePresetSlice';
import { configSliceConfig } from 'features/system/store/configSlice';
@@ -78,6 +79,7 @@ const SLICE_CONFIGS = {
[systemSliceConfig.slice.reducerPath]: systemSliceConfig,
[uiSliceConfig.slice.reducerPath]: uiSliceConfig,
[upscaleSliceConfig.slice.reducerPath]: upscaleSliceConfig,
[videoSliceConfig.slice.reducerPath]: videoSliceConfig,
[workflowLibrarySliceConfig.slice.reducerPath]: workflowLibrarySliceConfig,
[workflowSettingsSliceConfig.slice.reducerPath]: workflowSettingsSliceConfig,
};
@@ -111,6 +113,7 @@ const ALL_REDUCERS = {
[systemSliceConfig.slice.reducerPath]: systemSliceConfig.slice.reducer,
[uiSliceConfig.slice.reducerPath]: uiSliceConfig.slice.reducer,
[upscaleSliceConfig.slice.reducerPath]: upscaleSliceConfig.slice.reducer,
[videoSliceConfig.slice.reducerPath]: videoSliceConfig.slice.reducer,
[workflowLibrarySliceConfig.slice.reducerPath]: workflowLibrarySliceConfig.slice.reducer,
[workflowSettingsSliceConfig.slice.reducerPath]: workflowSettingsSliceConfig.slice.reducer,
};
@@ -184,7 +187,7 @@ const PERSISTED_KEYS = Object.values(SLICE_CONFIGS)
.filter((sliceConfig) => !!sliceConfig.persistConfig)
.map((sliceConfig) => sliceConfig.slice.reducerPath);

export const createStore = (options?: { persist?: boolean; persistThrottle?: number; onRehydrated?: () => void }) => {
export const createStore = (options?: { persist?: boolean; persistDebounce?: number; onRehydrated?: () => void }) => {
const store = configureStore({
reducer: rememberedRootReducer,
middleware: (getDefaultMiddleware) =>
@@ -204,7 +207,7 @@ export const createStore = (options?: { persist?: boolean; persistThrottle?: num
if (options?.persist) {
return enhancers.prepend(
rememberEnhancer(reduxRememberDriver, PERSISTED_KEYS, {
persistThrottle: options?.persistThrottle ?? 2000,
persistDebounce: options?.persistDebounce ?? 2000,
serialize,
unserialize,
prefix: '',

@@ -58,6 +58,7 @@ const zNumericalParameterConfig = z.object({
fineStep: z.number().default(8),
coarseStep: z.number().default(64),
});
export type NumericalParameterConfig = z.infer<typeof zNumericalParameterConfig>;

/**
* Configuration options for the InvokeAI UI.
@@ -79,6 +80,7 @@ export const zAppConfig = z.object({
allowClientSideUpload: z.boolean(),
allowPublishWorkflows: z.boolean(),
allowPromptExpansion: z.boolean(),
allowVideo: z.boolean(),
disabledTabs: z.array(zTabName),
disabledFeatures: z.array(zAppFeature),
disabledSDFeatures: z.array(zSDFeature),
@@ -139,8 +141,9 @@ export const getDefaultAppConfig = (): AppConfig => ({
allowClientSideUpload: false,
allowPublishWorkflows: false,
allowPromptExpansion: false,
allowVideo: false, // used to determine if video is enabled vs upsell
shouldShowCredits: false,
disabledTabs: [],
disabledTabs: ['video'], // used to determine if video functionality is visible
disabledFeatures: ['lightbox', 'faceRestore', 'batches'] satisfies AppFeature[],
disabledSDFeatures: ['variation', 'symmetry', 'hires', 'perlinNoise', 'noiseThreshold'] satisfies SDFeature[],
sd: {

@@ -37,6 +37,7 @@ const REGION_NAMES = [
'workflows',
'progress',
'settings',
'video',
] as const;
/**
* The names of the focus regions.

@@ -6,13 +6,13 @@ import { toast } from 'features/toast/toast';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';

export const useDownloadImage = () => {
export const useDownloadItem = () => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const authToken = useStore($authToken);

const downloadImage = useCallback(
async (image_url: string, image_name: string) => {
const downloadItem = useCallback(
async (item_url: string, item_id: string) => {
try {
const requestOpts = authToken
? {
@@ -21,7 +21,7 @@ export const useDownloadImage = () => {
},
}
: {};
const blob = await fetch(image_url, requestOpts).then((resp) => resp.blob());
const blob = await fetch(item_url, requestOpts).then((resp) => resp.blob());
if (!blob) {
throw new Error('Unable to create Blob');
}
@@ -30,7 +30,7 @@ export const useDownloadImage = () => {
const a = document.createElement('a');
a.style.display = 'none';
a.href = url;
a.download = image_name;
a.download = item_id;
document.body.appendChild(a);
a.click();
window.URL.revokeObjectURL(url);
@@ -47,5 +47,5 @@ export const useDownloadImage = () => {
[t, dispatch, authToken]
);

return { downloadImage };
return { downloadItem };
};

@@ -1,5 +1,6 @@
import { useAppStore } from 'app/store/storeHooks';
import { useDeleteImageModalApi } from 'features/deleteImageModal/store/state';
import { useDeleteVideoModalApi } from 'features/deleteVideoModal/store/state';
import { selectSelection } from 'features/gallery/store/gallerySelectors';
import { useClearQueue } from 'features/queue/hooks/useClearQueue';
import { useDeleteCurrentQueueItem } from 'features/queue/hooks/useDeleteCurrentQueueItem';
@@ -12,6 +13,7 @@ import { getFocusedRegion } from './focus';

export const useGlobalHotkeys = () => {
const { dispatch, getState } = useAppStore();
const isVideoEnabled = useFeatureStatus('video');
const isModelManagerEnabled = useFeatureStatus('modelManager');
const queue = useInvoke();

@@ -92,6 +94,18 @@ export const useGlobalHotkeys = () => {
dependencies: [dispatch],
});

useRegisteredHotkeys({
id: 'selectVideoTab',
category: 'app',
callback: () => {
navigationApi.switchToTab('video');
},
options: {
enabled: isVideoEnabled,
},
dependencies: [dispatch],
});

useRegisteredHotkeys({
id: 'selectWorkflowsTab',
category: 'app',
@@ -123,6 +137,8 @@ export const useGlobalHotkeys = () => {
});

const deleteImageModalApi = useDeleteImageModalApi();
const deleteVideoModalApi = useDeleteVideoModalApi();

useRegisteredHotkeys({
id: 'deleteSelection',
category: 'gallery',
@@ -135,7 +151,13 @@ export const useGlobalHotkeys = () => {
if (!selection.length) {
return;
}
deleteImageModalApi.delete(selection);
if (selection.every(({ type }) => type === 'image')) {
deleteImageModalApi.delete(selection.map((s) => s.id));
} else if (selection.every(({ type }) => type === 'video')) {
deleteVideoModalApi.delete(selection.map((s) => s.id));
} else {
// no-op, we expect selections to always be only images or only video
}
},
dependencies: [getState, deleteImageModalApi],
});
invokeai/frontend/web/src/common/util/randomFloat.ts (new file, 5 lines)
@@ -0,0 +1,5 @@
const randomFloat = (min: number, max: number): number => {
return Math.random() * (max - min + Number.EPSILON) + min;
};

export default randomFloat;
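A brief usage note on the new helper: Math.random() returns a value in [0, 1), so plain min + Math.random() * (max - min) can never produce max itself; widening the span by Number.EPSILON nudges the upper bound to be effectively attainable. A sketch of how it might be called (values are illustrative):

import randomFloat from 'common/util/randomFloat';

// Without the epsilon, the result would always be strictly less than max.
// With it, max becomes effectively reachable (give or take one float ulp).
const jitter = randomFloat(0.25, 1); // e.g. 0.7314159...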
@@ -8,16 +8,23 @@ import {
|
||||
isModalOpenChanged,
|
||||
selectChangeBoardModalSlice,
|
||||
} from 'features/changeBoardModal/store/slice';
|
||||
import { selectSelectedBoardId } from 'features/gallery/store/gallerySelectors';
|
||||
import { memo, useCallback, useMemo, useState } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { useListAllBoardsQuery } from 'services/api/endpoints/boards';
|
||||
import { useAddImagesToBoardMutation, useRemoveImagesFromBoardMutation } from 'services/api/endpoints/images';
|
||||
import { useAddVideosToBoardMutation, useRemoveVideosFromBoardMutation } from 'services/api/endpoints/videos';
|
||||
|
||||
const selectImagesToChange = createSelector(
|
||||
selectChangeBoardModalSlice,
|
||||
(changeBoardModal) => changeBoardModal.image_names
|
||||
);
|
||||
|
||||
const selectVideosToChange = createSelector(
|
||||
selectChangeBoardModalSlice,
|
||||
(changeBoardModal) => changeBoardModal.video_ids
|
||||
);
|
||||
|
||||
const selectIsModalOpen = createSelector(
|
||||
selectChangeBoardModalSlice,
|
||||
(changeBoardModal) => changeBoardModal.isModalOpen
|
||||
@@ -26,24 +33,32 @@ const selectIsModalOpen = createSelector(
|
||||
const ChangeBoardModal = () => {
|
||||
useAssertSingleton('ChangeBoardModal');
|
||||
const dispatch = useAppDispatch();
|
||||
const [selectedBoard, setSelectedBoard] = useState<string | null>();
|
||||
const currentBoardId = useAppSelector(selectSelectedBoardId);
|
||||
const [selectedBoardId, setSelectedBoardId] = useState<string | null>();
|
||||
const { data: boards, isFetching } = useListAllBoardsQuery({ include_archived: true });
|
||||
const isModalOpen = useAppSelector(selectIsModalOpen);
|
||||
const imagesToChange = useAppSelector(selectImagesToChange);
|
||||
const videosToChange = useAppSelector(selectVideosToChange);
|
||||
const [addImagesToBoard] = useAddImagesToBoardMutation();
|
||||
const [removeImagesFromBoard] = useRemoveImagesFromBoardMutation();
|
||||
const [addVideosToBoard] = useAddVideosToBoardMutation();
|
||||
const [removeVideosFromBoard] = useRemoveVideosFromBoardMutation();
|
||||
const { t } = useTranslation();
|
||||
|
||||
const options = useMemo<ComboboxOption[]>(() => {
|
||||
return [{ label: t('boards.uncategorized'), value: 'none' }].concat(
|
||||
(boards ?? []).map((board) => ({
|
||||
label: board.board_name,
|
||||
value: board.board_id,
|
||||
}))
|
||||
);
|
||||
}, [boards, t]);
|
||||
return [{ label: t('boards.uncategorized'), value: 'none' }]
|
||||
.concat(
|
||||
(boards ?? [])
|
||||
.map((board) => ({
|
||||
label: board.board_name,
|
||||
value: board.board_id,
|
||||
}))
|
||||
.sort((a, b) => a.label.localeCompare(b.label))
|
||||
)
|
||||
.filter((board) => board.value !== currentBoardId);
|
||||
}, [boards, currentBoardId, t]);
|
||||
|
||||
const value = useMemo(() => options.find((o) => o.value === selectedBoard), [options, selectedBoard]);
|
||||
const value = useMemo(() => options.find((o) => o.value === selectedBoardId), [options, selectedBoardId]);
|
||||
|
||||
const handleClose = useCallback(() => {
|
||||
dispatch(changeBoardReset());
|
||||
@@ -51,27 +66,47 @@ const ChangeBoardModal = () => {
|
||||
}, [dispatch]);
|
||||
|
||||
const handleChangeBoard = useCallback(() => {
|
||||
if (!imagesToChange.length || !selectedBoard) {
|
||||
if (!selectedBoardId || (imagesToChange.length === 0 && videosToChange.length === 0)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (selectedBoard === 'none') {
|
||||
removeImagesFromBoard({ image_names: imagesToChange });
|
||||
} else {
|
||||
addImagesToBoard({
|
||||
image_names: imagesToChange,
|
||||
board_id: selectedBoard,
|
||||
});
|
||||
if (imagesToChange.length) {
|
||||
if (selectedBoardId === 'none') {
|
||||
removeImagesFromBoard({ image_names: imagesToChange });
|
||||
} else {
|
||||
addImagesToBoard({
|
||||
image_names: imagesToChange,
|
||||
board_id: selectedBoardId,
|
||||
});
|
||||
}
|
||||
}
|
||||
if (videosToChange.length) {
|
||||
if (selectedBoardId === 'none') {
|
||||
removeVideosFromBoard({ video_ids: videosToChange });
|
||||
} else {
|
||||
addVideosToBoard({
|
||||
video_ids: videosToChange,
|
||||
board_id: selectedBoardId,
|
||||
});
|
||||
}
|
||||
}
|
||||
setSelectedBoard(null);
|
||||
dispatch(changeBoardReset());
|
||||
}, [addImagesToBoard, dispatch, imagesToChange, removeImagesFromBoard, selectedBoard]);
|
||||
}, [
|
||||
addImagesToBoard,
|
||||
dispatch,
|
||||
imagesToChange,
|
||||
videosToChange,
|
||||
removeImagesFromBoard,
|
||||
selectedBoardId,
|
||||
addVideosToBoard,
|
||||
removeVideosFromBoard,
|
||||
]);
|
||||
|
||||
const onChange = useCallback<ComboboxOnChange>((v) => {
|
||||
if (!v) {
|
||||
return;
|
||||
}
|
||||
setSelectedBoard(v.value);
|
||||
setSelectedBoardId(v.value);
|
||||
}, []);
|
||||
|
||||
return (
|
||||
@@ -86,9 +121,14 @@ const ChangeBoardModal = () => {
|
||||
>
|
||||
<Flex flexDir="column" gap={4}>
|
||||
<Text>
|
||||
{t('boards.movingImagesToBoard', {
|
||||
count: imagesToChange.length,
|
||||
})}
|
||||
{imagesToChange.length > 0 &&
|
||||
t('boards.movingImagesToBoard', {
|
||||
count: imagesToChange.length,
|
||||
})}
|
||||
{videosToChange.length > 0 &&
|
||||
t('boards.movingVideosToBoard', {
|
||||
count: videosToChange.length,
|
||||
})}
|
||||
:
|
||||
</Text>
|
||||
<FormControl isDisabled={isFetching}>
|
||||
|
||||
@@ -7,6 +7,7 @@ import z from 'zod';
|
||||
const zChangeBoardModalState = z.object({
|
||||
isModalOpen: z.boolean().default(false),
|
||||
image_names: z.array(z.string()).default(() => []),
|
||||
video_ids: z.array(z.string()).default(() => []),
|
||||
});
|
||||
type ChangeBoardModalState = z.infer<typeof zChangeBoardModalState>;
|
||||
|
||||
@@ -22,6 +23,9 @@ const slice = createSlice({
|
||||
imagesToChangeSelected: (state, action: PayloadAction<string[]>) => {
|
||||
state.image_names = action.payload;
|
||||
},
|
||||
videosToChangeSelected: (state, action: PayloadAction<string[]>) => {
|
||||
state.video_ids = action.payload;
|
||||
},
|
||||
changeBoardReset: (state) => {
|
||||
state.image_names = [];
|
||||
state.isModalOpen = false;
|
||||
@@ -29,7 +33,7 @@ const slice = createSlice({
|
||||
},
|
||||
});
|
||||
|
||||
export const { isModalOpenChanged, imagesToChangeSelected, changeBoardReset } = slice.actions;
|
||||
export const { isModalOpenChanged, imagesToChangeSelected, videosToChangeSelected, changeBoardReset } = slice.actions;
|
||||
|
||||
export const selectChangeBoardModalSlice = (state: RootState) => state.changeBoardModal;
|
||||
|
||||
|
||||
@@ -0,0 +1,24 @@
|
||||
import { Alert, AlertIcon, AlertTitle } from '@invoke-ai/ui-library';
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
|
||||
import { memo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
export const CanvasAlertsBboxVisibility = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const canvasManager = useCanvasManager();
|
||||
const isBboxHidden = useStore(canvasManager.tool.tools.bbox.$isBboxHidden);
|
||||
|
||||
if (!isBboxHidden) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<Alert status="warning" borderRadius="base" fontSize="sm" shadow="md" w="fit-content">
|
||||
<AlertIcon />
|
||||
<AlertTitle>{t('controlLayers.warnings.bboxHidden')}</AlertTitle>
|
||||
</Alert>
|
||||
);
|
||||
});
|
||||
|
||||
CanvasAlertsBboxVisibility.displayName = 'CanvasAlertsBboxVisibility';
|
||||
@@ -10,13 +10,19 @@ import type {
|
||||
ChatGPT4oModelConfig,
|
||||
FLUXKontextModelConfig,
|
||||
FLUXReduxModelConfig,
|
||||
Gemini2_5ModelConfig,
|
||||
IPAdapterModelConfig,
|
||||
} from 'services/api/types';
|
||||
|
||||
type Props = {
|
||||
modelKey: string | null;
|
||||
onChangeModel: (
|
||||
modelConfig: IPAdapterModelConfig | FLUXReduxModelConfig | ChatGPT4oModelConfig | FLUXKontextModelConfig
|
||||
modelConfig:
|
||||
| IPAdapterModelConfig
|
||||
| FLUXReduxModelConfig
|
||||
| ChatGPT4oModelConfig
|
||||
| FLUXKontextModelConfig
|
||||
| Gemini2_5ModelConfig
|
||||
) => void;
|
||||
};
|
||||
|
||||
@@ -28,7 +34,13 @@ export const RefImageModel = memo(({ modelKey, onChangeModel }: Props) => {
|
||||
|
||||
const _onChangeModel = useCallback(
|
||||
(
|
||||
modelConfig: IPAdapterModelConfig | FLUXReduxModelConfig | ChatGPT4oModelConfig | FLUXKontextModelConfig | null
|
||||
modelConfig:
|
||||
| IPAdapterModelConfig
|
||||
| FLUXReduxModelConfig
|
||||
| ChatGPT4oModelConfig
|
||||
| FLUXKontextModelConfig
|
||||
| Gemini2_5ModelConfig
|
||||
| null
|
||||
) => {
|
||||
if (!modelConfig) {
|
||||
return;
|
||||
@@ -39,7 +51,14 @@ export const RefImageModel = memo(({ modelKey, onChangeModel }: Props) => {
|
||||
);
|
||||
|
||||
const getIsDisabled = useCallback(
|
||||
(model: IPAdapterModelConfig | FLUXReduxModelConfig | ChatGPT4oModelConfig | FLUXKontextModelConfig): boolean => {
|
||||
(
|
||||
model:
|
||||
| IPAdapterModelConfig
|
||||
| FLUXReduxModelConfig
|
||||
| ChatGPT4oModelConfig
|
||||
| FLUXKontextModelConfig
|
||||
| Gemini2_5ModelConfig
|
||||
): boolean => {
|
||||
return !areBasesCompatibleForRefImage(mainModelConfig, model);
|
||||
},
|
||||
[mainModelConfig]
|
||||
|
||||
@@ -12,7 +12,7 @@ import {
|
||||
} from 'features/controlLayers/store/canvasStagingAreaSlice';
|
||||
import { selectBboxRect, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
|
||||
import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
|
||||
import { imageNameToImageObject } from 'features/controlLayers/store/util';
|
||||
import { imageDTOToImageObject } from 'features/controlLayers/store/util';
|
||||
import type { PropsWithChildren } from 'react';
|
||||
import { createContext, memo, useContext, useEffect, useMemo, useState } from 'react';
|
||||
import { getImageDTOSafe } from 'services/api/endpoints/images';
|
||||
@@ -71,8 +71,8 @@ export const StagingAreaContextProvider = memo(({ children, sessionId }: PropsWi
|
||||
},
|
||||
onAccept: (item, imageDTO) => {
|
||||
const bboxRect = selectBboxRect(store.getState());
|
||||
const { x, y, width, height } = bboxRect;
|
||||
const imageObject = imageNameToImageObject(imageDTO.image_name, { width, height });
|
||||
const { x, y } = bboxRect;
|
||||
const imageObject = imageDTOToImageObject(imageDTO);
|
||||
const selectedEntityIdentifier = selectSelectedEntityIdentifier(store.getState());
|
||||
const overrides: Partial<CanvasRasterLayerState> = {
|
||||
position: { x, y },
|
||||
|
||||
@@ -12,29 +12,58 @@ import { createSelector } from '@reduxjs/toolkit';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import RgbaColorPicker from 'common/components/ColorPicker/RgbaColorPicker';
|
||||
import { rgbaColorToString } from 'common/util/colorCodeTransformers';
|
||||
import { selectCanvasSettingsSlice, settingsColorChanged } from 'features/controlLayers/store/canvasSettingsSlice';
|
||||
import {
|
||||
selectCanvasSettingsSlice,
|
||||
settingsActiveColorToggled,
|
||||
settingsBgColorChanged,
|
||||
settingsColorsSetToDefault,
|
||||
settingsFgColorChanged,
|
||||
} from 'features/controlLayers/store/canvasSettingsSlice';
|
||||
import type { RgbaColor } from 'features/controlLayers/store/types';
|
||||
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
const selectColor = createSelector(selectCanvasSettingsSlice, (settings) => settings.color);
|
||||
const selectActiveColor = createSelector(selectCanvasSettingsSlice, (settings) => settings.activeColor);
|
||||
const selectBgColor = createSelector(selectCanvasSettingsSlice, (settings) => settings.bgColor);
|
||||
const selectFgColor = createSelector(selectCanvasSettingsSlice, (settings) => settings.fgColor);
|
||||
|
||||
export const ToolColorPicker = memo(() => {
|
||||
export const ToolFillColorPicker = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const fill = useAppSelector(selectColor);
|
||||
const activeColorType = useAppSelector(selectActiveColor);
|
||||
const bgColor = useAppSelector(selectBgColor);
|
||||
const fgColor = useAppSelector(selectFgColor);
|
||||
const { activeColor, tooltip, bgColorzIndex, fgColorzIndex } = useMemo(() => {
|
||||
if (activeColorType === 'bgColor') {
|
||||
return { activeColor: bgColor, tooltip: t('controlLayers.fill.bgFillColor'), bgColorzIndex: 2, fgColorzIndex: 1 };
|
||||
} else {
|
||||
return { activeColor: fgColor, tooltip: t('controlLayers.fill.fgFillColor'), bgColorzIndex: 1, fgColorzIndex: 2 };
|
||||
}
|
||||
}, [activeColorType, bgColor, fgColor, t]);
|
||||
const dispatch = useAppDispatch();
|
||||
const onChange = useCallback(
|
||||
const onColorChange = useCallback(
|
||||
(color: RgbaColor) => {
|
||||
dispatch(settingsColorChanged(color));
|
||||
if (activeColorType === 'bgColor') {
|
||||
dispatch(settingsBgColorChanged(color));
|
||||
} else {
|
||||
dispatch(settingsFgColorChanged(color));
|
||||
}
|
||||
},
|
||||
[dispatch]
|
||||
[activeColorType, dispatch]
|
||||
);
|
||||
|
||||
useRegisteredHotkeys({
|
||||
id: 'setFillToWhite',
|
||||
id: 'setFillColorsToDefault',
|
||||
category: 'canvas',
|
||||
callback: () => dispatch(settingsColorChanged({ r: 255, g: 255, b: 255, a: 1 })),
|
||||
callback: () => dispatch(settingsColorsSetToDefault()),
|
||||
options: { preventDefault: true },
|
||||
dependencies: [dispatch],
|
||||
});
|
||||
|
||||
useRegisteredHotkeys({
|
||||
id: 'toggleFillColor',
|
||||
category: 'canvas',
|
||||
callback: () => dispatch(settingsActiveColorToggled()),
|
||||
options: { preventDefault: true },
|
||||
dependencies: [dispatch],
|
||||
});
|
||||
@@ -43,15 +72,31 @@ export const ToolColorPicker = memo(() => {
|
||||
<Popover isLazy>
|
||||
<PopoverTrigger>
|
||||
<Flex role="button" aria-label={t('controlLayers.fill.fillColor')} tabIndex={-1} w={8} h={8}>
|
||||
<Tooltip label={t('controlLayers.fill.fillColor')}>
|
||||
<Flex w="full" h="full" alignItems="center" justifyContent="center">
|
||||
<Tooltip label={tooltip}>
|
||||
<Flex alignItems="center" justifyContent="center" position="relative" w="full" h="full">
|
||||
<Box
|
||||
borderRadius="full"
|
||||
borderColor="base.600"
|
||||
w={6}
|
||||
h={6}
|
||||
borderWidth={2}
|
||||
bg={rgbaColorToString(fill)}
|
||||
bg={rgbaColorToString(bgColor)}
|
||||
position="absolute"
|
||||
top="0"
|
||||
left="0"
|
||||
zIndex={bgColorzIndex}
|
||||
/>
|
||||
<Box
|
||||
borderRadius="full"
|
||||
borderColor="base.600"
|
||||
w={6}
|
||||
h={6}
|
||||
borderWidth={2}
|
||||
bg={rgbaColorToString(fgColor)}
|
||||
position="absolute"
|
||||
top="2"
|
||||
left="2"
|
||||
zIndex={fgColorzIndex}
|
||||
/>
|
||||
</Flex>
|
||||
</Tooltip>
|
||||
@@ -60,11 +105,11 @@ export const ToolColorPicker = memo(() => {
|
||||
<PopoverContent>
|
||||
<PopoverArrow />
|
||||
<PopoverBody minH={64}>
|
||||
<RgbaColorPicker color={fill} onChange={onChange} withNumberInput withSwatches />
|
||||
<RgbaColorPicker color={activeColor} onChange={onColorChange} withNumberInput withSwatches />
|
||||
</PopoverBody>
|
||||
</PopoverContent>
|
||||
</Popover>
|
||||
);
|
||||
});
|
||||
|
||||
ToolColorPicker.displayName = 'ToolFillColorPicker';
|
||||
ToolFillColorPicker.displayName = 'ToolFillColorPicker';
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { Divider, Flex } from '@invoke-ai/ui-library';
|
||||
import { CanvasSettingsPopover } from 'features/controlLayers/components/Settings/CanvasSettingsPopover';
|
||||
import { ToolColorPicker } from 'features/controlLayers/components/Tool/ToolFillColorPicker';
|
||||
import { ToolFillColorPicker } from 'features/controlLayers/components/Tool/ToolFillColorPicker';
|
||||
import { ToolSettings } from 'features/controlLayers/components/Tool/ToolSettings';
|
||||
import { CanvasToolbarFitBboxToLayersButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarFitBboxToLayersButton';
|
||||
import { CanvasToolbarFitBboxToMasksButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarFitBboxToMasksButton';
|
||||
@@ -15,6 +15,7 @@ import { useCanvasEntityQuickSwitchHotkey } from 'features/controlLayers/hooks/u
|
||||
import { useCanvasFilterHotkey } from 'features/controlLayers/hooks/useCanvasFilterHotkey';
|
||||
import { useCanvasInvertMaskHotkey } from 'features/controlLayers/hooks/useCanvasInvertMaskHotkey';
|
||||
import { useCanvasResetLayerHotkey } from 'features/controlLayers/hooks/useCanvasResetLayerHotkey';
|
||||
import { useCanvasToggleBboxHotkey } from 'features/controlLayers/hooks/useCanvasToggleBboxHotkey';
|
||||
import { useCanvasToggleNonRasterLayersHotkey } from 'features/controlLayers/hooks/useCanvasToggleNonRasterLayersHotkey';
|
||||
import { useCanvasTransformHotkey } from 'features/controlLayers/hooks/useCanvasTransformHotkey';
|
||||
import { useCanvasUndoRedoHotkeys } from 'features/controlLayers/hooks/useCanvasUndoRedoHotkeys';
|
||||
@@ -31,10 +32,11 @@ export const CanvasToolbar = memo(() => {
|
||||
useCanvasFilterHotkey();
|
||||
useCanvasInvertMaskHotkey();
|
||||
useCanvasToggleNonRasterLayersHotkey();
|
||||
useCanvasToggleBboxHotkey();
|
||||
|
||||
return (
|
||||
<Flex w="full" gap={2} alignItems="center" px={2}>
|
||||
<ToolColorPicker />
|
||||
<ToolFillColorPicker />
|
||||
<ToolSettings />
|
||||
<Flex alignItems="center" h="full" flexGrow={1} justifyContent="flex-end">
|
||||
<CanvasToolbarScale />
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import type { AppGetState } from 'app/store/store';
|
||||
import { useAppDispatch, useAppStore } from 'app/store/storeHooks';
|
||||
import { useAppDispatch, useAppSelector, useAppStore } from 'app/store/storeHooks';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { getPrefixedId } from 'features/controlLayers/konva/util';
|
||||
import {
|
||||
@@ -16,7 +16,11 @@ import {
|
||||
rgRefImageAdded,
|
||||
} from 'features/controlLayers/store/canvasSlice';
|
||||
import { selectBase, selectMainModelConfig } from 'features/controlLayers/store/paramsSlice';
|
||||
import { selectCanvasSlice, selectEntity } from 'features/controlLayers/store/selectors';
|
||||
import {
|
||||
selectCanvasSlice,
|
||||
selectEntity,
|
||||
selectSelectedEntityIdentifier,
|
||||
} from 'features/controlLayers/store/selectors';
|
||||
import type {
|
||||
CanvasEntityIdentifier,
|
||||
CanvasRegionalGuidanceState,
|
||||
@@ -24,6 +28,7 @@ import type {
|
||||
ControlLoRAConfig,
|
||||
ControlNetConfig,
|
||||
FluxKontextReferenceImageConfig,
|
||||
Gemini2_5ReferenceImageConfig,
|
||||
IPAdapterConfig,
|
||||
T2IAdapterConfig,
|
||||
} from 'features/controlLayers/store/types';
|
||||
@@ -31,6 +36,7 @@ import {
|
||||
initialChatGPT4oReferenceImage,
|
||||
initialControlNet,
|
||||
initialFluxKontextReferenceImage,
|
||||
initialGemini2_5ReferenceImage,
|
||||
initialIPAdapter,
|
||||
initialT2IAdapter,
|
||||
} from 'features/controlLayers/store/util';
|
||||
@@ -72,7 +78,11 @@ export const selectDefaultControlAdapter = createSelector(
|
||||
|
||||
export const getDefaultRefImageConfig = (
|
||||
getState: AppGetState
|
||||
): IPAdapterConfig | ChatGPT4oReferenceImageConfig | FluxKontextReferenceImageConfig => {
|
||||
):
|
||||
| IPAdapterConfig
|
||||
| ChatGPT4oReferenceImageConfig
|
||||
| FluxKontextReferenceImageConfig
|
||||
| Gemini2_5ReferenceImageConfig => {
|
||||
const state = getState();
|
||||
|
||||
const mainModelConfig = selectMainModelConfig(state);
|
||||
@@ -93,6 +103,12 @@ export const getDefaultRefImageConfig = (
|
||||
return config;
|
||||
}
|
||||
|
||||
if (base === 'gemini-2.5') {
|
||||
const config = deepClone(initialGemini2_5ReferenceImage);
|
||||
config.model = zModelIdentifierField.parse(mainModelConfig);
|
||||
return config;
|
||||
}
|
||||
|
||||
// Otherwise, find the first compatible IP Adapter model.
|
||||
const modelConfig = ipAdapterModelConfigs.find((m) => m.base === base);
|
||||
|
||||
@@ -136,37 +152,49 @@ export const getDefaultRegionalGuidanceRefImageConfig = (getState: AppGetState):
|
||||
|
||||
export const useAddControlLayer = () => {
|
||||
const dispatch = useAppDispatch();
|
||||
const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
|
||||
const selectedControlLayer =
|
||||
selectedEntityIdentifier?.type === 'control_layer' ? selectedEntityIdentifier.id : undefined;
|
||||
const func = useCallback(() => {
|
||||
const overrides = { controlAdapter: deepClone(initialControlNet) };
|
||||
dispatch(controlLayerAdded({ isSelected: true, overrides }));
|
||||
}, [dispatch]);
|
||||
dispatch(controlLayerAdded({ isSelected: true, overrides, addAfter: selectedControlLayer }));
|
||||
}, [dispatch, selectedControlLayer]);
|
||||
|
||||
return func;
|
||||
};
|
||||
|
||||
export const useAddRasterLayer = () => {
|
||||
const dispatch = useAppDispatch();
|
||||
const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
|
||||
const selectedRasterLayer =
|
||||
selectedEntityIdentifier?.type === 'raster_layer' ? selectedEntityIdentifier.id : undefined;
|
||||
const func = useCallback(() => {
|
||||
dispatch(rasterLayerAdded({ isSelected: true }));
|
||||
}, [dispatch]);
|
||||
dispatch(rasterLayerAdded({ isSelected: true, addAfter: selectedRasterLayer }));
|
||||
}, [dispatch, selectedRasterLayer]);
|
||||
|
||||
return func;
|
||||
};
|
||||
|
||||
export const useAddInpaintMask = () => {
|
||||
const dispatch = useAppDispatch();
|
||||
const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
|
||||
const selectedInpaintMask =
|
||||
selectedEntityIdentifier?.type === 'inpaint_mask' ? selectedEntityIdentifier.id : undefined;
|
||||
const func = useCallback(() => {
|
||||
dispatch(inpaintMaskAdded({ isSelected: true }));
|
||||
}, [dispatch]);
|
||||
dispatch(inpaintMaskAdded({ isSelected: true, addAfter: selectedInpaintMask }));
|
||||
}, [dispatch, selectedInpaintMask]);
|
||||
|
||||
return func;
|
||||
};
|
||||
|
||||
export const useAddRegionalGuidance = () => {
|
||||
const dispatch = useAppDispatch();
|
||||
const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
|
||||
const selectedRegionalGuidance =
|
||||
selectedEntityIdentifier?.type === 'regional_guidance' ? selectedEntityIdentifier.id : undefined;
|
||||
const func = useCallback(() => {
|
||||
dispatch(rgAdded({ isSelected: true }));
|
||||
}, [dispatch]);
|
||||
dispatch(rgAdded({ isSelected: true, addAfter: selectedRegionalGuidance }));
|
||||
}, [dispatch, selectedRegionalGuidance]);
|
||||
|
||||
return func;
|
||||
};
|
||||
|
||||
@@ -0,0 +1,18 @@
|
||||
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
|
||||
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
|
||||
import { useCallback } from 'react';
|
||||
|
||||
export const useCanvasToggleBboxHotkey = () => {
|
||||
const canvasManager = useCanvasManager();
|
||||
|
||||
const handleToggleBboxVisibility = useCallback(() => {
|
||||
canvasManager.tool.tools.bbox.toggleBboxVisibility();
|
||||
}, [canvasManager]);
|
||||
|
||||
useRegisteredHotkeys({
|
||||
id: 'toggleBbox',
|
||||
category: 'canvas',
|
||||
callback: handleToggleBboxVisibility,
|
||||
dependencies: [handleToggleBboxVisibility],
|
||||
});
|
||||
};
|
||||
@@ -3,6 +3,7 @@ import {
   selectIsChatGPT4o,
   selectIsCogView4,
   selectIsFluxKontext,
+  selectIsGemini2_5,
   selectIsImagen3,
   selectIsImagen4,
   selectIsSD3,
@@ -19,21 +20,22 @@ export const useIsEntityTypeEnabled = (entityType: CanvasEntityType) => {
   const isImagen4 = useAppSelector(selectIsImagen4);
   const isFluxKontext = useAppSelector(selectIsFluxKontext);
   const isChatGPT4o = useAppSelector(selectIsChatGPT4o);
+  const isGemini2_5 = useAppSelector(selectIsGemini2_5);
 
   const isEntityTypeEnabled = useMemo<boolean>(() => {
     switch (entityType) {
       case 'regional_guidance':
-        return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
+        return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5;
       case 'control_layer':
-        return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
+        return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5;
       case 'inpaint_mask':
-        return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
+        return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5;
       case 'raster_layer':
-        return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
+        return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5;
       default:
         assert<Equals<typeof entityType, never>>(false);
     }
-  }, [entityType, isSD3, isCogView4, isImagen3, isImagen4, isFluxKontext, isChatGPT4o]);
+  }, [entityType, isSD3, isCogView4, isImagen3, isImagen4, isFluxKontext, isChatGPT4o, isGemini2_5]);
 
   return isEntityTypeEnabled;
 };
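The default branch relies on tsafe's compile-time exhaustiveness trick: once every member of CanvasEntityType has a case, entityType narrows to never, so adding a new entity type without updating this switch becomes a type error rather than a silent fall-through. A self-contained sketch of the same pattern (the Tool type is made up for illustration):

  import { assert } from 'tsafe';
  import type { Equals } from 'tsafe';

  type Tool = 'brush' | 'eraser';

  const label = (tool: Tool): string => {
    switch (tool) {
      case 'brush':
        return 'Brush';
      case 'eraser':
        return 'Eraser';
      default:
        // `tool` is `never` here; this line stops compiling if a case is missed.
        assert<Equals<typeof tool, never>>(false);
    }
  };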
@@ -372,6 +372,7 @@ export class CanvasCompositorModule extends CanvasModuleBase {
         position: { x: Math.floor(rect.x), y: Math.floor(rect.y) },
       },
       mergedEntitiesToDelete: deleteMergedEntities ? entityIdentifiers.map(mapId) : [],
+      addAfter: entityIdentifiers.map(mapId).at(-1),
     };
 
     switch (type) {
@@ -214,6 +214,9 @@ export class CanvasEntityObjectRenderer extends CanvasModuleBase {
     const isVisible = this.parent.konva.layer.visible();
     const isCached = this.konva.objectGroup.isCached();
 
+    // We should also never cache if the entity has no dimensions. Konva will log an error to console like this:
+    // Konva error: Can not cache the node. Width or height of the node equals 0. Caching is skipped.
+
     if (isVisible && (force || !isCached)) {
       this.log.trace('Caching object group');
       this.konva.objectGroup.clearCache();
@@ -482,13 +482,24 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
     // "contain" means that the entity should be scaled to fit within the bbox, but it should not exceed the bbox.
     const scale = Math.min(scaleX, scaleY);
 
-    // Center the shape within the bounding box
-    const offsetX = (rect.width - width * scale) / 2;
-    const offsetY = (rect.height - height * scale) / 2;
+    // Calculate the scaled dimensions
+    const scaledWidth = width * scale;
+    const scaledHeight = height * scale;
+
+    // Calculate centered position
+    const centerX = rect.x + (rect.width - scaledWidth) / 2;
+    const centerY = rect.y + (rect.height - scaledHeight) / 2;
+
+    // Round to grid and clamp to valid bounds
+    const roundedX = gridSize > 1 ? roundToMultiple(centerX, gridSize) : centerX;
+    const roundedY = gridSize > 1 ? roundToMultiple(centerY, gridSize) : centerY;
+
+    const x = clamp(roundedX, rect.x, rect.x + rect.width - scaledWidth);
+    const y = clamp(roundedY, rect.y, rect.y + rect.height - scaledHeight);
 
     this.konva.proxyRect.setAttrs({
-      x: clamp(roundToMultiple(rect.x + offsetX, gridSize), rect.x, rect.x + rect.width),
-      y: clamp(roundToMultiple(rect.y + offsetY, gridSize), rect.y, rect.y + rect.height),
+      x,
+      y,
       scaleX: scale,
       scaleY: scale,
       rotation: 0,
@@ -513,16 +524,32 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
     const scaleX = rect.width / width;
     const scaleY = rect.height / height;
 
-    // "cover" is the same as "contain", but we choose the larger scale to cover the shape
+    // "cover" means the entity should cover the entire bbox, potentially overflowing
     const scale = Math.max(scaleX, scaleY);
 
-    // Center the shape within the bounding box
-    const offsetX = (rect.width - width * scale) / 2;
-    const offsetY = (rect.height - height * scale) / 2;
+    // Calculate the scaled dimensions
+    const scaledWidth = width * scale;
+    const scaledHeight = height * scale;
+
+    // Calculate position - center only if entity exceeds bbox
+    let x = rect.x;
+    let y = rect.y;
+
+    // If scaled width exceeds bbox width, center horizontally
+    if (scaledWidth > rect.width) {
+      const centerX = rect.x + (rect.width - scaledWidth) / 2;
+      x = gridSize > 1 ? roundToMultiple(centerX, gridSize) : centerX;
+    }
+
+    // If scaled height exceeds bbox height, center vertically
+    if (scaledHeight > rect.height) {
+      const centerY = rect.y + (rect.height - scaledHeight) / 2;
+      y = gridSize > 1 ? roundToMultiple(centerY, gridSize) : centerY;
+    }
 
     this.konva.proxyRect.setAttrs({
-      x: roundToMultiple(rect.x + offsetX, gridSize),
-      y: roundToMultiple(rect.y + offsetY, gridSize),
+      x,
+      y,
       scaleX: scale,
       scaleY: scale,
       rotation: 0,
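Both hunks implement the CSS object-fit rules: "contain" takes the smaller of the two per-axis scales so the entity never exceeds the bbox, "cover" the larger so it never leaves a gap. The behavioral fix is that the centered position is now computed in bbox coordinates before grid-rounding, then clamped (contain) or only applied along an axis that actually overflows (cover). The scale selection reduces to this (a sketch, not the module's API):

  const fitScale = (mode: 'contain' | 'cover', w: number, h: number, boxW: number, boxH: number): number => {
    const scaleX = boxW / w;
    const scaleY = boxH / h;
    // 'contain' never exceeds the box; 'cover' never leaves it uncovered.
    return mode === 'contain' ? Math.min(scaleX, scaleY) : Math.max(scaleX, scaleY);
  };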
@@ -115,7 +115,7 @@ export abstract class CanvasModuleBase {
    * ```
    */
   destroy: () => void = () => {
-    this.log('Destroying module');
+    this.log.debug('Destroying module');
   };
 
   /**
@@ -2,6 +2,7 @@ import { objectEquals } from '@observ33r/object-equals';
 import { Mutex } from 'async-mutex';
 import { deepClone } from 'common/util/deepClone';
 import { withResultAsync } from 'common/util/result';
+import { parseify } from 'common/util/serialize';
 import type { CanvasEntityBufferObjectRenderer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityBufferObjectRenderer';
 import type { CanvasEntityFilterer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityFilterer';
 import type { CanvasEntityObjectRenderer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer';
@@ -10,12 +11,21 @@ import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
 import type { CanvasSegmentAnythingModule } from 'features/controlLayers/konva/CanvasSegmentAnythingModule';
 import type { CanvasStagingAreaModule } from 'features/controlLayers/konva/CanvasStagingAreaModule';
 import { getKonvaNodeDebugAttrs, loadImage } from 'features/controlLayers/konva/util';
-import type { CanvasImageState } from 'features/controlLayers/store/types';
+import type { CanvasImageState, Dimensions } from 'features/controlLayers/store/types';
 import { t } from 'i18next';
 import Konva from 'konva';
 import type { Logger } from 'roarr';
+import type { JsonObject } from 'roarr/dist/types';
 import { getImageDTOSafe } from 'services/api/endpoints/images';
 
+type CanvasObjectImageConfig = {
+  usePhysicalDimensions: boolean;
+};
+
+const DEFAULT_CONFIG: CanvasObjectImageConfig = {
+  usePhysicalDimensions: false,
+};
+
 export class CanvasObjectImage extends CanvasModuleBase {
   readonly type = 'object_image';
   readonly id: string;
@@ -30,6 +40,9 @@ export class CanvasObjectImage extends CanvasModuleBase {
   readonly log: Logger;
 
   state: CanvasImageState;
+
+  config: CanvasObjectImageConfig = DEFAULT_CONFIG;
+
   konva: {
     group: Konva.Group;
     placeholder: { group: Konva.Group; rect: Konva.Rect; text: Konva.Text };
@@ -116,7 +129,10 @@ export class CanvasObjectImage extends CanvasModuleBase {
     const imageElementResult = await withResultAsync(() => loadImage(imageDTO.image_url, true));
     if (imageElementResult.isErr()) {
       // Image loading failed (e.g. the URL to the "physical" image is invalid)
-      this.onFailedToLoadImage(t('controlLayers.unableToLoadImage', 'Unable to load image'));
+      this.onFailedToLoadImage(
+        t('controlLayers.unableToLoadImage', 'Unable to load image'),
+        parseify(imageElementResult.error)
+      );
       return;
     }
 
@@ -139,7 +155,10 @@ export class CanvasObjectImage extends CanvasModuleBase {
     const imageElementResult = await withResultAsync(() => loadImage(dataURL, false));
     if (imageElementResult.isErr()) {
       // Image loading failed (e.g. the URL to the "physical" image is invalid)
-      this.onFailedToLoadImage(t('controlLayers.unableToLoadImage', 'Unable to load image'));
+      this.onFailedToLoadImage(
+        t('controlLayers.unableToLoadImage', 'Unable to load image'),
+        parseify(imageElementResult.error)
+      );
       return;
     }
 
@@ -148,8 +167,8 @@ export class CanvasObjectImage extends CanvasModuleBase {
     this.updateImageElement();
   };
 
-  onFailedToLoadImage = (message: string) => {
-    this.log({ image: this.state.image }, message);
+  onFailedToLoadImage = (message: string, error?: JsonObject) => {
+    this.log.error({ image: this.state.image, error }, message);
     this.konva.image?.visible(false);
     this.isLoading = false;
     this.isError = true;
@@ -157,9 +176,22 @@ export class CanvasObjectImage extends CanvasModuleBase {
     this.konva.placeholder.group.visible(true);
   };
 
+  getDimensions = (): Dimensions => {
+    if (this.config.usePhysicalDimensions && this.imageElement) {
+      return {
+        width: this.imageElement.width,
+        height: this.imageElement.height,
+      };
+    }
+    return {
+      width: this.state.image.width,
+      height: this.state.image.height,
+    };
+  };
+
   updateImageElement = () => {
     if (this.imageElement) {
-      const { width, height } = this.state.image;
+      const { width, height } = this.getDimensions();
 
       if (this.konva.image) {
         this.log.trace('Updating Konva image attrs');
@@ -196,7 +228,6 @@ export class CanvasObjectImage extends CanvasModuleBase {
     this.log.trace({ state }, 'Updating image');
 
     const { image } = state;
-    const { width, height } = image;
 
     if (force || (!objectEquals(this.state, state) && !this.isLoading)) {
       const release = await this.mutex.acquire();
@@ -212,7 +243,7 @@ export class CanvasObjectImage extends CanvasModuleBase {
       }
     }
 
-    this.konva.image?.setAttrs({ width, height });
+    this.konva.image?.setAttrs(this.getDimensions());
     this.state = state;
     return true;
   }
@@ -125,9 +125,14 @@ export class CanvasStageModule extends CanvasModuleBase {
     this.konva.stage.on('dragmove', this.onStageDragMove);
     this.konva.stage.on('dragend', this.onStageDragEnd);
 
-    // Start dragging the stage when the middle mouse button is clicked. We do not need to listen for 'pointerdown' to
-    // do cleanup - that is done in onStageDragEnd.
+    // Start dragging the stage when the middle mouse button is clicked and stop dragging when it's released.
+    // We _also_ stop dragging on dragend - but in case the user doesn't actually start a drag (just clicks MMB once),
+    // we still need to stop dragging.
+    //
+    // Why start dragging on pointerdown instead of dragstart? Because it allows us to immediately show the cursor as
+    // grabbing, instead of waiting for the user to actually move the mouse to start the drag. Minor UX affordance.
     this.konva.stage.on('pointerdown', this.onStagePointerDown);
+    this.konva.stage.on('pointerup', this.onStagePointerUp);
 
     this.subscriptions.add(() => this.konva.stage.off('wheel', this.onStageMouseWheel));
     this.subscriptions.add(() => this.konva.stage.off('dragmove', this.onStageDragMove));
@@ -438,6 +443,13 @@ export class CanvasStageModule extends CanvasModuleBase {
     }
   };
 
+  onStagePointerUp = (e: KonvaEventObject<PointerEvent>) => {
+    // If the middle mouse button is released and we are dragging, stop dragging the stage
+    if (e.evt.button === 1) {
+      this.stopDragging();
+    }
+  };
+
   /**
    * Forcibly starts dragging the stage. This is useful when you want to start dragging the stage programmatically.
    */
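PointerEvent.button === 1 is the middle button, and a plain MMB click produces pointerdown/pointerup but no dragstart/dragend — which is why the pointerup handler is needed on top of onStageDragEnd. Stripped to its essentials, the wiring looks roughly like this (a sketch against the Konva event API, not this module's full implementation):

  import Konva from 'konva';

  const stage = new Konva.Stage({ container: 'container', width: 512, height: 512 });

  stage.on('pointerdown', (e) => {
    if (e.evt.button === 1) {
      stage.startDrag(); // begin panning immediately so the cursor can flip to 'grabbing'
    }
  });
  stage.on('pointerup', (e) => {
    if (e.evt.button === 1) {
      stage.stopDrag(); // also covers the click-without-drag case, where 'dragend' never fires
    }
  });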
@@ -229,13 +229,25 @@ export class CanvasStagingAreaModule extends CanvasModuleBase {
 
     if (imageSrc) {
       const image = this._getImageFromSrc(imageSrc, width, height);
 
+      // Some models do not make guarantees about their output dimensions. This flag allows the staged images to
+      // render at their real dimensions, instead of the bbox size.
+      //
+      // When the image source is an image name, it is a final output image. In that case, we should use its
+      // physical dimensions. Otherwise, if it is a dataURL, that means it is a progress image. These come in at
+      // a smaller resolution and need to be stretched to fill the bbox, so we do not use the physical
+      // dimensions in that case.
+      const usePhysicalDimensions = imageSrc.type === 'imageName';
+
       if (!this.image) {
         this.image = new CanvasObjectImage({ id: 'staging-area-image', type: 'image', image }, this);
+        this.image.config.usePhysicalDimensions = usePhysicalDimensions;
+        await this.image.update(this.image.state, true);
         this.konva.group.add(this.image.konva.group);
       } else if (this.image.isLoading || this.image.isError) {
         // noop
       } else {
+        this.image.config.usePhysicalDimensions = usePhysicalDimensions;
         await this.image.update({ ...this.image.state, image });
       }
       this.konva.placeholder.group.visible(false);
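Together with the CanvasObjectImage changes above, the flag resolves at render time via getDimensions(); the decision collapses to the following (names as in the hunks, condensed for reading):

  // final outputs (imageName) keep their true pixel size; progress previews (dataURL)
  // are stretched to the bbox-derived size recorded in state
  const { width, height } =
    config.usePhysicalDimensions && imageElement
      ? { width: imageElement.width, height: imageElement.height }
      : { width: state.image.width, height: state.image.height };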
@@ -10,9 +10,10 @@ import type { SubscriptionHandler } from 'features/controlLayers/konva/util';
 import { createReduxSubscription, getPrefixedId } from 'features/controlLayers/konva/util';
 import {
   selectCanvasSettingsSlice,
+  settingsBgColorChanged,
   settingsBrushWidthChanged,
-  settingsColorChanged,
   settingsEraserWidthChanged,
+  settingsFgColorChanged,
 } from 'features/controlLayers/store/canvasSettingsSlice';
 import {
   bboxChangedFromCanvas,
@@ -231,8 +232,10 @@ export class CanvasStateApiModule extends CanvasModuleBase {
   /**
    * Sets the drawing color, pushing state to redux.
    */
-  setColor = (color: RgbaColor) => {
-    return this.store.dispatch(settingsColorChanged(color));
+  setColor = (color: Partial<RgbaColor>) => {
+    return this.getSettings().activeColor === 'bgColor'
+      ? this.store.dispatch(settingsBgColorChanged(color))
+      : this.store.dispatch(settingsFgColorChanged(color));
   };
 
   /**
@@ -421,7 +424,8 @@ export class CanvasStateApiModule extends CanvasModuleBase {
    * consistency with conventional black and white mask images, we use black as the color for these entities.
    */
   getCurrentColor = (): RgbaColor => {
-    let color: RgbaColor = this.getSettings().color;
+    let color: RgbaColor =
+      this.getSettings().activeColor === 'bgColor' ? this.getSettings().bgColor : this.getSettings().fgColor;
     const selectedEntity = this.getSelectedEntityAdapter();
     if (selectedEntity) {
       // These two entity types use a compositing rect for opacity. Their fill is always a solid color.
@@ -449,7 +453,7 @@ export class CanvasStateApiModule extends CanvasModuleBase {
       // selected entity's fill color with 50% opacity.
       return { ...selectedEntity.state.fill.color, a: 0.5 };
     } else {
-      return this.getSettings().color;
+      return this.getSettings().activeColor === 'bgColor' ? this.getSettings().bgColor : this.getSettings().fgColor;
     }
   };
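setColor now takes a Partial<RgbaColor> and routes it to whichever of the two stored colors is active; the reducers (further down, in canvasSettingsSlice) merge the payload over the existing value. A usage sketch, assuming the app store is in scope:

  store.dispatch(settingsFgColorChanged({ a: 0.5 })); // nudge only alpha; r/g/b are kept
  store.dispatch(settingsActiveColorToggled());       // swap which color the tools read/write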
@@ -30,7 +30,6 @@ const ALL_ANCHORS: string[] = [
   'bottom-center',
   'bottom-right',
 ];
-const CORNER_ANCHORS: string[] = ['top-left', 'top-right', 'bottom-left', 'bottom-right'];
 const NO_ANCHORS: string[] = [];
 
 /**
@@ -66,6 +65,11 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
    */
   $aspectRatioBuffer = atom(1);
 
+  /**
+   * Buffer to store the visibility of the bbox.
+   */
+  $isBboxHidden = atom(false);
+
   constructor(parent: CanvasToolModule) {
     super();
     this.id = getPrefixedId(this.type);
@@ -191,6 +195,9 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
 
     // Update on busy state changes
     this.subscriptions.add(this.manager.$isBusy.listen(this.render));
+
+    // Listen for stage changes to update the bbox's visibility
+    this.subscriptions.add(this.$isBboxHidden.listen(this.render));
   }
 
   // This is a noop. The cursor is changed when the cursor enters or leaves the bbox.
@@ -206,13 +213,15 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
   };
 
   /**
-   * Renders the bbox. The bbox is only visible when the tool is set to 'bbox'.
+   * Renders the bbox.
    */
   render = () => {
     const tool = this.manager.tool.$tool.get();
 
     const { x, y, width, height } = this.manager.stateApi.runSelector(selectBbox).rect;
 
+    this.konva.group.visible(!this.$isBboxHidden.get());
+
     // We need to reach up to the preview layer to enable/disable listening so that the bbox can be interacted with.
     // If the manager is busy, we disable listening so the bbox cannot be interacted with.
     this.konva.group.listening(tool === 'bbox' && !this.manager.$isBusy.get());
@@ -334,9 +343,23 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
     let width = roundToMultipleMin(this.konva.proxyRect.width() * this.konva.proxyRect.scaleX(), gridSize);
     let height = roundToMultipleMin(this.konva.proxyRect.height() * this.konva.proxyRect.scaleY(), gridSize);
 
-    // If shift is held and we are resizing from a corner, retain aspect ratio - needs special handling. We skip this
-    // if alt/opt is held - this requires math too big for my brain.
-    if (shift && CORNER_ANCHORS.includes(anchor) && !alt) {
+    // When resizing the bbox using the transformer, we may need to do some extra math to maintain the current aspect
+    // ratio. Need to check a few things to determine if we should be maintaining the aspect ratio or not.
+    let shouldMaintainAspectRatio = false;
+
+    if (alt) {
+      // If alt is held, we are doing center-anchored transforming. In this case, maintaining aspect ratio is rather
+      // complicated.
+      shouldMaintainAspectRatio = false;
+    } else if (this.manager.stateApi.getBbox().aspectRatio.isLocked) {
+      // When the aspect ratio is locked, holding shift means we SHOULD NOT maintain the aspect ratio
+      shouldMaintainAspectRatio = !shift;
+    } else {
+      // When the aspect ratio is not locked, holding shift means we SHOULD maintain aspect ratio
+      shouldMaintainAspectRatio = shift;
+    }
+
+    if (shouldMaintainAspectRatio) {
       // Fit the bbox to the last aspect ratio
       let fittedWidth = Math.sqrt(width * height * this.$aspectRatioBuffer.get());
       let fittedHeight = fittedWidth / this.$aspectRatioBuffer.get();
@@ -377,7 +400,7 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
 
     // Update the aspect ratio buffer whenever the shift key is not held - this allows for a nice UX where you can start
    // a transform, get the right aspect ratio, then hold shift to lock it in.
-    if (!shift) {
+    if (!shouldMaintainAspectRatio) {
       this.$aspectRatioBuffer.set(bboxRect.width / bboxRect.height);
     }
   };
@@ -478,4 +501,8 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
     this.subscriptions.clear();
     this.konva.group.destroy();
   };
+
+  toggleBboxVisibility = () => {
+    this.$isBboxHidden.set(!this.$isBboxHidden.get());
+  };
 }
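The aspect-ratio fit preserves the dragged area: with buffered ratio r = w/h, solving w' * h' = w * h together with w'/h' = r gives w' = sqrt(w * h * r) and h' = w'/r — exactly the fittedWidth/fittedHeight lines kept above. For example, dragging to 600x300 with r = 1 snaps to roughly 424x424, the same ~180,000 px area.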
@@ -289,6 +289,14 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {
     this.manager.stage.setCursor('none');
   };
 
+  getCanPick = () => {
+    if (this.manager.stage.getIsDragging()) {
+      return false;
+    }
+
+    return true;
+  };
+
   /**
    * Renders the color picker tool preview on the canvas.
    */
@@ -298,6 +306,11 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {
       return;
     }
 
+    if (!this.getCanPick()) {
+      this.setVisibility(false);
+      return;
+    }
+
     const cursorPos = this.parent.$cursorPos.get();
 
     if (!cursorPos) {
@@ -315,6 +328,7 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {
     const colorPickerOuterRadius = this.manager.stage.unscale(this.config.RING_OUTER_RADIUS);
     const onePixel = this.manager.stage.unscale(1);
     const twoPixels = this.manager.stage.unscale(2);
+    const color = settings.activeColor === 'bgColor' ? settings.bgColor : settings.fgColor;
 
     this.konva.ringCandidateColor.setAttrs({
       x,
@@ -326,7 +340,7 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {
     this.konva.ringCurrentColor.setAttrs({
       x,
       y,
-      fill: rgbColorToString(settings.color),
+      fill: rgbColorToString(color),
       innerRadius: colorPickerInnerRadius,
       outerRadius: colorPickerOuterRadius,
     });
@@ -406,11 +420,21 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {
   };
 
   onStagePointerUp = (_e: KonvaEventObject<PointerEvent>) => {
-    const color = this.$colorUnderCursor.get();
-    this.manager.stateApi.setColor({ ...color, a: color.a / 255 });
+    if (!this.getCanPick()) {
+      this.setVisibility(false);
+      return;
+    }
+
+    const { a: _, ...color } = this.$colorUnderCursor.get();
+    this.manager.stateApi.setColor(color);
   };
 
   onStagePointerMove = (_e: KonvaEventObject<PointerEvent>) => {
+    if (!this.getCanPick()) {
+      this.setVisibility(false);
+      return;
+    }
+
     this.syncColorUnderCursor();
   };
 
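Two behavioral changes ride along with getCanPick: picking is suppressed while the stage is being panned, and the alpha sampled from the canvas pixel (0-255) is now discarded rather than normalized, so picking a color no longer touches brush opacity — setColor's Partial<RgbaColor> payload simply omits `a`:

  const { a: _, ...rgb } = { r: 10, g: 20, b: 30, a: 255 };
  // rgb === { r: 10, g: 20, b: 30 }; the stored alpha is left as-is by the merge reducers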
@@ -164,7 +164,7 @@ export class CanvasToolModule extends CanvasModuleBase {
     const selectedEntityAdapter = this.manager.stateApi.getSelectedEntityAdapter();
 
     if (this.manager.stage.getIsDragging()) {
-      this.tools.view.syncCursorStyle();
+      stage.setCursor('grabbing');
     } else if (tool === 'view') {
       this.tools.view.syncCursorStyle();
     } else if (segmentingAdapter) {
@@ -1,5 +1,5 @@
 import type { Selector, Store } from '@reduxjs/toolkit';
-import { $authToken } from 'app/store/nanostores/authToken';
+import { $authToken, $crossOrigin } from 'app/store/nanostores/authToken';
 import { roundDownToMultiple, roundUpToMultiple } from 'common/util/roundDownToMultiple';
 import { clamp } from 'es-toolkit/compat';
 import type {
@@ -494,7 +494,7 @@ export async function loadImage(src: string, fetchUrlFirst?: boolean): Promise<HTMLImageElement> {
     const imageElement = new Image();
     imageElement.onload = () => resolve(imageElement);
     imageElement.onerror = (error) => reject(error);
-    imageElement.crossOrigin = $authToken.get() ? 'use-credentials' : 'anonymous';
+    imageElement.crossOrigin = $crossOrigin.get();
     imageElement.src = url;
   });
 }
@@ -2,7 +2,8 @@ import type { PayloadAction, Selector } from '@reduxjs/toolkit';
 import { createSelector, createSlice } from '@reduxjs/toolkit';
 import type { RootState } from 'app/store/store';
 import type { SliceConfig } from 'app/store/types';
-import { zRgbaColor } from 'features/controlLayers/store/types';
+import type { RgbaColor } from 'features/controlLayers/store/types';
+import { RGBA_BLACK, RGBA_WHITE, zRgbaColor } from 'features/controlLayers/store/types';
 import { z } from 'zod';
 
 const zAutoSwitchMode = z.enum(['off', 'switch_on_start', 'switch_on_finish']);
@@ -35,9 +36,11 @@ const zCanvasSettingsState = z.object({
    */
   eraserWidth: z.int().gt(0),
   /**
-   * The color to use when drawing lines or filling shapes.
+   * The colors to use when drawing lines or filling shapes.
    */
-  color: zRgbaColor,
+  activeColor: z.enum(['bgColor', 'fgColor']),
+  bgColor: zRgbaColor,
+  fgColor: zRgbaColor,
   /**
    * Whether to composite inpainted/outpainted regions back onto the source image when saving canvas generations.
    *
@@ -100,7 +103,9 @@ const getInitialState = (): CanvasSettingsState => ({
   invertScrollForToolWidth: false,
   brushWidth: 50,
   eraserWidth: 50,
-  color: { r: 31, g: 160, b: 224, a: 1 }, // invokeBlue.500
+  activeColor: 'fgColor',
+  bgColor: RGBA_BLACK,
+  fgColor: RGBA_WHITE,
   outputOnlyMaskedRegions: true,
   autoProcess: true,
   snapToGrid: true,
@@ -134,8 +139,18 @@ const slice = createSlice({
     settingsEraserWidthChanged: (state, action: PayloadAction<CanvasSettingsState['eraserWidth']>) => {
       state.eraserWidth = Math.round(action.payload);
     },
-    settingsColorChanged: (state, action: PayloadAction<CanvasSettingsState['color']>) => {
-      state.color = action.payload;
+    settingsActiveColorToggled: (state) => {
+      state.activeColor = state.activeColor === 'bgColor' ? 'fgColor' : 'bgColor';
+    },
+    settingsBgColorChanged: (state, action: PayloadAction<Partial<RgbaColor>>) => {
+      state.bgColor = { ...state.bgColor, ...action.payload };
+    },
+    settingsFgColorChanged: (state, action: PayloadAction<Partial<RgbaColor>>) => {
+      state.fgColor = { ...state.fgColor, ...action.payload };
+    },
+    settingsColorsSetToDefault: (state) => {
+      state.bgColor = RGBA_BLACK;
+      state.fgColor = RGBA_WHITE;
     },
     settingsInvertScrollForToolWidthChanged: (
       state,
@@ -191,7 +206,10 @@ export const {
   settingsShowHUDToggled,
   settingsBrushWidthChanged,
   settingsEraserWidthChanged,
-  settingsColorChanged,
+  settingsActiveColorToggled,
+  settingsBgColorChanged,
+  settingsFgColorChanged,
+  settingsColorsSetToDefault,
   settingsInvertScrollForToolWidthChanged,
   settingsOutputOnlyMaskedRegionsToggled,
   settingsAutoProcessToggled,
@@ -72,12 +72,14 @@ import {
   CHATGPT_ASPECT_RATIOS,
   DEFAULT_ASPECT_RATIO_CONFIG,
   FLUX_KONTEXT_ASPECT_RATIOS,
+  GEMINI_2_5_ASPECT_RATIOS,
   getEntityIdentifier,
   getInitialCanvasState,
   IMAGEN_ASPECT_RATIOS,
   isChatGPT4oAspectRatioID,
   isFluxKontextAspectRatioID,
   isFLUXReduxConfig,
+  isGemini2_5AspectRatioID,
   isImagenAspectRatioID,
   isIPAdapterConfig,
   zCanvasState,
@@ -111,12 +113,16 @@ const slice = createSlice({
         isSelected?: boolean;
         isBookmarked?: boolean;
         mergedEntitiesToDelete?: string[];
+        addAfter?: string;
       }>
     ) => {
-      const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [] } = action.payload;
+      const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [], addAfter } = action.payload;
       const entityState = getRasterLayerState(id, overrides);
 
-      state.rasterLayers.entities.push(entityState);
+      const index = addAfter
+        ? state.rasterLayers.entities.findIndex((e) => e.id === addAfter) + 1
+        : state.rasterLayers.entities.length;
+      state.rasterLayers.entities.splice(index, 0, entityState);
 
       if (mergedEntitiesToDelete.length > 0) {
         state.rasterLayers.entities = state.rasterLayers.entities.filter(
@@ -139,6 +145,7 @@ const slice = createSlice({
         isSelected?: boolean;
         isBookmarked?: boolean;
         mergedEntitiesToDelete?: string[];
+        addAfter?: string;
       }) => ({
         payload: { ...payload, id: getPrefixedId('raster_layer') },
       }),
@@ -272,13 +279,17 @@ const slice = createSlice({
         isSelected?: boolean;
         isBookmarked?: boolean;
         mergedEntitiesToDelete?: string[];
+        addAfter?: string;
       }>
     ) => {
-      const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [] } = action.payload;
+      const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [], addAfter } = action.payload;
 
       const entityState = getControlLayerState(id, overrides);
 
-      state.controlLayers.entities.push(entityState);
+      const index = addAfter
+        ? state.controlLayers.entities.findIndex((e) => e.id === addAfter) + 1
+        : state.controlLayers.entities.length;
+      state.controlLayers.entities.splice(index, 0, entityState);
 
       if (mergedEntitiesToDelete.length > 0) {
         state.controlLayers.entities = state.controlLayers.entities.filter(
@@ -300,6 +311,7 @@ const slice = createSlice({
         isSelected?: boolean;
         isBookmarked?: boolean;
         mergedEntitiesToDelete?: string[];
+        addAfter?: string;
       }) => ({
         payload: { ...payload, id: getPrefixedId('control_layer') },
       }),
@@ -570,13 +582,17 @@ const slice = createSlice({
         isSelected?: boolean;
         isBookmarked?: boolean;
         mergedEntitiesToDelete?: string[];
+        addAfter?: string;
       }>
     ) => {
-      const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [] } = action.payload;
+      const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [], addAfter } = action.payload;
 
       const entityState = getRegionalGuidanceState(id, overrides);
 
-      state.regionalGuidance.entities.push(entityState);
+      const index = addAfter
+        ? state.regionalGuidance.entities.findIndex((e) => e.id === addAfter) + 1
+        : state.regionalGuidance.entities.length;
+      state.regionalGuidance.entities.splice(index, 0, entityState);
 
       if (mergedEntitiesToDelete.length > 0) {
         state.regionalGuidance.entities = state.regionalGuidance.entities.filter(
@@ -598,6 +614,7 @@ const slice = createSlice({
         isSelected?: boolean;
         isBookmarked?: boolean;
         mergedEntitiesToDelete?: string[];
+        addAfter?: string;
       }) => ({
         payload: { ...payload, id: getPrefixedId('regional_guidance') },
       }),
@@ -874,13 +891,17 @@ const slice = createSlice({
         isSelected?: boolean;
         isBookmarked?: boolean;
         mergedEntitiesToDelete?: string[];
+        addAfter?: string;
       }>
     ) => {
-      const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [] } = action.payload;
+      const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [], addAfter } = action.payload;
 
      const entityState = getInpaintMaskState(id, overrides);
 
-      state.inpaintMasks.entities.push(entityState);
+      const index = addAfter
+        ? state.inpaintMasks.entities.findIndex((e) => e.id === addAfter) + 1
+        : state.inpaintMasks.entities.length;
+      state.inpaintMasks.entities.splice(index, 0, entityState);
 
       if (mergedEntitiesToDelete.length > 0) {
         state.inpaintMasks.entities = state.inpaintMasks.entities.filter(
@@ -902,6 +923,7 @@ const slice = createSlice({
         isSelected?: boolean;
         isBookmarked?: boolean;
         mergedEntitiesToDelete?: string[];
+        addAfter?: string;
       }) => ({
         payload: { ...payload, id: getPrefixedId('inpaint_mask') },
       }),
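All four *Added reducers share the same insertion arithmetic, and the degenerate cases work out cleanly: with no addAfter, splicing at entities.length appends exactly as push did; with a stale addAfter, findIndex returns -1 and -1 + 1 = 0 degrades to inserting at the front instead of throwing. Condensed:

  const index = addAfter ? entities.findIndex((e) => e.id === addAfter) + 1 : entities.length;
  entities.splice(index, 0, entityState); // insert-after semantics on top of an array store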
@@ -1124,6 +1146,12 @@ const slice = createSlice({
         state.bbox.rect.height = height;
         state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
         state.bbox.aspectRatio.isLocked = true;
+      } else if (state.bbox.modelBase === 'gemini-2.5' && isGemini2_5AspectRatioID(id)) {
+        const { width, height } = GEMINI_2_5_ASPECT_RATIOS[id];
+        state.bbox.rect.width = width;
+        state.bbox.rect.height = height;
+        state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
+        state.bbox.aspectRatio.isLocked = true;
       } else if (state.bbox.modelBase === 'flux-kontext' && isFluxKontextAspectRatioID(id)) {
         const { width, height } = FLUX_KONTEXT_ASPECT_RATIOS[id];
         state.bbox.rect.width = width;
@@ -1249,25 +1277,33 @@ const slice = createSlice({
         newEntity.name = `${newEntity.name} (Copy)`;
       }
       switch (newEntity.type) {
-        case 'raster_layer':
+        case 'raster_layer': {
           newEntity.id = getPrefixedId('raster_layer');
-          state.rasterLayers.entities.push(newEntity);
+          const newEntityIndex = state.rasterLayers.entities.findIndex((e) => e.id === entityIdentifier.id) + 1;
+          state.rasterLayers.entities.splice(newEntityIndex, 0, newEntity);
           break;
-        case 'control_layer':
+        }
+        case 'control_layer': {
           newEntity.id = getPrefixedId('control_layer');
-          state.controlLayers.entities.push(newEntity);
+          const newEntityIndex = state.controlLayers.entities.findIndex((e) => e.id === entityIdentifier.id) + 1;
+          state.controlLayers.entities.splice(newEntityIndex, 0, newEntity);
           break;
-        case 'regional_guidance':
+        }
+        case 'regional_guidance': {
          newEntity.id = getPrefixedId('regional_guidance');
          for (const refImage of newEntity.referenceImages) {
            refImage.id = getPrefixedId('regional_guidance_ip_adapter');
          }
-          state.regionalGuidance.entities.push(newEntity);
+          const newEntityIndex = state.regionalGuidance.entities.findIndex((e) => e.id === entityIdentifier.id) + 1;
+          state.regionalGuidance.entities.splice(newEntityIndex, 0, newEntity);
           break;
-        case 'inpaint_mask':
+        }
+        case 'inpaint_mask': {
           newEntity.id = getPrefixedId('inpaint_mask');
-          state.inpaintMasks.entities.push(newEntity);
+          const newEntityIndex = state.inpaintMasks.entities.findIndex((e) => e.id === entityIdentifier.id) + 1;
+          state.inpaintMasks.entities.splice(newEntityIndex, 0, newEntity);
           break;
+        }
       }
 
       state.selectedEntityIdentifier = getEntityIdentifier(newEntity);
@@ -1575,6 +1611,7 @@ const slice = createSlice({
         state.bbox.rect.width = 1024;
         state.bbox.rect.height = 1024;
       }
 
+      syncScaledSize(state);
     }
   });
@@ -4,6 +4,7 @@ import type { SliceConfig } from 'app/store/types';
 import { paramsReset } from 'features/controlLayers/store/paramsSlice';
 import { type LoRA, zLoRA } from 'features/controlLayers/store/types';
 import { zModelIdentifierField } from 'features/nodes/types/common';
+import { DEFAULT_LORA_WEIGHT_CONFIG } from 'features/system/store/configSlice';
 import type { LoRAModelConfig } from 'services/api/types';
 import { v4 as uuidv4 } from 'uuid';
 import z from 'zod';
@@ -13,11 +14,6 @@ const zLoRAsState = z.object({
 });
 type LoRAsState = z.infer<typeof zLoRAsState>;
 
-const defaultLoRAConfig: Pick<LoRA, 'weight' | 'isEnabled'> = {
-  weight: 0.75,
-  isEnabled: true,
-};
-
 const getInitialState = (): LoRAsState => ({
   loras: [],
 });
@@ -32,6 +28,10 @@ const slice = createSlice({
     reducer: (state, action: PayloadAction<{ model: LoRAModelConfig; id: string }>) => {
       const { model, id } = action.payload;
       const parsedModel = zModelIdentifierField.parse(model);
+      const defaultLoRAConfig: Pick<LoRA, 'weight' | 'isEnabled'> = {
+        weight: model.default_settings?.weight ?? DEFAULT_LORA_WEIGHT_CONFIG.initial,
+        isEnabled: true,
+      };
       state.loras.push({ ...defaultLoRAConfig, model: parsedModel, id });
     },
     prepare: (payload: { model: LoRAModelConfig }) => ({ payload: { ...payload, id: uuidv4() } }),
@@ -87,3 +87,7 @@ export const lorasSliceConfig: SliceConfig<typeof slice> = {
 
 export const selectLoRAsSlice = (state: RootState) => state.loras;
 export const selectAddedLoRAs = createSelector(selectLoRAsSlice, (loras) => loras.loras);
+export const buildSelectLoRA = (id: string) =>
+  createSelector([selectLoRAsSlice], (loras) => {
+    return selectLoRA(loras, id);
+  });
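Building defaultLoRAConfig inside the reducer (instead of at module scope) means each LoRA starts at its model-supplied default weight when one exists, with the app-wide default as fallback:

  // per-model default first, app-wide default second; the old hardcoded 0.75 is gone
  const weight = model.default_settings?.weight ?? DEFAULT_LORA_WEIGHT_CONFIG.initial;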
@@ -11,15 +11,26 @@ import {
   CHATGPT_ASPECT_RATIOS,
   DEFAULT_ASPECT_RATIO_CONFIG,
   FLUX_KONTEXT_ASPECT_RATIOS,
+  GEMINI_2_5_ASPECT_RATIOS,
   getInitialParamsState,
   IMAGEN_ASPECT_RATIOS,
   isChatGPT4oAspectRatioID,
   isFluxKontextAspectRatioID,
+  isGemini2_5AspectRatioID,
   isImagenAspectRatioID,
   zParamsState,
 } from 'features/controlLayers/store/types';
 import { calculateNewSize } from 'features/controlLayers/util/getScaledBoundingBoxDimensions';
-import { CLIP_SKIP_MAP } from 'features/parameters/types/constants';
+import {
+  API_BASE_MODELS,
+  CLIP_SKIP_MAP,
+  SUPPORTS_ASPECT_RATIO_BASE_MODELS,
+  SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS,
+  SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS,
+  SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS,
+  SUPPORTS_REF_IMAGES_BASE_MODELS,
+  SUPPORTS_SEED_BASE_MODELS,
+} from 'features/parameters/types/constants';
 import type {
   ParameterCanvasCoherenceMode,
   ParameterCFGRescaleMultiplier,
@@ -107,14 +118,15 @@ const slice = createSlice({
       return;
     }
 
-    // Clamp CLIP skip layer count to the bounds of the new model
-    if (model.base === 'sdxl') {
-      // We don't support user-defined CLIP skip for SDXL because it doesn't do anything useful
-      state.clipSkip = 0;
-    } else {
-      const { maxClip } = CLIP_SKIP_MAP[model.base];
-      state.clipSkip = clamp(state.clipSkip, 0, maxClip);
+    if (API_BASE_MODELS.includes(model.base)) {
+      state.dimensions.aspectRatio.isLocked = true;
+      state.dimensions.aspectRatio.value = 1;
+      state.dimensions.aspectRatio.id = '1:1';
+      state.dimensions.rect.width = 1024;
+      state.dimensions.rect.height = 1024;
     }
+
+    applyClipSkip(state, model, state.clipSkip);
   },
   vaeSelected: (state, action: PayloadAction<ParameterVAEModel | null>) => {
     // null is a valid VAE!
@@ -170,7 +182,7 @@ const slice = createSlice({
       state.vaePrecision = action.payload;
     },
     setClipSkip: (state, action: PayloadAction<number>) => {
-      state.clipSkip = action.payload;
+      applyClipSkip(state, state.model, action.payload);
     },
     shouldUseCpuNoiseChanged: (state, action: PayloadAction<boolean>) => {
       state.shouldUseCpuNoise = action.payload;
@@ -181,15 +193,6 @@ const slice = createSlice({
     negativePromptChanged: (state, action: PayloadAction<ParameterNegativePrompt>) => {
       state.negativePrompt = action.payload;
     },
-    positivePrompt2Changed: (state, action: PayloadAction<string>) => {
-      state.positivePrompt2 = action.payload;
-    },
-    negativePrompt2Changed: (state, action: PayloadAction<string>) => {
-      state.negativePrompt2 = action.payload;
-    },
-    shouldConcatPromptsChanged: (state, action: PayloadAction<boolean>) => {
-      state.shouldConcatPrompts = action.payload;
-    },
     refinerModelChanged: (state, action: PayloadAction<ParameterSDXLRefinerModel | null>) => {
       const result = zParamsState.shape.refinerModel.safeParse(action.payload);
       if (!result.success) {
@@ -306,6 +309,12 @@ const slice = createSlice({
       state.dimensions.rect.height = height;
       state.dimensions.aspectRatio.value = state.dimensions.rect.width / state.dimensions.rect.height;
       state.dimensions.aspectRatio.isLocked = true;
+    } else if (state.model?.base === 'gemini-2.5' && isGemini2_5AspectRatioID(id)) {
+      const { width, height } = GEMINI_2_5_ASPECT_RATIOS[id];
+      state.dimensions.rect.width = width;
+      state.dimensions.rect.height = height;
+      state.dimensions.aspectRatio.value = state.dimensions.rect.width / state.dimensions.rect.height;
+      state.dimensions.aspectRatio.isLocked = true;
     } else if (state.model?.base === 'flux-kontext' && isFluxKontextAspectRatioID(id)) {
       const { width, height } = FLUX_KONTEXT_ASPECT_RATIOS[id];
       state.dimensions.rect.width = width;
@@ -375,6 +384,33 @@ const slice = createSlice({
   },
 });
 
+const applyClipSkip = (state: { clipSkip: number }, model: ParameterModel | null, clipSkip: number) => {
+  if (model === null) {
+    return;
+  }
+
+  const maxClip = getModelMaxClipSkip(model);
+
+  state.clipSkip = clamp(clipSkip, 0, maxClip ?? 0);
+};
+
+const hasModelClipSkip = (model: ParameterModel | null) => {
+  if (model === null) {
+    return false;
+  }
+
+  return (getModelMaxClipSkip(model) ?? 0) > 0;
+};
+
+const getModelMaxClipSkip = (model: ParameterModel) => {
+  if (model.base === 'sdxl') {
+    // We don't support user-defined CLIP skip for SDXL because it doesn't do anything useful
+    return 0;
+  }
+
+  return CLIP_SKIP_MAP[model.base]?.maxClip;
+};
+
 const resetState = (state: ParamsState): ParamsState => {
   // When a new session is requested, we need to keep the current model selections, plus dependent state
   // like VAE precision. Everything else gets reset to default.
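The three helpers centralize what the old inline branch did ad hoc: getModelMaxClipSkip answers "how many CLIP layers may be skipped for this base" (pinned to 0 for SDXL, looked up in CLIP_SKIP_MAP otherwise), applyClipSkip clamps any requested value into [0, maxClip], and hasModelClipSkip feeds the new selectHasModelCLIPSkip selector so the UI can hide the control when it would do nothing. For instance, with a hypothetical maxClip of 2, setClipSkip(99) stores 2 and setClipSkip(-1) stores 0; an unknown base degrades safely to a fixed 0 via the `?? 0` fallback.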
@@ -425,9 +461,6 @@ export const {
   shouldUseCpuNoiseChanged,
   positivePromptChanged,
   negativePromptChanged,
-  positivePrompt2Changed,
-  negativePrompt2Changed,
-  shouldConcatPromptsChanged,
   refinerModelChanged,
   setRefinerSteps,
   setRefinerCFGScale,
@@ -460,8 +493,7 @@ export const paramsSliceConfig: SliceConfig<typeof slice> = {
 };
 
 export const selectParamsSlice = (state: RootState) => state.params;
-export const createParamsSelector = <T>(selector: Selector<ParamsState, T>) =>
-  createSelector(selectParamsSlice, selector);
+const createParamsSelector = <T>(selector: Selector<ParamsState, T>) => createSelector(selectParamsSlice, selector);
 
 export const selectBase = createParamsSelector((params) => params.model?.base);
 export const selectIsSDXL = createParamsSelector((params) => params.model?.base === 'sdxl');
@@ -470,7 +502,6 @@ export const selectIsSD3 = createParamsSelector((params) => params.model?.base === 'sd-3');
 export const selectIsCogView4 = createParamsSelector((params) => params.model?.base === 'cogview4');
 export const selectIsImagen3 = createParamsSelector((params) => params.model?.base === 'imagen3');
 export const selectIsImagen4 = createParamsSelector((params) => params.model?.base === 'imagen4');
-export const selectIsFluxKontextApi = createParamsSelector((params) => params.model?.base === 'flux-kontext');
 export const selectIsFluxKontext = createParamsSelector((params) => {
   if (params.model?.base === 'flux-kontext') {
     return true;
@@ -481,6 +512,7 @@ export const selectIsFluxKontext = createParamsSelector((params) => {
   return false;
 });
 export const selectIsChatGPT4o = createParamsSelector((params) => params.model?.base === 'chatgpt-4o');
+export const selectIsGemini2_5 = createParamsSelector((params) => params.model?.base === 'gemini-2.5');
 
 export const selectModel = createParamsSelector((params) => params.model);
 export const selectModelKey = createParamsSelector((params) => params.model?.key);
@@ -497,7 +529,8 @@ export const selectCFGScale = createParamsSelector((params) => params.cfgScale);
 export const selectGuidance = createParamsSelector((params) => params.guidance);
 export const selectSteps = createParamsSelector((params) => params.steps);
 export const selectCFGRescaleMultiplier = createParamsSelector((params) => params.cfgRescaleMultiplier);
-export const selectCLIPSKip = createParamsSelector((params) => params.clipSkip);
+export const selectCLIPSkip = createParamsSelector((params) => params.clipSkip);
+export const selectHasModelCLIPSkip = createParamsSelector((params) => hasModelClipSkip(params.model));
 export const selectCanvasCoherenceEdgeSize = createParamsSelector((params) => params.canvasCoherenceEdgeSize);
 export const selectCanvasCoherenceMinDenoise = createParamsSelector((params) => params.canvasCoherenceMinDenoise);
 export const selectCanvasCoherenceMode = createParamsSelector((params) => params.canvasCoherenceMode);
@@ -515,12 +548,33 @@ export const selectNegativePrompt = createParamsSelector((params) => params.negativePrompt);
 export const selectNegativePromptWithFallback = createParamsSelector((params) => params.negativePrompt ?? '');
 export const selectHasNegativePrompt = createParamsSelector((params) => params.negativePrompt !== null);
 export const selectModelSupportsNegativePrompt = createSelector(
-  [selectIsFLUX, selectIsChatGPT4o, selectIsFluxKontext],
-  (isFLUX, isChatGPT4o, isFluxKontext) => !isFLUX && !isChatGPT4o && !isFluxKontext
+  selectModel,
+  (model) => !!model && SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS.includes(model.base)
 );
+export const selectModelSupportsSeed = createSelector(
+  selectModel,
+  (model) => !!model && SUPPORTS_SEED_BASE_MODELS.includes(model.base)
+);
+export const selectModelSupportsRefImages = createSelector(
+  selectModel,
+  (model) => !!model && SUPPORTS_REF_IMAGES_BASE_MODELS.includes(model.base)
+);
+export const selectModelSupportsAspectRatio = createSelector(
+  selectModel,
+  (model) => !!model && SUPPORTS_ASPECT_RATIO_BASE_MODELS.includes(model.base)
+);
+export const selectModelSupportsPixelDimensions = createSelector(
+  selectModel,
+  (model) => !!model && SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS.includes(model.base)
+);
+export const selectIsApiBaseModel = createSelector(
+  selectModel,
+  (model) => !!model && API_BASE_MODELS.includes(model.base)
+);
+export const selectModelSupportsOptimizedDenoising = createSelector(
+  selectModel,
+  (model) => !!model && SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS.includes(model.base)
+);
-export const selectPositivePrompt2 = createParamsSelector((params) => params.positivePrompt2);
-export const selectNegativePrompt2 = createParamsSelector((params) => params.negativePrompt2);
-export const selectShouldConcatPrompts = createParamsSelector((params) => params.shouldConcatPrompts);
 export const selectScheduler = createParamsSelector((params) => params.scheduler);
 export const selectSeamlessXAxis = createParamsSelector((params) => params.seamlessXAxis);
 export const selectSeamlessYAxis = createParamsSelector((params) => params.seamlessYAxis);
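The rewritten selectors replace per-model boolean plumbing with membership tests against capability arrays keyed by model base; adding a capability becomes one array plus one selector instead of a growing chain of negations. A sketch of the shape (the FOO capability and its list are hypothetical):

  import type { BaseModelType } from 'services/api/types';

  const SUPPORTS_FOO_BASE_MODELS: BaseModelType[] = ['sd-1', 'sdxl']; // hypothetical list
  export const selectModelSupportsFoo = createSelector(
    selectModel,
    (model) => !!model && SUPPORTS_FOO_BASE_MODELS.includes(model.base)
  );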
@@ -26,6 +26,7 @@ import {
   initialChatGPT4oReferenceImage,
   initialFluxKontextReferenceImage,
   initialFLUXRedux,
+  initialGemini2_5ReferenceImage,
   initialIPAdapter,
 } from './util';
 
@@ -136,6 +137,16 @@ const slice = createSlice({
         return;
       }
 
+      if (entity.config.model.base === 'gemini-2.5') {
+        // Switching to Gemini 2.5 Flash Preview (nano banana) ref image
+        entity.config = {
+          ...initialGemini2_5ReferenceImage,
+          image: entity.config.image,
+          model: entity.config.model,
+        };
+        return;
+      }
+
       if (
         entity.config.model.base === 'flux-kontext' ||
         (entity.config.model.base === 'flux' && entity.config.model.name?.toLowerCase().includes('kontext'))
@@ -14,9 +14,7 @@ import {
|
||||
zParameterMaskBlurMethod,
|
||||
zParameterModel,
|
||||
zParameterNegativePrompt,
|
||||
zParameterNegativeStylePromptSDXL,
|
||||
zParameterPositivePrompt,
|
||||
zParameterPositiveStylePromptSDXL,
|
||||
zParameterPrecision,
|
||||
zParameterScheduler,
|
||||
zParameterSDXLRefinerModel,
|
||||
@@ -84,6 +82,7 @@ export const zRgbaColor = zRgbColor.extend({
|
||||
});
|
||||
export type RgbaColor = z.infer<typeof zRgbaColor>;
|
||||
export const RGBA_BLACK: RgbaColor = { r: 0, g: 0, b: 0, a: 1 };
|
||||
export const RGBA_WHITE: RgbaColor = { r: 255, g: 255, b: 255, a: 1 };
|
||||
|
||||
const zOpacity = z.number().gte(0).lte(1);
|
||||
|
||||
@@ -266,6 +265,13 @@ const zChatGPT4oReferenceImageConfig = z.object({
|
||||
});
|
||||
export type ChatGPT4oReferenceImageConfig = z.infer<typeof zChatGPT4oReferenceImageConfig>;
|
||||
|
||||
const zGemini2_5ReferenceImageConfig = z.object({
|
||||
type: z.literal('gemini_2_5_reference_image'),
|
||||
image: zImageWithDims.nullable(),
|
||||
model: zModelIdentifierField.nullable(),
|
||||
});
|
||||
export type Gemini2_5ReferenceImageConfig = z.infer<typeof zGemini2_5ReferenceImageConfig>;
|
||||
|
||||
const zFluxKontextReferenceImageConfig = z.object({
|
||||
type: z.literal('flux_kontext_reference_image'),
|
||||
image: zImageWithDims.nullable(),
|
||||
@@ -288,6 +294,7 @@ export const zRefImageState = z.object({
|
||||
zFLUXReduxConfig,
|
||||
zChatGPT4oReferenceImageConfig,
|
||||
zFluxKontextReferenceImageConfig,
|
||||
zGemini2_5ReferenceImageConfig,
|
||||
]),
|
||||
});
|
||||
export type RefImageState = z.infer<typeof zRefImageState>;
|
||||
@@ -300,10 +307,15 @@ export const isFLUXReduxConfig = (config: RefImageState['config']): config is FL
|
||||
export const isChatGPT4oReferenceImageConfig = (
|
||||
config: RefImageState['config']
|
||||
): config is ChatGPT4oReferenceImageConfig => config.type === 'chatgpt_4o_reference_image';
|
||||
|
||||
export const isFluxKontextReferenceImageConfig = (
|
||||
config: RefImageState['config']
|
||||
): config is FluxKontextReferenceImageConfig => config.type === 'flux_kontext_reference_image';
|
||||
|
||||
export const isGemini2_5ReferenceImageConfig = (
|
||||
config: RefImageState['config']
|
||||
): config is Gemini2_5ReferenceImageConfig => config.type === 'gemini_2_5_reference_image';
|
||||
|
||||
const zFillStyle = z.enum(['solid', 'grid', 'crosshatch', 'diagonal', 'horizontal', 'vertical']);
|
||||
export type FillStyle = z.infer<typeof zFillStyle>;
|
||||
export const isFillStyle = (v: unknown): v is FillStyle => zFillStyle.safeParse(v).success;
|
||||
@@ -449,6 +461,14 @@ export const CHATGPT_ASPECT_RATIOS: Record<ChatGPT4oAspectRatio, Dimensions> = {
|
||||
'2:3': { width: 1024, height: 1536 },
|
||||
} as const;
|
||||
|
||||
export const zGemini2_5AspectRatioID = z.enum(['1:1']);
|
||||
type Gemini2_5AspectRatio = z.infer<typeof zGemini2_5AspectRatioID>;
|
||||
export const isGemini2_5AspectRatioID = (v: unknown): v is Gemini2_5AspectRatio =>
|
||||
zGemini2_5AspectRatioID.safeParse(v).success;
|
||||
export const GEMINI_2_5_ASPECT_RATIOS: Record<Gemini2_5AspectRatio, Dimensions> = {
|
||||
'1:1': { width: 1024, height: 1024 },
|
||||
} as const;
|
||||
|
||||
export const zFluxKontextAspectRatioID = z.enum(['21:9', '16:9', '4:3', '1:1', '3:4', '9:16', '9:21']);
|
||||
type FluxKontextAspectRatio = z.infer<typeof zFluxKontextAspectRatioID>;
|
||||
export const isFluxKontextAspectRatioID = (v: unknown): v is z.infer<typeof zFluxKontextAspectRatioID> =>
|
||||
@@ -463,6 +483,33 @@ export const FLUX_KONTEXT_ASPECT_RATIOS: Record<FluxKontextAspectRatio, Dimensio
|
||||
'1:1': { width: 1024, height: 1024 },
|
||||
};
|
||||
|
||||
export const zVeo3AspectRatioID = z.enum(['16:9']);
|
||||
type Veo3AspectRatio = z.infer<typeof zVeo3AspectRatioID>;
|
||||
export const isVeo3AspectRatioID = (v: unknown): v is Veo3AspectRatio => zVeo3AspectRatioID.safeParse(v).success;
|
||||
|
||||
export const zRunwayAspectRatioID = z.enum(['16:9', '4:3', '1:1', '3:4', '9:16', '21:9']);
|
||||
type RunwayAspectRatio = z.infer<typeof zRunwayAspectRatioID>;
|
||||
export const isRunwayAspectRatioID = (v: unknown): v is RunwayAspectRatio => zRunwayAspectRatioID.safeParse(v).success;
|
||||
|
||||
export const zVideoAspectRatio = z.union([zVeo3AspectRatioID, zRunwayAspectRatioID]);
|
||||
export type VideoAspectRatio = z.infer<typeof zVideoAspectRatio>;
|
||||
export const isVideoAspectRatio = (v: unknown): v is VideoAspectRatio => zVideoAspectRatio.safeParse(v).success;
|
||||
|
||||
export const zVeo3Resolution = z.enum(['720p', '1080p']);
|
||||
type Veo3Resolution = z.infer<typeof zVeo3Resolution>;
|
||||
export const isVeo3Resolution = (v: unknown): v is Veo3Resolution => zVeo3Resolution.safeParse(v).success;
|
||||
export const RESOLUTION_MAP: Record<Veo3Resolution | RunwayResolution, Dimensions> = {
|
||||
'720p': { width: 1280, height: 720 },
|
||||
'1080p': { width: 1920, height: 1080 },
|
||||
};
|
||||
|
||||
export const zRunwayResolution = z.enum(['720p']);
|
||||
type RunwayResolution = z.infer<typeof zRunwayResolution>;
|
||||
export const isRunwayResolution = (v: unknown): v is RunwayResolution => zRunwayResolution.safeParse(v).success;
|
||||
|
||||
export const zVideoResolution = z.union([zVeo3Resolution, zRunwayResolution]);
|
||||
export type VideoResolution = z.infer<typeof zVideoResolution>;
|
||||
|
||||
const zAspectRatioConfig = z.object({
|
||||
id: zAspectRatioID,
|
||||
value: z.number().gt(0),
|
||||
@@ -476,6 +523,24 @@ export const DEFAULT_ASPECT_RATIO_CONFIG: AspectRatioConfig = {
|
||||
isLocked: false,
|
||||
};
|
||||
|
||||
const zVeo3DurationID = z.enum(['8']);
|
||||
type Veo3Duration = z.infer<typeof zVeo3DurationID>;
|
||||
export const isVeo3DurationID = (v: unknown): v is Veo3Duration => zVeo3DurationID.safeParse(v).success;
|
||||
export const VEO3_DURATIONS: Record<Veo3Duration, string> = {
|
||||
'8': '8 seconds',
|
||||
};
|
||||
|
||||
const zRunwayDurationID = z.enum(['5', '10']);
|
||||
type RunwayDuration = z.infer<typeof zRunwayDurationID>;
|
||||
export const isRunwayDurationID = (v: unknown): v is RunwayDuration => zRunwayDurationID.safeParse(v).success;
|
||||
export const RUNWAY_DURATIONS: Record<RunwayDuration, string> = {
|
||||
'5': '5 seconds',
|
||||
'10': '10 seconds',
|
||||
};
|
||||
|
||||
export const zVideoDuration = z.union([zVeo3DurationID, zRunwayDurationID]);
|
||||
export type VideoDuration = z.infer<typeof zVideoDuration>;
|
||||
|
||||
const zBboxState = z.object({
|
||||
rect: z.object({
|
||||
x: z.number().int(),
|
||||
@@ -493,6 +558,8 @@ const zBboxState = z.object({
|
||||
});
|
||||
|
||||
const zDimensionsState = z.object({
|
||||
// TODO(psyche): There is no concept of x/y coords for the dimensions state here... It's just width and height.
|
||||
// Remove the extraneous data.
|
||||
rect: z.object({
|
||||
x: z.number().int(),
|
||||
y: z.number().int(),
|
||||
@@ -534,9 +601,6 @@ export const zParamsState = z.object({
|
||||
shouldUseCpuNoise: z.boolean(),
|
||||
positivePrompt: zParameterPositivePrompt,
|
||||
negativePrompt: zParameterNegativePrompt,
|
||||
positivePrompt2: zParameterPositiveStylePromptSDXL,
|
||||
negativePrompt2: zParameterNegativeStylePromptSDXL,
|
||||
shouldConcatPrompts: z.boolean(),
|
||||
refinerModel: zParameterSDXLRefinerModel.nullable(),
|
||||
refinerSteps: z.number(),
|
||||
refinerCFGScale: z.number(),
|
||||
@@ -584,9 +648,6 @@ export const getInitialParamsState = (): ParamsState => ({
|
||||
shouldUseCpuNoise: true,
|
||||
positivePrompt: '',
|
||||
negativePrompt: null,
|
||||
positivePrompt2: '',
|
||||
negativePrompt2: '',
|
||||
shouldConcatPrompts: true,
|
||||
refinerModel: null,
|
||||
refinerSteps: 20,
|
||||
refinerCFGScale: 7.5,
|
||||
@@ -663,7 +724,12 @@ export const getInitialRefImagesState = (): RefImagesState => ({

export const zCanvasReferenceImageState_OLD = zCanvasEntityBase.extend({
  type: z.literal('reference_image'),
  ipAdapter: z.discriminatedUnion('type', [zIPAdapterConfig, zFLUXReduxConfig, zChatGPT4oReferenceImageConfig]),
  ipAdapter: z.discriminatedUnion('type', [
    zIPAdapterConfig,
    zFLUXReduxConfig,
    zChatGPT4oReferenceImageConfig,
    zGemini2_5ReferenceImageConfig,
  ]),
});

export const zCanvasMetadata = z.object({
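In this hunk the single-line ipAdapter union (shown first) is replaced by the expanded form that adds zGemini2_5ReferenceImageConfig as a fourth member. Because z.discriminatedUnion dispatches on the type literal, payloads tagged gemini_2_5_reference_image now parse instead of failing. A hedged sketch (the payload shape mirrors initialGemini2_5ReferenceImage further down this diff):

const result = zCanvasReferenceImageState_OLD.shape.ipAdapter.safeParse({
  type: 'gemini_2_5_reference_image',
  image: null,
  model: null,
});
// result.success should now be true; before this change the unrecognized
// `type` tag made the union parse fail.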
@@ -10,9 +10,9 @@ import type {
  ChatGPT4oReferenceImageConfig,
  ControlLoRAConfig,
  ControlNetConfig,
  Dimensions,
  FluxKontextReferenceImageConfig,
  FLUXReduxConfig,
  Gemini2_5ReferenceImageConfig,
  ImageWithDims,
  IPAdapterConfig,
  RefImageState,
@@ -38,22 +38,6 @@ export const imageDTOToImageObject = (imageDTO: ImageDTO, overrides?: Partial<Ca
  };
};

export const imageNameToImageObject = (
  imageName: string,
  dimensions: Dimensions,
  overrides?: Partial<CanvasImageState>
): CanvasImageState => {
  return {
    id: getPrefixedId('image'),
    type: 'image',
    image: {
      image_name: imageName,
      ...dimensions,
    },
    ...overrides,
  };
};

export const imageDTOToImageWithDims = ({ image_name, width, height }: ImageDTO): ImageWithDims => ({
  image_name,
  width,
@@ -105,6 +89,11 @@ export const initialChatGPT4oReferenceImage: ChatGPT4oReferenceImageConfig = {
  image: null,
  model: null,
};
export const initialGemini2_5ReferenceImage: Gemini2_5ReferenceImageConfig = {
  type: 'gemini_2_5_reference_image',
  image: null,
  model: null,
};
export const initialFluxKontextReferenceImage: FluxKontextReferenceImageConfig = {
  type: 'flux_kontext_reference_image',
  image: null,
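Each reference-image family gets a null-initialized config object carrying its discriminating type tag. Where code needs to create one from a tag, a lookup like the following would work; the function name and switch are illustrative only, with just the three initial configs taken from the diff:

// Pick the initial config for a reference-image type tag (hypothetical helper).
const getInitialReferenceImageConfig = (type: string) => {
  switch (type) {
    case 'gemini_2_5_reference_image':
      return initialGemini2_5ReferenceImage;
    case 'flux_kontext_reference_image':
      return initialFluxKontextReferenceImage;
    default:
      return initialChatGPT4oReferenceImage;
  }
};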
@@ -0,0 +1,28 @@
import { useDeleteVideoModalApi } from 'features/deleteVideoModal/store/state';
import { useCallback, useMemo } from 'react';
import type { VideoDTO } from 'services/api/types';

export const useDeleteVideo = (videoDTO?: VideoDTO | null) => {
  const deleteImageModal = useDeleteVideoModalApi();

  const isEnabled = useMemo(() => {
    if (!videoDTO) {
      return;
    }
    return true;
  }, [videoDTO]);
  const _delete = useCallback(() => {
    if (!videoDTO) {
      return;
    }
    if (!isEnabled) {
      return;
    }
    deleteImageModal.delete([videoDTO.video_id]);
  }, [deleteImageModal, videoDTO, isEnabled]);

  return {
    delete: _delete,
    isEnabled,
  };
};
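A consumer sketch for the new hook (the component and MenuItem are hypothetical; only the hook's delete/isEnabled contract is from the file above). Two quirks worth noting from the diff: the variable inside the hook is named deleteImageModal even though it wraps the video modal API, and isEnabled yields undefined rather than false when no DTO is given, so callers should treat it as merely falsy:

const DeleteVideoMenuItem = ({ videoDTO }: { videoDTO: VideoDTO }) => {
  const { delete: deleteVideo, isEnabled } = useDeleteVideo(videoDTO);
  // isEnabled is undefined (falsy) when no DTO is provided.
  return (
    <MenuItem onClick={deleteVideo} isDisabled={!isEnabled}>
      Delete Video
    </MenuItem>
  );
};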
@@ -12,7 +12,7 @@ import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import type { CanvasState, RefImagesState } from 'features/controlLayers/store/types';
import type { ImageUsage } from 'features/deleteImageModal/store/types';
import { selectGetImageNamesQueryArgs } from 'features/gallery/store/gallerySelectors';
import { imageSelected } from 'features/gallery/store/gallerySlice';
import { itemSelected } from 'features/gallery/store/gallerySlice';
import { fieldImageCollectionValueChanged, fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import type { NodesState } from 'features/nodes/store/types';
@@ -89,9 +89,15 @@ const handleDeletions = async (image_names: string[], store: AppStore) => {
  const newImageNames = data?.image_names.filter((name) => !deleted_images.includes(name)) || [];
  const newSelectedImage = newImageNames[index ?? 0] || null;

  if (intersection(state.gallery.selection, image_names).length > 0) {
    // Some selected images were deleted, clear selection
    dispatch(imageSelected(newSelectedImage));
  const galleryImageNames = state.gallery.selection.map((s) => s.id);

  if (intersection(galleryImageNames, image_names).length > 0) {
    if (newSelectedImage) {
      // Some selected images were deleted, clear selection
      dispatch(itemSelected({ type: 'image', id: newSelectedImage }));
    } else {
      dispatch(itemSelected(null));
    }
  }

  // We need to reset the features where the image is in use - none of these work if their image(s) don't exist
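This hunk swaps the old intersection over state.gallery.selection for one over the projected galleryImageNames: the selection model changed from bare image names to { type, id } items, so ids must be mapped out before intersecting with the deleted names. A sketch of why, assuming a lodash-style intersection (the lodash-es import path is an assumption):

import { intersection } from 'lodash-es';

// Selection items are objects, deleted names are strings - comparing
// them directly never matches, so map to ids first.
const selection = [
  { type: 'image', id: 'a.png' },
  { type: 'image', id: 'b.png' },
];
const deletedNames = ['b.png'];

intersection(selection, deletedNames); // [] - object !== string
intersection(selection.map((s) => s.id), deletedNames); // ['b.png']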