Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-16 00:17:56 -05:00)

Compare commits: merged-cha...3.6.3rc1 (216 commits)
Commits (abbreviated SHA1):

c781998b54, 2175fe3823, f64fc2c8b7, 3d1b5c57ea, 31b9538976, 97c1545cca, 6a8a3b50bc, 5a816818dc,
1cb866d1fc, 29bcc4b595, ca2bb6f0cc, 1c8fc908b2, d397beaa47, 60eea09629, 5b7b1122cb, dfc8d1bb10,
f9fa62164e, d47905d2fb, 03b1cde97d, 7162ff04df, 32b1e974ca, 82c3c7fc38, 3dcbb79ef7, 3b41104427,
35bf7ee66d, 430e17a5d2, 400d66fa5d, 800c481515, 79ae9c4e64, 0dc6cb0535, 810fc19e43, e0e106367d,
14472dc09d, e8095b73ae, c979cf5ecc, 1b4dbd283e, fb50a221f8, 52e07db06b, 6643b5cec4, e8bf9ea058,
ce3d37e829, 8a61063e84, 87ff96553a, 209bf105bc, 804dbeba34, 067cd4dc2e, feb4a3f242, 4a886c0a4a,
8e500283b6, 3205371654, d713620d9e, c1300fa8b1, 0976ddba23, 3ebb806410, 9f274c79dc, 88c08bbfc7,
c2af124622, f972fe9836, dcfc883ab3, 1d2bd6b8f7, f2777f5096, d3320dc4ee, 72db2ee352, 60c3a4ad5e,
cf7a7928af, 1057314508, 73a077956b, 5e1e50bd47, 413fe566b8, c9b5f06c42, b53e432b0f, 88164447e9,
1ac85fd049, ee6fc4ab1d, 9f793bdae8, a0eecaecd0, d532073f5b, 198e8c9d55, 30367deeca, e73298aea2,
59279851a3, 2965357d99, 8bd32ee142, a4f892dcfb, e675983e20, e9558f97c4, a1a611f8cb, 182dc859a0,
c0240a8568, 02bcff29e8, d4ed64df7d, 701f14c1e3, 45bf2c7da6, 67ada70a26, 06bcc07f65, 4410ecf62c,
9f6b9d4d23, b24e8dd829, 25291a2e01, 332f3930a5, ed466a99ec, f68f8898c0, a0996b1c0a, 522ff4a042,
a769f93be0, 2c5ef92979, 5d773dc94c, 088e3420e6, 14efc95707, f48a2c5fd2, 74ae4d7774, 191203ea0c,
6aceae5c22, 8c6b3efd39, 4602efd598, f70c0936ca, 0d4de4cc63, 1e855f8290, bb2787584d, a04981b418,
d7f16b7c87, 4477e04d59, 30e11b4b42, b93695b78f, b01311813b, 5ae80fab87, c4291f2136, 287d3c2b04,
7fde19730e, 13575642d8, 3f5370b284, d048eb5b20, dd7031a472, 4160d5ef26, 51bdf2fd19, 6a44697911,
7a1d0ec228, b5928fd411, 2f345d1976, f5d0721fa8, c3b36cb61d, 189c430e46, b922ee566a, 89da69f647,
138caa34de, 26c3378ede, aa134a2db8, d0391cb430, c955ea9de0, fc29a5d439, 7e9942dbab, c003967eaa,
b28fcc6be5, 418cdbabb7, 18e61e92d9, de20711637, 55e91b97be, f79bbd2d6e, e1c2c3905d, 03ac93bfc7,
89da976949, 57dafd294d, e611baa4b4, fc448d5b6d, e59954f956, e160cbb1e9, 86c857b9c2, 0a13d7d2c7,
68da5c6d22, f82744b95e, 5a67bc68a1, 61cf4d4c70, 9d20a2d5a3, 8b0ac451e3, 470dbe75a2, b7d19b8130,
3dc13221d8, 35184dbd86, 0868fc2558, 92fb09c4df, b4cf5496b6, a0e68705dd, 7cb49e65bd, 39fedb090b,
f36a691219, 6a2eb1d2e4, 13123daa3f, c859eb865e, 8f5e2cbcc7, 2aed6e2dba, 52b51a6088, 52b24e01e2,
1178fd8bd3, a0187cc9df, 2f656cc357, 71f9ac9985, 8bbdfc45fa, 3cbb1a7671, b74e0de74a, e7e7793896,
504bdac14a, b76d2cd716, 022b32c724, 653b820da1, 68232e642f, 4ba0bf4dcf, 5e4daf4bc6, 7e0713c869,
099d516ac0, b94f6a4a29, 4caf63d53d, 6057229ceb, 6a2856e46f, 4dedd63b74, db74837eb1, 892fe62264
.github/ISSUE_TEMPLATE/BUG_REPORT.yml (vendored, 98 lines changed)

@@ -6,10 +6,6 @@ title: '[bug]: '
 labels: ['bug']
-# assignees:
-#   - moderator_bot
-#   - lstein
-
 body:
   - type: markdown
     attributes:
@@ -18,10 +14,9 @@ body:
   - type: checkboxes
     attributes:
-      label: Is there an existing issue for this?
+      label: Is there an existing issue for this problem?
       description: |
-        Please use the [search function](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
-        first to see if an issue already exists for the bug you encountered.
+        Please [search](https://github.com/invoke-ai/InvokeAI/issues) first to see if an issue already exists for the problem.
       options:
         - label: I have searched the existing issues
           required: true
@@ -33,80 +28,119 @@ body:
   - type: dropdown
     id: os_dropdown
     attributes:
-      label: OS
-      description: Which operating System did you use when the bug occured
+      label: Operating system
+      description: Your computer's operating system.
       multiple: false
       options:
         - 'Linux'
         - 'Windows'
         - 'macOS'
         - 'other'
       validations:
         required: true

   - type: dropdown
     id: gpu_dropdown
     attributes:
-      label: GPU
-      description: Which kind of Graphic-Adapter is your System using
+      label: GPU vendor
+      description: Your GPU's vendor.
       multiple: false
       options:
-        - 'cuda'
-        - 'amd'
-        - 'mps'
-        - 'cpu'
+        - 'Nvidia (CUDA)'
+        - 'AMD (ROCm)'
+        - 'Apple Silicon (MPS)'
+        - 'None (CPU)'
       validations:
         required: true

+  - type: input
+    id: gpu_model
+    attributes:
+      label: GPU model
+      description: Your GPU's model. If on Apple Silicon, this is your Mac's chip. Leave blank if on CPU.
+      placeholder: ex. RTX 2080 Ti, Mac M1 Pro
+    validations:
+      required: false

   - type: input
     id: vram
     attributes:
-      label: VRAM
-      description: Size of the VRAM if known
+      label: GPU VRAM
+      description: Your GPU's VRAM. If on Apple Silicon, this is your Mac's unified memory. Leave blank if on CPU.
       placeholder: 8GB
     validations:
       required: false

   - type: input
     id: version-number
     attributes:
-      label: What version did you experience this issue on?
+      label: Version number
       description: |
-        Please share the version of Invoke AI that you experienced the issue on. If this is not the latest version, please update first to confirm the issue still exists. If you are testing main, please include the commit hash instead.
-      placeholder: X.X.X
+        The version of Invoke you have installed. If it is not the latest version, please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
+      placeholder: ex. 3.6.1
     validations:
       required: true

+  - type: input
+    id: browser-version
+    attributes:
+      label: Browser
+      description: Your web browser and version.
+      placeholder: ex. Firefox 123.0b3
+    validations:
+      required: true

+  - type: textarea
+    id: python-deps
+    attributes:
+      label: Python dependencies
+      description: |
+        If the problem occurred during image generation, click the gear icon at the bottom left corner, click "About", click the copy button and then paste here.
+    validations:
+      required: false

   - type: textarea
     id: what-happened
     attributes:
-      label: What happened?
+      label: What happened
       description: |
-        Briefly describe what happened, what you expected to happen and how to reproduce this bug.
-      placeholder: When using the webinterface and right-clicking on button X instead of the popup-menu there error Y appears
+        Describe what happened. Include any relevant error messages, stack traces and screenshots here.
+      placeholder: I clicked button X and then Y happened.
     validations:
       required: true

   - type: textarea
     id: what-you-expected
     attributes:
-      label: Screenshots
-      description: If applicable, add screenshots to help explain your problem
-      placeholder: this is what the result looked like <screenshot>
+      label: What you expected to happen
+      description: Describe what you expected to happen.
+      placeholder: I expected Z to happen.
     validations:
       required: true

+  - type: textarea
+    id: how-to-repro
+    attributes:
+      label: How to reproduce the problem
+      description: List steps to reproduce the problem.
+      placeholder: Start the app, generate an image with these settings, then click button X.
+    validations:
+      required: false

   - type: textarea
     id: additional-context
     attributes:
       label: Additional context
-      description: Add any other context about the problem here
+      description: Any other context that might help us to understand the problem.
       placeholder: Only happens when there is full moon and Friday the 13th on Christmas Eve 🎅🏻
     validations:
       required: false

   - type: input
-    id: contact
+    id: discord-username
     attributes:
-      label: Contact Details
-      description: __OPTIONAL__ How can we get in touch with you if we need more info (besides this issue)?
-      placeholder: ex. email@example.com, discordname, twitter, ...
+      label: Discord username
+      description: If you are on the Invoke discord and would prefer to be contacted there, please provide your username.
+      placeholder: supercoolusername123
     validations:
       required: false
.github/pr_labels.yml (new file, vendored, 59 lines)

@@ -0,0 +1,59 @@
+Root:
+  - changed-files:
+      - any-glob-to-any-file: '*'
+
+PythonDeps:
+  - changed-files:
+      - any-glob-to-any-file: 'pyproject.toml'
+
+Python:
+  - changed-files:
+      - all-globs-to-any-file:
+          - 'invokeai/**'
+          - '!invokeai/frontend/web/**'
+
+PythonTests:
+  - changed-files:
+      - any-glob-to-any-file: 'tests/**'
+
+CICD:
+  - changed-files:
+      - any-glob-to-any-file: .github/**
+
+Docker:
+  - changed-files:
+      - any-glob-to-any-file: docker/**
+
+Installer:
+  - changed-files:
+      - any-glob-to-any-file: installer/**
+
+Documentation:
+  - changed-files:
+      - any-glob-to-any-file: docs/**
+
+Invocations:
+  - changed-files:
+      - any-glob-to-any-file: 'invokeai/app/invocations/**'
+
+Backend:
+  - changed-files:
+      - any-glob-to-any-file: 'invokeai/backend/**'
+
+Api:
+  - changed-files:
+      - any-glob-to-any-file: 'invokeai/app/api/**'
+
+Services:
+  - changed-files:
+      - any-glob-to-any-file: 'invokeai/app/services/**'
+
+FrontendDeps:
+  - changed-files:
+      - any-glob-to-any-file:
+          - '**/*/package.json'
+          - '**/*/pnpm-lock.yaml'
+
+Frontend:
+  - changed-files:
+      - any-glob-to-any-file: 'invokeai/frontend/web/**'
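For intuition about how these `any-glob-to-any-file` rules map changed files to labels, here is a rough Python approximation. This is an illustration only: `actions/labeler` uses minimatch-style globbing, while Python's `fnmatch` is not path-aware (its `*` happens to cross `/`, loosely standing in for `**`), and the rule subset below is hypothetical:

```python
from fnmatch import fnmatch

# Hypothetical subset of the pr_labels.yml rules above; '*' stands in for
# the minimatch '**' because fnmatch patterns are not path-aware.
RULES = {
    "Invocations": ["invokeai/app/invocations/*"],
    "Docker": ["docker/*"],
    "Root": ["*"],
}

def labels_for(changed_files: list[str]) -> set[str]:
    """Return every label whose glob list matches at least one changed file."""
    return {
        label
        for label, globs in RULES.items()
        if any(fnmatch(f, g) for f in changed_files for g in globs)
    }

print(labels_for(["invokeai/app/invocations/noise.py", "docker/Dockerfile"]))
# {'Invocations', 'Docker', 'Root'} (set ordering may vary)
```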
.github/workflows/change-monitor.yml (deleted, vendored, 29 lines)

@@ -1,29 +0,0 @@
-name: Trigger Target Workflow
-
-on:
-  push:
-    branches:
-      - main
-  workflow_dispatch:
-
-jobs:
-  trigger:
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Trigger Workflow in Another Repository
-        run: |
-          # Set the required variables
-          repo_owner="invoke-ai"
-          repo_name="Invoke"
-          event_type="invokeai-pr-merge"
-          service="${{ github.event.inputs.target_service }}"
-          version="${{ github.event.inputs.target_version }}"
-
-          curl -L \
-            -X POST \
-            -H "Accept: application/vnd.github+json" \
-            -H "Authorization: Bearer ${{ secrets.PAT }}" \
-            -H "X-GitHub-Api-Version: 2022-11-28" \
-            https://api.github.com/repos/$repo_owner/$repo_name/dispatches \
-            -d "{\"event_type\": \"$event_type\", \"client_payload\": {\"service\": \"$service\", \"version\": \"$version\", \"unit\": false, \"integration\": true}}"
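The deleted workflow's `curl` call is a standard GitHub `repository_dispatch` request. For reference, the equivalent request in Python; this is a sketch rather than project code, and the token variable and payload values are placeholders:

```python
import os

import requests

# Placeholder: the workflow read a personal access token from repo secrets.
token = os.environ["GITHUB_PAT"]

resp = requests.post(
    "https://api.github.com/repos/invoke-ai/Invoke/dispatches",
    headers={
        "Accept": "application/vnd.github+json",
        "Authorization": f"Bearer {token}",
        "X-GitHub-Api-Version": "2022-11-28",
    },
    json={
        "event_type": "invokeai-pr-merge",
        "client_payload": {
            "service": "...",   # was taken from workflow inputs
            "version": "...",   # was taken from workflow inputs
            "unit": False,
            "integration": True,
        },
    },
)
resp.raise_for_status()  # GitHub answers 204 No Content on success
```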
.github/workflows/label-pr.yml (new file, vendored, 16 lines)

@@ -0,0 +1,16 @@
+name: "Pull Request Labeler"
+on:
+  - pull_request_target
+
+jobs:
+  labeler:
+    permissions:
+      contents: read
+      pull-requests: write
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - uses: actions/labeler@v5
+        with:
+          configuration-path: .github/pr_labels.yml
@@ -169,7 +169,7 @@ the command `npm install -g pnpm` if needed)
 _For Linux with an AMD GPU:_
 
 ```sh
-pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
+pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
 ```
 
 _For non-GPU systems:_
docs/contributing/contribution_guides/contributingToFrontend.md (deleted, 76 lines)

@@ -1,76 +0,0 @@
-# Contributing to the Frontend
-
-# InvokeAI Web UI
-
-- [InvokeAI Web UI](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#invokeai-web-ui)
-  - [Stack](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#stack)
-  - [Contributing](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#contributing)
-    - [Dev Environment](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#dev-environment)
-    - [Production builds](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#production-builds)
-
-The UI is a fairly straightforward Typescript React app, with the Unified Canvas being more complex.
-
-Code is located in `invokeai/frontend/web/` for review.
-
-## Stack
-
-State management is Redux via [Redux Toolkit](https://github.com/reduxjs/redux-toolkit). We lean heavily on RTK:
-
-- `createAsyncThunk` for HTTP requests
-- `createEntityAdapter` for fetching images and models
-- `createListenerMiddleware` for workflows
-
-The API client and associated types are generated from the OpenAPI schema. See API_CLIENT.md.
-
-Communication with the server is a mix of HTTP and [socket.io](https://github.com/socketio/socket.io-client) (with a simple socket.io redux middleware to help).
-
-[Chakra-UI](https://github.com/chakra-ui/chakra-ui) & [Mantine](https://github.com/mantinedev/mantine) for components and styling.
-
-[Konva](https://github.com/konvajs/react-konva) for the canvas, but we are pushing the limits of what is feasible with it (and HTML canvas in general). We plan to rebuild it with [PixiJS](https://github.com/pixijs/pixijs) to take advantage of WebGL's improved raster handling.
-
-[Vite](https://vitejs.dev/) for bundling.
-
-Localisation is via [i18next](https://github.com/i18next/react-i18next), but translation happens on our [Weblate](https://hosted.weblate.org/engage/invokeai/) project. Only the English source strings should be changed on this repo.
-
-## Contributing
-
-Thanks for your interest in contributing to the InvokeAI Web UI!
-
-We encourage you to ping @psychedelicious and @blessedcoolant on [Discord](https://discord.gg/ZmtBAhwWhy) if you want to contribute, just to touch base and ensure your work doesn't conflict with anything else going on. The project is very active.
-
-### Dev Environment
-
-**Setup**
-
-1. Install [node](https://nodejs.org/en/download/). You can confirm node is installed with:
-   ```bash
-   node --version
-   ```
-
-2. Install [pnpm](https://pnpm.io/) and confirm it is installed by running this:
-   ```bash
-   npm install --global pnpm
-   pnpm --version
-   ```
-
-From `invokeai/frontend/web/` run `pnpm install` to get everything set up.
-
-Start everything in dev mode:
-
-1. Ensure your virtual environment is running
-2. Start the dev server: `pnpm dev`
-3. Start the InvokeAI Nodes backend: `python scripts/invokeai-web.py # run from the repo root`
-4. Point your browser to the dev server address, e.g. [http://localhost:5173/](http://localhost:5173/)
-
-### VSCode Remote Dev
-
-We've noticed an intermittent issue with the VSCode Remote Dev port forwarding. If you use this feature of VSCode, you may intermittently click the Invoke button and then get nothing until the request times out. We suggest disabling the IDE's port forwarding feature and doing it manually via SSH:
-
-`ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host`
-
-### Production builds
-
-For a number of technical and logistical reasons, we need to commit UI build artefacts to the repo.
-
-If you submit a PR, there is a good chance we will ask you to include a separate commit with a build of the app.
-
-To build for production, run `pnpm build`.
@@ -12,7 +12,7 @@ To get started, take a look at our [new contributors checklist](newContributorCh
 Once you're set up, for more information, you can review the documentation specific to your area of interest:
 
 * #### [InvokeAI Architecture](../ARCHITECTURE.md)
-* #### [Frontend Documentation](./contributingToFrontend.md)
+* #### [Frontend Documentation](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web)
 * #### [Node Documentation](../INVOCATIONS.md)
 * #### [Local Development](../LOCAL_DEVELOPMENT.md)
BIN docs/img/favicon.ico (new file; binary file not shown. After: 4.2 KiB)
@@ -117,6 +117,11 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.
 
 ## :octicons-gift-24: InvokeAI Features
 
+### Installation
+- [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
+- [Manual Installation](installation/020_INSTALL_MANUAL.md)
+- [Docker Installation](installation/040_INSTALL_DOCKER.md)
+
 ### The InvokeAI Web Interface
 - [WebUI overview](features/WEB.md)
 - [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
@@ -477,7 +477,7 @@ Then type the following commands:
 
 === "AMD System"
     ```bash
-    pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
+    pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.6
     ```
 
 ### Corrupted configuration file
@@ -154,7 +154,7 @@ manager, please follow these steps:
 === "ROCm (AMD)"
 
     ```bash
-    pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
+    pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
    ```
 
 === "CPU (Intel Macs & non-GPU systems)"
@@ -313,7 +313,7 @@ code for InvokeAI. For this to work, you will need to install the
 on your system, please see the [Git Installation
 Guide](https://github.com/git-guides/install-git)
 
-You will also need to install the [frontend development toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/docs/contributing/contribution_guides/contributingToFrontend.md).
+You will also need to install the [frontend development toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/README.md).
 
 If you have a "normal" installation, you should create a totally separate virtual environment for the git-based installation, else the two may interfere.
@@ -345,7 +345,7 @@ installation protocol (important!)
 
 === "ROCm (AMD)"
     ```bash
-    pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
+    pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
    ```
 
 === "CPU (Intel Macs & non-GPU systems)"
@@ -361,7 +361,7 @@ installation protocol (important!)
 Be sure to pass `-e` (for an editable install) and don't forget the
 dot ("."). It is part of the command.
 
-5. Install the [frontend toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/docs/contributing/contribution_guides/contributingToFrontend.md) and do a production build of the UI as described.
+5. Install the [frontend toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/README.md) and do a production build of the UI as described.
 
 6. You can now run `invokeai` and its related commands. The code will be
    read from the repository, so that you can edit the .py source files
@@ -134,7 +134,7 @@ recipes are available
 
 When installing torch and torchvision manually with `pip`, remember to provide
 the argument `--extra-index-url
-https://download.pytorch.org/whl/rocm5.4.2` as described in the [Manual
+https://download.pytorch.org/whl/rocm5.6` as described in the [Manual
 Installation Guide](020_INSTALL_MANUAL.md).
 
 This will be done automatically for you if you use the installer
@@ -18,13 +18,18 @@ either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
 driver).
 
-## **[Automated Installer](010_INSTALL_AUTOMATED.md)**
-✅ This is the recommended installation method for first-time users.
+## **[Automated Installer (Recommended)](010_INSTALL_AUTOMATED.md)**
+✅ This is the recommended installation method for first-time users.
 
 This is a script that will install all of InvokeAI's essential
-third party libraries and InvokeAI itself. It includes access to a
-"developer console" which will help us debug problems with you and
-give you to access experimental features.
+third party libraries and InvokeAI itself.
+
+🖥️ **Download the latest installer .zip file here**: https://github.com/invoke-ai/InvokeAI/releases/latest
+
+- *Look for the file labelled "InvokeAI-installer-v3.X.X.zip" at the bottom of the page*
+- If you experience issues, read through the full [installation instructions](010_INSTALL_AUTOMATED.md) to make sure you have met all of the installation requirements. If you need more help, join the [Discord](discord.gg/invoke-ai) or create an issue on [Github](https://github.com/invoke-ai/InvokeAI).
 
 ## **[Manual Installation](020_INSTALL_MANUAL.md)**
 This method is recommended for experienced users and developers.
@@ -14,6 +14,7 @@ To use a community workflow, download the `.json` node graph file and load i
 
 - Community Nodes
     + [Adapters-Linked](#adapters-linked-nodes)
+    + [Autostereogram](#autostereogram-nodes)
     + [Average Images](#average-images)
     + [Clean Image Artifacts After Cut](#clean-image-artifacts-after-cut)
     + [Close Color Mask](#close-color-mask)
@@ -25,7 +26,7 @@ To use a community workflow, download the `.json` node graph file and load i
     + [GPT2RandomPromptMaker](#gpt2randompromptmaker)
     + [Grid to Gif](#grid-to-gif)
     + [Halftone](#halftone)
-    + [Ideal Size](#ideal-size)
+    + [Hand Refiner with MeshGraphormer](#hand-refiner-with-meshgraphormer)
     + [Image and Mask Composition Pack](#image-and-mask-composition-pack)
     + [Image Dominant Color](#image-dominant-color)
     + [Image to Character Art Image Nodes](#image-to-character-art-image-nodes)
@@ -41,6 +42,7 @@ To use a community workflow, download the `.json` node graph file and load i
     + [Oobabooga](#oobabooga)
     + [Prompt Tools](#prompt-tools)
     + [Remote Image](#remote-image)
+    + [BriaAI Background Remove](#briaai-remove-background)
     + [Remove Background](#remove-background)
     + [Retroize](#retroize)
     + [Size Stepper Nodes](#size-stepper-nodes)
@@ -67,6 +69,17 @@ Note: These are inherited from the core nodes so any update to the core nodes sh
 
 **Node Link:** https://github.com/skunkworxdark/adapters-linked-nodes
 
+--------------------------------
+### Autostereogram Nodes
+
+**Description:** Generate autostereogram images from a depth map. This is not a very practically useful node, but more a 90s nostalgic indulgence, as I used to love these images as a kid.
+
+**Node Link:** https://github.com/skunkworxdark/autostereogram_nodes
+
+**Example Usage:**
+</br>
+<img src="https://github.com/skunkworxdark/autostereogram_nodes/blob/main/images/spider.png" width="200" /> -> <img src="https://github.com/skunkworxdark/autostereogram_nodes/blob/main/images/spider-depth.png" width="200" /> -> <img src="https://github.com/skunkworxdark/autostereogram_nodes/raw/main/images/spider-dots.png" width="200" /> <img src="https://github.com/skunkworxdark/autostereogram_nodes/raw/main/images/spider-pattern.png" width="200" />
+
 --------------------------------
 ### Average Images
 
@@ -197,13 +210,18 @@ CMYK Halftone Output:
 <img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/c59c578f-db8e-4d66-8c66-2851752d75ea" width="300" />
 
 --------------------------------
-### Ideal Size
-
-**Description:** This node calculates an ideal image size for a first pass of a multi-pass upscaling. The aim is to avoid duplication that results from choosing a size larger than the model is capable of.
+### Hand Refiner with MeshGraphormer
 
-**Node Link:** https://github.com/JPPhoto/ideal-size-node
+**Description:** Hand Refiner takes in your image and automatically generates a fixed depth map for the hands, along with a mask of the hands region, that will conveniently allow you to use them along with ControlNet to fix the wonky hands generated by Stable Diffusion.
+
+**Node Link:** https://github.com/blessedcoolant/invoke_meshgraphormer
+
+**View**
+<img src="https://raw.githubusercontent.com/blessedcoolant/invoke_meshgraphormer/main/assets/preview.jpg" />
 
 --------------------------------
 
 ### Image and Mask Composition Pack
 
 **Description:** This is a pack of nodes for composing masks and images, including a simple text mask creator and both image and latent offset nodes. The offsets wrap around, so these can be used in conjunction with the Seamless node to progressively generate centered on different parts of the seamless tiling.
@@ -417,6 +435,17 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai
 
 **Node Link:** https://github.com/fieldOfView/InvokeAI-remote_image
 
+--------------------------------
+
+### BriaAI Remove Background
+
+**Description:** Implements one-click background removal with BriaAI's new version 1.4 model, which seems to be producing better results than any previous background removal tool.
+
+**Node Link:** https://github.com/blessedcoolant/invoke_bria_rmbg
+
+**View**
+<img src="https://raw.githubusercontent.com/blessedcoolant/invoke_bria_rmbg/main/assets/preview.jpg" />
+
 --------------------------------
 ### Remove Background
@@ -36,6 +36,7 @@ their descriptions.
 | Integer Math | Perform basic math operations on two integers |
 | Convert Image Mode | Converts an image to a different mode. |
 | Crop Image | Crops an image to a specified box. The box can be outside of the image. |
+| Ideal Size | Calculates an ideal image size for latents for a first pass of a multi-pass upscaling, to avoid duplication and other artifacts |
 | Image Hue Adjustment | Adjusts the Hue of an image. |
 | Inverse Lerp Image | Inverse linear interpolation of all pixels of an image |
 | Image Primitive | An image primitive value |
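The Ideal Size node's computation is not part of this diff, but the idea the table row describes, picking first-pass dimensions whose pixel area matches what the model was trained for while preserving aspect ratio, can be sketched as below. This is a hypothetical illustration, not the node's actual code; the 512-pixel native size and 8-pixel rounding granularity are assumptions:

```python
import math

def ideal_size(width: int, height: int, native: int = 512, multiple: int = 8) -> tuple[int, int]:
    """Scale (width, height) so the area is about native**2, keeping aspect ratio.

    Dimensions are rounded to a multiple of 8 because SD latents are 1/8 scale.
    """
    scale = math.sqrt((native * native) / (width * height))
    w = max(multiple, round(width * scale / multiple) * multiple)
    h = max(multiple, round(height * scale / multiple) * multiple)
    return w, h

# First pass for an eventual 1536x1024 render with a 512px-native model:
print(ideal_size(1536, 1024))  # (624, 416)
```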
@@ -13,46 +13,69 @@ We thank them for all of their time and hard work.
 
 - [Lincoln D. Stein](mailto:lincoln.stein@gmail.com)
 
-## **Current core team**
+## **Current Core Team**
 
 * @lstein (Lincoln Stein) - Co-maintainer
 * @blessedcoolant - Co-maintainer
 * @hipsterusername (Kent Keirsey) - Co-maintainer, CEO, Positive Vibes
 * @psychedelicious (Spencer Mabrito) - Web Team Leader
-* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
-* @damian0815 - Attention Systems and Compel Maintainer
-* @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
-* @genomancer (Gregg Helt) - Controlnet support
-* @StAlKeR7779 (Sergey Borisov) - Torch stack, ONNX, model management, optimization
+* @chainchompa (Jennifer Player) - Web Development & Chain-Chomping
+* @josh is toast (Josh Corbett) - Web Development
+* @cheerio (Mary Rogers) - Lead Engineer & Web App Development
+* @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
 * @sunija - Standalone version
+* @genomancer (Gregg Helt) - Controlnet support
 * @brandon (Brandon Rising) - Platform, Infrastructure, Backend Systems
 * @ryanjdick (Ryan Dick) - Machine Learning & Training
-* @millu (Millun Atluri) - Community Manager, Documentation, Node-wrangler
-* @chainchompa (Jennifer Player) - Web Development & Chain-Chomping
+* @JPPhoto - Core image generation nodes
+* @dunkeroni - Image generation backend
+* @SkunkWorxDark - Image generation backend
 * @keturn (Kevin Turner) - Diffusers
+* @millu (Millun Atluri) - Community Wizard, Documentation, Node-wrangler
+* @glimmerleaf (Devon Hopkins) - Community Wizard
+* @gogurt enjoyer - Discord moderator and end user support
+* @whosawhatsis - Discord moderator and end user support
+* @dwinrger - Discord moderator and end user support
+* @526christian - Discord moderator and end user support
+* @harvester62 - Discord moderator and end user support
 
 
 ## **Honored Team Alumni**
 
+* @StAlKeR7779 (Sergey Borisov) - Torch stack, ONNX, model management, optimization
+* @damian0815 - Attention Systems and Compel Maintainer
 * @netsvetaev (Artur) - Localization support
+* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
 * @tildebyte - Installation and configuration
 * @mauwii (Matthias Wilde) - Installation, release, continuous integration
 
 
 ## **Full List of Contributors by Commit Name**
 
 - 이승석
 - AbdBarho
 - ablattmann
 - AdamOStark
 - Adam Rice
 - Airton Silva
 - Aldo Hoeben
 - Alexander Eichhorn
 - Alexandre D. Roberge
 - Alexandre Macabies
 - Alfie John
 - Andreas Rozek
 - Andre LaBranche
 - Andy Bearman
 - Andy Luhrs
 - Andy Pilate
 - Anonymous
 - Anthony Monthe
 - Any-Winter-4079
 - apolinario
 - Ar7ific1al
 - ArDiouscuros
 - Armando C. Santisbon
 - Arnold Cordewiner
 - Arthur Holstvoogd
 - artmen1516
 - Artur
@@ -64,13 +87,16 @@ We thank them for all of their time and hard work.
 - blhook
 - BlueAmulet
 - Bouncyknighter
 - Brandon
 - Brandon Rising
 - Brent Ozar
 - Brian Racer
 - bsilvereagle
 - c67e708d
 - camenduru
 - CapableWeb
 - Carson Katri
 - chainchompa
 - Chloe
 - Chris Dawson
 - Chris Hayes
@@ -86,30 +112,45 @@ We thank them for all of their time and hard work.
 - cpacker
 - Cragin Godley
 - creachec
 - CrypticWit
 - d8ahazard
 - damian
 - damian0815
 - Damian at mba
 - Damian Stewart
 - Daniel Manzke
 - Danny Beer
 - Dan Sully
 - Darren Ringer
 - David Burnett
 - David Ford
 - David Regla
 - David Sisco
 - David Wager
 - Daya Adianto
 - db3000
 - DekitaRPG
 - Denis Olshin
 - Dennis
 - dependabot[bot]
 - Dmitry Parnas
 - Dobrynia100
 - Dominic Letz
 - DrGunnarMallon
 - Drun555
 - dunkeroni
 - Edward Johan
 - elliotsayes
 - Elrik
 - ElrikUnderlake
 - Eric Khun
 - Eric Wolf
 - Eugene
 - Eugene Brodsky
 - ExperimentalCyborg
 - Fabian Bahl
 - Fabio 'MrWHO' Torchetti
 - Fattire
 - fattire
 - Felipe Nogueira
 - Félix Sanz
@@ -118,8 +159,12 @@ We thank them for all of their time and hard work.
 - gabrielrotbart
 - gallegonovato
 - Gérald LONLAS
 - Gille
 - GitHub Actions Bot
 - glibesyck
 - gogurtenjoyer
 - Gohsuke Shimada
 - greatwolf
 - greentext2
 - Gregg Helt
 - H4rk
@@ -131,6 +176,7 @@ We thank them for all of their time and hard work.
 - Hosted Weblate
 - Iman Karim
 - ismail ihsan bülbül
 - ItzAttila
 - Ivan Efimov
 - jakehl
 - Jakub Kolčář
@@ -141,6 +187,7 @@ We thank them for all of their time and hard work.
 - Jason Toffaletti
 - Jaulustus
 - Jeff Mahoney
 - Jennifer Player
 - jeremy
 - Jeremy Clark
 - JigenD
@@ -148,19 +195,26 @@ We thank them for all of their time and hard work.
 - Johan Roxendal
 - Johnathon Selstad
 - Jonathan
 - Jordan Hewitt
 - Joseph Dries III
 - Josh Corbett
 - JPPhoto
 - jspraul
 - junzi
 - Justin Wong
 - Juuso V
 - Kaspar Emanuel
 - Katsuyuki-Karasawa
 - Keerigan45
 - Kent Keirsey
 - Kevin Brack
 - Kevin Coakley
 - Kevin Gibbons
 - Kevin Schaul
 - Kevin Turner
 - Kieran Klaassen
 - krummrey
 - Kyle
 - Kyle Lacy
 - Kyle Schouviller
 - Lawrence Norton
@@ -171,10 +225,15 @@ We thank them for all of their time and hard work.
 - Lynne Whitehorn
 - majick
 - Marco Labarile
 - Marta Nahorniuk
 - Martin Kristiansen
 - Mary Hipp
 - maryhipp
 - Mary Hipp Rogers
 - mastercaster
 - mastercaster9000
 - Matthias Wild
 - mauwii
 - michaelk71
 - mickr777
 - Mihai
@@ -182,11 +241,15 @@ We thank them for all of their time and hard work.
 - Mikhail Tishin
 - Millun Atluri
 - Minjune Song
 - Mitchell Allain
 - mitien
 - mofuzz
 - Muhammad Usama
 - Name
 - _nderscore
 - Neil Wang
 - nekowaiz
 - nemuruibai
 - Netzer R
 - Nicholas Koh
 - Nicholas Körfer
@@ -197,9 +260,11 @@ We thank them for all of their time and hard work.
 - ofirkris
 - Olivier Louvignes
 - owenvincent
 - pand4z31
 - Patrick Esser
 - Patrick Tien
 - Patrick von Platen
 - Paul Curry
 - Paul Sajna
 - pejotr
 - Peter Baylies
@@ -207,6 +272,7 @@ We thank them for all of their time and hard work.
 - plucked
 - prixt
 - psychedelicious
 - psychedelicious@windows
 - Rainer Bernhardt
 - Riccardo Giovanetti
 - Rich Jones
@@ -215,17 +281,22 @@ We thank them for all of their time and hard work.
 - Robert Bolender
 - Robin Rombach
 - Rohan Barar
 - rohinish404
 - Rohinish
 - rpagliuca
 - rromb
 - Rupesh Sreeraman
 - Ryan
 - Ryan Cao
 - Ryan Dick
 - Saifeddine
 - Saifeddine ALOUI
 - Sam
 - SammCheese
 - Sam McLeod
 - Sammy
 - sammyf
 - Samuel Husso
 - Saurav Maheshkar
 - Scott Lahteine
 - Sean McLellan
 - Sebastian Aigner
@@ -233,16 +304,21 @@ We thank them for all of their time and hard work.
 - Sergey Krashevich
 - Shapor Naghibzadeh
 - Shawn Zhong
 - Simona Liliac
 - Simon Vans-Colina
 - skunkworxdark
 - slashtechno
 - SoheilRezaei
 - Song, Pengcheng
 - spezialspezial
 - ssantos
 - StAlKeR7779
 - Stefan Tobler
 - Stephan Koglin-Fischer
 - SteveCaruso
 - Steve Martinelli
 - Steven Frank
 - Surisen
 - System X - Files
 - Taylor Kems
 - techicode
@@ -261,26 +337,34 @@ We thank them for all of their time and hard work.
 - tyler
 - unknown
 - user1
 - vedant-3010
 - Vedant Madane
 - veprogames
 - wa.code
 - wfng92
 - whjms
 - whosawhatsis
 - Will
 - William Becher
 - William Chong
 - Wilson E. Alvarez
 - woweenie
 - Wubbbi
 - xra
 - Yeung Yiu Hung
 - ymgenesis
 - Yorzaren
 - Yosuke Shinya
 - yun saki
 - ZachNagengast
 - Zadagu
 - zeptofine
 - Zerdoumi
 - Васянатор
 - 冯不游
 - 唐澤 克幸
 
-## **Original CompVis Authors**
+## **Original CompVis (Stable Diffusion) Authors**
 
 - [Robin Rombach](https://github.com/rromb)
 - [Patrick von Platen](https://github.com/patrickvonplaten)

(One file's diff is suppressed because it is too large and is not shown.)
@@ -14,11 +14,19 @@ function is_bin_in_path {
 }
 
 function git_show {
-    git show -s --format='%h %s' $1
+    git show -s --format=oneline --abbrev-commit "$1" | cat
 }
 
+if [[ -v "VIRTUAL_ENV" ]]; then
+    # we can't just call 'deactivate' because this function is not exported
+    # to the environment of this script from the bash process that runs the script
+    echo -e "${BRED}A virtual environment is activated. Please deactivate it before proceeding.${RESET}"
+    exit -1
+fi
+
 cd "$(dirname "$0")"
 
 echo
 echo -e "${BYELLOW}This script must be run from the installer directory!${RESET}"
 echo "The current working directory is $(pwd)"
 read -p "If that looks right, press any key to proceed, or CTRL-C to exit..."
@@ -32,13 +40,6 @@ if ! is_bin_in_path python && is_bin_in_path python3; then
 }
 fi
 
-if [[ -v "VIRTUAL_ENV" ]]; then
-    # we can't just call 'deactivate' because this function is not exported
-    # to the environment of this script from the bash process that runs the script
-    echo -e "${BRED}A virtual environment is activated. Please deactivate it before proceeding.${RESET}"
-    exit -1
-fi
-
 VERSION=$(
     cd ..
     python -c "from invokeai.version import __version__ as version; print(version)"
@@ -47,38 +48,9 @@ PATCH=""
 VERSION="v${VERSION}${PATCH}"
 
 echo -e "${BGREEN}HEAD${RESET}:"
-git_show
+git_show HEAD
 echo
 
-# ---------------------- FRONTEND ----------------------
-
-pushd ../invokeai/frontend/web >/dev/null
-echo
-echo "Installing frontend dependencies..."
-echo
-pnpm i --frozen-lockfile
-echo
-echo "Building frontend..."
-echo
-pnpm build
-popd
-
-# ---------------------- BACKEND ----------------------
-
-echo
-echo "Building wheel..."
-echo
-
-# install the 'build' package in the user site packages, if needed
-# could be improved by using a temporary venv, but it's tiny and harmless
-if [[ $(python -c 'from importlib.util import find_spec; print(find_spec("build") is None)') == "True" ]]; then
-    pip install --user build
-fi
-
-rm -rf ../build
-
-python -m build --wheel --outdir dist/ ../.
-
-# ----------------------
-
 echo
@@ -97,16 +69,13 @@ done
 mkdir InvokeAI-Installer/lib
 cp lib/*.py InvokeAI-Installer/lib
 
-# Move the wheel
-mv dist/*.whl InvokeAI-Installer/lib/
-
 # Install scripts
 # Mac/Linux
 cp install.sh.in InvokeAI-Installer/install.sh
 chmod a+x InvokeAI-Installer/install.sh
 
 # Windows
-perl -p -e "s/^set INVOKEAI_VERSION=.*/set INVOKEAI_VERSION=$VERSION/" install.bat.in >InvokeAI-Installer/install.bat
+cp install.bat.in InvokeAI-Installer/install.bat
 cp WinLongPathsEnabled.reg InvokeAI-Installer/
 
 # Zip everything up
@@ -15,7 +15,6 @@ if "%1" == "use-cache" (
 @rem Config
 @rem The version in the next line is replaced by an up to date release number
 @rem when create_installer.sh is run. Change the release number there.
-set INVOKEAI_VERSION=latest
 set INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
 set TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
 set PYTHON_URL=https://www.python.org/downloads/windows/
@@ -11,7 +11,7 @@ import sys
 import venv
 from pathlib import Path
 from tempfile import TemporaryDirectory
-from typing import Union
+from typing import Optional, Tuple
 
 SUPPORTED_PYTHON = ">=3.10.0,<=3.11.100"
 INSTALLER_REQS = ["rich", "semver", "requests", "plumbum", "prompt-toolkit"]
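`SUPPORTED_PYTHON` is declared here, but the version check itself is outside this hunk. For illustration only, a specifier like this can be evaluated against the running interpreter as follows; this is a sketch assuming the third-party `packaging` library, not necessarily how the installer does it:

```python
import platform

from packaging.specifiers import SpecifierSet
from packaging.version import Version

SUPPORTED_PYTHON = ">=3.10.0,<=3.11.100"

# True when the interpreter running this script satisfies the specifier.
if Version(platform.python_version()) not in SpecifierSet(SUPPORTED_PYTHON):
    raise SystemExit(f"Python {platform.python_version()} is outside {SUPPORTED_PYTHON}.")
```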
@@ -21,40 +21,20 @@ OS = platform.uname().system
|
||||
ARCH = platform.uname().machine
|
||||
VERSION = "latest"
|
||||
|
||||
### Feature flags
|
||||
# Install the virtualenv into the runtime dir
|
||||
FF_VENV_IN_RUNTIME = True
|
||||
|
||||
# Install the wheel packaged with the installer
|
||||
FF_USE_LOCAL_WHEEL = True
|
||||
|
||||
|
||||
class Installer:
|
||||
"""
|
||||
Deploys an InvokeAI installation into a given path
|
||||
"""
|
||||
|
||||
reqs: list[str] = INSTALLER_REQS
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.reqs = INSTALLER_REQS
|
||||
self.preflight()
|
||||
if os.getenv("VIRTUAL_ENV") is not None:
|
||||
print("A virtual environment is already activated. Please 'deactivate' before installation.")
|
||||
sys.exit(-1)
|
||||
self.bootstrap()
|
||||
|
||||
def preflight(self) -> None:
|
||||
"""
|
||||
Preflight checks
|
||||
"""
|
||||
|
||||
# TODO
|
||||
# verify python version
|
||||
# on macOS verify XCode tools are present
|
||||
# verify libmesa, libglx on linux
|
||||
# check that the system arch is not i386 (?)
|
||||
# check that the system has a GPU, and the type of GPU
|
||||
|
||||
pass
|
||||
self.available_releases = get_github_releases()
|
||||
|
||||
def mktemp_venv(self) -> TemporaryDirectory:
|
||||
"""
|
||||
@@ -78,12 +58,9 @@ class Installer:
|
||||
|
||||
return venv_dir
|
||||
|
||||
def bootstrap(self, verbose: bool = False) -> TemporaryDirectory:
|
||||
def bootstrap(self, verbose: bool = False) -> TemporaryDirectory | None:
|
||||
"""
|
||||
Bootstrap the installer venv with packages required at install time
|
||||
|
||||
:return: path to the virtual environment directory that was bootstrapped
|
||||
:rtype: TemporaryDirectory
|
||||
"""
|
||||
|
||||
print("Initializing the installer. This may take a minute - please wait...")
|
||||
@@ -95,39 +72,27 @@ class Installer:
|
||||
cmd.extend(self.reqs)
|
||||
|
||||
try:
|
||||
res = subprocess.check_output(cmd).decode()
|
||||
# upgrade pip to the latest version to avoid a confusing message
|
||||
res = upgrade_pip(Path(venv_dir.name))
|
||||
if verbose:
|
||||
print(res)
|
||||
|
||||
# run the install prerequisites installation
|
||||
res = subprocess.check_output(cmd).decode()
|
||||
|
||||
if verbose:
|
||||
print(res)
|
||||
|
||||
return venv_dir
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(e)
|
||||
|
||||
def app_venv(self, path: str = None):
|
||||
def app_venv(self, venv_parent) -> Path:
|
||||
"""
|
||||
Create a virtualenv for the InvokeAI installation
|
||||
"""
|
||||
|
||||
# explicit venv location
|
||||
# currently unused in normal operation
|
||||
# useful for testing or special cases
|
||||
if path is not None:
|
||||
venv_dir = Path(path)
|
||||
|
||||
# experimental / testing
|
||||
elif not FF_VENV_IN_RUNTIME:
|
||||
if OS == "Windows":
|
||||
venv_dir_parent = os.getenv("APPDATA", "~/AppData/Roaming")
|
||||
elif OS == "Darwin":
|
||||
# there is no environment variable on macOS to find this
|
||||
# TODO: confirm this is working as expected
|
||||
venv_dir_parent = "~/Library/Application Support"
|
||||
elif OS == "Linux":
|
||||
venv_dir_parent = os.getenv("XDG_DATA_DIR", "~/.local/share")
|
||||
venv_dir = Path(venv_dir_parent).expanduser().resolve() / f"InvokeAI/{VERSION}/venv"
|
||||
|
||||
# stable / current
|
||||
else:
|
||||
venv_dir = self.dest / ".venv"
|
||||
venv_dir = venv_parent / ".venv"
|
||||
|
||||
# Prefer to copy python executables
|
||||
# so that updates to system python don't break InvokeAI
|
||||
@@ -141,7 +106,7 @@ class Installer:
|
||||
return venv_dir
|
||||
|
||||
def install(
|
||||
self, root: str = "~/invokeai", version: str = "latest", yes_to_all=False, find_links: Path = None
|
||||
self, version=None, root: str = "~/invokeai", yes_to_all=False, find_links: Optional[Path] = None
|
||||
) -> None:
|
||||
"""
|
||||
Install the InvokeAI application into the given runtime path
|
||||
@@ -158,15 +123,20 @@ class Installer:
|
||||
|
||||
import messages
|
||||
|
||||
messages.welcome()
|
||||
messages.welcome(self.available_releases)
|
||||
|
||||
default_path = os.environ.get("INVOKEAI_ROOT") or Path(root).expanduser().resolve()
|
||||
self.dest = default_path if yes_to_all else messages.dest_path(root)
|
||||
version = messages.choose_version(self.available_releases)
|
||||
|
||||
auto_dest = Path(os.environ.get("INVOKEAI_ROOT", root)).expanduser().resolve()
|
||||
destination = auto_dest if yes_to_all else messages.dest_path(root)
|
||||
if destination is None:
|
||||
print("Could not find or create the destination directory. Installation cancelled.")
|
||||
sys.exit(0)
|
||||
|
||||
# create the venv for the app
|
||||
self.venv = self.app_venv()
|
||||
self.venv = self.app_venv(venv_parent=destination)
|
||||
|
||||
self.instance = InvokeAiInstance(runtime=self.dest, venv=self.venv, version=version)
|
||||
self.instance = InvokeAiInstance(runtime=destination, venv=self.venv, version=version)
|
||||
|
||||
# install dependencies and the InvokeAI application
|
||||
(extra_index_url, optional_modules) = get_torch_source() if not yes_to_all else (None, None)
|
||||
@@ -190,7 +160,7 @@ class InvokeAiInstance:
|
||||
A single runtime directory *may* be shared by multiple virtual environments, though this isn't currently tested or supported.
|
||||
"""
|
||||
|
||||
def __init__(self, runtime: Path, venv: Path, version: str) -> None:
|
||||
def __init__(self, runtime: Path, venv: Path, version: str = "stable") -> None:
|
||||
self.runtime = runtime
|
||||
self.venv = venv
|
||||
self.pip = get_pip_from_venv(venv)
|
||||
@@ -199,6 +169,7 @@ class InvokeAiInstance:
|
||||
set_sys_path(venv)
|
||||
os.environ["INVOKEAI_ROOT"] = str(self.runtime.expanduser().resolve())
|
||||
os.environ["VIRTUAL_ENV"] = str(self.venv.expanduser().resolve())
|
||||
upgrade_pip(venv)
|
||||
|
||||
def get(self) -> tuple[Path, Path]:
|
||||
"""
|
||||
@@ -212,54 +183,7 @@ class InvokeAiInstance:
|
||||
|
||||
def install(self, extra_index_url=None, optional_modules=None, find_links=None):
|
||||
"""
|
||||
Install this instance, including dependencies and the app itself
|
||||
|
||||
:param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
|
||||
:type extra_index_url: str
|
||||
"""
|
||||
|
||||
import messages
|
||||
|
||||
# install torch first to ensure the correct version gets installed.
|
||||
# works with either source or wheel install with negligible impact on installation times.
|
||||
messages.simple_banner("Installing PyTorch :fire:")
|
||||
self.install_torch(extra_index_url, find_links)
|
||||
|
||||
messages.simple_banner("Installing the InvokeAI Application :art:")
|
||||
self.install_app(extra_index_url, optional_modules, find_links)
|
||||
|
||||
def install_torch(self, extra_index_url=None, find_links=None):
|
||||
"""
|
||||
Install PyTorch
|
||||
"""
|
||||
|
||||
from plumbum import FG, local
|
||||
|
||||
pip = local[self.pip]
|
||||
|
||||
(
|
||||
pip[
|
||||
"install",
|
||||
"--require-virtualenv",
|
||||
"numpy==1.26.3", # choose versions that won't be uninstalled during phase 2
|
||||
"urllib3~=1.26.0",
|
||||
"requests~=2.28.0",
|
||||
"torch==2.1.2",
|
||||
"torchmetrics==0.11.4",
|
||||
"torchvision==0.16.2",
|
||||
"--force-reinstall",
|
||||
"--find-links" if find_links is not None else None,
|
||||
find_links,
|
||||
"--extra-index-url" if extra_index_url is not None else None,
|
||||
extra_index_url,
|
||||
]
|
||||
& FG
|
||||
)
|
||||
|
||||
def install_app(self, extra_index_url=None, optional_modules=None, find_links=None):
|
||||
"""
|
||||
Install the application with pip.
|
||||
Supports installation from PyPi or from a local source directory.
|
||||
Install the package from PyPi.
|
||||
|
||||
:param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
|
||||
:type extra_index_url: str
|
||||
@@ -271,53 +195,52 @@ class InvokeAiInstance:
|
||||
:type find_links: Path
|
||||
"""
|
||||
|
||||
## this only applies to pypi installs; TODO actually use this
|
||||
if self.version == "pre":
|
||||
import messages
|
||||
|
||||
# not currently used, but may be useful for "install most recent version" option
|
||||
if self.version == "prerelease":
|
||||
version = None
|
||||
pre = "--pre"
|
||||
pre_flag = "--pre"
|
||||
elif self.version == "stable":
|
||||
version = None
|
||||
pre_flag = None
|
||||
else:
|
||||
version = self.version
|
||||
pre = None
|
||||
pre_flag = None
|
||||
|
||||
## TODO: only local wheel will be installed as of now; support for --version arg is TODO
|
||||
if FF_USE_LOCAL_WHEEL:
|
||||
# if no wheel, try to do a source install before giving up
|
||||
try:
|
||||
src = str(next(Path(__file__).parent.glob("InvokeAI-*.whl")))
|
||||
except StopIteration:
|
||||
try:
|
||||
src = Path(__file__).parents[1].expanduser().resolve()
|
||||
# if the above directory contains one of these files, we'll do a source install
|
||||
next(src.glob("pyproject.toml"))
|
||||
next(src.glob("invokeai"))
|
||||
except StopIteration:
|
||||
print("Unable to find a wheel or perform a source install. Giving up.")
|
||||
src = "invokeai"
|
||||
if optional_modules:
|
||||
src += optional_modules
|
||||
if version:
|
||||
src += f"=={version}"
|
||||
|
||||
elif version == "source":
|
||||
# this makes an assumption about the location of the installer package in the source tree
|
||||
src = Path(__file__).parents[1].expanduser().resolve()
|
||||
else:
|
||||
# will install from PyPi
|
||||
src = f"invokeai=={version}" if version is not None else "invokeai"
|
||||
messages.simple_banner("Installing the InvokeAI Application :art:")
|
||||
|
||||
from plumbum import FG, local
|
||||
from plumbum import FG, ProcessExecutionError, local # type: ignore
|
||||
|
||||
pip = local[self.pip]
|
||||
|
||||
(
|
||||
pip[
|
||||
"install",
|
||||
"--require-virtualenv",
|
||||
"--use-pep517",
|
||||
str(src) + (optional_modules if optional_modules else ""),
|
||||
"--find-links" if find_links is not None else None,
|
||||
find_links,
|
||||
"--extra-index-url" if extra_index_url is not None else None,
|
||||
extra_index_url,
|
||||
pre,
|
||||
]
|
||||
& FG
|
||||
)
|
||||
pipeline = pip[
|
||||
"install",
|
||||
"--require-virtualenv",
|
||||
"--force-reinstall",
|
||||
"--use-pep517",
|
||||
str(src),
|
||||
"--find-links" if find_links is not None else None,
|
||||
find_links,
|
||||
"--extra-index-url" if extra_index_url is not None else None,
|
||||
extra_index_url,
|
||||
pre_flag,
|
||||
]
|
||||
|
||||
try:
|
||||
_ = pipeline & FG
|
||||
except ProcessExecutionError as e:
|
||||
print(f"Error: {e}")
|
||||
print(
|
||||
"Could not install InvokeAI. Please try downloading the latest version of the installer and install again."
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
def configure(self):
|
||||
"""
|
||||
@@ -373,7 +296,6 @@ class InvokeAiInstance:
|
||||
|
||||
ext = "bat" if OS == "Windows" else "sh"
|
||||
|
||||
# scripts = ['invoke', 'update']
|
||||
scripts = ["invoke"]
|
||||
|
||||
for script in scripts:
|
||||
@@ -408,6 +330,23 @@ def get_pip_from_venv(venv_path: Path) -> str:
|
||||
return str(venv_path.expanduser().resolve() / pip)
|
||||
|
||||
|
||||
def upgrade_pip(venv_path: Path) -> str | None:
|
||||
"""
|
||||
Upgrade the pip executable in the given virtual environment
|
||||
"""
|
||||
|
||||
python = "Scripts\\python.exe" if OS == "Windows" else "bin/python"
|
||||
python = str(venv_path.expanduser().resolve() / python)
|
||||
|
||||
try:
|
||||
result = subprocess.check_output([python, "-m", "pip", "install", "--upgrade", "pip"]).decode()
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(e)
|
||||
result = None
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def set_sys_path(venv_path: Path) -> None:
|
||||
"""
|
||||
Given a path to a virtual environment, set the sys.path, in a cross-platform fashion,
|
||||
@@ -431,7 +370,43 @@ def set_sys_path(venv_path: Path) -> None:
|
||||
sys.path.append(str(Path(venv_path, lib, "site-packages").expanduser().resolve()))
|
||||
|
||||
|
||||
def get_torch_source() -> (Union[str, None], str):
|
||||
def get_github_releases() -> tuple[list, list] | None:
|
||||
"""
|
||||
Query Github for published (pre-)release versions.
|
||||
Return a tuple where the first element is a list of stable releases and the second element is a list of pre-releases.
|
||||
Return None if the query fails for any reason.
|
||||
"""
|
||||
|
||||
import requests
|
||||
|
||||
## get latest releases using github api
|
||||
url = "https://api.github.com/repos/invoke-ai/InvokeAI/releases"
|
||||
releases, pre_releases = [], []
|
||||
try:
|
||||
res = requests.get(url)
|
||||
res.raise_for_status()
|
||||
tag_info = res.json()
|
||||
for tag in tag_info:
|
||||
if not tag["prerelease"]:
|
||||
releases.append(tag["tag_name"].lstrip("v"))
|
||||
else:
|
||||
pre_releases.append(tag["tag_name"].lstrip("v"))
|
||||
except requests.HTTPError as e:
|
||||
print(f"Error: {e}")
|
||||
print("Could not fetch version information from GitHub. Please check your network connection and try again.")
|
||||
return
|
||||
except Exception as e:
|
||||
print(f"Error: {e}")
|
||||
print("An unexpected error occurred while trying to fetch version information from GitHub. Please try again.")
|
||||
return
|
||||
|
||||
releases.sort(reverse=True)
|
||||
pre_releases.sort(reverse=True)
|
||||
|
||||
return releases, pre_releases


def get_torch_source() -> Tuple[str | None, str | None]:
    """
    Determine the extra index URL for pip to use for torch installation.
    This depends on the OS and the graphics accelerator in use.
@@ -446,25 +421,26 @@ def get_torch_source() -> (Union[str, None], str):
    :rtype: list
    """

    from messages import graphical_accelerator
    from messages import select_gpu

    # device can be one of: "cuda", "rocm", "cpu", "idk"
    device = graphical_accelerator()
    # device can be one of: "cuda", "rocm", "cpu", "cuda_and_dml", "autodetect"
    device = select_gpu()

    url = None
    optional_modules = "[onnx]"
    if OS == "Linux":
        if device == "rocm":
            url = "https://download.pytorch.org/whl/rocm5.4.2"
        elif device == "cpu":
        if device.value == "rocm":
            url = "https://download.pytorch.org/whl/rocm5.6"
        elif device.value == "cpu":
            url = "https://download.pytorch.org/whl/cpu"

    if device == "cuda":
        url = "https://download.pytorch.org/whl/cu121"
        optional_modules = "[xformers,onnx-cuda]"
    if device == "cuda_and_dml":
        url = "https://download.pytorch.org/whl/cu121"
        optional_modules = "[xformers,onnx-directml]"
    elif OS == "Windows":
        if device.value == "cuda":
            url = "https://download.pytorch.org/whl/cu121"
            optional_modules = "[xformers,onnx-cuda]"
        if device.value == "cuda_and_dml":
            url = "https://download.pytorch.org/whl/cu121"
            optional_modules = "[xformers,onnx-directml]"

    # in all other cases, Torch wheels should be coming from PyPI as of Torch 1.13
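For illustration, how the (url, optional_modules) pair returned above is typically spliced into the pip invocation; the consuming code is outside this hunk, so this is a sketch:

url, optional_modules = get_torch_source()

cmd = ["pip", "install", f"InvokeAI{optional_modules or ''}"]
if url is not None:
    cmd += ["--extra-index-url", url]
# e.g.: pip install "InvokeAI[xformers,onnx-cuda]" --extra-index-url https://download.pytorch.org/whl/cu121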


@@ -5,10 +5,11 @@ Installer user interaction

import os
import platform
from enum import Enum
from pathlib import Path

from prompt_toolkit import HTML, prompt
from prompt_toolkit.completion import PathCompleter
from prompt_toolkit.completion import FuzzyWordCompleter, PathCompleter
from prompt_toolkit.validation import Validator
from rich import box, print
from rich.console import Console, Group, group
@@ -35,16 +36,26 @@ else:
    console = Console(style=Style(color="grey74", bgcolor="grey19"))


def welcome():
def welcome(available_releases: tuple | None = None) -> None:
    @group()
    def text():
        if (platform_specific := _platform_specific_help()) != "":
        if (platform_specific := _platform_specific_help()) is not None:
            yield platform_specific
            yield ""
        yield Text.from_markup(
            "Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with [i]Control-C[/] and retry.",
            justify="center",
        )
        if available_releases is not None:
            latest_stable = available_releases[0][0]
            last_pre = available_releases[1][0]
            yield ""
            yield Text.from_markup(
                f"[red3]🠶[/] Latest stable release (recommended): [b bright_white]{latest_stable}", justify="center"
            )
            yield Text.from_markup(
                f"[red3]🠶[/] Last published pre-release version: [b bright_white]{last_pre}", justify="center"
            )

    console.rule()
    print(
@@ -61,19 +72,30 @@ def welcome():
    console.line()


def confirm_install(dest: Path) -> bool:
    if dest.exists():
        print(f":exclamation: Directory {dest} already exists :exclamation:")
        dest_confirmed = Confirm.ask(
            ":stop_sign: (re)install in this location?",
            default=False,
        )
    else:
        print(f"InvokeAI will be installed in {dest}")
        dest_confirmed = Confirm.ask("Use this location?", default=True)
def choose_version(available_releases: tuple | None = None) -> str:
    """
    Prompt the user to choose an Invoke version to install.
    """

    # short circuit if we couldn't get a version list
    # still try to install the latest stable version
    if available_releases is None:
        return "stable"

    console.print(":grey_question: [orange3]Please choose an Invoke version to install.")

    choices = available_releases[0] + available_releases[1]

    response = prompt(
        message=f" <Enter> to install the recommended release ({choices[0]}). <Tab> or type to pick a version: ",
        complete_while_typing=True,
        completer=FuzzyWordCompleter(choices),
    )

    console.print(f" Version {response or choices[0]} will be installed.")
    console.line()

    return dest_confirmed
    return "stable" if response == "" else response


def user_wants_auto_configuration() -> bool:
@@ -109,7 +131,23 @@ def user_wants_auto_configuration() -> bool:
    return choice.lower().startswith("a")


def dest_path(dest=None) -> Path:
def confirm_install(dest: Path) -> bool:
    if dest.exists():
        print(f":stop_sign: Directory {dest} already exists!")
        print(" Is this location correct?")
        default = False
    else:
        print(f":file_folder: InvokeAI will be installed in {dest}")
        default = True

    dest_confirmed = Confirm.ask(" Please confirm:", default=default)

    console.line()

    return dest_confirmed


def dest_path(dest=None) -> Path | None:
    """
    Prompt the user for the destination path and create the path

@@ -124,25 +162,21 @@ def dest_path(dest=None) -> Path:
    else:
        dest = Path.cwd().expanduser().resolve()
    prev_dest = init_path = dest

    dest_confirmed = confirm_install(dest)
    dest_confirmed = False

    while not dest_confirmed:
        # if the given destination already exists, the starting point for browsing is its parent directory.
        # the user may have made a typo, or otherwise wants to place the root dir next to an existing one.
        # if the destination dir does NOT exist, then the user must have changed their mind about the selection.
        # since we can't read their mind, start browsing at Path.cwd().
        browse_start = (prev_dest.parent if prev_dest.exists() else Path.cwd()).expanduser().resolve()
        browse_start = (dest or Path.cwd()).expanduser().resolve()

        path_completer = PathCompleter(
            only_directories=True,
            expanduser=True,
            get_paths=lambda: [browse_start],  # noqa: B023
            get_paths=lambda: [str(browse_start)],  # noqa: B023
            # get_paths=lambda: [".."].extend(list(browse_start.iterdir()))
        )

        console.line()
        console.print(f"[orange3]Please select the destination directory for the installation:[/] \\[{browse_start}]: ")

        console.print(f":grey_question: [orange3]Please select the install destination:[/] \\[{browse_start}]: ")
        selected = prompt(
            ">>> ",
            complete_in_thread=True,
@@ -155,6 +189,7 @@ def dest_path(dest=None) -> Path:
        )
        prev_dest = dest
        dest = Path(selected)

        console.line()

        dest_confirmed = confirm_install(dest.expanduser().resolve())
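A compact illustration of the PathCompleter configuration used above (standalone; assumes prompt_toolkit is installed):

from pathlib import Path

from prompt_toolkit import prompt
from prompt_toolkit.completion import PathCompleter

start = Path.cwd()
completer = PathCompleter(
    only_directories=True,
    expanduser=True,
    get_paths=lambda: [str(start)],  # complete relative to a fixed starting directory
)
selected = prompt(">>> ", completer=completer, complete_in_thread=True)
print(Path(selected).expanduser().resolve())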
@@ -182,41 +217,45 @@ def dest_path(dest=None) -> Path:
        console.rule("Goodbye!")


def graphical_accelerator():
class GpuType(Enum):
    CUDA = "cuda"
    CUDA_AND_DML = "cuda_and_dml"
    ROCM = "rocm"
    CPU = "cpu"
    AUTODETECT = "autodetect"


def select_gpu() -> GpuType:
    """
    Prompt the user to select the graphical accelerator in their system
    This does not validate user's choices (yet), but only offers choices
    valid for the platform.
    CUDA is the fallback.
    We may be able to detect the GPU driver by shelling out to `modprobe` or `lspci`,
    but this is not yet supported or reliable. Also, some users may have exotic preferences.
    Prompt the user to select the GPU driver
    """

    if ARCH == "arm64" and OS != "Darwin":
        print(f"Only CPU acceleration is available on {ARCH} architecture. Proceeding with that.")
        return "cpu"
        return GpuType.CPU

    nvidia = (
        "an [gold1 b]NVIDIA[/] GPU (using CUDA™)",
        "cuda",
        GpuType.CUDA,
    )
    nvidia_with_dml = (
        "an [gold1 b]NVIDIA[/] GPU (using CUDA™, and DirectML™ for ONNX) -- ALPHA",
        "cuda_and_dml",
        GpuType.CUDA_AND_DML,
    )
    amd = (
        "an [gold1 b]AMD[/] GPU (using ROCm™)",
        "rocm",
        GpuType.ROCM,
    )
    cpu = (
        "no compatible GPU, or specifically prefer to use the CPU",
        "cpu",
        "Do not install any GPU support, use CPU for generation (slow)",
        GpuType.CPU,
    )
    idk = (
    autodetect = (
        "I'm not sure what to choose",
        "idk",
        GpuType.AUTODETECT,
    )

    options = []
    if OS == "Windows":
        options = [nvidia, nvidia_with_dml, cpu]
    if OS == "Linux":
@@ -230,7 +269,7 @@ def graphical_accelerator():
        return options[0][1]

    # "I don't know" is always added as the last option
    options.append(idk)
    options.append(autodetect)  # type: ignore

    options = {str(i): opt for i, opt in enumerate(options, 1)}

@@ -265,9 +304,9 @@ def graphical_accelerator():
        ),
    )

    if options[choice][1] == "idk":
    if options[choice][1] is GpuType.AUTODETECT:
        console.print(
            "No problem. We will try to install a version that [i]should[/i] be compatible. :crossed_fingers:"
            "No problem. We will install CUDA support first :crossed_fingers: If Invoke does not detect a GPU, please re-run the installer and select one of the other GPU types."
        )

    return options[choice][1]
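A brief aside on why the identity comparison (is GpuType.AUTODETECT) above is safe: Enum members are singletons, and constructing by value returns the same member object.

from enum import Enum

class GpuType(Enum):
    CUDA = "cuda"
    AUTODETECT = "autodetect"

assert GpuType("cuda") is GpuType.CUDA           # value lookup returns the singleton member
assert GpuType.AUTODETECT.value == "autodetect"  # .value is what get_torch_source() compares against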
@@ -291,7 +330,7 @@ def windows_long_paths_registry() -> None:
    """

    with open(str(Path(__file__).parent / "WinLongPathsEnabled.reg"), "r", encoding="utf-16le") as code:
        syntax = Syntax(code.read(), line_numbers=True)
        syntax = Syntax(code.read(), line_numbers=True, lexer="regedit")

    console.print(
        Panel(
@@ -301,7 +340,7 @@ def windows_long_paths_registry() -> None:
                    "We will now apply a registry fix to enable long paths on Windows. InvokeAI needs this to function correctly. We are asking your permission to modify the Windows Registry on your behalf.",
                    "",
                    "This is the change that will be applied:",
                    syntax,
                    str(syntax),
                ]
            )
        ),
@@ -340,7 +379,7 @@ def introduction() -> None:
    console.line(2)


def _platform_specific_help() -> str:
def _platform_specific_help() -> Text | None:
    if OS == "Darwin":
        text = Text.from_markup(
            """[b wheat1]macOS Users![/]\n\nPlease be sure you have the [b wheat1]Xcode command-line tools[/] installed before continuing.\nIf not, cancel with [i]Control-C[/] and follow the Xcode install instructions at [deep_sky_blue1]https://www.freecodecamp.org/news/install-xcode-command-line-tools/[/]."""
@@ -354,5 +393,5 @@ def _platform_specific_help() -> str:
            [deep_sky_blue1]https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170[/]"""
        )
    else:
        text = ""
        return
    return text

@@ -15,7 +15,7 @@ echo 4. Download and install models
echo 5. Change InvokeAI startup options
echo 6. Re-run the configure script to fix a broken install or to complete a major upgrade
echo 7. Open the developer console
echo 8. Update InvokeAI
echo 8. Update InvokeAI (DEPRECATED - please use the installer)
echo 9. Run the InvokeAI image database maintenance script
echo 10. Command-line help
echo Q - Quit
@@ -52,8 +52,10 @@ IF /I "%choice%" == "1" (
    echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
    call cmd /k
) ELSE IF /I "%choice%" == "8" (
    echo Running invokeai-update...
    python -m invokeai.frontend.install.invokeai_update
    echo UPDATING FROM WITHIN THE APP IS BEING DEPRECATED.
    echo Please download the installer from https://github.com/invoke-ai/InvokeAI/releases/latest and run it to update your installation.
    timeout 4
    python -m invokeai.frontend.install.invokeai_update
) ELSE IF /I "%choice%" == "9" (
    echo Running the db maintenance script...
    python .venv\Scripts\invokeai-db-maintenance.exe
@@ -77,4 +79,3 @@ pause

:ending
exit /b


@@ -90,7 +90,9 @@ do_choice() {
        ;;
    8)
        clear
        printf "Update InvokeAI\n"
        printf "UPDATING FROM WITHIN THE APP IS BEING DEPRECATED\n"
        printf "Please download the installer from https://github.com/invoke-ai/InvokeAI/releases/latest and run it to update your installation.\n"
        sleep 4
        python -m invokeai.frontend.install.invokeai_update
        ;;
    9)
@@ -122,7 +124,7 @@ do_dialog() {
        5 "Change InvokeAI startup options"
        6 "Re-run the configure script to fix a broken install or to complete a major upgrade"
        7 "Open the developer console"
        8 "Update InvokeAI"
        8 "Update InvokeAI (DEPRECATED - please use the installer)"
        9 "Run the InvokeAI image database maintenance script"
        10 "Command-line help"
    )

@@ -1,72 +0,0 @@
@echo off
setlocal EnableExtensions EnableDelayedExpansion

PUSHD "%~dp0"

set INVOKE_AI_VERSION=latest
set arg=%1
if "%arg%" neq "" (
    if "%arg:~0,2%" equ "/?" (
        echo Usage: update.bat ^<release name or branch^>
        echo Updates InvokeAI to use the indicated version of the code base.
        echo Find the version or branch for the release you want, and pass it as the argument.
        echo For example '.\update.bat v2.2.5' for release 2.2.5.
        echo '.\update.bat main' for the latest development version
        echo.
        echo If no argument provided then will install the most recent release, equivalent to
        echo '.\update.bat latest'
        exit /b
    ) else (
        set INVOKE_AI_VERSION=%arg%
    )
)

set INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive/!INVOKE_AI_VERSION!.zip"
set INVOKE_AI_DEP=https://raw.githubusercontent.com/invoke-ai/InvokeAI/!INVOKE_AI_VERSION!/environments-and-requirements/requirements-base.txt
set INVOKE_AI_MODELS=https://raw.githubusercontent.com/invoke-ai/InvokeAI/$INVOKE_AI_VERSION/configs/INITIAL_MODELS.yaml

call curl -I "%INVOKE_AI_DEP%" -fs >.tmp.out
if %errorlevel% neq 0 (
    echo '!INVOKE_AI_VERSION!' is not a known branch name or tag. Please check the version and try again.
    echo "Press any key to continue"
    pause
    exit /b
)
del .tmp.out

echo This script will update InvokeAI and all its dependencies to !INVOKE_AI_SRC!.
echo If you do not want to do this, press control-C now!
pause

call curl -L "%INVOKE_AI_DEP%" > environments-and-requirements/requirements-base.txt
call curl -L "%INVOKE_AI_MODELS%" > configs/INITIAL_MODELS.yaml


call .venv\Scripts\activate.bat
call .venv\Scripts\python -mpip install -r requirements.txt
if %errorlevel% neq 0 (
    echo Installation of requirements failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
    pause
    exit /b
)

call .venv\Scripts\python -mpip install !INVOKE_AI_SRC!
if %errorlevel% neq 0 (
    echo Installation of InvokeAI failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
    pause
    exit /b
)

@rem call .venv\Scripts\invokeai-configure --root=.

@rem if %errorlevel% neq 0 (
@rem    echo Configuration InvokeAI failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
@rem    pause
@rem    exit /b
@rem )

echo InvokeAI has been updated to '%INVOKE_AI_VERSION%'

echo "Press any key to continue"
pause
endlocal
@@ -1,58 +0,0 @@
#!/usr/bin/env bash

set -eu

if [ $# -ge 1 ] && [ "${1:0:2}" == "-h" ]; then
    echo "Usage: update.sh <release>"
    echo "Updates InvokeAI to use the indicated version of the code base."
    echo "Find the version or branch for the release you want, and pass it as the argument."
    echo "For example: update.sh v2.2.5 for release 2.2.5."
    echo "             update.sh main for the current development version."
    echo ""
    echo "If no argument provided then will install the version tagged with 'latest', equivalent to"
    echo "update.sh latest"
    exit -1
fi

INVOKE_AI_VERSION=${1:-latest}

INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive/$INVOKE_AI_VERSION.zip"
INVOKE_AI_DEP=https://raw.githubusercontent.com/invoke-ai/InvokeAI/$INVOKE_AI_VERSION/environments-and-requirements/requirements-base.txt
INVOKE_AI_MODELS=https://raw.githubusercontent.com/invoke-ai/InvokeAI/$INVOKE_AI_VERSION/configs/INITIAL_MODELS.yaml

# ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"

function _err_exit {
    if test "$1" -ne 0
    then
        echo "Something went wrong while installing InvokeAI and/or its requirements."
        echo "Update cannot continue. Please report this error to https://github.com/invoke-ai/InvokeAI/issues"
        echo -e "Error code $1; Error caught was '$2'"
        read -p "Press any key to exit..."
        exit
    fi
}

if ! curl -I "$INVOKE_AI_DEP" -fs >/dev/null; then
    echo \'$INVOKE_AI_VERSION\' is not a known branch name or tag. Please check the version and try again.
    exit
fi

echo This script will update InvokeAI and all its dependencies to version \'$INVOKE_AI_VERSION\'.
echo If you do not want to do this, press control-C now!
read -p "Press any key to continue, or CTRL-C to exit..."

curl -L "$INVOKE_AI_DEP" > environments-and-requirements/requirements-base.txt
curl -L "$INVOKE_AI_MODELS" > configs/INITIAL_MODELS.yaml

. .venv/bin/activate

./.venv/bin/python -mpip install -r requirements.txt
_err_exit $? "The pip program failed to install InvokeAI's requirements."

./.venv/bin/python -mpip install $INVOKE_AI_SRC
_err_exit $? "The pip program failed to install InvokeAI."

echo InvokeAI updated to \'$INVOKE_AI_VERSION\'
@@ -2,6 +2,7 @@

from logging import Logger

from invokeai.app.services.item_storage.item_storage_memory import ItemStorageMemory
from invokeai.app.services.shared.sqlite.sqlite_util import init_db
from invokeai.backend.model_manager.metadata import ModelMetadataStore
from invokeai.backend.util.logging import InvokeAILogger
@@ -22,7 +23,6 @@ from ..services.invocation_queue.invocation_queue_memory import MemoryInvocation
from ..services.invocation_services import InvocationServices
from ..services.invocation_stats.invocation_stats_default import InvocationStatsService
from ..services.invoker import Invoker
from ..services.item_storage.item_storage_sqlite import SqliteItemStorage
from ..services.latents_storage.latents_storage_disk import DiskLatentsStorage
from ..services.latents_storage.latents_storage_forward_cache import ForwardCacheLatentsStorage
from ..services.model_install import ModelInstallService
@@ -80,7 +80,7 @@ class ApiDependencies:
        board_records = SqliteBoardRecordStorage(db=db)
        boards = BoardService()
        events = FastAPIEventService(event_handler_id)
        graph_execution_manager = SqliteItemStorage[GraphExecutionState](db=db, table_name="graph_executions")
        graph_execution_manager = ItemStorageMemory[GraphExecutionState]()
        image_records = SqliteImageRecordStorage(db=db)
        images = ImageService()
        invocation_cache = MemoryInvocationCache(max_cache_size=config.node_cache_size)

@@ -1,7 +1,7 @@
# Copyright (c) 2023 Lincoln D. Stein
"""FastAPI route for model configuration records."""


import pathlib
from hashlib import sha1
from random import randbytes
from typing import Any, Dict, List, Optional, Set
@@ -27,6 +27,7 @@ from invokeai.backend.model_manager.config import (
    ModelFormat,
    ModelType,
)
from invokeai.backend.model_manager.merge import MergeInterpolationMethod, ModelMerger
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata

from ..dependencies import ApiDependencies
@@ -415,3 +416,57 @@ async def sync_models_to_config() -> Response:
    """
    ApiDependencies.invoker.services.model_install.sync_to_config()
    return Response(status_code=204)


@model_records_router.put(
    "/merge",
    operation_id="merge",
)
async def merge(
    keys: List[str] = Body(description="Keys for two to three models to merge", min_length=2, max_length=3),
    merged_model_name: Optional[str] = Body(description="Name of destination model", default=None),
    alpha: float = Body(description="Alpha weighting strength to apply to 2d and 3d models", default=0.5),
    force: bool = Body(
        description="Force merging of models created with different versions of diffusers",
        default=False,
    ),
    interp: Optional[MergeInterpolationMethod] = Body(description="Interpolation method", default=None),
    merge_dest_directory: Optional[str] = Body(
        description="Save the merged model to the designated directory (with 'merged_model_name' appended)",
        default=None,
    ),
) -> AnyModelConfig:
    """
    Merge diffusers models.

    keys: List of 2-3 model keys to merge together. All models must use the same base type.
    merged_model_name: Name for the merged model [Concat model names]
    alpha: Alpha value (0.0-1.0). Higher values give more weight to the second model [0.5]
    force: If true, force the merge even if the models were generated by different versions of the diffusers library [False]
    interp: Interpolation method. One of "weighted_sum", "sigmoid", "inv_sigmoid" or "add_difference" [weighted_sum]
    merge_dest_directory: Specify a directory to store the merged model in [models directory]
    """
    logger = ApiDependencies.invoker.services.logger
    try:
        logger.info(f"Merging models: {keys} into {merge_dest_directory or '<MODELS>'}/{merged_model_name}")
        dest = pathlib.Path(merge_dest_directory) if merge_dest_directory else None
        installer = ApiDependencies.invoker.services.model_install
        merger = ModelMerger(installer)
        model_names = [installer.record_store.get_model(x).name for x in keys]
        response = merger.merge_diffusion_models_and_save(
            model_keys=keys,
            merged_model_name=merged_model_name or "+".join(model_names),
            alpha=alpha,
            interp=interp,
            force=force,
            merge_dest_directory=dest,
        )
    except UnknownModelException:
        raise HTTPException(
            status_code=404,
            detail=f"One or more of the models '{keys}' not found",
        )
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    return response
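For illustration, invoking the merge endpoint from a client. With multiple Body parameters, FastAPI expects a single JSON object keyed by parameter name. The URL prefix and port below are assumptions (the router's mount point is outside this hunk):

import requests

resp = requests.put(
    "http://localhost:9090/api/v2/models/merge",  # hypothetical mount point
    json={
        "keys": ["key-of-model-a", "key-of-model-b"],
        "merged_model_name": "my-merged-model",
        "alpha": 0.5,
        "interp": "weighted_sum",
    },
)
resp.raise_for_status()
print(resp.json())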

@@ -30,6 +30,7 @@ from invokeai.app.invocations.primitives import ImageField, ImageOutput
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.image_util.depth_anything import DepthAnythingDetector

from ...backend.model_management import BaseModelType
from .baseinvocation import (
@@ -602,3 +603,33 @@ class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
        color_map = cv2.resize(color_map, (width, height), interpolation=cv2.INTER_NEAREST)
        color_map = Image.fromarray(color_map)
        return color_map


DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"]


@invocation(
    "depth_anything_image_processor",
    title="Depth Anything Processor",
    tags=["controlnet", "depth", "depth anything"],
    category="controlnet",
    version="1.0.0",
)
class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
    """Generates a depth map based on the Depth Anything algorithm"""

    model_size: DEPTH_ANYTHING_MODEL_SIZES = InputField(
        default="small", description="The size of the depth model to use"
    )
    resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res)
    offload: bool = InputField(default=False)

    def run_processor(self, image):
        depth_anything_detector = DepthAnythingDetector()
        depth_anything_detector.load_model(model_size=self.model_size)

        if image.mode == "RGBA":
            image = image.convert("RGB")

        processed_image = depth_anything_detector(image=image, resolution=self.resolution, offload=self.offload)
        return processed_image

@@ -1,5 +1,6 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)

import math
from contextlib import ExitStack
from functools import singledispatchmethod
from typing import List, Literal, Optional, Union
@@ -1228,3 +1229,57 @@ class CropLatentsCoreInvocation(BaseInvocation):
        context.services.latents.save(name, cropped_latents)

        return build_latents_output(latents_name=name, latents=cropped_latents)


@invocation_output("ideal_size_output")
class IdealSizeOutput(BaseInvocationOutput):
    """The ideal width and height of an image, in pixels"""

    width: int = OutputField(description="The ideal width of the image (in pixels)")
    height: int = OutputField(description="The ideal height of the image (in pixels)")


@invocation(
    "ideal_size",
    title="Ideal Size",
    tags=["latents", "math", "ideal_size"],
    version="1.0.2",
)
class IdealSizeInvocation(BaseInvocation):
    """Calculates the ideal size for generation to avoid duplication"""

    width: int = InputField(default=1024, description="Final image width")
    height: int = InputField(default=576, description="Final image height")
    unet: UNetField = InputField(default=None, description=FieldDescriptions.unet)
    multiplier: float = InputField(
        default=1.0,
        description="Amount to multiply the model's dimensions by when calculating the ideal size (may result in initial generation artifacts if too large)",
    )

    def trim_to_multiple_of(self, *args, multiple_of=LATENT_SCALE_FACTOR):
        return tuple((x - x % multiple_of) for x in args)

    def invoke(self, context: InvocationContext) -> IdealSizeOutput:
        aspect = self.width / self.height
        dimension = 512
        if self.unet.unet.base_model == BaseModelType.StableDiffusion2:
            dimension = 768
        elif self.unet.unet.base_model == BaseModelType.StableDiffusionXL:
            dimension = 1024
        dimension = dimension * self.multiplier
        min_dimension = math.floor(dimension * 0.5)
        model_area = dimension * dimension  # hardcoded for now since all models are trained on square images

        if aspect > 1.0:
            init_height = max(min_dimension, math.sqrt(model_area / aspect))
            init_width = init_height * aspect
        else:
            init_width = max(min_dimension, math.sqrt(model_area * aspect))
            init_height = init_width / aspect

        scaled_width, scaled_height = self.trim_to_multiple_of(
            math.floor(init_width),
            math.floor(init_height),
        )

        return IdealSizeOutput(width=scaled_width, height=scaled_height)
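A worked example of the sizing arithmetic, assuming LATENT_SCALE_FACTOR is 8 (the SD VAE's spatial downscaling factor): for an SDXL model (dimension 1024) and a requested 1536x640 image, aspect = 2.4 and model_area = 1024*1024 = 1,048,576. Since aspect > 1, init_height = max(512, sqrt(1048576 / 2.4)) ≈ 660.99 and init_width ≈ 1586.37; flooring and trimming both to multiples of 8 gives 1584x656. A standalone replica of the math:

import math

LATENT_SCALE_FACTOR = 8  # assumption, per the note above

def ideal_size(width: int, height: int, dimension: int) -> tuple[int, int]:
    aspect = width / height
    min_dimension = math.floor(dimension * 0.5)
    model_area = dimension * dimension
    if aspect > 1.0:
        init_height = max(min_dimension, math.sqrt(model_area / aspect))
        init_width = init_height * aspect
    else:
        init_width = max(min_dimension, math.sqrt(model_area * aspect))
        init_height = init_width / aspect
    def trim(x: float) -> int:
        return math.floor(x) - math.floor(x) % LATENT_SCALE_FACTOR
    return (trim(init_width), trim(init_height))

print(ideal_size(1536, 640, 1024))  # (1584, 656)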

@@ -173,10 +173,10 @@ from __future__ import annotations

import os
from pathlib import Path
from typing import Any, ClassVar, Dict, List, Literal, Optional, Union, get_type_hints
from typing import Any, ClassVar, Dict, List, Literal, Optional, Union

from omegaconf import DictConfig, OmegaConf
from pydantic import Field, TypeAdapter
from pydantic import Field
from pydantic.config import JsonDict
from pydantic_settings import SettingsConfigDict

@@ -251,7 +251,11 @@ class InvokeAIAppConfig(InvokeAISettings):
    log_level : Literal["debug", "info", "warning", "error", "critical"] = Field(default="info", description="Emit logging messages at this level or higher", json_schema_extra=Categories.Logging)
    log_sql : bool = Field(default=False, description="Log SQL queries", json_schema_extra=Categories.Logging)

    # Development
    dev_reload : bool = Field(default=False, description="Automatically reload when Python sources are changed.", json_schema_extra=Categories.Development)
    profile_graphs : bool = Field(default=False, description="Enable graph profiling", json_schema_extra=Categories.Development)
    profile_prefix : Optional[str] = Field(default=None, description="An optional prefix for profile output files.", json_schema_extra=Categories.Development)
    profiles_dir : Path = Field(default=Path('profiles'), description="Directory for graph profiles", json_schema_extra=Categories.Development)

    version : bool = Field(default=False, description="Show InvokeAI version and exit", json_schema_extra=Categories.Other)

@@ -270,7 +274,7 @@ class InvokeAIAppConfig(InvokeAISettings):
    attention_type : Literal["auto", "normal", "xformers", "sliced", "torch-sdp"] = Field(default="auto", description="Attention type", json_schema_extra=Categories.Generation)
    attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', json_schema_extra=Categories.Generation)
    force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.Generation)
    png_compress_level : int = Field(default=6, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", json_schema_extra=Categories.Generation)
    png_compress_level : int = Field(default=1, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", json_schema_extra=Categories.Generation)

    # QUEUE
    max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", json_schema_extra=Categories.Queue)
@@ -280,6 +284,9 @@ class InvokeAIAppConfig(InvokeAISettings):
    deny_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.", json_schema_extra=Categories.Nodes)
    node_cache_size : int = Field(default=512, description="How many cached nodes to keep in memory", json_schema_extra=Categories.Nodes)

    # MODEL IMPORT
    civitai_api_key : Optional[str] = Field(default=os.environ.get("CIVITAI_API_KEY"), description="API key for CivitAI", json_schema_extra=Categories.Other)

    # DEPRECATED FIELDS - STILL HERE IN ORDER TO OBTAIN VALUES FROM PRE-3.1 CONFIG FILES
    always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", json_schema_extra=Categories.MemoryPerformance)
    max_cache_size : Optional[float] = Field(default=None, gt=0, description="Maximum memory amount used by model cache for rapid switching", json_schema_extra=Categories.MemoryPerformance)
@@ -289,6 +296,7 @@ class InvokeAIAppConfig(InvokeAISettings):
    lora_dir : Optional[Path] = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', json_schema_extra=Categories.Paths)
    embedding_dir : Optional[Path] = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
    controlnet_dir : Optional[Path] = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', json_schema_extra=Categories.Paths)

    # this is not referred to in the source code and can be removed entirely
    #free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", json_schema_extra=Categories.MemoryPerformance)

@@ -328,13 +336,9 @@ class InvokeAIAppConfig(InvokeAISettings):
        super().parse_args(argv)

        if self.singleton_init and not clobber:
            hints = get_type_hints(self.__class__)
            for k in self.singleton_init:
                setattr(
                    self,
                    k,
                    TypeAdapter(hints[k]).validate_python(self.singleton_init[k]),
                )
            # When setting values in this way, set validate_assignment to true if you want to validate the value.
            for k, v in self.singleton_init.items():
                setattr(self, k, v)
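A small illustration of the trade-off noted in the comment above: a plain setattr skips validation unless the model opts in with validate_assignment. Bare pydantic, independent of the InvokeAI settings classes:

from pydantic import BaseModel, ConfigDict, ValidationError

class Settings(BaseModel):
    model_config = ConfigDict(validate_assignment=True)
    png_compress_level: int = 1

s = Settings()
s.png_compress_level = "3"  # coerced to int 3 because validate_assignment is on
try:
    s.png_compress_level = "not a number"
except ValidationError as e:
    print(e)  # without validate_assignment, the bad value would be stored silently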

    @classmethod
    def get_config(cls, **kwargs: Any) -> InvokeAIAppConfig:
@@ -449,6 +453,11 @@ class InvokeAIAppConfig(InvokeAISettings):
        disabled_in_config = not self.xformers_enabled
        return disabled_in_config and self.attention_type != "xformers"

    @property
    def profiles_path(self) -> Path:
        """Path to the graph profiles directory."""
        return self._resolve(self.profiles_dir)

    @staticmethod
    def find_root() -> Path:
        """Choose the runtime root directory when not specified on command line or init file."""

@@ -208,7 +208,6 @@ class DownloadQueueService(DownloadQueueServiceBase):
                job = self._queue.get(timeout=1)
            except Empty:
                continue

            try:
                job.job_started = get_iso_timestamp()
                self._do_download(job)

@@ -1,11 +1,16 @@
import time
import traceback
from contextlib import suppress
from threading import BoundedSemaphore, Event, Thread
from typing import Optional

import invokeai.backend.util.logging as logger
from invokeai.app.invocations.baseinvocation import InvocationContext
from invokeai.app.services.invocation_queue.invocation_queue_common import InvocationQueueItem
from invokeai.app.services.invocation_stats.invocation_stats_common import (
    GESStatsNotFoundError,
)
from invokeai.app.util.profiler import Profiler

from ..invoker import Invoker
from .invocation_processor_base import InvocationProcessorABC
@@ -18,7 +23,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
    __invoker: Invoker
    __threadLimit: BoundedSemaphore

    def start(self, invoker) -> None:
    def start(self, invoker: Invoker) -> None:
        # if we do want multithreading at some point, we could make this configurable
        self.__threadLimit = BoundedSemaphore(1)
        self.__invoker = invoker
@@ -39,6 +44,27 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
        self.__threadLimit.acquire()
        queue_item: Optional[InvocationQueueItem] = None

        profiler = (
            Profiler(
                logger=self.__invoker.services.logger,
                output_dir=self.__invoker.services.configuration.profiles_path,
                prefix=self.__invoker.services.configuration.profile_prefix,
            )
            if self.__invoker.services.configuration.profile_graphs
            else None
        )

        def stats_cleanup(graph_execution_state_id: str) -> None:
            if profiler:
                profile_path = profiler.stop()
                stats_path = profile_path.with_suffix(".json")
                self.__invoker.services.performance_statistics.dump_stats(
                    graph_execution_state_id=graph_execution_state_id, output_path=stats_path
                )
            with suppress(GESStatsNotFoundError):
                self.__invoker.services.performance_statistics.log_stats(graph_execution_state_id)
                self.__invoker.services.performance_statistics.reset_stats(graph_execution_state_id)
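The Profiler API used by stats_cleanup (start with a profile_id, stop returning the output path) is only partially visible in this hunk; a hypothetical stand-in with the same shape, built on cProfile (the real class also takes a logger):

import cProfile
from pathlib import Path

class Profiler:
    def __init__(self, output_dir: Path, prefix: str | None = None) -> None:
        self.profile_id: str | None = None
        self._output_dir = output_dir
        self._prefix = prefix
        self._profiler: cProfile.Profile | None = None

    def start(self, profile_id: str) -> None:
        self.profile_id = profile_id
        self._profiler = cProfile.Profile()
        self._profiler.enable()

    def stop(self) -> Path:
        assert self._profiler is not None, "start() must be called before stop()"
        self._profiler.disable()
        name = f"{self._prefix}_{self.profile_id}" if self._prefix else str(self.profile_id)
        path = self._output_dir / f"{name}.prof"
        self._profiler.dump_stats(path)  # inspect later with pstats or snakeviz
        return path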

        while not stop_event.is_set():
            try:
                queue_item = self.__invoker.services.queue.get()
@@ -49,6 +75,10 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                # do not hammer the queue
                time.sleep(0.5)
                continue

            if profiler and profiler.profile_id != queue_item.graph_execution_state_id:
                profiler.start(profile_id=queue_item.graph_execution_state_id)

            try:
                graph_execution_state = self.__invoker.services.graph_execution_manager.get(
                    queue_item.graph_execution_state_id
@@ -137,7 +167,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                    pass

            except CanceledException:
                self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
                stats_cleanup(graph_execution_state.id)
                pass

            except Exception as e:
@@ -162,7 +192,6 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                    error_type=e.__class__.__name__,
                    error=error,
                )
                self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
                pass

            # Check queue to see if this is canceled, and skip if so
@@ -194,13 +223,13 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                        error=traceback.format_exc(),
                    )
                elif is_complete:
                    self.__invoker.services.performance_statistics.log_stats(graph_execution_state.id)
                    self.__invoker.services.events.emit_graph_execution_complete(
                        queue_batch_id=queue_item.session_queue_batch_id,
                        queue_item_id=queue_item.session_queue_item_id,
                        queue_id=queue_item.session_queue_id,
                        graph_execution_state_id=graph_execution_state.id,
                    )
                    stats_cleanup(graph_execution_state.id)

        except KeyboardInterrupt:
            pass  # Log something? KeyboardInterrupt is probably not going to be seen by the processor

@@ -30,8 +30,10 @@ writes to the system log is stored in InvocationServices.performance_statistics.

from abc import ABC, abstractmethod
from contextlib import AbstractContextManager
from pathlib import Path

from invokeai.app.invocations.baseinvocation import BaseInvocation
from invokeai.app.services.invocation_stats.invocation_stats_common import InvocationStatsSummary


class InvocationStatsServiceBase(ABC):
@@ -61,8 +63,9 @@ class InvocationStatsServiceBase(ABC):
    @abstractmethod
    def reset_stats(self, graph_execution_state_id: str):
        """
        Reset all statistics for the indicated graph
        :param graph_execution_state_id
        Reset all statistics for the indicated graph.
        :param graph_execution_state_id: The id of the session whose stats to reset.
        :raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
        """
        pass

@@ -70,5 +73,26 @@ class InvocationStatsServiceBase(ABC):
    def log_stats(self, graph_execution_state_id: str):
        """
        Write out the accumulated statistics to the log or somewhere else.
        :param graph_execution_state_id: The id of the session whose stats to log.
        :raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
        """
        pass

    @abstractmethod
    def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary:
        """
        Gets the accumulated statistics for the indicated graph.
        :param graph_execution_state_id: The id of the session whose stats to get.
        :raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
        """
        pass

    @abstractmethod
    def dump_stats(self, graph_execution_state_id: str, output_path: Path) -> None:
        """
        Write out the accumulated statistics to the indicated path as JSON.
        :param graph_execution_state_id: The id of the session whose stats to dump.
        :param output_path: The file to write the stats to.
        :raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
        """
        pass

@@ -1,5 +1,91 @@
from collections import defaultdict
from dataclasses import dataclass
from dataclasses import asdict, dataclass
from typing import Any, Optional


class GESStatsNotFoundError(Exception):
    """Raised when execution stats are not found for a given Graph Execution State."""


@dataclass
class NodeExecutionStatsSummary:
    """The stats for a specific type of node."""

    node_type: str
    num_calls: int
    time_used_seconds: float
    peak_vram_gb: float


@dataclass
class ModelCacheStatsSummary:
    """The stats for the model cache."""

    high_water_mark_gb: float
    cache_size_gb: float
    total_usage_gb: float
    cache_hits: int
    cache_misses: int
    models_cached: int
    models_cleared: int


@dataclass
class GraphExecutionStatsSummary:
    """The stats for the graph execution state."""

    graph_execution_state_id: str
    execution_time_seconds: float
    # `wall_time_seconds`, `ram_usage_gb` and `ram_change_gb` are derived from the node execution stats.
    # In some situations, there are no node stats, so these values are optional.
    wall_time_seconds: Optional[float]
    ram_usage_gb: Optional[float]
    ram_change_gb: Optional[float]


@dataclass
class InvocationStatsSummary:
    """
    The accumulated stats for a graph execution.
    Its `__str__` method returns a human-readable stats summary.
    """

    vram_usage_gb: Optional[float]
    graph_stats: GraphExecutionStatsSummary
    model_cache_stats: ModelCacheStatsSummary
    node_stats: list[NodeExecutionStatsSummary]

    def __str__(self) -> str:
        _str = f"Graph stats: {self.graph_stats.graph_execution_state_id}\n"
        _str += f"{'Node':>30} {'Calls':>7} {'Seconds':>9} {'VRAM Used':>10}\n"

        for summary in self.node_stats:
            _str += f"{summary.node_type:>30} {summary.num_calls:>7} {summary.time_used_seconds:>8.3f}s {summary.peak_vram_gb:>9.3f}G\n"

        _str += f"TOTAL GRAPH EXECUTION TIME: {self.graph_stats.execution_time_seconds:7.3f}s\n"

        if self.graph_stats.wall_time_seconds is not None:
            _str += f"TOTAL GRAPH WALL TIME: {self.graph_stats.wall_time_seconds:7.3f}s\n"

        if self.graph_stats.ram_usage_gb is not None and self.graph_stats.ram_change_gb is not None:
            _str += f"RAM used by InvokeAI process: {self.graph_stats.ram_usage_gb:4.2f}G ({self.graph_stats.ram_change_gb:+5.3f}G)\n"

        _str += f"RAM used to load models: {self.model_cache_stats.total_usage_gb:4.2f}G\n"
        if self.vram_usage_gb:
            _str += f"VRAM in use: {self.vram_usage_gb:4.3f}G\n"
        _str += "RAM cache statistics:\n"
        _str += f"   Model cache hits: {self.model_cache_stats.cache_hits}\n"
        _str += f"   Model cache misses: {self.model_cache_stats.cache_misses}\n"
        _str += f"   Models cached: {self.model_cache_stats.models_cached}\n"
        _str += f"   Models cleared from cache: {self.model_cache_stats.models_cleared}\n"
        _str += f"   Cache high water mark: {self.model_cache_stats.high_water_mark_gb:4.2f}/{self.model_cache_stats.cache_size_gb:4.2f}G\n"

        return _str

    def as_dict(self) -> dict[str, Any]:
        """Returns the stats as a dictionary."""
        return asdict(self)
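A quick sketch of how these dataclasses compose (the numbers are made up):

summary = InvocationStatsSummary(
    vram_usage_gb=3.2,
    graph_stats=GraphExecutionStatsSummary(
        graph_execution_state_id="abc123",
        execution_time_seconds=12.5,
        wall_time_seconds=13.1,
        ram_usage_gb=7.9,
        ram_change_gb=0.4,
    ),
    model_cache_stats=ModelCacheStatsSummary(
        high_water_mark_gb=5.0, cache_size_gb=6.0, total_usage_gb=4.2,
        cache_hits=10, cache_misses=2, models_cached=3, models_cleared=1,
    ),
    node_stats=[
        NodeExecutionStatsSummary(node_type="main_model_loader", num_calls=1, time_used_seconds=0.3, peak_vram_gb=2.1),
    ],
)
print(summary)            # human-readable report via __str__
print(summary.as_dict())  # JSON-serializable dict via dataclasses.asdict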


@dataclass
@@ -55,12 +141,33 @@ class GraphExecutionStats:

        return last_node

    def get_pretty_log(self, graph_execution_state_id: str) -> str:
        log = f"Graph stats: {graph_execution_state_id}\n"
        log += f"{'Node':>30} {'Calls':>7}{'Seconds':>9} {'VRAM Used':>10}\n"
    def get_graph_stats_summary(self, graph_execution_state_id: str) -> GraphExecutionStatsSummary:
        """Get a summary of the graph stats."""
        first_node = self.get_first_node_stats()
        last_node = self.get_last_node_stats()

        # Log stats aggregated by node type.
        wall_time_seconds: Optional[float] = None
        ram_usage_gb: Optional[float] = None
        ram_change_gb: Optional[float] = None

        if last_node and first_node:
            wall_time_seconds = last_node.end_time - first_node.start_time
            ram_usage_gb = last_node.end_ram_gb
            ram_change_gb = last_node.end_ram_gb - first_node.start_ram_gb

        return GraphExecutionStatsSummary(
            graph_execution_state_id=graph_execution_state_id,
            execution_time_seconds=self.get_total_run_time(),
            wall_time_seconds=wall_time_seconds,
            ram_usage_gb=ram_usage_gb,
            ram_change_gb=ram_change_gb,
        )

    def get_node_stats_summaries(self) -> list[NodeExecutionStatsSummary]:
        """Get a summary of the node stats."""
        summaries: list[NodeExecutionStatsSummary] = []
        node_stats_by_type: dict[str, list[NodeExecutionStats]] = defaultdict(list)

        for node_stats in self._node_stats_list:
            node_stats_by_type[node_stats.invocation_type].append(node_stats)

@@ -68,17 +175,9 @@ class GraphExecutionStats:
            num_calls = len(node_type_stats_list)
            time_used = sum([n.total_time() for n in node_type_stats_list])
            peak_vram = max([n.peak_vram_gb for n in node_type_stats_list])
            log += f"{node_type:>30} {num_calls:>4} {time_used:7.3f}s {peak_vram:4.3f}G\n"
            summary = NodeExecutionStatsSummary(
                node_type=node_type, num_calls=num_calls, time_used_seconds=time_used, peak_vram_gb=peak_vram
            )
            summaries.append(summary)

        # Log stats for the entire graph.
        log += f"TOTAL GRAPH EXECUTION TIME: {self.get_total_run_time():7.3f}s\n"

        first_node = self.get_first_node_stats()
        last_node = self.get_last_node_stats()
        if first_node is not None and last_node is not None:
            total_wall_time = last_node.end_time - first_node.start_time
            ram_change = last_node.end_ram_gb - first_node.start_ram_gb
            log += f"TOTAL GRAPH WALL TIME: {total_wall_time:7.3f}s\n"
            log += f"RAM used by InvokeAI process: {last_node.end_ram_gb:4.2f}G ({ram_change:+5.3f}G)\n"

        return log
        return summaries

@@ -1,5 +1,7 @@
import json
import time
from contextlib import contextmanager
from pathlib import Path

import psutil
import torch
@@ -7,10 +9,19 @@ import torch
import invokeai.backend.util.logging as logger
from invokeai.app.invocations.baseinvocation import BaseInvocation
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.item_storage.item_storage_common import ItemNotFoundError
from invokeai.backend.model_management.model_cache import CacheStats

from .invocation_stats_base import InvocationStatsServiceBase
from .invocation_stats_common import GraphExecutionStats, NodeExecutionStats
from .invocation_stats_common import (
    GESStatsNotFoundError,
    GraphExecutionStats,
    GraphExecutionStatsSummary,
    InvocationStatsSummary,
    ModelCacheStatsSummary,
    NodeExecutionStats,
    NodeExecutionStatsSummary,
)

# Size of 1GB in bytes.
GB = 2**30
@@ -53,7 +64,7 @@ class InvocationStatsService(InvocationStatsServiceBase):
        finally:
            # Record state after the invocation.
            node_stats = NodeExecutionStats(
                invocation_type=invocation.type,
                invocation_type=invocation.get_type(),
                start_time=start_time,
                end_time=time.time(),
                start_ram_gb=start_ram / GB,
@@ -68,11 +79,11 @@ class InvocationStatsService(InvocationStatsServiceBase):
        This shouldn't be necessary, but we don't have totally robust upstream handling of graph completions/errors, so
        for now we call this function periodically to prevent them from accumulating.
        """
        to_prune = []
        to_prune: list[str] = []
        for graph_execution_state_id in self._stats:
            try:
                graph_execution_state = self._invoker.services.graph_execution_manager.get(graph_execution_state_id)
            except Exception:
            except ItemNotFoundError:
                # TODO(ryand): What would cause this? Should this exception just be allowed to propagate?
                logger.warning(f"Failed to get graph state for {graph_execution_state_id}.")
                continue
@@ -95,31 +106,66 @@ class InvocationStatsService(InvocationStatsServiceBase):
            del self._stats[graph_execution_state_id]
            del self._cache_stats[graph_execution_state_id]
        except KeyError as e:
            logger.warning(f"Attempted to clear statistics for unknown graph {graph_execution_state_id}: {e}.")
            raise GESStatsNotFoundError(
                f"Attempted to clear statistics for unknown graph {graph_execution_state_id}: {e}."
            ) from e

    def log_stats(self, graph_execution_state_id: str):
    def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary:
        graph_stats_summary = self._get_graph_summary(graph_execution_state_id)
        node_stats_summaries = self._get_node_summaries(graph_execution_state_id)
        model_cache_stats_summary = self._get_model_cache_summary(graph_execution_state_id)
        vram_usage_gb = torch.cuda.memory_allocated() / GB if torch.cuda.is_available() else None

        return InvocationStatsSummary(
            graph_stats=graph_stats_summary,
            model_cache_stats=model_cache_stats_summary,
            node_stats=node_stats_summaries,
            vram_usage_gb=vram_usage_gb,
        )

    def log_stats(self, graph_execution_state_id: str) -> None:
        stats = self.get_stats(graph_execution_state_id)
        logger.info(str(stats))

    def dump_stats(self, graph_execution_state_id: str, output_path: Path) -> None:
        stats = self.get_stats(graph_execution_state_id)
        with open(output_path, "w") as f:
            f.write(json.dumps(stats.as_dict(), indent=2))

    def _get_model_cache_summary(self, graph_execution_state_id: str) -> ModelCacheStatsSummary:
        try:
            graph_stats = self._stats[graph_execution_state_id]
            cache_stats = self._cache_stats[graph_execution_state_id]
        except KeyError as e:
            logger.warning(f"Attempted to log statistics for unknown graph {graph_execution_state_id}: {e}.")
            return
            raise GESStatsNotFoundError(
                f"Attempted to get model cache statistics for unknown graph {graph_execution_state_id}: {e}."
            ) from e

        log = graph_stats.get_pretty_log(graph_execution_state_id)
        return ModelCacheStatsSummary(
            cache_hits=cache_stats.hits,
            cache_misses=cache_stats.misses,
            high_water_mark_gb=cache_stats.high_watermark / GB,
            cache_size_gb=cache_stats.cache_size / GB,
            total_usage_gb=sum(list(cache_stats.loaded_model_sizes.values())) / GB,
            models_cached=cache_stats.in_cache,
            models_cleared=cache_stats.cleared,
        )

        hwm = cache_stats.high_watermark / GB
        tot = cache_stats.cache_size / GB
        loaded = sum(list(cache_stats.loaded_model_sizes.values())) / GB
        log += f"RAM used to load models: {loaded:4.2f}G\n"
        if torch.cuda.is_available():
            log += f"VRAM in use: {(torch.cuda.memory_allocated() / GB):4.3f}G\n"
        log += "RAM cache statistics:\n"
        log += f"   Model cache hits: {cache_stats.hits}\n"
        log += f"   Model cache misses: {cache_stats.misses}\n"
        log += f"   Models cached: {cache_stats.in_cache}\n"
        log += f"   Models cleared from cache: {cache_stats.cleared}\n"
        log += f"   Cache high water mark: {hwm:4.2f}/{tot:4.2f}G\n"
        logger.info(log)
    def _get_graph_summary(self, graph_execution_state_id: str) -> GraphExecutionStatsSummary:
        try:
            graph_stats = self._stats[graph_execution_state_id]
        except KeyError as e:
            raise GESStatsNotFoundError(
                f"Attempted to get graph statistics for unknown graph {graph_execution_state_id}: {e}."
            ) from e

        del self._stats[graph_execution_state_id]
        del self._cache_stats[graph_execution_state_id]
        return graph_stats.get_graph_stats_summary(graph_execution_state_id)

    def _get_node_summaries(self, graph_execution_state_id: str) -> list[NodeExecutionStatsSummary]:
        try:
            graph_stats = self._stats[graph_execution_state_id]
        except KeyError as e:
            raise GESStatsNotFoundError(
                f"Attempted to get node statistics for unknown graph {graph_execution_state_id}: {e}."
            ) from e

        return graph_stats.get_node_stats_summaries()

@@ -1,10 +1,8 @@
from abc import ABC, abstractmethod
from typing import Callable, Generic, Optional, TypeVar
from typing import Callable, Generic, TypeVar

from pydantic import BaseModel

from invokeai.app.services.shared.pagination import PaginatedResults

T = TypeVar("T", bound=BaseModel)


@@ -22,26 +20,26 @@ class ItemStorageABC(ABC, Generic[T]):

    @abstractmethod
    def get(self, item_id: str) -> T:
        """Gets the item, parsing it into a Pydantic model"""
        pass

    @abstractmethod
    def get_raw(self, item_id: str) -> Optional[str]:
        """Gets the raw item as a string, skipping Pydantic parsing"""
        """
        Gets the item.
        :param item_id: the id of the item to get
        :raises ItemNotFoundError: if the item is not found
        """
        pass

    @abstractmethod
    def set(self, item: T) -> None:
        """Sets the item"""
        """
        Sets the item. The id will be extracted based on id_field.
        :param item: the item to set
        """
        pass

    @abstractmethod
    def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
        """Gets a paginated list of items"""
        pass

    @abstractmethod
    def search(self, query: str, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
    def delete(self, item_id: str) -> None:
        """
        Deletes the item, if it exists.
        """
        pass

    def on_changed(self, on_changed: Callable[[T], None]) -> None:

@@ -0,0 +1,5 @@
class ItemNotFoundError(KeyError):
    """Raised when an item is not found in storage"""

    def __init__(self, item_id: str) -> None:
        super().__init__(f"Item with id {item_id} not found")
invokeai/app/services/item_storage/item_storage_memory.py (new file, 52 lines)
@@ -0,0 +1,52 @@
from collections import OrderedDict
from contextlib import suppress
from typing import Generic, TypeVar

from pydantic import BaseModel

from invokeai.app.services.item_storage.item_storage_base import ItemStorageABC
from invokeai.app.services.item_storage.item_storage_common import ItemNotFoundError

T = TypeVar("T", bound=BaseModel)


class ItemStorageMemory(ItemStorageABC[T], Generic[T]):
    """
    Provides a simple in-memory storage for items, with a maximum number of items to store.
    The storage uses the LRU strategy to evict items from storage when the max has been reached.
    """

    def __init__(self, id_field: str = "id", max_items: int = 10) -> None:
        super().__init__()
        if max_items < 1:
            raise ValueError("max_items must be at least 1")
        if not id_field:
            raise ValueError("id_field must not be empty")
        self._id_field = id_field
        self._items: OrderedDict[str, T] = OrderedDict()
        self._max_items = max_items

    def get(self, item_id: str) -> T:
        # If the item exists, move it to the end of the OrderedDict.
        item = self._items.pop(item_id, None)
        if item is None:
            raise ItemNotFoundError(item_id)
        self._items[item_id] = item
        return item

    def set(self, item: T) -> None:
        item_id = getattr(item, self._id_field)
        if item_id in self._items:
            # If item already exists, remove it and add it to the end
            self._items.pop(item_id)
        elif len(self._items) >= self._max_items:
            # If cache is full, evict the least recently used item
            self._items.popitem(last=False)
        self._items[item_id] = item
        self._on_changed(item)

    def delete(self, item_id: str) -> None:
        # This is a no-op if the item doesn't exist.
        with suppress(KeyError):
            del self._items[item_id]
        self._on_deleted(item_id)
||||
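A minimal sketch of the LRU behaviour described in the docstring above, assuming the base class's change/delete callbacks default to no-ops; the Doc model and ids are hypothetical:

from pydantic import BaseModel

class Doc(BaseModel):
    id: str
    body: str

store = ItemStorageMemory[Doc](id_field="id", max_items=2)
store.set(Doc(id="a", body="first"))
store.set(Doc(id="b", body="second"))
store.get("a")                        # touch "a"; "b" is now least recently used
store.set(Doc(id="c", body="third"))  # at capacity, so "b" is evicted
store.get("b")                        # raises ItemNotFoundError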
@@ -1,147 +0,0 @@
import sqlite3
import threading
from typing import Generic, Optional, TypeVar, get_args

from pydantic import BaseModel, TypeAdapter

from invokeai.app.services.shared.pagination import PaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase

from .item_storage_base import ItemStorageABC

T = TypeVar("T", bound=BaseModel)


class SqliteItemStorage(ItemStorageABC, Generic[T]):
    _table_name: str
    _conn: sqlite3.Connection
    _cursor: sqlite3.Cursor
    _id_field: str
    _lock: threading.RLock
    _validator: Optional[TypeAdapter[T]]

    def __init__(self, db: SqliteDatabase, table_name: str, id_field: str = "id"):
        super().__init__()

        self._lock = db.lock
        self._conn = db.conn
        self._table_name = table_name
        self._id_field = id_field  # TODO: validate that T has this field
        self._cursor = self._conn.cursor()
        self._validator: Optional[TypeAdapter[T]] = None

        self._create_table()

    def _create_table(self):
        try:
            self._lock.acquire()
            self._cursor.execute(
                f"""CREATE TABLE IF NOT EXISTS {self._table_name} (
                item TEXT,
                id TEXT GENERATED ALWAYS AS (json_extract(item, '$.{self._id_field}')) VIRTUAL NOT NULL);"""
            )
            self._cursor.execute(
                f"""CREATE UNIQUE INDEX IF NOT EXISTS {self._table_name}_id ON {self._table_name}(id);"""
            )
        finally:
            self._lock.release()

    def _parse_item(self, item: str) -> T:
        if self._validator is None:
            """
            We don't get access to `__orig_class__` in `__init__()`, and we need this before start(), so
            we can create it when it is first needed instead.
            __orig_class__ is technically an implementation detail of the typing module, not a supported API
            """
            self._validator = TypeAdapter(get_args(self.__orig_class__)[0])  # type: ignore [attr-defined]
        return self._validator.validate_json(item)

    def set(self, item: T):
        try:
            self._lock.acquire()
            self._cursor.execute(
                f"""INSERT OR REPLACE INTO {self._table_name} (item) VALUES (?);""",
                (item.model_dump_json(warnings=False, exclude_none=True),),
            )
            self._conn.commit()
        finally:
            self._lock.release()
        self._on_changed(item)

    def get(self, id: str) -> Optional[T]:
        try:
            self._lock.acquire()
            self._cursor.execute(f"""SELECT item FROM {self._table_name} WHERE id = ?;""", (str(id),))
            result = self._cursor.fetchone()
        finally:
            self._lock.release()

        if not result:
            return None

        return self._parse_item(result[0])

    def get_raw(self, id: str) -> Optional[str]:
        try:
            self._lock.acquire()
            self._cursor.execute(f"""SELECT item FROM {self._table_name} WHERE id = ?;""", (str(id),))
            result = self._cursor.fetchone()
        finally:
            self._lock.release()

        if not result:
            return None

        return result[0]

    def delete(self, id: str):
        try:
            self._lock.acquire()
            self._cursor.execute(f"""DELETE FROM {self._table_name} WHERE id = ?;""", (str(id),))
            self._conn.commit()
        finally:
            self._lock.release()
        self._on_deleted(id)

    def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
        try:
            self._lock.acquire()
            self._cursor.execute(
                f"""SELECT item FROM {self._table_name} LIMIT ? OFFSET ?;""",
                (per_page, page * per_page),
            )
            result = self._cursor.fetchall()

            items = [self._parse_item(r[0]) for r in result]

            self._cursor.execute(f"""SELECT count(*) FROM {self._table_name};""")
            count = self._cursor.fetchone()[0]
        finally:
            self._lock.release()

        pageCount = int(count / per_page) + 1

        return PaginatedResults[T](items=items, page=page, pages=pageCount, per_page=per_page, total=count)

    def search(self, query: str, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
        try:
            self._lock.acquire()
            self._cursor.execute(
                f"""SELECT item FROM {self._table_name} WHERE item LIKE ? LIMIT ? OFFSET ?;""",
                (f"%{query}%", per_page, page * per_page),
            )
            result = self._cursor.fetchall()

            items = [self._parse_item(r[0]) for r in result]

            self._cursor.execute(
                f"""SELECT count(*) FROM {self._table_name} WHERE item LIKE ?;""",
                (f"%{query}%",),
            )
            count = self._cursor.fetchone()[0]
        finally:
            self._lock.release()

        pageCount = int(count / per_page) + 1

        return PaginatedResults[T](items=items, page=page, pages=pageCount, per_page=per_page, total=count)
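One observation on the pagination logic in this removed class: `int(count / per_page) + 1` reports an extra, empty page whenever the total is an exact multiple of the page size. A quick check:

count, per_page = 20, 10
int(count / per_page) + 1  # -> 3, though only 2 pages of results exist
-(-count // per_page)      # ceiling division -> 2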
@@ -165,8 +165,8 @@ class ModelInstallJob(BaseModel):
     )
     source: ModelSource = Field(description="Source (URL, repo_id, or local path) of model")
     local_path: Path = Field(description="Path to locally-downloaded model; may be the same as the source")
-    bytes: Optional[int] = Field(
-        default=None, description="For a remote model, the number of bytes downloaded so far (may not be available)"
+    bytes: int = Field(
+        default=0, description="For a remote model, the number of bytes downloaded so far (may not be available)"
     )
     total_bytes: int = Field(default=0, description="Total size of the model to be installed")
     source_metadata: Optional[AnyModelRepoMetadata] = Field(
@@ -535,19 +535,19 @@ class ModelInstallService(ModelInstallServiceBase):
     def _import_from_url(self, source: URLModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
         # URLs from Civitai or HuggingFace will be handled specially
         url_patterns = {
-            r"https?://civitai.com/": CivitaiMetadataFetch,
-            r"https?://huggingface.co/": HuggingFaceMetadataFetch,
+            r"^https?://civitai.com/": CivitaiMetadataFetch,
+            r"^https?://huggingface.co/[^/]+/[^/]+$": HuggingFaceMetadataFetch,
         }
         metadata = None
         for pattern, fetcher in url_patterns.items():
             if re.match(pattern, str(source.url), re.IGNORECASE):
                 metadata = fetcher(self._session).from_url(source.url)
                 break
         self._logger.debug(f"metadata={metadata}")
         if metadata and isinstance(metadata, ModelMetadataWithFiles):
             remote_files = metadata.download_urls(session=self._session)
         else:
             remote_files = [RemoteModelFile(url=source.url, path=Path("."), size=0)]

         return self._import_remote_model(
             source=source,
             config=config,
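To illustrate the tightened HuggingFace pattern (the URLs below are illustrative): only bare owner/repo URLs are now routed through the metadata fetcher; deep links to individual files fall through to the plain-URL download path.

import re

pattern = r"^https?://huggingface.co/[^/]+/[^/]+$"
assert re.match(pattern, "https://huggingface.co/runwayml/stable-diffusion-v1-5", re.IGNORECASE)
assert not re.match(
    pattern, "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.safetensors", re.IGNORECASE
)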
@@ -586,6 +586,7 @@ class ModelInstallService(ModelInstallServiceBase):
         assert install_job.total_bytes is not None  # to avoid type checking complaints in the loop below

         self._logger.info(f"Queuing {source} for downloading")
+        self._logger.debug(f"remote_files={remote_files}")
         for model_file in remote_files:
             url = model_file.url
             path = model_file.path
@@ -7,6 +7,7 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_1 import
 from invokeai.app.services.shared.sqlite_migrator.migrations.migration_2 import build_migration_2
 from invokeai.app.services.shared.sqlite_migrator.migrations.migration_3 import build_migration_3
 from invokeai.app.services.shared.sqlite_migrator.migrations.migration_4 import build_migration_4
+from invokeai.app.services.shared.sqlite_migrator.migrations.migration_5 import build_migration_5
 from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator

@@ -31,6 +32,7 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
     migrator.register_migration(build_migration_2(image_files=image_files, logger=logger))
     migrator.register_migration(build_migration_3(app_config=config, logger=logger))
     migrator.register_migration(build_migration_4())
+    migrator.register_migration(build_migration_5())
     migrator.run_migrations()

     return db
@@ -0,0 +1,34 @@
import sqlite3

from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration


class Migration5Callback:
    def __call__(self, cursor: sqlite3.Cursor) -> None:
        self._drop_graph_executions(cursor)

    def _drop_graph_executions(self, cursor: sqlite3.Cursor) -> None:
        """Drops the `graph_executions` table."""

        cursor.execute(
            """--sql
            DROP TABLE IF EXISTS graph_executions;
            """
        )


def build_migration_5() -> Migration:
    """
    Build the migration from database version 4 to 5.

    Introduced in v3.6.3, this migration:
    - Drops the `graph_executions` table. We are able to do this because we are moving the graph storage
      to be purely in-memory.
    """
    migration_5 = Migration(
        from_version=4,
        to_version=5,
        callback=Migration5Callback(),
    )

    return migration_5
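A minimal sketch of the callback in isolation, against a throwaway sqlite3 connection; the real SqliteMigrator adds version tracking and transactional safety around this:

import sqlite3

conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("CREATE TABLE graph_executions (id TEXT)")
Migration5Callback()(cursor)  # drops the table
Migration5Callback()(cursor)  # DROP TABLE IF EXISTS makes a re-run harmless
conn.commit()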
@@ -72,7 +72,12 @@ class MigrateModelYamlToDb1:
                 continue

             base_type, model_type, model_name = str(model_key).split("/")
-            hash = FastModelHash.hash(self.config.models_path / stanza.path)
+            try:
+                hash = FastModelHash.hash(self.config.models_path / stanza.path)
+            except OSError:
+                self.logger.warning(f"The model at {stanza.path} is not a valid file or directory. Skipping migration.")
+                continue

             assert isinstance(model_key, str)
             new_key = sha1(model_key.encode("utf-8")).hexdigest()
@@ -31,6 +31,7 @@ class WorkflowRecordOrderBy(str, Enum, metaclass=MetaEnum):
 class WorkflowCategory(str, Enum, metaclass=MetaEnum):
     User = "user"
     Default = "default"
+    Project = "project"


 class WorkflowMeta(BaseModel):
invokeai/app/util/profiler.py (Normal file, 67 lines)
@@ -0,0 +1,67 @@
import cProfile
from logging import Logger
from pathlib import Path
from typing import Optional


class Profiler:
    """
    Simple wrapper around cProfile.

    Usage
    ```
    # Create a profiler
    profiler = Profiler(logger, output_dir, "sql_query_perf")
    # Start a new profile
    profiler.start("my_profile")
    # Do stuff
    profiler.stop()
    ```

    Visualize a profile as a flamegraph with [snakeviz](https://jiffyclub.github.io/snakeviz/)
    ```sh
    snakeviz my_profile.prof
    ```

    Visualize a profile as directed graph with [graphviz](https://graphviz.org/download/) & [gprof2dot](https://github.com/jrfonseca/gprof2dot)
    ```sh
    gprof2dot -f pstats my_profile.prof | dot -Tpng -o my_profile.png
    # SVG or PDF may be nicer - you can search for function names
    gprof2dot -f pstats my_profile.prof | dot -Tsvg -o my_profile.svg
    gprof2dot -f pstats my_profile.prof | dot -Tpdf -o my_profile.pdf
    ```
    """

    def __init__(self, logger: Logger, output_dir: Path, prefix: Optional[str] = None) -> None:
        self._logger = logger.getChild(f"profiler.{prefix}" if prefix else "profiler")
        self._output_dir = output_dir
        self._output_dir.mkdir(parents=True, exist_ok=True)
        self._profiler: Optional[cProfile.Profile] = None
        self._prefix = prefix

        self.profile_id: Optional[str] = None

    def start(self, profile_id: str) -> None:
        if self._profiler:
            self.stop()

        self.profile_id = profile_id

        self._profiler = cProfile.Profile()
        self._profiler.enable()
        self._logger.info(f"Started profiling {self.profile_id}.")

    def stop(self) -> Path:
        if not self._profiler:
            raise RuntimeError("Profiler not initialized. Call start() first.")
        self._profiler.disable()

        filename = f"{self._prefix}_{self.profile_id}.prof" if self._prefix else f"{self.profile_id}.prof"
        path = Path(self._output_dir, filename)

        self._profiler.dump_stats(path)
        self._logger.info(f"Stopped profiling, profile dumped to {path}.")
        self._profiler = None
        self.profile_id = None

        return path
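A hypothetical convenience wrapper (not part of the diff) showing one way to use the Profiler above so the stats are dumped even if the profiled block raises:

from contextlib import contextmanager

@contextmanager
def profiled(profiler: Profiler, profile_id: str):
    profiler.start(profile_id)
    try:
        yield
    finally:
        profiler.stop()

# usage: with profiled(profiler, "generate"): run_generation()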
invokeai/backend/image_util/depth_anything/__init__.py (Normal file, 109 lines)
@@ -0,0 +1,109 @@
import pathlib
from typing import Literal, Union

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from einops import repeat
from PIL import Image
from torchvision.transforms import Compose

from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.backend.image_util.depth_anything.model.dpt import DPT_DINOv2
from invokeai.backend.image_util.depth_anything.utilities.util import NormalizeImage, PrepareForNet, Resize
from invokeai.backend.util.devices import choose_torch_device
from invokeai.backend.util.util import download_with_progress_bar

config = InvokeAIAppConfig.get_config()

DEPTH_ANYTHING_MODELS = {
    "large": {
        "url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitl14.pth?download=true",
        "local": "any/annotators/depth_anything/depth_anything_vitl14.pth",
    },
    "base": {
        "url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitb14.pth?download=true",
        "local": "any/annotators/depth_anything/depth_anything_vitb14.pth",
    },
    "small": {
        "url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vits14.pth?download=true",
        "local": "any/annotators/depth_anything/depth_anything_vits14.pth",
    },
}


transform = Compose(
    [
        Resize(
            width=518,
            height=518,
            resize_target=False,
            keep_aspect_ratio=True,
            ensure_multiple_of=14,
            resize_method="lower_bound",
            image_interpolation_method=cv2.INTER_CUBIC,
        ),
        NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        PrepareForNet(),
    ]
)


class DepthAnythingDetector:
    def __init__(self) -> None:
        self.model = None
        self.model_size: Union[Literal["large", "base", "small"], None] = None

    def load_model(self, model_size: Literal["large", "base", "small"] = "small"):
        DEPTH_ANYTHING_MODEL_PATH = pathlib.Path(config.models_path / DEPTH_ANYTHING_MODELS[model_size]["local"])
        if not DEPTH_ANYTHING_MODEL_PATH.exists():
            download_with_progress_bar(DEPTH_ANYTHING_MODELS[model_size]["url"], DEPTH_ANYTHING_MODEL_PATH)

        if not self.model or model_size != self.model_size:
            del self.model
            self.model_size = model_size

            match self.model_size:
                case "small":
                    self.model = DPT_DINOv2(encoder="vits", features=64, out_channels=[48, 96, 192, 384])
                case "base":
                    self.model = DPT_DINOv2(encoder="vitb", features=128, out_channels=[96, 192, 384, 768])
                case "large":
                    self.model = DPT_DINOv2(encoder="vitl", features=256, out_channels=[256, 512, 1024, 1024])
                case _:
                    raise TypeError("Not a supported model")

            self.model.load_state_dict(torch.load(DEPTH_ANYTHING_MODEL_PATH.as_posix(), map_location="cpu"))
            self.model.eval()

        self.model.to(choose_torch_device())
        return self.model

    def to(self, device):
        self.model.to(device)
        return self

    def __call__(self, image, resolution=512, offload=False):
        image = np.array(image, dtype=np.uint8)
        image = image[:, :, ::-1] / 255.0

        image_height, image_width = image.shape[:2]
        image = transform({"image": image})["image"]
        image = torch.from_numpy(image).unsqueeze(0).to(choose_torch_device())

        with torch.no_grad():
            depth = self.model(image)
            depth = F.interpolate(depth[None], (image_height, image_width), mode="bilinear", align_corners=False)[0, 0]
            depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0

        depth_map = repeat(depth, "h w -> h w 3").cpu().numpy().astype(np.uint8)
        depth_map = Image.fromarray(depth_map)

        new_height = int(image_height * (resolution / image_width))
        depth_map = depth_map.resize((resolution, new_height))

        if offload:
            del self.model

        return depth_map
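A usage sketch for the detector above, assuming a local input image and a configured models directory (weights are downloaded on first use); the filenames are illustrative:

from PIL import Image

detector = DepthAnythingDetector()
detector.load_model(model_size="small")

image = Image.open("photo.png").convert("RGB")
depth_map = detector(image, resolution=512)  # greyscale depth as a PIL image
depth_map.save("depth.png")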
invokeai/backend/image_util/depth_anything/model/blocks.py (Normal file, 145 lines)
@@ -0,0 +1,145 @@
import torch.nn as nn


def _make_scratch(in_shape, out_shape, groups=1, expand=False):
    scratch = nn.Module()

    out_shape1 = out_shape
    out_shape2 = out_shape
    out_shape3 = out_shape
    if len(in_shape) >= 4:
        out_shape4 = out_shape

    if expand:
        out_shape1 = out_shape
        out_shape2 = out_shape * 2
        out_shape3 = out_shape * 4
        if len(in_shape) >= 4:
            out_shape4 = out_shape * 8

    scratch.layer1_rn = nn.Conv2d(
        in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
    )
    scratch.layer2_rn = nn.Conv2d(
        in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
    )
    scratch.layer3_rn = nn.Conv2d(
        in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
    )
    if len(in_shape) >= 4:
        scratch.layer4_rn = nn.Conv2d(
            in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
        )

    return scratch


class ResidualConvUnit(nn.Module):
    """Residual convolution module."""

    def __init__(self, features, activation, bn):
        """Init.

        Args:
            features (int): number of features
        """
        super().__init__()

        self.bn = bn

        self.groups = 1

        self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)

        self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)

        if self.bn:
            self.bn1 = nn.BatchNorm2d(features)
            self.bn2 = nn.BatchNorm2d(features)

        self.activation = activation

        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input

        Returns:
            tensor: output
        """

        out = self.activation(x)
        out = self.conv1(out)
        if self.bn:
            out = self.bn1(out)

        out = self.activation(out)
        out = self.conv2(out)
        if self.bn:
            out = self.bn2(out)

        if self.groups > 1:
            out = self.conv_merge(out)

        return self.skip_add.add(out, x)


class FeatureFusionBlock(nn.Module):
    """Feature fusion block."""

    def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None):
        """Init.

        Args:
            features (int): number of features
        """
        super(FeatureFusionBlock, self).__init__()

        self.deconv = deconv
        self.align_corners = align_corners

        self.groups = 1

        self.expand = expand
        out_features = features
        if self.expand:
            out_features = features // 2

        self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)

        self.resConfUnit1 = ResidualConvUnit(features, activation, bn)
        self.resConfUnit2 = ResidualConvUnit(features, activation, bn)

        self.skip_add = nn.quantized.FloatFunctional()

        self.size = size

    def forward(self, *xs, size=None):
        """Forward pass.

        Returns:
            tensor: output
        """
        output = xs[0]

        if len(xs) == 2:
            res = self.resConfUnit1(xs[1])
            output = self.skip_add.add(output, res)

        output = self.resConfUnit2(output)

        if (size is None) and (self.size is None):
            modifier = {"scale_factor": 2}
        elif size is None:
            modifier = {"size": self.size}
        else:
            modifier = {"size": size}

        output = nn.functional.interpolate(output, **modifier, mode="bilinear", align_corners=self.align_corners)

        output = self.out_conv(output)

        return output
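A quick shape check for the fusion block above: with the defaults (no explicit size) each call doubles the spatial resolution via scale_factor=2, and with expand=False the channel count is preserved.

import torch
import torch.nn as nn

block = FeatureFusionBlock(features=64, activation=nn.ReLU(False), bn=False)
x = torch.randn(1, 64, 16, 16)
print(block(x).shape)  # torch.Size([1, 64, 32, 32])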
invokeai/backend/image_util/depth_anything/model/dpt.py (Normal file, 183 lines)
@@ -0,0 +1,183 @@
from pathlib import Path

import torch
import torch.nn as nn
import torch.nn.functional as F

from .blocks import FeatureFusionBlock, _make_scratch

torchhub_path = Path(__file__).parent.parent / "torchhub"


def _make_fusion_block(features, use_bn, size=None):
    return FeatureFusionBlock(
        features,
        nn.ReLU(False),
        deconv=False,
        bn=use_bn,
        expand=False,
        align_corners=True,
        size=size,
    )


class DPTHead(nn.Module):
    def __init__(self, nclass, in_channels, features, out_channels, use_bn=False, use_clstoken=False):
        super(DPTHead, self).__init__()

        self.nclass = nclass
        self.use_clstoken = use_clstoken

        self.projects = nn.ModuleList(
            [
                nn.Conv2d(
                    in_channels=in_channels,
                    out_channels=out_channel,
                    kernel_size=1,
                    stride=1,
                    padding=0,
                )
                for out_channel in out_channels
            ]
        )

        self.resize_layers = nn.ModuleList(
            [
                nn.ConvTranspose2d(
                    in_channels=out_channels[0], out_channels=out_channels[0], kernel_size=4, stride=4, padding=0
                ),
                nn.ConvTranspose2d(
                    in_channels=out_channels[1], out_channels=out_channels[1], kernel_size=2, stride=2, padding=0
                ),
                nn.Identity(),
                nn.Conv2d(
                    in_channels=out_channels[3], out_channels=out_channels[3], kernel_size=3, stride=2, padding=1
                ),
            ]
        )

        if use_clstoken:
            self.readout_projects = nn.ModuleList()
            for _ in range(len(self.projects)):
                self.readout_projects.append(nn.Sequential(nn.Linear(2 * in_channels, in_channels), nn.GELU()))

        self.scratch = _make_scratch(
            out_channels,
            features,
            groups=1,
            expand=False,
        )

        self.scratch.stem_transpose = None

        self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet4 = _make_fusion_block(features, use_bn)

        head_features_1 = features
        head_features_2 = 32

        if nclass > 1:
            self.scratch.output_conv = nn.Sequential(
                nn.Conv2d(head_features_1, head_features_1, kernel_size=3, stride=1, padding=1),
                nn.ReLU(True),
                nn.Conv2d(head_features_1, nclass, kernel_size=1, stride=1, padding=0),
            )
        else:
            self.scratch.output_conv1 = nn.Conv2d(
                head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1
            )

            self.scratch.output_conv2 = nn.Sequential(
                nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1),
                nn.ReLU(True),
                nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0),
                nn.ReLU(True),
                nn.Identity(),
            )

    def forward(self, out_features, patch_h, patch_w):
        out = []
        for i, x in enumerate(out_features):
            if self.use_clstoken:
                x, cls_token = x[0], x[1]
                readout = cls_token.unsqueeze(1).expand_as(x)
                x = self.readout_projects[i](torch.cat((x, readout), -1))
            else:
                x = x[0]

            x = x.permute(0, 2, 1).reshape((x.shape[0], x.shape[-1], patch_h, patch_w))

            x = self.projects[i](x)
            x = self.resize_layers[i](x)

            out.append(x)

        layer_1, layer_2, layer_3, layer_4 = out

        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)

        path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:])
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:])
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:])
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

        out = self.scratch.output_conv1(path_1)
        out = F.interpolate(out, (int(patch_h * 14), int(patch_w * 14)), mode="bilinear", align_corners=True)
        out = self.scratch.output_conv2(out)

        return out


class DPT_DINOv2(nn.Module):
    def __init__(
        self,
        features,
        out_channels,
        encoder="vitl",
        use_bn=False,
        use_clstoken=False,
    ):
        super(DPT_DINOv2, self).__init__()

        assert encoder in ["vits", "vitb", "vitl"]

        # # in case the Internet connection is not stable, please load the DINOv2 locally
        # if use_local:
        #     self.pretrained = torch.hub.load(
        #         torchhub_path / "facebookresearch_dinov2_main",
        #         "dinov2_{:}14".format(encoder),
        #         source="local",
        #         pretrained=False,
        #     )
        # else:
        #     self.pretrained = torch.hub.load(
        #         "facebookresearch/dinov2",
        #         "dinov2_{:}14".format(encoder),
        #     )

        self.pretrained = torch.hub.load(
            "facebookresearch/dinov2",
            "dinov2_{:}14".format(encoder),
        )

        dim = self.pretrained.blocks[0].attn.qkv.in_features

        self.depth_head = DPTHead(1, dim, features, out_channels=out_channels, use_bn=use_bn, use_clstoken=use_clstoken)

    def forward(self, x):
        h, w = x.shape[-2:]

        features = self.pretrained.get_intermediate_layers(x, 4, return_class_token=True)

        patch_h, patch_w = h // 14, w // 14

        depth = self.depth_head(features, patch_h, patch_w)
        depth = F.interpolate(depth, size=(h, w), mode="bilinear", align_corners=True)
        depth = F.relu(depth)

        return depth.squeeze(1)
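The patch arithmetic used by DPTHead.forward above: DINOv2 tokenizes the image into 14x14 patches, so the default 518-pixel input yields a 37x37 token grid, and the head's prediction is interpolated back to (patch_h * 14, patch_w * 14).

h = w = 518
patch_h, patch_w = h // 14, w // 14  # 37, 37
assert (patch_h * 14, patch_w * 14) == (518, 518)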
invokeai/backend/image_util/depth_anything/utilities/util.py (Normal file, 227 lines)
@@ -0,0 +1,227 @@
import math

import cv2
import numpy as np
import torch
import torch.nn.functional as F


def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
    """Resize the sample to ensure the given size. Keeps aspect ratio.

    Args:
        sample (dict): sample
        size (tuple): image size

    Returns:
        tuple: new size
    """
    shape = list(sample["disparity"].shape)

    if shape[0] >= size[0] and shape[1] >= size[1]:
        return sample

    scale = [0, 0]
    scale[0] = size[0] / shape[0]
    scale[1] = size[1] / shape[1]

    scale = max(scale)

    shape[0] = math.ceil(scale * shape[0])
    shape[1] = math.ceil(scale * shape[1])

    # resize
    sample["image"] = cv2.resize(sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method)

    sample["disparity"] = cv2.resize(sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST)
    sample["mask"] = cv2.resize(
        sample["mask"].astype(np.float32),
        tuple(shape[::-1]),
        interpolation=cv2.INTER_NEAREST,
    )
    sample["mask"] = sample["mask"].astype(bool)

    return tuple(shape)


class Resize(object):
    """Resize sample to given size (width, height)."""

    def __init__(
        self,
        width,
        height,
        resize_target=True,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resize_method="lower_bound",
        image_interpolation_method=cv2.INTER_AREA,
    ):
        """Init.

        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional):
                True: Resize the full sample (image, mask, target).
                False: Resize image only.
                Defaults to True.
            keep_aspect_ratio (bool, optional):
                True: Keep the aspect ratio of the input sample.
                Output sample might not have the given width and height, and
                resize behaviour depends on the parameter 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional):
                Output width and height is constrained to be multiple of this parameter.
                Defaults to 1.
            resize_method (str, optional):
                "lower_bound": Output will be at least as large as the given size.
                "upper_bound": Output will be at max as large as the given size. (Output size might be smaller
                    than given size.)
                "minimal": Scale as least as possible. (Output size might be smaller than given size.)
                Defaults to "lower_bound".
        """
        self.__width = width
        self.__height = height

        self.__resize_target = resize_target
        self.__keep_aspect_ratio = keep_aspect_ratio
        self.__multiple_of = ensure_multiple_of
        self.__resize_method = resize_method
        self.__image_interpolation_method = image_interpolation_method

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)

        if max_val is not None and y > max_val:
            y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)

        if y < min_val:
            y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)

        return y

    def get_size(self, width, height):
        # determine new height and width
        scale_height = self.__height / height
        scale_width = self.__width / width

        if self.__keep_aspect_ratio:
            if self.__resize_method == "lower_bound":
                # scale such that output size is lower bound
                if scale_width > scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "upper_bound":
                # scale such that output size is upper bound
                if scale_width < scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "minimal":
                # scale as least as possible
                if abs(1 - scale_width) < abs(1 - scale_height):
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            else:
                raise ValueError(f"resize_method {self.__resize_method} not implemented")

        if self.__resize_method == "lower_bound":
            new_height = self.constrain_to_multiple_of(scale_height * height, min_val=self.__height)
            new_width = self.constrain_to_multiple_of(scale_width * width, min_val=self.__width)
        elif self.__resize_method == "upper_bound":
            new_height = self.constrain_to_multiple_of(scale_height * height, max_val=self.__height)
            new_width = self.constrain_to_multiple_of(scale_width * width, max_val=self.__width)
        elif self.__resize_method == "minimal":
            new_height = self.constrain_to_multiple_of(scale_height * height)
            new_width = self.constrain_to_multiple_of(scale_width * width)
        else:
            raise ValueError(f"resize_method {self.__resize_method} not implemented")

        return (new_width, new_height)

    def __call__(self, sample):
        width, height = self.get_size(sample["image"].shape[1], sample["image"].shape[0])

        # resize sample
        sample["image"] = cv2.resize(
            sample["image"],
            (width, height),
            interpolation=self.__image_interpolation_method,
        )

        if self.__resize_target:
            if "disparity" in sample:
                sample["disparity"] = cv2.resize(
                    sample["disparity"],
                    (width, height),
                    interpolation=cv2.INTER_NEAREST,
                )

            if "depth" in sample:
                sample["depth"] = cv2.resize(sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST)

            if "semseg_mask" in sample:
                # sample["semseg_mask"] = cv2.resize(
                #     sample["semseg_mask"], (width, height), interpolation=cv2.INTER_NEAREST
                # )
                sample["semseg_mask"] = F.interpolate(
                    torch.from_numpy(sample["semseg_mask"]).float()[None, None, ...], (height, width), mode="nearest"
                ).numpy()[0, 0]

            if "mask" in sample:
                sample["mask"] = cv2.resize(
                    sample["mask"].astype(np.float32),
                    (width, height),
                    interpolation=cv2.INTER_NEAREST,
                )
                # sample["mask"] = sample["mask"].astype(bool)

        # print(sample['image'].shape, sample['depth'].shape)
        return sample


class NormalizeImage(object):
    """Normalize image by given mean and std."""

    def __init__(self, mean, std):
        self.__mean = mean
        self.__std = std

    def __call__(self, sample):
        sample["image"] = (sample["image"] - self.__mean) / self.__std

        return sample


class PrepareForNet(object):
    """Prepare sample for usage as network input."""

    def __init__(self):
        pass

    def __call__(self, sample):
        image = np.transpose(sample["image"], (2, 0, 1))
        sample["image"] = np.ascontiguousarray(image).astype(np.float32)

        if "mask" in sample:
            sample["mask"] = sample["mask"].astype(np.float32)
            sample["mask"] = np.ascontiguousarray(sample["mask"])

        if "depth" in sample:
            depth = sample["depth"].astype(np.float32)
            sample["depth"] = np.ascontiguousarray(depth)

        if "semseg_mask" in sample:
            sample["semseg_mask"] = sample["semseg_mask"].astype(np.float32)
            sample["semseg_mask"] = np.ascontiguousarray(sample["semseg_mask"])

        return sample
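A worked example of Resize.get_size with the transform settings used by the detector (518-pixel lower bound, multiples of 14): a 640x480 input snaps to 686x518.

resize = Resize(width=518, height=518, keep_aspect_ratio=True, ensure_multiple_of=14, resize_method="lower_bound")
print(resize.get_size(640, 480))  # (686, 518)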
invokeai/backend/install/install_helper.py (Normal file, 281 lines)
@@ -0,0 +1,281 @@
"""Utility (backend) functions used by model_install.py"""
import re
from logging import Logger
from pathlib import Path
from typing import Any, Dict, List, Optional

import omegaconf
from huggingface_hub import HfFolder
from pydantic import BaseModel, Field
from pydantic.dataclasses import dataclass
from pydantic.networks import AnyHttpUrl
from requests import HTTPError
from tqdm import tqdm

import invokeai.configs as configs
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.download import DownloadQueueService
from invokeai.app.services.events.events_base import EventServiceBase
from invokeai.app.services.image_files.image_files_disk import DiskImageFileStorage
from invokeai.app.services.model_install import (
    HFModelSource,
    LocalModelSource,
    ModelInstallService,
    ModelInstallServiceBase,
    ModelSource,
    URLModelSource,
)
from invokeai.app.services.model_records import ModelRecordServiceBase, ModelRecordServiceSQL
from invokeai.app.services.shared.sqlite.sqlite_util import init_db
from invokeai.backend.model_manager import (
    BaseModelType,
    InvalidModelConfigException,
    ModelType,
)
from invokeai.backend.model_manager.metadata import UnknownMetadataException
from invokeai.backend.util.logging import InvokeAILogger

# name of the starter models file
INITIAL_MODELS = "INITIAL_MODELS2.yaml"


def initialize_record_store(app_config: InvokeAIAppConfig) -> ModelRecordServiceBase:
    """Return an initialized ModelConfigRecordServiceBase object."""
    logger = InvokeAILogger.get_logger(config=app_config)
    image_files = DiskImageFileStorage(f"{app_config.output_path}/images")
    db = init_db(config=app_config, logger=logger, image_files=image_files)
    obj: ModelRecordServiceBase = ModelRecordServiceSQL(db)
    return obj


def initialize_installer(
    app_config: InvokeAIAppConfig, event_bus: Optional[EventServiceBase] = None
) -> ModelInstallServiceBase:
    """Return an initialized ModelInstallService object."""
    record_store = initialize_record_store(app_config)
    metadata_store = record_store.metadata_store
    download_queue = DownloadQueueService()
    installer = ModelInstallService(
        app_config=app_config,
        record_store=record_store,
        metadata_store=metadata_store,
        download_queue=download_queue,
        event_bus=event_bus,
    )
    download_queue.start()
    installer.start()
    return installer


class UnifiedModelInfo(BaseModel):
    """Catchall class for information in INITIAL_MODELS2.yaml."""

    name: Optional[str] = None
    base: Optional[BaseModelType] = None
    type: Optional[ModelType] = None
    source: Optional[str] = None
    subfolder: Optional[str] = None
    description: Optional[str] = None
    recommended: bool = False
    installed: bool = False
    default: bool = False
    requires: List[str] = Field(default_factory=list)


@dataclass
class InstallSelections:
    """Lists of models to install and remove."""

    install_models: List[UnifiedModelInfo] = Field(default_factory=list)
    remove_models: List[str] = Field(default_factory=list)


class TqdmEventService(EventServiceBase):
    """An event service to track downloads."""

    def __init__(self) -> None:
        """Create a new TqdmEventService object."""
        super().__init__()
        self._bars: Dict[str, tqdm] = {}
        self._last: Dict[str, int] = {}

    def dispatch(self, event_name: str, payload: Any) -> None:
        """Dispatch an event by appending it to self.events."""
        if payload["event"] == "model_install_downloading":
            data = payload["data"]
            dest = data["local_path"]
            total_bytes = data["total_bytes"]
            bytes = data["bytes"]
            if dest not in self._bars:
                self._bars[dest] = tqdm(desc=Path(dest).name, initial=0, total=total_bytes, unit="iB", unit_scale=True)
                self._last[dest] = 0
            self._bars[dest].update(bytes - self._last[dest])
            self._last[dest] = bytes


class InstallHelper(object):
    """Capture information stored jointly in INITIAL_MODELS.yaml and the installed models db."""

    def __init__(self, app_config: InvokeAIAppConfig, logger: Logger):
        """Create new InstallHelper object."""
        self._app_config = app_config
        self.all_models: Dict[str, UnifiedModelInfo] = {}

        omega = omegaconf.OmegaConf.load(Path(configs.__path__[0]) / INITIAL_MODELS)
        assert isinstance(omega, omegaconf.dictconfig.DictConfig)

        self._installer = initialize_installer(app_config, TqdmEventService())
        self._initial_models = omega
        self._installed_models: List[str] = []
        self._starter_models: List[str] = []
        self._default_model: Optional[str] = None
        self._logger = logger
        self._initialize_model_lists()

    @property
    def installer(self) -> ModelInstallServiceBase:
        """Return the installer object used internally."""
        return self._installer

    def _initialize_model_lists(self) -> None:
        """
        Initialize our model slots.

        Set up the following:
        installed_models -- list of installed model keys
        starter_models -- list of starter model keys from INITIAL_MODELS
        all_models -- dict of key => UnifiedModelInfo
        default_model -- key to default model
        """
        # previously-installed models
        for model in self._installer.record_store.all_models():
            info = UnifiedModelInfo.parse_obj(model.dict())
            info.installed = True
            model_key = f"{model.base.value}/{model.type.value}/{model.name}"
            self.all_models[model_key] = info
            self._installed_models.append(model_key)

        for key in self._initial_models.keys():
            assert isinstance(key, str)
            if key in self.all_models:
                # we want to preserve the description
                description = self.all_models[key].description or self._initial_models[key].get("description")
                self.all_models[key].description = description
            else:
                base_model, model_type, model_name = key.split("/")
                info = UnifiedModelInfo(
                    name=model_name,
                    type=ModelType(model_type),
                    base=BaseModelType(base_model),
                    source=self._initial_models[key].source,
                    description=self._initial_models[key].get("description"),
                    recommended=self._initial_models[key].get("recommended", False),
                    default=self._initial_models[key].get("default", False),
                    subfolder=self._initial_models[key].get("subfolder"),
                    requires=list(self._initial_models[key].get("requires", [])),
                )
                self.all_models[key] = info
            if not self.default_model():
                self._default_model = key
            elif self._initial_models[key].get("default", False):
                self._default_model = key
            self._starter_models.append(key)

        # previously-installed models
        for model in self._installer.record_store.all_models():
            info = UnifiedModelInfo.parse_obj(model.dict())
            info.installed = True
            model_key = f"{model.base.value}/{model.type.value}/{model.name}"
            self.all_models[model_key] = info
            self._installed_models.append(model_key)

    def recommended_models(self) -> List[UnifiedModelInfo]:
        """List of the models recommended in INITIAL_MODELS.yaml."""
        return [self._to_model(x) for x in self._starter_models if self._to_model(x).recommended]

    def installed_models(self) -> List[UnifiedModelInfo]:
        """List of models already installed."""
        return [self._to_model(x) for x in self._installed_models]

    def starter_models(self) -> List[UnifiedModelInfo]:
        """List of starter models."""
        return [self._to_model(x) for x in self._starter_models]

    def default_model(self) -> Optional[UnifiedModelInfo]:
        """Return the default model."""
        return self._to_model(self._default_model) if self._default_model else None

    def _to_model(self, key: str) -> UnifiedModelInfo:
        return self.all_models[key]

    def _add_required_models(self, model_list: List[UnifiedModelInfo]) -> None:
        installed = {x.source for x in self.installed_models()}
        reverse_source = {x.source: x for x in self.all_models.values()}
        additional_models: List[UnifiedModelInfo] = []
        for model_info in model_list:
            for requirement in model_info.requires:
                if requirement not in installed and reverse_source.get(requirement):
                    additional_models.append(reverse_source[requirement])
        model_list.extend(additional_models)

    def _make_install_source(self, model_info: UnifiedModelInfo) -> ModelSource:
        assert model_info.source
        model_path_id_or_url = model_info.source.strip("\"' ")
        model_path = Path(model_path_id_or_url)

        if model_path.exists():  # local file on disk
            return LocalModelSource(path=model_path.absolute(), inplace=True)
        if re.match(r"^[^/]+/[^/]+$", model_path_id_or_url):  # hugging face repo_id
            return HFModelSource(
                repo_id=model_path_id_or_url,
                access_token=HfFolder.get_token(),
                subfolder=model_info.subfolder,
            )
        if re.match(r"^(http|https):", model_path_id_or_url):
            return URLModelSource(url=AnyHttpUrl(model_path_id_or_url))
        raise ValueError(f"Unsupported model source: {model_path_id_or_url}")

    def add_or_delete(self, selections: InstallSelections) -> None:
        """Add or delete selected models."""
        installer = self._installer
        self._add_required_models(selections.install_models)
        for model in selections.install_models:
            source = self._make_install_source(model)
            config = (
                {
                    "description": model.description,
                    "name": model.name,
                }
                if model.name
                else None
            )

            try:
                installer.import_model(
                    source=source,
                    config=config,
                )
            except (UnknownMetadataException, InvalidModelConfigException, HTTPError, OSError) as e:
                self._logger.warning(f"{source}: {e}")

        for model_to_remove in selections.remove_models:
            parts = model_to_remove.split("/")
            if len(parts) == 1:
                base_model, model_type, model_name = (None, None, model_to_remove)
            else:
                base_model, model_type, model_name = parts
            matches = installer.record_store.search_by_attr(
                base_model=BaseModelType(base_model) if base_model else None,
                model_type=ModelType(model_type) if model_type else None,
                model_name=model_name,
            )
            if len(matches) > 1:
                print(
                    f"{model_to_remove} is ambiguous. Please use model_type:model_name (e.g. main:my_model) to disambiguate."
                )
            elif not matches:
                print(f"{model_to_remove}: unknown model")
            else:
                for m in matches:
                    print(f"Deleting {m.type}:{m.name}")
                    installer.delete(m.key)

        installer.wait_for_installs()
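How _make_install_source above decides between the three source types, with illustrative inputs (an existing path wins first, then the repo_id shape, then a URL):

import re

HF_REPO_ID = r"^[^/]+/[^/]+$"
assert re.match(HF_REPO_ID, "stabilityai/sdxl-turbo")              # -> HFModelSource
assert not re.match(HF_REPO_ID, "https://example.com/model.ckpt")  # falls through to URLModelSource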
@@ -849,7 +849,7 @@ def migrate_if_needed(opt: Namespace, root: Path) -> bool:


 # -------------------------------------
-def main():
+def main() -> None:
     parser = argparse.ArgumentParser(description="InvokeAI model downloader")
     parser.add_argument(
         "--skip-sd-weights",
@@ -104,12 +104,14 @@ class ModelInstall(object):
         prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None,
         model_manager: Optional[ModelManager] = None,
         access_token: Optional[str] = None,
+        civitai_api_key: Optional[str] = None,
     ):
         self.config = config
         self.mgr = model_manager or ModelManager(config.model_conf_path)
         self.datasets = OmegaConf.load(Dataset_path)
         self.prediction_helper = prediction_type_helper
         self.access_token = access_token or HfFolder.get_token()
+        self.civitai_api_key = civitai_api_key or config.civitai_api_key
         self.reverse_paths = self._reverse_paths(self.datasets)

     def all_models(self) -> Dict[str, ModelLoadInfo]:
@@ -326,7 +328,11 @@ class ModelInstall(object):

     def _install_url(self, url: str) -> AddModelResult:
         with TemporaryDirectory(dir=self.config.models_path) as staging:
-            location = download_with_resume(url, Path(staging))
+            CIVITAI_RE = r".*civitai.com.*"
+            civit_url = re.match(CIVITAI_RE, url, re.IGNORECASE)
+            location = download_with_resume(
+                url, Path(staging), access_token=self.civitai_api_key if civit_url else None
+            )
             if not location:
                 logger.error(f"Unable to download {url}. Skipping.")
             info = ModelProbe().heuristic_probe(location, self.prediction_helper)
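Worth noting about CIVITAI_RE above: with re.match, the leading .* makes it an anywhere-match, and the unescaped dots make it a loose substring test rather than a strict host check, e.g.:

import re

CIVITAI_RE = r".*civitai.com.*"
assert re.match(CIVITAI_RE, "https://civitai.com/api/download/models/1102", re.IGNORECASE)
# the unescaped "." also matches lookalike hosts:
assert re.match(CIVITAI_RE, "https://civitaiXcom.example.org/", re.IGNORECASE)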
@@ -42,8 +42,7 @@ from diffusers.schedulers import (
     PNDMScheduler,
     UnCLIPScheduler,
 )
-from diffusers.utils import is_accelerate_available, is_omegaconf_available
-from diffusers.utils.import_utils import BACKENDS_MAPPING
+from diffusers.utils import is_accelerate_available
 from picklescan.scanner import scan_file_path
 from transformers import (
     AutoFeatureExtractor,

@@ -1211,9 +1210,6 @@ def download_from_original_stable_diffusion_ckpt(
     if prediction_type == "v-prediction":
         prediction_type = "v_prediction"

-    if not is_omegaconf_available():
-        raise ValueError(BACKENDS_MAPPING["omegaconf"][1])
-
     if from_safetensors:
         from safetensors.torch import load_file as safe_load

@@ -1647,11 +1643,6 @@ def download_controlnet_from_original_ckpt(
     cross_attention_dim: Optional[bool] = None,
     scan_needed: bool = False,
 ) -> DiffusionPipeline:
-    if not is_omegaconf_available():
-        raise ValueError(BACKENDS_MAPPING["omegaconf"][1])
-
     from omegaconf import OmegaConf

     if from_safetensors:
         from safetensors import safe_open
@@ -141,7 +141,7 @@ class StableDiffusionXLModel(DiffusersModel):
                 version=base_model,
                 model_config=config,
                 output_path=output_path,
-                use_safetensors=False,  # corrupts sdxl models for some reason
+                use_safetensors=True,
                 **kwargs,
             )
         else:
177
invokeai/backend/model_manager/merge.py
Normal file
177
invokeai/backend/model_manager/merge.py
Normal file
@@ -0,0 +1,177 @@
|
||||
"""
|
||||
invokeai.backend.model_manager.merge exports:
|
||||
merge_diffusion_models() -- combine multiple models by location and return a pipeline object
|
||||
merge_diffusion_models_and_commit() -- combine multiple models by ModelManager ID and write to models.yaml
|
||||
|
||||
Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team
|
||||
"""
|
||||
|
||||
import warnings
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Any, List, Optional, Set
|
||||
|
||||
import torch
|
||||
from diffusers import AutoPipelineForText2Image
|
||||
from diffusers import logging as dlogging
|
||||
|
||||
from invokeai.app.services.model_install import ModelInstallServiceBase
|
||||
from invokeai.backend.util.devices import choose_torch_device, torch_dtype
|
||||
|
||||
from . import (
|
||||
AnyModelConfig,
|
||||
BaseModelType,
|
||||
ModelType,
|
||||
ModelVariantType,
|
||||
)
|
||||
from .config import MainDiffusersConfig
|
||||
|
||||
|
||||
class MergeInterpolationMethod(str, Enum):
|
||||
WeightedSum = "weighted_sum"
|
||||
Sigmoid = "sigmoid"
|
||||
InvSigmoid = "inv_sigmoid"
|
||||
AddDifference = "add_difference"
|
||||
|
||||
|
||||
class ModelMerger(object):
|
||||
"""Wrapper class for model merge function."""
|
||||
|
||||
def __init__(self, installer: ModelInstallServiceBase):
|
||||
"""
|
||||
Initialize a ModelMerger object.
|
||||
|
||||
:param store: Underlying storage manager for the running process.
|
||||
:param config: InvokeAIAppConfig object (if not provided, default will be selected).
|
||||
"""
|
||||
self._installer = installer
|
||||
|
||||
def merge_diffusion_models(
|
||||
self,
|
||||
model_paths: List[Path],
|
||||
alpha: float = 0.5,
|
||||
interp: Optional[MergeInterpolationMethod] = None,
|
||||
force: bool = False,
|
||||
variant: Optional[str] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any: # pipe.merge is an untyped function.
|
||||
"""
|
||||
:param model_paths: up to three models, designated by their local paths or HuggingFace repo_ids
|
||||
:param alpha: The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
|
||||
would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2
|
||||
:param interp: The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_difference" and None.
|
||||
Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_difference" is supported.
|
||||
:param force: Whether to ignore mismatch in model_config.json for the current models. Defaults to False.
|
||||
|
||||
**kwargs - the default DiffusionPipeline.get_config_dict kwargs:
|
||||
cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map
|
||||
"""
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("ignore")
|
||||
verbosity = dlogging.get_verbosity()
|
||||
dlogging.set_verbosity_error()
|
||||
dtype = torch.float16 if variant == "fp16" else torch_dtype(choose_torch_device())
|
||||
|
||||
# Note that checkpoint_merger will not work with downloaded HuggingFace fp16 models
|
||||
# until upstream https://github.com/huggingface/diffusers/pull/6670 is merged and released.
|
||||
pipe = AutoPipelineForText2Image.from_pretrained(
|
||||
model_paths[0],
|
||||
custom_pipeline="checkpoint_merger",
|
||||
torch_dtype=dtype,
|
||||
variant=variant,
|
||||
)
|
||||
merged_pipe = pipe.merge(
|
||||
pretrained_model_name_or_path_list=model_paths,
|
||||
alpha=alpha,
|
||||
interp=interp.value if interp else None, # diffusers API treats None as "weighted sum"
|
||||
force=force,
|
||||
torch_dtype=dtype,
|
||||
variant=variant,
|
||||
**kwargs,
|
||||
)
|
||||
dlogging.set_verbosity(verbosity)
|
||||
return merged_pipe
|
||||
|
    def merge_diffusion_models_and_save(
        self,
        model_keys: List[str],
        merged_model_name: str,
        alpha: float = 0.5,
        force: bool = False,
        interp: Optional[MergeInterpolationMethod] = None,
        merge_dest_directory: Optional[Path] = None,
        variant: Optional[str] = None,
        **kwargs: Any,
    ) -> AnyModelConfig:
        """
        :param model_keys: up to three models, designated by their registered InvokeAI model keys
        :param merged_model_name: name for the new model
        :param alpha: The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. An alpha
            of 0.8 means that the first model's checkpoint affects the final result far less than an alpha of 0.2 would.
        :param interp: The interpolation method to use for the merging. Supports "weighted_average", "sigmoid", "inv_sigmoid", "add_difference" and None.
            Passing None uses the default interpolation, which is weighted-sum interpolation. For merging three checkpoints, only "add_difference" is supported. Add_difference is A+(B-C).
        :param force: Whether to ignore mismatches in model_config.json for the current models. Defaults to False.
        :param merge_dest_directory: Save the merged model to the designated directory (with 'merged_model_name' appended)

        **kwargs - the default DiffusionPipeline.get_config_dict kwargs:
            cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map
        """
        model_paths: List[Path] = []
        model_names: List[str] = []
        config = self._installer.app_config
        store = self._installer.record_store
        base_models: Set[BaseModelType] = set()
        vae = None
        # merge in half precision unless the app is configured for full precision
        variant = None if self._installer.app_config.full_precision else "fp16"

        assert (
            len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference
        ), "When merging three models, only the 'add_difference' merge method is supported"

        for key in model_keys:
            info = store.get_model(key)
            model_names.append(info.name)
            assert isinstance(
                info, MainDiffusersConfig
            ), f"{info.name} ({info.key}) is not a diffusers model. It must be optimized before merging"
            assert info.variant == ModelVariantType(
                "normal"
            ), f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged"

            # pick up the first model's vae
            if key == model_keys[0]:
                vae = info.vae

            # tally base models used
            base_models.add(info.base)
            model_paths.extend([config.models_path / info.path])

        assert len(base_models) == 1, f"All models to merge must have the same base model, but found bases {base_models}"
        base_model = base_models.pop()

        merge_method = None if interp == "weighted_sum" else MergeInterpolationMethod(interp)
        merged_pipe = self.merge_diffusion_models(model_paths, alpha, merge_method, force, variant=variant, **kwargs)
        dump_path = (
            Path(merge_dest_directory)
            if merge_dest_directory
            else config.models_path / base_model.value / ModelType.Main.value
        )
        dump_path.mkdir(parents=True, exist_ok=True)
        dump_path = dump_path / merged_model_name

        dtype = torch.float16 if variant == "fp16" else torch_dtype(choose_torch_device())
        merged_pipe.save_pretrained(dump_path.as_posix(), safe_serialization=True, torch_dtype=dtype, variant=variant)

        # register model and get its unique key
        key = self._installer.register_path(dump_path)

        # update model's config
        model_config = self._installer.record_store.get_model(key)
        model_config.update(
            {
                "name": merged_model_name,
                "description": f"Merge of models {', '.join(model_names)}",
                "vae": vae,
            }
        )
        self._installer.record_store.update_model(key, model_config)
        return model_config
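A hypothetical end-to-end use of the class (the keys are placeholders; initialize_installer comes from invokeai.backend.install.install_helper, as imported by merge_diffusers2.py below, and its exact signature is assumed here):

from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.install.install_helper import initialize_installer

config = InvokeAIAppConfig.get_config()
installer = initialize_installer(config)  # assumed signature
merger = ModelMerger(installer)
merged_config = merger.merge_diffusion_models_and_save(
    model_keys=["<key-of-model-A>", "<key-of-model-B>"],
    merged_model_name="modelA+modelB",
    alpha=0.5,
    interp=None,  # None selects weighted-sum interpolation
)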
@@ -170,6 +170,8 @@ class CivitaiMetadataFetch(ModelMetadataFetchBase):
         if model_id is None:
             version_url = CIVITAI_VERSION_ENDPOINT + str(version_id)
             version = self._requests.get(version_url).json()
+            if error := version.get("error"):
+                raise UnknownMetadataException(error)
             model_id = version["modelId"]

         model_url = CIVITAI_MODEL_ENDPOINT + str(model_id)
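The added guard matters because the Civitai API reports failures inside the JSON body rather than via the HTTP status alone. A standalone sketch of the two-step version-to-model lookup (the endpoint constant is an assumption; the real code uses its own session and raises UnknownMetadataException):

import requests

CIVITAI_VERSION_ENDPOINT = "https://civitai.com/api/v1/model-versions/"  # assumed value
version = requests.get(CIVITAI_VERSION_ENDPOINT + "12345").json()
if error := version.get("error"):
    raise RuntimeError(error)
model_id = version["modelId"]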
@@ -12,7 +12,7 @@ import psutil
 import torch
 from compel.cross_attention_control import Arguments
 from diffusers.models.attention_processor import Attention, AttentionProcessor, AttnProcessor, SlicedAttnProcessor
-from diffusers.models.unet_2d_condition import UNet2DConditionModel
+from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
 from torch import nn

 import invokeai.backend.util.logging as logger
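The same rename recurs below: diffusers moved its UNet modules into a unets subpackage. Code that must tolerate both layouts could guard the import; a sketch, not what this commit does:

try:
    from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
except ImportError:  # diffusers releases predating the unets subpackage
    from diffusers.models.unet_2d_condition import UNet2DConditionModel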
@@ -11,6 +11,7 @@ import logging
 import math
 import os
 import random
 from argparse import Namespace
 from pathlib import Path
 from typing import Optional
@@ -30,8 +31,6 @@ from diffusers.optimization import get_scheduler
 from diffusers.utils import check_min_version
 from diffusers.utils.import_utils import is_xformers_available
-from huggingface_hub import HfFolder, Repository, whoami
-
 # TODO: remove and import from diffusers.utils when the new version of diffusers is released
 from packaging import version
 from PIL import Image
 from torch.utils.data import Dataset
@@ -41,8 +40,8 @@ from transformers import CLIPTextModel, CLIPTokenizer

 # invokeai stuff
 from invokeai.app.services.config import InvokeAIAppConfig, PagingArgumentParser
-from invokeai.app.services.model_manager import ModelManagerService
-from invokeai.backend.model_management.models import SubModelType
+from invokeai.backend.install.install_helper import initialize_record_store
+from invokeai.backend.model_manager import BaseModelType, ModelType

 if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
     PIL_INTERPOLATION = {
@@ -77,7 +76,7 @@ def save_progress(text_encoder, placeholder_token_id, accelerator, placeholder_t
     torch.save(learned_embeds_dict, save_path)


-def parse_args():
+def parse_args() -> Namespace:
     config = InvokeAIAppConfig.get_config()
     parser = PagingArgumentParser(description="Textual inversion training")
     general_group = parser.add_argument_group("General")
@@ -444,7 +443,7 @@ class TextualInversionDataset(Dataset):
         self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
         self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)

-    def __len__(self):
+    def __len__(self) -> int:
         return self._length

     def __getitem__(self, i):
@@ -509,11 +508,10 @@ def do_textual_inversion_training(
     initializer_token: str,
     save_steps: int = 500,
     only_save_embeds: bool = False,
     revision: str = None,
-    tokenizer_name: str = None,
+    tokenizer_name: Optional[str] = None,
     learnable_property: str = "object",
     repeats: int = 100,
-    seed: int = None,
+    seed: Optional[int] = None,
     resolution: int = 512,
     center_crop: bool = False,
     train_batch_size: int = 16,
@@ -530,18 +528,18 @@ def do_textual_inversion_training(
     adam_weight_decay: float = 1e-02,
     adam_epsilon: float = 1e-08,
     push_to_hub: bool = False,
-    hub_token: str = None,
+    hub_token: Optional[str] = None,
     logging_dir: Path = Path("logs"),
     mixed_precision: str = "fp16",
     allow_tf32: bool = False,
     report_to: str = "tensorboard",
     local_rank: int = -1,
     checkpointing_steps: int = 500,
-    resume_from_checkpoint: Path = None,
+    resume_from_checkpoint: Optional[Path] = None,
     enable_xformers_memory_efficient_attention: bool = False,
-    hub_model_id: str = None,
+    hub_model_id: Optional[str] = None,
     **kwargs,
-):
+) -> None:
     assert model, "Please specify a base model with --model"
     assert train_data_dir, "Please specify a directory containing the training images using --train_data_dir"
     assert placeholder_token, "Please specify a trigger term using --placeholder_token"
@@ -564,8 +562,6 @@ def do_textual_inversion_training(
         project_config=accelerator_config,
     )

-    model_manager = ModelManagerService(config, logger)
-
     # Make one log on every process with the configuration for debugging.
     logging.basicConfig(
         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
@@ -603,44 +599,37 @@ def do_textual_inversion_training(
     elif output_dir is not None:
         os.makedirs(output_dir, exist_ok=True)

-    known_models = model_manager.model_names()
-    model_name = model.split("/")[-1]
-    model_meta = next((mm for mm in known_models if mm[0].endswith(model_name)), None)
-    assert model_meta is not None, f"Unknown model: {model}"
-    model_info = model_manager.model_info(*model_meta)
-    assert model_info["model_format"] == "diffusers", "This script only works with models of type 'diffusers'"
-    tokenizer_info = model_manager.get_model(*model_meta, submodel=SubModelType.Tokenizer)
-    noise_scheduler_info = model_manager.get_model(*model_meta, submodel=SubModelType.Scheduler)
-    text_encoder_info = model_manager.get_model(*model_meta, submodel=SubModelType.TextEncoder)
-    vae_info = model_manager.get_model(*model_meta, submodel=SubModelType.Vae)
-    unet_info = model_manager.get_model(*model_meta, submodel=SubModelType.UNet)
+    model_records = initialize_record_store(config)
+    base, type, name = model.split("/")  # note frontend still returns old-style keys
+    try:
+        model_config = model_records.search_by_attr(
+            model_name=name, model_type=ModelType(type), base_model=BaseModelType(base)
+        )[0]
+    except IndexError:
+        raise Exception(f"Unknown model {model}")
+    model_path = config.models_path / model_config.path

     pipeline_args = {"local_files_only": True}
     if tokenizer_name:
         tokenizer = CLIPTokenizer.from_pretrained(tokenizer_name, **pipeline_args)
     else:
-        tokenizer = CLIPTokenizer.from_pretrained(tokenizer_info.location, subfolder="tokenizer", **pipeline_args)
+        tokenizer = CLIPTokenizer.from_pretrained(model_path, subfolder="tokenizer", **pipeline_args)

     # Load scheduler and models
-    noise_scheduler = DDPMScheduler.from_pretrained(
-        noise_scheduler_info.location, subfolder="scheduler", **pipeline_args
-    )
+    noise_scheduler = DDPMScheduler.from_pretrained(model_path, subfolder="scheduler", **pipeline_args)
     text_encoder = CLIPTextModel.from_pretrained(
-        text_encoder_info.location,
+        model_path,
         subfolder="text_encoder",
         revision=revision,
         **pipeline_args,
     )
     vae = AutoencoderKL.from_pretrained(
-        vae_info.location,
+        model_path,
         subfolder="vae",
         revision=revision,
         **pipeline_args,
     )
     unet = UNet2DConditionModel.from_pretrained(
-        unet_info.location,
+        model_path,
         subfolder="unet",
         revision=revision,
         **pipeline_args,
     )

@@ -728,7 +717,7 @@ def do_textual_inversion_training(
         max_train_steps = num_train_epochs * num_update_steps_per_epoch
         overrode_max_train_steps = True

-    lr_scheduler = get_scheduler(
+    scheduler = get_scheduler(
         lr_scheduler,
         optimizer=optimizer,
         num_warmup_steps=lr_warmup_steps * gradient_accumulation_steps,
@@ -737,7 +726,7 @@ def do_textual_inversion_training(

     # Prepare everything with our `accelerator`.
     text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
-        text_encoder, optimizer, train_dataloader, lr_scheduler
+        text_encoder, optimizer, train_dataloader, scheduler
     )

     # For mixed precision training we cast the unet and vae weights to half-precision
@@ -863,7 +852,7 @@ def do_textual_inversion_training(
             accelerator.backward(loss)

             optimizer.step()
-            lr_scheduler.step()
+            scheduler.step()
             optimizer.zero_grad()

     # Let's make sure we don't update any embedding weights besides the newly added token
@@ -893,7 +882,7 @@ def do_textual_inversion_training(
                     accelerator.save_state(save_path)
                     logger.info(f"Saved state to {save_path}")

-            logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
+            logs = {"loss": loss.detach().item(), "lr": scheduler.get_last_lr()[0]}
             progress_bar.set_postfix(**logs)
             accelerator.log(logs, step=global_step)

@@ -910,7 +899,7 @@ def do_textual_inversion_training(
         save_full_model = not only_save_embeds
         if save_full_model:
             pipeline = StableDiffusionPipeline.from_pretrained(
-                unet_info.location,
+                model_path,
                 text_encoder=accelerator.unwrap_model(text_encoder),
                 vae=vae,
                 unet=unet,
@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union
 import diffusers
 import torch
 from diffusers.configuration_utils import ConfigMixin, register_to_config
-from diffusers.loaders import FromOriginalControlnetMixin
+from diffusers.loaders import FromOriginalControlNetMixin
 from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor
 from diffusers.models.controlnet import ControlNetConditioningEmbedding, ControlNetOutput, zero_module
 from diffusers.models.embeddings import (
@@ -14,8 +14,13 @@ from diffusers.models.embeddings import (
     Timesteps,
 )
 from diffusers.models.modeling_utils import ModelMixin
-from diffusers.models.unet_2d_blocks import CrossAttnDownBlock2D, DownBlock2D, UNetMidBlock2DCrossAttn, get_down_block
-from diffusers.models.unet_2d_condition import UNet2DConditionModel
+from diffusers.models.unets.unet_2d_blocks import (
+    CrossAttnDownBlock2D,
+    DownBlock2D,
+    UNetMidBlock2DCrossAttn,
+    get_down_block,
+)
+from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
 from torch import nn

 from invokeai.backend.util.logging import InvokeAILogger
@@ -27,7 +32,7 @@ from invokeai.backend.util.logging import InvokeAILogger

 logger = InvokeAILogger.get_logger(__name__)


-class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
+class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlNetMixin):
     """
     A ControlNet model.
@@ -286,7 +286,7 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path
         open_mode = "wb"
         exist_size = 0

-    resp = requests.get(url, header, stream=True)
+    resp = requests.get(url, headers=header, stream=True, allow_redirects=True)
     content_length = int(resp.headers.get("content-length", 0))

     if dest.is_dir():
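This fix matters because requests.get() takes params as its second positional argument, so the old call sent the headers dict as query-string data and never as HTTP headers. For example:

import requests

header = {"Range": "bytes=1024-"}  # hypothetical resume header
requests.get("https://example.com/f", header)          # wrong: encoded as ?Range=bytes%3D1024-
requests.get("https://example.com/f", headers=header)  # right: sent as an HTTP header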
157  invokeai/configs/INITIAL_MODELS2.yaml  Normal file
@@ -0,0 +1,157 @@
# This file predefines a few models that the user may want to install.
sd-1/main/stable-diffusion-v1-5:
  description: Stable Diffusion version 1.5 diffusers model (4.27 GB)
  source: runwayml/stable-diffusion-v1-5
  recommended: True
  default: True
sd-1/main/stable-diffusion-v1-5-inpainting:
  description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
  source: runwayml/stable-diffusion-inpainting
  recommended: True
sd-2/main/stable-diffusion-2-1:
  description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)
  source: stabilityai/stable-diffusion-2-1
  recommended: False
sd-2/main/stable-diffusion-2-inpainting:
  description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
  source: stabilityai/stable-diffusion-2-inpainting
  recommended: False
sdxl/main/stable-diffusion-xl-base-1-0:
  description: Stable Diffusion XL base model (12 GB)
  source: stabilityai/stable-diffusion-xl-base-1.0
  recommended: True
sdxl-refiner/main/stable-diffusion-xl-refiner-1-0:
  description: Stable Diffusion XL refiner model (12 GB)
  source: stabilityai/stable-diffusion-xl-refiner-1.0
  recommended: False
sdxl/vae/sdxl-vae-fp16-fix:
  description: Version of the SDXL-1.0 VAE that works in half precision mode
  source: madebyollin/sdxl-vae-fp16-fix
  recommended: True
sd-1/main/Analog-Diffusion:
  description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
  source: wavymulder/Analog-Diffusion
  recommended: False
sd-1/main/Deliberate:
  description: Versatile model that produces detailed images up to 768px (4.27 GB)
  source: XpucT/Deliberate
  recommended: False
sd-1/main/Dungeons-and-Diffusion:
  description: Dungeons & Dragons characters (2.13 GB)
  source: 0xJustin/Dungeons-and-Diffusion
  recommended: False
sd-1/main/dreamlike-photoreal-2:
  description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)
  source: dreamlike-art/dreamlike-photoreal-2.0
  recommended: False
sd-1/main/Inkpunk-Diffusion:
  description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)
  source: Envvi/Inkpunk-Diffusion
  recommended: False
sd-1/main/openjourney:
  description: An SD 1.5 model fine-tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)
  source: prompthero/openjourney
  recommended: False
sd-1/main/seek.art_MEGA:
  source: coreco/seek.art_MEGA
  description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)
  recommended: False
sd-1/main/trinart_stable_diffusion_v2:
  description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)
  source: naclbit/trinart_stable_diffusion_v2
  recommended: False
sd-1/controlnet/qrcode_monster:
  source: monster-labs/control_v1p_sd15_qrcode_monster
  subfolder: v2
sd-1/controlnet/canny:
  source: lllyasviel/control_v11p_sd15_canny
  recommended: True
sd-1/controlnet/inpaint:
  source: lllyasviel/control_v11p_sd15_inpaint
sd-1/controlnet/mlsd:
  source: lllyasviel/control_v11p_sd15_mlsd
sd-1/controlnet/depth:
  source: lllyasviel/control_v11f1p_sd15_depth
  recommended: True
sd-1/controlnet/normal_bae:
  source: lllyasviel/control_v11p_sd15_normalbae
sd-1/controlnet/seg:
  source: lllyasviel/control_v11p_sd15_seg
sd-1/controlnet/lineart:
  source: lllyasviel/control_v11p_sd15_lineart
  recommended: True
sd-1/controlnet/lineart_anime:
  source: lllyasviel/control_v11p_sd15s2_lineart_anime
sd-1/controlnet/openpose:
  source: lllyasviel/control_v11p_sd15_openpose
  recommended: True
sd-1/controlnet/scribble:
  source: lllyasviel/control_v11p_sd15_scribble
  recommended: False
sd-1/controlnet/softedge:
  source: lllyasviel/control_v11p_sd15_softedge
sd-1/controlnet/shuffle:
  source: lllyasviel/control_v11e_sd15_shuffle
sd-1/controlnet/tile:
  source: lllyasviel/control_v11f1e_sd15_tile
sd-1/controlnet/ip2p:
  source: lllyasviel/control_v11e_sd15_ip2p
sd-1/t2i_adapter/canny-sd15:
  source: TencentARC/t2iadapter_canny_sd15v2
sd-1/t2i_adapter/sketch-sd15:
  source: TencentARC/t2iadapter_sketch_sd15v2
sd-1/t2i_adapter/depth-sd15:
  source: TencentARC/t2iadapter_depth_sd15v2
sd-1/t2i_adapter/zoedepth-sd15:
  source: TencentARC/t2iadapter_zoedepth_sd15v1
sdxl/t2i_adapter/canny-sdxl:
  source: TencentARC/t2i-adapter-canny-sdxl-1.0
sdxl/t2i_adapter/zoedepth-sdxl:
  source: TencentARC/t2i-adapter-depth-zoe-sdxl-1.0
sdxl/t2i_adapter/lineart-sdxl:
  source: TencentARC/t2i-adapter-lineart-sdxl-1.0
sdxl/t2i_adapter/sketch-sdxl:
  source: TencentARC/t2i-adapter-sketch-sdxl-1.0
sd-1/embedding/EasyNegative:
  source: https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors
  recommended: True
  description: A textual inversion to use in the negative prompt to reduce bad anatomy
sd-1/lora/FlatColor:
  source: https://civitai.com/models/6433/loraflatcolor
  recommended: True
  description: A LoRA that generates scenery using solid blocks of color
sd-1/lora/Ink scenery:
  source: https://civitai.com/api/download/models/83390
  description: Generate India ink-like landscapes
sd-1/ip_adapter/ip_adapter_sd15:
  source: InvokeAI/ip_adapter_sd15
  recommended: True
  requires:
    - InvokeAI/ip_adapter_sd_image_encoder
  description: IP-Adapter for SD 1.5 models
sd-1/ip_adapter/ip_adapter_plus_sd15:
  source: InvokeAI/ip_adapter_plus_sd15
  recommended: False
  requires:
    - InvokeAI/ip_adapter_sd_image_encoder
  description: Refined IP-Adapter for SD 1.5 models
sd-1/ip_adapter/ip_adapter_plus_face_sd15:
  source: InvokeAI/ip_adapter_plus_face_sd15
  recommended: False
  requires:
    - InvokeAI/ip_adapter_sd_image_encoder
  description: Refined IP-Adapter for SD 1.5 models, adapted for faces
sdxl/ip_adapter/ip_adapter_sdxl:
  source: InvokeAI/ip_adapter_sdxl
  recommended: False
  requires:
    - InvokeAI/ip_adapter_sdxl_image_encoder
  description: IP-Adapter for SDXL models
any/clip_vision/ip_adapter_sd_image_encoder:
  source: InvokeAI/ip_adapter_sd_image_encoder
  recommended: False
  description: Required model for using IP-Adapters with SD-1/2 models
any/clip_vision/ip_adapter_sdxl_image_encoder:
  source: InvokeAI/ip_adapter_sdxl_image_encoder
  recommended: False
  description: Required model for using IP-Adapters with SDXL models
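Each entry is keyed base/type/name and carries a source (HuggingFace repo_id or direct URL) plus optional recommended, default, subfolder and requires fields. A minimal sketch of consuming the file (assumes PyYAML; the real loader lives in the install helper):

import yaml

with open("invokeai/configs/INITIAL_MODELS2.yaml") as f:
    starters = yaml.safe_load(f)

# e.g. the sources of all recommended starter models
recommended = {key: entry["source"] for key, entry in starters.items() if entry.get("recommended")}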
@@ -2,3 +2,5 @@
 Wrapper for invokeai.backend.configure.invokeai_configure
 """
 from ...backend.install.invokeai_configure import main as invokeai_configure  # noqa: F401
+
+__all__ = ["invokeai_configure"]
@@ -5,14 +5,14 @@ pip install <path_to_git_source>.
 import os
 import platform
-from distutils.version import LooseVersion
+from importlib.metadata import PackageNotFoundError, distribution, distributions

 import pkg_resources
 import psutil
 import requests
 from rich import box, print
 from rich.console import Console, group
 from rich.panel import Panel
-from rich.prompt import Prompt
+from rich.prompt import Confirm, Prompt
 from rich.style import Style

 from invokeai.version import __version__
@@ -61,6 +61,65 @@ def get_pypi_versions():
     return latest_version, latest_release_candidate, versions


+def get_torch_extra_index_url() -> str | None:
+    """
+    Determine torch wheel source URL and optional modules based on the user's OS.
+    """
+    resolved_url = None
+
+    # In all other cases (like MacOS (MPS) or Linux+CUDA), there is no need to specify the extra index URL.
+    torch_package_urls = {
+        "windows_cuda": "https://download.pytorch.org/whl/cu121",
+        "linux_rocm": "https://download.pytorch.org/whl/rocm5.6",
+        "linux_cpu": "https://download.pytorch.org/whl/cpu",
+    }
+
+    nvidia_packages_present = (
+        len([d.metadata["Name"] for d in distributions() if d.metadata["Name"].startswith("nvidia")]) > 0
+    )
+    device = "cuda" if nvidia_packages_present else None
+    manual_gpu_selection_prompt = (
+        "[bold]We tried and failed to guess your GPU capabilities[/] :thinking_face:. Please select the GPU type:"
+    )
+
+    if OS == "Linux":
+        if not device:
+            # do we even need to offer a CPU-only install option?
+            print(manual_gpu_selection_prompt)
+            print("1: NVIDIA (CUDA)")
+            print("2: AMD (ROCm)")
+            print("3: No GPU - CPU only")
+            answer = Prompt.ask("Choice:", choices=["1", "2", "3"], default="1")
+            match answer:
+                case "1":
+                    device = "cuda"
+                case "2":
+                    device = "rocm"
+                case "3":
+                    device = "cpu"
+
+        if device != "cuda":
+            resolved_url = torch_package_urls[f"linux_{device}"]
+
+    if OS == "Windows":
+        if not device:
+            print(manual_gpu_selection_prompt)
+            print("1: NVIDIA (CUDA)")
+            print("2: No GPU - CPU only")
+            answer = Prompt.ask("Your choice:", choices=["1", "2"], default="1")
+            match answer:
+                case "1":
+                    device = "cuda"
+                case "2":
+                    device = "cpu"
+
+        if device == "cuda":
+            resolved_url = torch_package_urls[f"windows_{device}"]
+
+    return resolved_url
+
+
 def welcome(latest_release: str, latest_prerelease: str):
     @group()
     def text():
@@ -89,12 +148,11 @@ def welcome(latest_release: str, latest_prerelease: str):


 def get_extras():
     extras = ""
     try:
-        _ = pkg_resources.get_distribution("xformers")
+        distribution("xformers")
         extras = "[xformers]"
-    except pkg_resources.DistributionNotFound:
-        pass
+    except PackageNotFoundError:
+        extras = ""
     return extras


@@ -125,8 +183,22 @@ def main():

     extras = get_extras()

+    console.line()
+    force_reinstall = Confirm.ask(
+        "[bold]Force reinstallation of all dependencies?[/] This [i]may[/] help fix a broken upgrade, but is usually not necessary.",
+        default=False,
+    )
+
+    console.line()
+    flags = []
+    if (index_url := get_torch_extra_index_url()) is not None:
+        flags.append(f"--extra-index-url {index_url}")
+    if force_reinstall:
+        flags.append("--force-reinstall")
+    flags = " ".join(flags)
+
     print(f":crossed_fingers: Upgrading to [yellow]{release}[/yellow]")
-    cmd = f'pip install "invokeai{extras}=={release}" --use-pep517 --upgrade'
+    cmd = f'pip install "invokeai{extras}=={release}" --use-pep517 --upgrade {flags}'

     print("")
     print("")
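For illustration, with the extras and flags resolved as above on a hypothetical Windows CUDA machine upgrading to this release, main() would assemble something like:

extras = "[xformers]"
release = "3.6.3rc1"
flags = "--extra-index-url https://download.pytorch.org/whl/cu121 --force-reinstall"
cmd = f'pip install "invokeai{extras}=={release}" --use-pep517 --upgrade {flags}'
# pip install "invokeai[xformers]==3.6.3rc1" --use-pep517 --upgrade --extra-index-url https://download.pytorch.org/whl/cu121 --force-reinstall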
645  invokeai/frontend/install/model_install2.py  Normal file
@@ -0,0 +1,645 @@
#!/usr/bin/env python
# Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
# Before running stable-diffusion on an internet-isolated machine,
# run this script from one with internet connectivity. The
# two machines must share a common .cache directory.

"""
This is the npyscreen frontend to the model installation application.
It is currently named model_install2.py, but will ultimately replace model_install.py.
"""

import argparse
import curses
import sys
import traceback
import warnings
from argparse import Namespace
from shutil import get_terminal_size
from typing import Any, Dict, List, Optional, Set

import npyscreen
import torch
from npyscreen import widget

from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.model_install import ModelInstallService
from invokeai.backend.install.install_helper import InstallHelper, InstallSelections, UnifiedModelInfo
from invokeai.backend.model_manager import ModelType
from invokeai.backend.util import choose_precision, choose_torch_device
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.frontend.install.widgets import (
    MIN_COLS,
    MIN_LINES,
    CenteredTitleText,
    CyclingForm,
    MultiSelectColumns,
    SingleSelectColumns,
    TextBox,
    WindowTooSmallException,
    set_min_terminal_size,
)

warnings.filterwarnings("ignore", category=UserWarning)  # noqa: E402
config = InvokeAIAppConfig.get_config()
logger = InvokeAILogger.get_logger("ModelInstallService")
logger.setLevel("WARNING")
# logger.setLevel('DEBUG')

# build a table mapping all non-printable characters to None
# for stripping control characters
# from https://stackoverflow.com/questions/92438/stripping-non-printable-characters-from-a-string-in-python
NOPRINT_TRANS_TABLE = {i: None for i in range(0, sys.maxunicode + 1) if not chr(i).isprintable()}

# maximum number of installed models we can display before overflowing vertically
MAX_OTHER_MODELS = 72


def make_printable(s: str) -> str:
    """Strip non-printable characters from a string."""
    return s.translate(NOPRINT_TRANS_TABLE)
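Because the table maps every non-printable code point to None, str.translate() simply drops those characters. For example:

assert make_printable("model\x07name") == "modelname"  # BEL control character stripped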
class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
    """Main form for interactive TUI."""

    # for responsive resizing set to False, but this seems to cause a crash!
    FIX_MINIMUM_SIZE_WHEN_CREATED = True

    # for persistence
    current_tab = 0

    def __init__(self, parentApp: npyscreen.NPSAppManaged, name: str, multipage: bool = False, **keywords: Any):
        self.multipage = multipage
        self.subprocess = None
        super().__init__(parentApp=parentApp, name=name, **keywords)

    def create(self) -> None:
        self.installer = self.parentApp.install_helper.installer
        self.model_labels = self._get_model_labels()
        self.keypress_timeout = 10
        self.counter = 0
        self.subprocess_connection = None

        window_width, window_height = get_terminal_size()

        # npyscreen has no typing hints
        self.nextrely -= 1  # type: ignore
        self.add_widget_intelligent(
            npyscreen.FixedText,
            value="Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields. Cursor keys navigate, and <space> selects.",
            editable=False,
            color="CAUTION",
        )
        self.nextrely += 1  # type: ignore
        self.tabs = self.add_widget_intelligent(
            SingleSelectColumns,
            values=[
                "STARTERS",
                "MAINS",
                "CONTROLNETS",
                "T2I-ADAPTERS",
                "IP-ADAPTERS",
                "LORAS",
                "TI EMBEDDINGS",
            ],
            value=[self.current_tab],
            columns=7,
            max_height=2,
            relx=8,
            scroll_exit=True,
        )
        self.tabs.on_changed = self._toggle_tables

        top_of_table = self.nextrely  # type: ignore
        self.starter_pipelines = self.add_starter_pipelines()
        bottom_of_table = self.nextrely  # type: ignore

        self.nextrely = top_of_table
        self.pipeline_models = self.add_pipeline_widgets(
            model_type=ModelType.Main, window_width=window_width, exclude=self.starter_models
        )
        # self.pipeline_models['autoload_pending'] = True
        bottom_of_table = max(bottom_of_table, self.nextrely)

        self.nextrely = top_of_table
        self.controlnet_models = self.add_model_widgets(
            model_type=ModelType.ControlNet,
            window_width=window_width,
        )
        bottom_of_table = max(bottom_of_table, self.nextrely)

        self.nextrely = top_of_table
        self.t2i_models = self.add_model_widgets(
            model_type=ModelType.T2IAdapter,
            window_width=window_width,
        )
        bottom_of_table = max(bottom_of_table, self.nextrely)

        self.nextrely = top_of_table
        self.ipadapter_models = self.add_model_widgets(
            model_type=ModelType.IPAdapter,
            window_width=window_width,
        )
        bottom_of_table = max(bottom_of_table, self.nextrely)

        self.nextrely = top_of_table
        self.lora_models = self.add_model_widgets(
            model_type=ModelType.Lora,
            window_width=window_width,
        )
        bottom_of_table = max(bottom_of_table, self.nextrely)

        self.nextrely = top_of_table
        self.ti_models = self.add_model_widgets(
            model_type=ModelType.TextualInversion,
            window_width=window_width,
        )
        bottom_of_table = max(bottom_of_table, self.nextrely)

        self.nextrely = bottom_of_table + 1

        self.nextrely += 1
        back_label = "BACK"
        cancel_label = "CANCEL"
        current_position = self.nextrely
        if self.multipage:
            self.back_button = self.add_widget_intelligent(
                npyscreen.ButtonPress,
                name=back_label,
                when_pressed_function=self.on_back,
            )
        else:
            self.nextrely = current_position
            self.cancel_button = self.add_widget_intelligent(
                npyscreen.ButtonPress, name=cancel_label, when_pressed_function=self.on_cancel
            )
            self.nextrely = current_position

        label = "APPLY CHANGES"
        self.nextrely = current_position
        self.done = self.add_widget_intelligent(
            npyscreen.ButtonPress,
            name=label,
            relx=window_width - len(label) - 15,
            when_pressed_function=self.on_done,
        )

        # This restores the selected page on return from an installation
        for _i in range(1, self.current_tab + 1):
            self.tabs.h_cursor_line_down(1)
        self._toggle_tables([self.current_tab])

    ############# diffusers tab ##########
    def add_starter_pipelines(self) -> dict[str, npyscreen.widget]:
        """Add widgets responsible for selecting diffusers models."""
        widgets: Dict[str, npyscreen.widget] = {}

        all_models = self.all_models  # master dict of all models, indexed by key
        model_list = [x for x in self.starter_models if all_models[x].type in ["main", "vae"]]
        model_labels = [self.model_labels[x] for x in model_list]

        widgets.update(
            label1=self.add_widget_intelligent(
                CenteredTitleText,
                name="Select from a starter set of Stable Diffusion models from HuggingFace and Civitai.",
                editable=False,
                labelColor="CAUTION",
            )
        )

        self.nextrely -= 1
        # if user has already installed some initial models, then don't patronize them
        # by showing more recommendations
        show_recommended = len(self.installed_models) == 0

        checked = [
            model_list.index(x)
            for x in model_list
            if (show_recommended and all_models[x].recommended) or all_models[x].installed
        ]
        widgets.update(
            models_selected=self.add_widget_intelligent(
                MultiSelectColumns,
                columns=1,
                name="Install Starter Models",
                values=model_labels,
                value=checked,
                max_height=len(model_list) + 1,
                relx=4,
                scroll_exit=True,
            ),
            models=model_list,
        )

        self.nextrely += 1
        return widgets

    ############# Add a set of model install widgets ########
    def add_model_widgets(
        self,
        model_type: ModelType,
        window_width: int = 120,
        install_prompt: Optional[str] = None,
        exclude: Optional[Set[str]] = None,
    ) -> dict[str, npyscreen.widget]:
        """Generic code to create model selection widgets."""
        if exclude is None:
            exclude = set()
        widgets: Dict[str, npyscreen.widget] = {}
        all_models = self.all_models
        model_list = sorted(
            [x for x in all_models if all_models[x].type == model_type and x not in exclude],
            key=lambda x: all_models[x].name or "",
        )
        model_labels = [self.model_labels[x] for x in model_list]

        show_recommended = len(self.installed_models) == 0
        truncated = False
        if len(model_list) > 0:
            max_width = max([len(x) for x in model_labels])
            columns = window_width // (max_width + 8)  # 8 characters for "[x] " and padding
            columns = min(len(model_list), columns) or 1
            prompt = (
                install_prompt
                or f"Select the desired {model_type.value.title()} models to install. Unchecked models will be purged from disk."
            )

            widgets.update(
                label1=self.add_widget_intelligent(
                    CenteredTitleText,
                    name=prompt,
                    editable=False,
                    labelColor="CAUTION",
                )
            )

            if len(model_labels) > MAX_OTHER_MODELS:
                model_labels = model_labels[0:MAX_OTHER_MODELS]
                truncated = True

            widgets.update(
                models_selected=self.add_widget_intelligent(
                    MultiSelectColumns,
                    columns=columns,
                    name=f"Install {model_type} Models",
                    values=model_labels,
                    value=[
                        model_list.index(x)
                        for x in model_list
                        if (show_recommended and all_models[x].recommended) or all_models[x].installed
                    ],
                    max_height=len(model_list) // columns + 1,
                    relx=4,
                    scroll_exit=True,
                ),
                models=model_list,
            )

        if truncated:
            widgets.update(
                warning_message=self.add_widget_intelligent(
                    npyscreen.FixedText,
                    value=f"Too many models to display (max={MAX_OTHER_MODELS}). Some are not displayed.",
                    editable=False,
                    color="CAUTION",
                )
            )

        self.nextrely += 1
        widgets.update(
            download_ids=self.add_widget_intelligent(
                TextBox,
                name="Additional URLs, or HuggingFace repo_ids to install (Space separated. Use shift-control-V to paste):",
                max_height=6,
                scroll_exit=True,
                editable=True,
            )
        )
        return widgets

    ### Tab for arbitrary diffusers widgets ###
    def add_pipeline_widgets(
        self,
        model_type: ModelType = ModelType.Main,
        window_width: int = 120,
        **kwargs,
    ) -> dict[str, npyscreen.widget]:
        """Similar to add_model_widgets() but adds some additional widgets at the bottom
        to support the autoload directory."""
        widgets = self.add_model_widgets(
            model_type=model_type,
            window_width=window_width,
            install_prompt=f"Installed {model_type.value.title()} models. Unchecked models in the InvokeAI root directory will be deleted. Enter URLs, paths or repo_ids to import.",
            **kwargs,
        )

        return widgets

    def resize(self) -> None:
        super().resize()
        if s := self.starter_pipelines.get("models_selected"):
            if model_list := self.starter_pipelines.get("models"):
                s.values = [self.model_labels[x] for x in model_list]

    def _toggle_tables(self, value: List[int]) -> None:
        selected_tab = value[0]
        widgets = [
            self.starter_pipelines,
            self.pipeline_models,
            self.controlnet_models,
            self.t2i_models,
            self.ipadapter_models,
            self.lora_models,
            self.ti_models,
        ]

        for group in widgets:
            for _k, v in group.items():
                try:
                    v.hidden = True
                    v.editable = False
                except Exception:
                    pass
        for _k, v in widgets[selected_tab].items():
            try:
                v.hidden = False
                if not isinstance(v, (npyscreen.FixedText, npyscreen.TitleFixedText, CenteredTitleText)):
                    v.editable = True
            except Exception:
                pass
        self.__class__.current_tab = selected_tab  # for persistence
        self.display()

    def _get_model_labels(self) -> dict[str, str]:
        """Return a dict of trimmed labels for all models, indexed by model key."""
        window_width, window_height = get_terminal_size()
        checkbox_width = 4
        spacing_width = 2
        result = {}

        models = self.all_models
        label_width = max([len(models[x].name or "") for x in self.starter_models])
        description_width = window_width - label_width - checkbox_width - spacing_width

        for key in self.all_models:
            description = models[key].description
            description = (
                description[0 : description_width - 3] + "..."
                if description and len(description) > description_width
                else description
                if description
                else ""
            )
            result[key] = f"%-{label_width}s %s" % (models[key].name, description)

        return result
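The label format pads each model name to a fixed column with printf-style %-Ns alignment, for example:

label_width = 12  # illustrative value
print(f"%-{label_width}s %s" % ("my-model", "An example description"))
# my-model     An example description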
    def _get_columns(self) -> int:
        window_width, window_height = get_terminal_size()
        cols = 4 if window_width > 240 else 3 if window_width > 160 else 2 if window_width > 80 else 1
        return min(cols, len(self.installed_models))

    def confirm_deletions(self, selections: InstallSelections) -> bool:
        remove_models = selections.remove_models
        if remove_models:
            model_names = [self.all_models[x].name or "" for x in remove_models]
            mods = "\n".join(model_names)
            is_ok = npyscreen.notify_ok_cancel(
                f"These unchecked models will be deleted from disk. Continue?\n---------\n{mods}"
            )
            assert isinstance(is_ok, bool)  # npyscreen doesn't have return type annotations
            return is_ok
        else:
            return True

    @property
    def all_models(self) -> Dict[str, UnifiedModelInfo]:
        # npyscreen doesn't have typing hints
        return self.parentApp.install_helper.all_models  # type: ignore

    @property
    def starter_models(self) -> List[str]:
        return self.parentApp.install_helper._starter_models  # type: ignore

    @property
    def installed_models(self) -> List[str]:
        return self.parentApp.install_helper._installed_models  # type: ignore

    def on_back(self) -> None:
        self.parentApp.switchFormPrevious()
        self.editing = False

    def on_cancel(self) -> None:
        self.parentApp.setNextForm(None)
        self.parentApp.user_cancelled = True
        self.editing = False

    def on_done(self) -> None:
        self.marshall_arguments()
        if not self.confirm_deletions(self.parentApp.install_selections):
            return
        self.parentApp.setNextForm(None)
        self.parentApp.user_cancelled = False
        self.editing = False

    def marshall_arguments(self) -> None:
        """
        Assemble arguments and store as attributes of the application:
        .starter_models: dict of model names to install from INITIAL_CONFIGURE.yaml
            True => Install
            False => Remove
        .scan_directory: Path to a directory of models to scan and import
        .autoscan_on_startup: True if invokeai should scan and import at startup time
        .import_model_paths: list of URLs, repo_ids and file paths to import
        """
        selections = self.parentApp.install_selections
        all_models = self.all_models

        # Defined models (in INITIAL_CONFIG.yaml or models.yaml) to add/remove
        ui_sections = [
            self.starter_pipelines,
            self.pipeline_models,
            self.controlnet_models,
            self.t2i_models,
            self.ipadapter_models,
            self.lora_models,
            self.ti_models,
        ]
        for section in ui_sections:
            if "models_selected" not in section:
                continue
            selected = {section["models"][x] for x in section["models_selected"].value}
            models_to_install = [x for x in selected if not self.all_models[x].installed]
            models_to_remove = [x for x in section["models"] if x not in selected and self.all_models[x].installed]
            selections.remove_models.extend(models_to_remove)
            selections.install_models.extend([all_models[x] for x in models_to_install])

        # models located in the 'download_ids' section
        for section in ui_sections:
            if downloads := section.get("download_ids"):
                models = [UnifiedModelInfo(source=x) for x in downloads.value.split()]
                selections.install_models.extend(models)


class AddModelApplication(npyscreen.NPSAppManaged):  # type: ignore
    def __init__(self, opt: Namespace, install_helper: InstallHelper):
        super().__init__()
        self.program_opts = opt
        self.user_cancelled = False
        self.install_selections = InstallSelections()
        self.install_helper = install_helper

    def onStart(self) -> None:
        npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
        self.main_form = self.addForm(
            "MAIN",
            addModelsForm,
            name="Install Stable Diffusion Models",
            cycle_widgets=False,
        )


def list_models(installer: ModelInstallService, model_type: ModelType):
    """Print out all models of type model_type."""
    models = installer.record_store.search_by_attr(model_type=model_type)
    print(f"Installed models of type `{model_type}`:")
    for model in models:
        path = (config.models_path / model.path).resolve()
        print(f"{model.name:40}{model.base.value:14}{path}")


# --------------------------------------------------------
def select_and_download_models(opt: Namespace) -> None:
    """Prompt user for install/delete selections and execute."""
    precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device()))
    # unsure how to avoid a typing complaint in the next line: config.precision is an enumerated Literal
    config.precision = precision  # type: ignore
    install_helper = InstallHelper(config, logger)
    installer = install_helper.installer

    if opt.list_models:
        list_models(installer, opt.list_models)

    elif opt.add or opt.delete:
        selections = InstallSelections(
            install_models=[UnifiedModelInfo(source=x) for x in (opt.add or [])], remove_models=opt.delete or []
        )
        install_helper.add_or_delete(selections)

    elif opt.default_only:
        selections = InstallSelections(install_models=[install_helper.default_model()])
        install_helper.add_or_delete(selections)

    elif opt.yes_to_all:
        selections = InstallSelections(install_models=install_helper.recommended_models())
        install_helper.add_or_delete(selections)

    # this is where the TUI is called
    else:
        if not set_min_terminal_size(MIN_COLS, MIN_LINES):
            raise WindowTooSmallException(
                "Could not increase terminal size. Try running again with a larger window or smaller font size."
            )

        installApp = AddModelApplication(opt, install_helper)
        try:
            installApp.run()
        except KeyboardInterrupt:
            print("Aborted...")
            sys.exit(-1)

        install_helper.add_or_delete(installApp.install_selections)


# -------------------------------------
def main() -> None:
    parser = argparse.ArgumentParser(description="InvokeAI model downloader")
    parser.add_argument(
        "--add",
        nargs="*",
        help="List of URLs, local paths or repo_ids of models to install",
    )
    parser.add_argument(
        "--delete",
        nargs="*",
        help="List of names of models to delete. Use type:name to disambiguate, as in `controlnet:my_model`",
    )
    parser.add_argument(
        "--full-precision",
        dest="full_precision",
        action=argparse.BooleanOptionalAction,
        type=bool,
        default=False,
        help="use 32-bit weights instead of faster 16-bit weights",
    )
    parser.add_argument(
        "--yes",
        "-y",
        dest="yes_to_all",
        action="store_true",
        help='answer "yes" to all prompts',
    )
    parser.add_argument(
        "--default_only",
        action="store_true",
        help="Only install the default model",
    )
    parser.add_argument(
        "--list-models",
        choices=[x.value for x in ModelType],
        help="list installed models",
    )
    parser.add_argument(
        "--root_dir",
        dest="root",
        type=str,
        default=None,
        help="path to root of install directory",
    )
    opt = parser.parse_args()

    invoke_args = []
    if opt.root:
        invoke_args.extend(["--root", opt.root])
    if opt.full_precision:
        invoke_args.extend(["--precision", "float32"])
    config.parse_args(invoke_args)
    logger = InvokeAILogger().get_logger(config=config)

    if not config.model_conf_path.exists():
        logger.info("Your InvokeAI root directory is not set up. Calling invokeai-configure.")
        from invokeai.frontend.install.invokeai_configure import invokeai_configure

        invokeai_configure()
        sys.exit(0)

    try:
        select_and_download_models(opt)
    except AssertionError as e:
        logger.error(e)
        sys.exit(-1)
    except KeyboardInterrupt:
        curses.nocbreak()
        curses.echo()
        curses.endwin()
        logger.info("Goodbye! Come back soon.")
    except WindowTooSmallException as e:
        logger.error(str(e))
    except widget.NotEnoughSpaceForWidget as e:
        if str(e).startswith("Height of 1 allocated"):
            logger.error("Insufficient vertical space for the interface. Please make your window taller and try again.")
            input("Press any key to continue...")
    except Exception as e:
        if str(e).startswith("addwstr"):
            logger.error(
                "Insufficient horizontal space for the interface. Please make your window wider and try again."
            )
        else:
            print(f"An exception has occurred: {str(e)} Details:")
            print(traceback.format_exc(), file=sys.stderr)
        input("Press any key to continue...")


# -------------------------------------
if __name__ == "__main__":
    main()
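Hypothetical invocations of the script above (run as a module here; the installed console-script name may differ):

# python -m invokeai.frontend.install.model_install2 --list-models main
# python -m invokeai.frontend.install.model_install2 --add runwayml/stable-diffusion-v1-5 --yes
# python -m invokeai.frontend.install.model_install2 --delete controlnet:my_model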
438  invokeai/frontend/merge/merge_diffusers2.py  Normal file
@@ -0,0 +1,438 @@
"""
|
||||
invokeai.frontend.merge exports a single function called merge_diffusion_models().
|
||||
|
||||
It merges 2-3 models together and create a new InvokeAI-registered diffusion model.
|
||||
|
||||
Copyright (c) 2023-24 Lincoln Stein and the InvokeAI Development Team
|
||||
"""
|
||||
import argparse
|
||||
import curses
|
||||
import re
|
||||
import sys
|
||||
from argparse import Namespace
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Tuple
|
||||
|
||||
import npyscreen
|
||||
from npyscreen import widget
|
||||
|
||||
import invokeai.backend.util.logging as logger
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.model_install import ModelInstallServiceBase
|
||||
from invokeai.app.services.model_records import ModelRecordServiceBase
|
||||
from invokeai.backend.install.install_helper import initialize_installer
|
||||
from invokeai.backend.model_manager import (
|
||||
BaseModelType,
|
||||
ModelFormat,
|
||||
ModelType,
|
||||
ModelVariantType,
|
||||
)
|
||||
from invokeai.backend.model_manager.merge import ModelMerger
|
||||
from invokeai.frontend.install.widgets import FloatTitleSlider, SingleSelectColumns, TextBox
|
||||
|
||||
config = InvokeAIAppConfig.get_config()
|
||||
|
||||
BASE_TYPES = [
|
||||
(BaseModelType.StableDiffusion1, "Models Built on SD-1.x"),
|
||||
(BaseModelType.StableDiffusion2, "Models Built on SD-2.x"),
|
||||
(BaseModelType.StableDiffusionXL, "Models Built on SDXL"),
|
||||
]
|
||||
|
||||
|
||||
def _parse_args() -> Namespace:
|
||||
parser = argparse.ArgumentParser(description="InvokeAI model merging")
|
||||
parser.add_argument(
|
||||
"--root_dir",
|
||||
type=Path,
|
||||
default=config.root,
|
||||
help="Path to the invokeai runtime directory",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--front_end",
|
||||
"--gui",
|
||||
dest="front_end",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Activate the text-based graphical front end for collecting parameters. Aside from --root_dir, other parameters will be ignored.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--models",
|
||||
dest="model_names",
|
||||
type=str,
|
||||
nargs="+",
|
||||
help="Two to three model names to be merged",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--base_model",
|
||||
type=str,
|
||||
choices=[x[0].value for x in BASE_TYPES],
|
||||
help="The base model shared by the models to be merged",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--merged_model_name",
|
||||
"--destination",
|
||||
dest="merged_model_name",
|
||||
type=str,
|
||||
help="Name of the output model. If not specified, will be the concatenation of the input model names.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--alpha",
|
||||
type=float,
|
||||
default=0.5,
|
||||
help="The interpolation parameter, ranging from 0 to 1. It affects the ratio in which the checkpoints are merged. Higher values give more weight to the 2d and 3d models",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--interpolation",
|
||||
dest="interp",
|
||||
type=str,
|
||||
choices=["weighted_sum", "sigmoid", "inv_sigmoid", "add_difference"],
|
||||
default="weighted_sum",
|
||||
help='Interpolation method to use. If three models are present, only "add_difference" will work.',
|
||||
)
|
||||
parser.add_argument(
|
||||
"--force",
|
||||
action="store_true",
|
||||
help="Try to merge models even if they are incompatible with each other",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--clobber",
|
||||
"--overwrite",
|
||||
dest="clobber",
|
||||
action="store_true",
|
||||
help="Overwrite the merged model if --merged_model_name already exists",
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
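A hypothetical non-GUI invocation (the console-script name is an assumption):

# invokeai-merge --models modelA modelB --base_model sd-1 \
#     --alpha 0.35 --interpolation sigmoid --merged_model_name modelA+modelB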
# ------------------------- GUI HERE -------------------------
class mergeModelsForm(npyscreen.FormMultiPageAction):
    interpolations = ["weighted_sum", "sigmoid", "inv_sigmoid"]

    def __init__(self, parentApp, name):
        self.parentApp = parentApp
        self.ALLOW_RESIZE = True
        self.FIX_MINIMUM_SIZE_WHEN_CREATED = False
        super().__init__(parentApp, name)

    @property
    def model_record_store(self) -> ModelRecordServiceBase:
        installer: ModelInstallServiceBase = self.parentApp.installer
        return installer.record_store

    def afterEditing(self) -> None:
        self.parentApp.setNextForm(None)

    def create(self) -> None:
        window_height, window_width = curses.initscr().getmaxyx()
        self.current_base = 0
        self.models = self.get_models(BASE_TYPES[self.current_base][0])
        self.model_names = [x[1] for x in self.models]
        max_width = max([len(x) for x in self.model_names])
        max_width += 6
        horizontal_layout = max_width * 3 < window_width

        self.add_widget_intelligent(
            npyscreen.FixedText,
            color="CONTROL",
            value="Select two models to merge and optionally a third.",
            editable=False,
        )
        self.add_widget_intelligent(
            npyscreen.FixedText,
            color="CONTROL",
            value="Use up and down arrows to move, <space> to select an item, <tab> and <shift-tab> to move from one field to the next.",
            editable=False,
        )
        self.nextrely += 1
        self.base_select = self.add_widget_intelligent(
            SingleSelectColumns,
            values=[x[1] for x in BASE_TYPES],
            value=[self.current_base],
            columns=4,
            max_height=2,
            relx=8,
            scroll_exit=True,
        )
        self.base_select.on_changed = self._populate_models
        self.add_widget_intelligent(
            npyscreen.FixedText,
            value="MODEL 1",
            color="GOOD",
            editable=False,
            rely=6 if horizontal_layout else None,
        )
        self.model1 = self.add_widget_intelligent(
            npyscreen.SelectOne,
            values=self.model_names,
            value=0,
            max_height=len(self.model_names),
            max_width=max_width,
            scroll_exit=True,
            rely=7,
        )
        self.add_widget_intelligent(
            npyscreen.FixedText,
            value="MODEL 2",
            color="GOOD",
            editable=False,
            relx=max_width + 3 if horizontal_layout else None,
            rely=6 if horizontal_layout else None,
        )
        self.model2 = self.add_widget_intelligent(
            npyscreen.SelectOne,
            name="(2)",
            values=self.model_names,
            value=1,
            max_height=len(self.model_names),
            max_width=max_width,
            relx=max_width + 3 if horizontal_layout else None,
            rely=7 if horizontal_layout else None,
            scroll_exit=True,
        )
        self.add_widget_intelligent(
            npyscreen.FixedText,
            value="MODEL 3",
            color="GOOD",
            editable=False,
            relx=max_width * 2 + 3 if horizontal_layout else None,
            rely=6 if horizontal_layout else None,
        )
        models_plus_none = self.model_names.copy()
        models_plus_none.insert(0, "None")
        self.model3 = self.add_widget_intelligent(
            npyscreen.SelectOne,
            name="(3)",
            values=models_plus_none,
            value=0,
            max_height=len(self.model_names) + 1,
            max_width=max_width,
            scroll_exit=True,
            relx=max_width * 2 + 3 if horizontal_layout else None,
            rely=7 if horizontal_layout else None,
        )
        for m in [self.model1, self.model2, self.model3]:
            m.when_value_edited = self.models_changed
        self.merged_model_name = self.add_widget_intelligent(
            TextBox,
            name="Name for merged model:",
            labelColor="CONTROL",
            max_height=3,
            value="",
            scroll_exit=True,
        )
        self.force = self.add_widget_intelligent(
            npyscreen.Checkbox,
            name="Force merge of models created by different diffusers library versions",
            labelColor="CONTROL",
            value=True,
            scroll_exit=True,
        )
        self.nextrely += 1
        self.merge_method = self.add_widget_intelligent(
            npyscreen.TitleSelectOne,
            name="Merge Method:",
            values=self.interpolations,
            value=0,
            labelColor="CONTROL",
            max_height=len(self.interpolations) + 1,
            scroll_exit=True,
        )
        self.alpha = self.add_widget_intelligent(
            FloatTitleSlider,
            name="Weight (alpha) to assign to second and third models:",
            out_of=1.0,
            step=0.01,
            lowest=0,
            value=0.5,
            labelColor="CONTROL",
            scroll_exit=True,
        )
        self.model1.editing = True

    def models_changed(self) -> None:
        models = self.model1.values
        selected_model1 = self.model1.value[0]
        selected_model2 = self.model2.value[0]
        selected_model3 = self.model3.value[0]
        merged_model_name = f"{models[selected_model1]}+{models[selected_model2]}"
        self.merged_model_name.value = merged_model_name

        if selected_model3 > 0:
            self.merge_method.values = ["add_difference ( A+(B-C) )"]
            # model3's list has an extra leading "None" entry, so shift the index down by one
            self.merged_model_name.value += f"+{models[selected_model3 - 1]}"
        else:
            self.merge_method.values = self.interpolations
        self.merge_method.value = 0

    def on_ok(self) -> None:
        if self.validate_field_values() and self.check_for_overwrite():
            self.parentApp.setNextForm(None)
            self.editing = False
            self.parentApp.merge_arguments = self.marshall_arguments()
            npyscreen.notify("Starting the merge...")
        else:
            self.editing = True

    def on_cancel(self) -> None:
        sys.exit(0)

    def marshall_arguments(self) -> dict:
        model_keys = [x[0] for x in self.models]
        models = [
            model_keys[self.model1.value[0]],
            model_keys[self.model2.value[0]],
        ]
        if self.model3.value[0] > 0:
            models.append(model_keys[self.model3.value[0] - 1])
            interp = "add_difference"
        else:
            interp = self.interpolations[self.merge_method.value[0]]

        args = {
            "model_keys": models,
            "alpha": self.alpha.value,
            "interp": interp,
            "force": self.force.value,
            "merged_model_name": self.merged_model_name.value,
        }
        return args

    def check_for_overwrite(self) -> bool:
        model_out = self.merged_model_name.value
        if model_out not in self.model_names:
            return True
        else:
            result: bool = npyscreen.notify_yes_no(
                f"The chosen merged model destination, {model_out}, is already in use. Overwrite?"
            )
return result
|
||||
|
||||
def validate_field_values(self) -> bool:
|
||||
bad_fields = []
|
||||
model_names = self.model_names
|
||||
selected_models = {model_names[self.model1.value[0]], model_names[self.model2.value[0]]}
|
||||
if self.model3.value[0] > 0:
|
||||
selected_models.add(model_names[self.model3.value[0] - 1])
|
||||
if len(selected_models) < 2:
|
||||
bad_fields.append(f"Please select two or three DIFFERENT models to compare. You selected {selected_models}")
|
||||
if len(bad_fields) > 0:
|
||||
message = "The following problems were detected and must be corrected:"
|
||||
for problem in bad_fields:
|
||||
message += f"\n* {problem}"
|
||||
npyscreen.notify_confirm(message)
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def get_models(self, base_model: Optional[BaseModelType] = None) -> List[Tuple[str, str]]: # key to name
|
||||
models = [
|
||||
(x.key, x.name)
|
||||
for x in self.model_record_store.search_by_attr(model_type=ModelType.Main, base_model=base_model)
|
||||
if x.format == ModelFormat("diffusers")
|
||||
and hasattr(x, "variant")
|
||||
and x.variant == ModelVariantType("normal")
|
||||
]
|
||||
return sorted(models, key=lambda x: x[1])
|
||||
|
||||
def _populate_models(self, value: List[int]) -> None:
|
||||
base_model = BASE_TYPES[value[0]][0]
|
||||
self.models = self.get_models(base_model)
|
||||
self.model_names = [x[1] for x in self.models]
|
||||
|
||||
models_plus_none = self.model_names.copy()
|
||||
models_plus_none.insert(0, "None")
|
||||
self.model1.values = self.model_names
|
||||
self.model2.values = self.model_names
|
||||
self.model3.values = models_plus_none
|
||||
|
||||
self.display()
|
||||
|
||||
|
||||
# npyscreen is untyped and causes mypy to get naggy
|
||||
class Mergeapp(npyscreen.NPSAppManaged): # type: ignore
|
||||
def __init__(self, installer: ModelInstallServiceBase):
|
||||
"""Initialize the npyscreen application."""
|
||||
super().__init__()
|
||||
self.installer = installer
|
||||
|
||||
def onStart(self) -> None:
|
||||
npyscreen.setTheme(npyscreen.Themes.ElegantTheme)
|
||||
self.main = self.addForm("MAIN", mergeModelsForm, name="Merge Models Settings")
|
||||
|
||||
|
||||
def run_gui(args: Namespace) -> None:
|
||||
installer = initialize_installer(config)
|
||||
mergeapp = Mergeapp(installer)
|
||||
mergeapp.run()
|
||||
merge_args = mergeapp.merge_arguments
|
||||
merger = ModelMerger(installer)
|
||||
merger.merge_diffusion_models_and_save(**merge_args)
|
||||
logger.info(f'Models merged into new model: "{merge_args.merged_model_name}".')
|
||||
|
||||
|
||||
def run_cli(args: Namespace) -> None:
|
||||
assert args.alpha >= 0 and args.alpha <= 1.0, "alpha must be between 0 and 1"
|
||||
assert (
|
||||
args.model_names and len(args.model_names) >= 1 and len(args.model_names) <= 3
|
||||
), "Please provide the --models argument to list 2 to 3 models to merge. Use --help for full usage."
|
||||
|
||||
if not args.merged_model_name:
|
||||
args.merged_model_name = "+".join(args.model_names)
|
||||
logger.info(f'No --merged_model_name provided. Defaulting to "{args.merged_model_name}"')
|
||||
|
||||
installer = initialize_installer(config)
|
||||
store = installer.record_store
|
||||
assert (
|
||||
len(store.search_by_attr(args.merged_model_name, args.base_model, ModelType.Main)) == 0 or args.clobber
|
||||
), f'A model named "{args.merged_model_name}" already exists. Use --clobber to overwrite.'
|
||||
|
||||
merger = ModelMerger(installer)
|
||||
model_keys = []
|
||||
for name in args.model_names:
|
||||
if len(name) == 32 and re.match(r"^[0-9a-f]$", name):
|
||||
model_keys.append(name)
|
||||
else:
|
||||
models = store.search_by_attr(
|
||||
model_name=name, model_type=ModelType.Main, base_model=BaseModelType(args.base_model)
|
||||
)
|
||||
assert len(models) > 0, f"{name}: Unknown model"
|
||||
assert len(models) < 2, f"{name}: More than one model by this name. Please specify the model key instead."
|
||||
model_keys.append(models[0].key)
|
||||
|
||||
merger.merge_diffusion_models_and_save(
|
||||
alpha=args.alpha,
|
||||
model_keys=model_keys,
|
||||
merged_model_name=args.merged_model_name,
|
||||
interp=args.interp,
|
||||
force=args.force,
|
||||
)
|
||||
logger.info(f'Models merged into new model: "{args.merged_model_name}".')
|
||||
|
||||
|
||||
def main() -> None:
|
||||
args = _parse_args()
|
||||
if args.root_dir:
|
||||
config.parse_args(["--root", str(args.root_dir)])
|
||||
else:
|
||||
config.parse_args([])
|
||||
|
||||
try:
|
||||
if args.front_end:
|
||||
run_gui(args)
|
||||
else:
|
||||
run_cli(args)
|
||||
except widget.NotEnoughSpaceForWidget as e:
|
||||
if str(e).startswith("Height of 1 allocated"):
|
||||
logger.error("You need to have at least two diffusers models defined in models.yaml in order to merge")
|
||||
else:
|
||||
logger.error("Not enough room for the user interface. Try making this window larger.")
|
||||
sys.exit(-1)
|
||||
except Exception as e:
|
||||
logger.error(str(e))
|
||||
sys.exit(-1)
|
||||
except KeyboardInterrupt:
|
||||
sys.exit(-1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -3,7 +3,7 @@
"""
This is the frontend to "textual_inversion_training.py".

-Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team
+Copyright (c) 2023-24 Lincoln Stein and the InvokeAI Development Team
"""


@@ -14,7 +14,7 @@ import sys
import traceback
from argparse import Namespace
from pathlib import Path
-from typing import List, Tuple
+from typing import Dict, List, Optional, Tuple

import npyscreen
from npyscreen import widget
@@ -22,8 +22,9 @@ from omegaconf import OmegaConf

import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
-
-from ...backend.training import do_textual_inversion_training, parse_args
+from invokeai.backend.install.install_helper import initialize_installer
+from invokeai.backend.model_manager import ModelType
+from invokeai.backend.training import do_textual_inversion_training, parse_args

TRAINING_DATA = "text-inversion-training-data"
TRAINING_DIR = "text-inversion-output"
@@ -44,19 +45,21 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
    precisions = ["no", "fp16", "bf16"]
    learnable_properties = ["object", "style"]

-    def __init__(self, parentApp, name, saved_args=None):
+    def __init__(self, parentApp: npyscreen.NPSAppManaged, name: str, saved_args: Optional[Dict[str, str]] = None):
        self.saved_args = saved_args or {}
        super().__init__(parentApp, name)

-    def afterEditing(self):
+    def afterEditing(self) -> None:
        self.parentApp.setNextForm(None)

-    def create(self):
+    def create(self) -> None:
        self.model_names, default = self.get_model_names()
        default_initializer_token = "★"
        default_placeholder_token = ""
        saved_args = self.saved_args
+
+        assert config is not None
+
        try:
            default = self.model_names.index(saved_args["model"])
        except Exception:
@@ -71,7 +74,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
        self.model = self.add_widget_intelligent(
            npyscreen.TitleSelectOne,
            name="Model Name:",
-            values=self.model_names,
+            values=sorted(self.model_names),
            value=default,
            max_height=len(self.model_names) + 1,
            scroll_exit=True,
@@ -236,7 +239,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
        )
        self.model.editing = True

-    def initializer_changed(self):
+    def initializer_changed(self) -> None:
        placeholder = self.placeholder_token.value
        self.prompt_token.value = f"(Trigger by using <{placeholder}> in your prompts)"
        self.train_data_dir.value = str(config.root_dir / TRAINING_DATA / placeholder)
@@ -275,10 +278,13 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
        return True

    def get_model_names(self) -> Tuple[List[str], int]:
-        conf = OmegaConf.load(config.root_dir / "configs/models.yaml")
-        model_names = [idx for idx in sorted(conf.keys()) if conf[idx].get("format", None) == "diffusers"]
-        defaults = [idx for idx in range(len(model_names)) if "default" in conf[model_names[idx]]]
-        default = defaults[0] if len(defaults) > 0 else 0
+        global config
+        assert config is not None
+        installer = initialize_installer(config)
+        store = installer.record_store
+        main_models = store.search_by_attr(model_type=ModelType.Main)
+        model_names = [f"{x.base.value}/{x.type.value}/{x.name}" for x in main_models if x.format == "diffusers"]
+        default = 0
        return (model_names, default)

    def marshall_arguments(self) -> dict:
@@ -326,7 +332,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):


class MyApplication(npyscreen.NPSAppManaged):
-    def __init__(self, saved_args=None):
+    def __init__(self, saved_args: Optional[Dict[str, str]] = None):
        super().__init__()
        self.ti_arguments = None
        self.saved_args = saved_args
@@ -341,11 +347,12 @@ class MyApplication(npyscreen.NPSAppManaged):
        )


-def copy_to_embeddings_folder(args: dict):
+def copy_to_embeddings_folder(args: Dict[str, str]) -> None:
    """
    Copy learned_embeds.bin into the embeddings folder, and offer to
    delete the full model and checkpoints.
    """
+    assert config is not None
    source = Path(args["output_dir"], "learned_embeds.bin")
    dest_dir_name = args["placeholder_token"].strip("<>")
    destination = config.root_dir / "embeddings" / dest_dir_name
@@ -358,10 +365,11 @@ def copy_to_embeddings_folder(args: dict):
        logger.info(f'Keeping {args["output_dir"]}')


-def save_args(args: dict):
+def save_args(args: dict) -> None:
    """
    Save the current argument values to an omegaconf file
    """
+    assert config is not None
    dest_dir = config.root_dir / TRAINING_DIR
    os.makedirs(dest_dir, exist_ok=True)
    conf_file = dest_dir / CONF_FILE
@@ -373,6 +381,7 @@ def previous_args() -> dict:
    """
    Get the previous arguments used.
    """
+    assert config is not None
    conf_file = config.root_dir / TRAINING_DIR / CONF_FILE
    try:
        conf = OmegaConf.load(conf_file)
@@ -383,24 +392,26 @@ def previous_args() -> dict:
    return conf


-def do_front_end(args: Namespace):
+def do_front_end() -> None:
+    global config
    saved_args = previous_args()
    myapplication = MyApplication(saved_args=saved_args)
    myapplication.run()

-    if args := myapplication.ti_arguments:
-        os.makedirs(args["output_dir"], exist_ok=True)
+    if my_args := myapplication.ti_arguments:
+        os.makedirs(my_args["output_dir"], exist_ok=True)

        # Automatically add angle brackets around the trigger
-        if not re.match("^<.+>$", args["placeholder_token"]):
-            args["placeholder_token"] = f"<{args['placeholder_token']}>"
+        if not re.match("^<.+>$", my_args["placeholder_token"]):
+            my_args["placeholder_token"] = f"<{my_args['placeholder_token']}>"

-        args["only_save_embeds"] = True
-        save_args(args)
+        my_args["only_save_embeds"] = True
+        save_args(my_args)

        try:
-            do_textual_inversion_training(InvokeAIAppConfig.get_config(), **args)
-            copy_to_embeddings_folder(args)
+            print(my_args)
+            do_textual_inversion_training(config, **my_args)
+            copy_to_embeddings_folder(my_args)
        except Exception as e:
            logger.error("An exception occurred during training. The exception was:")
            logger.error(str(e))
@@ -408,11 +419,12 @@ def do_front_end(args: Namespace):
            logger.error(traceback.format_exc())


-def main():
+def main() -> None:
+    global config

-    args = parse_args()
+    args: Namespace = parse_args()
    config = InvokeAIAppConfig.get_config()
    config.parse_args([])

    # change root if needed
    if args.root_dir:
@@ -420,7 +432,7 @@ def main():

    try:
        if args.front_end:
-            do_front_end(args)
+            do_front_end()
        else:
            do_textual_inversion_training(config, **vars(args))
    except AssertionError as e:
invokeai/frontend/training/textual_inversion2.py (new file, 454 lines)
@@ -0,0 +1,454 @@
#!/usr/bin/env python

"""
This is the frontend to "textual_inversion_training.py".

Copyright (c) 2023-24 Lincoln Stein and the InvokeAI Development Team
"""


import os
import re
import shutil
import sys
import traceback
from argparse import Namespace
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import npyscreen
from npyscreen import widget
from omegaconf import OmegaConf

import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.install.install_helper import initialize_installer
from invokeai.backend.model_manager import ModelType
from invokeai.backend.training import do_textual_inversion_training, parse_args

TRAINING_DATA = "text-inversion-training-data"
TRAINING_DIR = "text-inversion-output"
CONF_FILE = "preferences.conf"
config = None


class textualInversionForm(npyscreen.FormMultiPageAction):
    resolutions = [512, 768, 1024]
    lr_schedulers = [
        "linear",
        "cosine",
        "cosine_with_restarts",
        "polynomial",
        "constant",
        "constant_with_warmup",
    ]
    precisions = ["no", "fp16", "bf16"]
    learnable_properties = ["object", "style"]

    def __init__(self, parentApp: npyscreen.NPSAppManaged, name: str, saved_args: Optional[Dict[str, str]] = None):
        self.saved_args = saved_args or {}
        super().__init__(parentApp, name)

    def afterEditing(self) -> None:
        self.parentApp.setNextForm(None)

    def create(self) -> None:
        self.model_names, default = self.get_model_names()
        default_initializer_token = "★"
        default_placeholder_token = ""
        saved_args = self.saved_args

        assert config is not None

        try:
            default = self.model_names.index(saved_args["model"])
        except Exception:
            pass

        self.add_widget_intelligent(
            npyscreen.FixedText,
            value="Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields, cursor arrows to make a selection, and space to toggle checkboxes.",
            editable=False,
        )

        self.model = self.add_widget_intelligent(
            npyscreen.TitleSelectOne,
            name="Model Name:",
            values=sorted(self.model_names),
            value=default,
            max_height=len(self.model_names) + 1,
            scroll_exit=True,
        )
        self.placeholder_token = self.add_widget_intelligent(
            npyscreen.TitleText,
            name="Trigger Term:",
            value="",  # saved_args.get('placeholder_token',''), # to restore previous term
            scroll_exit=True,
        )
        self.placeholder_token.when_value_edited = self.initializer_changed
        self.nextrely -= 1
        self.nextrelx += 30
        self.prompt_token = self.add_widget_intelligent(
            npyscreen.FixedText,
            name="Trigger term for use in prompt",
            value="",
            editable=False,
            scroll_exit=True,
        )
        self.nextrelx -= 30
        self.initializer_token = self.add_widget_intelligent(
            npyscreen.TitleText,
            name="Initializer:",
            value=saved_args.get("initializer_token", default_initializer_token),
            scroll_exit=True,
        )
        self.resume_from_checkpoint = self.add_widget_intelligent(
            npyscreen.Checkbox,
            name="Resume from last saved checkpoint",
            value=False,
            scroll_exit=True,
        )
        self.learnable_property = self.add_widget_intelligent(
            npyscreen.TitleSelectOne,
            name="Learnable property:",
            values=self.learnable_properties,
            value=self.learnable_properties.index(saved_args.get("learnable_property", "object")),
            max_height=4,
            scroll_exit=True,
        )
        self.train_data_dir = self.add_widget_intelligent(
            npyscreen.TitleFilename,
            name="Data Training Directory:",
            select_dir=True,
            must_exist=False,
            value=str(
                saved_args.get(
                    "train_data_dir",
                    config.root_dir / TRAINING_DATA / default_placeholder_token,
                )
            ),
            scroll_exit=True,
        )
        self.output_dir = self.add_widget_intelligent(
            npyscreen.TitleFilename,
            name="Output Destination Directory:",
            select_dir=True,
            must_exist=False,
            value=str(
                saved_args.get(
                    "output_dir",
                    config.root_dir / TRAINING_DIR / default_placeholder_token,
                )
            ),
            scroll_exit=True,
        )
        self.resolution = self.add_widget_intelligent(
            npyscreen.TitleSelectOne,
            name="Image resolution (pixels):",
            values=self.resolutions,
            value=self.resolutions.index(saved_args.get("resolution", 512)),
            max_height=4,
            scroll_exit=True,
        )
        self.center_crop = self.add_widget_intelligent(
            npyscreen.Checkbox,
            name="Center crop images before resizing to resolution",
            value=saved_args.get("center_crop", False),
            scroll_exit=True,
        )
        self.mixed_precision = self.add_widget_intelligent(
            npyscreen.TitleSelectOne,
            name="Mixed Precision:",
            values=self.precisions,
            value=self.precisions.index(saved_args.get("mixed_precision", "fp16")),
            max_height=4,
            scroll_exit=True,
        )
        self.num_train_epochs = self.add_widget_intelligent(
            npyscreen.TitleSlider,
            name="Number of training epochs:",
            out_of=1000,
            step=50,
            lowest=1,
            value=saved_args.get("num_train_epochs", 100),
            scroll_exit=True,
        )
        self.max_train_steps = self.add_widget_intelligent(
            npyscreen.TitleSlider,
            name="Max Training Steps:",
            out_of=10000,
            step=500,
            lowest=1,
            value=saved_args.get("max_train_steps", 3000),
            scroll_exit=True,
        )
        self.train_batch_size = self.add_widget_intelligent(
            npyscreen.TitleSlider,
            name="Batch Size (reduce if you run out of memory):",
            out_of=50,
            step=1,
            lowest=1,
            value=saved_args.get("train_batch_size", 8),
            scroll_exit=True,
        )
        self.gradient_accumulation_steps = self.add_widget_intelligent(
            npyscreen.TitleSlider,
            name="Gradient Accumulation Steps (may need to decrease this to resume from a checkpoint):",
            out_of=10,
            step=1,
            lowest=1,
            value=saved_args.get("gradient_accumulation_steps", 4),
            scroll_exit=True,
        )
        self.lr_warmup_steps = self.add_widget_intelligent(
            npyscreen.TitleSlider,
            name="Warmup Steps:",
            out_of=100,
            step=1,
            lowest=0,
            value=saved_args.get("lr_warmup_steps", 0),
            scroll_exit=True,
        )
        self.learning_rate = self.add_widget_intelligent(
            npyscreen.TitleText,
            name="Learning Rate:",
            value=str(
                saved_args.get("learning_rate", "5.0e-04"),
            ),
            scroll_exit=True,
        )
        self.scale_lr = self.add_widget_intelligent(
            npyscreen.Checkbox,
            name="Scale learning rate by number of GPUs, steps and batch size",
            value=saved_args.get("scale_lr", True),
            scroll_exit=True,
        )
        self.enable_xformers_memory_efficient_attention = self.add_widget_intelligent(
            npyscreen.Checkbox,
            name="Use xformers acceleration",
            value=saved_args.get("enable_xformers_memory_efficient_attention", False),
            scroll_exit=True,
        )
        self.lr_scheduler = self.add_widget_intelligent(
            npyscreen.TitleSelectOne,
            name="Learning rate scheduler:",
            values=self.lr_schedulers,
            max_height=7,
            value=self.lr_schedulers.index(saved_args.get("lr_scheduler", "constant")),
            scroll_exit=True,
        )
        self.model.editing = True

    def initializer_changed(self) -> None:
        placeholder = self.placeholder_token.value
        self.prompt_token.value = f"(Trigger by using <{placeholder}> in your prompts)"
        self.train_data_dir.value = str(config.root_dir / TRAINING_DATA / placeholder)
        self.output_dir.value = str(config.root_dir / TRAINING_DIR / placeholder)
        self.resume_from_checkpoint.value = Path(self.output_dir.value).exists()

    def on_ok(self):
        if self.validate_field_values():
            self.parentApp.setNextForm(None)
            self.editing = False
            self.parentApp.ti_arguments = self.marshall_arguments()
            npyscreen.notify("Launching textual inversion training. This will take a while...")
        else:
            self.editing = True

    def ok_cancel(self):
        sys.exit(0)

    def validate_field_values(self) -> bool:
        bad_fields = []
        if self.model.value is None:
            bad_fields.append("Model Name must correspond to a known model in models.yaml")
        if not re.match("^[a-zA-Z0-9.-]+$", self.placeholder_token.value):
            bad_fields.append("Trigger term must only contain alphanumeric characters, the dot and hyphen")
        if self.train_data_dir.value is None:
            bad_fields.append("Data Training Directory cannot be empty")
        if self.output_dir.value is None:
            bad_fields.append("The Output Destination Directory cannot be empty")
        if len(bad_fields) > 0:
            message = "The following problems were detected and must be corrected:"
            for problem in bad_fields:
                message += f"\n* {problem}"
            npyscreen.notify_confirm(message)
            return False
        else:
            return True

    def get_model_names(self) -> Tuple[List[str], int]:
        global config
        assert config is not None
        installer = initialize_installer(config)
        store = installer.record_store
        main_models = store.search_by_attr(model_type=ModelType.Main)
        model_names = [f"{x.base.value}/{x.type.value}/{x.name}" for x in main_models if x.format == "diffusers"]
        default = 0
        return (model_names, default)

    def marshall_arguments(self) -> dict:
        args = {}

        # the choices
        args.update(
            model=self.model_names[self.model.value[0]],
            resolution=self.resolutions[self.resolution.value[0]],
            lr_scheduler=self.lr_schedulers[self.lr_scheduler.value[0]],
            mixed_precision=self.precisions[self.mixed_precision.value[0]],
            learnable_property=self.learnable_properties[self.learnable_property.value[0]],
        )

        # all the strings and booleans
        for attr in (
            "initializer_token",
            "placeholder_token",
            "train_data_dir",
            "output_dir",
            "scale_lr",
            "center_crop",
            "enable_xformers_memory_efficient_attention",
        ):
            args[attr] = getattr(self, attr).value

        # all the integers
        for attr in (
            "train_batch_size",
            "gradient_accumulation_steps",
            "num_train_epochs",
            "max_train_steps",
            "lr_warmup_steps",
        ):
            args[attr] = int(getattr(self, attr).value)

        # the floats (just one)
        args.update(learning_rate=float(self.learning_rate.value))

        # a special case
        if self.resume_from_checkpoint.value and Path(self.output_dir.value).exists():
            args["resume_from_checkpoint"] = "latest"

        return args


class MyApplication(npyscreen.NPSAppManaged):
    def __init__(self, saved_args: Optional[Dict[str, str]] = None):
        super().__init__()
        self.ti_arguments = None
        self.saved_args = saved_args

    def onStart(self):
        npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
        self.main = self.addForm(
            "MAIN",
            textualInversionForm,
            name="Textual Inversion Settings",
            saved_args=self.saved_args,
        )


def copy_to_embeddings_folder(args: Dict[str, str]) -> None:
    """
    Copy learned_embeds.bin into the embeddings folder, and offer to
    delete the full model and checkpoints.
    """
    assert config is not None
    source = Path(args["output_dir"], "learned_embeds.bin")
    dest_dir_name = args["placeholder_token"].strip("<>")
    destination = config.root_dir / "embeddings" / dest_dir_name
    os.makedirs(destination, exist_ok=True)
    logger.info(f"Training completed. Copying learned_embeds.bin into {str(destination)}")
    shutil.copy(source, destination)
    if (input("Delete training logs and intermediate checkpoints? [y] ") or "y").startswith(("y", "Y")):
        shutil.rmtree(Path(args["output_dir"]))
    else:
        logger.info(f'Keeping {args["output_dir"]}')


def save_args(args: dict) -> None:
    """
    Save the current argument values to an omegaconf file
    """
    assert config is not None
    dest_dir = config.root_dir / TRAINING_DIR
    os.makedirs(dest_dir, exist_ok=True)
    conf_file = dest_dir / CONF_FILE
    conf = OmegaConf.create(args)
    OmegaConf.save(config=conf, f=conf_file)


def previous_args() -> dict:
    """
    Get the previous arguments used.
    """
    assert config is not None
    conf_file = config.root_dir / TRAINING_DIR / CONF_FILE
    try:
        conf = OmegaConf.load(conf_file)
        conf["placeholder_token"] = conf["placeholder_token"].strip("<>")
    except Exception:
        conf = None

    return conf


def do_front_end() -> None:
    global config
    saved_args = previous_args()
    myapplication = MyApplication(saved_args=saved_args)
    myapplication.run()

    if my_args := myapplication.ti_arguments:
        os.makedirs(my_args["output_dir"], exist_ok=True)

        # Automatically add angle brackets around the trigger
        if not re.match("^<.+>$", my_args["placeholder_token"]):
            my_args["placeholder_token"] = f"<{my_args['placeholder_token']}>"

        my_args["only_save_embeds"] = True
        save_args(my_args)

        try:
            print(my_args)
            do_textual_inversion_training(config, **my_args)
            copy_to_embeddings_folder(my_args)
        except Exception as e:
            logger.error("An exception occurred during training. The exception was:")
            logger.error(str(e))
            logger.error("DETAILS:")
            logger.error(traceback.format_exc())


def main() -> None:
    global config

    args: Namespace = parse_args()
    config = InvokeAIAppConfig.get_config()
    config.parse_args([])

    # change root if needed
    if args.root_dir:
        config.root = args.root_dir

    try:
        if args.front_end:
            do_front_end()
        else:
            do_textual_inversion_training(config, **vars(args))
    except AssertionError as e:
        logger.error(e)
        sys.exit(-1)
    except KeyboardInterrupt:
        pass
    except (widget.NotEnoughSpaceForWidget, Exception) as e:
        if str(e).startswith("Height of 1 allocated"):
            logger.error("You need to have at least one diffusers model defined in models.yaml in order to train")
        elif str(e).startswith("addwstr"):
            logger.error("Not enough window space for the interface. Please make your window larger and try again.")
        else:
            logger.error(e)
        sys.exit(-1)


if __name__ == "__main__":
    main()
@@ -1,131 +1,26 @@
module.exports = {
  env: {
    browser: true,
    es6: true,
    node: true,
  },
  extends: [
    'eslint:recommended',
    'plugin:@typescript-eslint/recommended',
    'plugin:react/recommended',
    'plugin:react-hooks/recommended',
    'plugin:react/jsx-runtime',
    'prettier',
    'plugin:storybook/recommended',
  ],
  parser: '@typescript-eslint/parser',
  parserOptions: {
    ecmaFeatures: {
      jsx: true,
    },
    ecmaVersion: 2018,
    sourceType: 'module',
  },
  plugins: [
    'react',
    '@typescript-eslint',
    'eslint-plugin-react-hooks',
    'i18next',
    'path',
    'unused-imports',
    'simple-import-sort',
    'eslint-plugin-import',
    // These rules are too strict for normal usage, but are useful for optimizing rerenders
    // '@arthurgeron/react-usememo',
  ],
  root: true,
  extends: ['@invoke-ai/eslint-config-react'],
  plugins: ['path', 'i18next'],
  rules: {
    // TODO(psyche): Enable this rule. Requires no default exports in components - many changes.
    'react-refresh/only-export-components': 'off',
    // TODO(psyche): Enable this rule. Requires a lot of eslint-disable-next-line comments.
    '@typescript-eslint/consistent-type-assertions': 'off',
    // https://github.com/qdanik/eslint-plugin-path
    'path/no-relative-imports': ['error', { maxDepth: 0 }],
    curly: 'error',
    'i18next/no-literal-string': 'warn',
    'react/jsx-no-bind': ['error', { allowBind: true }],
    'react/jsx-curly-brace-presence': [
      'error',
      { props: 'never', children: 'never' },
    ],
    'react-hooks/exhaustive-deps': 'error',
    'no-var': 'error',
    'brace-style': 'error',
    'prefer-template': 'error',
    'import/no-duplicates': 'error',
    radix: 'error',
    'space-before-blocks': 'error',
    'import/prefer-default-export': 'off',
    '@typescript-eslint/no-unused-vars': 'off',
    'unused-imports/no-unused-imports': 'error',
    'unused-imports/no-unused-vars': [
      'warn',
      {
        vars: 'all',
        varsIgnorePattern: '^_',
        args: 'after-used',
        argsIgnorePattern: '^_',
      },
    ],
    // These rules are too strict for normal usage, but are useful for optimizing rerenders
    // '@arthurgeron/react-usememo/require-usememo': [
    //   'warn',
    //   {
    //     strict: false,
    //     checkHookReturnObject: false,
    //     fix: { addImports: true },
    //     checkHookCalls: false,
    //   },
    // ],
    // '@arthurgeron/react-usememo/require-memo': 'warn',
    '@typescript-eslint/ban-ts-comment': 'warn',
    '@typescript-eslint/no-explicit-any': 'warn',
    '@typescript-eslint/no-empty-interface': [
      'error',
      {
        allowSingleExtends: true,
      },
    ],
    '@typescript-eslint/consistent-type-imports': [
      'error',
      {
        prefer: 'type-imports',
        fixStyle: 'separate-type-imports',
        disallowTypeAnnotations: true,
      },
    ],
    '@typescript-eslint/no-import-type-side-effects': 'error',
    'simple-import-sort/imports': 'error',
    'simple-import-sort/exports': 'error',
    // Prefer @invoke-ai/ui components over chakra
    'no-restricted-imports': 'off',
    '@typescript-eslint/no-restricted-imports': [
      'warn',
      {
        paths: [
          {
            name: '@chakra-ui/react',
            message: "Please import from '@invoke-ai/ui' instead.",
          },
          {
            name: '@chakra-ui/layout',
            message: "Please import from '@invoke-ai/ui' instead.",
          },
          {
            name: '@chakra-ui/portal',
            message: "Please import from '@invoke-ai/ui' instead.",
          },
        ],
      },
    ],
    // https://github.com/edvardchen/eslint-plugin-i18next/blob/HEAD/docs/rules/no-literal-string.md
    'i18next/no-literal-string': 'error',
  },
  overrides: [
    /**
     * Overrides for stories
     */
    {
      files: ['*.stories.tsx'],
      rules: {
        // We may not have i18n available in stories.
        'i18next/no-literal-string': 'off',
      },
    },
  ],
  settings: {
    react: {
      version: 'detect',
    },
  },
};
@@ -1,9 +1,5 @@
module.exports = {
-  trailingComma: 'es5',
-  tabWidth: 2,
-  semi: true,
-  singleQuote: true,
-  endOfLine: 'auto',
+  ...require('@invoke-ai/prettier-config-react'),
  overrides: [
    {
      files: ['public/locales/*.json'],
@@ -1,7 +1,7 @@
import { PropsWithChildren, memo, useEffect } from 'react';
import { modelChanged } from '../src/features/parameters/store/generationSlice';
import { useAppDispatch } from '../src/app/store/storeHooks';
-import { useGlobalModifiersInit } from '@invoke-ai/ui';
+import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
/**
 * Initializes some state for storybook. Must be in a different component
 * so that it is run inside the redux context.
@@ -6,7 +6,6 @@ import { Provider } from 'react-redux';
import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
import { $baseUrl } from '../src/app/store/nanostores/baseUrl';
import { createStore } from '../src/app/store/store';
-import { Container } from '@chakra-ui/react';
// TODO: Disabled for IDE performance issues with our translation JSON
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
@@ -1,13 +1,7 @@
{
  "entry": ["src/main.tsx"],
  "extensions": [".ts", ".tsx"],
-  "ignorePatterns": [
-    "**/node_modules/**",
-    "dist/**",
-    "public/**",
-    "**/*.stories.tsx",
-    "config/**"
-  ],
+  "ignorePatterns": ["**/node_modules/**", "dist/**", "public/**", "**/*.stories.tsx", "config/**"],
  "ignoreUnresolved": [],
  "ignoreUnimported": ["src/i18.d.ts", "vite.config.ts", "src/vite-env.d.ts"],
  "respectGitignore": true,
invokeai/frontend/web/README.md (new file, 150 lines)
@@ -0,0 +1,150 @@
# Invoke UI

<!-- @import "[TOC]" {cmd="toc" depthFrom=2 depthTo=3 orderedList=false} -->

<!-- code_chunk_output -->

- [Dev environment](#dev-environment)
  - [Setup](#setup)
  - [Package scripts](#package-scripts)
  - [Type generation](#type-generation)
  - [Localization](#localization)
  - [VSCode](#vscode)
- [Contributing](#contributing)
  - [Check in before investing your time](#check-in-before-investing-your-time)
  - [Commit format](#commit-format)
  - [Submitting a PR](#submitting-a-pr)
- [Other docs](#other-docs)

<!-- /code_chunk_output -->

Invoke's UI is made possible by many contributors and open-source libraries. Thank you!

## Dev environment

### Setup

1. Install [node] and [pnpm].
1. Run `pnpm i` to install all packages.

#### Run in dev mode

1. From `invokeai/frontend/web/`, run `pnpm dev`.
1. From the repo root, run `python scripts/invokeai-web.py`.
1. Point your browser to the dev server address, e.g. <http://localhost:5173/>

### Package scripts

- `dev`: run the frontend in dev mode, enabling hot reloading
- `build`: run all checks (madge, eslint, prettier, tsc) and then build the frontend
- `typegen`: generate types from the OpenAPI schema (see [Type generation])
- `lint:madge`: check the frontend for circular dependencies
- `lint:eslint`: check the frontend for code quality
- `lint:prettier`: check the frontend for code formatting
- `lint:tsc`: check the frontend for type issues
- `lint`: run all checks concurrently
- `fix`: run `eslint` and `prettier`, fixing fixable issues

### Type generation

We use [openapi-typescript] to generate types from the app's OpenAPI schema.

The generated types are committed to the repo in [schema.ts].

```sh
# from the repo root, start the server
python scripts/invokeai-web.py
# from invokeai/frontend/web/, run the script
pnpm typegen
```

### Localization

We use [i18next] for localization, but translation to languages other than English happens on our [Weblate] project.

Only the English source strings should be changed in this repo.
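In practice, the `i18next/no-literal-string` ESLint rule expects UI copy to go through the translation hook rather than appear inline. A minimal sketch of the pattern (the key below is invented, not necessarily a real entry in the locale files):

```tsx
import { memo } from 'react';
import { useTranslation } from 'react-i18next';

const SaveButtonLabel = () => {
  const { t } = useTranslation();
  // The key resolves against the English source strings in public/locales/en.json
  return <span>{t('common.save')}</span>;
};

export default memo(SaveButtonLabel);
```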
### VSCode

#### Example debugger config

```jsonc
{
  "version": "0.2.0",
  "configurations": [
    {
      "type": "chrome",
      "request": "launch",
      "name": "Invoke UI",
      "url": "http://localhost:5173",
      "webRoot": "${workspaceFolder}/invokeai/frontend/web",
    },
  ],
}
```

#### Remote dev

We've noticed an intermittent timeout issue with the VSCode remote dev port forwarding.

We suggest disabling the editor's port forwarding feature and doing it manually via SSH:

```sh
ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host
```

## Contributing

Thanks for your interest in contributing to the Invoke Web UI!

Please follow these guidelines when contributing.

### Check in before investing your time

Please check in before you invest your time in anything besides a trivial fix, in case it conflicts with ongoing work or isn't aligned with the vision for the app.

If a feature request or issue doesn't already exist for the thing you want to work on, please create one.

Ping `@psychedelicious` on [discord] in the `#frontend-dev` channel or in the feature request / issue you want to work on - we're happy to chat.

### Code conventions

- This is a fairly complex app with a deep component tree. Please use memoization (`useCallback`, `useMemo`, `memo`) with enthusiasm.
- If you need to add some global, ephemeral state, please use [nanostores] if possible.
- Be careful with your redux selectors. If they need to be parameterized, consider creating them inside a `useMemo`, as in the sketch after this list.
- Feel free to use `lodash` (via `lodash-es`) to make the intent of your code clear.
- Please add comments describing the "why", not the "how" (unless it is really arcane).
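A rough sketch of the memoization and selector conventions above (the slice shape and component are invented for illustration; the app's real typed hooks live in `src/app/store/storeHooks.ts`):

```tsx
import { createSelector } from '@reduxjs/toolkit';
import { memo, useMemo } from 'react';
import { useSelector } from 'react-redux';

// Hypothetical state shape, for illustration only.
type RootState = { gallery: { selection: string[] } };

const ImageCard = memo(({ imageId }: { imageId: string }) => {
  // Creating the parameterized selector inside useMemo gives each component
  // instance its own memoized selector, keyed to its own imageId.
  const selectIsSelected = useMemo(
    () =>
      createSelector(
        (state: RootState) => state.gallery.selection,
        (selection) => selection.includes(imageId)
      ),
    [imageId]
  );
  const isSelected = useSelector(selectIsSelected);
  return <div data-selected={isSelected}>{imageId}</div>;
});
ImageCard.displayName = 'ImageCard';
```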
### Commit format

Please use the [conventional commits] spec for the web UI, with a scope of "ui":

- `chore(ui): bump deps`
- `chore(ui): lint`
- `feat(ui): add some cool new feature`
- `fix(ui): fix some bug`

### Submitting a PR

- Ensure your branch is tidy. Use an interactive rebase to clean up the commit history and reword the commit messages if they are not descriptive.
- Run `pnpm lint`. Some issues are auto-fixable with `pnpm fix`.
- Fill out the PR form when creating the PR.
  - It doesn't need to be super detailed, but a screenshot or video is nice if you changed something visually.
  - If a section isn't relevant, delete it. There are no UI tests at this time.

## Other docs

- [Workflows - Design and Implementation]
- [State Management]

[node]: https://nodejs.org/en/download/
[pnpm]: https://github.com/pnpm/pnpm
[discord]: https://discord.gg/ZmtBAhwWhy
[i18next]: https://github.com/i18next/react-i18next
[Weblate]: https://hosted.weblate.org/engage/invokeai/
[openapi-typescript]: https://github.com/drwpow/openapi-typescript
[nanostores]: https://github.com/nanostores/nanostores/
[Type generation]: #type-generation
[schema.ts]: ../src/services/api/schema.ts
[conventional commits]: https://www.conventionalcommits.org/en/v1.0.0/
[Workflows - Design and Implementation]: ./docs/WORKFLOWS_DESIGN_IMPLEMENTATION.md
[State Management]: ./docs/STATE_MGMT.md
@@ -22,12 +22,13 @@ export const packageConfig: UserConfig = {
      fileName: (format) => `invoke-ai-ui.${format}.js`,
    },
    rollupOptions: {
-      external: ['react', 'react-dom', '@emotion/react', '@chakra-ui/react'],
+      external: ['react', 'react-dom', '@emotion/react', '@chakra-ui/react', '@invoke-ai/ui-library'],
      output: {
        globals: {
          react: 'React',
          'react-dom': 'ReactDOM',
          '@emotion/react': 'EmotionReact',
+          '@invoke-ai/ui-library': 'UiLibrary',
        },
      },
    },
@@ -1,154 +0,0 @@
# InvokeAI Web UI

<!-- @import "[TOC]" {cmd="toc" depthFrom=1 depthTo=6 orderedList=false} -->

<!-- code_chunk_output -->

- [InvokeAI Web UI](#invokeai-web-ui)
  - [Core Libraries](#core-libraries)
    - [Redux Toolkit](#redux-toolkit)
    - [Socket\.IO](#socketio)
    - [Chakra UI](#chakra-ui)
    - [KonvaJS](#konvajs)
    - [Vite](#vite)
    - [i18next & Weblate](#i18next--weblate)
    - [openapi-typescript](#openapi-typescript)
    - [reactflow](#reactflow)
    - [zod](#zod)
  - [Client Types Generation](#client-types-generation)
  - [Package Scripts](#package-scripts)
  - [Contributing](#contributing)
    - [Dev Environment](#dev-environment)
      - [VSCode Remote Dev](#vscode-remote-dev)
    - [Production builds](#production-builds)

<!-- /code_chunk_output -->

The UI is a fairly straightforward Typescript React app.

## Core Libraries

InvokeAI's UI is made possible by a number of excellent open-source libraries. The most heavily-used are listed below, but there are many others.

### Redux Toolkit

[Redux Toolkit] is used for state management and fetching/caching:

- `RTK-Query` for data fetching and caching
- `createAsyncThunk` for a couple other HTTP requests
- `createEntityAdapter` to normalize things like images and models
- `createListenerMiddleware` for async workflows

We use [redux-remember] for persistence.
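For instance, `createListenerMiddleware` lets an async workflow react to actions outside of any component. A minimal sketch, with invented action names rather than the app's actual actions:

```ts
import { createAction, createListenerMiddleware } from '@reduxjs/toolkit';

// Hypothetical actions, for illustration only.
const imageUploaded = createAction<{ imageName: string }>('gallery/imageUploaded');
const selectionChanged = createAction<string[]>('gallery/selectionChanged');

export const listenerMiddleware = createListenerMiddleware();

listenerMiddleware.startListening({
  actionCreator: imageUploaded,
  effect: async (action, listenerApi) => {
    // Follow-up work runs here: further requests, debouncing, more dispatches...
    listenerApi.dispatch(selectionChanged([action.payload.imageName]));
  },
});
```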
### Socket\.IO

[Socket.IO] is used for server-to-client events, like generation process and queue state changes.
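On the client, this amounts to subscribing to named events on a socket. A tiny sketch, with a made-up event name and payload shape (the real event contract lives in the app's socket code):

```ts
import { io } from 'socket.io-client';

// Assumed server address; the backend serves Socket.IO alongside the HTTP API.
const socket = io('http://127.0.0.1:9090');

// Event name and payload shape are illustrative only.
socket.on('generation_progress', (payload: { step: number; totalSteps: number }) => {
  console.log(`step ${payload.step} of ${payload.totalSteps}`);
});
```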
### Chakra UI

[Chakra UI] is our primary UI library, but we also use a few components from [Mantine v6].

### KonvaJS

[KonvaJS] powers the canvas. In the future, we'd like to explore [PixiJS] or WebGPU.

### Vite

[Vite] is our bundler.

### i18next & Weblate

We use [i18next] for localization, but translation to languages other than English happens on our [Weblate] project. **Only the English source strings should be changed in this repo.**

### openapi-typescript

[openapi-typescript] is used to generate types from the server's OpenAPI schema. See TYPES_CODEGEN.md.

### reactflow

[reactflow] powers the Workflow Editor.

### zod

[zod] schemas are used to model data structures and provide runtime validation.
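A minimal sketch of that pattern (the schema is invented, not one of the app's real schemas): the static type is inferred from the schema, and the same definition validates unknown data at runtime.

```ts
import { z } from 'zod';

// Hypothetical schema, for illustration only.
const zImageDTO = z.object({
  image_name: z.string(),
  width: z.number().int().positive(),
  height: z.number().int().positive(),
});

// The static type is derived from the schema, so the two can never drift apart.
type ImageDTO = z.infer<typeof zImageDTO>;

const candidate: unknown = JSON.parse('{"image_name":"a.png","width":512,"height":512}');
const image: ImageDTO = zImageDTO.parse(candidate); // throws if the shape is wrong
```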
## Client Types Generation

We use [openapi-typescript] to generate types from the app's OpenAPI schema.

The generated types are written to `invokeai/frontend/web/src/services/api/schema.d.ts`. This file is committed to the repo.

The server must be started and available at <http://127.0.0.1:9090>.

```sh
# from the repo root, start the server
python scripts/invokeai-web.py
# from invokeai/frontend/web/, run the script
pnpm typegen
```

## Package Scripts

See `package.json` for all scripts.

Run with `pnpm <script name>`.

- `dev`: run the frontend in dev mode, enabling hot reloading
- `build`: run all checks (madge, eslint, prettier, tsc) and then build the frontend
- `typegen`: generate types from the OpenAPI schema (see [Client Types Generation](#client-types-generation))
- `lint:madge`: check the frontend for circular dependencies
- `lint:eslint`: check the frontend for code quality
- `lint:prettier`: check the frontend for code formatting
- `lint:tsc`: check the frontend for type issues
- `lint`: run all checks concurrently
- `fix`: run `eslint` and `prettier`, fixing fixable issues

## Contributing

Thanks for your interest in contributing to the InvokeAI Web UI!

We encourage you to ping @psychedelicious and @blessedcoolant on [discord] if you want to contribute, just to touch base and ensure your work doesn't conflict with anything else going on. The project is very active.

### Dev Environment

Install [node] and [pnpm].

From `invokeai/frontend/web/`, run `pnpm i` to get everything set up.

Start everything in dev mode:

1. Start the dev server: `pnpm dev`
2. Start the InvokeAI Nodes backend: `python scripts/invokeai-web.py # run from the repo root`
3. Point your browser to the dev server address, e.g. <http://localhost:5173/>

#### VSCode Remote Dev

We've noticed an intermittent issue with the VSCode Remote Dev port forwarding. If you use this feature of VSCode, you may intermittently click the Invoke button and then get nothing until the request times out. We suggest disabling the IDE's port forwarding feature and doing it manually via SSH:

`ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host`

### Production builds

For a number of technical and logistical reasons, we need to commit UI build artefacts to the repo.

If you submit a PR, there is a good chance we will ask you to include a separate commit with a build of the app.

To build for production, run `pnpm build`.

[node]: https://nodejs.org/en/download/
[pnpm]: https://github.com/pnpm/pnpm
[discord]: https://discord.gg/ZmtBAhwWhy
[Redux Toolkit]: https://github.com/reduxjs/redux-toolkit
[redux-remember]: https://github.com/zewish/redux-remember
[Socket.IO]: https://github.com/socketio/socket.io
[Chakra UI]: https://github.com/chakra-ui/chakra-ui
[Mantine v6]: https://v6.mantine.dev/
[KonvaJS]: https://github.com/konvajs/react-konva
[PixiJS]: https://github.com/pixijs/pixijs
[Vite]: https://github.com/vitejs/vite
[i18next]: https://github.com/i18next/react-i18next
[Weblate]: https://hosted.weblate.org/engage/invokeai/
[openapi-typescript]: https://github.com/drwpow/openapi-typescript
[reactflow]: https://github.com/xyflow/xyflow
[zod]: https://github.com/colinhacks/zod
invokeai/frontend/web/docs/STATE_MGMT.md (new file, 38 lines)
@@ -0,0 +1,38 @@
# State Management

The app makes heavy use of Redux Toolkit, its Query library, and `nanostores`.

## Redux

TODO

## `nanostores`

[nanostores] is a tiny state management library. It provides both imperative and declarative APIs.

### Example

```ts
import { atom } from 'nanostores';
import { useStore } from '@nanostores/react';

export const $myStringOption = atom<string | null>(null);

// Outside a component, or within a callback for performance-critical logic
$myStringOption.get();
$myStringOption.set('new value');

// Inside a component
const myStringOption = useStore($myStringOption);
```

### Where to put nanostores

- For global application state, export your stores from `invokeai/frontend/web/src/app/store/nanostores/`.
- For feature state, create a file for the stores next to the redux slice definition (e.g. `invokeai/frontend/web/src/features/myFeature/myFeatureNanostores.ts`).
- For hooks with global state, export the store from the same file the hook is in, or put it next to the hook.

### When to use nanostores

- For non-serializable data that needs to be available throughout the app, use `nanostores` instead of a global.
- For ephemeral global state (i.e. state that does not need to be persisted), use `nanostores` instead of redux.
- For performance-critical code and in callbacks, redux selectors can be problematic due to the declarative reactivity system. Consider refactoring to use `nanostores` if there's a **measurable** performance issue.

[nanostores]: https://github.com/nanostores/nanostores/
@@ -23,7 +23,7 @@
- [Primitive Types](#primitive-types)
- [Complex Types](#complex-types)
- [Collection Types](#collection-types)
-- [Polymorphic Types](#polymorphic-types)
+- [Collection or Scalar Types](#collection-or-scalar-types)
- [Optional Fields](#optional-fields)
- [Building Field Input Templates](#building-field-input-templates)
- [Building Field Output Templates](#building-field-output-templates)
@@ -19,8 +19,8 @@
    "dist"
  ],
  "scripts": {
    "dev": "concurrently \"vite dev\" \"pnpm run theme:watch\"",
    "dev:host": "concurrently \"vite dev --host\" \"pnpm run theme:watch\"",
    "dev": "vite dev",
    "dev:host": "vite dev --host",
    "build": "pnpm run lint && vite build",
    "typegen": "node scripts/typegen.js",
    "preview": "vite preview",
@@ -31,9 +31,6 @@
    "lint": "concurrently -g -n eslint,prettier,tsc,madge -c cyan,green,magenta,yellow \"pnpm run lint:eslint\" \"pnpm run lint:prettier\" \"pnpm run lint:tsc\" \"pnpm run lint:madge\"",
    "fix": "eslint --fix . && prettier --log-level warn --write .",
    "preinstall": "npx only-allow pnpm",
    "postinstall": "pnpm run theme",
    "theme": "chakra-cli tokens node_modules/@invoke-ai/ui",
    "theme:watch": "chakra-cli tokens node_modules/@invoke-ai/ui --watch",
    "storybook": "storybook dev -p 6006",
    "build-storybook": "storybook build",
    "unimported": "npx unimported"
@@ -52,21 +49,12 @@
    }
  },
  "dependencies": {
    "@chakra-ui/anatomy": "^2.2.2",
    "@chakra-ui/icons": "^2.1.1",
    "@chakra-ui/layout": "^2.3.1",
    "@chakra-ui/portal": "^2.1.0",
    "@chakra-ui/react": "^2.8.2",
    "@chakra-ui/react-use-size": "^2.1.0",
    "@chakra-ui/styled-system": "^2.9.2",
    "@chakra-ui/theme-tools": "^2.1.2",
    "@dagrejs/graphlib": "^2.1.13",
    "@dnd-kit/core": "^6.1.0",
    "@dnd-kit/utilities": "^3.2.2",
    "@emotion/react": "^11.11.3",
    "@emotion/styled": "^11.11.0",
    "@fontsource-variable/inter": "^5.0.16",
    "@invoke-ai/ui": "0.0.10",
    "@invoke-ai/ui-library": "^0.0.18",
    "@mantine/form": "6.0.21",
    "@nanostores/react": "^0.7.1",
    "@reduxjs/toolkit": "2.0.1",
@@ -116,7 +104,6 @@
    "zod-validation-error": "^3.0.0"
  },
  "peerDependencies": {
    "@chakra-ui/cli": "^2.4.1",
    "@chakra-ui/react": "^2.8.2",
    "react": "^18.2.0",
    "react-dom": "^18.2.0",
@@ -124,7 +111,8 @@
  },
  "devDependencies": {
    "@arthurgeron/eslint-plugin-react-usememo": "^2.2.3",
    "@chakra-ui/cli": "^2.4.1",
    "@invoke-ai/eslint-config-react": "^0.0.13",
    "@invoke-ai/prettier-config-react": "^0.0.6",
    "@storybook/addon-docs": "^7.6.10",
    "@storybook/addon-essentials": "^7.6.10",
    "@storybook/addon-interactions": "^7.6.10",
@@ -164,7 +152,7 @@
    "storybook": "^7.6.10",
    "ts-toolbelt": "^9.6.0",
    "typescript": "^5.3.3",
    "vite": "^5.0.11",
    "vite": "^5.0.12",
    "vite-plugin-css-injected-by-js": "^3.3.1",
    "vite-plugin-dts": "^3.7.1",
    "vite-plugin-eslint": "^1.8.1",
invokeai/frontend/web/pnpm-lock.yaml (generated, 1199 lines; diff suppressed because it is too large)
@@ -7,7 +7,6 @@
  "img2img": "صورة إلى صورة",
  "unifiedCanvas": "لوحة موحدة",
  "nodes": "عقد",
-  "langArabic": "العربية",
  "nodesDesc": "نظام مبني على العقد لإنتاج الصور قيد التطوير حاليًا. تبقى على اتصال مع تحديثات حول هذه الميزة المذهلة.",
  "postProcessing": "معالجة بعد الإصدار",
  "postProcessDesc1": "Invoke AI توفر مجموعة واسعة من ميزات المعالجة بعد الإصدار. تحسين الصور واستعادة الوجوه متاحين بالفعل في واجهة الويب. يمكنك الوصول إليهم من الخيارات المتقدمة في قائمة الخيارات في علامة التبويب Text To Image و Image To Image. يمكن أيضًا معالجة الصور مباشرةً باستخدام أزرار الإجراء على الصورة فوق عرض الصورة الحالي أو في العارض.",
invokeai/frontend/web/public/locales/az.json (new file, 5 lines)

@@ -0,0 +1,5 @@
{
"accessibility": {
"about": "Haqqında"
}
}
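A single-key locale like the new az.json above is workable because these files follow i18next conventions (the frontend uses react-i18next), and i18next falls back to another language for any missing key. A minimal, self-contained sketch under the assumption that English is configured as the fallback language; the inline resources are illustrative, not the app's real configuration:

```ts
import i18next from 'i18next';

// Sketch: a partial Azerbaijani locale falling back to English.
await i18next.init({
  lng: 'az',
  fallbackLng: 'en',
  resources: {
    en: { translation: { accessibility: { about: 'About', reset: 'Reset' } } },
    az: { translation: { accessibility: { about: 'Haqqında' } } },
  },
});

console.log(i18next.t('accessibility.about')); // "Haqqında" (present in az)
console.log(i18next.t('accessibility.reset')); // "Reset" (missing in az, falls back to en)
```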
invokeai/frontend/web/public/locales/de.json

@@ -5,7 +5,6 @@
"settingsLabel": "Einstellungen",
"img2img": "Bild zu Bild",
"nodes": "Knoten Editor",
"langGerman": "Deutsch",
"nodesDesc": "Ein knotenbasiertes System, für die Erzeugung von Bildern, ist derzeit in der Entwicklung. Bleiben Sie gespannt auf Updates zu dieser fantastischen Funktion.",
"postProcessing": "Nachbearbeitung",
"postProcessDesc1": "InvokeAI bietet eine breite Palette von Nachbearbeitungsfunktionen. Bildhochskalierung und Gesichtsrekonstruktion sind bereits in der WebUI verfügbar. Sie können sie über das Menü Erweiterte Optionen der Reiter Text in Bild und Bild in Bild aufrufen. Sie können Bilder auch direkt bearbeiten, indem Sie die Schaltflächen für Bildaktionen oberhalb der aktuellen Bildanzeige oder im Viewer verwenden.",
@@ -41,24 +40,11 @@
"cancel": "Abbrechen",
"accept": "Annehmen",
"back": "Zurück",
"langEnglish": "Englisch",
"langDutch": "Niederländisch",
"langFrench": "Französisch",
"langItalian": "Italienisch",
"langPortuguese": "Portugiesisch",
"langRussian": "Russisch",
"langUkranian": "Ukrainisch",
"hotkeysLabel": "Tastenkombinationen",
"githubLabel": "Github",
"discordLabel": "Discord",
"txt2img": "Text zu Bild",
"postprocessing": "Nachbearbeitung",
"langPolish": "Polnisch",
"langJapanese": "Japanisch",
"langArabic": "Arabisch",
"langKorean": "Koreanisch",
"langHebrew": "Hebräisch",
"langSpanish": "Spanisch",
"t2iAdapter": "T2I Adapter",
"communityLabel": "Gemeinschaft",
"dontAskMeAgain": "Frag mich nicht nochmal",
@@ -69,7 +55,6 @@
"on": "An",
"nodeEditor": "Knoten Editor",
"statusMergingModels": "Modelle zusammenführen",
"langSimplifiedChinese": "Vereinfachtes Chinesisch",
"ipAdapter": "IP Adapter",
"controlAdapter": "Control Adapter",
"auto": "Automatisch",
@@ -77,7 +62,6 @@
"imageFailedToLoad": "Kann Bild nicht laden",
"statusModelConverted": "Model konvertiert",
"modelManager": "Model Manager",
"lightMode": "Heller Modus",
"generate": "Erstellen",
"learnMore": "Mehr lernen",
"darkMode": "Dunkler Modus",
@@ -85,7 +69,6 @@
"random": "Zufall",
"batch": "Stapel-Manager",
"advanced": "Erweitert",
"langBrPortuguese": "Portugiesisch (Brasilien)",
"unifiedCanvas": "Einheitliche Leinwand",
"openInNewTab": "In einem neuem Tab öffnen",
"statusProcessing": "wird bearbeitet",
@@ -98,7 +81,7 @@
"outputs": "Ausgabe",
"data": "Daten",
"safetensors": "Safetensors",
"outpaint": "outpaint",
"outpaint": "Ausmalen",
"details": "Details",
"format": "Format",
"unknown": "Unbekannt",
@@ -110,7 +93,30 @@
"somethingWentWrong": "Etwas ist schief gelaufen",
"copyError": "$t(gallery.copy) Fehler",
"input": "Eingabe",
"notInstalled": "Nicht $t(common.installed)"
"notInstalled": "Nicht $t(common.installed)",
"advancedOptions": "Erweiterte Einstellungen",
"alpha": "Alpha",
"red": "Rot",
"green": "Grün",
"blue": "Blau",
"delete": "Löschen",
"or": "oder",
"direction": "Richtung",
"free": "Frei",
"save": "Speichern",
"preferencesLabel": "Präferenzen",
"created": "Erstellt",
"prevPage": "Vorherige Seite",
"nextPage": "Nächste Seite",
"unknownError": "Unbekannter Fehler",
"unsaved": "Nicht gespeichert",
"aboutDesc": "Verwenden Sie Invoke für die Arbeit? Dann siehe hier:",
"localSystem": "Lokales System",
"orderBy": "Ordnen nach",
"saveAs": "Speicher als",
"updated": "Aktualisiert",
"copy": "Kopieren",
"aboutHeading": "Nutzen Sie Ihre kreative Energie"
},
"gallery": {
"generations": "Erzeugungen",
@@ -140,7 +146,17 @@
"currentlyInUse": "Dieses Bild wird derzeit in den folgenden Funktionen verwendet:",
"deleteImagePermanent": "Gelöschte Bilder können nicht wiederhergestellt werden.",
"autoAssignBoardOnClick": "Board per Klick automatisch zuweisen",
"noImageSelected": "Kein Bild ausgewählt"
"noImageSelected": "Kein Bild ausgewählt",
"problemDeletingImagesDesc": "Ein oder mehrere Bilder konnten nicht gelöscht werden",
"starImage": "Bild markieren",
"assets": "Ressourcen",
"unstarImage": "Markierung Entfernen",
"image": "Bild",
"deleteSelection": "Lösche markierte",
"dropToUpload": "$t(gallery.drop) zum hochladen",
"dropOrUpload": "$t(gallery.drop) oder hochladen",
"drop": "Ablegen",
"problemDeletingImages": "Problem beim Löschen der Bilder"
},
"hotkeys": {
"keyboardShortcuts": "Tastenkürzel",
@@ -344,6 +360,25 @@
"addNodes": {
"title": "Knotenpunkt hinzufügen",
"desc": "Öffnet das Menü zum Hinzufügen von Knoten"
},
"cancelAndClear": {
"title": "Abbruch und leeren",
"desc": "Aktuelle Berechnung abbrechen und alle wartenden löschen"
},
"noHotkeysFound": "Kein Hotkey gefunden",
"searchHotkeys": "Hotkeys durchsuchen",
"clearSearch": "Suche leeren",
"resetOptionsAndGallery": {
"desc": "Optionen und Galerie-Panels zurücksetzen",
"title": "Optionen und Galerie zurücksetzen"
},
"remixImage": {
"desc": "Alle Parameter außer Seed vom aktuellen Bild verwenden",
"title": "Remix des Bilds erstellen"
},
"toggleOptionsAndGallery": {
"title": "Optionen und Galerie umschalten",
"desc": "Optionen und Galerie-Panels öffnen und schließen"
}
},
"modelManager": {
@@ -351,14 +386,14 @@
"modelUpdated": "Model aktualisiert",
"modelEntryDeleted": "Modelleintrag gelöscht",
"cannotUseSpaces": "Leerzeichen können nicht verwendet werden",
"addNew": "Neue hinzufügen",
"addNewModel": "Neues Model hinzufügen",
"addNew": "Neu hinzufügen",
"addNewModel": "Neues Modell hinzufügen",
"addManually": "Manuell hinzufügen",
"nameValidationMsg": "Geben Sie einen Namen für Ihr Model ein",
"description": "Beschreibung",
"descriptionValidationMsg": "Fügen Sie eine Beschreibung für Ihr Model hinzu",
"config": "Konfiguration",
"configValidationMsg": "Pfad zur Konfigurationsdatei Ihres Models.",
"configValidationMsg": "Pfad zur Konfigurationsdatei Ihres Modells.",
"modelLocation": "Ort des Models",
"modelLocationValidationMsg": "Pfad zum Speicherort Ihres Models",
"vaeLocation": "VAE Ort",
@@ -367,9 +402,9 @@
"widthValidationMsg": "Standardbreite Ihres Models.",
"height": "Höhe",
"heightValidationMsg": "Standardbhöhe Ihres Models.",
"addModel": "Model hinzufügen",
"addModel": "Modell hinzufügen",
"updateModel": "Model aktualisieren",
"availableModels": "Verfügbare Models",
"availableModels": "Verfügbare Modelle",
"search": "Suche",
"load": "Laden",
"active": "Aktiv",
@@ -488,7 +523,9 @@
"mergedModelName": "Zusammengeführter Modellname",
"checkpointOrSafetensors": "$t(common.checkpoint) / $t(common.safetensors)",
"formMessageDiffusersModelLocation": "Diffusers Modell Speicherort",
"noModelSelected": "Kein Modell ausgewählt"
"noModelSelected": "Kein Modell ausgewählt",
"conversionNotSupported": "Umwandlung nicht unterstützt",
"configFile": "Konfigurationsdatei"
},
"parameters": {
"images": "Bilder",
@@ -639,7 +676,7 @@
"redo": "Wiederherstellen",
"clearCanvas": "Leinwand löschen",
"canvasSettings": "Leinwand-Einstellungen",
"showIntermediates": "Zwischenprodukte anzeigen",
"showIntermediates": "Zwischenbilder anzeigen",
"showGrid": "Gitternetz anzeigen",
"snapToGrid": "Am Gitternetz einrasten",
"darkenOutsideSelection": "Außerhalb der Auswahl verdunkeln",
@@ -701,7 +738,8 @@
"invokeProgressBar": "Invoke Fortschrittsanzeige",
"mode": "Modus",
"resetUI": "$t(accessibility.reset) von UI",
"createIssue": "Ticket erstellen"
"createIssue": "Ticket erstellen",
"about": "Über"
},
"boards": {
"autoAddBoard": "Automatisches Hinzufügen zum Ordner",
@@ -724,15 +762,15 @@
"deleteBoard": "Löschen Ordner",
"deleteBoardAndImages": "Löschen Ordner und Bilder",
"deletedBoardsCannotbeRestored": "Gelöschte Ordner könnte nicht wiederhergestellt werden",
"movingImagesToBoard_one": "Verschiebe {{count}} Bild zu Ordner",
"movingImagesToBoard_other": "Verschiebe {{count}} Bilder in Ordner"
"movingImagesToBoard_one": "Verschiebe {{count}} Bild zu Ordner:",
"movingImagesToBoard_other": "Verschiebe {{count}} Bilder in Ordner:"
},
"controlnet": {
"showAdvanced": "Zeige Erweitert",
"contentShuffleDescription": "Mischt den Inhalt von einem Bild",
"addT2IAdapter": "$t(common.t2iAdapter) hinzufügen",
"importImageFromCanvas": "Importieren Bild von Zeichenfläche",
"lineartDescription": "Konvertiere Bild zu Lineart",
"importImageFromCanvas": "Bild von Zeichenfläche importieren",
"lineartDescription": "Konvertiere Bild in Strichzeichnung",
"importMaskFromCanvas": "Importiere Maske von Zeichenfläche",
"hed": "HED",
"hideAdvanced": "Verstecke Erweitert",
@@ -768,8 +806,8 @@
"toggleControlNet": "Schalten ControlNet um",
"delete": "Löschen",
"controlAdapter_one": "Control Adapter",
"controlAdapter_other": "Control Adapters",
"colorMapTileSize": "Tile Größe",
"controlAdapter_other": "Control Adapter",
"colorMapTileSize": "Kachelgröße",
"depthZoeDescription": "Tiefenmap erstellen mit Zoe",
"setControlImageDimensions": "Setze Control Bild Auflösung auf Breite/Höhe",
"handAndFace": "Hand und Gesicht",
@@ -789,14 +827,14 @@
"mlsdDescription": "Minimalistischer Liniensegmentdetektor",
"openPoseDescription": "Schätzung der menschlichen Pose mit Openpose",
"control": "Kontrolle",
"coarse": "Coarse",
"coarse": "Grob",
"crop": "Zuschneiden",
"pidiDescription": "PIDI-Bildverarbeitung",
"mediapipeFace": "Mediapipe Gesichter",
"mlsd": "M-LSD",
"controlMode": "Steuermodus",
"cannyDescription": "Canny Ecken Erkennung",
"lineart": "Lineart",
"cannyDescription": "Canny Umrisserkennung",
"lineart": "Linienzeichnung",
"lineartAnimeDescription": "Lineart-Verarbeitung im Anime-Stil",
"minConfidence": "Minimales Vertrauen",
"megaControl": "Mega-Kontrolle",
@@ -809,7 +847,14 @@
"canny": "Canny",
"hedDescription": "Ganzheitlich verschachtelte Kantenerkennung",
"scribble": "Scribble",
"maxFaces": "Maximal Anzahl Gesichter"
"maxFaces": "Maximale Anzahl Gesichter",
"resizeSimple": "Größe ändern (einfach)",
"large": "Groß",
"modelSize": "Modell Größe",
"small": "Klein",
"base": "Basis",
"depthAnything": "Depth Anything",
"depthAnythingDescription": "Erstellung einer Tiefenkarte mit der Depth Anything-Technik"
},
"queue": {
"status": "Status",
@@ -842,7 +887,7 @@
"item": "Auftrag",
"notReady": "Warteschlange noch nicht bereit",
"batchValues": "Stapel Werte",
"queueCountPrediction": "{{predicted}} zur Warteschlange hinzufügen",
"queueCountPrediction": "{{promptsCount}} Prompts × {{iterations}} Iterationen -> {{count}} Generationen",
"queuedCount": "{{pending}} wartenden Elemente",
"clearQueueAlertDialog": "Die Warteschlange leeren, stoppt den aktuellen Prozess und leert die Warteschlange komplett.",
"completedIn": "Fertig in",
@@ -851,7 +896,7 @@
"enqueueing": "Stapel in der Warteschlange",
"queueMaxExceeded": "Maximum von {{max_queue_size}} Elementen erreicht, würde {{skip}} Elemente überspringen",
"cancelBatchFailed": "Problem beim Abbruch vom Stapel",
"clearQueueAlertDialog2": "bist du sicher die Warteschlange zu leeren?",
"clearQueueAlertDialog2": "Warteschlange wirklich leeren?",
"pruneSucceeded": "{{item_count}} abgeschlossene Elemente aus der Warteschlange entfernt",
"pauseSucceeded": "Prozessor angehalten",
"cancelFailed": "Problem beim Stornieren des Auftrags",
@@ -864,34 +909,40 @@
"back": "Hinten",
"resumeSucceeded": "Prozessor wieder aufgenommen",
"resumeTooltip": "Prozessor wieder aufnehmen",
"time": "Zeit"
"time": "Zeit",
"batchQueuedDesc_one": "{{count}} Eintrag ans {{direction}} der Wartschlange hinzugefügt",
"batchQueuedDesc_other": "{{count}} Einträge ans {{direction}} der Wartschlange hinzugefügt",
"openQueue": "Warteschlange öffnen",
"batchFailedToQueue": "Fehler beim Einreihen in die Stapelverarbeitung",
"batchFieldValues": "Stapelverarbeitungswerte",
"batchQueued": "Stapelverarbeitung eingereiht"
},
"metadata": {
"negativePrompt": "Negativ Beschreibung",
"metadata": "Meta-Data",
"strength": "Bild zu Bild stärke",
"metadata": "Meta-Daten",
"strength": "Bild zu Bild Stärke",
"imageDetails": "Bild Details",
"model": "Modell",
"noImageDetails": "Keine Bild Details gefunden",
"cfgScale": "CFG-Skala",
"fit": "Bild zu Bild passen",
"fit": "Bild zu Bild anpassen",
"height": "Höhe",
"noMetaData": "Keine Meta-Data gefunden",
"noMetaData": "Keine Meta-Daten gefunden",
"width": "Breite",
"createdBy": "Erstellt von",
"steps": "Schritte",
"seamless": "Nahtlos",
"positivePrompt": "Positiver Prompt",
"generationMode": "Generierungsmodus",
"Threshold": "Noise Schwelle",
"seed": "Samen",
"Threshold": "Rauschen-Schwelle",
"seed": "Seed",
"perlin": "Perlin Noise",
"hiresFix": "Optimierung für hohe Auflösungen",
"initImage": "Erstes Bild",
"variations": "Samengewichtspaare",
"vae": "VAE",
"workflow": "Arbeitsablauf",
"scheduler": "Scheduler",
"scheduler": "Planer",
"noRecallParameters": "Es wurden keine Parameter zum Abrufen gefunden",
"recallParameters": "Recall Parameters"
},
@@ -928,12 +979,20 @@
},
"invocationCache": {
"disable": "Deaktivieren",
"misses": "Cache Nötig",
"misses": "Cache nicht genutzt",
"hits": "Cache Treffer",
"enable": "Aktivieren",
"clear": "Leeren",
"maxCacheSize": "Maximale Cache Größe",
"cacheSize": "Cache Größe"
"cacheSize": "Cache Größe",
"useCache": "Benutze Cache",
"enableFailed": "Problem beim Aktivieren des Zwischenspeichers",
"disableFailed": "Problem bei Deaktivierung des Cache",
"enableSucceeded": "Zwischenspeicher aktiviert",
"disableSucceeded": "Aufrufcache deaktiviert",
"clearSucceeded": "Zwischenspeicher gelöscht",
"invocationCache": "Zwischenspeicher",
"clearFailed": "Problem beim Löschen des Zwischenspeichers"
},
"embedding": {
"noMatchingEmbedding": "Keine passenden Embeddings",
@@ -972,7 +1031,41 @@
"colorCodeEdges": "Farbkodierte Kanten",
"addNodeToolTip": "Knoten hinzufügen (Umschalt+A, Leertaste)",
"boardField": "Ordner",
"boardFieldDescription": "Ein Galerie Ordner"
"boardFieldDescription": "Ein Galerie Ordner",
"collectionFieldType": "{{name}} Sammlung",
"controlCollectionDescription": "Kontrollinformationen zwischen Knotenpunkten weitergegeben.",
"connectionWouldCreateCycle": "Verbindung würde einen Kreislauf/cycle schaffen",
"ipAdapterDescription": "Ein Adapter für die Bildabfrage (IP-Adapter) / Bilderprompt-Adapter.",
"controlField": "Kontrolle",
"inputFields": "Eingabefelder",
"imageField": "Bild",
"inputMayOnlyHaveOneConnection": "Eingang darf nur eine Verbindung haben",
"integerCollectionDescription": "Eine Sammlung ganzer Zahlen.",
"integerDescription": "Das sind ganze Zahlen ohne Dezimalpunkt.",
"conditioningPolymorphic": "Konditionierung polymorphisch",
"conditioningPolymorphicDescription": "Die Konditionierung kann zwischen den Knotenpunkten weitergegeben werden.",
"invalidOutputSchema": "Ungültiges Ausgabeschema",
"ipAdapterModel": "IP-Adapter Modell",
"conditioningFieldDescription": "Die Konditionierung kann zwischen den Knotenpunkten weitergegeben werden.",
"ipAdapterCollectionDescription": "Eine Sammlung von IP-Adaptern.",
"collectionDescription": "Zu erledigen",
"imageFieldDescription": "Bilder können zwischen Knoten weitergegeben werden.",
"imagePolymorphic": "Bild Polymorphie",
"imagePolymorphicDescription": "Eine Bildersammlung.",
"inputField": "Eingabefeld",
"hideLegendNodes": "Feldtyp-Legende ausblenden",
"collectionItemDescription": "Zu erledigen",
"inputNode": "Eingangsknoten",
"integer": "Ganze Zahl",
"integerCollection": "Ganzzahlige Sammlung",
"addLinearView": "Zur linearen Ansicht hinzufügen",
"currentImageDescription": "Zeigt das aktuelle Bild im Node-Editor an",
"ipAdapter": "IP-Adapter",
"hideMinimapnodes": "Miniatur-Kartenansicht ausblenden",
"imageCollection": "Bildersammlung",
"imageCollectionDescription": "Eine Sammlung von Bildern.",
"denoiseMaskField": "Entrauschen-Maske",
"ipAdapterCollection": "IP-Adapter Sammlung"
},
"hrf": {
"enableHrf": "Aktivieren Sie die Korrektur für hohe Auflösungen",
@@ -998,6 +1091,71 @@
"noLoRAsInstalled": "Keine LoRAs installiert",
"selectLoRA": "Wählen ein LoRA aus",
"esrganModel": "ESRGAN Modell",
"addLora": "LoRA hinzufügen"
"addLora": "LoRA hinzufügen",
"defaultVAE": "Standard VAE",
"noLoRAsLoaded": "Keine LoRAs geladen",
"lora": "LoRA",
"allLoRAsAdded": "Alle LoRAs hinzugefügt",
"incompatibleBaseModel": "Inkompatibles Basismodell",
"noMainModelSelected": "Kein Hauptmodell ausgewählt",
"loraAlreadyAdded": "LoRA bereits hinzugefügt"
},
"accordions": {
"generation": {
"title": "Erstellung",
"modelTab": "Modell",
"conceptsTab": "Konzepte"
},
"image": {
"title": "Bild"
},
"advanced": {
"title": "Erweitert"
},
"control": {
"title": "Kontrolle",
"controlAdaptersTab": "Kontroll Adapter",
"ipTab": "Bild Beschreibung"
},
"compositing": {
"coherenceTab": "Kohärenzpass",
"infillTab": "Füllung",
"title": "Compositing"
}
},
"workflows": {
"workflows": "Arbeitsabläufe",
"noSystemWorkflows": "Keine System-Arbeitsabläufe",
"workflowName": "Arbeitsablauf-Name",
"workflowIsOpen": "Arbeitsablauf ist offen",
"saveWorkflowAs": "Arbeitsablauf speichern als",
"searchWorkflows": "Suche Arbeitsabläufe",
"newWorkflowCreated": "Neuer Arbeitsablauf erstellt",
"problemSavingWorkflow": "Problem beim Speichern des Arbeitsablaufs",
"noRecentWorkflows": "Keine kürzlichen Arbeitsabläufe",
"problemLoading": "Problem beim Laden von Arbeitsabläufen",
"downloadWorkflow": "Speichern als",
"savingWorkflow": "Speichere Arbeitsablauf...",
"saveWorkflow": "Arbeitsablauf speichern",
"noWorkflows": "Keine Arbeitsabläufe",
"workflowLibrary": "Bibliothek",
"defaultWorkflows": "Standard-Arbeitsabläufe",
"unnamedWorkflow": "Unbenannter Arbeitsablauf",
"noDescription": "Keine Beschreibung",
"clearWorkflowSearchFilter": "Suchfilter zurücksetzen",
"workflowEditorMenu": "Arbeitsablauf-Editor Menü",
"deleteWorkflow": "Arbeitsablauf löschen",
"userWorkflows": "Meine Arbeitsabläufe",
"workflowSaved": "Arbeitsablauf gespeichert",
"uploadWorkflow": "Aus Datei laden",
"projectWorkflows": "Projekt-Arbeitsabläufe",
"openWorkflow": "Arbeitsablauf öffnen",
"noUserWorkflows": "Keine Benutzer-Arbeitsabläufe",
"saveWorkflowToProject": "Arbeitsablauf in Projekt speichern",
"workflowCleared": "Arbeitsablauf gelöscht",
"loading": "Lade Arbeitsabläufe"
},
"app": {
"storeNotInitialized": "App-Store ist nicht initialisiert"
}
}
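The `_one`/`_other` suffixes added above (for example `movingImagesToBoard_one` and `movingImagesToBoard_other`) are i18next plural forms: the library picks the suffix from the `count` interpolation value using `Intl.PluralRules`. A small sketch under that assumption; the inline resources are copied from the de.json diff above rather than loaded from the actual file:

```ts
import i18next from 'i18next';

// Sketch of i18next plural resolution with {{count}} interpolation.
await i18next.init({
  lng: 'de',
  resources: {
    de: {
      translation: {
        movingImagesToBoard_one: 'Verschiebe {{count}} Bild zu Ordner:',
        movingImagesToBoard_other: 'Verschiebe {{count}} Bilder in Ordner:',
      },
    },
  },
});

console.log(i18next.t('movingImagesToBoard', { count: 1 })); // "Verschiebe 1 Bild zu Ordner:"
console.log(i18next.t('movingImagesToBoard', { count: 5 })); // "Verschiebe 5 Bilder in Ordner:"
```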
invokeai/frontend/web/public/locales/en.json

@@ -86,6 +86,7 @@
"back": "Back",
"batch": "Batch Manager",
"cancel": "Cancel",
"copy": "Copy",
"copyError": "$t(gallery.copy) Error",
"close": "Close",
"on": "On",
@@ -117,24 +118,7 @@
"inpaint": "inpaint",
"input": "Input",
"installed": "Installed",
"langArabic": "العربية",
"langBrPortuguese": "Português do Brasil",
"langDutch": "Nederlands",
"langEnglish": "English",
"langFrench": "Français",
"langGerman": "German",
"langHebrew": "Hebrew",
"langItalian": "Italiano",
"langJapanese": "日本語",
"langKorean": "한국어",
"langPolish": "Polski",
"langPortuguese": "Português",
"langRussian": "Русский",
"langSimplifiedChinese": "简体中文",
"langSpanish": "Español",
"languagePickerLabel": "Language",
"langUkranian": "Украї́нська",
"lightMode": "Light Mode",
"linear": "Linear",
"load": "Load",
"loading": "Loading",
@@ -224,6 +208,7 @@
"amult": "a_mult",
"autoConfigure": "Auto configure processor",
"balanced": "Balanced",
"base": "Base",
"beginEndStepPercent": "Begin / End Step Percentage",
"bgth": "bg_th",
"canny": "Canny",
@@ -237,6 +222,8 @@
"controlMode": "Control Mode",
"crop": "Crop",
"delete": "Delete",
"depthAnything": "Depth Anything",
"depthAnythingDescription": "Depth map generation using the Depth Anything technique",
"depthMidas": "Depth (Midas)",
"depthMidasDescription": "Depth map generation using Midas",
"depthZoe": "Depth (Zoe)",
@@ -256,6 +243,7 @@
"colorMapTileSize": "Tile Size",
"importImageFromCanvas": "Import Image From Canvas",
"importMaskFromCanvas": "Import Mask From Canvas",
"large": "Large",
"lineart": "Lineart",
"lineartAnime": "Lineart Anime",
"lineartAnimeDescription": "Anime-style lineart processing",
@@ -268,6 +256,7 @@
"minConfidence": "Min Confidence",
"mlsd": "M-LSD",
"mlsdDescription": "Minimalist Line Segment Detector",
"modelSize": "Model Size",
"none": "None",
"noneDescription": "No processing applied",
"normalBae": "Normal BAE",
@@ -288,6 +277,7 @@
"selectModel": "Select a model",
"setControlImageDimensions": "Set Control Image Dimensions To W/H",
"showAdvanced": "Show Advanced",
"small": "Small",
"toggleControlNet": "Toggle this ControlNet",
"w": "W",
"weight": "Weight",
@@ -600,6 +590,10 @@
"desc": "Send current image to Image to Image",
"title": "Send To Image To Image"
},
"remixImage": {
"desc": "Use all parameters except seed from the current image",
"title": "Remix image"
},
"setParameters": {
"desc": "Use all parameters of the current image",
"title": "Set Parameters"
@@ -1003,6 +997,9 @@
"newWorkflow": "New Workflow",
"newWorkflowDesc": "Create a new workflow?",
"newWorkflowDesc2": "Your current workflow has unsaved changes.",
"clearWorkflow": "Clear Workflow",
"clearWorkflowDesc": "Clear this workflow and start a new one?",
"clearWorkflowDesc2": "Your current workflow has unsaved changes.",
"scheduler": "Scheduler",
"schedulerDescription": "TODO",
"sDXLMainModelField": "SDXL Model",
@@ -1216,6 +1213,7 @@
"useCpuNoise": "Use CPU Noise",
"cpuNoise": "CPU Noise",
"gpuNoise": "GPU Noise",
"remixImage": "Remix Image",
"useInitImg": "Use Initial Image",
"usePrompt": "Use Prompt",
"useSeed": "Use Seed",
@@ -1361,6 +1359,7 @@
"problemCopyingCanvasDesc": "Unable to export base layer",
"problemCopyingImage": "Unable to Copy Image",
"problemCopyingImageLink": "Unable to Copy Image Link",
"problemDownloadingImage": "Unable to Download Image",
"problemDownloadingCanvas": "Problem Downloading Canvas",
"problemDownloadingCanvasDesc": "Unable to export base layer",
"problemImportingMask": "Problem Importing Mask",
@@ -1452,9 +1451,7 @@
},
"compositingCoherencePass": {
"heading": "Coherence Pass",
"paragraphs": [
"A second round of denoising helps to composite the Inpainted/Outpainted image."
]
"paragraphs": ["A second round of denoising helps to composite the Inpainted/Outpainted image."]
},
"compositingCoherenceMode": {
"heading": "Mode",
@@ -1462,10 +1459,7 @@
},
"compositingCoherenceSteps": {
"heading": "Steps",
"paragraphs": [
"Number of denoising steps used in the Coherence Pass.",
"Same as the main Steps parameter."
]
"paragraphs": ["Number of denoising steps used in the Coherence Pass.", "Same as the main Steps parameter."]
},
"compositingStrength": {
"heading": "Strength",
@@ -1487,15 +1481,11 @@
},
"controlNetControlMode": {
"heading": "Control Mode",
"paragraphs": [
"Lends more weight to either the prompt or ControlNet."
]
"paragraphs": ["Lends more weight to either the prompt or ControlNet."]
},
"controlNetResizeMode": {
"heading": "Resize Mode",
"paragraphs": [
"How the ControlNet image will be fit to the image output size."
]
"paragraphs": ["How the ControlNet image will be fit to the image output size."]
},
"controlNet": {
"heading": "ControlNet",
@@ -1505,9 +1495,7 @@
},
"controlNetWeight": {
"heading": "Weight",
"paragraphs": [
"How strongly the ControlNet will impact the generated image."
]
"paragraphs": ["How strongly the ControlNet will impact the generated image."]
},
"dynamicPrompts": {
"heading": "Dynamic Prompts",
@@ -1519,9 +1507,7 @@
},
"dynamicPromptsMaxPrompts": {
"heading": "Max Prompts",
"paragraphs": [
"Limits the number of prompts that can be generated by Dynamic Prompts."
]
"paragraphs": ["Limits the number of prompts that can be generated by Dynamic Prompts."]
},
"dynamicPromptsSeedBehaviour": {
"heading": "Seed Behaviour",
@@ -1538,9 +1524,7 @@
},
"lora": {
"heading": "LoRA Weight",
"paragraphs": [
"Higher LoRA weight will lead to larger impacts on the final image."
]
"paragraphs": ["Higher LoRA weight will lead to larger impacts on the final image."]
},
"noiseUseCPU": {
"heading": "Use CPU Noise",
@@ -1552,9 +1536,7 @@
},
"paramCFGScale": {
"heading": "CFG Scale",
"paragraphs": [
"Controls how much your prompt influences the generation process."
]
"paragraphs": ["Controls how much your prompt influences the generation process."]
},
"paramCFGRescaleMultiplier": {
"heading": "CFG Rescale Multiplier",
@@ -1606,9 +1588,7 @@
},
"paramVAE": {
"heading": "VAE",
"paragraphs": [
"Model used for translating AI output into the final image."
]
"paragraphs": ["Model used for translating AI output into the final image."]
},
"paramVAEPrecision": {
"heading": "VAE Precision",
@@ -1697,6 +1677,7 @@
"workflowLibrary": "Library",
"userWorkflows": "My Workflows",
"defaultWorkflows": "Default Workflows",
"projectWorkflows": "Project Workflows",
"openWorkflow": "Open Workflow",
"uploadWorkflow": "Load from File",
"deleteWorkflow": "Delete Workflow",
@@ -1704,11 +1685,13 @@
"downloadWorkflow": "Save to File",
"saveWorkflow": "Save Workflow",
"saveWorkflowAs": "Save Workflow As",
"saveWorkflowToProject": "Save Workflow to Project",
"savingWorkflow": "Saving Workflow...",
"problemSavingWorkflow": "Problem Saving Workflow",
"workflowSaved": "Workflow Saved",
"noRecentWorkflows": "No Recent Workflows",
"noUserWorkflows": "No User Workflows",
"noWorkflows": "No Workflows",
"noSystemWorkflows": "No System Workflows",
"problemLoading": "Problem Loading Workflows",
"loading": "Loading Workflows",
@@ -1717,6 +1700,7 @@
"clearWorkflowSearchFilter": "Clear Workflow Search Filter",
"workflowName": "Workflow Name",
"newWorkflowCreated": "New Workflow Created",
"workflowCleared": "Workflow Cleared",
"workflowEditorMenu": "Workflow Editor Menu",
"workflowIsOpen": "Workflow is Open"
},
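Keys such as `"copyError": "$t(gallery.copy) Error"` in the English locale use i18next's nesting syntax: `$t(...)` is resolved against another key at translation time, so shared words are defined once and reused. A short sketch of the mechanism, again with illustrative inline resources rather than the app's real locale files:

```ts
import i18next from 'i18next';

// Sketch of i18next nesting: "$t(gallery.copy)" is expanded when
// the "copyError" key is translated.
await i18next.init({
  lng: 'en',
  resources: {
    en: {
      translation: {
        gallery: { copy: 'Copy' },
        copyError: '$t(gallery.copy) Error',
      },
    },
  },
});

console.log(i18next.t('copyError')); // "Copy Error"
```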
invokeai/frontend/web/public/locales/es.json

@@ -7,7 +7,6 @@
"img2img": "Imagen a Imagen",
"unifiedCanvas": "Lienzo Unificado",
"nodes": "Editor del flujo de trabajo",
"langSpanish": "Español",
"nodesDesc": "Un sistema de generación de imágenes basado en nodos, actualmente se encuentra en desarrollo. Mantente pendiente a nuestras actualizaciones acerca de esta fabulosa funcionalidad.",
"postProcessing": "Post-procesamiento",
"postProcessDesc1": "Invoke AI ofrece una gran variedad de funciones de post-procesamiento, El aumento de tamaño y Restauración de Rostros ya se encuentran disponibles en la interfaz web, puedes acceder desde el menú de Opciones Avanzadas en las pestañas de Texto a Imagen y de Imagen a Imagen. También puedes acceder a estas funciones directamente mediante el botón de acciones en el menú superior de la imagen actual o en el visualizador.",
@@ -43,25 +42,10 @@
"statusMergedModels": "Modelos combinados",
"githubLabel": "Github",
"discordLabel": "Discord",
"langEnglish": "Inglés",
"langDutch": "Holandés",
"langFrench": "Francés",
"langGerman": "Alemán",
"langItalian": "Italiano",
"langArabic": "Árabe",
"langJapanese": "Japones",
"langPolish": "Polaco",
"langBrPortuguese": "Portugués brasileño",
"langRussian": "Ruso",
"langSimplifiedChinese": "Chino simplificado",
"langUkranian": "Ucraniano",
"back": "Atrás",
"statusConvertingModel": "Convertir el modelo",
"statusModelConverted": "Modelo adaptado",
"statusMergingModels": "Fusionar modelos",
"langPortuguese": "Portugués",
"langKorean": "Coreano",
"langHebrew": "Hebreo",
"loading": "Cargando",
"loadingInvokeAI": "Cargando invocar a la IA",
"postprocessing": "Tratamiento posterior",
@@ -77,7 +61,6 @@
"imagePrompt": "Indicación de imagen",
"batch": "Administrador de lotes",
"darkMode": "Modo oscuro",
"lightMode": "Modo claro",
"modelManager": "Administrador de modelos",
"communityLabel": "Comunidad"
},
invokeai/frontend/web/public/locales/fi.json

@@ -27,22 +27,12 @@
"statusModelChanged": "Malli vaihdettu",
"statusConvertingModel": "Muunnetaan mallia",
"statusModelConverted": "Malli muunnettu",
"langFrench": "Ranska",
"langItalian": "Italia",
"languagePickerLabel": "Kielen valinta",
"hotkeysLabel": "Pikanäppäimet",
"reportBugLabel": "Raportoi Bugista",
"langPolish": "Puola",
"langDutch": "Hollanti",
"settingsLabel": "Asetukset",
"githubLabel": "Github",
"langGerman": "Saksa",
"langPortuguese": "Portugali",
"discordLabel": "Discord",
"langEnglish": "Englanti",
"langRussian": "Venäjä",
"langUkranian": "Ukraina",
"langSpanish": "Espanja",
"upload": "Lataa",
"statusMergedModels": "Mallit yhdistelty",
"img2img": "Kuva kuvaksi",
invokeai/frontend/web/public/locales/fr.json

@@ -7,7 +7,6 @@
"img2img": "Image en image",
"unifiedCanvas": "Canvas unifié",
"nodes": "Nœuds",
"langFrench": "Français",
"nodesDesc": "Un système basé sur les nœuds pour la génération d'images est actuellement en développement. Restez à l'écoute pour des mises à jour à ce sujet.",
"postProcessing": "Post-traitement",
"postProcessDesc1": "Invoke AI offre une grande variété de fonctionnalités de post-traitement. Le redimensionnement d'images et la restauration de visages sont déjà disponibles dans la WebUI. Vous pouvez y accéder à partir du menu 'Options avancées' des onglets 'Texte vers image' et 'Image vers image'. Vous pouvez également traiter les images directement en utilisant les boutons d'action d'image au-dessus de l'affichage d'image actuel ou dans le visualiseur.",
@@ -47,7 +46,6 @@
"statusMergingModels": "Mélange des modèles",
"loadingInvokeAI": "Chargement de Invoke AI",
"cancel": "Annuler",
"langEnglish": "Anglais",
"statusConvertingModel": "Conversion du modèle",
"statusModelConverted": "Modèle converti",
"loading": "Chargement",
invokeai/frontend/web/public/locales/he.json

@@ -111,17 +111,6 @@
"githubLabel": "גיטהאב",
"discordLabel": "דיסקורד",
"settingsLabel": "הגדרות",
"langEnglish": "אנגלית",
"langDutch": "הולנדית",
"langArabic": "ערבית",
"langFrench": "צרפתית",
"langGerman": "גרמנית",
"langJapanese": "יפנית",
"langBrPortuguese": "פורטוגזית",
"langRussian": "רוסית",
"langSimplifiedChinese": "סינית",
"langUkranian": "אוקראינית",
"langSpanish": "ספרדית",
"img2img": "תמונה לתמונה",
"unifiedCanvas": "קנבס מאוחד",
"nodes": "צמתים",
@@ -152,9 +141,7 @@
"statusMergedModels": "מודלים מוזגו",
"hotkeysLabel": "מקשים חמים",
"reportBugLabel": "דווח באג",
"langItalian": "איטלקית",
"upload": "העלאה",
"langPolish": "פולנית",
"training": "אימון",
"load": "טעינה",
"back": "אחורה",
invokeai/frontend/web/public/locales/hu.json

@@ -34,7 +34,6 @@
"delete": "Törlés",
"data": "Adat",
"discordLabel": "Discord",
"folder": "Mappa",
"langEnglish": "Angol"
"folder": "Mappa"
}
}
invokeai/frontend/web/public/locales/it.json

@@ -7,7 +7,6 @@
"img2img": "Immagine a Immagine",
"unifiedCanvas": "Tela unificata",
"nodes": "Editor del flusso di lavoro",
"langItalian": "Italiano",
"nodesDesc": "Attualmente è in fase di sviluppo un sistema basato su nodi per la generazione di immagini. Resta sintonizzato per gli aggiornamenti su questa fantastica funzionalità.",
"postProcessing": "Post-elaborazione",
"postProcessDesc1": "Invoke AI offre un'ampia varietà di funzionalità di post-elaborazione. Ampliamento Immagine e Restaura Volti sono già disponibili nell'interfaccia Web. È possibile accedervi dal menu 'Opzioni avanzate' delle schede 'Testo a Immagine' e 'Immagine a Immagine'. È inoltre possibile elaborare le immagini direttamente, utilizzando i pulsanti di azione dell'immagine sopra la visualizzazione dell'immagine corrente o nel visualizzatore.",
@@ -43,26 +42,11 @@
"statusModelChanged": "Modello cambiato",
"githubLabel": "GitHub",
"discordLabel": "Discord",
"langArabic": "Arabo",
"langEnglish": "Inglese",
"langFrench": "Francese",
"langGerman": "Tedesco",
"langJapanese": "Giapponese",
"langPolish": "Polacco",
"langBrPortuguese": "Portoghese Basiliano",
"langRussian": "Russo",
"langUkranian": "Ucraino",
"langSpanish": "Spagnolo",
"statusMergingModels": "Fusione Modelli",
"statusMergedModels": "Modelli fusi",
"langSimplifiedChinese": "Cinese semplificato",
"langDutch": "Olandese",
"statusModelConverted": "Modello Convertito",
"statusConvertingModel": "Conversione Modello",
"langKorean": "Coreano",
"langPortuguese": "Portoghese",
"loading": "Caricamento in corso",
"langHebrew": "Ebraico",
"loadingInvokeAI": "Caricamento Invoke AI",
"postprocessing": "Post Elaborazione",
"txt2img": "Testo a Immagine",
@@ -76,7 +60,6 @@
"dontAskMeAgain": "Non chiedermelo più",
"imagePrompt": "Prompt Immagine",
"darkMode": "Modalità scura",
"lightMode": "Modalità chiara",
"batch": "Gestione Lotto",
"modelManager": "Gestore modello",
"communityLabel": "Comunità",
@@ -118,7 +101,15 @@
"advancedOptions": "Opzioni avanzate",
"free": "Libero",
"or": "o",
"preferencesLabel": "Preferenze"
"preferencesLabel": "Preferenze",
"red": "Rosso",
"aboutHeading": "Possiedi il tuo potere creativo",
"aboutDesc": "Utilizzi Invoke per lavoro? Guarda qui:",
"localSystem": "Sistema locale",
"green": "Verde",
"blue": "Blu",
"alpha": "Alfa",
"copy": "Copia"
},
"gallery": {
"generations": "Generazioni",
@@ -377,7 +368,11 @@
"desc": "Apre e chiude le opzioni e i pannelli della galleria",
"title": "Attiva/disattiva le Opzioni e la Galleria"
},
"clearSearch": "Cancella ricerca"
"clearSearch": "Cancella ricerca",
"remixImage": {
"desc": "Utilizza tutti i parametri tranne il seme dell'immagine corrente",
"title": "Remixa l'immagine"
}
},
"modelManager": {
"modelManager": "Gestione Modelli",
@@ -521,7 +516,8 @@
"customConfigFileLocation": "Posizione del file di configurazione personalizzato",
"vaePrecision": "Precisione VAE",
"noModelSelected": "Nessun modello selezionato",
"conversionNotSupported": "Conversione non supportata"
"conversionNotSupported": "Conversione non supportata",
"configFile": "File di configurazione"
},
"parameters": {
"images": "Immagini",
@@ -660,7 +656,10 @@
"lockAspectRatio": "Blocca proporzioni",
"swapDimensions": "Scambia dimensioni",
"aspect": "Aspetto",
"setToOptimalSizeTooLarge": "$t(parameters.setToOptimalSize) (potrebbe essere troppo grande)"
"setToOptimalSizeTooLarge": "$t(parameters.setToOptimalSize) (potrebbe essere troppo grande)",
"boxBlur": "Box",
"gaussianBlur": "Gaussian",
"remixImage": "Remixa l'immagine"
},
"settings": {
"models": "Modelli",
@@ -794,7 +793,9 @@
"invalidUpload": "Caricamento non valido",
"problemDeletingWorkflow": "Problema durante l'eliminazione del flusso di lavoro",
"workflowDeleted": "Flusso di lavoro eliminato",
"problemRetrievingWorkflow": "Problema nel recupero del flusso di lavoro"
"problemRetrievingWorkflow": "Problema nel recupero del flusso di lavoro",
"resetInitialImage": "Reimposta l'immagine iniziale",
"uploadInitialImage": "Carica l'immagine iniziale"
},
"tooltip": {
"feature": {
@@ -899,7 +900,8 @@
"loadMore": "Carica altro",
"mode": "Modalità",
"resetUI": "$t(accessibility.reset) l'Interfaccia Utente",
"createIssue": "Segnala un problema"
"createIssue": "Segnala un problema",
"about": "Informazioni"
},
"ui": {
"hideProgressImages": "Nascondi avanzamento immagini",
@@ -1232,7 +1234,11 @@
"scribble": "Scarabocchio",
"amult": "Angolo di illuminazione",
"coarse": "Approssimativo",
"resizeSimple": "Ridimensiona (semplice)"
"resizeSimple": "Ridimensiona (semplice)",
"large": "Grande",
"small": "Piccolo",
"depthAnythingDescription": "Generazione di mappe di profondità utilizzando la tecnica Depth Anything",
"modelSize": "Dimensioni del modello"
},
"queue": {
"queueFront": "Aggiungi all'inizio della coda",
@@ -1664,7 +1670,9 @@
"userWorkflows": "I miei flussi di lavoro",
"newWorkflowCreated": "Nuovo flusso di lavoro creato",
"downloadWorkflow": "Salva su file",
"uploadWorkflow": "Carica da file"
"uploadWorkflow": "Carica da file",
"projectWorkflows": "Flussi di lavoro del progetto",
"noWorkflows": "Nessun flusso di lavoro"
},
"app": {
"storeNotInitialized": "Il negozio non è inizializzato"
invokeai/frontend/web/public/locales/ja.json

@@ -3,7 +3,6 @@
"languagePickerLabel": "言語",
"reportBugLabel": "バグ報告",
"settingsLabel": "設定",
"langJapanese": "日本語",
"nodesDesc": "現在、画像生成のためのノードベースシステムを開発中です。機能についてのアップデートにご期待ください。",
"postProcessing": "後処理",
"postProcessDesc1": "Invoke AIは、多彩な後処理の機能を備えています。アップスケーリングと顔修復は、すでにWebUI上で利用可能です。これらは、[Text To Image]および[Image To Image]タブの[詳細オプション]メニューからアクセスできます。また、現在の画像表示の上やビューア内の画像アクションボタンを使って、画像を直接処理することもできます。",
@@ -36,11 +35,6 @@
"statusModelChanged": "モデルを変更",
"cancel": "キャンセル",
"accept": "同意",
"langBrPortuguese": "Português do Brasil",
"langRussian": "Русский",
"langSimplifiedChinese": "简体中文",
"langUkranian": "Украї́нська",
"langSpanish": "Español",
"img2img": "img2img",
"unifiedCanvas": "Unified Canvas",
"statusMergingModels": "モデルのマージ",
@@ -54,18 +48,8 @@
"statusMergedModels": "マージ済モデル",
"githubLabel": "Github",
"hotkeysLabel": "ホットキー",
"langHebrew": "עברית",
"discordLabel": "Discord",
"langItalian": "Italiano",
"langEnglish": "English",
"langArabic": "アラビア語",
"langDutch": "Nederlands",
"langFrench": "Français",
"langGerman": "Deutsch",
"langPortuguese": "Português",
"nodes": "ワークフローエディター",
"langKorean": "한국어",
"langPolish": "Polski",
"txt2img": "txt2img",
"postprocessing": "Post Processing",
"t2iAdapter": "T2I アダプター",
@@ -84,7 +68,6 @@
"imageFailedToLoad": "画像が読み込めません",
"imagePrompt": "画像プロンプト",
"modelManager": "モデルマネージャー",
"lightMode": "ライトモード",
"generate": "生成",
"learnMore": "もっと学ぶ",
"darkMode": "ダークモード",
invokeai/frontend/web/public/locales/ko.json

@@ -4,17 +4,7 @@
"reportBugLabel": "버그 리포트",
"githubLabel": "Github",
"settingsLabel": "설정",
"langArabic": "العربية",
"langEnglish": "English",
"langDutch": "Nederlands",
"unifiedCanvas": "통합 캔버스",
"langFrench": "Français",
"langGerman": "Deutsch",
"langItalian": "Italiano",
"langJapanese": "日本語",
"langBrPortuguese": "Português do Brasil",
"langRussian": "Русский",
"langSpanish": "Español",
"nodes": "Workflow Editor",
"nodesDesc": "이미지 생성을 위한 노드 기반 시스템은 현재 개발 중입니다. 이 놀라운 기능에 대한 업데이트를 계속 지켜봐 주세요.",
"postProcessing": "후처리",
@@ -31,7 +21,6 @@
"statusDisconnected": "연결 끊김",
"statusError": "에러",
"statusPreparing": "준비 중",
"langSimplifiedChinese": "简体中文",
"statusGenerating": "생성 중",
"statusGeneratingTextToImage": "텍스트->이미지 생성",
"statusGeneratingInpainting": "인페인팅 생성",
@@ -51,9 +40,7 @@
"hotkeysLabel": "단축키 설정",
"img2img": "이미지->이미지",
"discordLabel": "Discord",
"langPolish": "Polski",
"postProcessDesc1": "Invoke AI는 다양한 후처리 기능을 제공합니다. 이미지 업스케일링 및 얼굴 복원은 이미 Web UI에서 사용할 수 있습니다. 텍스트->이미지 또는 이미지->이미지 탭의 고급 옵션 메뉴에서 사용할 수 있습니다. 또한 현재 이미지 표시 위, 또는 뷰어에서 액션 버튼을 사용하여 이미지를 직접 처리할 수도 있습니다.",
"langUkranian": "Украї́нська",
"statusProcessingCanceled": "처리 취소됨",
"statusGeneratingImageToImage": "이미지->이미지 생성",
"statusProcessingComplete": "처리 완료",
@@ -73,7 +60,6 @@
"updated": "업데이트 됨",
"on": "켜기",
"save": "저장",
"langPortuguese": "Português",
"created": "생성됨",
"nodeEditor": "Node Editor",
"error": "에러",
@@ -100,10 +86,8 @@
"somethingWentWrong": "뭔가 잘못됐어요",
"imagePrompt": "이미지 프롬프트",
"modelManager": "Model Manager",
"lightMode": "라이트 모드",
"safetensors": "Safetensors",
"outpaint": "outpaint",
"langKorean": "한국어",
"orderBy": "정렬 기준",
"generate": "생성",
"copyError": "$t(gallery.copy) 에러",
@@ -113,7 +97,6 @@
"darkMode": "다크 모드",
"loading": "불러오는 중",
"random": "랜덤",
"langHebrew": "Hebrew",
"batch": "Batch 매니저",
"postprocessing": "후처리",
"advanced": "고급",
invokeai/frontend/web/public/locales/nl.json

@@ -7,7 +7,6 @@
"img2img": "Afbeelding naar afbeelding",
"unifiedCanvas": "Centraal canvas",
"nodes": "Werkstroom-editor",
"langDutch": "Nederlands",
"nodesDesc": "Een op knooppunten gebaseerd systeem voor het genereren van afbeeldingen is momenteel in ontwikkeling. Blijf op de hoogte voor nieuws over deze verbluffende functie.",
"postProcessing": "Naverwerking",
"postProcessDesc1": "Invoke AI biedt een breed scala aan naverwerkingsfuncties. Afbeeldingsopschaling en Gezichtsherstel zijn al beschikbaar in de web-UI. Je kunt ze openen via het menu Uitgebreide opties in de tabbladen Tekst naar afbeelding en Afbeelding naar afbeelding. Je kunt een afbeelding ook direct verwerken via de afbeeldingsactieknoppen boven de weergave van de huidigde afbeelding of in de Viewer.",
@@ -41,18 +40,6 @@
"statusModelChanged": "Model gewijzigd",
"githubLabel": "Github",
"discordLabel": "Discord",
"langArabic": "Arabisch",
"langEnglish": "Engels",
"langFrench": "Frans",
"langGerman": "Duits",
"langItalian": "Italiaans",
"langJapanese": "Japans",
"langPolish": "Pools",
"langBrPortuguese": "Portugees (Brazilië)",
"langRussian": "Russisch",
"langSimplifiedChinese": "Chinees (vereenvoudigd)",
"langUkranian": "Oekraïens",
"langSpanish": "Spaans",
"training": "Training",
"back": "Terug",
"statusConvertingModel": "Omzetten van model",
@@ -61,11 +48,8 @@
"statusMergedModels": "Modellen samengevoegd",
"cancel": "Annuleer",
"accept": "Akkoord",
"langPortuguese": "Portugees",
"loading": "Bezig met laden",
"loadingInvokeAI": "Bezig met laden van Invoke AI",
"langHebrew": "עברית",
"langKorean": "한국어",
"txt2img": "Tekst naar afbeelding",
"postprocessing": "Naverwerking",
"dontAskMeAgain": "Vraag niet opnieuw",
@@ -78,7 +62,6 @@
"batch": "Seriebeheer",
"modelManager": "Modelbeheer",
"darkMode": "Donkere modus",
"lightMode": "Lichte modus",
"communityLabel": "Gemeenschap",
"t2iAdapter": "T2I-adapter",
"on": "Aan",
invokeai/frontend/web/public/locales/pl.json

@@ -7,7 +7,6 @@
"img2img": "Obraz na obraz",
"unifiedCanvas": "Tryb uniwersalny",
"nodes": "Węzły",
"langPolish": "Polski",
"nodesDesc": "W tym miejscu powstanie graficzny system generowania obrazów oparty na węzłach. Jest na co czekać!",
"postProcessing": "Przetwarzanie końcowe",
"postProcessDesc1": "Invoke AI oferuje wiele opcji przetwarzania końcowego. Z poziomu przeglądarki dostępne jest już zwiększanie rozdzielczości oraz poprawianie twarzy. Znajdziesz je wśród ustawień w trybach \"Tekst na obraz\" oraz \"Obraz na obraz\". Są również obecne w pasku menu wyświetlanym nad podglądem wygenerowanego obrazu.",
@@ -42,8 +41,7 @@
"statusModelChanged": "Zmieniono model",
"githubLabel": "GitHub",
"discordLabel": "Discord",
"darkMode": "Tryb ciemny",
"lightMode": "Tryb jasny"
"darkMode": "Tryb ciemny"
},
"gallery": {
"generations": "Wygenerowane",
invokeai/frontend/web/public/locales/pt.json

@@ -1,22 +1,9 @@
{
"common": {
"langArabic": "العربية",
"reportBugLabel": "Reportar Bug",
"settingsLabel": "Configurações",
"langBrPortuguese": "Português do Brasil",
"languagePickerLabel": "Seletor de Idioma",
"langDutch": "Nederlands",
"langEnglish": "English",
"hotkeysLabel": "Hotkeys",
"langPolish": "Polski",
"langFrench": "Français",
"langGerman": "Deutsch",
"langItalian": "Italiano",
"langJapanese": "日本語",
"langSimplifiedChinese": "简体中文",
"langSpanish": "Espanhol",
"langRussian": "Русский",
"langUkranian": "Украї́нська",
"img2img": "Imagem para Imagem",
"unifiedCanvas": "Tela Unificada",
"nodes": "Nós",
@@ -60,8 +47,7 @@
"statusMergingModels": "Mesclando Modelos",
"statusMergedModels": "Modelos Mesclados",
"loading": "A carregar",
"loadingInvokeAI": "A carregar Invoke AI",
"langPortuguese": "Português"
"loadingInvokeAI": "A carregar Invoke AI"
},
"gallery": {
"galleryImageResetSize": "Resetar Imagem",
invokeai/frontend/web/public/locales/pt_BR.json

@@ -7,7 +7,6 @@
"img2img": "Imagem Para Imagem",
"unifiedCanvas": "Tela Unificada",
"nodes": "Nódulos",
"langBrPortuguese": "Português do Brasil",
"nodesDesc": "Um sistema baseado em nódulos para geração de imagens está em contrução. Fique ligado para atualizações sobre essa funcionalidade incrível.",
"postProcessing": "Pós-processamento",
"postProcessDesc1": "Invoke AI oferece uma variedade e funcionalidades de pós-processamento. Redimensionador de Imagem e Restauração Facial já estão disponíveis na interface. Você pode acessar elas no menu de Opções Avançadas na aba de Texto para Imagem e Imagem para Imagem. Você também pode processar imagens diretamente, usando os botões de ação de imagem acima da atual tela de imagens ou visualizador.",
@@ -42,23 +41,11 @@
"statusModelChanged": "Modelo Alterado",
"githubLabel": "Github",
"discordLabel": "Discord",
"langArabic": "Árabe",
"langEnglish": "Inglês",
"langDutch": "Holandês",
"langFrench": "Francês",
"langGerman": "Alemão",
"langItalian": "Italiano",
"langJapanese": "Japonês",
"langPolish": "Polonês",
"langSimplifiedChinese": "Chinês",
"langUkranian": "Ucraniano",
"back": "Voltar",
"statusConvertingModel": "Convertendo Modelo",
"statusModelConverted": "Modelo Convertido",
"statusMergingModels": "Mesclando Modelos",
"statusMergedModels": "Modelos Mesclados",
"langRussian": "Russo",
"langSpanish": "Espanhol",
"loadingInvokeAI": "Carregando Invoke AI",
"loading": "Carregando"
},
invokeai/frontend/web/public/locales/ru.json

@@ -7,7 +7,6 @@
"img2img": "Изображение в изображение (img2img)",
"unifiedCanvas": "Единый холст",
"nodes": "Редактор рабочего процесса",
"langRussian": "Русский",
"nodesDesc": "Cистема генерации изображений на основе нодов (узлов) уже разрабатывается. Следите за новостями об этой замечательной функции.",
"postProcessing": "Постобработка",
"postProcessDesc1": "Invoke AI предлагает широкий спектр функций постобработки. Увеличение изображения (upscale) и восстановление лиц уже доступны в интерфейсе. Получите доступ к ним из меню 'Дополнительные параметры' на вкладках 'Текст в изображение' и 'Изображение в изображение'. Обрабатывайте изображения напрямую, используя кнопки действий с изображениями над текущим изображением или в режиме просмотра.",
@@ -51,23 +50,8 @@
"statusConvertingModel": "Конвертация модели",
"cancel": "Отменить",
"accept": "Принять",
"langUkranian": "Украї́нська",
"langEnglish": "English",
"postprocessing": "Постобработка",
"langArabic": "العربية",
"langSpanish": "Español",
"langSimplifiedChinese": "简体中文",
"langDutch": "Nederlands",
"langFrench": "Français",
"langGerman": "German",
"langHebrew": "Hebrew",
"langItalian": "Italiano",
"langJapanese": "日本語",
"langKorean": "한국어",
"langPolish": "Polski",
"langPortuguese": "Português",
"txt2img": "Текст в изображение (txt2img)",
"langBrPortuguese": "Português do Brasil",
"linear": "Линейная обработка",
"dontAskMeAgain": "Больше не спрашивать",
"areYouSure": "Вы уверены?",
@@ -76,7 +60,6 @@
"openInNewTab": "Открыть в новой вкладке",
"imagePrompt": "Запрос",
"communityLabel": "Сообщество",
"lightMode": "Светлая тема",
"batch": "Пакетный менеджер",
"modelManager": "Менеджер моделей",
"darkMode": "Темная тема",
invokeai/frontend/web/public/locales/sv.json

@@ -26,24 +26,8 @@
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Inställningar",
"langEnglish": "Engelska",
"langDutch": "Nederländska",
"langFrench": "Franska",
"langGerman": "Tyska",
"langItalian": "Italienska",
"langArabic": "العربية",
"langHebrew": "עברית",
"langPolish": "Polski",
"langPortuguese": "Português",
"langBrPortuguese": "Português do Brasil",
"langSimplifiedChinese": "简体中文",
"langJapanese": "日本語",
"langKorean": "한국어",
"langRussian": "Русский",
"unifiedCanvas": "Förenad kanvas",
"nodesDesc": "Ett nodbaserat system för bildgenerering är under utveckling. Håll utkik för uppdateringar om denna fantastiska funktion.",
"langUkranian": "Украї́нська",
"langSpanish": "Español",
"postProcessDesc2": "Ett dedikerat användargränssnitt kommer snart att släppas för att underlätta mer avancerade arbetsflöden av efterbehandling.",
"trainingDesc1": "Ett dedikerat arbetsflöde för träning av dina egna inbäddningar och kontrollpunkter genom Textual Inversion eller Dreambooth från webbgränssnittet.",
"trainingDesc2": "InvokeAI stöder redan träning av anpassade inbäddningar med hjälp av Textual Inversion genom huvudscriptet.",
File diff suppressed because it is too large
invokeai/frontend/web/public/locales/uk.json

@@ -7,7 +7,6 @@
"img2img": "Зображення із зображення (img2img)",
"unifiedCanvas": "Універсальне полотно",
"nodes": "Вузли",
"langUkranian": "Украї́нська",
"nodesDesc": "Система генерації зображень на основі нодів (вузлів) вже розробляється. Слідкуйте за новинами про цю чудову функцію.",
"postProcessing": "Постобробка",
"postProcessDesc1": "Invoke AI пропонує широкий спектр функцій постобробки. Збільшення зображення (upscale) та відновлення облич вже доступні в інтерфейсі. Отримайте доступ до них з меню 'Додаткові параметри' на вкладках 'Зображення із тексту' та 'Зображення із зображення'. Обробляйте зображення безпосередньо, використовуючи кнопки дій із зображеннями над поточним зображенням або в режимі перегляду.",
@@ -48,24 +47,9 @@
"statusMergingModels": "Злиття моделей",
"loading": "Завантаження",
"loadingInvokeAI": "Завантаження Invoke AI",
"langHebrew": "Іврит",
"langKorean": "Корейська",
"langPortuguese": "Португальська",
"langArabic": "Арабська",
"langSimplifiedChinese": "Китайська (спрощена)",
"langSpanish": "Іспанська",
"langEnglish": "Англійська",
"langGerman": "Німецька",
"langItalian": "Італійська",
"langJapanese": "Японська",
"langPolish": "Польська",
"langBrPortuguese": "Португальська (Бразилія)",
"langRussian": "Російська",
"githubLabel": "Github",
"txt2img": "Текст в зображення (txt2img)",
"discordLabel": "Discord",
"langDutch": "Голландська",
"langFrench": "Французька",
"statusMergedModels": "Моделі об'єднані",
"statusConvertingModel": "Конвертація моделі",
"linear": "Лінійна обробка"
Some files were not shown because too many files have changed in this diff.