mirror of
https://github.com/invoke-ai/InvokeAI.git
synced 2026-01-25 10:07:54 -05:00
Compare commits
118 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
19cdd5a99b | ||
|
|
76337e13f5 | ||
|
|
eb4ca4042e | ||
|
|
594bf6fef1 | ||
|
|
6f2e8d5217 | ||
|
|
52ae15c167 | ||
|
|
2c4128d44e | ||
|
|
01b106d939 | ||
|
|
68f1f87c6f | ||
|
|
c2c99b8650 | ||
|
|
896b77cf56 | ||
|
|
6f7d221f57 | ||
|
|
fba4085939 | ||
|
|
48ad005732 | ||
|
|
9ce4bd1182 | ||
|
|
39b7ace273 | ||
|
|
319c56f844 | ||
|
|
389a0d2810 | ||
|
|
fe33acedad | ||
|
|
eab18c7385 | ||
|
|
8e98085530 | ||
|
|
5396e998b3 | ||
|
|
fc98089960 | ||
|
|
dd0b4dc744 | ||
|
|
ddeba190bc | ||
|
|
3a610e1a65 | ||
|
|
e10e22440d | ||
|
|
f4e8a91bcf | ||
|
|
ce7fbdb01d | ||
|
|
4da6623700 | ||
|
|
0e3ca59e49 | ||
|
|
e06f2229ac | ||
|
|
5962d96f27 | ||
|
|
d4854c4fac | ||
|
|
46801c076f | ||
|
|
9370572169 | ||
|
|
ace65325ff | ||
|
|
e6d890888c | ||
|
|
8e7f581065 | ||
|
|
85ef3f51e7 | ||
|
|
8fdc8a8da5 | ||
|
|
52d56e96a5 | ||
|
|
c013fe5b5d | ||
|
|
ddf7ddc2c1 | ||
|
|
4a0774b260 | ||
|
|
17e401cb8c | ||
|
|
29a590cced | ||
|
|
7deafa838b | ||
|
|
20757d1c02 | ||
|
|
5134de7cfa | ||
|
|
b1a6ba552b | ||
|
|
cd21d2f2b6 | ||
|
|
9dc28373d8 | ||
|
|
ffe7d5785b | ||
|
|
a2e2f0858d | ||
|
|
f73c70ca96 | ||
|
|
e2240feae4 | ||
|
|
e06348bfab | ||
|
|
8fb970d436 | ||
|
|
15256ed3a4 | ||
|
|
89a15f78dd | ||
|
|
8fc20c837b | ||
|
|
8dfe196c4f | ||
|
|
9e27fd9b90 | ||
|
|
2771328853 | ||
|
|
a481607d3f | ||
|
|
1e3cebbf42 | ||
|
|
d523556558 | ||
|
|
da523fa32f | ||
|
|
ab9b5f3b95 | ||
|
|
f32bd5dd10 | ||
|
|
190ba5af59 | ||
|
|
cb29ac63a8 | ||
|
|
603989dc0d | ||
|
|
2872ae2aab | ||
|
|
b7cdda0781 | ||
|
|
267940a77e | ||
|
|
8d77c5ca96 | ||
|
|
0795d8764f | ||
|
|
2db56306e4 | ||
|
|
70fec9ddab | ||
|
|
909f538fb5 | ||
|
|
bab8b6d240 | ||
|
|
f2f49bd8d0 | ||
|
|
b8e0810ed1 | ||
|
|
6cb9167a1b | ||
|
|
09dfcc4277 | ||
|
|
82eb1f1075 | ||
|
|
187cf906fa | ||
|
|
82554b25fe | ||
|
|
039091c5d4 | ||
|
|
d76bf4444c | ||
|
|
82496fee14 | ||
|
|
c2b99e7545 | ||
|
|
e918168f7a | ||
|
|
6e36c275c9 | ||
|
|
6affe42310 | ||
|
|
170bbd7da3 | ||
|
|
f6d5e93020 | ||
|
|
f2515d9480 | ||
|
|
4d8f17c69d | ||
|
|
3a987b2e72 | ||
|
|
4e3f58552c | ||
|
|
77d9657980 | ||
|
|
12cae33dcd | ||
|
|
1e5310793c | ||
|
|
a0b5930340 | ||
|
|
53ed252168 | ||
|
|
a683379dda | ||
|
|
899aa1d251 | ||
|
|
5f940bf3b3 | ||
|
|
509514f11d | ||
|
|
c557402dbb | ||
|
|
c291b82b94 | ||
|
|
6ba48af0a9 | ||
|
|
40fffec0b6 | ||
|
|
ff74370eda | ||
|
|
446d87516a |
@@ -1,42 +1,38 @@
|
||||
# How to Contribute
|
||||
|
||||
## Welcome to Invoke AI
|
||||
|
||||
We're thrilled to have you here and we're excited for you to contribute.
|
||||
|
||||
Invoke AI originated as a project built by the community, and that vision carries forward today as we aim to build the best pro-grade tools available. We work together to incorporate the latest in AI/ML research, making these tools available in over 20 languages to artists and creatives around the world as part of our fully permissive OSS project designed for individual users to self-host and use.
|
||||
|
||||
Here are some guidelines to help you get started:
|
||||
|
||||
### Technical Prerequisites
|
||||
## Contributing to Invoke AI
|
||||
Anyone who wishes to contribute to InvokeAI, whether features, bug fixes, code cleanup, testing, code reviews, documentation or translation is very much encouraged to do so.
|
||||
|
||||
Front-end: You'll need a working knowledge of React and TypeScript.
|
||||
To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.
|
||||
|
||||
Back-end: Depending on the scope of your contribution, you may need to know SQLite, FastAPI, Python, and Socketio. Also, a good majority of the backend logic involved in processing images is built in a modular way using a concept called "Nodes", which are isolated functions that carry out individual, discrete operations. This design allows for easy contributions of novel pipelines and capabilities.
|
||||
### Areas of contribution:
|
||||
|
||||
### How to Submit Contributions
|
||||
#### Development
|
||||
If you’d like to help with development, please see our [development guide](contribution_guides/development.md). If you’re unfamiliar with contributing to open source projects, there is a tutorial contained within the development guide.
|
||||
|
||||
To start contributing, please follow these steps:
|
||||
#### Documentation
|
||||
If you’d like to help with documentation, please see our [documentation guide](contribution_guides/documenation.md).
|
||||
|
||||
1. Familiarize yourself with our roadmap and open projects to see where your skills and interests align. These documents can serve as a source of inspiration.
|
||||
2. Open a Pull Request (PR) with a clear description of the feature you're adding or the problem you're solving. Make sure your contribution aligns with the project's vision.
|
||||
3. Adhere to general best practices. This includes assuming interoperability with other nodes, keeping the scope of your functions as small as possible, and organizing your code according to our architecture documents.
|
||||
#### Translation
|
||||
If you'd like to help with translation, please see our [translation guide](docs/contributing/.contribution_guides/translation.md).
|
||||
|
||||
### Types of Contributions We're Looking For
|
||||
#### Tutorials
|
||||
Please reach out to @imic or @hipsterusername on [Discord](https://discord.gg/ZmtBAhwWhy) to help create tutorials for InvokeAI.
|
||||
|
||||
We welcome all contributions that improve the project. Right now, we're especially looking for:
|
||||
We hope you enjoy using our software as much as we enjoy creating it, and we hope that some of those of you who are reading this will elect to become part of our contributor community.
|
||||
|
||||
1. Quality of life (QOL) enhancements on the front-end.
|
||||
2. New backend capabilities added through nodes.
|
||||
3. Incorporating additional optimizations from the broader open-source software community.
|
||||
|
||||
### Communication and Decision-making Process
|
||||
### Contributors
|
||||
|
||||
Project maintainers and code owners review PRs to ensure they align with the project's goals. They may provide design or architectural guidance, suggestions on user experience, or provide more significant feedback on the contribution itself. Expect to receive feedback on your submissions, and don't hesitate to ask questions or propose changes.
|
||||
This project is a combined effort of dedicated people from across the world. [Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for their time, hard work and effort.
|
||||
|
||||
For more robust discussions, or if you're planning to add capabilities not currently listed on our roadmap, please reach out to us on our Discord server. That way, we can ensure your proposed contribution aligns with the project's direction before you start writing code.
|
||||
### Code of Conduct
|
||||
|
||||
### Code of Conduct and Contribution Expectations
|
||||
|
||||
We want everyone in our community to have a positive experience. To facilitate this, we've established a code of conduct and a statement of values that we expect all contributors to adhere to. Please take a moment to review these documents—they're essential to maintaining a respectful and inclusive environment.
|
||||
The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](https://github.com/invoke-ai/InvokeAI/blob/main/CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.
|
||||
|
||||
By making a contribution to this project, you certify that:
|
||||
|
||||
@@ -49,6 +45,12 @@ This disclaimer is not a license and does not grant any rights or permissions. Y
|
||||
|
||||
This disclaimer is provided "as is" without warranty of any kind, whether expressed or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, or non-infringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the contribution or the use or other dealings in the contribution.
|
||||
|
||||
### Support
|
||||
|
||||
For support, please use this repository's [GitHub Issues](https://github.com/invoke-ai/InvokeAI/issues), or join the [Discord](https://discord.gg/ZmtBAhwWhy).
|
||||
|
||||
Original portions of the software are Copyright (c) 2023 by respective contributors.
|
||||
|
||||
---
|
||||
|
||||
Remember, your contributions help make this project great. We're excited to see what you'll bring to our community!
|
||||
|
||||
91
docs/contributing/contribution_guides/development.md
Normal file
91
docs/contributing/contribution_guides/development.md
Normal file
@@ -0,0 +1,91 @@
|
||||
# Development
|
||||
|
||||
## **What do I need to know to help?**
|
||||
|
||||
If you are looking to help to with a code contribution, InvokeAI uses several different technologies under the hood: Python (Pydantic, FastAPI, diffusers) and Typescript (React, Redux Toolkit, ChakraUI, Mantine, Konva). Familiarity with StableDiffusion and image generation concepts is helpful, but not essential.
|
||||
|
||||
For more information, please review our area specific documentation:
|
||||
|
||||
* #### [InvokeAI Architecure](../ARCHITECTURE.md)
|
||||
* #### [Frontend Documentation](development_guides/contributingToFrontend.md)
|
||||
* #### [Node Documentation](../INVOCATIONS.md)
|
||||
* #### [Local Development](../LOCAL_DEVELOPMENT.md)
|
||||
|
||||
If you don't feel ready to make a code contribution yet, no problem! You can also help out in other ways, such as [documentation](documentation.md) or [translation](translation.md).
|
||||
|
||||
There are two paths to making a development contribution:
|
||||
|
||||
1. Choosing an open issue to address. Open issues can be found in the [Issues](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen) section of the InvokeAI repository. These are tagged by the issue type (bug, enhancement, etc.) along with the “good first issues” tag denoting if they are suitable for first time contributors.
|
||||
1. Additional items can be found on our roadmap <******************************link to roadmap>******************************. The roadmap is organized in terms of priority, and contains features of varying size and complexity. If there is an inflight item you’d like to help with, reach out to the contributor assigned to the item to see how you can help.
|
||||
2. Opening a new issue or feature to add. **Please make sure you have searched through existing issues before creating new ones.**
|
||||
|
||||
*Regardless of what you choose, please post in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord before you start development in order to confirm that the issue or feature is aligned with the current direction of the project. We value our contributors time and effort and want to ensure that no one’s time is being misspent.*
|
||||
|
||||
## Best Practices:
|
||||
* Keep your pull requests small. Smaller pull requests are more likely to be accepted and merged
|
||||
* Comments! Commenting your code helps reviwers easily understand your contribution
|
||||
* Use Python and Typescript’s typing systems, and consider using an editor with [LSP](https://microsoft.github.io/language-server-protocol/) support to streamline development
|
||||
* Make all communications public. This ensure knowledge is shared with the whole community
|
||||
|
||||
## **How do I make a contribution?**
|
||||
|
||||
Never made an open source contribution before? Wondering how contributions work in our project? Here's a quick rundown!
|
||||
|
||||
Before starting these steps, ensure you have your local environment [configured for development](../LOCAL_DEVELOPMENT.md).
|
||||
|
||||
1. Find a [good first issue](https://github.com/invoke-ai/InvokeAI/contribute) that you are interested in addressing or a feature that you would like to add. Then, reach out to our team in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord to ensure you are setup for success.
|
||||
2. Fork the [InvokeAI](https://github.com/invoke-ai/InvokeAI) repository to your GitHub profile. This means that you will have a copy of the repository under **your-GitHub-username/InvokeAI**.
|
||||
3. Clone the repository to your local machine using:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/your-GitHub-username/InvokeAI.git
|
||||
```
|
||||
|
||||
If you're unfamiliar with using Git through the commandline, [GitHub Desktop](https://desktop.github.com) is a easy-to-use alternative with a UI. You can do all the same steps listed here, but through the interface.
|
||||
|
||||
4. Create a new branch for your fix using:
|
||||
|
||||
```bash
|
||||
git checkout -b branch-name-here
|
||||
```
|
||||
|
||||
5. Make the appropriate changes for the issue you are trying to address or the feature that you want to add.
|
||||
6. Add the file contents of the changed files to the "snapshot" git uses to manage the state of the project, also known as the index:
|
||||
|
||||
```bash
|
||||
git add insert-paths-of-changed-files-here
|
||||
```
|
||||
|
||||
7. Store the contents of the index with a descriptive message.
|
||||
|
||||
```bash
|
||||
git commit -m "Insert a short message of the changes made here"
|
||||
```
|
||||
|
||||
8. Push the changes to the remote repository using
|
||||
|
||||
```markdown
|
||||
git push origin branch-name-here
|
||||
```
|
||||
|
||||
9. Submit a pull request to the **main** branch of the InvokeAI repository.
|
||||
10. Title the pull request with a short description of the changes made and the issue or bug number associated with your change. For example, you can title an issue like so "Added more log outputting to resolve #1234".
|
||||
11. In the description of the pull request, explain the changes that you made, any issues you think exist with the pull request you made, and any questions you have for the maintainer. It's OK if your pull request is not perfect (no pull request is), the reviewer will be able to help you fix any problems and improve it!
|
||||
12. Wait for the pull request to be reviewed by other collaborators.
|
||||
13. Make changes to the pull request if the reviewer(s) recommend them.
|
||||
14. Celebrate your success after your pull request is merged!
|
||||
|
||||
If you’d like to learn more about contributing to Open Source projects, here is a [Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github).
|
||||
|
||||
## **Where can I go for help?**
|
||||
|
||||
If you need help, you can ask questions in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord.
|
||||
|
||||
For frontend related work, **@pyschedelicious** is the best person to reach out to.
|
||||
|
||||
For backend related work, please reach out to **@blessedcoolant**, **@lstein**, **@StAlKeR7779** or **@pyschedelicious**.
|
||||
|
||||
## **What does the Code of Conduct mean for me?**
|
||||
|
||||
Our [Code of Conduct](CODE_OF_CONDUCT.md) means that you are responsible for treating everyone on the project with respect and courtesy regardless of their identity. If you are the victim of any inappropriate behavior or comments as described in our Code of Conduct, we are here for you and will do the best to ensure that the abuser is reprimanded appropriately, per our code.
|
||||
|
||||
@@ -0,0 +1,75 @@
|
||||
# Contributing to the Frontend
|
||||
|
||||
# InvokeAI Web UI
|
||||
|
||||
- [InvokeAI Web UI](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#invokeai-web-ui)
|
||||
- [Stack](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#stack)
|
||||
- [Contributing](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#contributing)
|
||||
- [Dev Environment](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#dev-environment)
|
||||
- [Production builds](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#production-builds)
|
||||
|
||||
The UI is a fairly straightforward Typescript React app, with the Unified Canvas being more complex.
|
||||
|
||||
Code is located in `invokeai/frontend/web/` for review.
|
||||
|
||||
## Stack
|
||||
|
||||
State management is Redux via [Redux Toolkit](https://github.com/reduxjs/redux-toolkit). We lean heavily on RTK:
|
||||
|
||||
- `createAsyncThunk` for HTTP requests
|
||||
- `createEntityAdapter` for fetching images and models
|
||||
- `createListenerMiddleware` for workflows
|
||||
|
||||
The API client and associated types are generated from the OpenAPI schema. See API_CLIENT.md.
|
||||
|
||||
Communication with server is a mix of HTTP and [socket.io](https://github.com/socketio/socket.io-client) (with a simple socket.io redux middleware to help).
|
||||
|
||||
[Chakra-UI](https://github.com/chakra-ui/chakra-ui) & [Mantine](https://github.com/mantinedev/mantine) for components and styling.
|
||||
|
||||
[Konva](https://github.com/konvajs/react-konva) for the canvas, but we are pushing the limits of what is feasible with it (and HTML canvas in general). We plan to rebuild it with [PixiJS](https://github.com/pixijs/pixijs) to take advantage of WebGL's improved raster handling.
|
||||
|
||||
[Vite](https://vitejs.dev/) for bundling.
|
||||
|
||||
Localisation is via [i18next](https://github.com/i18next/react-i18next), but translation happens on our [Weblate](https://hosted.weblate.org/engage/invokeai/) project. Only the English source strings should be changed on this repo.
|
||||
|
||||
## Contributing
|
||||
|
||||
Thanks for your interest in contributing to the InvokeAI Web UI!
|
||||
|
||||
We encourage you to ping @psychedelicious and @blessedcoolant on [Discord](https://discord.gg/ZmtBAhwWhy) if you want to contribute, just to touch base and ensure your work doesn't conflict with anything else going on. The project is very active.
|
||||
|
||||
### Dev Environment
|
||||
|
||||
**Setup**
|
||||
|
||||
1. Install [node](https://nodejs.org/en/download/). You can confirm node is installed with:
|
||||
```bash
|
||||
node --version
|
||||
```
|
||||
2. Install [yarn classic](https://classic.yarnpkg.com/lang/en/) and confirm it is installed by running this:
|
||||
```bash
|
||||
npm install --global yarn
|
||||
yarn --version
|
||||
```
|
||||
|
||||
From `invokeai/frontend/web/` run `yarn install` to get everything set up.
|
||||
|
||||
Start everything in dev mode:
|
||||
1. Ensure your virtual environment is running
|
||||
2. Start the dev server: `yarn dev`
|
||||
3. Start the InvokeAI Nodes backend: `python scripts/invokeai-web.py # run from the repo root`
|
||||
4. Point your browser to the dev server address e.g. [http://localhost:5173/](http://localhost:5173/)
|
||||
|
||||
### VSCode Remote Dev
|
||||
|
||||
We've noticed an intermittent issue with the VSCode Remote Dev port forwarding. If you use this feature of VSCode, you may intermittently click the Invoke button and then get nothing until the request times out. Suggest disabling the IDE's port forwarding feature and doing it manually via SSH:
|
||||
|
||||
`ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host`
|
||||
|
||||
### Production builds
|
||||
|
||||
For a number of technical and logistical reasons, we need to commit UI build artefacts to the repo.
|
||||
|
||||
If you submit a PR, there is a good chance we will ask you to include a separate commit with a build of the app.
|
||||
|
||||
To build for production, run `yarn build`.
|
||||
13
docs/contributing/contribution_guides/documentation.md
Normal file
13
docs/contributing/contribution_guides/documentation.md
Normal file
@@ -0,0 +1,13 @@
|
||||
# Documentation
|
||||
|
||||
Documentation is an important part of any open source project. It provides a clear and concise way to communicate how the software works, how to use it, and how to troubleshoot issues. Without proper documentation, it can be difficult for users to understand the purpose and functionality of the project.
|
||||
|
||||
## Contributing
|
||||
|
||||
All documentation is maintained in the InvokeAI GitHub repository. If you come across documentation that is out of date or incorrect, please submit a pull request with the necessary changes.
|
||||
|
||||
When updating or creating documentation, please keep in mind InvokeAI is a tool for everyone, not just those who have familiarity with generative art.
|
||||
|
||||
## Help & Questions
|
||||
|
||||
Please ping @imic1 or @hipsterusername in the [Discord](https://discord.com/channels/1020123559063990373/1049495067846524939) if you have any questions.
|
||||
19
docs/contributing/contribution_guides/translation.md
Normal file
19
docs/contributing/contribution_guides/translation.md
Normal file
@@ -0,0 +1,19 @@
|
||||
# Translation
|
||||
|
||||
InvokeAI uses [Weblate](https://weblate.org/) for translation. Weblate is a FOSS project providing a scalable translation service. Weblate automates the tedious parts of managing translation of a growing project, and the service is generously provided at no cost to FOSS projects like InvokeAI.
|
||||
|
||||
## Contributing
|
||||
|
||||
If you'd like to contribute by adding or updating a translation, please visit our [Weblate project](https://hosted.weblate.org/engage/invokeai/). You'll need to sign in with your GitHub account (a number of other accounts are supported, including Google).
|
||||
|
||||
Once signed in, select a language and then the Web UI component. From here you can Browse and Translate strings from English to your chosen language. Zen mode offers a simpler translation experience.
|
||||
|
||||
Your changes will be attributed to you in the automated PR process; you don't need to do anything else.
|
||||
|
||||
## Help & Questions
|
||||
|
||||
Please check Weblate's [documentation](https://docs.weblate.org/en/latest/index.html) or ping @Harvestor on [Discord](https://discord.com/channels/1020123559063990373/1049495067846524939) if you have any questions.
|
||||
|
||||
## Thanks
|
||||
|
||||
Thanks to the InvokeAI community for their efforts to translate the project!
|
||||
11
docs/contributing/contribution_guides/tutorials.md
Normal file
11
docs/contributing/contribution_guides/tutorials.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# Tutorials
|
||||
|
||||
Tutorials help new & existing users expand their abilty to use InvokeAI to the full extent of our features and services.
|
||||
|
||||
Currently, we have a set of tutorials available on our [YouTube channel](https://www.youtube.com/@invokeai), but as InvokeAI continues to evolve with new updates, we want to ensure that we are giving our users the resources they need to succeed.
|
||||
|
||||
Tutorials can be in the form of videos or article walkthroughs on a subject of your choice. We recommend focusing tutorials on the key image generation methods, or on a specific component within one of the image generation methods.
|
||||
|
||||
## Contributing
|
||||
|
||||
Please reach out to @imic or @hipsterusername on [Discord](https://discord.gg/ZmtBAhwWhy) to help create tutorials for InvokeAI.
|
||||
@@ -24,7 +24,7 @@ title: Home
|
||||
|
||||
[![CI checks on main badge]][ci checks on main link]
|
||||
[![CI checks on dev badge]][ci checks on dev link]
|
||||
[![latest commit to dev badge]][latest commit to dev link]
|
||||
<!-- [![latest commit to dev badge]][latest commit to dev link] -->
|
||||
|
||||
[![github open issues badge]][github open issues link]
|
||||
[![github open prs badge]][github open prs link]
|
||||
@@ -54,10 +54,10 @@ title: Home
|
||||
[github stars badge]:
|
||||
https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
|
||||
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
|
||||
[latest commit to dev badge]:
|
||||
<!-- [latest commit to dev badge]:
|
||||
https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
|
||||
[latest commit to dev link]:
|
||||
https://github.com/invoke-ai/InvokeAI/commits/development
|
||||
https://github.com/invoke-ai/InvokeAI/commits/main -->
|
||||
[latest release badge]:
|
||||
https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
|
||||
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
|
||||
@@ -82,6 +82,25 @@ Q&A</a>]
|
||||
|
||||
This fork is rapidly evolving. Please use the [Issues tab](https://github.com/invoke-ai/InvokeAI/issues) to report bugs and make feature requests. Be sure to use the provided templates. They will help aid diagnose issues faster.
|
||||
|
||||
## :octicons-package-dependencies-24: Installation
|
||||
|
||||
This fork is supported across Linux, Windows and Macintosh. Linux users can use
|
||||
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
|
||||
driver).
|
||||
|
||||
### [Installation Getting Started Guide](installation)
|
||||
#### **[Automated Installer](installation/010_INSTALL_AUTOMATED.md)**
|
||||
✅ This is the recommended installation method for first-time users.
|
||||
#### [Manual Installation](installation/020_INSTALL_MANUAL.md)
|
||||
This method is recommended for experienced users and developers
|
||||
#### [Docker Installation](installation/040_INSTALL_DOCKER.md)
|
||||
This method is recommended for those familiar with running Docker containers
|
||||
### Other Installation Guides
|
||||
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
|
||||
- [XFormers](installation/070_INSTALL_XFORMERS.md)
|
||||
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
|
||||
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
|
||||
|
||||
## :fontawesome-solid-computer: Hardware Requirements
|
||||
|
||||
### :octicons-cpu-24: System
|
||||
@@ -107,24 +126,6 @@ images in full-precision mode:
|
||||
- At least 18 GB of free disk space for the machine learning model, Python, and
|
||||
all its dependencies.
|
||||
|
||||
## :octicons-package-dependencies-24: Installation
|
||||
|
||||
This fork is supported across Linux, Windows and Macintosh. Linux users can use
|
||||
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
|
||||
driver).
|
||||
|
||||
### [Installation Getting Started Guide](installation)
|
||||
#### [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
|
||||
This method is recommended for 1st time users
|
||||
#### [Manual Installation](installation/020_INSTALL_MANUAL.md)
|
||||
This method is recommended for experienced users and developers
|
||||
#### [Docker Installation](installation/040_INSTALL_DOCKER.md)
|
||||
This method is recommended for those familiar with running Docker containers
|
||||
### Other Installation Guides
|
||||
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
|
||||
- [XFormers](installation/070_INSTALL_XFORMERS.md)
|
||||
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
|
||||
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
|
||||
|
||||
## :octicons-gift-24: InvokeAI Features
|
||||
|
||||
@@ -222,14 +223,10 @@ get solutions for common installation problems and other issues.
|
||||
|
||||
Anyone who wishes to contribute to this project, whether documentation,
|
||||
features, bug fixes, code cleanup, testing, or code reviews, is very much
|
||||
encouraged to do so. If you are unfamiliar with how to contribute to GitHub
|
||||
projects, here is a
|
||||
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github).
|
||||
encouraged to do so.
|
||||
|
||||
A full set of contribution guidelines, along with templates, are in progress,
|
||||
but for now the most important thing is to **make your pull request against the
|
||||
"development" branch**, and not against "main". This will help keep public
|
||||
breakage to a minimum and will allow you to propose more radical changes.
|
||||
[Please take a look at our Contribution documentation to learn more about contributing to InvokeAI.
|
||||
](contributing/CONTRIBUTING.md)
|
||||
|
||||
## :octicons-person-24: Contributors
|
||||
|
||||
|
||||
@@ -124,9 +124,9 @@ experimental versions later.
|
||||
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest),
|
||||
and look for a file named:
|
||||
|
||||
- InvokeAI-installer-v2.X.X.zip
|
||||
- InvokeAI-installer-v3.X.X.zip
|
||||
|
||||
where "2.X.X" is the latest released version. The file is located
|
||||
where "3.X.X" is the latest released version. The file is located
|
||||
at the very bottom of the release page, under **Assets**.
|
||||
|
||||
4. **Unpack the installer**: Unpack the zip file into a convenient directory. This will create a new
|
||||
|
||||
@@ -15,7 +15,7 @@ See the [troubleshooting
|
||||
section](010_INSTALL_AUTOMATED.md#troubleshooting) of the automated
|
||||
install guide for frequently-encountered installation issues.
|
||||
|
||||
## Main Application
|
||||
## Installation options
|
||||
|
||||
1. [Automated Installer](010_INSTALL_AUTOMATED.md)
|
||||
|
||||
@@ -24,6 +24,9 @@ install guide for frequently-encountered installation issues.
|
||||
"developer console" which will help us debug problems with you and
|
||||
give you to access experimental features.
|
||||
|
||||
|
||||
✅ This is the recommended option for first time users.
|
||||
|
||||
2. [Manual Installation](020_INSTALL_MANUAL.md)
|
||||
|
||||
In this method you will manually run the commands needed to install
|
||||
|
||||
32
docs/nodes/communityNodes.md
Normal file
32
docs/nodes/communityNodes.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# Community Nodes
|
||||
|
||||
These are nodes that have been developed by the community, for the community. If you're not sure what a node is, you can learn more about nodes [here](overview.md).
|
||||
|
||||
If you'd like to submit a node for the community, please refer to the [node creation overview](./overview.md#contributing-nodes).
|
||||
|
||||
To download a node, simply download the `.py` node file from the link and add it to the `invokeai/app/invocations/` folder in your Invoke AI install location. Along with the node, an example node graph should be provided to help you get started with the node.
|
||||
|
||||
To use a community node graph, download the the `.json` node graph file and load it into Invoke AI via the **Load Nodes** button on the Node Editor.
|
||||
|
||||
## Disclaimer
|
||||
|
||||
The nodes linked below have been developed and contributed by members of the Invoke AI community. While we strive to ensure the quality and safety of these contributions, we do not guarantee the reliability or security of the nodes. If you have issues or concerns with any of the nodes below, please raise it on GitHub or in the Discord.
|
||||
|
||||
## List of Nodes
|
||||
|
||||
--------------------------------
|
||||
### Super Cool Node Template
|
||||
|
||||
**Description:** This node allows you to do super cool things with InvokeAI.
|
||||
|
||||
**Node Link:** https://github.com/invoke-ai/InvokeAI/fake_node.py
|
||||
|
||||
**Example Node Graph:** https://github.com/invoke-ai/InvokeAI/fake_node_graph.json
|
||||
|
||||
**Output Examples**
|
||||
|
||||

|
||||
|
||||
|
||||
## Help
|
||||
If you run into any issues with a node, please post in the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).
|
||||
42
docs/nodes/overview.md
Normal file
42
docs/nodes/overview.md
Normal file
@@ -0,0 +1,42 @@
|
||||
# Nodes
|
||||
|
||||
## What are Nodes?
|
||||
An Node is simply a single operation that takes in some inputs and gives
|
||||
out some outputs. We can then chain multiple nodes together to create more
|
||||
complex functionality. All InvokeAI features are added through nodes.
|
||||
|
||||
This means nodes can be used to easily extend the image generation capabilities of InvokeAI, and allow you build workflows to suit your needs.
|
||||
|
||||
You can read more about nodes and the node editor [here](../features/NODES.md).
|
||||
|
||||
|
||||
## Downloading Nodes
|
||||
To download a new node, visit our list of [Community Nodes](communityNodes.md). These are nodes that have been created by the community, for the community.
|
||||
|
||||
|
||||
## Contributing Nodes
|
||||
|
||||
To learn about creating a new node, please visit our [Node creation documenation](../contributing/INVOCATIONS.md).
|
||||
|
||||
Once you’ve created a node and confirmed that it behaves as expected locally, follow these steps:
|
||||
* Make sure the node is contained in a new Python (.py) file
|
||||
* Submit a pull request with a link to your node in GitHub against the `nodes` branch to add the node to the [Community Nodes](Community Nodes) list
|
||||
* Make sure you are following the template below and have provided all relevant details about the node and what it does.
|
||||
* A maintainer will review the pull request and node. If the node is aligned with the direction of the project, you might be asked for permission to include it in the core project.
|
||||
|
||||
### Community Node Template
|
||||
|
||||
```markdown
|
||||
--------------------------------
|
||||
### Super Cool Node Template
|
||||
|
||||
**Description:** This node allows you to do super cool things with InvokeAI.
|
||||
|
||||
**Node Link:** https://github.com/invoke-ai/InvokeAI/fake_node.py
|
||||
|
||||
**Example Node Graph:** https://github.com/invoke-ai/InvokeAI/fake_node_graph.json
|
||||
|
||||
**Output Examples**
|
||||
|
||||

|
||||
```
|
||||
@@ -19,65 +19,264 @@ We thank them for all of their time and hard work.
|
||||
* @blessedcoolant - Co-maintainer
|
||||
* @hipsterusername (Kent Keirsey) - Product Manager
|
||||
* @psychedelicious - Web Team Leader
|
||||
* @keturn (Kevin Turner) - Diffusers
|
||||
* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
|
||||
* @damian0815 - Attention Systems and Gameplay Engineer
|
||||
* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
|
||||
* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
|
||||
* @tildebyte - General gadfly and resident (self-appointed) know-it-all
|
||||
* @keturn - Lead for Diffusers port
|
||||
* @ebr (Eugene Brodsky) - Cloud/DevOps/Sofware engineer; your friendly neighbourhood cluster-autoscaler
|
||||
* @jpphoto (Jonathan Pollack) - Inference and rendering engine optimization
|
||||
* @genomancer (Gregg Helt) - Model training and merging
|
||||
* @genomancer (Gregg Helt) - Controlnet support
|
||||
* @StAlKeR7779 (Sergey Borisov) - Torch stack, ONNX, model management, optimization
|
||||
* @cheerio (Mary Hip) - Web development
|
||||
* @brandon (Brandon Rising) - OSS/commercial interactions
|
||||
* @spencer - Web development
|
||||
* @millu (Millun Atluri) - Documentation, GitHub integration
|
||||
* @gogurt enjoyer - Discord moderator and end user support
|
||||
* @whosawhatsis - Discord moderator and end user support
|
||||
* @dwinrger - Discord moderator and end user support
|
||||
* @526christian - Discord moderator and end user support
|
||||
|
||||
## **Contributions by**
|
||||
## **Full List of Contributors by Commit Name**
|
||||
|
||||
- [Sean McLellan](https://github.com/Oceanswave)
|
||||
- [Kevin Gibbons](https://github.com/bakkot)
|
||||
- [Tesseract Cat](https://github.com/TesseractCat)
|
||||
- [blessedcoolant](https://github.com/blessedcoolant)
|
||||
- [David Ford](https://github.com/david-ford)
|
||||
- [yunsaki](https://github.com/yunsaki)
|
||||
- [James Reynolds](https://github.com/magnusviri)
|
||||
- [David Wager](https://github.com/maddavid123)
|
||||
- [Jason Toffaletti](https://github.com/toffaletti)
|
||||
- [tildebyte](https://github.com/tildebyte)
|
||||
- [Cragin Godley](https://github.com/cgodley)
|
||||
- [BlueAmulet](https://github.com/BlueAmulet)
|
||||
- [Benjamin Warner](https://github.com/warner-benjamin)
|
||||
- [Cora Johnson-Roberson](https://github.com/corajr)
|
||||
- [veprogames](https://github.com/veprogames)
|
||||
- [JigenD](https://github.com/JigenD)
|
||||
- [Niek van der Maas](https://github.com/Niek)
|
||||
- [Henry van Megen](https://github.com/hvanmegen)
|
||||
- [Håvard Gulldahl](https://github.com/havardgulldahl)
|
||||
- [greentext2](https://github.com/greentext2)
|
||||
- [Simon Vans-Colina](https://github.com/simonvc)
|
||||
- [Gabriel Rotbart](https://github.com/gabrielrotbart)
|
||||
- [Eric Khun](https://github.com/erickhun)
|
||||
- [Brent Ozar](https://github.com/BrentOzar)
|
||||
- [nderscore](https://github.com/nderscore)
|
||||
- [Mikhail Tishin](https://github.com/tishin)
|
||||
- [Tom Elovi Spruce](https://github.com/ilovecomputers)
|
||||
- [spezialspezial](https://github.com/spezialspezial)
|
||||
- [Yosuke Shinya](https://github.com/shinya7y)
|
||||
- [Andy Pilate](https://github.com/Cubox)
|
||||
- [Muhammad Usama](https://github.com/SMUsamaShah)
|
||||
- [Arturo Mendivil](https://github.com/artmen1516)
|
||||
- [Paul Sajna](https://github.com/sajattack)
|
||||
- [Samuel Husso](https://github.com/shusso)
|
||||
- [nicolai256](https://github.com/nicolai256)
|
||||
- [Mihai](https://github.com/mh-dm)
|
||||
- [Any Winter](https://github.com/any-winter-4079)
|
||||
- [Doggettx](https://github.com/doggettx)
|
||||
- [Matthias Wild](https://github.com/mauwii)
|
||||
- [Kyle Schouviller](https://github.com/kyle0654)
|
||||
- [rabidcopy](https://github.com/rabidcopy)
|
||||
- [Dominic Letz](https://github.com/dominicletz)
|
||||
- [Dmitry T.](https://github.com/ArDiouscuros)
|
||||
- [Kent Keirsey](https://github.com/hipsterusername)
|
||||
- [psychedelicious](https://github.com/psychedelicious)
|
||||
- [damian0815](https://github.com/damian0815)
|
||||
- [Eugene Brodsky](https://github.com/ebr)
|
||||
- AbdBarho
|
||||
- ablattmann
|
||||
- AdamOStark
|
||||
- Adam Rice
|
||||
- Airton Silva
|
||||
- Alexander Eichhorn
|
||||
- Alexandre D. Roberge
|
||||
- Andreas Rozek
|
||||
- Andre LaBranche
|
||||
- Andy Bearman
|
||||
- Andy Luhrs
|
||||
- Andy Pilate
|
||||
- Any-Winter-4079
|
||||
- apolinario
|
||||
- ArDiouscuros
|
||||
- Armando C. Santisbon
|
||||
- Arthur Holstvoogd
|
||||
- artmen1516
|
||||
- Artur
|
||||
- Arturo Mendivil
|
||||
- Ben Alkov
|
||||
- Benjamin Warner
|
||||
- Bernard Maltais
|
||||
- blessedcoolant
|
||||
- blhook
|
||||
- BlueAmulet
|
||||
- Bouncyknighter
|
||||
- Brandon Rising
|
||||
- Brent Ozar
|
||||
- Brian Racer
|
||||
- bsilvereagle
|
||||
- c67e708d
|
||||
- CapableWeb
|
||||
- Carson Katri
|
||||
- Chloe
|
||||
- Chris Dawson
|
||||
- Chris Hayes
|
||||
- Chris Jones
|
||||
- chromaticist
|
||||
- Claus F. Strasburger
|
||||
- cmdr2
|
||||
- cody
|
||||
- Conor Reid
|
||||
- Cora Johnson-Roberson
|
||||
- coreco
|
||||
- cosmii02
|
||||
- cpacker
|
||||
- Cragin Godley
|
||||
- creachec
|
||||
- Damian Stewart
|
||||
- Daniel Manzke
|
||||
- Danny Beer
|
||||
- Dan Sully
|
||||
- David Burnett
|
||||
- David Ford
|
||||
- David Regla
|
||||
- David Wager
|
||||
- Daya Adianto
|
||||
- db3000
|
||||
- Denis Olshin
|
||||
- Dennis
|
||||
- Dominic Letz
|
||||
- DrGunnarMallon
|
||||
- Edward Johan
|
||||
- elliotsayes
|
||||
- Elrik
|
||||
- ElrikUnderlake
|
||||
- Eric Khun
|
||||
- Eric Wolf
|
||||
- Eugene Brodsky
|
||||
- ExperimentalCyborg
|
||||
- Fabian Bahl
|
||||
- Fabio 'MrWHO' Torchetti
|
||||
- fattire
|
||||
- Felipe Nogueira
|
||||
- Félix Sanz
|
||||
- figgefigge
|
||||
- Gabriel Mackievicz Telles
|
||||
- gabrielrotbart
|
||||
- gallegonovato
|
||||
- Gérald LONLAS
|
||||
- GitHub Actions Bot
|
||||
- gogurtenjoyer
|
||||
- greentext2
|
||||
- Gregg Helt
|
||||
- H4rk
|
||||
- Håvard Gulldahl
|
||||
- henry
|
||||
- Henry van Megen
|
||||
- hipsterusername
|
||||
- hj
|
||||
- Hosted Weblate
|
||||
- Iman Karim
|
||||
- ismail ihsan bülbül
|
||||
- Ivan Efimov
|
||||
- jakehl
|
||||
- Jakub Kolčář
|
||||
- JamDon2
|
||||
- James Reynolds
|
||||
- Jan Skurovec
|
||||
- Jari Vetoniemi
|
||||
- Jason Toffaletti
|
||||
- Jaulustus
|
||||
- Jeff Mahoney
|
||||
- jeremy
|
||||
- Jeremy Clark
|
||||
- JigenD
|
||||
- Jim Hays
|
||||
- Johan Roxendal
|
||||
- Johnathon Selstad
|
||||
- Jonathan
|
||||
- Joseph Dries III
|
||||
- JPPhoto
|
||||
- jspraul
|
||||
- Justin Wong
|
||||
- Juuso V
|
||||
- Kaspar Emanuel
|
||||
- Katsuyuki-Karasawa
|
||||
- Kent Keirsey
|
||||
- Kevin Coakley
|
||||
- Kevin Gibbons
|
||||
- Kevin Schaul
|
||||
- Kevin Turner
|
||||
- krummrey
|
||||
- Kyle Lacy
|
||||
- Kyle Schouviller
|
||||
- Lawrence Norton
|
||||
- LemonDouble
|
||||
- Leo Pasanen
|
||||
- Lincoln Stein
|
||||
- LoganPederson
|
||||
- Lynne Whitehorn
|
||||
- majick
|
||||
- Marco Labarile
|
||||
- Martin Kristiansen
|
||||
- Mary Hipp Rogers
|
||||
- mastercaster9000
|
||||
- Matthias Wild
|
||||
- michaelk71
|
||||
- mickr777
|
||||
- Mihai
|
||||
- Mihail Dumitrescu
|
||||
- Mikhail Tishin
|
||||
- Millun Atluri
|
||||
- Minjune Song
|
||||
- mitien
|
||||
- mofuzz
|
||||
- Muhammad Usama
|
||||
- Name
|
||||
- _nderscore
|
||||
- Netzer R
|
||||
- Nicholas Koh
|
||||
- Nicholas Körfer
|
||||
- nicolai256
|
||||
- Niek van der Maas
|
||||
- noodlebox
|
||||
- Nuno Coração
|
||||
- ofirkris
|
||||
- Olivier Louvignes
|
||||
- owenvincent
|
||||
- Patrick Esser
|
||||
- Patrick Tien
|
||||
- Patrick von Platen
|
||||
- Paul Sajna
|
||||
- pejotr
|
||||
- Peter Baylies
|
||||
- Peter Lin
|
||||
- plucked
|
||||
- prixt
|
||||
- psychedelicious
|
||||
- Rainer Bernhardt
|
||||
- Riccardo Giovanetti
|
||||
- Rich Jones
|
||||
- rmagur1203
|
||||
- Rob Baines
|
||||
- Robert Bolender
|
||||
- Robin Rombach
|
||||
- Rohan Barar
|
||||
- rpagliuca
|
||||
- rromb
|
||||
- Rupesh Sreeraman
|
||||
- Ryan Cao
|
||||
- Saifeddine
|
||||
- Saifeddine ALOUI
|
||||
- SammCheese
|
||||
- Sammy
|
||||
- sammyf
|
||||
- Samuel Husso
|
||||
- Scott Lahteine
|
||||
- Sean McLellan
|
||||
- Sebastian Aigner
|
||||
- Sergey Borisov
|
||||
- Sergey Krashevich
|
||||
- Shapor Naghibzadeh
|
||||
- Shawn Zhong
|
||||
- Simon Vans-Colina
|
||||
- skunkworxdark
|
||||
- slashtechno
|
||||
- spezialspezial
|
||||
- ssantos
|
||||
- StAlKeR7779
|
||||
- Stephan Koglin-Fischer
|
||||
- SteveCaruso
|
||||
- Steve Martinelli
|
||||
- Steven Frank
|
||||
- System X - Files
|
||||
- Taylor Kems
|
||||
- techicode
|
||||
- techybrain-dev
|
||||
- tesseractcat
|
||||
- thealanle
|
||||
- Thomas
|
||||
- tildebyte
|
||||
- Tim Cabbage
|
||||
- Tom
|
||||
- Tom Elovi Spruce
|
||||
- Tom Gouville
|
||||
- tomosuto
|
||||
- Travco
|
||||
- Travis Palmer
|
||||
- tyler
|
||||
- unknown
|
||||
- user1
|
||||
- Vedant Madane
|
||||
- veprogames
|
||||
- wa.code
|
||||
- wfng92
|
||||
- whosawhatsis
|
||||
- Will
|
||||
- William Becher
|
||||
- William Chong
|
||||
- xra
|
||||
- Yeung Yiu Hung
|
||||
- ymgenesis
|
||||
- Yorzaren
|
||||
- Yosuke Shinya
|
||||
- yun saki
|
||||
- Zadagu
|
||||
- zeptofine
|
||||
- 冯不游
|
||||
- 唐澤 克幸
|
||||
|
||||
## **Original CompVis Authors**
|
||||
|
||||
|
||||
@@ -58,7 +58,8 @@ class ApiDependencies:
|
||||
|
||||
@staticmethod
|
||||
def initialize(config: InvokeAIAppConfig, event_handler_id: int, logger: Logger = logger):
|
||||
logger.debug(f"InvokeAI version {__version__}")
|
||||
logger.info(f"InvokeAI version {__version__}")
|
||||
logger.info(f"Root directory = {str(config.root_path)}")
|
||||
logger.debug(f"Internet connectivity is {config.internet_available}")
|
||||
|
||||
events = FastAPIEventService(event_handler_id)
|
||||
|
||||
@@ -1,9 +1,22 @@
|
||||
from enum import Enum
|
||||
from fastapi import Body
|
||||
from fastapi.routing import APIRouter
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from invokeai.backend.image_util.patchmatch import PatchMatch
|
||||
from invokeai.version import __version__
|
||||
|
||||
from ..dependencies import ApiDependencies
|
||||
from invokeai.backend.util.logging import logging
|
||||
|
||||
class LogLevel(int, Enum):
|
||||
NotSet = logging.NOTSET
|
||||
Debug = logging.DEBUG
|
||||
Info = logging.INFO
|
||||
Warning = logging.WARNING
|
||||
Error = logging.ERROR
|
||||
Critical = logging.CRITICAL
|
||||
|
||||
app_router = APIRouter(prefix="/v1/app", tags=["app"])
|
||||
|
||||
|
||||
@@ -34,3 +47,27 @@ async def get_config() -> AppConfig:
|
||||
if PatchMatch.patchmatch_available():
|
||||
infill_methods.append('patchmatch')
|
||||
return AppConfig(infill_methods=infill_methods)
|
||||
|
||||
@app_router.get(
|
||||
"/logging",
|
||||
operation_id="get_log_level",
|
||||
responses={200: {"description" : "The operation was successful"}},
|
||||
response_model = LogLevel,
|
||||
)
|
||||
async def get_log_level(
|
||||
) -> LogLevel:
|
||||
"""Returns the log level"""
|
||||
return LogLevel(ApiDependencies.invoker.services.logger.level)
|
||||
|
||||
@app_router.post(
|
||||
"/logging",
|
||||
operation_id="set_log_level",
|
||||
responses={200: {"description" : "The operation was successful"}},
|
||||
response_model = LogLevel,
|
||||
)
|
||||
async def set_log_level(
|
||||
level: LogLevel = Body(description="New log verbosity level"),
|
||||
) -> LogLevel:
|
||||
"""Sets the log verbosity level"""
|
||||
ApiDependencies.invoker.services.logger.setLevel(level)
|
||||
return LogLevel(ApiDependencies.invoker.services.logger.level)
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
import io
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import (Body, HTTPException, Path, Query, Request, Response,
|
||||
UploadFile)
|
||||
from fastapi import Body, HTTPException, Path, Query, Request, Response, UploadFile
|
||||
from fastapi.responses import FileResponse
|
||||
from fastapi.routing import APIRouter
|
||||
from PIL import Image
|
||||
@@ -11,9 +10,11 @@ from invokeai.app.invocations.metadata import ImageMetadata
|
||||
from invokeai.app.models.image import ImageCategory, ResourceOrigin
|
||||
from invokeai.app.services.image_record_storage import OffsetPaginatedResults
|
||||
from invokeai.app.services.item_storage import PaginatedResults
|
||||
from invokeai.app.services.models.image_record import (ImageDTO,
|
||||
ImageRecordChanges,
|
||||
ImageUrlsDTO)
|
||||
from invokeai.app.services.models.image_record import (
|
||||
ImageDTO,
|
||||
ImageRecordChanges,
|
||||
ImageUrlsDTO,
|
||||
)
|
||||
|
||||
from ..dependencies import ApiDependencies
|
||||
|
||||
@@ -39,9 +40,15 @@ async def upload_image(
|
||||
response: Response,
|
||||
image_category: ImageCategory = Query(description="The category of the image"),
|
||||
is_intermediate: bool = Query(description="Whether this is an intermediate image"),
|
||||
board_id: Optional[str] = Query(
|
||||
default=None, description="The board to add this image to, if any"
|
||||
),
|
||||
session_id: Optional[str] = Query(
|
||||
default=None, description="The session ID associated with this upload, if any"
|
||||
),
|
||||
crop_visible: Optional[bool] = Query(
|
||||
default=False, description="Whether to crop the image"
|
||||
),
|
||||
) -> ImageDTO:
|
||||
"""Uploads an image"""
|
||||
if not file.content_type.startswith("image"):
|
||||
@@ -51,6 +58,9 @@ async def upload_image(
|
||||
|
||||
try:
|
||||
pil_image = Image.open(io.BytesIO(contents))
|
||||
if crop_visible:
|
||||
bbox = pil_image.getbbox()
|
||||
pil_image = pil_image.crop(bbox)
|
||||
except:
|
||||
# Error opening the image
|
||||
raise HTTPException(status_code=415, detail="Failed to read image")
|
||||
@@ -61,6 +71,7 @@ async def upload_image(
|
||||
image_origin=ResourceOrigin.EXTERNAL,
|
||||
image_category=image_category,
|
||||
session_id=session_id,
|
||||
board_id=board_id,
|
||||
is_intermediate=is_intermediate,
|
||||
)
|
||||
|
||||
@@ -84,15 +95,16 @@ async def delete_image(
|
||||
# TODO: Does this need any exception handling at all?
|
||||
pass
|
||||
|
||||
|
||||
@images_router.post("/clear-intermediates", operation_id="clear_intermediates")
|
||||
async def clear_intermediates() -> int:
|
||||
"""Clears first 100 intermediates"""
|
||||
"""Clears all intermediates"""
|
||||
|
||||
try:
|
||||
count_deleted = ApiDependencies.invoker.services.images.delete_many(is_intermediate=True)
|
||||
count_deleted = ApiDependencies.invoker.services.images.delete_intermediates()
|
||||
return count_deleted
|
||||
except Exception as e:
|
||||
# TODO: Does this need any exception handling at all?
|
||||
raise HTTPException(status_code=500, detail="Failed to clear intermediates")
|
||||
pass
|
||||
|
||||
|
||||
@@ -130,6 +142,7 @@ async def get_image_dto(
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=404)
|
||||
|
||||
|
||||
@images_router.get(
|
||||
"/{image_name}/metadata",
|
||||
operation_id="get_image_metadata",
|
||||
@@ -254,7 +267,8 @@ async def list_image_dtos(
|
||||
default=None, description="Whether to list intermediate images."
|
||||
),
|
||||
board_id: Optional[str] = Query(
|
||||
default=None, description="The board id to filter by. Use 'none' to find images without a board."
|
||||
default=None,
|
||||
description="The board id to filter by. Use 'none' to find images without a board.",
|
||||
),
|
||||
offset: int = Query(default=0, description="The page offset"),
|
||||
limit: int = Query(default=10, description="The number of images per page"),
|
||||
|
||||
@@ -315,20 +315,21 @@ async def list_ckpt_configs(
|
||||
return ApiDependencies.invoker.services.model_manager.list_checkpoint_configs()
|
||||
|
||||
|
||||
@models_router.get(
|
||||
@models_router.post(
|
||||
"/sync",
|
||||
operation_id="sync_to_config",
|
||||
responses={
|
||||
201: { "description": "synchronization successful" },
|
||||
},
|
||||
status_code = 201,
|
||||
response_model = None
|
||||
response_model = bool
|
||||
)
|
||||
async def sync_to_config(
|
||||
)->None:
|
||||
)->bool:
|
||||
"""Call after making changes to models.yaml, autoimport directories or models directory to synchronize
|
||||
in-memory data structures with disk data structures."""
|
||||
return ApiDependencies.invoker.services.model_manager.sync_to_config()
|
||||
ApiDependencies.invoker.services.model_manager.sync_to_config()
|
||||
return True
|
||||
|
||||
@models_router.put(
|
||||
"/merge/{base_model}",
|
||||
@@ -373,50 +374,3 @@ async def merge_models(
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
return response
|
||||
|
||||
# The rename operation is now supported by update_model and no longer needs to be
|
||||
# a standalone route.
|
||||
# @models_router.post(
|
||||
# "/rename/{base_model}/{model_type}/{model_name}",
|
||||
# operation_id="rename_model",
|
||||
# responses= {
|
||||
# 201: {"description" : "The model was renamed successfully"},
|
||||
# 404: {"description" : "The model could not be found"},
|
||||
# 409: {"description" : "There is already a model corresponding to the new name"},
|
||||
# },
|
||||
# status_code=201,
|
||||
# response_model=ImportModelResponse
|
||||
# )
|
||||
# async def rename_model(
|
||||
# base_model: BaseModelType = Path(description="Base model"),
|
||||
# model_type: ModelType = Path(description="The type of model"),
|
||||
# model_name: str = Path(description="current model name"),
|
||||
# new_name: Optional[str] = Query(description="new model name", default=None),
|
||||
# new_base: Optional[BaseModelType] = Query(description="new model base", default=None),
|
||||
# ) -> ImportModelResponse:
|
||||
# """ Rename a model"""
|
||||
|
||||
# logger = ApiDependencies.invoker.services.logger
|
||||
|
||||
# try:
|
||||
# result = ApiDependencies.invoker.services.model_manager.rename_model(
|
||||
# base_model = base_model,
|
||||
# model_type = model_type,
|
||||
# model_name = model_name,
|
||||
# new_name = new_name,
|
||||
# new_base = new_base,
|
||||
# )
|
||||
# logger.debug(result)
|
||||
# logger.info(f'Successfully renamed {model_name}=>{new_name}')
|
||||
# model_raw = ApiDependencies.invoker.services.model_manager.list_model(
|
||||
# model_name=new_name or model_name,
|
||||
# base_model=new_base or base_model,
|
||||
# model_type=model_type
|
||||
# )
|
||||
# return parse_obj_as(ImportModelResponse, model_raw)
|
||||
# except ModelNotFoundException as e:
|
||||
# logger.error(str(e))
|
||||
# raise HTTPException(status_code=404, detail=str(e))
|
||||
# except ValueError as e:
|
||||
# logger.error(str(e))
|
||||
# raise HTTPException(status_code=409, detail=str(e))
|
||||
|
||||
@@ -4,6 +4,7 @@ import sys
|
||||
from inspect import signature
|
||||
|
||||
import uvicorn
|
||||
import socket
|
||||
|
||||
from fastapi import FastAPI
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
@@ -193,9 +194,22 @@ app.mount("/",
|
||||
)
|
||||
|
||||
def invoke_api():
|
||||
def find_port(port: int):
|
||||
"""Find a port not in use starting at given port"""
|
||||
# Taken from https://waylonwalker.com/python-find-available-port/, thanks Waylon!
|
||||
# https://github.com/WaylonWalker
|
||||
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
||||
if s.connect_ex(("localhost", port)) == 0:
|
||||
return find_port(port=port + 1)
|
||||
else:
|
||||
return port
|
||||
|
||||
port = find_port(app_config.port)
|
||||
if port != app_config.port:
|
||||
logger.warn(f"Port {app_config.port} in use, using port {port}")
|
||||
# Start our own event loop for eventing usage
|
||||
loop = asyncio.new_event_loop()
|
||||
config = uvicorn.Config(app=app, host=app_config.host, port=app_config.port, loop=loop)
|
||||
config = uvicorn.Config(app=app, host=app_config.host, port=port, loop=loop)
|
||||
# Use access_log to turn off logging
|
||||
server = uvicorn.Server(config)
|
||||
loop.run_until_complete(server.serve())
|
||||
|
||||
@@ -85,8 +85,8 @@ CONTROLNET_DEFAULT_MODELS = [
|
||||
CONTROLNET_NAME_VALUES = Literal[tuple(CONTROLNET_DEFAULT_MODELS)]
|
||||
CONTROLNET_MODE_VALUES = Literal[tuple(
|
||||
["balanced", "more_prompt", "more_control", "unbalanced"])]
|
||||
# crop and fill options not ready yet
|
||||
# CONTROLNET_RESIZE_VALUES = Literal[tuple(["just_resize", "crop_resize", "fill_resize"])]
|
||||
CONTROLNET_RESIZE_VALUES = Literal[tuple(
|
||||
["just_resize", "crop_resize", "fill_resize", "just_resize_simple",])]
|
||||
|
||||
|
||||
class ControlNetModelField(BaseModel):
|
||||
@@ -111,7 +111,8 @@ class ControlField(BaseModel):
|
||||
description="When the ControlNet is last applied (% of total steps)")
|
||||
control_mode: CONTROLNET_MODE_VALUES = Field(
|
||||
default="balanced", description="The control mode to use")
|
||||
# resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")
|
||||
resize_mode: CONTROLNET_RESIZE_VALUES = Field(
|
||||
default="just_resize", description="The resize mode to use")
|
||||
|
||||
@validator("control_weight")
|
||||
def validate_control_weight(cls, v):
|
||||
@@ -161,6 +162,7 @@ class ControlNetInvocation(BaseInvocation):
|
||||
end_step_percent: float = Field(default=1, ge=0, le=1,
|
||||
description="When the ControlNet is last applied (% of total steps)")
|
||||
control_mode: CONTROLNET_MODE_VALUES = Field(default="balanced", description="The control mode used")
|
||||
resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode used")
|
||||
# fmt: on
|
||||
|
||||
class Config(InvocationConfig):
|
||||
@@ -187,6 +189,7 @@ class ControlNetInvocation(BaseInvocation):
|
||||
begin_step_percent=self.begin_step_percent,
|
||||
end_step_percent=self.end_step_percent,
|
||||
control_mode=self.control_mode,
|
||||
resize_mode=self.resize_mode,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
@@ -30,6 +30,7 @@ from .compel import ConditioningField
|
||||
from .controlnet_image_processors import ControlField
|
||||
from .image import ImageOutput
|
||||
from .model import ModelInfo, UNetField, VaeField
|
||||
from invokeai.app.util.controlnet_utils import prepare_control_image
|
||||
|
||||
from diffusers.models.attention_processor import (
|
||||
AttnProcessor2_0,
|
||||
@@ -288,7 +289,7 @@ class TextToLatentsInvocation(BaseInvocation):
|
||||
# and add in batch_size, num_images_per_prompt?
|
||||
# and do real check for classifier_free_guidance?
|
||||
# prepare_control_image should return torch.Tensor of shape(batch_size, 3, height, width)
|
||||
control_image = model.prepare_control_image(
|
||||
control_image = prepare_control_image(
|
||||
image=input_image,
|
||||
do_classifier_free_guidance=do_classifier_free_guidance,
|
||||
width=control_width_resize,
|
||||
@@ -298,13 +299,18 @@ class TextToLatentsInvocation(BaseInvocation):
|
||||
device=control_model.device,
|
||||
dtype=control_model.dtype,
|
||||
control_mode=control_info.control_mode,
|
||||
resize_mode=control_info.resize_mode,
|
||||
)
|
||||
control_item = ControlNetData(
|
||||
model=control_model, image_tensor=control_image,
|
||||
model=control_model,
|
||||
image_tensor=control_image,
|
||||
weight=control_info.control_weight,
|
||||
begin_step_percent=control_info.begin_step_percent,
|
||||
end_step_percent=control_info.end_step_percent,
|
||||
control_mode=control_info.control_mode,
|
||||
# any resizing needed should currently be happening in prepare_control_image(),
|
||||
# but adding resize_mode to ControlNetData in case needed in the future
|
||||
resize_mode=control_info.resize_mode,
|
||||
)
|
||||
control_data.append(control_item)
|
||||
# MultiControlNetModel has been refactored out, just need list[ControlNetData]
|
||||
@@ -601,7 +607,7 @@ class ResizeLatentsInvocation(BaseInvocation):
|
||||
antialias: bool = Field(
|
||||
default=False,
|
||||
description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
|
||||
|
||||
|
||||
class Config(InvocationConfig):
|
||||
schema_extra = {
|
||||
"ui": {
|
||||
@@ -647,7 +653,7 @@ class ScaleLatentsInvocation(BaseInvocation):
|
||||
antialias: bool = Field(
|
||||
default=False,
|
||||
description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
|
||||
|
||||
|
||||
class Config(InvocationConfig):
|
||||
schema_extra = {
|
||||
"ui": {
|
||||
@@ -758,7 +764,7 @@ class ImageToLatentsInvocation(BaseInvocation):
|
||||
dtype=vae.dtype
|
||||
) # FIXME: uses torch.randn. make reproducible!
|
||||
|
||||
latents = 0.18215 * latents
|
||||
latents = vae.config.scaling_factor * latents
|
||||
latents = latents.to(dtype=orig_dtype)
|
||||
|
||||
name = f"{context.graph_execution_state_id}__{self.id}"
|
||||
|
||||
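One detail worth noting in the `ImageToLatentsInvocation` hunk above: the hard-coded SD-1.x latent scaling constant 0.18215 is replaced by `vae.config.scaling_factor`, so VAEs that report a different factor (SDXL uses 0.13025) are handled correctly. A hedged sketch of that change in isolation; the helper name and the fallback default are illustrative:

```python
from types import SimpleNamespace

def scale_latents(latents, vae_config):
    """Scale encoded latents by the factor the loaded VAE reports instead of hard-coding 0.18215."""
    scaling_factor = getattr(vae_config, "scaling_factor", 0.18215)  # fallback value is an assumption
    return scaling_factor * latents

print(scale_latents(1.0, SimpleNamespace(scaling_factor=0.13025)))  # 0.13025 for an SDXL-style VAE
```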
@@ -6,6 +6,7 @@ from typing import List, Literal, Optional, Union
|
||||
from pydantic import Field, validator
|
||||
|
||||
from ...backend.model_management import ModelType, SubModelType
|
||||
from invokeai.app.util.step_callback import stable_diffusion_xl_step_callback
|
||||
from .baseinvocation import (BaseInvocation, BaseInvocationOutput,
|
||||
InvocationConfig, InvocationContext)
|
||||
|
||||
@@ -243,10 +244,31 @@ class SDXLTextToLatentsInvocation(BaseInvocation):
|
||||
},
|
||||
}
|
||||
|
||||
def dispatch_progress(
|
||||
self,
|
||||
context: InvocationContext,
|
||||
source_node_id: str,
|
||||
sample,
|
||||
step,
|
||||
total_steps,
|
||||
) -> None:
|
||||
stable_diffusion_xl_step_callback(
|
||||
context=context,
|
||||
node=self.dict(),
|
||||
source_node_id=source_node_id,
|
||||
sample=sample,
|
||||
step=step,
|
||||
total_steps=total_steps,
|
||||
)
|
||||
|
||||
# based on
|
||||
# https://github.com/huggingface/diffusers/blob/3ebbaf7c96801271f9e6c21400033b6aa5ffcf29/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py#L375
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
||||
graph_execution_state = context.services.graph_execution_manager.get(
|
||||
context.graph_execution_state_id
|
||||
)
|
||||
source_node_id = graph_execution_state.prepared_source_mapping[self.id]
|
||||
latents = context.services.latents.get(self.noise.latents_name)
|
||||
|
||||
positive_cond_data = context.services.latents.get(self.positive_conditioning.conditioning_name)
|
||||
@@ -341,6 +363,7 @@ class SDXLTextToLatentsInvocation(BaseInvocation):
|
||||
# call the callback, if provided
|
||||
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
|
||||
progress_bar.update()
|
||||
self.dispatch_progress(context, source_node_id, latents, i, num_inference_steps)
|
||||
#if callback is not None and i % callback_steps == 0:
|
||||
# callback(i, t, latents)
|
||||
else:
|
||||
@@ -409,6 +432,7 @@ class SDXLTextToLatentsInvocation(BaseInvocation):
|
||||
# call the callback, if provided
|
||||
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
|
||||
progress_bar.update()
|
||||
self.dispatch_progress(context, source_node_id, latents, i, num_inference_steps)
|
||||
#if callback is not None and i % callback_steps == 0:
|
||||
# callback(i, t, latents)
|
||||
|
||||
@@ -473,10 +497,31 @@ class SDXLLatentsToLatentsInvocation(BaseInvocation):
|
||||
},
|
||||
}
|
||||
|
||||
def dispatch_progress(
|
||||
self,
|
||||
context: InvocationContext,
|
||||
source_node_id: str,
|
||||
sample,
|
||||
step,
|
||||
total_steps,
|
||||
) -> None:
|
||||
stable_diffusion_xl_step_callback(
|
||||
context=context,
|
||||
node=self.dict(),
|
||||
source_node_id=source_node_id,
|
||||
sample=sample,
|
||||
step=step,
|
||||
total_steps=total_steps,
|
||||
)
|
||||
|
||||
# based on
|
||||
# https://github.com/huggingface/diffusers/blob/3ebbaf7c96801271f9e6c21400033b6aa5ffcf29/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py#L375
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
||||
graph_execution_state = context.services.graph_execution_manager.get(
|
||||
context.graph_execution_state_id
|
||||
)
|
||||
source_node_id = graph_execution_state.prepared_source_mapping[self.id]
|
||||
latents = context.services.latents.get(self.latents.latents_name)
|
||||
|
||||
positive_cond_data = context.services.latents.get(self.positive_conditioning.conditioning_name)
|
||||
@@ -579,6 +624,7 @@ class SDXLLatentsToLatentsInvocation(BaseInvocation):
|
||||
# call the callback, if provided
|
||||
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
|
||||
progress_bar.update()
|
||||
self.dispatch_progress(context, source_node_id, latents, i, num_inference_steps)
|
||||
#if callback is not None and i % callback_steps == 0:
|
||||
# callback(i, t, latents)
|
||||
else:
|
||||
@@ -647,6 +693,7 @@ class SDXLLatentsToLatentsInvocation(BaseInvocation):
|
||||
# call the callback, if provided
|
||||
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
|
||||
progress_bar.update()
|
||||
self.dispatch_progress(context, source_node_id, latents, i, num_inference_steps)
|
||||
#if callback is not None and i % callback_steps == 0:
|
||||
# callback(i, t, latents)
|
||||
|
||||
|
||||
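The SDXL denoising loops above now call `dispatch_progress` on the same cadence as the progress-bar update. A standalone sketch of that gating condition, with illustrative step counts:

```python
def should_report(i: int, num_steps: int, num_warmup_steps: int, scheduler_order: int) -> bool:
    """Report on the final step, or on every scheduler_order-th step once warmup has finished."""
    return i == num_steps - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler_order == 0)

print([i for i in range(30) if should_report(i, num_steps=30, num_warmup_steps=1, scheduler_order=2)])
# -> [1, 3, 5, ..., 29]
```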
@@ -277,7 +277,7 @@ class InvokeAISettings(BaseSettings):
|
||||
@classmethod
|
||||
def _excluded_from_yaml(self)->List[str]:
|
||||
# combination of deprecated parameters and internal ones that shouldn't be exposed as invokeai.yaml options
|
||||
return ['type','initconf', 'gpu_mem_reserved', 'max_loaded_models', 'version', 'from_file', 'model', 'restore']
|
||||
return ['type','initconf', 'gpu_mem_reserved', 'max_loaded_models', 'version', 'from_file', 'model', 'restore', 'root']
|
||||
|
||||
class Config:
|
||||
env_file_encoding = 'utf-8'
|
||||
@@ -374,16 +374,16 @@ setting environment variables INVOKEAI_<setting>.
|
||||
max_cache_size : float = Field(default=6.0, gt=0, description="Maximum memory amount used by model cache for rapid switching", category='Memory/Performance')
|
||||
max_vram_cache_size : float = Field(default=2.75, ge=0, description="Amount of VRAM reserved for model storage", category='Memory/Performance')
|
||||
gpu_mem_reserved : float = Field(default=2.75, ge=0, description="DEPRECATED: use max_vram_cache_size. Amount of VRAM reserved for model storage", category='DEPRECATED')
|
||||
precision : Literal[tuple(['auto','float16','float32','autocast'])] = Field(default='float16',description='Floating point precision', category='Memory/Performance')
|
||||
precision : Literal[tuple(['auto','float16','float32','autocast'])] = Field(default='auto',description='Floating point precision', category='Memory/Performance')
|
||||
sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category='Memory/Performance')
|
||||
xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance')
|
||||
tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category='Memory/Performance')
|
||||
|
||||
root : Path = Field(default=_find_root(), description='InvokeAI runtime root directory', category='Paths')
|
||||
autoimport_dir : Path = Field(default='autoimport/main', description='Path to a directory of models files to be imported on startup.', category='Paths')
|
||||
lora_dir : Path = Field(default='autoimport/lora', description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', category='Paths')
|
||||
embedding_dir : Path = Field(default='autoimport/embedding', description='Path to a directory of Textual Inversion embeddings to be imported on startup.', category='Paths')
|
||||
controlnet_dir : Path = Field(default='autoimport/controlnet', description='Path to a directory of ControlNet embeddings to be imported on startup.', category='Paths')
|
||||
autoimport_dir : Path = Field(default='autoimport', description='Path to a directory of models files to be imported on startup.', category='Paths')
|
||||
lora_dir : Path = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', category='Paths')
|
||||
embedding_dir : Path = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', category='Paths')
|
||||
controlnet_dir : Path = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', category='Paths')
|
||||
conf_path : Path = Field(default='configs/models.yaml', description='Path to models definition file', category='Paths')
|
||||
models_dir : Path = Field(default='models', description='Path to the models directory', category='Paths')
|
||||
legacy_conf_dir : Path = Field(default='configs/stable-diffusion', description='Path to directory of legacy checkpoint config files', category='Paths')
|
||||
@@ -397,7 +397,7 @@ setting environment variables INVOKEAI_<setting>.
|
||||
log_handlers : List[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>"', category="Logging")
|
||||
# note - would be better to read the log_format values from logging.py, but this creates circular dependencies issues
|
||||
log_format : Literal[tuple(['plain','color','syslog','legacy'])] = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style', category="Logging")
|
||||
log_level : Literal[tuple(["debug","info","warning","error","critical"])] = Field(default="debug", description="Emit logging messages at this level or higher", category="Logging")
|
||||
log_level : Literal[tuple(["debug","info","warning","error","critical"])] = Field(default="info", description="Emit logging messages at this level or higher", category="Logging")
|
||||
|
||||
version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
|
||||
#fmt: on
|
||||
@@ -446,7 +446,7 @@ setting environment variables INVOKEAI_<setting>.
        Path to the runtime root directory
        '''
        if self.root:
            return Path(self.root).expanduser()
            return Path(self.root).expanduser().absolute()
        else:
            return self.find_root()

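The configuration hunks above change several defaults (`precision` to "auto", `log_level` to "info") and collapse the per-type autoimport directories into a single `autoimport` folder. As the surrounding hunk header notes, these settings can also be overridden with `INVOKEAI_<setting>` environment variables. A hedged sketch of that override pattern using pydantic v1 `BaseSettings`; the `MiniSettings` class is a stand-in, not the real `InvokeAIAppConfig`:

```python
import os
from typing import Literal
from pydantic import BaseSettings, Field

class MiniSettings(BaseSettings):
    """Tiny stand-in showing how INVOKEAI_<setting> environment variables override defaults."""
    precision: Literal["auto", "float16", "float32", "autocast"] = Field(default="auto")
    log_level: Literal["debug", "info", "warning", "error", "critical"] = Field(default="info")

    class Config:
        env_prefix = "INVOKEAI_"

os.environ["INVOKEAI_LOG_LEVEL"] = "debug"
print(MiniSettings())  # precision='auto' log_level='debug'
```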
@@ -122,6 +122,11 @@ class ImageRecordStorageBase(ABC):
        """Deletes many image records."""
        pass

    @abstractmethod
    def delete_intermediates(self) -> list[str]:
        """Deletes all intermediate image records, returning a list of deleted image names."""
        pass

    @abstractmethod
    def save(
        self,
@@ -461,6 +466,32 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
        finally:
            self._lock.release()

    def delete_intermediates(self) -> list[str]:
        try:
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                SELECT image_name FROM images
                WHERE is_intermediate = TRUE;
                """
            )
            result = cast(list[sqlite3.Row], self._cursor.fetchall())
            image_names = list(map(lambda r: r[0], result))
            self._cursor.execute(
                """--sql
                DELETE FROM images
                WHERE is_intermediate = TRUE;
                """
            )
            self._conn.commit()
            return image_names
        except sqlite3.Error as e:
            self._conn.rollback()
            raise ImageRecordDeleteException from e
        finally:
            self._lock.release()

    def save(
        self,
        image_name: str,

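The new `delete_intermediates` method above selects the intermediate image names and deletes their rows under the same lock, so the caller knows exactly which files to remove afterwards. A self-contained sketch of that select-then-delete pattern with plain `sqlite3`; the in-memory table and file names are illustrative:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE images (image_name TEXT, is_intermediate BOOLEAN)")
conn.executemany("INSERT INTO images VALUES (?, ?)", [("a.png", True), ("b.png", False), ("c.png", True)])

def delete_intermediates(conn: sqlite3.Connection) -> list[str]:
    """Return the names of intermediate images and delete their rows, rolling back on error."""
    try:
        cur = conn.execute("SELECT image_name FROM images WHERE is_intermediate = TRUE;")
        names = [row[0] for row in cur.fetchall()]
        conn.execute("DELETE FROM images WHERE is_intermediate = TRUE;")
        conn.commit()
        return names
    except sqlite3.Error:
        conn.rollback()
        raise

print(delete_intermediates(conn))  # ['a.png', 'c.png']
```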
@@ -6,21 +6,33 @@ from typing import TYPE_CHECKING, Optional
|
||||
from PIL.Image import Image as PILImageType
|
||||
|
||||
from invokeai.app.invocations.metadata import ImageMetadata
|
||||
from invokeai.app.models.image import (ImageCategory,
|
||||
InvalidImageCategoryException,
|
||||
InvalidOriginException, ResourceOrigin)
|
||||
from invokeai.app.services.board_image_record_storage import \
|
||||
BoardImageRecordStorageBase
|
||||
from invokeai.app.models.image import (
|
||||
ImageCategory,
|
||||
InvalidImageCategoryException,
|
||||
InvalidOriginException,
|
||||
ResourceOrigin,
|
||||
)
|
||||
from invokeai.app.services.board_image_record_storage import BoardImageRecordStorageBase
|
||||
from invokeai.app.services.image_file_storage import (
|
||||
ImageFileDeleteException, ImageFileNotFoundException,
|
||||
ImageFileSaveException, ImageFileStorageBase)
|
||||
ImageFileDeleteException,
|
||||
ImageFileNotFoundException,
|
||||
ImageFileSaveException,
|
||||
ImageFileStorageBase,
|
||||
)
|
||||
from invokeai.app.services.image_record_storage import (
|
||||
ImageRecordDeleteException, ImageRecordNotFoundException,
|
||||
ImageRecordSaveException, ImageRecordStorageBase, OffsetPaginatedResults)
|
||||
ImageRecordDeleteException,
|
||||
ImageRecordNotFoundException,
|
||||
ImageRecordSaveException,
|
||||
ImageRecordStorageBase,
|
||||
OffsetPaginatedResults,
|
||||
)
|
||||
from invokeai.app.services.item_storage import ItemStorageABC
|
||||
from invokeai.app.services.models.image_record import (ImageDTO, ImageRecord,
|
||||
ImageRecordChanges,
|
||||
image_record_to_dto)
|
||||
from invokeai.app.services.models.image_record import (
|
||||
ImageDTO,
|
||||
ImageRecord,
|
||||
ImageRecordChanges,
|
||||
image_record_to_dto,
|
||||
)
|
||||
from invokeai.app.services.resource_name import NameServiceBase
|
||||
from invokeai.app.services.urls import UrlServiceBase
|
||||
from invokeai.app.util.metadata import get_metadata_graph_from_raw_session
|
||||
@@ -40,6 +52,7 @@ class ImageServiceABC(ABC):
|
||||
image_category: ImageCategory,
|
||||
node_id: Optional[str] = None,
|
||||
session_id: Optional[str] = None,
|
||||
board_id: Optional[str] = None,
|
||||
is_intermediate: bool = False,
|
||||
metadata: Optional[dict] = None,
|
||||
) -> ImageDTO:
|
||||
@@ -109,12 +122,10 @@ class ImageServiceABC(ABC):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def delete_many(self, is_intermediate: bool) -> int:
|
||||
"""Deletes many images."""
|
||||
def delete_intermediates(self) -> int:
|
||||
"""Deletes all intermediate images."""
|
||||
pass
|
||||
|
||||
|
||||
|
||||
@abstractmethod
|
||||
def delete_images_on_board(self, board_id: str):
|
||||
"""Deletes all images on a board."""
|
||||
@@ -164,6 +175,7 @@ class ImageService(ImageServiceABC):
|
||||
image_category: ImageCategory,
|
||||
node_id: Optional[str] = None,
|
||||
session_id: Optional[str] = None,
|
||||
board_id: Optional[str] = None,
|
||||
is_intermediate: bool = False,
|
||||
metadata: Optional[dict] = None,
|
||||
) -> ImageDTO:
|
||||
@@ -205,6 +217,11 @@ class ImageService(ImageServiceABC):
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
if board_id is not None:
|
||||
self._services.board_image_records.add_image_to_board(
|
||||
board_id=board_id, image_name=image_name
|
||||
)
|
||||
|
||||
self._services.image_files.save(
|
||||
image_name=image_name, image=image, metadata=metadata, graph=graph
|
||||
)
|
||||
@@ -401,21 +418,13 @@ class ImageService(ImageServiceABC):
|
||||
except Exception as e:
|
||||
self._services.logger.error("Problem deleting image records and files")
|
||||
raise e
|
||||
|
||||
def delete_many(self, is_intermediate: bool):
|
||||
|
||||
def delete_intermediates(self) -> int:
|
||||
try:
|
||||
# only clears 100 at a time
|
||||
images = self._services.image_records.get_many(offset=0, limit=100, is_intermediate=is_intermediate,)
|
||||
count = len(images.items)
|
||||
image_name_list = list(
|
||||
map(
|
||||
lambda r: r.image_name,
|
||||
images.items,
|
||||
)
|
||||
)
|
||||
for image_name in image_name_list:
|
||||
image_names = self._services.image_records.delete_intermediates()
|
||||
count = len(image_names)
|
||||
for image_name in image_names:
|
||||
self._services.image_files.delete(image_name)
|
||||
self._services.image_records.delete_many(image_name_list)
|
||||
return count
|
||||
except ImageRecordDeleteException:
|
||||
self._services.logger.error(f"Failed to delete image records")
|
||||
|
||||
@@ -299,10 +299,11 @@ class ModelManagerService(ModelManagerServiceBase):
        else:
            config_file = config.root_dir / "configs/models.yaml"

        logger.debug(f'config file={config_file}')
        logger.debug(f'Config file={config_file}')

        device = torch.device(choose_torch_device())
        logger.debug(f'GPU device = {device}')
        device_name = torch.cuda.get_device_name() if device==torch.device('cuda') else ''
        logger.info(f'GPU device = {device} {device_name}')

        precision = config.precision
        if precision == "auto":

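The model-manager hunk above promotes the GPU report from a debug message to an info message and appends the CUDA device name. A small sketch of the same check; the helper name is illustrative, and `choose_torch_device` is replaced here by a plain CUDA availability test:

```python
import torch

def describe_device() -> str:
    """Report the torch device plus the CUDA card name when one is available."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    device_name = torch.cuda.get_device_name() if device == torch.device("cuda") else ""
    return f"GPU device = {device} {device_name}".strip()

print(describe_device())  # e.g. "GPU device = cuda NVIDIA GeForce RTX 3090", or "GPU device = cpu"
```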
342  invokeai/app/util/controlnet_utils.py (new file)
@@ -0,0 +1,342 @@
|
||||
import torch
|
||||
import numpy as np
|
||||
import cv2
|
||||
from PIL import Image
|
||||
from diffusers.utils import PIL_INTERPOLATION
|
||||
|
||||
from einops import rearrange
|
||||
from controlnet_aux.util import HWC3, resize_image
|
||||
|
||||
###################################################################
|
||||
# Copy of scripts/lvminthin.py from Mikubill/sd-webui-controlnet
|
||||
###################################################################
|
||||
# High Quality Edge Thinning using Pure Python
# Written by Lvmin Zhang
# 2023 April
# Stanford University
# If you use this, please cite "High Quality Edge Thinning using Pure Python", Lvmin Zhang, in Mikubill/sd-webui-controlnet.
|
||||
|
||||
lvmin_kernels_raw = [
|
||||
np.array([
|
||||
[-1, -1, -1],
|
||||
[0, 1, 0],
|
||||
[1, 1, 1]
|
||||
], dtype=np.int32),
|
||||
np.array([
|
||||
[0, -1, -1],
|
||||
[1, 1, -1],
|
||||
[0, 1, 0]
|
||||
], dtype=np.int32)
|
||||
]
|
||||
|
||||
lvmin_kernels = []
|
||||
lvmin_kernels += [np.rot90(x, k=0, axes=(0, 1)) for x in lvmin_kernels_raw]
|
||||
lvmin_kernels += [np.rot90(x, k=1, axes=(0, 1)) for x in lvmin_kernels_raw]
|
||||
lvmin_kernels += [np.rot90(x, k=2, axes=(0, 1)) for x in lvmin_kernels_raw]
|
||||
lvmin_kernels += [np.rot90(x, k=3, axes=(0, 1)) for x in lvmin_kernels_raw]
|
||||
|
||||
lvmin_prunings_raw = [
|
||||
np.array([
|
||||
[-1, -1, -1],
|
||||
[-1, 1, -1],
|
||||
[0, 0, -1]
|
||||
], dtype=np.int32),
|
||||
np.array([
|
||||
[-1, -1, -1],
|
||||
[-1, 1, -1],
|
||||
[-1, 0, 0]
|
||||
], dtype=np.int32)
|
||||
]
|
||||
|
||||
lvmin_prunings = []
|
||||
lvmin_prunings += [np.rot90(x, k=0, axes=(0, 1)) for x in lvmin_prunings_raw]
|
||||
lvmin_prunings += [np.rot90(x, k=1, axes=(0, 1)) for x in lvmin_prunings_raw]
|
||||
lvmin_prunings += [np.rot90(x, k=2, axes=(0, 1)) for x in lvmin_prunings_raw]
|
||||
lvmin_prunings += [np.rot90(x, k=3, axes=(0, 1)) for x in lvmin_prunings_raw]
|
||||
|
||||
|
||||
def remove_pattern(x, kernel):
|
||||
objects = cv2.morphologyEx(x, cv2.MORPH_HITMISS, kernel)
|
||||
objects = np.where(objects > 127)
|
||||
x[objects] = 0
|
||||
return x, objects[0].shape[0] > 0
|
||||
|
||||
|
||||
def thin_one_time(x, kernels):
|
||||
y = x
|
||||
is_done = True
|
||||
for k in kernels:
|
||||
y, has_update = remove_pattern(y, k)
|
||||
if has_update:
|
||||
is_done = False
|
||||
return y, is_done
|
||||
|
||||
|
||||
def lvmin_thin(x, prunings=True):
|
||||
y = x
|
||||
for i in range(32):
|
||||
y, is_done = thin_one_time(y, lvmin_kernels)
|
||||
if is_done:
|
||||
break
|
||||
if prunings:
|
||||
y, _ = thin_one_time(y, lvmin_prunings)
|
||||
return y
|
||||
|
||||
|
||||
def nake_nms(x):
|
||||
f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
|
||||
f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
|
||||
f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
|
||||
f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)
|
||||
y = np.zeros_like(x)
|
||||
for f in [f1, f2, f3, f4]:
|
||||
np.putmask(y, cv2.dilate(x, kernel=f) == x, x)
|
||||
return y
|
||||
|
||||
|
||||
################################################################################
|
||||
# copied from Mikubill/sd-webui-controlnet external_code.py and modified for InvokeAI
|
||||
################################################################################
|
||||
# FIXME: not using yet, if used in the future will most likely require modification of preprocessors
|
||||
def pixel_perfect_resolution(
|
||||
image: np.ndarray,
|
||||
target_H: int,
|
||||
target_W: int,
|
||||
resize_mode: str,
|
||||
) -> int:
|
||||
"""
|
||||
Calculate the estimated resolution for resizing an image while preserving aspect ratio.
|
||||
|
||||
The function first calculates scaling factors for height and width of the image based on the target
|
||||
height and width. Then, based on the chosen resize mode, it either takes the smaller or the larger
|
||||
scaling factor to estimate the new resolution.
|
||||
|
||||
If the resize mode is OUTER_FIT, the function uses the smaller scaling factor, ensuring the whole image
|
||||
fits within the target dimensions, potentially leaving some empty space.
|
||||
|
||||
If the resize mode is not OUTER_FIT, the function uses the larger scaling factor, ensuring the target
|
||||
dimensions are fully filled, potentially cropping the image.
|
||||
|
||||
After calculating the estimated resolution, the function prints some debugging information.
|
||||
|
||||
Args:
|
||||
image (np.ndarray): A 3D numpy array representing an image. The dimensions represent [height, width, channels].
|
||||
target_H (int): The target height for the image.
|
||||
target_W (int): The target width for the image.
|
||||
resize_mode (ResizeMode): The mode for resizing.
|
||||
|
||||
Returns:
|
||||
int: The estimated resolution after resizing.
|
||||
"""
|
||||
raw_H, raw_W, _ = image.shape
|
||||
|
||||
k0 = float(target_H) / float(raw_H)
|
||||
k1 = float(target_W) / float(raw_W)
|
||||
|
||||
if resize_mode == "fill_resize":
|
||||
estimation = min(k0, k1) * float(min(raw_H, raw_W))
|
||||
else: # "crop_resize" or "just_resize" (or possibly "just_resize_simple"?)
|
||||
estimation = max(k0, k1) * float(min(raw_H, raw_W))
|
||||
|
||||
# print(f"Pixel Perfect Computation:")
|
||||
# print(f"resize_mode = {resize_mode}")
|
||||
# print(f"raw_H = {raw_H}")
|
||||
# print(f"raw_W = {raw_W}")
|
||||
# print(f"target_H = {target_H}")
|
||||
# print(f"target_W = {target_W}")
|
||||
# print(f"estimation = {estimation}")
|
||||
|
||||
return int(np.round(estimation))
|
||||
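The docstring above says the estimated resolution comes from the smaller scale factor for `fill_resize` (the whole image must fit) and from the larger one otherwise (the target must be covered). A tiny worked example of those two cases; the dimensions are illustrative:

```python
raw_h, raw_w = 512, 768          # source detect map
target_h, target_w = 1024, 1024  # generation size
k0, k1 = target_h / raw_h, target_w / raw_w   # 2.0 and about 1.33

# "fill_resize": the whole image must fit, so use the smaller factor
print(round(min(k0, k1) * min(raw_h, raw_w)))  # 683
# "crop_resize" / "just_resize": the target must be covered, so use the larger factor
print(round(max(k0, k1) * min(raw_h, raw_w)))  # 1024
```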
|
||||
|
||||
###########################################################################
|
||||
# Copied from detectmap_proc method in scripts/detectmap_proc.py in Mikubill/sd-webui-controlnet
|
||||
# modified for InvokeAI
|
||||
###########################################################################
|
||||
# def detectmap_proc(detected_map, module, resize_mode, h, w):
|
||||
def np_img_resize(
|
||||
np_img: np.ndarray,
|
||||
resize_mode: str,
|
||||
h: int,
|
||||
w: int,
|
||||
device: torch.device = torch.device('cpu')
|
||||
):
|
||||
# if 'inpaint' in module:
|
||||
# np_img = np_img.astype(np.float32)
|
||||
# else:
|
||||
# np_img = HWC3(np_img)
|
||||
np_img = HWC3(np_img)
|
||||
|
||||
def safe_numpy(x):
|
||||
# A very safe method to make sure that Apple/Mac works
|
||||
y = x
|
||||
|
||||
# below is very boring but do not change these. If you change these Apple or Mac may fail.
|
||||
y = y.copy()
|
||||
y = np.ascontiguousarray(y)
|
||||
y = y.copy()
|
||||
return y
|
||||
|
||||
def get_pytorch_control(x):
|
||||
# A very safe method to make sure that Apple/Mac works
|
||||
y = x
|
||||
|
||||
# below is very boring but do not change these. If you change these Apple or Mac may fail.
|
||||
y = torch.from_numpy(y)
|
||||
y = y.float() / 255.0
|
||||
y = rearrange(y, 'h w c -> 1 c h w')
|
||||
y = y.clone()
|
||||
# y = y.to(devices.get_device_for("controlnet"))
|
||||
y = y.to(device)
|
||||
y = y.clone()
|
||||
return y
|
||||
|
||||
def high_quality_resize(x: np.ndarray,
|
||||
size):
|
||||
# Written by lvmin
|
||||
# Super high-quality control map up-scaling, considering binary, seg, and one-pixel edges
|
||||
inpaint_mask = None
|
||||
if x.ndim == 3 and x.shape[2] == 4:
|
||||
inpaint_mask = x[:, :, 3]
|
||||
x = x[:, :, 0:3]
|
||||
|
||||
new_size_is_smaller = (size[0] * size[1]) < (x.shape[0] * x.shape[1])
|
||||
new_size_is_bigger = (size[0] * size[1]) > (x.shape[0] * x.shape[1])
|
||||
unique_color_count = np.unique(x.reshape(-1, x.shape[2]), axis=0).shape[0]
|
||||
is_one_pixel_edge = False
|
||||
is_binary = False
|
||||
if unique_color_count == 2:
|
||||
is_binary = np.min(x) < 16 and np.max(x) > 240
|
||||
if is_binary:
|
||||
xc = x
|
||||
xc = cv2.erode(xc, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)
|
||||
xc = cv2.dilate(xc, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)
|
||||
one_pixel_edge_count = np.where(xc < x)[0].shape[0]
|
||||
all_edge_count = np.where(x > 127)[0].shape[0]
|
||||
is_one_pixel_edge = one_pixel_edge_count * 2 > all_edge_count
|
||||
|
||||
if 2 < unique_color_count < 200:
|
||||
interpolation = cv2.INTER_NEAREST
|
||||
elif new_size_is_smaller:
|
||||
interpolation = cv2.INTER_AREA
|
||||
else:
|
||||
interpolation = cv2.INTER_CUBIC # Must be CUBIC because we now use nms. NEVER CHANGE THIS
|
||||
|
||||
y = cv2.resize(x, size, interpolation=interpolation)
|
||||
if inpaint_mask is not None:
|
||||
inpaint_mask = cv2.resize(inpaint_mask, size, interpolation=interpolation)
|
||||
|
||||
if is_binary:
|
||||
y = np.mean(y.astype(np.float32), axis=2).clip(0, 255).astype(np.uint8)
|
||||
if is_one_pixel_edge:
|
||||
y = nake_nms(y)
|
||||
_, y = cv2.threshold(y, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
y = lvmin_thin(y, prunings=new_size_is_bigger)
|
||||
else:
|
||||
_, y = cv2.threshold(y, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
y = np.stack([y] * 3, axis=2)
|
||||
|
||||
if inpaint_mask is not None:
|
||||
inpaint_mask = (inpaint_mask > 127).astype(np.float32) * 255.0
|
||||
inpaint_mask = inpaint_mask[:, :, None].clip(0, 255).astype(np.uint8)
|
||||
y = np.concatenate([y, inpaint_mask], axis=2)
|
||||
|
||||
return y
|
||||
|
||||
# if resize_mode == external_code.ResizeMode.RESIZE:
|
||||
if resize_mode == "just_resize": # RESIZE
|
||||
np_img = high_quality_resize(np_img, (w, h))
|
||||
np_img = safe_numpy(np_img)
|
||||
return get_pytorch_control(np_img), np_img
|
||||
|
||||
old_h, old_w, _ = np_img.shape
|
||||
old_w = float(old_w)
|
||||
old_h = float(old_h)
|
||||
k0 = float(h) / old_h
|
||||
k1 = float(w) / old_w
|
||||
|
||||
safeint = lambda x: int(np.round(x))
|
||||
|
||||
# if resize_mode == external_code.ResizeMode.OUTER_FIT:
|
||||
if resize_mode == "fill_resize": # OUTER_FIT
|
||||
k = min(k0, k1)
|
||||
borders = np.concatenate([np_img[0, :, :], np_img[-1, :, :], np_img[:, 0, :], np_img[:, -1, :]], axis=0)
|
||||
high_quality_border_color = np.median(borders, axis=0).astype(np_img.dtype)
|
||||
if len(high_quality_border_color) == 4:
|
||||
# Inpaint hijack
|
||||
high_quality_border_color[3] = 255
|
||||
high_quality_background = np.tile(high_quality_border_color[None, None], [h, w, 1])
|
||||
np_img = high_quality_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
new_h, new_w, _ = np_img.shape
|
||||
pad_h = max(0, (h - new_h) // 2)
|
||||
pad_w = max(0, (w - new_w) // 2)
|
||||
high_quality_background[pad_h:pad_h + new_h, pad_w:pad_w + new_w] = np_img
|
||||
np_img = high_quality_background
|
||||
np_img = safe_numpy(np_img)
|
||||
return get_pytorch_control(np_img), np_img
|
||||
else: # resize_mode == "crop_resize" (INNER_FIT)
|
||||
k = max(k0, k1)
|
||||
np_img = high_quality_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
new_h, new_w, _ = np_img.shape
|
||||
pad_h = max(0, (new_h - h) // 2)
|
||||
pad_w = max(0, (new_w - w) // 2)
|
||||
np_img = np_img[pad_h:pad_h + h, pad_w:pad_w + w]
|
||||
np_img = safe_numpy(np_img)
|
||||
return get_pytorch_control(np_img), np_img
|
||||
|
||||
def prepare_control_image(
|
||||
# image used to be Union[PIL.Image.Image, List[PIL.Image.Image], torch.Tensor, List[torch.Tensor]]
|
||||
# but now should be able to assume that image is a single PIL.Image, which simplifies things
|
||||
image: Image,
|
||||
# FIXME: need to fix hardwiring of width and height, change to basing on latents dimensions?
|
||||
# latents_to_match_resolution, # TorchTensor of shape (batch_size, 3, height, width)
|
||||
width=512, # should be 8 * latent.shape[3]
|
||||
height=512, # should be 8 * latent height[2]
|
||||
# batch_size=1, # currently no batching
|
||||
# num_images_per_prompt=1, # currently only single image
|
||||
device="cuda",
|
||||
dtype=torch.float16,
|
||||
do_classifier_free_guidance=True,
|
||||
control_mode="balanced",
|
||||
resize_mode="just_resize_simple",
|
||||
):
|
||||
# FIXME: implement "crop_resize_simple" and "fill_resize_simple", or pull them out
|
||||
if (resize_mode == "just_resize_simple" or
|
||||
resize_mode == "crop_resize_simple" or
|
||||
resize_mode == "fill_resize_simple"):
|
||||
image = image.convert("RGB")
|
||||
if (resize_mode == "just_resize_simple"):
|
||||
image = image.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
|
||||
elif (resize_mode == "crop_resize_simple"): # not yet implemented
|
||||
pass
|
||||
elif (resize_mode == "fill_resize_simple"): # not yet implemented
|
||||
pass
|
||||
nimage = np.array(image)
|
||||
nimage = nimage[None, :]
|
||||
nimage = np.concatenate([nimage], axis=0)
|
||||
# normalizing RGB values to [0,1] range (in PIL.Image they are [0-255])
|
||||
nimage = np.array(nimage).astype(np.float32) / 255.0
|
||||
nimage = nimage.transpose(0, 3, 1, 2)
|
||||
timage = torch.from_numpy(nimage)
|
||||
|
||||
# use fancy lvmin controlnet resizing
|
||||
elif (resize_mode == "just_resize" or resize_mode == "crop_resize" or resize_mode == "fill_resize"):
|
||||
nimage = np.array(image)
|
||||
timage, nimage = np_img_resize(
|
||||
np_img=nimage,
|
||||
resize_mode=resize_mode,
|
||||
h=height,
|
||||
w=width,
|
||||
# device=torch.device('cpu')
|
||||
device=device,
|
||||
)
|
||||
else:
    print("ERROR: invalid resize_mode ==> ", resize_mode)
    exit(1)
|
||||
|
||||
timage = timage.to(device=device, dtype=dtype)
|
||||
cfg_injection = (control_mode == "more_control" or control_mode == "unbalanced")
|
||||
if do_classifier_free_guidance and not cfg_injection:
|
||||
timage = torch.cat([timage] * 2)
|
||||
return timage
|
||||
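The tail of `prepare_control_image` above duplicates the control tensor for the unconditional and conditional halves of a classifier-free-guidance batch, unless the control mode injects control into the conditional half only. A standalone sketch of just that step; the function name is illustrative:

```python
import torch

def maybe_duplicate_for_cfg(control: torch.Tensor, control_mode: str, do_cfg: bool) -> torch.Tensor:
    """Duplicate the control tensor for CFG unless the mode applies control to the conditional batch only."""
    cfg_injection = control_mode in ("more_control", "unbalanced")
    if do_cfg and not cfg_injection:
        control = torch.cat([control] * 2)
    return control

print(maybe_duplicate_for_cfg(torch.zeros(1, 3, 64, 64), "balanced", True).shape)      # [2, 3, 64, 64]
print(maybe_duplicate_for_cfg(torch.zeros(1, 3, 64, 64), "more_control", True).shape)  # [1, 3, 64, 64]
```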
@@ -1,9 +1,30 @@
|
||||
import torch
|
||||
from PIL import Image
|
||||
from invokeai.app.models.exceptions import CanceledException
|
||||
from invokeai.app.models.image import ProgressImage
|
||||
from ..invocations.baseinvocation import InvocationContext
|
||||
from ...backend.util.util import image_to_dataURL
|
||||
from ...backend.generator.base import Generator
|
||||
from ...backend.stable_diffusion import PipelineIntermediateState
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
|
||||
|
||||
def sample_to_lowres_estimated_image(samples, latent_rgb_factors, smooth_matrix = None):
    latent_image = samples[0].permute(1, 2, 0) @ latent_rgb_factors

    if smooth_matrix is not None:
        latent_image = latent_image.unsqueeze(0).permute(3, 0, 1, 2)
        latent_image = torch.nn.functional.conv2d(latent_image, smooth_matrix.reshape((1,1,3,3)), padding=1)
        latent_image = latent_image.permute(1, 2, 3, 0).squeeze(0)

    latents_ubyte = (
        ((latent_image + 1) / 2)
        .clamp(0, 1)  # change scale from -1..1 to 0..1
        .mul(0xFF)  # to 0..255
        .byte()
    ).cpu()

    return Image.fromarray(latents_ubyte.numpy())
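For context, the preview helper above projects a 4-channel latent into a rough RGB estimate by multiplying with a small per-channel factor matrix. A minimal sketch of that projection with random stand-in values (the real factor matrices appear further down in this diff):

```python
import torch

latent = torch.randn(4, 64, 64)          # (channels, H, W), like samples[0]
factors = torch.randn(4, 3)              # stand-in for v1_5_latent_rgb_factors
rgb = latent.permute(1, 2, 0) @ factors  # -> (64, 64, 3) rough RGB estimate
print(rgb.shape)
```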
|
||||
|
||||
def stable_diffusion_step_callback(
|
||||
@@ -37,7 +58,24 @@ def stable_diffusion_step_callback(
|
||||
# step = intermediate_state.step
|
||||
|
||||
# TODO: only output a preview image when requested
|
||||
image = Generator.sample_to_lowres_estimated_image(sample)
|
||||
|
||||
# originally adapted from code by @erucipe and @keturn here:
|
||||
# https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7
|
||||
|
||||
# these updated numbers for v1.5 are from @torridgristle
|
||||
v1_5_latent_rgb_factors = torch.tensor(
|
||||
[
|
||||
# R G B
|
||||
[0.3444, 0.1385, 0.0670], # L1
|
||||
[0.1247, 0.4027, 0.1494], # L2
|
||||
[-0.3192, 0.2513, 0.2103], # L3
|
||||
[-0.1307, -0.1874, -0.7445], # L4
|
||||
],
|
||||
dtype=sample.dtype,
|
||||
device=sample.device,
|
||||
)
|
||||
|
||||
image = sample_to_lowres_estimated_image(sample, v1_5_latent_rgb_factors)
|
||||
|
||||
(width, height) = image.size
|
||||
width *= 8
|
||||
@@ -53,3 +91,56 @@ def stable_diffusion_step_callback(
|
||||
step=intermediate_state.step,
|
||||
total_steps=node["steps"],
|
||||
)
|
||||
|
||||
def stable_diffusion_xl_step_callback(
|
||||
context: InvocationContext,
|
||||
node: dict,
|
||||
source_node_id: str,
|
||||
sample,
|
||||
step,
|
||||
total_steps,
|
||||
):
|
||||
if context.services.queue.is_canceled(context.graph_execution_state_id):
|
||||
raise CanceledException
|
||||
|
||||
sdxl_latent_rgb_factors = torch.tensor(
|
||||
[
|
||||
# R G B
|
||||
[ 0.3816, 0.4930, 0.5320],
|
||||
[-0.3753, 0.1631, 0.1739],
|
||||
[ 0.1770, 0.3588, -0.2048],
|
||||
[-0.4350, -0.2644, -0.4289],
|
||||
],
|
||||
dtype=sample.dtype,
|
||||
device=sample.device,
|
||||
)
|
||||
|
||||
sdxl_smooth_matrix = torch.tensor(
|
||||
[
|
||||
#[ 0.0478, 0.1285, 0.0478],
|
||||
#[ 0.1285, 0.2948, 0.1285],
|
||||
#[ 0.0478, 0.1285, 0.0478],
|
||||
[0.0358, 0.0964, 0.0358],
|
||||
[0.0964, 0.4711, 0.0964],
|
||||
[0.0358, 0.0964, 0.0358],
|
||||
],
|
||||
dtype=sample.dtype,
|
||||
device=sample.device,
|
||||
)
|
||||
|
||||
image = sample_to_lowres_estimated_image(sample, sdxl_latent_rgb_factors, sdxl_smooth_matrix)
|
||||
|
||||
(width, height) = image.size
|
||||
width *= 8
|
||||
height *= 8
|
||||
|
||||
dataURL = image_to_dataURL(image, image_format="JPEG")
|
||||
|
||||
context.services.events.emit_generator_progress(
|
||||
graph_execution_state_id=context.graph_execution_state_id,
|
||||
node=node,
|
||||
source_node_id=source_node_id,
|
||||
progress_image=ProgressImage(width=width, height=height, dataURL=dataURL),
|
||||
step=step,
|
||||
total_steps=total_steps,
|
||||
)
|
||||
@@ -23,6 +23,7 @@ from urllib import request
|
||||
|
||||
import npyscreen
|
||||
import transformers
|
||||
import omegaconf
|
||||
from diffusers import AutoencoderKL
|
||||
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
||||
from huggingface_hub import HfFolder
|
||||
@@ -44,6 +45,7 @@ from invokeai.backend.util.logging import InvokeAILogger
|
||||
from invokeai.frontend.install.model_install import addModelsForm, process_and_execute
|
||||
from invokeai.frontend.install.widgets import (
|
||||
CenteredButtonPress,
|
||||
FileBox,
|
||||
IntTitleSlider,
|
||||
set_min_terminal_size,
|
||||
CyclingForm,
|
||||
@@ -409,21 +411,21 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
|
||||
self.nextrely += 1
|
||||
self.add_widget_intelligent(
|
||||
npyscreen.FixedText,
|
||||
value="Directories containing textual inversion, controlnet and LoRA models (<tab> autocompletes, ctrl-N advances):",
|
||||
value="Folder to recursively scan for new checkpoints, ControlNets, LoRAs and TI models (<tab> autocompletes, ctrl-N advances):",
|
||||
editable=False,
|
||||
color="CONTROL",
|
||||
)
|
||||
self.autoimport_dirs = {}
|
||||
for description, config_name, path in autoimport_paths(old_opts):
|
||||
self.autoimport_dirs[config_name] = self.add_widget_intelligent(
|
||||
npyscreen.TitleFilename,
|
||||
name=description+':',
|
||||
value=str(path),
|
||||
self.autoimport_dirs['autoimport_dir'] = self.add_widget_intelligent(
|
||||
FileBox,
|
||||
name=f'Autoimport Folder',
|
||||
value=str(config.root_path / config.autoimport_dir),
|
||||
select_dir=True,
|
||||
must_exist=False,
|
||||
use_two_lines=False,
|
||||
labelColor="GOOD",
|
||||
begin_entry_at=32,
|
||||
max_height = 3,
|
||||
scroll_exit=True
|
||||
)
|
||||
self.nextrely += 1
|
||||
@@ -567,7 +569,14 @@ def default_startup_options(init_file: Path) -> Namespace:
|
||||
return opts
|
||||
|
||||
def default_user_selections(program_opts: Namespace) -> InstallSelections:
|
||||
installer = ModelInstall(config)
|
||||
|
||||
try:
|
||||
installer = ModelInstall(config)
|
||||
except omegaconf.errors.ConfigKeyError:
|
||||
logger.warning('Your models.yaml file is corrupt or out of date. Reinitializing')
|
||||
initialize_rootdir(config.root_path, True)
|
||||
installer = ModelInstall(config)
|
||||
|
||||
models = installer.all_models()
|
||||
return InstallSelections(
|
||||
install_models=[models[installer.default_model()].path or models[installer.default_model()].repo_id]
|
||||
@@ -575,19 +584,8 @@ def default_user_selections(program_opts: Namespace) -> InstallSelections:
|
||||
else [models[x].path or models[x].repo_id for x in installer.recommended_models()]
|
||||
if program_opts.yes_to_all
|
||||
else list(),
|
||||
# scan_directory=None,
|
||||
# autoscan_on_startup=None,
|
||||
)
|
||||
|
||||
# -------------------------------------
|
||||
def autoimport_paths(config: InvokeAIAppConfig):
|
||||
return [
|
||||
('Checkpoints & diffusers models', 'autoimport_dir', config.root_path / config.autoimport_dir),
|
||||
('LoRA/LyCORIS models', 'lora_dir', config.root_path / config.lora_dir),
|
||||
('Controlnet models', 'controlnet_dir', config.root_path / config.controlnet_dir),
|
||||
('Textual Inversion Embeddings', 'embedding_dir', config.root_path / config.embedding_dir),
|
||||
]
|
||||
|
||||
# -------------------------------------
|
||||
def initialize_rootdir(root: Path, yes_to_all: bool = False):
|
||||
logger.info("** INITIALIZING INVOKEAI RUNTIME DIRECTORY **")
|
||||
@@ -663,7 +661,7 @@ def write_opts(opts: Namespace, init_file: Path):
|
||||
with open(init_file,'w', encoding='utf-8') as file:
|
||||
file.write(new_config.to_yaml())
|
||||
|
||||
if opts.hf_token:
|
||||
if hasattr(opts,'hf_token') and opts.hf_token:
|
||||
HfLogin(opts.hf_token)
|
||||
|
||||
# -------------------------------------
|
||||
|
||||
@@ -858,7 +858,7 @@ class ModelManager(object):
|
||||
loaded_files = set()
|
||||
new_models_found = False
|
||||
|
||||
self.logger.info(f'scanning {self.app_config.models_path} for new models')
|
||||
self.logger.info(f'Scanning {self.app_config.models_path} for new models')
|
||||
with Chdir(self.app_config.root_path):
|
||||
for model_key, model_config in list(self.models.items()):
|
||||
model_name, cur_base_model, cur_model_type = self.parse_key(model_key)
|
||||
@@ -956,7 +956,7 @@ class ModelManager(object):
|
||||
config.lora_dir,
|
||||
config.embedding_dir,
|
||||
config.controlnet_dir,
|
||||
]
|
||||
] if x
|
||||
}
|
||||
scanner = ScanAndImport(directories, self.logger, ignore=known_paths, installer=installer)
|
||||
scanner.search()
|
||||
|
||||
@@ -219,6 +219,7 @@ class ControlNetData:
|
||||
begin_step_percent: float = Field(default=0.0)
|
||||
end_step_percent: float = Field(default=1.0)
|
||||
control_mode: str = Field(default="balanced")
|
||||
resize_mode: str = Field(default="just_resize")
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -653,7 +654,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
|
||||
if cfg_injection:
|
||||
# Inferred ControlNet only for the conditional batch.
|
||||
# To apply the output of ControlNet to both the unconditional and conditional batches,
|
||||
# add 0 to the unconditional batch to keep it unchanged.
|
||||
# prepend zeros for unconditional batch
|
||||
down_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_samples]
|
||||
mid_sample = torch.cat([torch.zeros_like(mid_sample), mid_sample])
|
||||
|
||||
@@ -954,53 +955,3 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
|
||||
debug_image(
|
||||
img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True
|
||||
)
|
||||
|
||||
# Copied from diffusers pipeline_stable_diffusion_controlnet.py
|
||||
# Returns torch.Tensor of shape (batch_size, 3, height, width)
|
||||
@staticmethod
|
||||
def prepare_control_image(
|
||||
image,
|
||||
# FIXME: need to fix hardwiring of width and height, change to basing on latents dimensions?
|
||||
# latents,
|
||||
width=512, # should be 8 * latent.shape[3]
|
||||
height=512, # should be 8 * latent height[2]
|
||||
batch_size=1,
|
||||
num_images_per_prompt=1,
|
||||
device="cuda",
|
||||
dtype=torch.float16,
|
||||
do_classifier_free_guidance=True,
|
||||
control_mode="balanced"
|
||||
):
|
||||
|
||||
if not isinstance(image, torch.Tensor):
|
||||
if isinstance(image, PIL.Image.Image):
|
||||
image = [image]
|
||||
|
||||
if isinstance(image[0], PIL.Image.Image):
|
||||
images = []
|
||||
for image_ in image:
|
||||
image_ = image_.convert("RGB")
|
||||
image_ = image_.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
|
||||
image_ = np.array(image_)
|
||||
image_ = image_[None, :]
|
||||
images.append(image_)
|
||||
image = images
|
||||
image = np.concatenate(image, axis=0)
|
||||
image = np.array(image).astype(np.float32) / 255.0
|
||||
image = image.transpose(0, 3, 1, 2)
|
||||
image = torch.from_numpy(image)
|
||||
elif isinstance(image[0], torch.Tensor):
|
||||
image = torch.cat(image, dim=0)
|
||||
|
||||
image_batch_size = image.shape[0]
|
||||
if image_batch_size == 1:
|
||||
repeat_by = batch_size
|
||||
else:
|
||||
# image batch size is the same as prompt batch size
|
||||
repeat_by = num_images_per_prompt
|
||||
image = image.repeat_interleave(repeat_by, dim=0)
|
||||
image = image.to(device=device, dtype=dtype)
|
||||
cfg_injection = (control_mode == "more_control" or control_mode == "unbalanced")
|
||||
if do_classifier_free_guidance and not cfg_injection:
|
||||
image = torch.cat([image] * 2)
|
||||
return image
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
import math
|
||||
import torch
|
||||
import diffusers
|
||||
|
||||
|
||||
if torch.backends.mps.is_available():
|
||||
@@ -61,3 +63,150 @@ def new_torch_interpolate(input, size=None, scale_factor=None, mode='nearest', a
|
||||
return _torch_interpolate(input, size, scale_factor, mode, align_corners, recompute_scale_factor, antialias)
|
||||
|
||||
torch.nn.functional.interpolate = new_torch_interpolate
|
||||
|
||||
# TODO: refactor it
|
||||
_SlicedAttnProcessor = diffusers.models.attention_processor.SlicedAttnProcessor
|
||||
class ChunkedSlicedAttnProcessor:
|
||||
r"""
|
||||
Processor for implementing sliced attention.
|
||||
|
||||
Args:
|
||||
slice_size (`int`, *optional*):
|
||||
The number of steps to compute attention. Uses as many slices as `attention_head_dim // slice_size`, and
|
||||
`attention_head_dim` must be a multiple of the `slice_size`.
|
||||
"""
|
||||
|
||||
def __init__(self, slice_size):
|
||||
assert isinstance(slice_size, int)
|
||||
slice_size = 1 # TODO: maybe implement chunking in batches too when enough memory
|
||||
self.slice_size = slice_size
|
||||
self._sliced_attn_processor = _SlicedAttnProcessor(slice_size)
|
||||
|
||||
def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None):
|
||||
if self.slice_size != 1:
|
||||
return self._sliced_attn_processor(attn, hidden_states, encoder_hidden_states, attention_mask)
|
||||
|
||||
residual = hidden_states
|
||||
|
||||
input_ndim = hidden_states.ndim
|
||||
|
||||
if input_ndim == 4:
|
||||
batch_size, channel, height, width = hidden_states.shape
|
||||
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
|
||||
|
||||
batch_size, sequence_length, _ = (
|
||||
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
||||
)
|
||||
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
||||
|
||||
if attn.group_norm is not None:
|
||||
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
||||
|
||||
query = attn.to_q(hidden_states)
|
||||
dim = query.shape[-1]
|
||||
query = attn.head_to_batch_dim(query)
|
||||
|
||||
if encoder_hidden_states is None:
|
||||
encoder_hidden_states = hidden_states
|
||||
elif attn.norm_cross:
|
||||
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
|
||||
|
||||
key = attn.to_k(encoder_hidden_states)
|
||||
value = attn.to_v(encoder_hidden_states)
|
||||
key = attn.head_to_batch_dim(key)
|
||||
value = attn.head_to_batch_dim(value)
|
||||
|
||||
batch_size_attention, query_tokens, _ = query.shape
|
||||
hidden_states = torch.zeros(
|
||||
(batch_size_attention, query_tokens, dim // attn.heads), device=query.device, dtype=query.dtype
|
||||
)
|
||||
|
||||
chunk_tmp_tensor = torch.empty(self.slice_size, query.shape[1], key.shape[1], dtype=query.dtype, device=query.device)
|
||||
|
||||
for i in range(batch_size_attention // self.slice_size):
|
||||
start_idx = i * self.slice_size
|
||||
end_idx = (i + 1) * self.slice_size
|
||||
|
||||
query_slice = query[start_idx:end_idx]
|
||||
key_slice = key[start_idx:end_idx]
|
||||
attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None
|
||||
|
||||
self.get_attention_scores_chunked(attn, query_slice, key_slice, attn_mask_slice, hidden_states[start_idx:end_idx], value[start_idx:end_idx], chunk_tmp_tensor)
|
||||
|
||||
hidden_states = attn.batch_to_head_dim(hidden_states)
|
||||
|
||||
# linear proj
|
||||
hidden_states = attn.to_out[0](hidden_states)
|
||||
# dropout
|
||||
hidden_states = attn.to_out[1](hidden_states)
|
||||
|
||||
if input_ndim == 4:
|
||||
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
||||
|
||||
if attn.residual_connection:
|
||||
hidden_states = hidden_states + residual
|
||||
|
||||
hidden_states = hidden_states / attn.rescale_output_factor
|
||||
|
||||
return hidden_states
|
||||
|
||||
|
||||
def get_attention_scores_chunked(self, attn, query, key, attention_mask, hidden_states, value, chunk):
|
||||
# batch size = 1
|
||||
assert query.shape[0] == 1
|
||||
assert key.shape[0] == 1
|
||||
assert value.shape[0] == 1
|
||||
assert hidden_states.shape[0] == 1
|
||||
|
||||
dtype = query.dtype
|
||||
if attn.upcast_attention:
|
||||
query = query.float()
|
||||
key = key.float()
|
||||
|
||||
#out_item_size = query.dtype.itemsize
|
||||
#if attn.upcast_attention:
|
||||
# out_item_size = torch.float32.itemsize
|
||||
out_item_size = query.element_size()
|
||||
if attn.upcast_attention:
|
||||
out_item_size = 4
|
||||
|
||||
chunk_size = 2 ** 29
|
||||
|
||||
out_size = query.shape[1] * key.shape[1] * out_item_size
|
||||
chunks_count = min(query.shape[1], math.ceil((out_size - 1) / chunk_size))
|
||||
chunk_step = max(1, int(query.shape[1] / chunks_count))
|
||||
|
||||
key = key.transpose(-1, -2)
|
||||
|
||||
def _get_chunk_view(tensor, start, length):
|
||||
if start + length > tensor.shape[1]:
|
||||
length = tensor.shape[1] - start
|
||||
#print(f"view: [{tensor.shape[0]},{tensor.shape[1]},{tensor.shape[2]}] - start: {start}, length: {length}")
|
||||
return tensor[:,start:start+length]
|
||||
|
||||
for chunk_pos in range(0, query.shape[1], chunk_step):
|
||||
if attention_mask is not None:
|
||||
torch.baddbmm(
|
||||
_get_chunk_view(attention_mask, chunk_pos, chunk_step),
|
||||
_get_chunk_view(query, chunk_pos, chunk_step),
|
||||
key,
|
||||
beta=1,
|
||||
alpha=attn.scale,
|
||||
out=chunk,
|
||||
)
|
||||
else:
|
||||
torch.baddbmm(
|
||||
torch.zeros((1,1,1), device=query.device, dtype=query.dtype),
|
||||
_get_chunk_view(query, chunk_pos, chunk_step),
|
||||
key,
|
||||
beta=0,
|
||||
alpha=attn.scale,
|
||||
out=chunk,
|
||||
)
|
||||
chunk = chunk.softmax(dim=-1)
|
||||
torch.bmm(chunk, value, out=_get_chunk_view(hidden_states, chunk_pos, chunk_step))
|
||||
|
||||
#del chunk
|
||||
|
||||
|
||||
diffusers.models.attention_processor.SlicedAttnProcessor = ChunkedSlicedAttnProcessor
|
||||
|
||||
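The `ChunkedSlicedAttnProcessor` above computes attention scores in query chunks so the full (query_len x key_len) score matrix never has to be materialized at once. A self-contained sketch of that chunking idea (this is not the diffusers API, and the attention scale factor is omitted for brevity):

```python
import torch

def chunked_attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, chunk: int) -> torch.Tensor:
    """Compute softmax(q @ k^T) @ v one query chunk at a time to bound peak memory."""
    out = torch.empty(q.shape[0], v.shape[1])
    for start in range(0, q.shape[0], chunk):
        scores = q[start:start + chunk] @ k.T          # only (chunk, key_len) scores live at once
        out[start:start + chunk] = scores.softmax(dim=-1) @ v
    return out

q, k, v = torch.randn(8, 16), torch.randn(32, 16), torch.randn(32, 16)
assert torch.allclose(chunked_attention(q, k, v, chunk=3), (q @ k.T).softmax(dim=-1) @ v, atol=1e-5)
```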
169  invokeai/frontend/web/dist/assets/App-06ea4e5e.js (vendored, new file): diff suppressed because one or more lines are too long
169  invokeai/frontend/web/dist/assets/App-2fa7e2d4.js (vendored): diff suppressed because one or more lines are too long
1    invokeai/frontend/web/dist/assets/MantineProvider-b5842fc1.js (vendored, new file): diff suppressed because one or more lines are too long
125  invokeai/frontend/web/dist/assets/index-0ec007dd.js (vendored): diff suppressed because one or more lines are too long
125  invokeai/frontend/web/dist/assets/index-e2437518.js (vendored, new file): diff suppressed because one or more lines are too long
2    invokeai/frontend/web/dist/index.html (vendored)
@@ -12,7 +12,7 @@
|
||||
margin: 0;
|
||||
}
|
||||
</style>
|
||||
<script type="module" crossorigin src="./assets/index-0ec007dd.js"></script>
|
||||
<script type="module" crossorigin src="./assets/index-e2437518.js"></script>
|
||||
</head>
|
||||
|
||||
<body dir="ltr">
|
||||
|
||||
14   invokeai/frontend/web/dist/locales/en.json (vendored)
@@ -455,7 +455,12 @@
|
||||
"addDifference": "Add Difference",
|
||||
"pickModelType": "Pick Model Type",
|
||||
"selectModel": "Select Model",
|
||||
"importModels": "Import Models"
|
||||
"importModels": "Import Models",
|
||||
"settings": "Settings",
|
||||
"syncModels": "Sync Models",
|
||||
"syncModelsDesc": "If your models are out of sync with the backend, you can refresh them up using this option. This is generally handy in cases where you manually update your models.yaml file or add models to the InvokeAI root folder after the application has booted.",
|
||||
"modelsSynced": "Models Synced",
|
||||
"modelSyncFailed": "Model Sync Failed"
|
||||
},
|
||||
"parameters": {
|
||||
"general": "General",
|
||||
@@ -547,7 +552,8 @@
|
||||
"saveSteps": "Save images every n steps",
|
||||
"confirmOnDelete": "Confirm On Delete",
|
||||
"displayHelpIcons": "Display Help Icons",
|
||||
"useCanvasBeta": "Use Canvas Beta Layout",
|
||||
"alternateCanvasLayout": "Alternate Canvas Layout",
|
||||
"enableNodesEditor": "Enable Nodes Editor",
|
||||
"enableImageDebugging": "Enable Image Debugging",
|
||||
"useSlidersForAll": "Use Sliders For All Options",
|
||||
"showProgressInViewer": "Show Progress Images in Viewer",
|
||||
@@ -564,7 +570,9 @@
|
||||
"ui": "User Interface",
|
||||
"favoriteSchedulers": "Favorite Schedulers",
|
||||
"favoriteSchedulersPlaceholder": "No schedulers favorited",
|
||||
"showAdvancedOptions": "Show Advanced Options"
|
||||
"showAdvancedOptions": "Show Advanced Options",
|
||||
"experimental": "Experimental",
|
||||
"beta": "Beta"
|
||||
},
|
||||
"toast": {
|
||||
"serverError": "Server Error",
|
||||
|
||||
@@ -455,7 +455,12 @@
|
||||
"addDifference": "Add Difference",
|
||||
"pickModelType": "Pick Model Type",
|
||||
"selectModel": "Select Model",
|
||||
"importModels": "Import Models"
|
||||
"importModels": "Import Models",
|
||||
"settings": "Settings",
|
||||
"syncModels": "Sync Models",
|
||||
"syncModelsDesc": "If your models are out of sync with the backend, you can refresh them up using this option. This is generally handy in cases where you manually update your models.yaml file or add models to the InvokeAI root folder after the application has booted.",
|
||||
"modelsSynced": "Models Synced",
|
||||
"modelSyncFailed": "Model Sync Failed"
|
||||
},
|
||||
"parameters": {
|
||||
"general": "General",
|
||||
@@ -547,7 +552,8 @@
|
||||
"saveSteps": "Save images every n steps",
|
||||
"confirmOnDelete": "Confirm On Delete",
|
||||
"displayHelpIcons": "Display Help Icons",
|
||||
"useCanvasBeta": "Use Canvas Beta Layout",
|
||||
"alternateCanvasLayout": "Alternate Canvas Layout",
|
||||
"enableNodesEditor": "Enable Nodes Editor",
|
||||
"enableImageDebugging": "Enable Image Debugging",
|
||||
"useSlidersForAll": "Use Sliders For All Options",
|
||||
"showProgressInViewer": "Show Progress Images in Viewer",
|
||||
@@ -564,7 +570,9 @@
|
||||
"ui": "User Interface",
|
||||
"favoriteSchedulers": "Favorite Schedulers",
|
||||
"favoriteSchedulersPlaceholder": "No schedulers favorited",
|
||||
"showAdvancedOptions": "Show Advanced Options"
|
||||
"showAdvancedOptions": "Show Advanced Options",
|
||||
"experimental": "Experimental",
|
||||
"beta": "Beta"
|
||||
},
|
||||
"toast": {
|
||||
"serverError": "Server Error",
|
||||
|
||||
@@ -175,9 +175,7 @@ export const isValidDrop = (
|
||||
const destinationBoard = overData.context.boardId;
|
||||
|
||||
const isSameBoard = currentBoard === destinationBoard;
|
||||
const isDestinationValid = !currentBoard
|
||||
? destinationBoard !== 'no_board'
|
||||
: true;
|
||||
const isDestinationValid = !currentBoard ? destinationBoard : true;
|
||||
|
||||
return !isSameBoard && isDestinationValid;
|
||||
}
|
||||
|
||||
@@ -19,10 +19,10 @@ export const addFirstListImagesListener = () => {
|
||||
action,
|
||||
{ getState, dispatch, unsubscribe, cancelActiveListeners }
|
||||
) => {
|
||||
// Only run this listener on the first listImages request for `images` categories
|
||||
// Only run this listener on the first listImages request for no-board images
|
||||
if (
|
||||
action.meta.arg.queryCacheKey !==
|
||||
getListImagesUrl({ categories: IMAGE_CATEGORIES })
|
||||
getListImagesUrl({ board_id: 'none', categories: IMAGE_CATEGORIES })
|
||||
) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1,20 +1,20 @@
import { log } from 'app/logging/useLogger';
import {
ASSETS_CATEGORIES,
IMAGE_CATEGORIES,
boardIdSelected,
galleryViewChanged,
imageSelected,
} from 'features/gallery/store/gallerySlice';
import {
getBoardIdQueryParamForBoard,
getCategoriesQueryParamForBoard,
} from 'features/gallery/store/util';
import { imagesApi } from 'services/api/endpoints/images';
import { startAppListening } from '..';
import { isAnyOf } from '@reduxjs/toolkit';

const moduleLog = log.child({ namespace: 'boards' });

export const addBoardIdSelectedListener = () => {
startAppListening({
actionCreator: boardIdSelected,
matcher: isAnyOf(boardIdSelected, galleryViewChanged),
effect: async (
action,
{ getState, dispatch, condition, cancelActiveListeners }
@@ -22,12 +22,21 @@ export const addBoardIdSelectedListener = () => {
// Cancel any in-progress instances of this listener, we don't want to select an image from a previous board
cancelActiveListeners();

const _board_id = action.payload;
// when a board is selected, we need to wait until the board has loaded *some* images, then select the first one
const state = getState();

const categories = getCategoriesQueryParamForBoard(_board_id);
const board_id = getBoardIdQueryParamForBoard(_board_id);
const queryArgs = { board_id, categories };
const board_id = boardIdSelected.match(action)
? action.payload
: state.gallery.selectedBoardId;

const galleryView = galleryViewChanged.match(action)
? action.payload
: state.gallery.galleryView;

// when a board is selected, we need to wait until the board has loaded *some* images, then select the first one
const categories =
galleryView === 'images' ? IMAGE_CATEGORIES : ASSETS_CATEGORIES;

const queryArgs = { board_id: board_id ?? 'none', categories };

// wait until the board has some images - maybe it already has some from a previous fetch
// must use getState() to ensure we do not have stale state
@@ -35,7 +44,7 @@ export const addBoardIdSelectedListener = () => {
() =>
imagesApi.endpoints.listImages.select(queryArgs)(getState())
.isSuccess,
1000
5000
);

if (isSuccess) {

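The timeout handed to the listener middleware's condition() helper grows from 1000 ms to 5000 ms here, giving slower boards more time to return their first page of images before the listener gives up. As a reminder of the semantics, a hedged sketch reusing the names from the hunk above: condition resolves to true once the predicate passes and to false if the timeout elapses first.

// Sketch only: `condition`, `getState`, `imagesApi` and `queryArgs` come from
// the listener effect shown in the hunk above.
const isSuccess = await condition(
  () =>
    imagesApi.endpoints.listImages.select(queryArgs)(getState()).isSuccess,
  5000 // resolves to false if the query has not succeeded within 5 s
);
if (!isSuccess) {
  return; // leave the current gallery selection untouched if the board never loads
}
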
@@ -45,7 +45,7 @@ export const addCanvasMergedListener = () => {
relativeTo: canvasBaseLayer.getParent(),
});

const imageUploadedRequest = dispatch(
const imageDTO = await dispatch(
imagesApi.endpoints.uploadImage.initiate({
file: new File([blob], 'mergedCanvas.png', {
type: 'image/png',
@@ -57,17 +57,10 @@ export const addCanvasMergedListener = () => {
toastOptions: { title: 'Canvas Merged' },
},
})
);

const [{ payload }] = await take(
(uploadedImageAction) =>
imagesApi.endpoints.uploadImage.matchFulfilled(uploadedImageAction) &&
uploadedImageAction.meta.requestId === imageUploadedRequest.requestId
);
).unwrap();

// TODO: I can't figure out how to do the type narrowing in the `take()` so just brute forcing it here
const { image_name } =
payload as typeof imagesApi.endpoints.uploadImage.Types.ResultType;
const { image_name } = imageDTO;

dispatch(
setMergedCanvas({

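The pattern removed here (dispatch the upload, remember its requestId, then take() until a matching matchFulfilled action arrives) is replaced by awaiting the mutation itself. A hedged sketch of the general RTK Query shape: only the file argument is shown, the real call also passes the category, intermediate flag and post-upload options from the hunk, and dispatch/blob come from the surrounding listener effect.

import { imagesApi } from 'services/api/endpoints/images';

// Dispatching a mutation endpoint returns a promise-like result whose
// unwrap() resolves with the typed payload or throws on failure, so no
// manual take()/requestId matching or type assertion is needed.
const imageDTO = await dispatch(
  imagesApi.endpoints.uploadImage.initiate({
    file: new File([blob], 'mergedCanvas.png', { type: 'image/png' }),
  })
).unwrap();

const { image_name } = imageDTO; // already typed, no cast required
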
@@ -34,6 +34,8 @@ export const addCanvasSavedToGalleryListener = () => {
}),
image_category: 'general',
is_intermediate: false,
board_id: state.gallery.autoAddBoardId,
crop_visible: true,
postUploadAction: {
type: 'TOAST',
toastOptions: { title: 'Canvas Saved to Gallery' },

@@ -156,14 +156,13 @@ export const addImageDroppedListener = () => {
if (
overData.actionType === 'MOVE_BOARD' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO &&
overData.context.boardId
activeData.payload.imageDTO
) {
const { imageDTO } = activeData.payload;
const { boardId } = overData.context;

// if the board is "No Board", this is a remove action
if (boardId === 'no_board') {
// image was dropped on the "NoBoardBoard"
if (!boardId) {
dispatch(
imagesApi.endpoints.removeImageFromBoard.initiate({
imageDTO,
@@ -172,12 +171,7 @@ export const addImageDroppedListener = () => {
return;
}

// Handle adding image to batch
if (boardId === 'batch') {
// TODO
}

// Otherwise, add the image to the board
// image was dropped on a user board
dispatch(
imagesApi.endpoints.addImageToBoard.initiate({
imageDTO,

@@ -5,30 +5,30 @@ import { startAppListening } from '..';
|
||||
const moduleLog = log.child({ namespace: 'image' });
|
||||
|
||||
export const addImageUpdatedFulfilledListener = () => {
|
||||
startAppListening({
|
||||
matcher: imagesApi.endpoints.updateImage.matchFulfilled,
|
||||
effect: (action, { dispatch, getState }) => {
|
||||
moduleLog.debug(
|
||||
{
|
||||
data: {
|
||||
oldImage: action.meta.arg.originalArgs,
|
||||
updatedImage: action.payload,
|
||||
},
|
||||
},
|
||||
'Image updated'
|
||||
);
|
||||
},
|
||||
});
|
||||
// startAppListening({
|
||||
// matcher: imagesApi.endpoints.updateImage.matchFulfilled,
|
||||
// effect: (action, { dispatch, getState }) => {
|
||||
// moduleLog.debug(
|
||||
// {
|
||||
// data: {
|
||||
// oldImage: action.meta.arg.originalArgs,
|
||||
// updatedImage: action.payload,
|
||||
// },
|
||||
// },
|
||||
// 'Image updated'
|
||||
// );
|
||||
// },
|
||||
// });
|
||||
};
|
||||
|
||||
export const addImageUpdatedRejectedListener = () => {
|
||||
startAppListening({
|
||||
matcher: imagesApi.endpoints.updateImage.matchRejected,
|
||||
effect: (action, { dispatch }) => {
|
||||
moduleLog.debug(
|
||||
{ data: action.meta.arg.originalArgs },
|
||||
'Image update failed'
|
||||
);
|
||||
},
|
||||
});
|
||||
// startAppListening({
|
||||
// matcher: imagesApi.endpoints.updateImage.matchRejected,
|
||||
// effect: (action, { dispatch }) => {
|
||||
// moduleLog.debug(
|
||||
// { data: action.meta.arg.originalArgs },
|
||||
// 'Image update failed'
|
||||
// );
|
||||
// },
|
||||
// });
|
||||
};
|
||||
|
||||
@@ -8,10 +8,7 @@ import { initialImageChanged } from 'features/parameters/store/generationSlice';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { boardsApi } from 'services/api/endpoints/boards';
|
||||
import { startAppListening } from '..';
|
||||
import {
|
||||
SYSTEM_BOARDS,
|
||||
imagesApi,
|
||||
} from '../../../../../services/api/endpoints/images';
|
||||
import { imagesApi } from '../../../../../services/api/endpoints/images';
|
||||
|
||||
const moduleLog = log.child({ namespace: 'image' });
|
||||
|
||||
@@ -26,7 +23,7 @@ export const addImageUploadedFulfilledListener = () => {
|
||||
effect: (action, { dispatch, getState }) => {
|
||||
const imageDTO = action.payload;
|
||||
const state = getState();
|
||||
const { selectedBoardId } = state.gallery;
|
||||
const { selectedBoardId, autoAddBoardId } = state.gallery;
|
||||
|
||||
moduleLog.debug({ arg: '<Blob>', imageDTO }, 'Image uploaded');
|
||||
|
||||
@@ -44,13 +41,13 @@ export const addImageUploadedFulfilledListener = () => {
|
||||
// default action - just upload and alert user
|
||||
if (postUploadAction?.type === 'TOAST') {
|
||||
const { toastOptions } = postUploadAction;
|
||||
if (SYSTEM_BOARDS.includes(selectedBoardId)) {
|
||||
if (!autoAddBoardId) {
|
||||
dispatch(addToast({ ...DEFAULT_UPLOADED_TOAST, ...toastOptions }));
|
||||
} else {
|
||||
// Add this image to the board
|
||||
dispatch(
|
||||
imagesApi.endpoints.addImageToBoard.initiate({
|
||||
board_id: selectedBoardId,
|
||||
board_id: autoAddBoardId,
|
||||
imageDTO,
|
||||
})
|
||||
);
|
||||
@@ -59,10 +56,10 @@ export const addImageUploadedFulfilledListener = () => {
|
||||
const { data } = boardsApi.endpoints.listAllBoards.select()(state);
|
||||
|
||||
// Fall back to just the board id if we can't find the board for some reason
|
||||
const board = data?.find((b) => b.board_id === selectedBoardId);
|
||||
const board = data?.find((b) => b.board_id === autoAddBoardId);
|
||||
const description = board
|
||||
? `Added to board ${board.board_name}`
|
||||
: `Added to board ${selectedBoardId}`;
|
||||
: `Added to board ${autoAddBoardId}`;
|
||||
|
||||
dispatch(
|
||||
addToast({
|
||||
|
||||
@@ -3,14 +3,11 @@ import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
|
||||
import {
|
||||
IMAGE_CATEGORIES,
|
||||
boardIdSelected,
|
||||
galleryViewChanged,
|
||||
imageSelected,
|
||||
} from 'features/gallery/store/gallerySlice';
|
||||
import { progressImageSet } from 'features/system/store/systemSlice';
|
||||
import {
|
||||
SYSTEM_BOARDS,
|
||||
imagesAdapter,
|
||||
imagesApi,
|
||||
} from 'services/api/endpoints/images';
|
||||
import { imagesAdapter, imagesApi } from 'services/api/endpoints/images';
|
||||
import { isImageOutput } from 'services/api/guards';
|
||||
import { sessionCanceled } from 'services/api/thunks/session';
|
||||
import {
|
||||
@@ -32,8 +29,7 @@ export const addInvocationCompleteEventListener = () => {
|
||||
);
|
||||
const session_id = action.payload.data.graph_execution_state_id;
|
||||
|
||||
const { cancelType, isCancelScheduled, boardIdToAddTo } =
|
||||
getState().system;
|
||||
const { cancelType, isCancelScheduled } = getState().system;
|
||||
|
||||
// Handle scheduled cancelation
|
||||
if (cancelType === 'scheduled' && isCancelScheduled) {
|
||||
@@ -60,54 +56,59 @@ export const addInvocationCompleteEventListener = () => {
|
||||
}
|
||||
|
||||
if (!imageDTO.is_intermediate) {
|
||||
// update the cache for 'All Images'
|
||||
dispatch(
|
||||
imagesApi.util.updateQueryData(
|
||||
'listImages',
|
||||
{
|
||||
categories: IMAGE_CATEGORIES,
|
||||
},
|
||||
(draft) => {
|
||||
imagesAdapter.addOne(draft, imageDTO);
|
||||
draft.total = draft.total + 1;
|
||||
}
|
||||
)
|
||||
);
|
||||
/**
|
||||
* Cache updates for when an image result is received
|
||||
* - *add* to getImageDTO
|
||||
* - IF `autoAddBoardId` is set:
|
||||
* - THEN add it to the board_id/images
|
||||
* - ELSE (`autoAddBoardId` is not set):
|
||||
* - THEN add it to the no_board/images
|
||||
*/
|
||||
|
||||
// update the cache for 'No Board'
|
||||
dispatch(
|
||||
imagesApi.util.updateQueryData(
|
||||
'listImages',
|
||||
{
|
||||
board_id: 'none',
|
||||
},
|
||||
(draft) => {
|
||||
imagesAdapter.addOne(draft, imageDTO);
|
||||
draft.total = draft.total + 1;
|
||||
}
|
||||
)
|
||||
);
|
||||
|
||||
// add image to the board if we had one selected
|
||||
if (boardIdToAddTo && !SYSTEM_BOARDS.includes(boardIdToAddTo)) {
|
||||
const { autoAddBoardId } = gallery;
|
||||
if (autoAddBoardId) {
|
||||
dispatch(
|
||||
imagesApi.endpoints.addImageToBoard.initiate({
|
||||
board_id: boardIdToAddTo,
|
||||
board_id: autoAddBoardId,
|
||||
imageDTO,
|
||||
})
|
||||
);
|
||||
} else {
|
||||
dispatch(
|
||||
imagesApi.util.updateQueryData(
|
||||
'listImages',
|
||||
{
|
||||
board_id: 'none',
|
||||
categories: IMAGE_CATEGORIES,
|
||||
},
|
||||
(draft) => {
|
||||
const oldTotal = draft.total;
|
||||
const newState = imagesAdapter.addOne(draft, imageDTO);
|
||||
const delta = newState.total - oldTotal;
|
||||
draft.total = draft.total + delta;
|
||||
}
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
const { selectedBoardId } = gallery;
|
||||
dispatch(
|
||||
imagesApi.util.invalidateTags([
|
||||
{ type: 'BoardImagesTotal', id: autoAddBoardId ?? 'none' },
|
||||
{ type: 'BoardAssetsTotal', id: autoAddBoardId ?? 'none' },
|
||||
])
|
||||
);
|
||||
|
||||
if (boardIdToAddTo && boardIdToAddTo !== selectedBoardId) {
|
||||
dispatch(boardIdSelected(boardIdToAddTo));
|
||||
} else if (!boardIdToAddTo) {
|
||||
dispatch(boardIdSelected('all'));
|
||||
}
|
||||
const { selectedBoardId, shouldAutoSwitch } = gallery;
|
||||
|
||||
// If auto-switch is enabled, select the new image
|
||||
if (getState().gallery.shouldAutoSwitch) {
|
||||
if (shouldAutoSwitch) {
|
||||
// if auto-add is enabled, switch the board as the image comes in
|
||||
if (autoAddBoardId && autoAddBoardId !== selectedBoardId) {
|
||||
dispatch(boardIdSelected(autoAddBoardId));
|
||||
dispatch(galleryViewChanged('images'));
|
||||
} else if (!autoAddBoardId) {
|
||||
dispatch(galleryViewChanged('images'));
|
||||
}
|
||||
dispatch(imageSelected(imageDTO.image_name));
|
||||
}
|
||||
}
|
||||
|
||||
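The cache-update comment block in the hunk above spells out the flow for a finished generation: write the new image into getImageDTO, then either hand it to addImageToBoard (when autoAddBoardId is set) or insert it into the no-board listImages cache, and finally invalidate the per-board totals. A hedged sketch of the manual cache write; the endpoint, tag and adapter names are taken from the hunk, and the presence check assumes the adapter is keyed by image_name.

// Insert the fresh image into the cached "no board" images list and keep
// the list's total consistent with what was actually added.
dispatch(
  imagesApi.util.updateQueryData(
    'listImages',
    { board_id: 'none', categories: IMAGE_CATEGORIES },
    (draft) => {
      if (!draft.entities[imageDTO.image_name]) {
        imagesAdapter.addOne(draft, imageDTO);
        draft.total += 1;
      }
    }
  )
);

// Either way, refetch the per-board counts shown in the boards list.
dispatch(
  imagesApi.util.invalidateTags([
    { type: 'BoardImagesTotal', id: autoAddBoardId ?? 'none' },
    { type: 'BoardAssetsTotal', id: autoAddBoardId ?? 'none' },
  ])
);
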
@@ -12,25 +12,35 @@ export const addStagingAreaImageSavedListener = () => {
|
||||
effect: async (action, { dispatch, getState, take }) => {
|
||||
const { imageDTO } = action.payload;
|
||||
|
||||
dispatch(
|
||||
imagesApi.endpoints.updateImage.initiate({
|
||||
imageDTO,
|
||||
changes: { is_intermediate: false },
|
||||
})
|
||||
)
|
||||
.unwrap()
|
||||
.then((image) => {
|
||||
dispatch(addToast({ title: 'Image Saved', status: 'success' }));
|
||||
})
|
||||
.catch((error) => {
|
||||
dispatch(
|
||||
addToast({
|
||||
title: 'Image Saving Failed',
|
||||
description: error.message,
|
||||
status: 'error',
|
||||
try {
|
||||
const newImageDTO = await dispatch(
|
||||
imagesApi.endpoints.changeImageIsIntermediate.initiate({
|
||||
imageDTO,
|
||||
is_intermediate: false,
|
||||
})
|
||||
).unwrap();
|
||||
|
||||
// we may need to add it to the autoadd board
|
||||
const { autoAddBoardId } = getState().gallery;
|
||||
|
||||
if (autoAddBoardId) {
|
||||
await dispatch(
|
||||
imagesApi.endpoints.addImageToBoard.initiate({
|
||||
imageDTO: newImageDTO,
|
||||
board_id: autoAddBoardId,
|
||||
})
|
||||
);
|
||||
});
|
||||
}
|
||||
dispatch(addToast({ title: 'Image Saved', status: 'success' }));
|
||||
} catch (error) {
|
||||
dispatch(
|
||||
addToast({
|
||||
title: 'Image Saving Failed',
|
||||
description: (error as Error)?.message,
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
}
|
||||
},
|
||||
});
|
||||
};
|
|
||||
// For img2img and inpaint/outpaint, we need to upload the init images
|
||||
if (['img2img', 'inpaint', 'outpaint'].includes(generationMode)) {
|
||||
// upload the image, saving the request id
|
||||
const { requestId: initImageUploadedRequestId } = dispatch(
|
||||
canvasInitImage = await dispatch(
|
||||
imagesApi.endpoints.uploadImage.initiate({
|
||||
file: new File([baseBlob], 'canvasInitImage.png', {
|
||||
type: 'image/png',
|
||||
@@ -81,23 +81,13 @@ export const addUserInvokedCanvasListener = () => {
|
||||
image_category: 'general',
|
||||
is_intermediate: true,
|
||||
})
|
||||
);
|
||||
|
||||
// Wait for the image to be uploaded, matching by request id
|
||||
const [{ payload }] = await take(
|
||||
// TODO: figure out how to narrow this action's type
|
||||
(action) =>
|
||||
imagesApi.endpoints.uploadImage.matchFulfilled(action) &&
|
||||
action.meta.requestId === initImageUploadedRequestId
|
||||
);
|
||||
|
||||
canvasInitImage = payload as ImageDTO;
|
||||
).unwrap();
|
||||
}
|
||||
|
||||
// For inpaint/outpaint, we also need to upload the mask layer
|
||||
if (['inpaint', 'outpaint'].includes(generationMode)) {
|
||||
// upload the image, saving the request id
|
||||
const { requestId: maskImageUploadedRequestId } = dispatch(
|
||||
canvasMaskImage = await dispatch(
|
||||
imagesApi.endpoints.uploadImage.initiate({
|
||||
file: new File([maskBlob], 'canvasMaskImage.png', {
|
||||
type: 'image/png',
|
||||
@@ -105,17 +95,7 @@ export const addUserInvokedCanvasListener = () => {
|
||||
image_category: 'mask',
|
||||
is_intermediate: true,
|
||||
})
|
||||
);
|
||||
|
||||
// Wait for the image to be uploaded, matching by request id
|
||||
const [{ payload }] = await take(
|
||||
// TODO: figure out how to narrow this action's type
|
||||
(action) =>
|
||||
imagesApi.endpoints.uploadImage.matchFulfilled(action) &&
|
||||
action.meta.requestId === maskImageUploadedRequestId
|
||||
);
|
||||
|
||||
canvasMaskImage = payload as ImageDTO;
|
||||
).unwrap();
|
||||
}
|
||||
|
||||
const graph = buildCanvasGraph(
|
||||
@@ -141,14 +121,14 @@ export const addUserInvokedCanvasListener = () => {
|
||||
sessionCreated.fulfilled.match(action) &&
|
||||
action.meta.requestId === sessionCreatedRequestId
|
||||
);
|
||||
const sessionId = sessionCreatedAction.payload.id;
|
||||
const session_id = sessionCreatedAction.payload.id;
|
||||
|
||||
// Associate the init image with the session, now that we have the session ID
|
||||
if (['img2img', 'inpaint'].includes(generationMode) && canvasInitImage) {
|
||||
dispatch(
|
||||
imagesApi.endpoints.updateImage.initiate({
|
||||
imagesApi.endpoints.changeImageSessionId.initiate({
|
||||
imageDTO: canvasInitImage,
|
||||
changes: { session_id: sessionId },
|
||||
session_id,
|
||||
})
|
||||
);
|
||||
}
|
||||
@@ -156,9 +136,9 @@ export const addUserInvokedCanvasListener = () => {
|
||||
// Associate the mask image with the session, now that we have the session ID
|
||||
if (['inpaint'].includes(generationMode) && canvasMaskImage) {
|
||||
dispatch(
|
||||
imagesApi.endpoints.updateImage.initiate({
|
||||
imagesApi.endpoints.changeImageSessionId.initiate({
|
||||
imageDTO: canvasMaskImage,
|
||||
changes: { session_id: sessionId },
|
||||
session_id,
|
||||
})
|
||||
);
|
||||
}
|
||||
@@ -167,7 +147,7 @@ export const addUserInvokedCanvasListener = () => {
|
||||
if (!state.canvas.layerState.stagingArea.boundingBox) {
|
||||
dispatch(
|
||||
stagingAreaInitialized({
|
||||
sessionId,
|
||||
sessionId: session_id,
|
||||
boundingBox: {
|
||||
...state.canvas.boundingBoxCoordinates,
|
||||
...state.canvas.boundingBoxDimensions,
|
||||
@@ -177,7 +157,7 @@ export const addUserInvokedCanvasListener = () => {
|
||||
}
|
||||
|
||||
// Flag the session with the canvas session ID
|
||||
dispatch(canvasSessionIdChanged(sessionId));
|
||||
dispatch(canvasSessionIdChanged(session_id));
|
||||
|
||||
// We are ready to invoke the session!
|
||||
dispatch(sessionReadyToInvoke());
|
||||
|
||||
@@ -92,7 +92,10 @@ const IAICollapse = (props: IAIToggleCollapseProps) => {
|
||||
sx={{
|
||||
p: 4,
|
||||
borderBottomRadius: 'base',
|
||||
bg: mode('base.100', 'base.800')(colorMode),
|
||||
bg: 'base.100',
|
||||
_dark: {
|
||||
bg: 'base.800',
|
||||
},
|
||||
}}
|
||||
>
|
||||
{children}
|
||||
|
||||
@@ -17,13 +17,21 @@ import {
|
||||
} from 'common/components/IAIImageFallback';
|
||||
import ImageMetadataOverlay from 'common/components/ImageMetadataOverlay';
|
||||
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
|
||||
import { MouseEvent, ReactElement, SyntheticEvent, memo } from 'react';
|
||||
import ImageContextMenu from 'features/gallery/components/ImageContextMenu/ImageContextMenu';
|
||||
import {
|
||||
MouseEvent,
|
||||
ReactElement,
|
||||
SyntheticEvent,
|
||||
memo,
|
||||
useCallback,
|
||||
useState,
|
||||
} from 'react';
|
||||
import { FaImage, FaUndo, FaUpload } from 'react-icons/fa';
|
||||
import { ImageDTO, PostUploadAction } from 'services/api/types';
|
||||
import { mode } from 'theme/util/mode';
|
||||
import IAIDraggable from './IAIDraggable';
|
||||
import IAIDroppable from './IAIDroppable';
|
||||
import ImageContextMenu from 'features/gallery/components/ImageContextMenu/ImageContextMenu';
|
||||
import SelectionOverlay from './SelectionOverlay';
|
||||
|
||||
type IAIDndImageProps = {
|
||||
imageDTO: ImageDTO | undefined;
|
||||
@@ -49,6 +57,7 @@ type IAIDndImageProps = {
|
||||
thumbnail?: boolean;
|
||||
noContentFallback?: ReactElement;
|
||||
useThumbailFallback?: boolean;
|
||||
withHoverOverlay?: boolean;
|
||||
};
|
||||
|
||||
const IAIDndImage = (props: IAIDndImageProps) => {
|
||||
@@ -75,9 +84,17 @@ const IAIDndImage = (props: IAIDndImageProps) => {
|
||||
resetIcon = <FaUndo />,
|
||||
noContentFallback = <IAINoContentFallback icon={FaImage} />,
|
||||
useThumbailFallback,
|
||||
withHoverOverlay = false,
|
||||
} = props;
|
||||
|
||||
const { colorMode } = useColorMode();
|
||||
const [isHovered, setIsHovered] = useState(false);
|
||||
const handleMouseOver = useCallback(() => {
|
||||
setIsHovered(true);
|
||||
}, []);
|
||||
const handleMouseOut = useCallback(() => {
|
||||
setIsHovered(false);
|
||||
}, []);
|
||||
|
||||
const { getUploadButtonProps, getUploadInputProps } = useImageUploadButton({
|
||||
postUploadAction,
|
||||
@@ -105,6 +122,8 @@ const IAIDndImage = (props: IAIDndImageProps) => {
|
||||
{(ref) => (
|
||||
<Flex
|
||||
ref={ref}
|
||||
onMouseOver={handleMouseOver}
|
||||
onMouseOut={handleMouseOut}
|
||||
sx={{
|
||||
width: 'full',
|
||||
height: 'full',
|
||||
@@ -147,12 +166,14 @@ const IAIDndImage = (props: IAIDndImageProps) => {
|
||||
maxW: 'full',
|
||||
maxH: 'full',
|
||||
borderRadius: 'base',
|
||||
shadow: isSelected ? 'selected.light' : undefined,
|
||||
_dark: { shadow: isSelected ? 'selected.dark' : undefined },
|
||||
...imageSx,
|
||||
}}
|
||||
/>
|
||||
{withMetadataOverlay && <ImageMetadataOverlay image={imageDTO} />}
|
||||
<SelectionOverlay
|
||||
isSelected={isSelected}
|
||||
isHovered={withHoverOverlay ? isHovered : false}
|
||||
/>
|
||||
</Flex>
|
||||
)}
|
||||
{!imageDTO && !isUploadDisabled && (
|
||||
@@ -183,13 +204,6 @@ const IAIDndImage = (props: IAIDndImageProps) => {
|
||||
</>
|
||||
)}
|
||||
{!imageDTO && isUploadDisabled && noContentFallback}
|
||||
{!isDropDisabled && (
|
||||
<IAIDroppable
|
||||
data={droppableData}
|
||||
disabled={isDropDisabled}
|
||||
dropLabel={dropLabel}
|
||||
/>
|
||||
)}
|
||||
{imageDTO && !isDragDisabled && (
|
||||
<IAIDraggable
|
||||
data={draggableData}
|
||||
@@ -197,6 +211,13 @@ const IAIDndImage = (props: IAIDndImageProps) => {
|
||||
onClick={onClick}
|
||||
/>
|
||||
)}
|
||||
{!isDropDisabled && (
|
||||
<IAIDroppable
|
||||
data={droppableData}
|
||||
disabled={isDropDisabled}
|
||||
dropLabel={dropLabel}
|
||||
/>
|
||||
)}
|
||||
{onClickReset && withResetIcon && imageDTO && (
|
||||
<IAIIconButton
|
||||
onClick={onClickReset}
|
||||
|
||||
@@ -13,10 +13,11 @@ type IAIDroppableProps = {
|
||||
dropLabel?: ReactNode;
|
||||
disabled?: boolean;
|
||||
data?: TypesafeDroppableData;
|
||||
hoverRef?: React.Ref<HTMLDivElement>;
|
||||
};
|
||||
|
||||
const IAIDroppable = (props: IAIDroppableProps) => {
|
||||
const { dropLabel, data, disabled } = props;
|
||||
const { dropLabel, data, disabled, hoverRef } = props;
|
||||
const dndId = useRef(uuidv4());
|
||||
|
||||
const { isOver, setNodeRef, active } = useDroppable({
|
||||
|
||||
@@ -9,7 +9,7 @@ import {
|
||||
} from '@chakra-ui/react';
|
||||
import { memo } from 'react';
|
||||
|
||||
interface Props extends SwitchProps {
|
||||
export interface IAISwitchProps extends SwitchProps {
|
||||
label?: string;
|
||||
width?: string | number;
|
||||
formControlProps?: FormControlProps;
|
||||
@@ -20,7 +20,7 @@ interface Props extends SwitchProps {
|
||||
/**
|
||||
* Customized Chakra FormControl + Switch multi-part component.
|
||||
*/
|
||||
const IAISwitch = (props: Props) => {
|
||||
const IAISwitch = (props: IAISwitchProps) => {
|
||||
const {
|
||||
label,
|
||||
isDisabled = false,
|
||||
|
||||
@@ -19,10 +19,11 @@ import { useUploadImageMutation } from 'services/api/endpoints/images';
|
||||
import { PostUploadAction } from 'services/api/types';
|
||||
import ImageUploadOverlay from './ImageUploadOverlay';
|
||||
import { AnimatePresence, motion } from 'framer-motion';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
|
||||
const selector = createSelector(
|
||||
[activeTabNameSelector],
|
||||
(activeTabName) => {
|
||||
[stateSelector, activeTabNameSelector],
|
||||
({ gallery }, activeTabName) => {
|
||||
let postUploadAction: PostUploadAction = { type: 'TOAST' };
|
||||
|
||||
if (activeTabName === 'unifiedCanvas') {
|
||||
@@ -33,7 +34,10 @@ const selector = createSelector(
|
||||
postUploadAction = { type: 'SET_INITIAL_IMAGE' };
|
||||
}
|
||||
|
||||
const { autoAddBoardId } = gallery;
|
||||
|
||||
return {
|
||||
autoAddBoardId,
|
||||
postUploadAction,
|
||||
};
|
||||
},
|
||||
@@ -46,7 +50,7 @@ type ImageUploaderProps = {
|
||||
|
||||
const ImageUploader = (props: ImageUploaderProps) => {
|
||||
const { children } = props;
|
||||
const { postUploadAction } = useAppSelector(selector);
|
||||
const { autoAddBoardId, postUploadAction } = useAppSelector(selector);
|
||||
const isBusy = useAppSelector(selectIsBusy);
|
||||
const toaster = useAppToaster();
|
||||
const { t } = useTranslation();
|
||||
@@ -74,9 +78,10 @@ const ImageUploader = (props: ImageUploaderProps) => {
|
||||
image_category: 'user',
|
||||
is_intermediate: false,
|
||||
postUploadAction,
|
||||
board_id: autoAddBoardId,
|
||||
});
|
||||
},
|
||||
[postUploadAction, uploadImage]
|
||||
[autoAddBoardId, postUploadAction, uploadImage]
|
||||
);
|
||||
|
||||
const onDrop = useCallback(
|
||||
|
||||
@@ -0,0 +1,42 @@
|
||||
import { Box } from '@chakra-ui/react';
|
||||
|
||||
type Props = {
|
||||
isSelected: boolean;
|
||||
isHovered: boolean;
|
||||
};
|
||||
const SelectionOverlay = ({ isSelected, isHovered }: Props) => {
|
||||
return (
|
||||
<Box
|
||||
className="selection-box"
|
||||
sx={{
|
||||
position: 'absolute',
|
||||
top: 0,
|
||||
insetInlineEnd: 0,
|
||||
bottom: 0,
|
||||
insetInlineStart: 0,
|
||||
borderRadius: 'base',
|
||||
opacity: isSelected ? 1 : 0.7,
|
||||
transitionProperty: 'common',
|
||||
transitionDuration: '0.1s',
|
||||
shadow: isSelected
|
||||
? isHovered
|
||||
? 'hoverSelected.light'
|
||||
: 'selected.light'
|
||||
: isHovered
|
||||
? 'hoverUnselected.light'
|
||||
: undefined,
|
||||
_dark: {
|
||||
shadow: isSelected
|
||||
? isHovered
|
||||
? 'hoverSelected.dark'
|
||||
: 'selected.dark'
|
||||
: isHovered
|
||||
? 'hoverUnselected.dark'
|
||||
: undefined,
|
||||
},
|
||||
}}
|
||||
/>
|
||||
);
|
||||
};
|
||||
|
||||
export default SelectionOverlay;
|
||||
@@ -1,3 +1,4 @@
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { useCallback } from 'react';
|
||||
import { useDropzone } from 'react-dropzone';
|
||||
import { useUploadImageMutation } from 'services/api/endpoints/images';
|
||||
@@ -31,6 +32,9 @@ export const useImageUploadButton = ({
|
||||
postUploadAction,
|
||||
isDisabled,
|
||||
}: UseImageUploadButtonArgs) => {
|
||||
const autoAddBoardId = useAppSelector(
|
||||
(state) => state.gallery.autoAddBoardId
|
||||
);
|
||||
const [uploadImage] = useUploadImageMutation();
|
||||
const onDropAccepted = useCallback(
|
||||
(files: File[]) => {
|
||||
@@ -45,9 +49,10 @@ export const useImageUploadButton = ({
|
||||
image_category: 'user',
|
||||
is_intermediate: false,
|
||||
postUploadAction: postUploadAction ?? { type: 'TOAST' },
|
||||
board_id: autoAddBoardId,
|
||||
});
|
||||
},
|
||||
[postUploadAction, uploadImage]
|
||||
[autoAddBoardId, postUploadAction, uploadImage]
|
||||
);
|
||||
|
||||
const {
|
||||
|
||||
@@ -24,6 +24,7 @@ import ParamControlNetShouldAutoConfig from './ParamControlNetShouldAutoConfig';
|
||||
import ParamControlNetBeginEnd from './parameters/ParamControlNetBeginEnd';
|
||||
import ParamControlNetControlMode from './parameters/ParamControlNetControlMode';
|
||||
import ParamControlNetProcessorSelect from './parameters/ParamControlNetProcessorSelect';
|
||||
import ParamControlNetResizeMode from './parameters/ParamControlNetResizeMode';
|
||||
|
||||
type ControlNetProps = {
|
||||
controlNetId: string;
|
||||
@@ -68,7 +69,7 @@ const ControlNet = (props: ControlNetProps) => {
|
||||
<Flex
|
||||
sx={{
|
||||
flexDir: 'column',
|
||||
gap: 2,
|
||||
gap: 3,
|
||||
p: 3,
|
||||
borderRadius: 'base',
|
||||
position: 'relative',
|
||||
@@ -117,7 +118,12 @@ const ControlNet = (props: ControlNetProps) => {
|
||||
tooltip={isExpanded ? 'Hide Advanced' : 'Show Advanced'}
|
||||
aria-label={isExpanded ? 'Hide Advanced' : 'Show Advanced'}
|
||||
onClick={toggleIsExpanded}
|
||||
variant="link"
|
||||
variant="ghost"
|
||||
sx={{
|
||||
_hover: {
|
||||
bg: 'none',
|
||||
},
|
||||
}}
|
||||
icon={
|
||||
<ChevronUpIcon
|
||||
sx={{
|
||||
@@ -151,7 +157,7 @@ const ControlNet = (props: ControlNetProps) => {
|
||||
/>
|
||||
)}
|
||||
</Flex>
|
||||
<Flex sx={{ w: 'full', flexDirection: 'column' }}>
|
||||
<Flex sx={{ w: 'full', flexDirection: 'column', gap: 3 }}>
|
||||
<Flex sx={{ gap: 4, w: 'full', alignItems: 'center' }}>
|
||||
<Flex
|
||||
sx={{
|
||||
@@ -176,16 +182,16 @@ const ControlNet = (props: ControlNetProps) => {
|
||||
h: 28,
|
||||
w: 28,
|
||||
aspectRatio: '1/1',
|
||||
mt: 3,
|
||||
}}
|
||||
>
|
||||
<ControlNetImagePreview controlNetId={controlNetId} height={28} />
|
||||
</Flex>
|
||||
)}
|
||||
</Flex>
|
||||
<Box mt={2}>
|
||||
<Flex sx={{ gap: 2 }}>
|
||||
<ParamControlNetControlMode controlNetId={controlNetId} />
|
||||
</Box>
|
||||
<ParamControlNetResizeMode controlNetId={controlNetId} />
|
||||
</Flex>
|
||||
<ParamControlNetProcessorSelect controlNetId={controlNetId} />
|
||||
</Flex>
|
||||
|
||||
|
||||
@@ -0,0 +1,62 @@
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
||||
import IAIMantineSelect from 'common/components/IAIMantineSelect';
|
||||
import {
|
||||
ResizeModes,
|
||||
controlNetResizeModeChanged,
|
||||
} from 'features/controlNet/store/controlNetSlice';
|
||||
import { useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
type ParamControlNetResizeModeProps = {
|
||||
controlNetId: string;
|
||||
};
|
||||
|
||||
const RESIZE_MODE_DATA = [
|
||||
{ label: 'Resize', value: 'just_resize' },
|
||||
{ label: 'Crop', value: 'crop_resize' },
|
||||
{ label: 'Fill', value: 'fill_resize' },
|
||||
];
|
||||
|
||||
export default function ParamControlNetResizeMode(
|
||||
props: ParamControlNetResizeModeProps
|
||||
) {
|
||||
const { controlNetId } = props;
|
||||
const dispatch = useAppDispatch();
|
||||
const selector = useMemo(
|
||||
() =>
|
||||
createSelector(
|
||||
stateSelector,
|
||||
({ controlNet }) => {
|
||||
const { resizeMode, isEnabled } =
|
||||
controlNet.controlNets[controlNetId];
|
||||
return { resizeMode, isEnabled };
|
||||
},
|
||||
defaultSelectorOptions
|
||||
),
|
||||
[controlNetId]
|
||||
);
|
||||
|
||||
const { resizeMode, isEnabled } = useAppSelector(selector);
|
||||
|
||||
const { t } = useTranslation();
|
||||
|
||||
const handleResizeModeChange = useCallback(
|
||||
(resizeMode: ResizeModes) => {
|
||||
dispatch(controlNetResizeModeChanged({ controlNetId, resizeMode }));
|
||||
},
|
||||
[controlNetId, dispatch]
|
||||
);
|
||||
|
||||
return (
|
||||
<IAIMantineSelect
|
||||
disabled={!isEnabled}
|
||||
label="Resize Mode"
|
||||
data={RESIZE_MODE_DATA}
|
||||
value={String(resizeMode)}
|
||||
onChange={handleResizeModeChange}
|
||||
/>
|
||||
);
|
||||
}
|
||||
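ParamControlNetResizeMode builds its selector inside useMemo keyed on controlNetId, so every ControlNet panel gets its own memoized selector instead of all panels sharing one and thrashing its cache. A minimal sketch of that pattern pulled out into an illustrative hook (the hook name is made up; the helpers are the ones imported in the file above):

import { useMemo } from 'react';
import { createSelector } from '@reduxjs/toolkit';
import { stateSelector } from 'app/store/store';
import { useAppSelector } from 'app/store/storeHooks';
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';

// Sketch: a per-instance selector memoized on the id prop, so updates to
// other ControlNets do not invalidate this component's memoized result.
const useControlNetResizeMode = (controlNetId: string) => {
  const selector = useMemo(
    () =>
      createSelector(
        stateSelector,
        ({ controlNet }) => controlNet.controlNets[controlNetId].resizeMode,
        defaultSelectorOptions
      ),
    [controlNetId]
  );
  return useAppSelector(selector);
};
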
@@ -3,6 +3,7 @@ import { RootState } from 'app/store/store';
|
||||
import { ControlNetModelParam } from 'features/parameters/types/parameterSchemas';
|
||||
import { cloneDeep, forEach } from 'lodash-es';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
import { components } from 'services/api/schema';
|
||||
import { isAnySessionRejected } from 'services/api/thunks/session';
|
||||
import { appSocketInvocationError } from 'services/events/actions';
|
||||
import { controlNetImageProcessed } from './actions';
|
||||
@@ -16,11 +17,13 @@
RequiredControlNetProcessorNode,
} from './types';

export type ControlModes =
| 'balanced'
| 'more_prompt'
| 'more_control'
| 'unbalanced';
export type ControlModes = NonNullable<
components['schemas']['ControlNetInvocation']['control_mode']
>;

export type ResizeModes = NonNullable<
components['schemas']['ControlNetInvocation']['resize_mode']
>;

export const initialControlNet: Omit<ControlNetConfig, 'controlNetId'> = {
isEnabled: true,
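Deriving ControlModes and ResizeModes from the generated OpenAPI schema keeps the unions in sync with the backend instead of hand-maintaining them. An illustration of the technique with a made-up stand-in for the generated types (the real ones come from services/api/schema):

// Illustrative only: this `components` type is a stand-in for the
// openapi-typescript output, not the project's actual schema module.
type components = {
  schemas: {
    ControlNetInvocation: {
      control_mode?: 'balanced' | 'more_prompt' | 'more_control' | 'unbalanced' | null;
    };
  };
};

// NonNullable strips the null/undefined that optional schema fields carry,
// leaving just the string-literal union.
type ControlModes = NonNullable<
  components['schemas']['ControlNetInvocation']['control_mode']
>;
// => 'balanced' | 'more_prompt' | 'more_control' | 'unbalanced'
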
@@ -29,6 +32,7 @@ export const initialControlNet: Omit<ControlNetConfig, 'controlNetId'> = {
|
||||
beginStepPct: 0,
|
||||
endStepPct: 1,
|
||||
controlMode: 'balanced',
|
||||
resizeMode: 'just_resize',
|
||||
controlImage: null,
|
||||
processedControlImage: null,
|
||||
processorType: 'canny_image_processor',
|
||||
@@ -45,6 +49,7 @@ export type ControlNetConfig = {
|
||||
beginStepPct: number;
|
||||
endStepPct: number;
|
||||
controlMode: ControlModes;
|
||||
resizeMode: ResizeModes;
|
||||
controlImage: string | null;
|
||||
processedControlImage: string | null;
|
||||
processorType: ControlNetProcessorType;
|
||||
@@ -215,6 +220,16 @@ export const controlNetSlice = createSlice({
|
||||
const { controlNetId, controlMode } = action.payload;
|
||||
state.controlNets[controlNetId].controlMode = controlMode;
|
||||
},
|
||||
controlNetResizeModeChanged: (
|
||||
state,
|
||||
action: PayloadAction<{
|
||||
controlNetId: string;
|
||||
resizeMode: ResizeModes;
|
||||
}>
|
||||
) => {
|
||||
const { controlNetId, resizeMode } = action.payload;
|
||||
state.controlNets[controlNetId].resizeMode = resizeMode;
|
||||
},
|
||||
controlNetProcessorParamsChanged: (
|
||||
state,
|
||||
action: PayloadAction<{
|
||||
@@ -342,6 +357,7 @@ export const {
|
||||
controlNetBeginStepPctChanged,
|
||||
controlNetEndStepPctChanged,
|
||||
controlNetControlModeChanged,
|
||||
controlNetResizeModeChanged,
|
||||
controlNetProcessorParamsChanged,
|
||||
controlNetProcessorTypeChanged,
|
||||
controlNetReset,
|
||||
|
||||
@@ -98,12 +98,16 @@ const ParamEmbeddingPopover = (props: Props) => {
|
||||
sx={{ p: 0, w: `calc(${PARAMETERS_PANEL_WIDTH} - 2rem )` }}
|
||||
>
|
||||
{data.length === 0 ? (
|
||||
<Flex sx={{ justifyContent: 'center', p: 2 }}>
|
||||
<Text
|
||||
sx={{ fontSize: 'sm', color: 'base.500', _dark: 'base.700' }}
|
||||
>
|
||||
No Embeddings Loaded
|
||||
</Text>
|
||||
<Flex
|
||||
sx={{
|
||||
justifyContent: 'center',
|
||||
p: 2,
|
||||
fontSize: 'sm',
|
||||
color: 'base.500',
|
||||
_dark: { color: 'base.700' },
|
||||
}}
|
||||
>
|
||||
<Text>No Embeddings Loaded</Text>
|
||||
</Flex>
|
||||
) : (
|
||||
<IAIMantineSearchableSelect
|
||||
|
||||
@@ -0,0 +1,23 @@
|
||||
import { Badge, Flex } from '@chakra-ui/react';
|
||||
|
||||
const AutoAddIcon = () => {
|
||||
return (
|
||||
<Flex
|
||||
sx={{
|
||||
position: 'absolute',
|
||||
insetInlineEnd: 0,
|
||||
top: 0,
|
||||
p: 1,
|
||||
}}
|
||||
>
|
||||
<Badge
|
||||
variant="solid"
|
||||
sx={{ bg: 'accent.400', _dark: { bg: 'accent.500' } }}
|
||||
>
|
||||
auto
|
||||
</Badge>
|
||||
</Flex>
|
||||
);
|
||||
};
|
||||
|
||||
export default AutoAddIcon;
|
||||
@@ -0,0 +1,80 @@
|
||||
import { SelectItem } from '@mantine/core';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
||||
import IAIMantineSearchableSelect from 'common/components/IAIMantineSearchableSelect';
|
||||
import IAIMantineSelectItemWithTooltip from 'common/components/IAIMantineSelectItemWithTooltip';
|
||||
import { autoAddBoardIdChanged } from 'features/gallery/store/gallerySlice';
|
||||
import { useCallback, useRef } from 'react';
|
||||
import { useListAllBoardsQuery } from 'services/api/endpoints/boards';
|
||||
|
||||
const selector = createSelector(
|
||||
[stateSelector],
|
||||
({ gallery }) => {
|
||||
const { autoAddBoardId } = gallery;
|
||||
|
||||
return {
|
||||
autoAddBoardId,
|
||||
};
|
||||
},
|
||||
defaultSelectorOptions
|
||||
);
|
||||
|
||||
const BoardAutoAddSelect = () => {
|
||||
const dispatch = useAppDispatch();
|
||||
const { autoAddBoardId } = useAppSelector(selector);
|
||||
const inputRef = useRef<HTMLInputElement>(null);
|
||||
const { boards, hasBoards } = useListAllBoardsQuery(undefined, {
|
||||
selectFromResult: ({ data }) => {
|
||||
const boards: SelectItem[] = [
|
||||
{
|
||||
label: 'None',
|
||||
value: 'none',
|
||||
},
|
||||
];
|
||||
data?.forEach(({ board_id, board_name }) => {
|
||||
boards.push({
|
||||
label: board_name,
|
||||
value: board_id,
|
||||
});
|
||||
});
|
||||
return {
|
||||
boards,
|
||||
hasBoards: boards.length > 1,
|
||||
};
|
||||
},
|
||||
});
|
||||
|
||||
const handleChange = useCallback(
|
||||
(v: string | null) => {
|
||||
if (!v) {
|
||||
return;
|
||||
}
|
||||
|
||||
dispatch(autoAddBoardIdChanged(v === 'none' ? undefined : v));
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
|
||||
return (
|
||||
<IAIMantineSearchableSelect
|
||||
label="Auto-Add Board"
|
||||
inputRef={inputRef}
|
||||
autoFocus
|
||||
placeholder={'Select a Board'}
|
||||
value={autoAddBoardId}
|
||||
data={boards}
|
||||
nothingFound="No matching Boards"
|
||||
itemComponent={IAIMantineSelectItemWithTooltip}
|
||||
disabled={!hasBoards}
|
||||
filter={(value, item: SelectItem) =>
|
||||
item.label?.toLowerCase().includes(value.toLowerCase().trim()) ||
|
||||
item.value.toLowerCase().includes(value.toLowerCase().trim())
|
||||
}
|
||||
onChange={handleChange}
|
||||
/>
|
||||
);
|
||||
};
|
||||
|
||||
export default BoardAutoAddSelect;
|
||||
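BoardAutoAddSelect maps the boards query straight into Mantine select items with selectFromResult, so the component re-renders only when the derived list changes rather than on every cache update. A hedged restatement of just that mapping (SelectItem is from @mantine/core; the query hook is the project's):

// Sketch: derive select options (plus a synthetic "None" entry) from the
// boards list without keeping the raw query data in component state.
const { boards, hasBoards } = useListAllBoardsQuery(undefined, {
  selectFromResult: ({ data }) => {
    const boards: SelectItem[] = [{ label: 'None', value: 'none' }];
    data?.forEach(({ board_id, board_name }) =>
      boards.push({ label: board_name, value: board_id })
    );
    return { boards, hasBoards: boards.length > 1 };
  },
});
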
@@ -0,0 +1,94 @@
|
||||
import { MenuGroup, MenuItem, MenuList } from '@chakra-ui/react';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { ContextMenu, ContextMenuProps } from 'chakra-ui-contextmenu';
|
||||
import {
|
||||
autoAddBoardIdChanged,
|
||||
boardIdSelected,
|
||||
} from 'features/gallery/store/gallerySlice';
|
||||
import { MouseEvent, memo, useCallback, useMemo } from 'react';
|
||||
import { FaFolder, FaPlus } from 'react-icons/fa';
|
||||
import { BoardDTO } from 'services/api/types';
|
||||
import { menuListMotionProps } from 'theme/components/menu';
|
||||
import GalleryBoardContextMenuItems from './GalleryBoardContextMenuItems';
|
||||
import NoBoardContextMenuItems from './NoBoardContextMenuItems';
|
||||
import { useBoardName } from 'services/api/hooks/useBoardName';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
|
||||
type Props = {
|
||||
board?: BoardDTO;
|
||||
board_id?: string;
|
||||
children: ContextMenuProps<HTMLDivElement>['children'];
|
||||
setBoardToDelete?: (board?: BoardDTO) => void;
|
||||
};
|
||||
|
||||
const BoardContextMenu = memo(
|
||||
({ board, board_id, setBoardToDelete, children }: Props) => {
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
const selector = useMemo(
|
||||
() =>
|
||||
createSelector(stateSelector, ({ gallery }) => {
|
||||
const isSelected = gallery.selectedBoardId === board_id;
|
||||
const isAutoAdd = gallery.autoAddBoardId === board_id;
|
||||
return { isSelected, isAutoAdd };
|
||||
}),
|
||||
[board_id]
|
||||
);
|
||||
|
||||
const { isSelected, isAutoAdd } = useAppSelector(selector);
|
||||
const boardName = useBoardName(board_id);
|
||||
|
||||
const handleSelectBoard = useCallback(() => {
|
||||
dispatch(boardIdSelected(board_id));
|
||||
}, [board_id, dispatch]);
|
||||
|
||||
const handleSetAutoAdd = useCallback(() => {
|
||||
dispatch(autoAddBoardIdChanged(board_id));
|
||||
}, [board_id, dispatch]);
|
||||
|
||||
const skipEvent = useCallback((e: MouseEvent<HTMLDivElement>) => {
|
||||
e.preventDefault();
|
||||
}, []);
|
||||
|
||||
return (
|
||||
<ContextMenu<HTMLDivElement>
|
||||
menuProps={{ size: 'sm', isLazy: true }}
|
||||
menuButtonProps={{
|
||||
bg: 'transparent',
|
||||
_hover: { bg: 'transparent' },
|
||||
}}
|
||||
renderMenu={() => (
|
||||
<MenuList
|
||||
sx={{ visibility: 'visible !important' }}
|
||||
motionProps={menuListMotionProps}
|
||||
onContextMenu={skipEvent}
|
||||
>
|
||||
<MenuGroup title={boardName}>
|
||||
<MenuItem
|
||||
icon={<FaPlus />}
|
||||
isDisabled={isAutoAdd}
|
||||
onClick={handleSetAutoAdd}
|
||||
>
|
||||
Auto-add to this Board
|
||||
</MenuItem>
|
||||
{!board && <NoBoardContextMenuItems />}
|
||||
{board && (
|
||||
<GalleryBoardContextMenuItems
|
||||
board={board}
|
||||
setBoardToDelete={setBoardToDelete}
|
||||
/>
|
||||
)}
|
||||
</MenuGroup>
|
||||
</MenuList>
|
||||
)}
|
||||
>
|
||||
{children}
|
||||
</ContextMenu>
|
||||
);
|
||||
}
|
||||
);
|
||||
|
||||
BoardContextMenu.displayName = 'HoverableBoard';
|
||||
|
||||
export default BoardContextMenu;
|
||||
@@ -1,5 +1,6 @@
|
||||
import IAIButton from 'common/components/IAIButton';
|
||||
import IAIIconButton from 'common/components/IAIIconButton';
|
||||
import { useCallback } from 'react';
|
||||
import { FaPlus } from 'react-icons/fa';
|
||||
import { useCreateBoardMutation } from 'services/api/endpoints/boards';
|
||||
|
||||
const DEFAULT_BOARD_NAME = 'My Board';
|
||||
@@ -12,15 +13,14 @@ const AddBoardButton = () => {
|
||||
}, [createBoard]);
|
||||
|
||||
return (
|
||||
<IAIButton
|
||||
<IAIIconButton
|
||||
icon={<FaPlus />}
|
||||
isLoading={isLoading}
|
||||
tooltip="Add Board"
|
||||
aria-label="Add Board"
|
||||
onClick={handleCreateBoard}
|
||||
size="sm"
|
||||
sx={{ px: 4 }}
|
||||
>
|
||||
Add Board
|
||||
</IAIButton>
|
||||
/>
|
||||
);
|
||||
};
|
||||
|
||||
|
||||
@@ -1,50 +0,0 @@
|
||||
import {
|
||||
ASSETS_CATEGORIES,
|
||||
INITIAL_IMAGE_LIMIT,
|
||||
boardIdSelected,
|
||||
} from 'features/gallery/store/gallerySlice';
|
||||
import { FaFileImage } from 'react-icons/fa';
|
||||
import { useDispatch } from 'react-redux';
|
||||
import {
|
||||
ListImagesArgs,
|
||||
useListImagesQuery,
|
||||
} from 'services/api/endpoints/images';
|
||||
import GenericBoard from './GenericBoard';
|
||||
|
||||
const baseQueryArg: ListImagesArgs = {
|
||||
categories: ASSETS_CATEGORIES,
|
||||
offset: 0,
|
||||
limit: INITIAL_IMAGE_LIMIT,
|
||||
is_intermediate: false,
|
||||
};
|
||||
|
||||
const AllAssetsBoard = ({ isSelected }: { isSelected: boolean }) => {
|
||||
const dispatch = useDispatch();
|
||||
|
||||
const handleClick = () => {
|
||||
dispatch(boardIdSelected('assets'));
|
||||
};
|
||||
|
||||
const { total } = useListImagesQuery(baseQueryArg, {
|
||||
selectFromResult: ({ data }) => ({ total: data?.total ?? 0 }),
|
||||
});
|
||||
|
||||
// TODO: Do we support making 'images' 'assets? if yes, we need to handle this
|
||||
// const droppableData: MoveBoardDropData = {
|
||||
// id: 'all-images-board',
|
||||
// actionType: 'MOVE_BOARD',
|
||||
// context: { boardId: 'assets' },
|
||||
// };
|
||||
|
||||
return (
|
||||
<GenericBoard
|
||||
onClick={handleClick}
|
||||
isSelected={isSelected}
|
||||
icon={FaFileImage}
|
||||
label="All Assets"
|
||||
badgeCount={total}
|
||||
/>
|
||||
);
|
||||
};
|
||||
|
||||
export default AllAssetsBoard;
|
||||
@@ -1,50 +0,0 @@
|
||||
import {
|
||||
IMAGE_CATEGORIES,
|
||||
INITIAL_IMAGE_LIMIT,
|
||||
boardIdSelected,
|
||||
} from 'features/gallery/store/gallerySlice';
|
||||
import { FaImages } from 'react-icons/fa';
|
||||
import { useDispatch } from 'react-redux';
|
||||
import {
|
||||
ListImagesArgs,
|
||||
useListImagesQuery,
|
||||
} from 'services/api/endpoints/images';
|
||||
import GenericBoard from './GenericBoard';
|
||||
|
||||
const baseQueryArg: ListImagesArgs = {
|
||||
categories: IMAGE_CATEGORIES,
|
||||
offset: 0,
|
||||
limit: INITIAL_IMAGE_LIMIT,
|
||||
is_intermediate: false,
|
||||
};
|
||||
|
||||
const AllImagesBoard = ({ isSelected }: { isSelected: boolean }) => {
|
||||
const dispatch = useDispatch();
|
||||
|
||||
const handleClick = () => {
|
||||
dispatch(boardIdSelected('images'));
|
||||
};
|
||||
|
||||
const { total } = useListImagesQuery(baseQueryArg, {
|
||||
selectFromResult: ({ data }) => ({ total: data?.total ?? 0 }),
|
||||
});
|
||||
|
||||
// TODO: Do we support making 'images' 'assets? if yes, we need to handle this
|
||||
// const droppableData: MoveBoardDropData = {
|
||||
// id: 'all-images-board',
|
||||
// actionType: 'MOVE_BOARD',
|
||||
// context: { boardId: 'images' },
|
||||
// };
|
||||
|
||||
return (
|
||||
<GenericBoard
|
||||
onClick={handleClick}
|
||||
isSelected={isSelected}
|
||||
icon={FaImages}
|
||||
label="All Images"
|
||||
badgeCount={total}
|
||||
/>
|
||||
);
|
||||
};
|
||||
|
||||
export default AllImagesBoard;
|
||||
@@ -29,6 +29,7 @@ const BatchBoard = ({ isSelected }: { isSelected: boolean }) => {
|
||||
|
||||
return (
|
||||
<GenericBoard
|
||||
board_id="batch"
|
||||
droppableData={droppableData}
|
||||
onClick={handleBatchBoardClick}
|
||||
isSelected={isSelected}
|
||||
|
||||
@@ -1,27 +1,22 @@
|
||||
import {
|
||||
Collapse,
|
||||
Flex,
|
||||
Grid,
|
||||
GridItem,
|
||||
useDisclosure,
|
||||
} from '@chakra-ui/react';
|
||||
import { ButtonGroup, Collapse, Flex, Grid, GridItem } from '@chakra-ui/react';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
||||
import IAIIconButton from 'common/components/IAIIconButton';
|
||||
import { AnimatePresence, motion } from 'framer-motion';
|
||||
import { OverlayScrollbarsComponent } from 'overlayscrollbars-react';
|
||||
import { memo, useState } from 'react';
|
||||
import { memo, useCallback, useState } from 'react';
|
||||
import { FaSearch } from 'react-icons/fa';
|
||||
import { useListAllBoardsQuery } from 'services/api/endpoints/boards';
|
||||
import { BoardDTO } from 'services/api/types';
|
||||
import { useFeatureStatus } from '../../../../system/hooks/useFeatureStatus';
|
||||
import DeleteBoardModal from '../DeleteBoardModal';
|
||||
import AddBoardButton from './AddBoardButton';
|
||||
import AllAssetsBoard from './AllAssetsBoard';
|
||||
import AllImagesBoard from './AllImagesBoard';
|
||||
import BatchBoard from './BatchBoard';
|
||||
import BoardsSearch from './BoardsSearch';
|
||||
import GalleryBoard from './GalleryBoard';
|
||||
import SystemBoardButton from './SystemBoardButton';
|
||||
import NoBoardBoard from './NoBoardBoard';
|
||||
import DeleteBoardModal from '../DeleteBoardModal';
|
||||
import { BoardDTO } from 'services/api/types';
|
||||
|
||||
const selector = createSelector(
|
||||
[stateSelector],
|
||||
@@ -48,7 +43,6 @@ const BoardsList = (props: Props) => {
|
||||
)
|
||||
: boards;
|
||||
const [boardToDelete, setBoardToDelete] = useState<BoardDTO>();
|
||||
const [searchMode, setSearchMode] = useState(false);
|
||||
|
||||
return (
|
||||
<>
|
||||
@@ -64,7 +58,7 @@ const BoardsList = (props: Props) => {
|
||||
}}
|
||||
>
|
||||
<Flex sx={{ gap: 2, alignItems: 'center' }}>
|
||||
<BoardsSearch setSearchMode={setSearchMode} />
|
||||
<BoardsSearch />
|
||||
<AddBoardButton />
|
||||
</Flex>
|
||||
<OverlayScrollbarsComponent
|
||||
@@ -82,29 +76,13 @@ const BoardsList = (props: Props) => {
|
||||
<Grid
|
||||
className="list-container"
|
||||
sx={{
|
||||
gridTemplateRows: '6.5rem 6.5rem',
|
||||
gridAutoFlow: 'column dense',
|
||||
gridAutoColumns: '5rem',
|
||||
gridTemplateColumns: `repeat(auto-fill, minmax(108px, 1fr));`,
|
||||
maxH: 346,
|
||||
}}
|
||||
>
|
||||
{!searchMode && (
|
||||
<>
|
||||
<GridItem sx={{ p: 1.5 }}>
|
||||
<AllImagesBoard isSelected={selectedBoardId === 'images'} />
|
||||
</GridItem>
|
||||
<GridItem sx={{ p: 1.5 }}>
|
||||
<AllAssetsBoard isSelected={selectedBoardId === 'assets'} />
|
||||
</GridItem>
|
||||
<GridItem sx={{ p: 1.5 }}>
|
||||
<NoBoardBoard isSelected={selectedBoardId === 'no_board'} />
|
||||
</GridItem>
|
||||
{isBatchEnabled && (
|
||||
<GridItem sx={{ p: 1.5 }}>
|
||||
<BatchBoard isSelected={selectedBoardId === 'batch'} />
|
||||
</GridItem>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
<GridItem sx={{ p: 1.5 }}>
|
||||
<NoBoardBoard isSelected={selectedBoardId === undefined} />
|
||||
</GridItem>
|
||||
{filteredBoards &&
|
||||
filteredBoards.map((board) => (
|
||||
<GridItem key={board.board_id} sx={{ p: 1.5 }}>
|
||||
|
||||
@@ -10,7 +10,14 @@ import { stateSelector } from 'app/store/store';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
||||
import { setBoardSearchText } from 'features/gallery/store/boardSlice';
|
||||
import { memo } from 'react';
|
||||
import {
|
||||
ChangeEvent,
|
||||
KeyboardEvent,
|
||||
memo,
|
||||
useCallback,
|
||||
useEffect,
|
||||
useRef,
|
||||
} from 'react';
|
||||
|
||||
const selector = createSelector(
|
||||
[stateSelector],
|
||||
@@ -21,32 +28,55 @@ const selector = createSelector(
|
||||
defaultSelectorOptions
|
||||
);
|
||||
|
||||
type Props = {
|
||||
setSearchMode: (searchMode: boolean) => void;
|
||||
};
|
||||
|
||||
const BoardsSearch = (props: Props) => {
|
||||
const { setSearchMode } = props;
|
||||
const BoardsSearch = () => {
|
||||
const dispatch = useAppDispatch();
|
||||
const { searchText } = useAppSelector(selector);
|
||||
const inputRef = useRef<HTMLInputElement>(null);
|
||||
|
||||
const handleBoardSearch = (searchTerm: string) => {
|
||||
setSearchMode(searchTerm.length > 0);
|
||||
dispatch(setBoardSearchText(searchTerm));
|
||||
};
|
||||
const clearBoardSearch = () => {
|
||||
setSearchMode(false);
|
||||
const handleBoardSearch = useCallback(
|
||||
(searchTerm: string) => {
|
||||
dispatch(setBoardSearchText(searchTerm));
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
|
||||
const clearBoardSearch = useCallback(() => {
|
||||
dispatch(setBoardSearchText(''));
|
||||
};
|
||||
}, [dispatch]);
|
||||
|
||||
const handleKeydown = useCallback(
|
||||
(e: KeyboardEvent<HTMLInputElement>) => {
|
||||
// exit search mode on escape
|
||||
if (e.key === 'Escape') {
|
||||
clearBoardSearch();
|
||||
}
|
||||
},
|
||||
[clearBoardSearch]
|
||||
);
|
||||
|
||||
const handleChange = useCallback(
|
||||
(e: ChangeEvent<HTMLInputElement>) => {
|
||||
handleBoardSearch(e.target.value);
|
||||
},
|
||||
[handleBoardSearch]
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
// focus the search box on mount
|
||||
if (!inputRef.current) {
|
||||
return;
|
||||
}
|
||||
inputRef.current.focus();
|
||||
}, []);
|
||||
|
||||
return (
|
||||
<InputGroup>
|
||||
<Input
|
||||
ref={inputRef}
|
||||
placeholder="Search Boards..."
|
||||
value={searchText}
|
||||
onChange={(e) => {
|
||||
handleBoardSearch(e.target.value);
|
||||
}}
|
||||
onKeyDown={handleKeydown}
|
||||
onChange={handleChange}
|
||||
/>
|
||||
{searchText && searchText.length && (
|
||||
<InputRightElement>
|
||||
@@ -55,7 +85,8 @@ const BoardsSearch = (props: Props) => {
|
||||
size="xs"
|
||||
variant="ghost"
|
||||
aria-label="Clear Search"
|
||||
icon={<CloseIcon boxSize={3} />}
|
||||
opacity={0.5}
|
||||
icon={<CloseIcon boxSize={2} />}
|
||||
/>
|
||||
</InputRightElement>
|
||||
)}
|
||||
|
||||
@@ -1,31 +1,37 @@
|
||||
import {
|
||||
Badge,
|
||||
Box,
|
||||
ChakraProps,
|
||||
Editable,
|
||||
EditableInput,
|
||||
EditablePreview,
|
||||
Flex,
|
||||
Icon,
|
||||
Image,
|
||||
MenuItem,
|
||||
MenuList,
|
||||
Text,
|
||||
useColorMode,
|
||||
} from '@chakra-ui/react';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { skipToken } from '@reduxjs/toolkit/dist/query';
|
||||
import { MoveBoardDropData } from 'app/components/ImageDnd/typesafeDnd';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import { ContextMenu } from 'chakra-ui-contextmenu';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
||||
import IAIDroppable from 'common/components/IAIDroppable';
|
||||
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
|
||||
import { boardIdSelected } from 'features/gallery/store/gallerySlice';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { FaTrash, FaUser } from 'react-icons/fa';
|
||||
import { memo, useCallback, useMemo, useState } from 'react';
|
||||
import { FaUser } from 'react-icons/fa';
|
||||
import { useUpdateBoardMutation } from 'services/api/endpoints/boards';
|
||||
import { useGetImageDTOQuery } from 'services/api/endpoints/images';
|
||||
import { useBoardTotal } from 'services/api/hooks/useBoardTotal';
|
||||
import { BoardDTO } from 'services/api/types';
|
||||
import { menuListMotionProps } from 'theme/components/menu';
|
||||
import { mode } from 'theme/util/mode';
|
||||
import AutoAddIcon from '../AutoAddIcon';
|
||||
import BoardContextMenu from '../BoardContextMenu';
|
||||
import SelectionOverlay from 'common/components/SelectionOverlay';
|
||||
|
||||
const BASE_BADGE_STYLES: ChakraProps['sx'] = {
|
||||
bg: 'base.500',
|
||||
color: 'whiteAlpha.900',
|
||||
};
|
||||
interface GalleryBoardProps {
|
||||
board: BoardDTO;
|
||||
isSelected: boolean;
|
||||
@@ -35,13 +41,38 @@ interface GalleryBoardProps {
|
||||
const GalleryBoard = memo(
|
||||
({ board, isSelected, setBoardToDelete }: GalleryBoardProps) => {
|
||||
const dispatch = useAppDispatch();
|
||||
const selector = useMemo(
|
||||
() =>
|
||||
createSelector(
|
||||
stateSelector,
|
||||
({ gallery }) => {
|
||||
const isSelectedForAutoAdd =
|
||||
board.board_id === gallery.autoAddBoardId;
|
||||
|
||||
return { isSelectedForAutoAdd };
|
||||
},
|
||||
defaultSelectorOptions
|
||||
),
|
||||
[board.board_id]
|
||||
);
|
||||
|
||||
const { isSelectedForAutoAdd } = useAppSelector(selector);
|
||||
const [isHovered, setIsHovered] = useState(false);
|
||||
const handleMouseOver = useCallback(() => {
|
||||
setIsHovered(true);
|
||||
}, []);
|
||||
const handleMouseOut = useCallback(() => {
|
||||
setIsHovered(false);
|
||||
}, []);
|
||||
const { currentData: coverImage } = useGetImageDTOQuery(
|
||||
board.cover_image_name ?? skipToken
|
||||
);
|
||||
|
||||
const { colorMode } = useColorMode();
|
||||
const { totalImages, totalAssets } = useBoardTotal(board.board_id);
|
||||
|
||||
const { board_name, board_id } = board;
|
||||
const [localBoardName, setLocalBoardName] = useState(board_name);
|
||||
|
||||
const handleSelectBoard = useCallback(() => {
|
||||
dispatch(boardIdSelected(board_id));
|
||||
}, [board_id, dispatch]);
|
||||
@@ -49,14 +80,6 @@ const GalleryBoard = memo(
|
||||
const [updateBoard, { isLoading: isUpdateBoardLoading }] =
|
||||
useUpdateBoardMutation();
|
||||
|
||||
const handleUpdateBoardName = (newBoardName: string) => {
|
||||
updateBoard({ board_id, changes: { board_name: newBoardName } });
|
||||
};
|
||||
|
||||
const handleDeleteBoard = useCallback(() => {
|
||||
setBoardToDelete(board);
|
||||
}, [board, setBoardToDelete]);
|
||||
|
||||
const droppableData: MoveBoardDropData = useMemo(
|
||||
() => ({
|
||||
id: board_id,
|
||||
@@ -66,87 +89,115 @@ const GalleryBoard = memo(
|
||||
[board_id]
|
||||
);
|
||||
|
||||
const handleSubmit = useCallback(
|
||||
async (newBoardName: string) => {
|
||||
// empty strings are not allowed
|
||||
if (!newBoardName.trim()) {
|
||||
setLocalBoardName(board_name);
|
||||
return;
|
||||
}
|
||||
|
||||
// don't update the board name if it hasn't changed
|
||||
if (newBoardName === board_name) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const { board_name } = await updateBoard({
|
||||
board_id,
|
||||
changes: { board_name: newBoardName },
|
||||
}).unwrap();
|
||||
|
||||
// update local state
|
||||
setLocalBoardName(board_name);
|
||||
} catch {
|
||||
// revert on error
|
||||
setLocalBoardName(board_name);
|
||||
}
|
||||
},
|
||||
[board_id, board_name, updateBoard]
|
||||
);
|
||||
|
||||
const handleChange = useCallback((newBoardName: string) => {
|
||||
setLocalBoardName(newBoardName);
|
||||
}, []);
|
||||
|
||||
return (
|
||||
<Box sx={{ touchAction: 'none', height: 'full' }}>
|
||||
<ContextMenu<HTMLDivElement>
|
||||
menuProps={{ size: 'sm', isLazy: true }}
|
||||
menuButtonProps={{
|
||||
bg: 'transparent',
|
||||
_hover: { bg: 'transparent' },
|
||||
<Box
|
||||
sx={{ w: 'full', h: 'full', touchAction: 'none', userSelect: 'none' }}
|
||||
>
|
||||
<Flex
|
||||
onMouseOver={handleMouseOver}
|
||||
onMouseOut={handleMouseOut}
|
||||
sx={{
|
||||
position: 'relative',
|
||||
justifyContent: 'center',
|
||||
alignItems: 'center',
|
||||
aspectRatio: '1/1',
|
||||
w: 'full',
|
||||
h: 'full',
|
||||
}}
|
||||
renderMenu={() => (
|
||||
<MenuList
|
||||
sx={{ visibility: 'visible !important' }}
|
||||
motionProps={menuListMotionProps}
|
||||
>
|
||||
{board.image_count > 0 && (
|
||||
<>
|
||||
{/* <MenuItem
|
||||
isDisabled={!board.image_count}
|
||||
icon={<FaImages />}
|
||||
onClickCapture={handleAddBoardToBatch}
|
||||
>
|
||||
Add Board to Batch
|
||||
</MenuItem> */}
|
||||
</>
|
||||
)}
|
||||
<MenuItem
|
||||
sx={{ color: 'error.600', _dark: { color: 'error.300' } }}
|
||||
icon={<FaTrash />}
|
||||
onClickCapture={handleDeleteBoard}
|
||||
>
|
||||
Delete Board
|
||||
</MenuItem>
|
||||
</MenuList>
|
||||
)}
|
||||
>
|
||||
{(ref) => (
|
||||
<Flex
|
||||
key={board_id}
|
||||
userSelect="none"
|
||||
ref={ref}
|
||||
sx={{
|
||||
flexDir: 'column',
|
||||
justifyContent: 'space-between',
|
||||
alignItems: 'center',
|
||||
cursor: 'pointer',
|
||||
w: 'full',
|
||||
h: 'full',
|
||||
}}
|
||||
>
|
||||
<BoardContextMenu
|
||||
board={board}
|
||||
board_id={board_id}
|
||||
setBoardToDelete={setBoardToDelete}
|
||||
>
|
||||
{(ref) => (
|
||||
<Flex
|
||||
ref={ref}
|
||||
onClick={handleSelectBoard}
|
||||
sx={{
|
||||
w: 'full',
|
||||
h: 'full',
|
||||
position: 'relative',
|
||||
justifyContent: 'center',
|
||||
alignItems: 'center',
|
||||
borderRadius: 'base',
|
||||
w: 'full',
|
||||
aspectRatio: '1/1',
|
||||
overflow: 'hidden',
|
||||
shadow: isSelected ? 'selected.light' : undefined,
|
||||
_dark: { shadow: isSelected ? 'selected.dark' : undefined },
|
||||
flexShrink: 0,
|
||||
cursor: 'pointer',
|
||||
bg: 'base.200',
|
||||
_dark: {
|
||||
bg: 'base.800',
|
||||
},
|
||||
}}
|
||||
>
|
||||
{board.cover_image_name && coverImage?.thumbnail_url && (
|
||||
<Image src={coverImage?.thumbnail_url} draggable={false} />
|
||||
)}
|
||||
{!(board.cover_image_name && coverImage?.thumbnail_url) && (
|
||||
<IAINoContentFallback
|
||||
boxSize={8}
|
||||
icon={FaUser}
|
||||
{coverImage?.thumbnail_url ? (
|
||||
<Image
|
||||
src={coverImage?.thumbnail_url}
|
||||
draggable={false}
|
||||
sx={{
|
||||
borderWidth: '2px',
|
||||
borderStyle: 'solid',
|
||||
borderColor: 'base.200',
|
||||
_dark: {
|
||||
borderColor: 'base.800',
|
||||
},
|
||||
objectFit: 'cover',
|
||||
w: 'full',
|
||||
h: 'full',
|
||||
maxH: 'full',
|
||||
borderRadius: 'base',
|
||||
borderBottomRadius: 'lg',
|
||||
}}
|
||||
/>
|
||||
) : (
|
||||
<Flex
|
||||
sx={{
|
||||
w: 'full',
|
||||
h: 'full',
|
||||
justifyContent: 'center',
|
||||
alignItems: 'center',
|
||||
}}
|
||||
>
|
||||
<Icon
|
||||
boxSize={12}
|
||||
as={FaUser}
|
||||
sx={{
|
||||
mt: -6,
|
||||
opacity: 0.7,
|
||||
color: 'base.500',
|
||||
_dark: {
|
||||
color: 'base.500',
|
||||
},
|
||||
}}
|
||||
/>
|
||||
</Flex>
|
||||
)}
|
||||
<Flex
|
||||
{/* <Flex
|
||||
sx={{
|
||||
position: 'absolute',
|
||||
insetInlineEnd: 0,
|
||||
@@ -154,58 +205,78 @@ const GalleryBoard = memo(
|
||||
p: 1,
|
||||
}}
|
||||
>
|
||||
<Badge variant="solid">{board.image_count}</Badge>
|
||||
<Badge variant="solid" sx={BASE_BADGE_STYLES}>
|
||||
{totalImages}/{totalAssets}
|
||||
</Badge>
|
||||
</Flex> */}
|
||||
{isSelectedForAutoAdd && <AutoAddIcon />}
|
||||
<SelectionOverlay
|
||||
isSelected={isSelected}
|
||||
isHovered={isHovered}
|
||||
/>
|
||||
<Flex
|
||||
sx={{
|
||||
position: 'absolute',
|
||||
bottom: 0,
|
||||
left: 0,
|
||||
p: 1,
|
||||
justifyContent: 'center',
|
||||
alignItems: 'center',
|
||||
w: 'full',
|
||||
maxW: 'full',
|
||||
borderBottomRadius: 'base',
|
||||
bg: isSelected ? 'accent.400' : 'base.500',
|
||||
color: isSelected ? 'base.50' : 'base.100',
|
||||
_dark: {
|
||||
bg: isSelected ? 'accent.500' : 'base.600',
|
||||
color: isSelected ? 'base.50' : 'base.100',
|
||||
},
|
||||
lineHeight: 'short',
|
||||
fontSize: 'xs',
|
||||
}}
|
||||
>
|
||||
<Editable
|
||||
value={localBoardName}
|
||||
isDisabled={isUpdateBoardLoading}
|
||||
submitOnBlur={true}
|
||||
onChange={handleChange}
|
||||
onSubmit={handleSubmit}
|
||||
sx={{
|
||||
w: 'full',
|
||||
}}
|
||||
>
|
||||
<EditablePreview
|
||||
sx={{
|
||||
p: 0,
|
||||
fontWeight: isSelected ? 700 : 500,
|
||||
textAlign: 'center',
|
||||
overflow: 'hidden',
|
||||
textOverflow: 'ellipsis',
|
||||
}}
|
||||
noOfLines={1}
|
||||
/>
|
||||
<EditableInput
|
||||
sx={{
|
||||
p: 0,
|
||||
_focusVisible: {
|
||||
p: 0,
|
||||
textAlign: 'center',
|
||||
// get rid of the edit border
|
||||
boxShadow: 'none',
|
||||
},
|
||||
}}
|
||||
/>
|
||||
</Editable>
|
||||
</Flex>
|
||||
|
||||
<IAIDroppable
|
||||
data={droppableData}
|
||||
dropLabel={<Text fontSize="md">Move</Text>}
|
||||
/>
|
||||
</Flex>
|
||||
|
||||
<Flex
|
||||
sx={{
|
||||
width: 'full',
|
||||
height: 'full',
|
||||
justifyContent: 'center',
|
||||
alignItems: 'center',
|
||||
}}
|
||||
>
|
||||
<Editable
|
||||
defaultValue={board_name}
|
||||
submitOnBlur={false}
|
||||
onSubmit={(nextValue) => {
|
||||
handleUpdateBoardName(nextValue);
|
||||
}}
|
||||
sx={{ maxW: 'full' }}
|
||||
>
|
||||
<EditablePreview
|
||||
sx={{
|
||||
color: isSelected
|
||||
? mode('base.900', 'base.50')(colorMode)
|
||||
: mode('base.700', 'base.200')(colorMode),
|
||||
fontWeight: isSelected ? 600 : undefined,
|
||||
fontSize: 'xs',
|
||||
textAlign: 'center',
|
||||
p: 0,
|
||||
overflow: 'hidden',
|
||||
textOverflow: 'ellipsis',
|
||||
}}
|
||||
noOfLines={1}
|
||||
/>
|
||||
<EditableInput
|
||||
sx={{
|
||||
color: mode('base.900', 'base.50')(colorMode),
|
||||
fontSize: 'xs',
|
||||
borderColor: mode('base.500', 'base.500')(colorMode),
|
||||
p: 0,
|
||||
outline: 0,
|
||||
}}
|
||||
/>
|
||||
</Editable>
|
||||
</Flex>
|
||||
</Flex>
|
||||
)}
|
||||
</ContextMenu>
|
||||
)}
|
||||
</BoardContextMenu>
|
||||
</Flex>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -2,9 +2,12 @@ import { As, Badge, Flex } from '@chakra-ui/react';
|
||||
import { TypesafeDroppableData } from 'app/components/ImageDnd/typesafeDnd';
|
||||
import IAIDroppable from 'common/components/IAIDroppable';
|
||||
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
|
||||
import { BoardId } from 'features/gallery/store/gallerySlice';
|
||||
import { ReactNode } from 'react';
|
||||
import BoardContextMenu from '../BoardContextMenu';
|
||||
|
||||
type GenericBoardProps = {
|
||||
board_id: BoardId;
|
||||
droppableData?: TypesafeDroppableData;
|
||||
onClick: () => void;
|
||||
isSelected: boolean;
|
||||
@@ -14,7 +17,7 @@ type GenericBoardProps = {
|
||||
badgeCount?: number;
|
||||
};
|
||||
|
||||
const formatBadgeCount = (count: number) =>
|
||||
export const formatBadgeCount = (count: number) =>
|
||||
Intl.NumberFormat('en-US', {
|
||||
notation: 'compact',
|
||||
maximumFractionDigits: 1,
|
||||
@@ -22,6 +25,7 @@ const formatBadgeCount = (count: number) =>
|
||||
|
||||
const GenericBoard = (props: GenericBoardProps) => {
|
||||
const {
|
||||
board_id,
|
||||
droppableData,
|
||||
onClick,
|
||||
isSelected,
|
||||
@@ -32,67 +36,72 @@ const GenericBoard = (props: GenericBoardProps) => {
|
||||
} = props;
|
||||
|
||||
return (
|
||||
<Flex
|
||||
sx={{
|
||||
flexDir: 'column',
|
||||
justifyContent: 'space-between',
|
||||
alignItems: 'center',
|
||||
cursor: 'pointer',
|
||||
w: 'full',
|
||||
h: 'full',
|
||||
borderRadius: 'base',
|
||||
}}
|
||||
>
|
||||
<Flex
|
||||
onClick={onClick}
|
||||
sx={{
|
||||
position: 'relative',
|
||||
justifyContent: 'center',
|
||||
alignItems: 'center',
|
||||
borderRadius: 'base',
|
||||
w: 'full',
|
||||
aspectRatio: '1/1',
|
||||
overflow: 'hidden',
|
||||
shadow: isSelected ? 'selected.light' : undefined,
|
||||
_dark: { shadow: isSelected ? 'selected.dark' : undefined },
|
||||
flexShrink: 0,
|
||||
}}
|
||||
>
|
||||
<IAINoContentFallback
|
||||
boxSize={8}
|
||||
icon={icon}
|
||||
sx={{
|
||||
border: '2px solid var(--invokeai-colors-base-200)',
|
||||
_dark: { border: '2px solid var(--invokeai-colors-base-800)' },
|
||||
}}
|
||||
/>
|
||||
<BoardContextMenu board_id={board_id}>
|
||||
{(ref) => (
|
||||
<Flex
|
||||
ref={ref}
|
||||
sx={{
|
||||
position: 'absolute',
|
||||
insetInlineEnd: 0,
|
||||
top: 0,
|
||||
p: 1,
|
||||
flexDir: 'column',
|
||||
justifyContent: 'space-between',
|
||||
alignItems: 'center',
|
||||
cursor: 'pointer',
|
||||
w: 'full',
|
||||
h: 'full',
|
||||
borderRadius: 'base',
|
||||
}}
|
||||
>
|
||||
{badgeCount !== undefined && (
|
||||
<Badge variant="solid">{formatBadgeCount(badgeCount)}</Badge>
|
||||
)}
|
||||
<Flex
|
||||
onClick={onClick}
|
||||
sx={{
|
||||
position: 'relative',
|
||||
justifyContent: 'center',
|
||||
alignItems: 'center',
|
||||
borderRadius: 'base',
|
||||
w: 'full',
|
||||
aspectRatio: '1/1',
|
||||
overflow: 'hidden',
|
||||
shadow: isSelected ? 'selected.light' : undefined,
|
||||
_dark: { shadow: isSelected ? 'selected.dark' : undefined },
|
||||
flexShrink: 0,
|
||||
}}
|
||||
>
|
||||
<IAINoContentFallback
|
||||
boxSize={8}
|
||||
icon={icon}
|
||||
sx={{
|
||||
border: '2px solid var(--invokeai-colors-base-200)',
|
||||
_dark: { border: '2px solid var(--invokeai-colors-base-800)' },
|
||||
}}
|
||||
/>
|
||||
<Flex
|
||||
sx={{
|
||||
position: 'absolute',
|
||||
insetInlineEnd: 0,
|
||||
top: 0,
|
||||
p: 1,
|
||||
}}
|
||||
>
|
||||
{badgeCount !== undefined && (
|
||||
<Badge variant="solid">{formatBadgeCount(badgeCount)}</Badge>
|
||||
)}
|
||||
</Flex>
|
||||
<IAIDroppable data={droppableData} dropLabel={dropLabel} />
|
||||
</Flex>
|
||||
<Flex
|
||||
sx={{
|
||||
h: 'full',
|
||||
alignItems: 'center',
|
||||
fontWeight: isSelected ? 600 : undefined,
|
||||
fontSize: 'sm',
|
||||
color: isSelected ? 'base.900' : 'base.700',
|
||||
_dark: { color: isSelected ? 'base.50' : 'base.200' },
|
||||
}}
|
||||
>
|
||||
{label}
|
||||
</Flex>
|
||||
</Flex>
|
||||
<IAIDroppable data={droppableData} dropLabel={dropLabel} />
|
||||
</Flex>
|
||||
<Flex
|
||||
sx={{
|
||||
h: 'full',
|
||||
alignItems: 'center',
|
||||
fontWeight: isSelected ? 600 : undefined,
|
||||
fontSize: 'xs',
|
||||
color: isSelected ? 'base.900' : 'base.700',
|
||||
_dark: { color: isSelected ? 'base.50' : 'base.200' },
|
||||
}}
|
||||
>
|
||||
{label}
|
||||
</Flex>
|
||||
</Flex>
|
||||
)}
|
||||
</BoardContextMenu>
|
||||
);
|
||||
};
|
||||
|
||||
|
||||
@@ -1,53 +1,179 @@
|
||||
import { Text } from '@chakra-ui/react';
|
||||
import { Box, ChakraProps, Flex, Image, Text } from '@chakra-ui/react';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { MoveBoardDropData } from 'app/components/ImageDnd/typesafeDnd';
|
||||
import {
|
||||
INITIAL_IMAGE_LIMIT,
|
||||
boardIdSelected,
|
||||
} from 'features/gallery/store/gallerySlice';
|
||||
import { FaFolderOpen } from 'react-icons/fa';
|
||||
import { useDispatch } from 'react-redux';
|
||||
import {
|
||||
ListImagesArgs,
|
||||
useListImagesQuery,
|
||||
} from 'services/api/endpoints/images';
|
||||
import GenericBoard from './GenericBoard';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
||||
import InvokeAILogoImage from 'assets/images/logo.png';
|
||||
import IAIDroppable from 'common/components/IAIDroppable';
|
||||
import SelectionOverlay from 'common/components/SelectionOverlay';
|
||||
import { boardIdSelected } from 'features/gallery/store/gallerySlice';
|
||||
import { memo, useCallback, useMemo, useState } from 'react';
|
||||
import { useBoardName } from 'services/api/hooks/useBoardName';
|
||||
import { useBoardTotal } from 'services/api/hooks/useBoardTotal';
|
||||
import AutoAddIcon from '../AutoAddIcon';
|
||||
import BoardContextMenu from '../BoardContextMenu';
|
||||
|
||||
const baseQueryArg: ListImagesArgs = {
|
||||
board_id: 'none',
|
||||
offset: 0,
|
||||
limit: INITIAL_IMAGE_LIMIT,
|
||||
is_intermediate: false,
|
||||
const BASE_BADGE_STYLES: ChakraProps['sx'] = {
|
||||
bg: 'base.500',
|
||||
color: 'whiteAlpha.900',
|
||||
};
|
||||
interface Props {
|
||||
isSelected: boolean;
|
||||
}
|
||||
|
||||
const NoBoardBoard = ({ isSelected }: { isSelected: boolean }) => {
|
||||
const dispatch = useDispatch();
|
||||
const selector = createSelector(
|
||||
stateSelector,
|
||||
({ gallery }) => {
|
||||
const { autoAddBoardId } = gallery;
|
||||
return { autoAddBoardId };
|
||||
},
|
||||
defaultSelectorOptions
|
||||
);
|
||||
|
||||
const handleClick = () => {
|
||||
dispatch(boardIdSelected('no_board'));
|
||||
};
|
||||
const NoBoardBoard = memo(({ isSelected }: Props) => {
|
||||
const dispatch = useAppDispatch();
|
||||
const { totalImages, totalAssets } = useBoardTotal(undefined);
|
||||
const { autoAddBoardId } = useAppSelector(selector);
|
||||
const boardName = useBoardName(undefined);
|
||||
const handleSelectBoard = useCallback(() => {
|
||||
dispatch(boardIdSelected(undefined));
|
||||
}, [dispatch]);
|
||||
const [isHovered, setIsHovered] = useState(false);
|
||||
const handleMouseOver = useCallback(() => {
|
||||
setIsHovered(true);
|
||||
}, []);
|
||||
const handleMouseOut = useCallback(() => {
|
||||
setIsHovered(false);
|
||||
}, []);
|
||||
|
||||
const { total } = useListImagesQuery(baseQueryArg, {
|
||||
selectFromResult: ({ data }) => ({ total: data?.total ?? 0 }),
|
||||
});
|
||||
|
||||
// TODO: Do we support making 'images' 'assets? if yes, we need to handle this
|
||||
const droppableData: MoveBoardDropData = {
|
||||
id: 'all-images-board',
|
||||
actionType: 'MOVE_BOARD',
|
||||
context: { boardId: 'no_board' },
|
||||
};
|
||||
const droppableData: MoveBoardDropData = useMemo(
|
||||
() => ({
|
||||
id: 'no_board',
|
||||
actionType: 'MOVE_BOARD',
|
||||
context: { boardId: undefined },
|
||||
}),
|
||||
[]
|
||||
);
|
||||
|
||||
return (
|
||||
<GenericBoard
|
||||
droppableData={droppableData}
|
||||
dropLabel={<Text fontSize="md">Move</Text>}
|
||||
onClick={handleClick}
|
||||
isSelected={isSelected}
|
||||
icon={FaFolderOpen}
|
||||
label="No Board"
|
||||
badgeCount={total}
|
||||
/>
|
||||
<Box sx={{ w: 'full', h: 'full', touchAction: 'none', userSelect: 'none' }}>
|
||||
<Flex
|
||||
onMouseOver={handleMouseOver}
|
||||
onMouseOut={handleMouseOut}
|
||||
sx={{
|
||||
position: 'relative',
|
||||
justifyContent: 'center',
|
||||
alignItems: 'center',
|
||||
aspectRatio: '1/1',
|
||||
borderRadius: 'base',
|
||||
w: 'full',
|
||||
h: 'full',
|
||||
}}
|
||||
>
|
||||
<BoardContextMenu>
|
||||
{(ref) => (
|
||||
<Flex
|
||||
ref={ref}
|
||||
onClick={handleSelectBoard}
|
||||
sx={{
|
||||
w: 'full',
|
||||
h: 'full',
|
||||
position: 'relative',
|
||||
justifyContent: 'center',
|
||||
alignItems: 'center',
|
||||
borderRadius: 'base',
|
||||
cursor: 'pointer',
|
||||
bg: 'base.200',
|
||||
_dark: {
|
||||
bg: 'base.800',
|
||||
},
|
||||
}}
|
||||
>
|
||||
<Flex
|
||||
sx={{
|
||||
w: 'full',
|
||||
h: 'full',
|
||||
justifyContent: 'center',
|
||||
alignItems: 'center',
|
||||
}}
|
||||
>
|
||||
{/* <Icon
|
||||
boxSize={12}
|
||||
as={FaBucket}
|
||||
sx={{
|
||||
opacity: 0.7,
|
||||
color: 'base.500',
|
||||
_dark: {
|
||||
color: 'base.500',
|
||||
},
|
||||
}}
|
||||
/> */}
|
||||
<Image
|
||||
src={InvokeAILogoImage}
|
||||
alt="invoke-ai-logo"
|
||||
sx={{
|
||||
opacity: 0.4,
|
||||
filter: 'grayscale(1)',
|
||||
mt: -6,
|
||||
w: 16,
|
||||
h: 16,
|
||||
minW: 16,
|
||||
minH: 16,
|
||||
userSelect: 'none',
|
||||
}}
|
||||
/>
|
||||
</Flex>
|
||||
{/* <Flex
|
||||
sx={{
|
||||
position: 'absolute',
|
||||
insetInlineEnd: 0,
|
||||
top: 0,
|
||||
p: 1,
|
||||
}}
|
||||
>
|
||||
<Badge variant="solid" sx={BASE_BADGE_STYLES}>
|
||||
{totalImages}/{totalAssets}
|
||||
</Badge>
|
||||
</Flex> */}
|
||||
{!autoAddBoardId && <AutoAddIcon />}
|
||||
<Flex
|
||||
sx={{
|
||||
position: 'absolute',
|
||||
bottom: 0,
|
||||
left: 0,
|
||||
p: 1,
|
||||
justifyContent: 'center',
|
||||
alignItems: 'center',
|
||||
w: 'full',
|
||||
maxW: 'full',
|
||||
borderBottomRadius: 'base',
|
||||
bg: isSelected ? 'accent.400' : 'base.500',
|
||||
color: isSelected ? 'base.50' : 'base.100',
|
||||
_dark: {
|
||||
bg: isSelected ? 'accent.500' : 'base.600',
|
||||
color: isSelected ? 'base.50' : 'base.100',
|
||||
},
|
||||
lineHeight: 'short',
|
||||
fontSize: 'xs',
|
||||
fontWeight: isSelected ? 700 : 500,
|
||||
}}
|
||||
>
|
||||
{boardName}
|
||||
</Flex>
|
||||
<SelectionOverlay isSelected={isSelected} isHovered={isHovered} />
|
||||
<IAIDroppable
|
||||
data={droppableData}
|
||||
dropLabel={<Text fontSize="md">Move</Text>}
|
||||
/>
|
||||
</Flex>
|
||||
)}
|
||||
</BoardContextMenu>
|
||||
</Flex>
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
});
|
||||
|
||||
NoBoardBoard.displayName = 'HoverableBoard';
|
||||
|
||||
export default NoBoardBoard;
|
||||
|
||||
@@ -0,0 +1,53 @@
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
||||
import IAIButton from 'common/components/IAIButton';
|
||||
import { boardIdSelected } from 'features/gallery/store/gallerySlice';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { useBoardName } from 'services/api/hooks/useBoardName';
|
||||
|
||||
type Props = {
|
||||
board_id: 'images' | 'assets' | 'no_board';
|
||||
};
|
||||
|
||||
const SystemBoardButton = ({ board_id }: Props) => {
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
const selector = useMemo(
|
||||
() =>
|
||||
createSelector(
|
||||
[stateSelector],
|
||||
({ gallery }) => {
|
||||
const { selectedBoardId } = gallery;
|
||||
return { isSelected: selectedBoardId === board_id };
|
||||
},
|
||||
defaultSelectorOptions
|
||||
),
|
||||
[board_id]
|
||||
);
|
||||
|
||||
const { isSelected } = useAppSelector(selector);
|
||||
|
||||
const boardName = useBoardName(board_id);
|
||||
|
||||
const handleClick = useCallback(() => {
|
||||
dispatch(boardIdSelected(board_id));
|
||||
}, [board_id, dispatch]);
|
||||
|
||||
return (
|
||||
<IAIButton
|
||||
onClick={handleClick}
|
||||
size="sm"
|
||||
isChecked={isSelected}
|
||||
sx={{
|
||||
flexGrow: 1,
|
||||
borderRadius: 'base',
|
||||
}}
|
||||
>
|
||||
{boardName}
|
||||
</IAIButton>
|
||||
);
|
||||
};
|
||||
|
||||
export default memo(SystemBoardButton);
|
||||
@@ -0,0 +1,78 @@
|
||||
import { MenuItem } from '@chakra-ui/react';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
||||
import { autoAddBoardIdChanged } from 'features/gallery/store/gallerySlice';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { FaPlus, FaTrash } from 'react-icons/fa';
|
||||
import { BoardDTO } from 'services/api/types';
|
||||
|
||||
type Props = {
|
||||
board: BoardDTO;
|
||||
setBoardToDelete?: (board?: BoardDTO) => void;
|
||||
};
|
||||
|
||||
const GalleryBoardContextMenuItems = ({ board, setBoardToDelete }: Props) => {
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
const selector = useMemo(
|
||||
() =>
|
||||
createSelector(
|
||||
stateSelector,
|
||||
({ gallery }) => {
|
||||
const isSelectedForAutoAdd =
|
||||
board.board_id === gallery.autoAddBoardId;
|
||||
|
||||
return { isSelectedForAutoAdd };
|
||||
},
|
||||
defaultSelectorOptions
|
||||
),
|
||||
[board.board_id]
|
||||
);
|
||||
|
||||
const { isSelectedForAutoAdd } = useAppSelector(selector);
|
||||
|
||||
const handleDelete = useCallback(() => {
|
||||
if (!setBoardToDelete) {
|
||||
return;
|
||||
}
|
||||
setBoardToDelete(board);
|
||||
}, [board, setBoardToDelete]);
|
||||
|
||||
const handleToggleAutoAdd = useCallback(() => {
|
||||
dispatch(
|
||||
autoAddBoardIdChanged(isSelectedForAutoAdd ? undefined : board.board_id)
|
||||
);
|
||||
}, [board.board_id, dispatch, isSelectedForAutoAdd]);
|
||||
|
||||
return (
|
||||
<>
|
||||
{board.image_count > 0 && (
|
||||
<>
|
||||
{/* <MenuItem
|
||||
isDisabled={!board.image_count}
|
||||
icon={<FaImages />}
|
||||
onClickCapture={handleAddBoardToBatch}
|
||||
>
|
||||
Add Board to Batch
|
||||
</MenuItem> */}
|
||||
</>
|
||||
)}
|
||||
{/* {!isSelectedForAutoAdd && (
|
||||
<MenuItem icon={<FaPlus />} onClick={handleToggleAutoAdd}>
|
||||
Auto-add to this Board
|
||||
</MenuItem>
|
||||
)} */}
|
||||
<MenuItem
|
||||
sx={{ color: 'error.600', _dark: { color: 'error.300' } }}
|
||||
icon={<FaTrash />}
|
||||
onClick={handleDelete}
|
||||
>
|
||||
Delete Board
|
||||
</MenuItem>
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
export default memo(GalleryBoardContextMenuItems);
|
||||
@@ -0,0 +1,28 @@
|
||||
import { MenuItem } from '@chakra-ui/react';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { autoAddBoardIdChanged } from 'features/gallery/store/gallerySlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { FaPlus } from 'react-icons/fa';
|
||||
|
||||
const NoBoardContextMenuItems = () => {
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
const autoAddBoardId = useAppSelector(
|
||||
(state) => state.gallery.autoAddBoardId
|
||||
);
|
||||
const handleDisableAutoAdd = useCallback(() => {
|
||||
dispatch(autoAddBoardIdChanged(undefined));
|
||||
}, [dispatch]);
|
||||
|
||||
return (
|
||||
<>
|
||||
{/* {autoAddBoardId && (
|
||||
<MenuItem icon={<FaPlus />} onClick={handleDisableAutoAdd}>
|
||||
Auto-add to this Board
|
||||
</MenuItem>
|
||||
)} */}
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
export default memo(NoBoardContextMenuItems);
|
||||
@@ -4,17 +4,15 @@ import { createSelector } from '@reduxjs/toolkit';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
||||
import { memo } from 'react';
|
||||
import { useListAllBoardsQuery } from 'services/api/endpoints/boards';
|
||||
import { memo, useMemo } from 'react';
|
||||
import { useBoardName } from 'services/api/hooks/useBoardName';
|
||||
|
||||
const selector = createSelector(
|
||||
[stateSelector],
|
||||
(state) => {
|
||||
const { selectedBoardId } = state.gallery;
|
||||
|
||||
return {
|
||||
selectedBoardId,
|
||||
};
|
||||
return { selectedBoardId };
|
||||
},
|
||||
defaultSelectorOptions
|
||||
);
|
||||
@@ -27,55 +25,64 @@ type Props = {
|
||||
const GalleryBoardName = (props: Props) => {
|
||||
const { isOpen, onToggle } = props;
|
||||
const { selectedBoardId } = useAppSelector(selector);
|
||||
const { selectedBoardName } = useListAllBoardsQuery(undefined, {
|
||||
selectFromResult: ({ data }) => {
|
||||
let selectedBoardName = '';
|
||||
if (selectedBoardId === 'images') {
|
||||
selectedBoardName = 'All Images';
|
||||
} else if (selectedBoardId === 'assets') {
|
||||
selectedBoardName = 'All Assets';
|
||||
} else if (selectedBoardId === 'no_board') {
|
||||
selectedBoardName = 'No Board';
|
||||
} else if (selectedBoardId === 'batch') {
|
||||
selectedBoardName = 'Batch';
|
||||
} else {
|
||||
const selectedBoard = data?.find((b) => b.board_id === selectedBoardId);
|
||||
selectedBoardName = selectedBoard?.board_name || 'Unknown Board';
|
||||
}
|
||||
const boardName = useBoardName(selectedBoardId);
|
||||
// const { totalImages, totalAssets } = useBoardTotal(selectedBoardId);
|
||||
|
||||
return { selectedBoardName };
|
||||
},
|
||||
});
|
||||
const formattedBoardName = useMemo(() => {
|
||||
if (boardName.length > 20) {
|
||||
return `${boardName.substring(0, 20)}...`;
|
||||
}
|
||||
return boardName;
|
||||
// if (!boardName) {
|
||||
// return '';
|
||||
// }
|
||||
|
||||
// if (boardName && (totalImages === undefined || totalAssets === undefined)) {
|
||||
// return boardName;
|
||||
// }
|
||||
|
||||
// const count = `${totalImages}/${totalAssets}`;
|
||||
|
||||
// if (boardName.length > 20) {
|
||||
// return `${boardName.substring(0, 20)}... (${count})`;
|
||||
// }
|
||||
// return `${boardName} (${count})`;
|
||||
}, [boardName]);
|
||||
|
||||
return (
|
||||
<Flex
|
||||
as={Button}
|
||||
onClick={onToggle}
|
||||
size="sm"
|
||||
variant="ghost"
|
||||
// variant="ghost"
|
||||
sx={{
|
||||
position: 'relative',
|
||||
gap: 2,
|
||||
w: 'full',
|
||||
justifyContent: 'center',
|
||||
justifyContent: 'space-between',
|
||||
alignItems: 'center',
|
||||
px: 2,
|
||||
_hover: {
|
||||
bg: 'base.100',
|
||||
_dark: { bg: 'base.800' },
|
||||
},
|
||||
// bg: 'base.100',
|
||||
// _dark: { bg: 'base.800' },
|
||||
// _hover: {
|
||||
// bg: 'base.200',
|
||||
// _dark: { bg: 'base.700' },
|
||||
// },
|
||||
}}
|
||||
>
|
||||
<Text
|
||||
noOfLines={1}
|
||||
sx={{
|
||||
w: 'full',
|
||||
fontWeight: 600,
|
||||
w: '100%',
|
||||
textAlign: 'center',
|
||||
color: 'base.800',
|
||||
_dark: {
|
||||
color: 'base.200',
|
||||
},
|
||||
}}
|
||||
>
|
||||
{selectedBoardName}
|
||||
{formattedBoardName}
|
||||
</Text>
|
||||
<ChevronUpIcon
|
||||
sx={{
|
||||
|
||||
@@ -109,7 +109,7 @@ const GalleryDrawer = () => {
|
||||
isResizable={true}
|
||||
isOpen={shouldShowGallery}
|
||||
onClose={handleCloseGallery}
|
||||
minWidth={337}
|
||||
minWidth={400}
|
||||
>
|
||||
<ImageGalleryContent />
|
||||
</ResizableDrawer>
|
||||
|
||||
@@ -1,19 +1,20 @@
|
||||
import { Flex } from '@chakra-ui/react';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
||||
import IAIIconButton from 'common/components/IAIIconButton';
|
||||
import IAIPopover from 'common/components/IAIPopover';
|
||||
import IAISimpleCheckbox from 'common/components/IAISimpleCheckbox';
|
||||
import IAISlider from 'common/components/IAISlider';
|
||||
import { setGalleryImageMinimumWidth } from 'features/gallery/store/gallerySlice';
|
||||
|
||||
import {
|
||||
setGalleryImageMinimumWidth,
|
||||
shouldAutoSwitchChanged,
|
||||
} from 'features/gallery/store/gallerySlice';
|
||||
import { ChangeEvent } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { FaWrench } from 'react-icons/fa';
|
||||
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
||||
import { shouldAutoSwitchChanged } from 'features/gallery/store/gallerySlice';
|
||||
import BoardAutoAddSelect from './Boards/BoardAutoAddSelect';
|
||||
|
||||
const selector = createSelector(
|
||||
[stateSelector],
|
||||
@@ -50,7 +51,7 @@ const GallerySettingsPopover = () => {
|
||||
/>
|
||||
}
|
||||
>
|
||||
<Flex direction="column" gap={2}>
|
||||
<Flex direction="column" gap={4}>
|
||||
<IAISlider
|
||||
value={galleryImageMinimumWidth}
|
||||
onChange={handleChangeGalleryImageMinimumWidth}
|
||||
@@ -68,6 +69,7 @@ const GallerySettingsPopover = () => {
|
||||
dispatch(shouldAutoSwitchChanged(e.target.checked))
|
||||
}
|
||||
/>
|
||||
<BoardAutoAddSelect />
|
||||
</Flex>
|
||||
</IAIPopover>
|
||||
);
|
||||
|
||||
@@ -35,6 +35,8 @@ import {
|
||||
import { ImageDTO } from 'services/api/types';
|
||||
import { AddImageToBoardContext } from '../../../../app/contexts/AddImageToBoardContext';
|
||||
import { sentImageToCanvas, sentImageToImg2Img } from '../../store/actions';
|
||||
import { useDebounce } from 'use-debounce';
|
||||
import { skipToken } from '@reduxjs/toolkit/dist/query';
|
||||
|
||||
type SingleSelectionMenuItemsProps = {
|
||||
imageDTO: ImageDTO;
|
||||
@@ -70,7 +72,16 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
|
||||
|
||||
const { onClickAddToBoard } = useContext(AddImageToBoardContext);
|
||||
|
||||
const { currentData } = useGetImageMetadataQuery(imageDTO.image_name);
|
||||
const [debouncedMetadataQueryArg, debounceState] = useDebounce(
|
||||
imageDTO.image_name,
|
||||
500
|
||||
);
|
||||
|
||||
const { currentData } = useGetImageMetadataQuery(
|
||||
debounceState.isPending()
|
||||
? skipToken
|
||||
: debouncedMetadataQueryArg ?? skipToken
|
||||
);
|
||||
|
||||
const { isClipboardAPIAvailable, copyImageToClipboard } =
|
||||
useCopyImageToClipboard();
|
||||
|
||||
@@ -1,23 +1,38 @@
|
||||
import { Box, Flex, VStack, useDisclosure } from '@chakra-ui/react';
|
||||
import {
|
||||
Box,
|
||||
Button,
|
||||
ButtonGroup,
|
||||
Flex,
|
||||
Spacer,
|
||||
Tab,
|
||||
TabList,
|
||||
Tabs,
|
||||
VStack,
|
||||
useDisclosure,
|
||||
} from '@chakra-ui/react';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
||||
import { memo, useRef } from 'react';
|
||||
import { memo, useCallback, useRef } from 'react';
|
||||
import BoardsList from './Boards/BoardsList/BoardsList';
|
||||
import GalleryBoardName from './GalleryBoardName';
|
||||
import GalleryPinButton from './GalleryPinButton';
|
||||
import GallerySettingsPopover from './GallerySettingsPopover';
|
||||
import BatchImageGrid from './ImageGrid/BatchImageGrid';
|
||||
import GalleryImageGrid from './ImageGrid/GalleryImageGrid';
|
||||
import IAIButton from 'common/components/IAIButton';
|
||||
import { FaImages, FaServer } from 'react-icons/fa';
|
||||
import { galleryViewChanged } from '../store/gallerySlice';
|
||||
|
||||
const selector = createSelector(
|
||||
[stateSelector],
|
||||
(state) => {
|
||||
const { selectedBoardId } = state.gallery;
|
||||
const { selectedBoardId, galleryView } = state.gallery;
|
||||
|
||||
return {
|
||||
selectedBoardId,
|
||||
galleryView,
|
||||
};
|
||||
},
|
||||
defaultSelectorOptions
|
||||
@@ -26,10 +41,19 @@ const selector = createSelector(
|
||||
const ImageGalleryContent = () => {
|
||||
const resizeObserverRef = useRef<HTMLDivElement>(null);
|
||||
const galleryGridRef = useRef<HTMLDivElement>(null);
|
||||
const { selectedBoardId } = useAppSelector(selector);
|
||||
const { selectedBoardId, galleryView } = useAppSelector(selector);
|
||||
const dispatch = useAppDispatch();
|
||||
const { isOpen: isBoardListOpen, onToggle: onToggleBoardList } =
|
||||
useDisclosure();
|
||||
|
||||
const handleClickImages = useCallback(() => {
|
||||
dispatch(galleryViewChanged('images'));
|
||||
}, [dispatch]);
|
||||
|
||||
const handleClickAssets = useCallback(() => {
|
||||
dispatch(galleryViewChanged('assets'));
|
||||
}, [dispatch]);
|
||||
|
||||
return (
|
||||
<VStack
|
||||
sx={{
|
||||
@@ -48,11 +72,11 @@ const ImageGalleryContent = () => {
|
||||
gap: 2,
|
||||
}}
|
||||
>
|
||||
<GallerySettingsPopover />
|
||||
<GalleryBoardName
|
||||
isOpen={isBoardListOpen}
|
||||
onToggle={onToggleBoardList}
|
||||
/>
|
||||
<GallerySettingsPopover />
|
||||
<GalleryPinButton />
|
||||
</Flex>
|
||||
<Box>
|
||||
@@ -60,6 +84,55 @@ const ImageGalleryContent = () => {
|
||||
</Box>
|
||||
</Box>
|
||||
<Flex ref={galleryGridRef} direction="column" gap={2} h="full" w="full">
|
||||
<Flex
|
||||
sx={{
|
||||
alignItems: 'center',
|
||||
justifyContent: 'space-between',
|
||||
gap: 2,
|
||||
}}
|
||||
>
|
||||
<Tabs
|
||||
index={galleryView === 'images' ? 0 : 1}
|
||||
variant="unstyled"
|
||||
size="sm"
|
||||
sx={{ w: 'full' }}
|
||||
>
|
||||
<TabList>
|
||||
<ButtonGroup
|
||||
isAttached
|
||||
sx={{
|
||||
w: 'full',
|
||||
}}
|
||||
>
|
||||
<Tab
|
||||
as={IAIButton}
|
||||
size="sm"
|
||||
isChecked={galleryView === 'images'}
|
||||
onClick={handleClickImages}
|
||||
sx={{
|
||||
w: 'full',
|
||||
}}
|
||||
leftIcon={<FaImages />}
|
||||
>
|
||||
Images
|
||||
</Tab>
|
||||
<Tab
|
||||
as={IAIButton}
|
||||
size="sm"
|
||||
isChecked={galleryView === 'assets'}
|
||||
onClick={handleClickAssets}
|
||||
sx={{
|
||||
w: 'full',
|
||||
}}
|
||||
leftIcon={<FaServer />}
|
||||
>
|
||||
Assets
|
||||
</Tab>
|
||||
</ButtonGroup>
|
||||
</TabList>
|
||||
</Tabs>
|
||||
</Flex>
|
||||
|
||||
{selectedBoardId === 'batch' ? (
|
||||
<BatchImageGrid />
|
||||
) : (
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { Box } from '@chakra-ui/react';
|
||||
import { Box, Flex } from '@chakra-ui/react';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { TypesafeDraggableData } from 'app/components/ImageDnd/typesafeDnd';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
@@ -86,38 +86,32 @@ const GalleryImage = (props: HoverableImageProps) => {
|
||||
|
||||
return (
|
||||
<Box sx={{ w: 'full', h: 'full', touchAction: 'none' }}>
|
||||
<ImageContextMenu imageDTO={imageDTO}>
|
||||
{(ref) => (
|
||||
<Box
|
||||
position="relative"
|
||||
key={imageName}
|
||||
userSelect="none"
|
||||
ref={ref}
|
||||
sx={{
|
||||
display: 'flex',
|
||||
justifyContent: 'center',
|
||||
alignItems: 'center',
|
||||
aspectRatio: '1/1',
|
||||
}}
|
||||
>
|
||||
<IAIDndImage
|
||||
onClick={handleClick}
|
||||
imageDTO={imageDTO}
|
||||
draggableData={draggableData}
|
||||
isSelected={isSelected}
|
||||
minSize={0}
|
||||
onClickReset={handleDelete}
|
||||
imageSx={{ w: 'full', h: 'full' }}
|
||||
isDropDisabled={true}
|
||||
isUploadDisabled={true}
|
||||
thumbnail={true}
|
||||
// resetIcon={<FaTrash />}
|
||||
// resetTooltip="Delete image"
|
||||
// withResetIcon // removed bc it's too easy to accidentally delete images
|
||||
/>
|
||||
</Box>
|
||||
)}
|
||||
</ImageContextMenu>
|
||||
<Flex
|
||||
userSelect="none"
|
||||
sx={{
|
||||
position: 'relative',
|
||||
justifyContent: 'center',
|
||||
alignItems: 'center',
|
||||
aspectRatio: '1/1',
|
||||
}}
|
||||
>
|
||||
<IAIDndImage
|
||||
onClick={handleClick}
|
||||
imageDTO={imageDTO}
|
||||
draggableData={draggableData}
|
||||
isSelected={isSelected}
|
||||
minSize={0}
|
||||
onClickReset={handleDelete}
|
||||
imageSx={{ w: 'full', h: 'full' }}
|
||||
isDropDisabled={true}
|
||||
isUploadDisabled={true}
|
||||
thumbnail={true}
|
||||
withHoverOverlay
|
||||
// resetIcon={<FaTrash />}
|
||||
// resetTooltip="Delete image"
|
||||
// withResetIcon // removed bc it's too easy to accidentally delete images
|
||||
/>
|
||||
</Flex>
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
import { Box, Spinner } from '@chakra-ui/react';
|
||||
import { Box, Flex } from '@chakra-ui/react';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import IAIButton from 'common/components/IAIButton';
|
||||
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
|
||||
import { IMAGE_LIMIT } from 'features/gallery//store/gallerySlice';
|
||||
import { selectListImagesBaseQueryArgs } from 'features/gallery/store/gallerySelectors';
|
||||
import {
|
||||
UseOverlayScrollbarsParams,
|
||||
useOverlayScrollbars,
|
||||
@@ -15,10 +16,10 @@ import {
|
||||
useLazyListImagesQuery,
|
||||
useListImagesQuery,
|
||||
} from 'services/api/endpoints/images';
|
||||
import { useBoardTotal } from 'services/api/hooks/useBoardTotal';
|
||||
import GalleryImage from './GalleryImage';
|
||||
import ImageGridItemContainer from './ImageGridItemContainer';
|
||||
import ImageGridListContainer from './ImageGridListContainer';
|
||||
import { selectListImagesBaseQueryArgs } from 'features/gallery/store/gallerySelectors';
|
||||
|
||||
const overlayScrollbarsConfig: UseOverlayScrollbarsParams = {
|
||||
defer: true,
|
||||
@@ -40,7 +41,10 @@ const GalleryImageGrid = () => {
|
||||
const [initialize, osInstance] = useOverlayScrollbars(
|
||||
overlayScrollbarsConfig
|
||||
);
|
||||
|
||||
const selectedBoardId = useAppSelector(
|
||||
(state) => state.gallery.selectedBoardId
|
||||
);
|
||||
const { currentViewTotal } = useBoardTotal(selectedBoardId);
|
||||
const queryArgs = useAppSelector(selectListImagesBaseQueryArgs);
|
||||
|
||||
const { currentData, isFetching, isSuccess, isError } =
|
||||
@@ -49,19 +53,23 @@ const GalleryImageGrid = () => {
|
||||
const [listImages] = useLazyListImagesQuery();
|
||||
|
||||
const areMoreAvailable = useMemo(() => {
|
||||
if (!currentData) {
|
||||
if (!currentData || !currentViewTotal) {
|
||||
return false;
|
||||
}
|
||||
return currentData.ids.length < currentData.total;
|
||||
}, [currentData]);
|
||||
return currentData.ids.length < currentViewTotal;
|
||||
}, [currentData, currentViewTotal]);
|
||||
|
||||
const handleLoadMoreImages = useCallback(() => {
|
||||
if (!areMoreAvailable) {
|
||||
return;
|
||||
}
|
||||
|
||||
listImages({
|
||||
...queryArgs,
|
||||
offset: currentData?.ids.length ?? 0,
|
||||
limit: IMAGE_LIMIT,
|
||||
});
|
||||
}, [listImages, queryArgs, currentData?.ids.length]);
|
||||
}, [areMoreAvailable, listImages, queryArgs, currentData?.ids.length]);
|
||||
|
||||
useEffect(() => {
|
||||
// Initialize the gallery's custom scrollbar
|
||||
@@ -79,20 +87,34 @@ const GalleryImageGrid = () => {
|
||||
|
||||
if (!currentData) {
|
||||
return (
|
||||
<Box sx={{ w: 'full', h: 'full' }}>
|
||||
<Spinner size="2xl" opacity={0.5} />
|
||||
</Box>
|
||||
<Flex
|
||||
sx={{
|
||||
w: 'full',
|
||||
h: 'full',
|
||||
alignItems: 'center',
|
||||
justifyContent: 'center',
|
||||
}}
|
||||
>
|
||||
<IAINoContentFallback label="Loading..." icon={FaImage} />
|
||||
</Flex>
|
||||
);
|
||||
}
|
||||
|
||||
if (isSuccess && currentData?.ids.length === 0) {
|
||||
return (
|
||||
<Box sx={{ w: 'full', h: 'full' }}>
|
||||
<Flex
|
||||
sx={{
|
||||
w: 'full',
|
||||
h: 'full',
|
||||
alignItems: 'center',
|
||||
justifyContent: 'center',
|
||||
}}
|
||||
>
|
||||
<IAINoContentFallback
|
||||
label={t('gallery.noImagesInGallery')}
|
||||
icon={FaImage}
|
||||
/>
|
||||
</Box>
|
||||
</Flex>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -121,9 +143,7 @@ const GalleryImageGrid = () => {
|
||||
loadingText="Loading"
|
||||
flexShrink={0}
|
||||
>
|
||||
{areMoreAvailable
|
||||
? t('gallery.loadMore')
|
||||
: t('gallery.allImagesLoaded')}
|
||||
{`Load More (${currentData.ids.length} of ${currentViewTotal})`}
|
||||
</IAIButton>
|
||||
</>
|
||||
);
|
||||
|
||||
@@ -4,7 +4,6 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import {
|
||||
IMAGE_LIMIT,
|
||||
imageSelected,
|
||||
selectImagesById,
|
||||
} from 'features/gallery/store/gallerySlice';
|
||||
import { clamp, isEqual } from 'lodash-es';
|
||||
import { useCallback } from 'react';
|
||||
@@ -53,8 +52,8 @@ export const nextPrevImageButtonsSelector = createSelector(
|
||||
|
||||
const prevImageIndex = clamp(currentImageIndex - 1, 0, images.length - 1);
|
||||
|
||||
const nextImageId = images[nextImageIndex].image_name;
|
||||
const prevImageId = images[prevImageIndex].image_name;
|
||||
const nextImageId = images[nextImageIndex]?.image_name;
|
||||
const prevImageId = images[prevImageIndex]?.image_name;
|
||||
|
||||
const nextImage = selectors.selectById(data, nextImageId);
|
||||
const prevImage = selectors.selectById(data, prevImageId);
|
||||
@@ -65,7 +64,7 @@ export const nextPrevImageButtonsSelector = createSelector(
|
||||
isOnFirstImage: currentImageIndex === 0,
|
||||
isOnLastImage:
|
||||
!isNaN(currentImageIndex) && currentImageIndex === imagesLength - 1,
|
||||
areMoreImagesAvailable: data?.total ?? 0 > imagesLength,
|
||||
areMoreImagesAvailable: (data?.total ?? 0) > imagesLength,
|
||||
isFetching: status === 'pending',
|
||||
nextImage,
|
||||
prevImage,
|
||||
|
||||
@@ -2,11 +2,11 @@ import { createSelector } from '@reduxjs/toolkit';
|
||||
import { RootState } from 'app/store/store';
|
||||
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
||||
import { ListImagesArgs } from 'services/api/endpoints/images';
|
||||
import { INITIAL_IMAGE_LIMIT } from './gallerySlice';
|
||||
import {
|
||||
getBoardIdQueryParamForBoard,
|
||||
getCategoriesQueryParamForBoard,
|
||||
} from './util';
|
||||
ASSETS_CATEGORIES,
|
||||
IMAGE_CATEGORIES,
|
||||
INITIAL_IMAGE_LIMIT,
|
||||
} from './gallerySlice';
|
||||
|
||||
export const gallerySelector = (state: RootState) => state.gallery;
|
||||
|
||||
@@ -19,14 +19,13 @@ export const selectLastSelectedImage = createSelector(
|
||||
export const selectListImagesBaseQueryArgs = createSelector(
|
||||
[(state: RootState) => state],
|
||||
(state) => {
|
||||
const { selectedBoardId } = state.gallery;
|
||||
|
||||
const categories = getCategoriesQueryParamForBoard(selectedBoardId);
|
||||
const board_id = getBoardIdQueryParamForBoard(selectedBoardId);
|
||||
const { selectedBoardId, galleryView } = state.gallery;
|
||||
const categories =
|
||||
galleryView === 'images' ? IMAGE_CATEGORIES : ASSETS_CATEGORIES;
|
||||
|
||||
const listImagesBaseQueryArgs: ListImagesArgs = {
|
||||
board_id: selectedBoardId ?? 'none',
|
||||
categories,
|
||||
board_id,
|
||||
offset: 0,
|
||||
limit: INITIAL_IMAGE_LIMIT,
|
||||
is_intermediate: false,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import type { PayloadAction } from '@reduxjs/toolkit';
|
||||
import { createSlice } from '@reduxjs/toolkit';
|
||||
import { createSlice, isAnyOf } from '@reduxjs/toolkit';
|
||||
import { uniq } from 'lodash-es';
|
||||
import { boardsApi } from 'services/api/endpoints/boards';
|
||||
import { ImageCategory } from 'services/api/types';
|
||||
@@ -14,19 +14,17 @@ export const ASSETS_CATEGORIES: ImageCategory[] = [
|
||||
export const INITIAL_IMAGE_LIMIT = 100;
|
||||
export const IMAGE_LIMIT = 20;
|
||||
|
||||
// export type GalleryView = 'images' | 'assets';
|
||||
export type BoardId =
|
||||
| 'images'
|
||||
| 'assets'
|
||||
| 'no_board'
|
||||
| 'batch'
|
||||
| (string & Record<never, never>);
|
||||
export type GalleryView = 'images' | 'assets';
|
||||
// export type BoardId = 'no_board' | (string & Record<never, never>);
|
||||
export type BoardId = string | undefined;
|
||||
|
||||
type GalleryState = {
|
||||
selection: string[];
|
||||
shouldAutoSwitch: boolean;
|
||||
autoAddBoardId: string | undefined;
|
||||
galleryImageMinimumWidth: number;
|
||||
selectedBoardId: BoardId;
|
||||
galleryView: GalleryView;
|
||||
batchImageNames: string[];
|
||||
isBatchEnabled: boolean;
|
||||
};
|
||||
@@ -34,8 +32,10 @@ type GalleryState = {
|
||||
export const initialGalleryState: GalleryState = {
|
||||
selection: [],
|
||||
shouldAutoSwitch: true,
|
||||
autoAddBoardId: undefined,
|
||||
galleryImageMinimumWidth: 96,
|
||||
selectedBoardId: 'images',
|
||||
selectedBoardId: undefined,
|
||||
galleryView: 'images',
|
||||
batchImageNames: [],
|
||||
isBatchEnabled: false,
|
||||
};
|
||||
@@ -44,14 +44,8 @@ export const gallerySlice = createSlice({
|
||||
name: 'gallery',
|
||||
initialState: initialGalleryState,
|
||||
reducers: {
|
||||
imagesRemoved: (state, action: PayloadAction<string[]>) => {
|
||||
// TODO: port all instances of this to use RTK Query cache
|
||||
// imagesAdapter.removeMany(state, action.payload);
|
||||
// state.batchImageNames = state.batchImageNames.filter(
|
||||
// (name) => !action.payload.includes(name)
|
||||
// );
|
||||
},
|
||||
imageRangeEndSelected: (state, action: PayloadAction<string>) => {
|
||||
// MULTI SELECT LOGIC
|
||||
// const rangeEndImageName = action.payload;
|
||||
// const lastSelectedImage = state.selection[state.selection.length - 1];
|
||||
// const filteredImages = selectFilteredImagesLocal(state);
|
||||
@@ -72,6 +66,7 @@ export const gallerySlice = createSlice({
|
||||
// }
|
||||
},
|
||||
imageSelectionToggled: (state, action: PayloadAction<string>) => {
|
||||
// MULTI SELECT LOGIC
|
||||
// if (
|
||||
// state.selection.includes(action.payload) &&
|
||||
// state.selection.length > 1
|
||||
@@ -94,6 +89,7 @@ export const gallerySlice = createSlice({
|
||||
},
|
||||
boardIdSelected: (state, action: PayloadAction<BoardId>) => {
|
||||
state.selectedBoardId = action.payload;
|
||||
state.galleryView = 'images';
|
||||
},
|
||||
isBatchEnabledChanged: (state, action: PayloadAction<boolean>) => {
|
||||
state.isBatchEnabled = action.payload;
|
||||
@@ -123,13 +119,37 @@ export const gallerySlice = createSlice({
|
||||
state.batchImageNames = [];
|
||||
state.selection = [];
|
||||
},
|
||||
autoAddBoardIdChanged: (
|
||||
state,
|
||||
action: PayloadAction<string | undefined>
|
||||
) => {
|
||||
state.autoAddBoardId = action.payload;
|
||||
},
|
||||
galleryViewChanged: (state, action: PayloadAction<GalleryView>) => {
|
||||
state.galleryView = action.payload;
|
||||
},
|
||||
},
|
||||
extraReducers: (builder) => {
|
||||
builder.addMatcher(isAnyBoardDeleted, (state, action) => {
|
||||
const deletedBoardId = action.meta.arg.originalArgs;
|
||||
if (deletedBoardId === state.selectedBoardId) {
|
||||
state.selectedBoardId = undefined;
|
||||
state.galleryView = 'images';
|
||||
}
|
||||
if (deletedBoardId === state.autoAddBoardId) {
|
||||
state.autoAddBoardId = undefined;
|
||||
}
|
||||
});
|
||||
builder.addMatcher(
|
||||
boardsApi.endpoints.deleteBoard.matchFulfilled,
|
||||
boardsApi.endpoints.listAllBoards.matchFulfilled,
|
||||
(state, action) => {
|
||||
if (action.meta.arg.originalArgs === state.selectedBoardId) {
|
||||
state.selectedBoardId = 'images';
|
||||
const boards = action.payload;
|
||||
if (!state.autoAddBoardId) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!boards.map((b) => b.board_id).includes(state.autoAddBoardId)) {
|
||||
state.autoAddBoardId = undefined;
|
||||
}
|
||||
}
|
||||
);
|
||||
@@ -147,6 +167,13 @@ export const {
|
||||
isBatchEnabledChanged,
|
||||
imagesAddedToBatch,
|
||||
imagesRemovedFromBatch,
|
||||
autoAddBoardIdChanged,
|
||||
galleryViewChanged,
|
||||
} = gallerySlice.actions;
|
||||
|
||||
export default gallerySlice.reducer;
|
||||
|
||||
const isAnyBoardDeleted = isAnyOf(
|
||||
boardsApi.endpoints.deleteBoard.matchFulfilled,
|
||||
boardsApi.endpoints.deleteBoardAndImages.matchFulfilled
|
||||
);
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import { SYSTEM_BOARDS } from 'services/api/endpoints/images';
|
||||
import { ASSETS_CATEGORIES, BoardId, IMAGE_CATEGORIES } from './gallerySlice';
|
||||
import { ImageCategory } from 'services/api/types';
|
||||
import { isEqual } from 'lodash-es';
|
||||
import { ImageCategory, ImageDTO } from 'services/api/types';
|
||||
import { ASSETS_CATEGORIES, BoardId, IMAGE_CATEGORIES } from './gallerySlice';
|
||||
|
||||
export const getCategoriesQueryParamForBoard = (
|
||||
board_id: BoardId
|
||||
@@ -20,16 +19,11 @@ export const getCategoriesQueryParamForBoard = (
|
||||
|
||||
export const getBoardIdQueryParamForBoard = (
|
||||
board_id: BoardId
|
||||
): string | undefined => {
|
||||
if (board_id === 'no_board') {
|
||||
): string | null => {
|
||||
if (board_id === undefined) {
|
||||
return 'none';
|
||||
}
|
||||
|
||||
// system boards besides 'no_board'
|
||||
if (SYSTEM_BOARDS.includes(board_id)) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
// user boards
|
||||
return board_id;
|
||||
};
|
||||
@@ -52,3 +46,10 @@ export const getBoardIdFromBoardAndCategoriesQueryParam = (
|
||||
|
||||
return board_id ?? 'UNKNOWN_BOARD';
|
||||
};
|
||||
|
||||
export const getCategories = (imageDTO: ImageDTO) => {
|
||||
if (IMAGE_CATEGORIES.includes(imageDTO.image_category)) {
|
||||
return IMAGE_CATEGORIES;
|
||||
}
|
||||
return ASSETS_CATEGORIES;
|
||||
};
|
||||
|
||||
@@ -5,10 +5,12 @@ import {
|
||||
ModelInputFieldTemplate,
|
||||
} from 'features/nodes/types/types';
|
||||
|
||||
import { Box, Flex } from '@chakra-ui/react';
|
||||
import { SelectItem } from '@mantine/core';
|
||||
import IAIMantineSearchableSelect from 'common/components/IAIMantineSearchableSelect';
|
||||
import { MODEL_TYPE_MAP } from 'features/parameters/types/constants';
|
||||
import { modelIdToMainModelParam } from 'features/parameters/util/modelIdToMainModelParam';
|
||||
import SyncModelsButton from 'features/ui/components/tabs/ModelManager/subpanels/ModelManagerSettingsPanel/SyncModelsButton';
|
||||
import { forEach } from 'lodash-es';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
@@ -88,18 +90,23 @@ const ModelInputFieldComponent = (
|
||||
data={[]}
|
||||
/>
|
||||
) : (
|
||||
<IAIMantineSearchableSelect
|
||||
tooltip={selectedModel?.description}
|
||||
label={
|
||||
selectedModel?.base_model && MODEL_TYPE_MAP[selectedModel?.base_model]
|
||||
}
|
||||
value={selectedModel?.id}
|
||||
placeholder={data.length > 0 ? 'Select a model' : 'No models available'}
|
||||
data={data}
|
||||
error={data.length === 0}
|
||||
disabled={data.length === 0}
|
||||
onChange={handleChangeModel}
|
||||
/>
|
||||
<Flex w="100%" alignItems="center" gap={2}>
|
||||
<IAIMantineSearchableSelect
|
||||
tooltip={selectedModel?.description}
|
||||
label={
|
||||
selectedModel?.base_model && MODEL_TYPE_MAP[selectedModel?.base_model]
|
||||
}
|
||||
value={selectedModel?.id}
|
||||
placeholder={data.length > 0 ? 'Select a model' : 'No models available'}
|
||||
data={data}
|
||||
error={data.length === 0}
|
||||
disabled={data.length === 0}
|
||||
onChange={handleChangeModel}
|
||||
/>
|
||||
<Box mt={7}>
|
||||
<SyncModelsButton iconMode />
|
||||
</Box>
|
||||
</Flex>
|
||||
);
|
||||
};
|
||||
|
||||
|
||||
@@ -48,6 +48,7 @@ export const addControlNetToLinearGraph = (
|
||||
beginStepPct,
|
||||
endStepPct,
|
||||
controlMode,
|
||||
resizeMode,
|
||||
model,
|
||||
processorType,
|
||||
weight,
|
||||
@@ -60,6 +61,7 @@ export const addControlNetToLinearGraph = (
|
||||
begin_step_percent: beginStepPct,
|
||||
end_step_percent: endStepPct,
|
||||
control_mode: controlMode,
|
||||
resize_mode: resizeMode,
|
||||
control_model: model as ControlNetInvocation['control_model'],
|
||||
control_weight: weight,
|
||||
};
|
||||
|
||||
@@ -4,6 +4,7 @@ import { useTranslation } from 'react-i18next';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import IAIMantineSearchableSelect from 'common/components/IAIMantineSearchableSelect';
|
||||
|
||||
import { Box, Flex } from '@chakra-ui/react';
|
||||
import { SelectItem } from '@mantine/core';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
@@ -11,6 +12,7 @@ import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
||||
import { modelSelected } from 'features/parameters/store/actions';
|
||||
import { MODEL_TYPE_MAP } from 'features/parameters/types/constants';
|
||||
import { modelIdToMainModelParam } from 'features/parameters/util/modelIdToMainModelParam';
|
||||
import SyncModelsButton from 'features/ui/components/tabs/ModelManager/subpanels/ModelManagerSettingsPanel/SyncModelsButton';
|
||||
import { forEach } from 'lodash-es';
|
||||
import { useGetMainModelsQuery } from 'services/api/endpoints/models';
|
||||
|
||||
@@ -84,16 +86,22 @@ const ParamMainModelSelect = () => {
|
||||
data={[]}
|
||||
/>
|
||||
) : (
|
||||
<IAIMantineSearchableSelect
|
||||
tooltip={selectedModel?.description}
|
||||
label={t('modelManager.model')}
|
||||
value={selectedModel?.id}
|
||||
placeholder={data.length > 0 ? 'Select a model' : 'No models available'}
|
||||
data={data}
|
||||
error={data.length === 0}
|
||||
disabled={data.length === 0}
|
||||
onChange={handleChangeModel}
|
||||
/>
|
||||
<Flex w="100%" alignItems="center" gap={2}>
|
||||
<IAIMantineSearchableSelect
|
||||
tooltip={selectedModel?.description}
|
||||
label={t('modelManager.model')}
|
||||
value={selectedModel?.id}
|
||||
placeholder={data.length > 0 ? 'Select a model' : 'No models available'}
|
||||
data={data}
|
||||
error={data.length === 0}
|
||||
disabled={data.length === 0}
|
||||
onChange={handleChangeModel}
|
||||
w="100%"
|
||||
/>
|
||||
<Box mt={7}>
|
||||
<SyncModelsButton iconMode />
|
||||
</Box>
|
||||
</Flex>
|
||||
);
|
||||
};
|
||||
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
import { Box, ChakraProps } from '@chakra-ui/react';
|
||||
import { Box, ChakraProps, Tooltip } from '@chakra-ui/react';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { userInvoked } from 'app/store/actions';
|
||||
import { stateSelector } from 'app/store/store';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
|
||||
import IAIButton, { IAIButtonProps } from 'common/components/IAIButton';
|
||||
import IAIIconButton, {
|
||||
IAIIconButtonProps,
|
||||
@@ -8,11 +11,13 @@ import IAIIconButton, {
|
||||
import { useIsReadyToInvoke } from 'common/hooks/useIsReadyToInvoke';
|
||||
import { clampSymmetrySteps } from 'features/parameters/store/generationSlice';
|
||||
import ProgressBar from 'features/system/components/ProgressBar';
|
||||
import { selectIsBusy } from 'features/system/store/systemSelectors';
|
||||
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
|
||||
import { useCallback } from 'react';
|
||||
import { useHotkeys } from 'react-hotkeys-hook';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { FaPlay } from 'react-icons/fa';
|
||||
import { useBoardName } from 'services/api/hooks/useBoardName';
|
||||
|
||||
const IN_PROGRESS_STYLES: ChakraProps['sx'] = {
|
||||
_disabled: {
|
||||
@@ -26,6 +31,20 @@ const IN_PROGRESS_STYLES: ChakraProps['sx'] = {
|
||||
},
|
||||
};
|
||||
|
||||
const selector = createSelector(
|
||||
[stateSelector, activeTabNameSelector, selectIsBusy],
|
||||
({ gallery }, activeTabName, isBusy) => {
|
||||
const { autoAddBoardId } = gallery;
|
||||
|
||||
return {
|
||||
isBusy,
|
||||
autoAddBoardId,
|
||||
activeTabName,
|
||||
};
|
||||
},
|
||||
defaultSelectorOptions
|
||||
);
|
||||
|
||||
interface InvokeButton
|
||||
extends Omit<IAIButtonProps | IAIIconButtonProps, 'aria-label'> {
|
||||
iconButton?: boolean;
|
||||
@@ -35,8 +54,8 @@ export default function InvokeButton(props: InvokeButton) {
|
||||
const { iconButton = false, ...rest } = props;
|
||||
const dispatch = useAppDispatch();
|
||||
const isReady = useIsReadyToInvoke();
|
||||
const activeTabName = useAppSelector(activeTabNameSelector);
|
||||
const isProcessing = useAppSelector((state) => state.system.isProcessing);
|
||||
const { isBusy, autoAddBoardId, activeTabName } = useAppSelector(selector);
|
||||
const autoAddBoardName = useBoardName(autoAddBoardId);
|
||||
|
||||
const handleInvoke = useCallback(() => {
|
||||
dispatch(clampSymmetrySteps());
|
||||
@@ -75,43 +94,52 @@ export default function InvokeButton(props: InvokeButton) {
|
||||
<ProgressBar />
|
||||
</Box>
|
||||
)}
|
||||
{iconButton ? (
|
||||
<IAIIconButton
|
||||
aria-label={t('parameters.invoke')}
|
||||
type="submit"
|
||||
icon={<FaPlay />}
|
||||
isDisabled={!isReady || isProcessing}
|
||||
onClick={handleInvoke}
|
||||
tooltip={t('parameters.invoke')}
|
||||
tooltipProps={{ placement: 'top' }}
|
||||
colorScheme="accent"
|
||||
id="invoke-button"
|
||||
{...rest}
|
||||
sx={{
|
||||
w: 'full',
|
||||
flexGrow: 1,
|
||||
...(isProcessing ? IN_PROGRESS_STYLES : {}),
|
||||
}}
|
||||
/>
|
||||
) : (
|
||||
<IAIButton
|
||||
aria-label={t('parameters.invoke')}
|
||||
type="submit"
|
||||
isDisabled={!isReady || isProcessing}
|
||||
onClick={handleInvoke}
|
||||
colorScheme="accent"
|
||||
id="invoke-button"
|
||||
{...rest}
|
||||
sx={{
|
||||
w: 'full',
|
||||
flexGrow: 1,
|
||||
fontWeight: 700,
|
||||
...(isProcessing ? IN_PROGRESS_STYLES : {}),
|
||||
}}
|
||||
>
|
||||
Invoke
|
||||
</IAIButton>
|
||||
)}
|
||||
<Tooltip
|
||||
placement="top"
|
||||
hasArrow
|
||||
openDelay={500}
|
||||
label={
|
||||
autoAddBoardId ? `Auto-Adding to ${autoAddBoardName}` : undefined
|
||||
}
|
||||
>
|
||||
{iconButton ? (
|
||||
<IAIIconButton
|
||||
aria-label={t('parameters.invoke')}
|
||||
type="submit"
|
||||
icon={<FaPlay />}
|
||||
isDisabled={!isReady || isBusy}
|
||||
onClick={handleInvoke}
|
||||
tooltip={t('parameters.invoke')}
|
||||
tooltipProps={{ placement: 'top' }}
|
||||
colorScheme="accent"
|
||||
id="invoke-button"
|
||||
{...rest}
|
||||
sx={{
|
||||
w: 'full',
|
||||
flexGrow: 1,
|
||||
...(isBusy ? IN_PROGRESS_STYLES : {}),
|
||||
}}
|
||||
/>
|
||||
) : (
|
||||
<IAIButton
|
||||
aria-label={t('parameters.invoke')}
|
||||
type="submit"
|
||||
isDisabled={!isReady || isBusy}
|
||||
onClick={handleInvoke}
|
||||
colorScheme="accent"
|
||||
id="invoke-button"
|
||||
{...rest}
|
||||
sx={{
|
||||
w: 'full',
|
||||
flexGrow: 1,
|
||||
fontWeight: 700,
|
||||
...(isBusy ? IN_PROGRESS_STYLES : {}),
|
||||
}}
|
||||
>
|
||||
Invoke
|
||||
</IAIButton>
|
||||
)}
|
||||
</Tooltip>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
|
||||
@@ -1,33 +0,0 @@
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIIconButton from 'common/components/IAIIconButton';
import { postprocessingSelector } from 'features/parameters/store/postprocessingSelectors';
import { setShouldLoopback } from 'features/parameters/store/postprocessingSlice';
import { useTranslation } from 'react-i18next';
import { FaRecycle } from 'react-icons/fa';

const loopbackSelector = createSelector(
  postprocessingSelector,
  ({ shouldLoopback }) => shouldLoopback
);

const LoopbackButton = () => {
  const dispatch = useAppDispatch();
  const shouldLoopback = useAppSelector(loopbackSelector);

  const { t } = useTranslation();

  return (
    <IAIIconButton
      aria-label={t('parameters.toggleLoopback')}
      tooltip={t('parameters.toggleLoopback')}
      isChecked={shouldLoopback}
      icon={<FaRecycle />}
      onClick={() => {
        dispatch(setShouldLoopback(!shouldLoopback));
      }}
    />
  );
};

export default LoopbackButton;
@@ -9,7 +9,6 @@ const ProcessButtons = () => {
  return (
    <Flex gap={2}>
      <InvokeButton />
      {/* {activeTabName === 'img2img' && <LoopbackButton />} */}
      <CancelButton />
    </Flex>
  );
@@ -0,0 +1,57 @@
import { Badge, BadgeProps, Flex, Text, TextProps } from '@chakra-ui/react';
import IAISwitch, { IAISwitchProps } from 'common/components/IAISwitch';
import { useTranslation } from 'react-i18next';

type SettingSwitchProps = IAISwitchProps & {
  label: string;
  useBadge?: boolean;
  badgeLabel?: string;
  textProps?: TextProps;
  badgeProps?: BadgeProps;
};

export default function SettingSwitch(props: SettingSwitchProps) {
  const { t } = useTranslation();

  const {
    label,
    textProps,
    useBadge = false,
    badgeLabel = t('settings.experimental'),
    badgeProps,
    ...rest
  } = props;

  return (
    <Flex justifyContent="space-between" py={1}>
      <Flex gap={2} alignItems="center">
        <Text
          sx={{
            fontSize: 14,
            _dark: {
              color: 'base.300',
            },
          }}
          {...textProps}
        >
          {label}
        </Text>
        {useBadge && (
          <Badge
            size="xs"
            sx={{
              px: 2,
              color: 'base.700',
              bg: 'accent.200',
              _dark: { bg: 'accent.500', color: 'base.200' },
            }}
            {...badgeProps}
          >
            {badgeLabel}
          </Badge>
        )}
      </Flex>
      <IAISwitch {...rest} />
    </Flex>
  );
}
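
A minimal usage sketch of the SettingSwitch wrapper added above, mirroring how the SettingsModal hunks later in this diff consume it. The `ExampleToggle` component, its label text, and the `onToggle` callback are illustrative only; `label`, `useBadge`, `isChecked`, and `onChange` come straight from the props defined above.

```tsx
// Illustrative only: ExampleToggle and onToggle are assumptions; the props
// passed to SettingSwitch match the SettingSwitchProps definition above.
import { ChangeEvent } from 'react';
import SettingSwitch from './SettingSwitch';

const ExampleToggle = ({
  isChecked,
  onToggle,
}: {
  isChecked: boolean;
  onToggle: (checked: boolean) => void;
}) => (
  <SettingSwitch
    label="Enable Nodes Editor"
    // Shows the badge; with no badgeLabel it falls back to t('settings.experimental').
    useBadge
    isChecked={isChecked}
    onChange={(e: ChangeEvent<HTMLInputElement>) => onToggle(e.target.checked)}
  />
);

export default ExampleToggle;
```

Leaving `badgeLabel` off falls back to `t('settings.experimental')`, which is why the nodes-editor toggle further down passes only `useBadge`, while the canvas-layout toggle overrides it with `t('settings.beta')`.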
@@ -1,60 +1,71 @@
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useCallback, useEffect, useState } from 'react';
import { StyledFlex } from './SettingsModal';
import { Heading, Text } from '@chakra-ui/react';
import { useAppDispatch } from 'app/store/storeHooks';
import { useCallback, useEffect } from 'react';
import IAIButton from '../../../../common/components/IAIButton';
import { useClearIntermediatesMutation } from '../../../../services/api/endpoints/images';
import { addToast } from '../../store/systemSlice';
import {
  useClearIntermediatesMutation,
  useGetIntermediatesCountQuery,
} from '../../../../services/api/endpoints/images';
import { resetCanvas } from '../../../canvas/store/canvasSlice';
import { addToast } from '../../store/systemSlice';
import { StyledFlex } from './SettingsModal';
import { controlNetReset } from 'features/controlNet/store/controlNetSlice';

export default function SettingsClearIntermediates() {
  const dispatch = useAppDispatch();
  const [isDisabled, setIsDisabled] = useState(false);

  const { data: intermediatesCount, refetch: updateIntermediatesCount } =
    useGetIntermediatesCountQuery();

  const [clearIntermediates, { isLoading: isLoadingClearIntermediates }] =
    useClearIntermediatesMutation();

  const handleClickClearIntermediates = useCallback(() => {
    clearIntermediates({})
    clearIntermediates()
      .unwrap()
      .then((response) => {
        dispatch(controlNetReset());
        dispatch(resetCanvas());
        dispatch(
          addToast({
            title:
              response === 0
                ? `No intermediates to clear`
                : `Successfully cleared ${response} intermediates`,
            title: `Cleared ${response} intermediates`,
            status: 'info',
          })
        );
        if (response < 100) {
          setIsDisabled(true);
        }
      });
  }, [clearIntermediates, dispatch]);

  useEffect(() => {
    // update the count on mount
    updateIntermediatesCount();
  }, [updateIntermediatesCount]);

  const buttonText = intermediatesCount
    ? `Clear ${intermediatesCount} Intermediate${
        intermediatesCount > 1 ? 's' : ''
      }`
    : 'No Intermediates to Clear';

  return (
    <StyledFlex>
      <Heading size="sm">Clear Intermediates</Heading>
      <IAIButton
        colorScheme="error"
        colorScheme="warning"
        onClick={handleClickClearIntermediates}
        isLoading={isLoadingClearIntermediates}
        isDisabled={isDisabled}
        isDisabled={!intermediatesCount}
      >
        {isDisabled ? 'Intermediates Cleared' : 'Clear 100 Intermediates'}
        {buttonText}
      </IAIButton>
      <Text>
        Will permanently delete first 100 intermediates found on disk and in
        database
      <Text fontWeight="bold">
        Clearing intermediates will reset your Canvas and ControlNet state.
      </Text>
      <Text fontWeight="bold">This will also clear your canvas state.</Text>
      <Text>
      <Text variant="subtext">
        Intermediate images are byproducts of generation, different from the
        result images in the gallery. Purging intermediates will free disk
        space. Your gallery images will not be deleted.
        result images in the gallery. Clearing intermediates will free disk
        space.
      </Text>
      <Text variant="subtext">Your gallery images will not be deleted.</Text>
    </StyledFlex>
  );
}
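
For context on where `useGetIntermediatesCountQuery` and `useClearIntermediatesMutation` come from, here is a rough sketch of how such endpoints could be declared with RTK Query. The URLs, base path, and `imagesApi` name are assumptions for illustration rather than the project's actual definitions in `services/api/endpoints/images`; the exported hook names are the ones RTK Query auto-generates from the endpoint names. Typing the mutation argument as `void` is also what would let the call site above drop the empty object (`clearIntermediates({})` becoming `clearIntermediates()`).

```ts
// Sketch only: URLs, baseUrl, and reducerPath are assumptions for illustration.
import { createApi, fetchBaseQuery } from '@reduxjs/toolkit/query/react';

export const imagesApi = createApi({
  reducerPath: 'imagesApi',
  baseQuery: fetchBaseQuery({ baseUrl: '/api/v1/' }),
  endpoints: (build) => ({
    // Resolves to the number of intermediate images currently stored.
    getIntermediatesCount: build.query<number, void>({
      query: () => 'images/intermediates',
    }),
    // Deletes intermediates; resolves to how many were removed.
    clearIntermediates: build.mutation<number, void>({
      query: () => ({ url: 'images/intermediates', method: 'DELETE' }),
    }),
  }),
});

// RTK Query auto-generates these hooks from the endpoint names above.
export const { useGetIntermediatesCountQuery, useClearIntermediatesMutation } =
  imagesApi;
```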
@@ -11,13 +11,12 @@ import {
  Text,
  useDisclosure,
} from '@chakra-ui/react';
import { createSelector, current } from '@reduxjs/toolkit';
import { createSelector } from '@reduxjs/toolkit';
import { VALID_LOG_LEVELS } from 'app/logging/useLogger';
import { LOCALSTORAGE_KEYS, LOCALSTORAGE_PREFIX } from 'app/store/constants';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIButton from 'common/components/IAIButton';
import IAIMantineSelect from 'common/components/IAIMantineSelect';
import IAISwitch from 'common/components/IAISwitch';
import { systemSelector } from 'features/system/store/systemSelectors';
import {
  SystemState,
@@ -25,7 +24,6 @@ import {
  setEnableImageDebugging,
  setIsNodesEnabled,
  setShouldConfirmOnDelete,
  setShouldDisplayGuides,
  shouldAntialiasProgressImageChanged,
  shouldLogToConsoleChanged,
} from 'features/system/store/systemSlice';
@@ -48,15 +46,15 @@ import {
} from 'react';
import { useTranslation } from 'react-i18next';
import { LogLevelName } from 'roarr';
import SettingsSchedulers from './SettingsSchedulers';
import SettingSwitch from './SettingSwitch';
import SettingsClearIntermediates from './SettingsClearIntermediates';
import SettingsSchedulers from './SettingsSchedulers';

const selector = createSelector(
  [systemSelector, uiSelector],
  (system: SystemState, ui: UIState) => {
    const {
      shouldConfirmOnDelete,
      shouldDisplayGuides,
      enableImageDebugging,
      consoleLogLevel,
      shouldLogToConsole,
@@ -73,7 +71,6 @@ const selector = createSelector(

    return {
      shouldConfirmOnDelete,
      shouldDisplayGuides,
      enableImageDebugging,
      shouldUseCanvasBetaLayout,
      shouldUseSliders,
@@ -139,7 +136,6 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {

  const {
    shouldConfirmOnDelete,
    shouldDisplayGuides,
    enableImageDebugging,
    shouldUseCanvasBetaLayout,
    shouldUseSliders,
@@ -195,7 +191,7 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {
    <Modal
      isOpen={isSettingsModalOpen}
      onClose={onSettingsModalClose}
      size="xl"
      size="2xl"
      isCentered
    >
      <ModalOverlay />
@@ -206,7 +202,7 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {
          <Flex sx={{ gap: 4, flexDirection: 'column' }}>
            <StyledFlex>
              <Heading size="sm">{t('settings.general')}</Heading>
              <IAISwitch
              <SettingSwitch
                label={t('settings.confirmOnDelete')}
                isChecked={shouldConfirmOnDelete}
                onChange={(e: ChangeEvent<HTMLInputElement>) =>
@@ -214,7 +210,7 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {
                }
              />
              {shouldShowAdvancedOptionsSettings && (
                <IAISwitch
                <SettingSwitch
                  label={t('settings.showAdvancedOptions')}
                  isChecked={shouldShowAdvancedOptions}
                  onChange={(e: ChangeEvent<HTMLInputElement>) =>
@@ -231,37 +227,21 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {

            <StyledFlex>
              <Heading size="sm">{t('settings.ui')}</Heading>
              <IAISwitch
                label={t('settings.displayHelpIcons')}
                isChecked={shouldDisplayGuides}
                onChange={(e: ChangeEvent<HTMLInputElement>) =>
                  dispatch(setShouldDisplayGuides(e.target.checked))
                }
              />
              {shouldShowBetaLayout && (
                <IAISwitch
                  label={t('settings.useCanvasBeta')}
                  isChecked={shouldUseCanvasBetaLayout}
                  onChange={(e: ChangeEvent<HTMLInputElement>) =>
                    dispatch(setShouldUseCanvasBetaLayout(e.target.checked))
                  }
                />
              )}
              <IAISwitch
              <SettingSwitch
                label={t('settings.useSlidersForAll')}
                isChecked={shouldUseSliders}
                onChange={(e: ChangeEvent<HTMLInputElement>) =>
                  dispatch(setShouldUseSliders(e.target.checked))
                }
              />
              <IAISwitch
              <SettingSwitch
                label={t('settings.showProgressInViewer')}
                isChecked={shouldShowProgressInViewer}
                onChange={(e: ChangeEvent<HTMLInputElement>) =>
                  dispatch(setShouldShowProgressInViewer(e.target.checked))
                }
              />
              <IAISwitch
              <SettingSwitch
                label={t('settings.antialiasProgressImages')}
                isChecked={shouldAntialiasProgressImage}
                onChange={(e: ChangeEvent<HTMLInputElement>) =>
@@ -270,9 +250,21 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {
                  )
                }
              />
              {shouldShowBetaLayout && (
                <SettingSwitch
                  label={t('settings.alternateCanvasLayout')}
                  useBadge
                  badgeLabel={t('settings.beta')}
                  isChecked={shouldUseCanvasBetaLayout}
                  onChange={(e: ChangeEvent<HTMLInputElement>) =>
                    dispatch(setShouldUseCanvasBetaLayout(e.target.checked))
                  }
                />
              )}
              {shouldShowNodesToggle && (
                <IAISwitch
                  label="Enable Nodes Editor (Experimental)"
                <SettingSwitch
                  label={t('settings.enableNodesEditor')}
                  useBadge
                  isChecked={isNodesEnabled}
                  onChange={handleToggleNodes}
                />
@@ -282,7 +274,7 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {
            {shouldShowDeveloperSettings && (
              <StyledFlex>
                <Heading size="sm">{t('settings.developer')}</Heading>
                <IAISwitch
                <SettingSwitch
                  label={t('settings.shouldLogToConsole')}
                  isChecked={shouldLogToConsole}
                  onChange={handleLogToConsoleChanged}
@@ -294,7 +286,7 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {
                  value={consoleLogLevel}
                  data={VALID_LOG_LEVELS.concat()}
                />
                <IAISwitch
                <SettingSwitch
                  label={t('settings.enableImageDebugging')}
                  isChecked={enableImageDebugging}
                  onChange={(e: ChangeEvent<HTMLInputElement>) =>
@@ -313,8 +305,12 @@ const SettingsModal = ({ children, config }: SettingsModalProps) => {
              </IAIButton>
              {shouldShowResetWebUiText && (
                <>
                  <Text>{t('settings.resetWebUIDesc1')}</Text>
                  <Text>{t('settings.resetWebUIDesc2')}</Text>
                  <Text variant="subtext">
                    {t('settings.resetWebUIDesc1')}
                  </Text>
                  <Text variant="subtext">
                    {t('settings.resetWebUIDesc2')}
                  </Text>
                </>
              )}
            </StyledFlex>

@@ -38,7 +38,6 @@ export interface SystemState {
  currentIteration: number;
  totalIterations: number;
  currentStatusHasSteps: boolean;
  shouldDisplayGuides: boolean;
  isCancelable: boolean;
  enableImageDebugging: boolean;
  toastQueue: UseToastOptions[];
@@ -84,14 +83,12 @@ export interface SystemState {
  shouldAntialiasProgressImage: boolean;
  language: keyof typeof LANGUAGES;
  isUploading: boolean;
  boardIdToAddTo?: string;
  isNodesEnabled: boolean;
}

export const initialSystemState: SystemState = {
  isConnected: false,
  isProcessing: false,
  shouldDisplayGuides: true,
  isGFPGANAvailable: true,
  isESRGANAvailable: true,
  shouldConfirmOnDelete: true,
@@ -134,9 +131,6 @@ export const systemSlice = createSlice({
    setShouldConfirmOnDelete: (state, action: PayloadAction<boolean>) => {
      state.shouldConfirmOnDelete = action.payload;
    },
    setShouldDisplayGuides: (state, action: PayloadAction<boolean>) => {
      state.shouldDisplayGuides = action.payload;
    },
    setIsCancelable: (state, action: PayloadAction<boolean>) => {
      state.isCancelable = action.payload;
    },
@@ -204,7 +198,6 @@ export const systemSlice = createSlice({
     */
    builder.addCase(appSocketSubscribed, (state, action) => {
      state.sessionId = action.payload.sessionId;
      state.boardIdToAddTo = action.payload.boardId;
      state.canceledSession = '';
    });

@@ -213,7 +206,6 @@ export const systemSlice = createSlice({
     */
    builder.addCase(appSocketUnsubscribed, (state) => {
      state.sessionId = null;
      state.boardIdToAddTo = undefined;
    });

    /**
@@ -390,7 +382,6 @@ export const {
  setIsProcessing,
  setShouldConfirmOnDelete,
  setCurrentStatus,
  setShouldDisplayGuides,
  setIsCancelable,
  setEnableImageDebugging,
  addToast,

@@ -105,7 +105,7 @@ const enabledTabsSelector = createSelector(
  }
);

const MIN_GALLERY_WIDTH = 300;
const MIN_GALLERY_WIDTH = 350;
const DEFAULT_GALLERY_PCT = 20;
export const NO_GALLERY_TABS: InvokeTabName[] = ['modelManager'];

@@ -78,7 +78,6 @@ const ParametersDrawer = () => {
      }}
    >
      <Flex
        paddingTop={1.5}
        paddingBottom={4}
        justifyContent="space-between"
        alignItems="center"
Some files were not shown because too many files have changed in this diff.