Compare commits


125 Commits

Author SHA1 Message Date
psychedelicious
5189fd70a4 feat(ui, nodes): metadata wip 2023-04-14 12:52:07 +10:00
psychedelicious
c79e7d3bc1 fix(ui): fix sqlalchemy dynamic model instantiation 2023-04-13 09:27:46 +10:00
psychedelicious
f0268235ed feat(ui): rewrite SqliteItemStore in sqlalchemy 2023-04-12 22:34:32 +10:00
psychedelicious
a428c473ae feat(ui): handle already-connected fields 2023-04-12 12:32:58 +10:00
psychedelicious
832e5f66e1 feat(ui): add url host transformation 2023-04-12 12:29:17 +10:00
Mary Hipp
43e820c98c add redux-dynamic-middlewares as a dependency 2023-04-12 12:29:17 +10:00
psychedelicious
708b236769 chore(ui): rebuild api, update types 2023-04-12 12:29:17 +10:00
psychedelicious
c182f64620 feat(ui): wip node editor 2023-04-12 12:29:17 +10:00
psychedelicious
6e4c3a7127 docs(ui): update nodes doc 2023-04-12 12:29:17 +10:00
psychedelicious
7b690a8127 feat(ui): validation connections w/ graphlib 2023-04-12 12:29:17 +10:00
psychedelicious
fe7cf16547 feat(ui): wip model handling and graph topology validation 2023-04-12 12:29:17 +10:00
psychedelicious
451fe7abcd feat(ui): it blends 2023-04-12 12:29:17 +10:00
psychedelicious
ebc76a4785 feat(ui): increase edge width 2023-04-12 12:29:17 +10:00
psychedelicious
2d1b818824 feat(ui): add connection validation styling 2023-04-12 12:29:17 +10:00
psychedelicious
d93473eaae fix(ui): add basic node edges & connection validation 2023-04-12 12:29:17 +10:00
psychedelicious
f6714d74be fix(ui): fix handle 2023-04-12 12:29:17 +10:00
psychedelicious
d26a414560 feat(ui): hook up nodes to redux 2023-04-12 12:29:17 +10:00
psychedelicious
5aec29b25f feat(ui): cleanup nodes ui stuff 2023-04-12 12:29:17 +10:00
psychedelicious
abde52573e feat(ui): nodes before deleting stuff 2023-04-12 12:29:17 +10:00
psychedelicious
49ea838a3c feat(ui): remove extraneous field types 2023-04-12 12:29:17 +10:00
psychedelicious
9bd79c04a6 feat(ui): wip node editor 2023-04-12 12:29:17 +10:00
psychedelicious
b55b2a8947 fix(ui): disable event subscription
it is not fully baked just yet
2023-04-12 12:29:17 +10:00
psychedelicious
e3a8fceb5d feat(ui): first steps to node editor ui 2023-04-12 12:29:17 +10:00
psychedelicious
1e09fdc8be feat(ui): "subscribe" to particular nodes
feels like a dirty hack but oh well it works
2023-04-12 12:29:17 +10:00
Mary Hipp
d0e9ec267c feat(ui): add hi-res functionality for txt2img generations 2023-04-12 12:29:17 +10:00
Mary Hipp
880e1743ac feat(ui): update ModelSelect for nodes API 2023-04-12 12:29:17 +10:00
Mary Hipp
f59d4a0015 feat(ui): generate iterations graph 2023-04-12 12:29:17 +10:00
psychedelicious
152d4e76aa feat(ui): add exampleGraphs object w/ iterations example 2023-04-12 12:29:17 +10:00
psychedelicious
b829af7410 fix(ui): fix middleware order for multi-node graphs 2023-04-12 12:29:16 +10:00
psychedelicious
dce604b567 feat(ui): increase StatusIndicator font size 2023-04-12 12:29:16 +10:00
psychedelicious
1ed4354753 feat(ui): improve InvocationCompleteEvent types 2023-04-12 12:29:16 +10:00
psychedelicious
db8ba8b0bf chore(ui): regenerate api client 2023-04-12 12:29:16 +10:00
psychedelicious
3cd2695676 fix(ui): fix img2img type 2023-04-12 12:29:16 +10:00
psychedelicious
2787d32881 feat(ui): migrate cancelation
also updated action names to be event-like instead of declaration-like

sorry, i was scattered and this commit has a lot of unrelated stuff in it.
2023-04-12 12:29:16 +10:00
psychedelicious
96768078fa feat(ui): prep for socket jwt 2023-04-12 12:29:16 +10:00
psychedelicious
13c9639d7b feat(ui): dynamic middleware loading 2023-04-12 12:29:16 +10:00
Mary Hipp
f104f0a390 feat(ui) working on making socket URL dynamic 2023-04-12 12:29:16 +10:00
Mary Hipp
c49d2accb7 feat(ui): export StatusIndicator and ModelSelect for header use 2023-04-12 12:29:16 +10:00
Mary Hipp
749a0912c8 feat(ui): add optional token for auth 2023-04-12 12:29:16 +10:00
psychedelicious
759e5613cd feat(ui): wip events, comments, and general refactoring 2023-04-12 12:29:16 +10:00
psychedelicious
ac9b83722e lang(ui): add toast strings 2023-04-12 12:29:16 +10:00
psychedelicious
439a35e064 docs(ui): organise and update docs 2023-04-12 12:29:16 +10:00
Mary Hipp
7286843698 feat(ui): add support to disableTabs 2023-04-12 12:29:16 +10:00
Mary Hipp
77ba1b77d7 disable panels when app mounts 2023-04-12 12:29:16 +10:00
Mary Hipp
e749e7e915 feat(ui): invert logic to be disabled 2023-04-12 12:29:16 +10:00
Mary Hipp
e486559d8f feat(ui): disable panels based on app props 2023-04-12 12:29:16 +10:00
psychedelicious
2d8982c23d feat(ui): wip refactor socket events 2023-04-12 12:29:16 +10:00
psychedelicious
02d510ba17 chore(ui): regenerate api 2023-04-12 12:29:16 +10:00
psychedelicious
84d9ccb014 feat(ui): wip gallery migration 2023-04-12 12:29:16 +10:00
psychedelicious
b9fc136f25 feat(ui): wip gallery migration 2023-04-12 12:29:16 +10:00
psychedelicious
f6691dbf3b chore(ui): regenerate api 2023-04-12 12:29:16 +10:00
psychedelicious
cb11717b9c feat(ui): patch api generation for headers access 2023-04-12 12:29:16 +10:00
Mary Hipp
35c950c50d fix(ui): restore removed type 2023-04-12 12:29:16 +10:00
Mary Hipp
afb0b564e9 feat(ui): POST upload working 2023-04-12 12:29:16 +10:00
Mary Hipp
657efadffa fix(ui): separate thunk for initial gallery load so it properly gets index 0 2023-04-12 12:29:16 +10:00
psychedelicious
5b1ffc292f feat(ui): clean up & comment results slice 2023-04-12 12:29:16 +10:00
psychedelicious
cad289dfe5 feat(ui): begin migrating gallery to nodes
Along the way, migrate to use RTK `createEntityAdapter` for gallery images, and separate `results` and `uploads` into separate slices. Much cleaner this way.
2023-04-12 12:29:16 +10:00
psychedelicious
1df999d082 chore(ui): add typescript as dev dependency
I am having trouble with TS versions after vscode updated and now uses TS 5. `madge` has installed 3.9.10 and for whatever reason my vscode wants to use that. Manually specifying 4.9.5 and then setting vscode to use that as the workspace TS fixes the issue.
2023-04-12 12:29:16 +10:00
psychedelicious
1372536728 chore(ui): regenerate api client 2023-04-12 12:29:16 +10:00
psychedelicious
23a69ea7bf docs(ui): update readme 2023-04-12 12:29:16 +10:00
psychedelicious
5cff28aaf3 chore(ui): bump redux-toolkit 2023-04-12 12:29:16 +10:00
psychedelicious
21fba1aac6 feat(ui): load images on socket connect
Rudimentary
2023-04-12 12:29:16 +10:00
psychedelicious
c992c2fe7d feat(ui): add type guards for outputs 2023-04-12 12:29:16 +10:00
psychedelicious
3e76c1a3cd feat(ui): make thunk types more consistent 2023-04-12 12:29:16 +10:00
psychedelicious
5eb077accc feat(ui): fix parameters panel border color
This commit should be elsewhere but I don't want to break my flow
2023-04-12 12:29:16 +10:00
psychedelicious
007794f48b feat(ui): disable NodeAPITest
This was polluting the network/socket logs.
2023-04-12 12:29:16 +10:00
psychedelicious
95a336c26a feat(ui): add rtk action type guard 2023-04-12 12:29:16 +10:00
psychedelicious
6ca0798303 fix(ui): fix middleware types 2023-04-12 12:29:16 +10:00
psychedelicious
bb9986bfd2 feat(ui): handle random seeds 2023-04-12 12:29:16 +10:00
psychedelicious
11f34e0388 feat(ui): add nodes mode script 2023-04-12 12:29:16 +10:00
maryhipp
dea27f451a chore(ui): add support for package mode 2023-04-12 12:29:16 +10:00
maryhipp
be32f5639b feat(ui): get intermediate images working but types are stubbed out 2023-04-12 12:29:16 +10:00
maryhipp
6fd9840608 feat(ui): img2img implementation 2023-04-12 12:29:16 +10:00
maryhipp
158528cf12 feat(ui): write separate nodes socket layer, txt2img generating and rendering w single node 2023-04-12 12:29:16 +10:00
maryhipp
1401a26a41 feat(ui): start hooking up dynamic txt2img node generation, create middleware for session invocation 2023-04-12 12:29:16 +10:00
maryhipp
213a2dcdc8 add optional apiUrl prop 2023-04-12 12:29:16 +10:00
maryhipp
85019ab1b0 use reference to sampler_name 2023-04-12 12:29:16 +10:00
maryhipp
683f8b324e use reference to sampler_name 2023-04-12 12:29:16 +10:00
maryhipp
8a45efbaf3 start building out node translations from frontend state and add notes about missing features 2023-04-12 12:29:16 +10:00
psychedelicious
14a1871087 feat(ui): wip nodes
- extract api client method arg types instead of manually declaring them
- update example to display images
- general tidy up
2023-04-12 12:29:16 +10:00
psychedelicious
3e3ac329c8 feat(ui): add socketio types 2023-04-12 12:29:16 +10:00
psychedelicious
1db0940c67 fix(ui): fix scrollbar styles typing and prop
just noticed the typo, and made the types stronger.
2023-04-12 12:29:16 +10:00
psychedelicious
b7fa23be64 fix(ui): disable OG web server socket connection 2023-04-12 12:29:16 +10:00
psychedelicious
9be2c02d5e chore(ui): regenerate api client 2023-04-12 12:29:16 +10:00
psychedelicious
686f03d2cc feat(ui): nodes cancel 2023-04-12 12:29:16 +10:00
psychedelicious
2b6ca72b36 feat(ui): more nodes api prototyping 2023-04-12 12:29:16 +10:00
psychedelicious
bfc0c0b3f6 feat(ui): generate object args for api client 2023-04-12 12:29:16 +10:00
psychedelicious
e3c3ddc45b feat(backend): fixes for nodes/generator 2023-04-12 12:29:16 +10:00
psychedelicious
9436f8e81e chore(ui): update openapi.json 2023-04-12 12:29:16 +10:00
psychedelicious
68d1c35b6f chore(ui): update .eslintignore, .prettierignore 2023-04-12 12:29:16 +10:00
psychedelicious
46f54c81ed chore(ui): organize generated files 2023-04-12 12:29:16 +10:00
psychedelicious
860d495732 fix(ui): update client & nodes test code w/ new Edge type 2023-04-12 12:29:16 +10:00
psychedelicious
7c24706778 feat(ui): add axios client generator and simple example 2023-04-12 12:29:16 +10:00
psychedelicious
d923d1d66b fix(nodes): fix naming of CvInvocationConfig 2023-04-11 12:13:53 +10:00
psychedelicious
1f2c1e14db fix(nodes): move InvocationConfig to baseinvocation.py 2023-04-11 12:13:53 +10:00
psychedelicious
07e3a0ec15 feat(nodes): add invocation schema customisation, add model selection
- add invocation schema customisation

done via pydantic's `Config` class and `schema_extra`. when using `Config`, inherit from `InvocationConfig` to get type hints.

where it makes sense - like for all math invocations - define a `MathInvocationConfig` class and have all invocations inherit from it.

this customisation can provide any arbitrary additional data to the UI. currently it provides tags and field type hints.

this is necessary for `model` type fields, which are actually string fields. without something like this, we can't reliably differentiate `model` fields from normal `string` fields.

can also be used for future field types.

all invocations now have tags, and all `model` fields have ui type hints.

- fix model handling for invocations

added a helper to fall back to the default model if an invalid model name is chosen. model names in graphs now work.

- fix latents progress callback

noticed this wasn't correct while working on everything else.
2023-04-11 12:13:53 +10:00
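The pattern described in the commit above boils down to attaching a `Config` class with `schema_extra` to an invocation. A minimal, self-contained sketch, assuming pydantic v1 semantics; the class and field names here are illustrative, not the repo's exact API:

```python
from typing import Literal

from pydantic import BaseModel, Field


class ExampleModelInvocation(BaseModel):
    """Hypothetical invocation whose `model` field is a plain string,
    but which hints to the UI that it should render a model picker."""

    type: Literal["example"] = "example"
    model: str = Field(default="", description="The model to use")

    class Config:
        # Merged into the generated OpenAPI schema, so a UI can
        # distinguish `model` fields from ordinary `string` fields.
        schema_extra = {
            "ui": {
                "tags": ["example"],
                "type_hints": {"model": "model"},
            },
        }
```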
psychedelicious
427db7c7e2 feat(nodes): fix typo in PasteImageInvocation 2023-04-10 21:33:08 +10:00
psychedelicious
dad3a7f263 fix(nodes): sampler_name --> scheduler
the name of this was changed at some point. nodes still used the old name, so scheduler selection did nothing. simple fix.
2023-04-10 19:54:09 +10:00
psychedelicious
5bd0bb637f fix(nodes): add missing type to ImageField 2023-04-10 19:33:15 +10:00
Lincoln Stein
f05095770c Increase chunk size when computing diffusers SHAs (#3159)
When running this app for the first time in a WSL2 environment, which is
notoriously slow when it comes to IO, computing the SHAs of the models
takes an eternity.

Computing shas for sd2.1
```
| Calculating sha256 hash of model files
| sha256 = 1e4ce085102fe6590d41ec1ab6623a18c07127e2eca3e94a34736b36b57b9c5e (49 files hashed in 510.87s)
```

I increased the chunk size to 16MB to reduce the number of round trips for
loading the data. New results:

```
| Calculating sha256 hash of model files
| sha256 = 1e4ce085102fe6590d41ec1ab6623a18c07127e2eca3e94a34736b36b57b9c5e (49 files hashed in 59.89s)
```

Higher values don't seem to make an impact.
2023-04-09 22:29:43 -04:00
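The change itself amounts to reading model files in larger chunks while hashing. A rough sketch of the idea (the helper name and signature are illustrative, not the repo's actual function):

```python
import hashlib
from pathlib import Path


def hash_model_file(path: Path, chunk_size: int = 16 * 1024 * 1024) -> str:
    """Stream a file through sha256 in 16MB chunks; larger chunks mean
    fewer read() round trips, which matters on slow IO layers like WSL2."""
    sha = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            sha.update(chunk)
    return sha.hexdigest()
```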
AbdBarho
de189f2db6 Increase chunk size when computing SHAs 2023-04-09 21:53:59 +02:00
psychedelicious
4463124bdd feat(nodes): mark ImageField properties required, add docs 2023-04-09 22:53:17 +10:00
psychedelicious
34402cc46a feat(nodes): add list_images endpoint
- add `list_images` endpoint at `GET api/v1/images`
- extend `ImageStorageBase` with `list()` method, implemented it for `DiskImageStorage`
- add `ImageResponse` class for image responses, which includes urls, metadata
- add `ImageMetadata` class (basically a stub at the moment)
- uploaded images now named `"{uuid}_{timestamp}.png"`
- add `models` modules. besides separating concerns more clearly, this helps to mitigate circular dependencies
- improve thumbnail handling
2023-04-09 13:48:44 +10:00
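A hypothetical client call against the new endpoint; the host, port, and the shape of the paginated payload (e.g. an `items` key) are assumptions based on the router diff further down, not verified API details:

```python
import requests

# GET api/v1/images with the query params shown in the router diff below.
resp = requests.get(
    "http://localhost:9090/api/v1/images/",
    params={"image_type": "results", "page": 0, "per_page": 10},
)
resp.raise_for_status()
for image in resp.json().get("items", []):
    print(image["image_name"], image["image_url"])
```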
Kent Keirsey
54d9833db0 Else. 2023-04-08 12:08:51 -04:00
Kent Keirsey
5fe8cb56fc Correct response note 2023-04-08 12:08:51 -04:00
Kent Keirsey
7919d81fb1 Update to address feedback 2023-04-08 12:08:51 -04:00
Kent Keirsey
9d80b28a4f Begin Convert Work 2023-04-08 12:08:51 -04:00
Kent Keirsey
1fcd91bcc5 Add/Update and Delete Models 2023-04-08 12:08:51 -04:00
blessedcoolant
e456e2e63a fix typo (#3147)
fix typo.

reference:
21f79e5919/invokeai/configs/INITIAL_MODELS.yaml (L21-L25)
2023-04-08 20:25:31 +12:00
c67e708d
ee41b99049 Update 050_INSTALLING_MODELS.md
fix typo
2023-04-08 17:02:47 +09:00
psychedelicious
111d674e71 fix(nodes): use correct torch device in NoiseInvocation 2023-04-08 12:32:03 +10:00
Lincoln Stein
8f048cfbd9 Add python-multipart, which is needed by nodes (#3141)
I'm not quite sure why this isn't being installed by fastapi's
dependencies, but running without it installed yields:

```
root@gnubert:/srv/ssdtank/docker/invokeai/git/InvokeAI# docker run --gpus all -p 9989:9090 -v /srv/ssdtank/docker/invokeai/data:/data -v /srv/ssdtank/docker/invokeai/git/InvokeAI/static/dream_web/:/static/dream_web --rm -ti -u root --entrypoint /bin/bash ghcr.io/cmsj/invokeai-nodes@sha256:426ebc414936cb67e02f5f64d963196500a77b2f485df8122a2d462797293938
root@7a77b56a5771:/usr/src# /invoke-new.py --web
Form data requires "python-multipart" to be installed.
You can install "python-multipart" with:

pip install python-multipart

╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ /invoke-new.py:22 in <module>                                                                    │
│                                                                                                  │
│   19                                                                                             │
│   20                                                                                             │
│   21 if __name__ == '__main__':                                                                  │
│ ❱ 22 │   main()                                                                                  │
│   23                                                                                             │
│                                                                                                  │
│ /invoke-new.py:13 in main                                                                        │
│                                                                                                  │
│   10 │   os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))                │
│   11 │                                                                                           │
│   12 │   if '--web' in sys.argv:                                                                 │
│ ❱ 13 │   │   from invokeai.app.api_app import invoke_api                                         │
│   14 │   │   invoke_api()                                                                        │
│   15 │   else:                                                                                   │
│   16 │   │   # TODO: Parse some top-level args here.                                             │
│                                                                                                  │
│ /usr/src/InvokeAI/lib/python3.10/site-packages/invokeai/app/api_app.py:17 in <module>            │
│                                                                                                  │
│    14                                                                                            │
│    15 from ..backend import Args                                                                 │
│    16 from .api.dependencies import ApiDependencies                                              │
│ ❱  17 from .api.routers import images, sessions, models                                          │
│    18 from .api.sockets import SocketIO                                                          │
│    19 from .invocations import *                                                                 │
│    20 from .invocations.baseinvocation import BaseInvocation                                     │
│                                                                                                  │
│ /usr/src/InvokeAI/lib/python3.10/site-packages/invokeai/app/api/routers/images.py:45 in <module> │
│                                                                                                  │
│   42 │   │   404: {"description": "Session not found"},                                          │
│   43 │   },                                                                                      │
│   44 )                                                                                           │
│ ❱ 45 async def upload_image(file: UploadFile, request: Request):                                 │
│   46 │   if not file.content_type.startswith("image"):                                           │
│   47 │   │   return Response(status_code=415)                                                    │
│   48                                                                                             │
│                                                                                                  │
│ /usr/src/InvokeAI/lib/python3.10/site-packages/fastapi/routing.py:630 in decorator               │
│                                                                                                  │
│    627 │   │   ),                                                                                │
│    628 │   ) -> Callable[[DecoratedCallable], DecoratedCallable]:                                │
│    629 │   │   def decorator(func: DecoratedCallable) -> DecoratedCallable:                      │
│ ❱  630 │   │   │   self.add_api_route(                                                           │
│    631 │   │   │   │   path,                                                                     │
│    632 │   │   │   │   func,                                                                     │
│    633 │   │   │   │   response_model=response_model,                                            │
│                                                                                                  │
│ /usr/src/InvokeAI/lib/python3.10/site-packages/fastapi/routing.py:569 in add_api_route           │
│                                                                                                  │
│    566 │   │   current_generate_unique_id = get_value_or_default(                                │
│    567 │   │   │   generate_unique_id_function, self.generate_unique_id_function                 │
│    568 │   │   )                                                                                 │
│ ❱  569 │   │   route = route_class(                                                              │
│    570 │   │   │   self.prefix + path,                                                           │
│    571 │   │   │   endpoint=endpoint,                                                            │
│    572 │   │   │   response_model=response_model,                                                │
│                                                                                                  │
│ /usr/src/InvokeAI/lib/python3.10/site-packages/fastapi/routing.py:444 in __init__                │
│                                                                                                  │
│    441 │   │   │   │   0,                                                                        │
│    442 │   │   │   │   get_parameterless_sub_dependant(depends=depends, path=self.path_format),  │
│    443 │   │   │   )                                                                             │
│ ❱  444 │   │   self.body_field = get_body_field(dependant=self.dependant, name=self.unique_id)   │
│    445 │   │   self.app = request_response(self.get_route_handler())                             │
│    446 │                                                                                         │
│    447 │   def get_route_handler(self) -> Callable[[Request], Coroutine[Any, Any, Response]]:    │
│                                                                                                  │
│ /usr/src/InvokeAI/lib/python3.10/site-packages/fastapi/dependencies/utils.py:756 in              │
│ get_body_field                                                                                   │
│                                                                                                  │
│   753 │   │   alias="body",                                                                      │
│   754 │   │   field_info=BodyFieldInfo(**BodyFieldInfo_kwargs),                                  │
│   755 │   )                                                                                      │
│ ❱ 756 │   check_file_field(final_field)                                                          │
│   757 │   return final_field                                                                     │
│   758                                                                                            │
│                                                                                                  │
│ /usr/src/InvokeAI/lib/python3.10/site-packages/fastapi/dependencies/utils.py:111 in              │
│ check_file_field                                                                                 │
│                                                                                                  │
│   108 │   │   │   │   raise RuntimeError(multipart_incorrect_install_error) from None            │
│   109 │   │   except ImportError:                                                                │
│   110 │   │   │   logger.error(multipart_not_installed_error)                                    │
│ ❱ 111 │   │   │   raise RuntimeError(multipart_not_installed_error) from None                    │
│   112                                                                                            │
│   113                                                                                            │
│   114 def get_param_sub_dependant(                                                               │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
RuntimeError: Form data requires "python-multipart" to be installed.
You can install "python-multipart" with:

pip install python-multipart
```
2023-04-07 19:17:37 -04:00
Chris Jones
7103ac6a32 Add python-multipart, which is needed by nodes 2023-04-07 19:43:42 +01:00
blessedcoolant
f6b131e706 remove vestiges of non-functional autoimport code for legacy checkpoints (#3076)
- the functionality to automatically import and run legacy checkpoint
files in a designated folder has been removed from the backend but there
are vestiges of the code remaining in the frontend that are causing
crashes.
- This fixes the problem.

- Closes #3075
2023-04-08 02:21:23 +12:00
Lincoln Stein
d1b2b99226 Merge branch 'main' into bugfix/remove-autoimport-dead-code 2023-04-07 09:59:58 -04:00
psychedelicious
e356f2511b chore: configure stale bot 2023-04-07 20:45:08 +10:00
Lincoln Stein
e5f8b22a43 add a new method to model_manager that retrieves individual pipeline components (#3120)
This PR introduces a new set of ModelManager methods that enables you to
retrieve the individual parts of a stable diffusion pipeline model,
including the vae, text_encoder, unet, tokenizer, etc.

To use:

```
from invokeai.backend import ModelManager

manager = ModelManager('/path/to/models.yaml')

# get the VAE
vae = manager.get_model_vae('stable-diffusion-1.5')

# get the unet
unet = manager.get_model_unet('stable-diffusion-1.5')

# get the tokenizer
tokenizer = manager.get_model_tokenizer('stable-diffusion-1.5')

# etc etc
feature_extractor = manager.get_model_feature_extractor('stable-diffusion-1.5')
scheduler = manager.get_model_scheduler('stable-diffusion-1.5')
text_encoder = manager.get_model_text_encoder('stable-diffusion-1.5')

# if no model provided, then defaults to the one currently in GPU, if any
vae = manager.get_model_vae()
```
2023-04-07 01:39:57 -04:00
blessedcoolant
45b84fb4bb Merge branch 'main' into bugfix/remove-autoimport-dead-code 2023-04-07 17:07:25 +12:00
Lincoln Stein
f022c89249 Merge branch 'main' into feat/return-submodels 2023-04-06 22:03:31 -04:00
Lincoln Stein
ab05144716 Change where !replay looks for its infile (#3129)
!fetch puts its output file into the output directory; it may be
beneficial to have !replay look in the output directory as well.
2023-04-06 22:02:06 -04:00
Lincoln Stein
aeb4914e67 Merge branch 'main' into replay-file_path 2023-04-06 21:45:23 -04:00
Lincoln Stein
4c339dd4b0 refactor get_submodels() into individual methods 2023-04-06 17:08:23 -04:00
Thomas
7268131f57 change where !replay looks for its infile
!fetch puts its output file into the output directory; it may be beneficial to have !replay look in the output directory as well.
2023-04-06 08:14:11 -04:00
Lincoln Stein
d44151d6ff add a new method to model_manager that retrieves individual pipeline parts
- New method is ModelManager.get_sub_model(model_name:str,model_part:SDModelComponent)

To use:

```
from invokeai.backend import ModelManager, SDModelComponent as sdmc
manager = ModelManager('/path/to/models.yaml')
vae = manager.get_sub_model('stable-diffusion-1.5', sdmc.vae)
```
2023-04-05 17:25:42 -04:00
Lincoln Stein
1f89cf3343 remove vestiges of non-functional autoimport code for legacy checkpoints
- Closes #3075
2023-03-31 04:27:03 -04:00
268 changed files with 11107 additions and 1025 deletions

.github/stale.yaml (new file)

@@ -0,0 +1,19 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 28
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 14
# Issues with these labels will never be considered stale
exemptLabels:
- pinned
- security
# Label to use when marking an issue as stale
staleLabel: stale
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed if no further activity occurs. Please
  update the ticket if this is still a problem on the latest release.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: >
  Due to inactivity, this issue has been automatically closed. If this is
  still a problem on the latest release, please recreate the issue.


@@ -1,10 +1,18 @@
# Invocations
Invocations represent a single operation, its inputs, and its outputs. These operations and their outputs can be chained together to generate and modify images.
## Creating a new invocation
To create a new invocation, either find the appropriate module file in `/ldm/invoke/app/invocations` to add your invocation to, or create a new one in that folder. All invocations in that folder will be discovered and made available to the CLI and API automatically. Invocations make use of [typing](https://docs.python.org/3/library/typing.html) and [pydantic](https://pydantic-docs.helpmanual.io/) for validation and integration into the CLI and API.
An invocation looks like this:
@@ -41,34 +49,54 @@ class UpscaleInvocation(BaseInvocation):
Each portion is important to implement correctly.
### Class definition and type
```py
class UpscaleInvocation(BaseInvocation):
"""Upscales an image."""
type: Literal['upscale'] = 'upscale'
```
All invocations must derive from `BaseInvocation`. They should have a docstring that declares what they do in a single, short line. They should also have a `type` with a type hint that's `Literal["command_name"]`, where `command_name` is what the user will type on the CLI or use in the API to create this invocation. The `command_name` must be unique. The `type` must be assigned to the value of the literal in the type hint.
### Inputs
```py
# Inputs
image: Union[ImageField,None] = Field(description="The input image")
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
level: Literal[2,4] = Field(default=2, description="The upscale level")
```
Inputs consist of three parts: a name, a type hint, and a `Field` with default, description, and validation information. For example:

| Part | Value | Description |
| ---- | ----- | ----------- |
| Name | `strength` | This field is referred to as `strength` |
| Type Hint | `float` | This field must be of type `float` |
| Field | `Field(default=0.75, gt=0, le=1, description="The strength")` | The default value is `0.75`, the value must be in the range (0,1], and help text will show "The strength" for this field. |

Notice that `image` has type `Union[ImageField,None]`. The `Union` allows this field to be parsed with `None` as a value, which enables linking to previous invocations. All fields should either provide a default value or allow `None` as a value, so that they can be overwritten with a linked output from another invocation.

The special type `ImageField` is also used here. All images are passed as `ImageField`, which protects them from pydantic validation errors (since images only ever come from links).

Finally, note that for all linking, the `type` of the linked fields must match. If the `name` also matches, then the field can be **automatically linked** to a previous invocation by name and matching.
### Invoke Function
```py
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get(self.image.image_type, self.image.image_name)
@@ -88,13 +116,22 @@ Finally, note that for all linking, the `type` of the linked fields must match.
image = ImageField(image_type = image_type, image_name = image_name)
)
```
The `invoke` function is the last portion of an invocation. It is provided an `InvocationContext` which contains services to perform work as well as a `session_id` for use as needed. It should return a class with output values that derives from `BaseInvocationOutput`.

Before being called, the invocation will have all of its fields set from defaults, inputs, and finally links (overriding in that order).

Assume that this invocation may be running simultaneously with other invocations, may be running on another machine, or in other interesting scenarios. If you need functionality, please provide it as a service in the `InvocationServices` class, and make sure it can be overridden.
### Outputs
```py
class ImageOutput(BaseInvocationOutput):
"""Base class for invocations that output an image"""
@@ -102,4 +139,64 @@ class ImageOutput(BaseInvocationOutput):
image: ImageField = Field(default=None, description="The output image")
```
Output classes look like an invocation class without the invoke method. Prefer to use an existing output class if available, and prefer to name inputs the same as outputs when possible, to promote automatic invocation linking.
## Schema Generation
Invocation, output and related classes are used to generate an OpenAPI schema.
### Required Properties
The schema generation treats all properties with default values as optional. This
makes sense internally, but when using these classes via the generated
schema, we end up with e.g. the `ImageOutput` class having its `image` property
marked as optional.
We know that this property will always be present, so the additional logic
needed to always check if the property exists adds a lot of extraneous cruft.
To fix this, we can leverage `pydantic`'s
[schema customisation](https://docs.pydantic.dev/usage/schema/#schema-customization)
to mark properties that we know will always be present as required.
Here's that `ImageOutput` class, without the needed schema customisation:
```python
class ImageOutput(BaseInvocationOutput):
"""Base class for invocations that output an image"""
type: Literal["image"] = "image"
image: ImageField = Field(default=None, description="The output image")
```
The generated OpenAPI schema, and all clients/types generated from it, will have
the `type` and `image` properties marked as optional, even though we know they
will always have a value by the time we can interact with them via the API.
Here's the same class, but with the schema customisation added:
```python
class ImageOutput(BaseInvocationOutput):
"""Base class for invocations that output an image"""
type: Literal["image"] = "image"
image: ImageField = Field(default=None, description="The output image")
class Config:
schema_extra = {
'required': [
'type',
'image',
]
}
```
The resultant schema (and any API client or types generated from it) will now
show `type` as the string literal `"image"` and `image` as an `ImageField`
object.
See this `pydantic` issue for discussion on this solution:
<https://github.com/pydantic/pydantic/discussions/4577>


@@ -50,7 +50,7 @@ subset that are currently installed are found in
|stable-diffusion-1.5|runwayml/stable-diffusion-v1-5|Stable Diffusion version 1.5 diffusers model (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-v1-5 |
|sd-inpainting-1.5|runwayml/stable-diffusion-inpainting|RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-inpainting |
|stable-diffusion-2.1|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|sd-inpainting-2.0|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.0 inpainting model (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|sd-inpainting-2.0|stabilityai/stable-diffusion-2-inpainting|Stable Diffusion version 2.0 inpainting model (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-inpainting |
|analog-diffusion-1.0|wavymulder/Analog-Diffusion|An SD-1.5 model trained on diverse analog photographs (2.13 GB)|https://huggingface.co/wavymulder/Analog-Diffusion |
|deliberate-1.0|XpucT/Deliberate|Versatile model that produces detailed images up to 768px (4.27 GB)|https://huggingface.co/XpucT/Deliberate |
|d&d-diffusion-1.0|0xJustin/Dungeons-and-Diffusion|Dungeons & Dragons characters (2.13 GB)|https://huggingface.co/0xJustin/Dungeons-and-Diffusion |


@@ -461,7 +461,8 @@ def get_torch_source() -> (Union[str, None],str):
url = "https://download.pytorch.org/whl/cpu"
if device == 'cuda':
url = 'https://download.pytorch.org/whl/cu118'
url = 'https://download.pytorch.org/whl/cu117'
optional_modules = '[xformers]'
# in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13


@@ -0,0 +1,14 @@
from pydantic import BaseModel, Field
from invokeai.app.models.image import ImageType
from invokeai.app.models.metadata import ImageMetadata
class ImageResponse(BaseModel):
    """The response type for images"""

    image_type: ImageType = Field(description="The type of the image")
    image_name: str = Field(description="The name of the image")
    image_url: str = Field(description="The url of the image")
    thumbnail_url: str = Field(description="The url of the image's thumbnail")
    metadata: ImageMetadata = Field(description="The image's metadata")


@@ -1,18 +1,23 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
import io
from datetime import datetime, timezone
import json
import os
import uuid
from fastapi import Path, Request, UploadFile
from fastapi import Path, Query, Request, UploadFile
from fastapi.responses import FileResponse, Response
from fastapi.routing import APIRouter
from PIL import Image
from invokeai.app.api.models.images import ImageResponse
from invokeai.app.models.metadata import ImageMetadata
from invokeai.app.services.item_storage import PaginatedResults
from ...services.image_storage import ImageType
from ..dependencies import ApiDependencies
images_router = APIRouter(prefix="/v1/images", tags=["images"])
@images_router.get("/{image_type}/{image_name}", operation_id="get_image")
async def get_image(
image_type: ImageType = Path(description="The type of image to get"),
@@ -38,29 +43,60 @@ async def get_thumbnail(
"/uploads/",
operation_id="upload_image",
responses={
201: {"description": "The image was uploaded successfully"},
201: {"description": "The image was uploaded successfully", "model": ImageResponse},
404: {"description": "Session not found"},
},
status_code=201
)
async def upload_image(file: UploadFile, request: Request):
async def upload_image(file: UploadFile, request: Request, response: Response) -> ImageResponse:
if not file.content_type.startswith("image"):
return Response(status_code=415)
contents = await file.read()
try:
im = Image.open(contents)
img = Image.open(io.BytesIO(contents))
except:
# Error opening the image
return Response(status_code=415)
filename = f"{str(int(datetime.now(timezone.utc).timestamp()))}.png"
ApiDependencies.invoker.services.images.save(ImageType.UPLOAD, filename, im)
filename = f"{uuid.uuid4()}_{str(int(datetime.now(timezone.utc).timestamp()))}.png"
image_path = ApiDependencies.invoker.services.images.save(ImageType.UPLOAD, filename, img)
invokeai_metadata = json.loads(img.info.get("invokeai", "{}"))
return Response(
status_code=201,
headers={
"Location": request.url_for(
"get_image", image_type=ImageType.UPLOAD, image_name=filename
)
},
res = ImageResponse(
image_type=ImageType.UPLOAD,
image_name=filename,
# TODO: DiskImageStorage should not be building URLs...?
image_url=f"api/v1/images/{ImageType.UPLOAD.value}/{filename}",
thumbnail_url=f"api/v1/images/{ImageType.UPLOAD.value}/thumbnails/{os.path.splitext(filename)[0]}.webp",
# TODO: Creation of this object should happen elsewhere, just making it fit here so it works
metadata=ImageMetadata(
created=int(os.path.getctime(image_path)),
width=img.width,
height=img.height,
invokeai=invokeai_metadata
),
)
response.status_code = 201
response.headers["Location"] = request.url_for(
"get_image", image_type=ImageType.UPLOAD.value, image_name=filename
)
return res
@images_router.get(
"/",
operation_id="list_images",
responses={200: {"model": PaginatedResults[ImageResponse]}},
)
async def list_images(
image_type: ImageType = Query(default=ImageType.RESULT, description="The type of images to get"),
page: int = Query(default=0, description="The page of images to get"),
per_page: int = Query(default=10, description="The number of images per page"),
) -> PaginatedResults[ImageResponse]:
"""Gets a list of images"""
result = ApiDependencies.invoker.services.images.list(
image_type, page, per_page
)
return result


@@ -1,11 +1,17 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) and 2023 Kent Keirsey (https://github.com/hipsterusername)
import shutil
import asyncio
from typing import Annotated, Any, List, Literal, Optional, Union
from fastapi.routing import APIRouter
from fastapi.routing import APIRouter, HTTPException
from pydantic import BaseModel, Field, parse_obj_as
from pathlib import Path
from ..dependencies import ApiDependencies
from invokeai.backend.globals import Globals, global_converted_ckpts_dir
from invokeai.backend.args import Args
models_router = APIRouter(prefix="/v1/models", tags=["models"])
@@ -15,11 +21,9 @@ class VaeRepo(BaseModel):
path: Optional[str] = Field(description="The path to the VAE")
subfolder: Optional[str] = Field(description="The subfolder to use for this VAE")
class ModelInfo(BaseModel):
description: Optional[str] = Field(description="A description of the model")
class CkptModelInfo(ModelInfo):
format: Literal['ckpt'] = 'ckpt'
@@ -29,7 +33,6 @@ class CkptModelInfo(ModelInfo):
width: Optional[int] = Field(description="The width of the model")
height: Optional[int] = Field(description="The height of the model")
class DiffusersModelInfo(ModelInfo):
format: Literal['diffusers'] = 'diffusers'
@@ -37,12 +40,29 @@ class DiffusersModelInfo(ModelInfo):
repo_id: Optional[str] = Field(description="The repo ID to use for this model")
path: Optional[str] = Field(description="The path to the model")
class CreateModelRequest(BaseModel):
name: str = Field(description="The name of the model")
info: Union[CkptModelInfo, DiffusersModelInfo] = Field(discriminator="format", description="The model info")
class CreateModelResponse(BaseModel):
name: str = Field(description="The name of the new model")
info: Union[CkptModelInfo, DiffusersModelInfo] = Field(discriminator="format", description="The model info")
status: str = Field(description="The status of the API response")
class ConversionRequest(BaseModel):
name: str = Field(description="The name of the new model")
info: CkptModelInfo = Field(description="The converted model info")
save_location: str = Field(description="The path to save the converted model weights")
class ConvertedModelResponse(BaseModel):
name: str = Field(description="The name of the new model")
info: DiffusersModelInfo = Field(description="The converted model info")
class ModelsList(BaseModel):
models: dict[str, Annotated[Union[(CkptModelInfo,DiffusersModelInfo)], Field(discriminator="format")]]
@models_router.get(
"/",
operation_id="list_models",
@@ -54,108 +74,61 @@ async def list_models() -> ModelsList:
models = parse_obj_as(ModelsList, { "models": models_raw })
return models
# @socketio.on("requestSystemConfig")
# def handle_request_capabilities():
# print(">> System config requested")
# config = self.get_system_config()
# config["model_list"] = self.generate.model_manager.list_models()
# config["infill_methods"] = infill_methods()
# socketio.emit("systemConfig", config)
# @socketio.on("searchForModels")
# def handle_search_models(search_folder: str):
# try:
# if not search_folder:
# socketio.emit(
# "foundModels",
# {"search_folder": None, "found_models": None},
# )
# else:
# (
# search_folder,
# found_models,
# ) = self.generate.model_manager.search_models(search_folder)
# socketio.emit(
# "foundModels",
# {"search_folder": search_folder, "found_models": found_models},
# )
# except Exception as e:
# self.handle_exceptions(e)
# print("\n")
@models_router.post(
"/",
operation_id="update_model",
responses={200: {"status": "success"}},
)
async def update_model(
model_request: CreateModelRequest
) -> CreateModelResponse:
""" Add Model """
model_request_info = model_request.info
info_dict = model_request_info.dict()
model_response = CreateModelResponse(name=model_request.name, info=model_request.info, status="success")
# @socketio.on("addNewModel")
# def handle_add_model(new_model_config: dict):
# try:
# model_name = new_model_config["name"]
# del new_model_config["name"]
# model_attributes = new_model_config
# if len(model_attributes["vae"]) == 0:
# del model_attributes["vae"]
# update = False
# current_model_list = self.generate.model_manager.list_models()
# if model_name in current_model_list:
# update = True
ApiDependencies.invoker.services.model_manager.add_model(
model_name=model_request.name,
model_attributes=info_dict,
clobber=True,
)
# print(f">> Adding New Model: {model_name}")
return model_response
# self.generate.model_manager.add_model(
# model_name=model_name,
# model_attributes=model_attributes,
# clobber=True,
# )
# self.generate.model_manager.commit(opt.conf)
# new_model_list = self.generate.model_manager.list_models()
# socketio.emit(
# "newModelAdded",
# {
# "new_model_name": model_name,
# "model_list": new_model_list,
# "update": update,
# },
# )
# print(f">> New Model Added: {model_name}")
# except Exception as e:
# self.handle_exceptions(e)
@models_router.delete(
"/{model_name}",
operation_id="del_model",
responses={
204: {
"description": "Model deleted successfully"
},
404: {
"description": "Model not found"
}
},
)
async def delete_model(model_name: str) -> None:
"""Delete Model"""
model_names = ApiDependencies.invoker.services.model_manager.model_names()
model_exists = model_name in model_names
# @socketio.on("deleteModel")
# def handle_delete_model(model_name: str):
# try:
# print(f">> Deleting Model: {model_name}")
# self.generate.model_manager.del_model(model_name)
# self.generate.model_manager.commit(opt.conf)
# updated_model_list = self.generate.model_manager.list_models()
# socketio.emit(
# "modelDeleted",
# {
# "deleted_model_name": model_name,
# "model_list": updated_model_list,
# },
# )
# print(f">> Model Deleted: {model_name}")
# except Exception as e:
# self.handle_exceptions(e)
# check if model exists
print(f">> Checking for model {model_name}...")
if model_exists:
print(f">> Deleting Model: {model_name}")
ApiDependencies.invoker.services.model_manager.del_model(model_name, delete_files=True)
print(f">> Model Deleted: {model_name}")
raise HTTPException(status_code=204, detail=f"Model '{model_name}' deleted successfully")
else:
print(f">> Model not found")
raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found")
# @socketio.on("requestModelChange")
# def handle_set_model(model_name: str):
# try:
# print(f">> Model change requested: {model_name}")
# model = self.generate.set_model(model_name)
# model_list = self.generate.model_manager.list_models()
# if model is None:
# socketio.emit(
# "modelChangeFailed",
# {"model_name": model_name, "model_list": model_list},
# )
# else:
# socketio.emit(
# "modelChanged",
# {"model_name": model_name, "model_list": model_list},
# )
# except Exception as e:
# self.handle_exceptions(e)
# @socketio.on("convertToDiffusers")
# @socketio.on("convertToDiffusers")
# def convert_to_diffusers(model_to_convert: dict):
# try:
# if model_info := self.generate.model_manager.model_info(
@@ -275,5 +248,4 @@ async def list_models() -> ModelsList:
# )
# print(f">> Models Merged: {models_to_merge}")
# print(f">> New Model Added: {model_merge_info['merged_model_name']}")
# except Exception as e:
# self.handle_exceptions(e)
# except Exception as e:


@@ -6,7 +6,8 @@ from typing import Any, Callable, Iterable, Literal, get_args, get_origin, get_t
from pydantic import BaseModel, Field
import networkx as nx
import matplotlib.pyplot as plt
from ..invocations.image import ImageField
from ..models.image import ImageField
from ..services.graph import GraphExecutionState
from ..services.invoker import Invoker


@@ -2,7 +2,7 @@
from abc import ABC, abstractmethod
from inspect import signature
from typing import get_args, get_type_hints
from typing import get_args, get_type_hints, Dict, List, Literal, TypedDict
from pydantic import BaseModel, Field
@@ -76,3 +76,56 @@ class BaseInvocation(ABC, BaseModel):
#fmt: off
id: str = Field(description="The id of this node. Must be unique among all nodes.")
#fmt: on
# TODO: figure out a better way to provide these hints
# TODO: when we can upgrade to python 3.11, we can use the `NotRequired` type instead of `total=False`
class UIConfig(TypedDict, total=False):
    type_hints: Dict[
        str,
        Literal[
            "integer",
            "float",
            "boolean",
            "string",
            "enum",
            "image",
            "latents",
            "model",
        ],
    ]
    tags: List[str]


class CustomisedSchemaExtra(TypedDict):
    ui: UIConfig


class InvocationConfig(BaseModel.Config):
    """Customizes pydantic's BaseModel.Config class for use by Invocations.

    Provide `schema_extra` a `ui` dict to add hints for generated UIs.

    `tags`
    - A list of strings, used to categorise invocations.

    `type_hints`
    - A dict of field types which override the types in the invocation definition.
    - Each key should be the name of one of the invocation's fields.
    - Each value should be one of the valid types:
      - `integer`, `float`, `boolean`, `string`, `enum`, `image`, `latents`, `model`

    ```python
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["stable-diffusion", "image"],
                "type_hints": {
                    "initial_image": "image",
                },
            },
        }
    ```
    """

    schema_extra: CustomisedSchemaExtra


@@ -5,14 +5,26 @@ from typing import Literal
import cv2 as cv
import numpy
from PIL import Image, ImageOps
from pydantic import Field
from pydantic import BaseModel, Field
from ..services.image_storage import ImageType
from .baseinvocation import BaseInvocation, InvocationContext
from .image import ImageField, ImageOutput
from invokeai.app.models.image import ImageField, ImageType
from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
from .image import ImageOutput, build_image_output
class CvInpaintInvocation(BaseInvocation):
class CvInvocationConfig(BaseModel):
"""Helper class to provide all OpenCV invocations with additional config"""
# Schema customisation
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["cv", "image"],
},
}
class CvInpaintInvocation(BaseInvocation, CvInvocationConfig):
"""Simple inpaint using opencv."""
#fmt: off
type: Literal["cv_inpaint"] = "cv_inpaint"
@@ -44,7 +56,9 @@ class CvInpaintInvocation(BaseInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
context.services.images.save(image_type, image_name, image_inpainted)
return ImageOutput(
image=ImageField(image_type=image_type, image_name=image_name)
context.services.images.save(image_type, image_name, image_inpainted, self.dict())
return build_image_output(
image_type=image_type,
image_name=image_name,
image=image_inpainted,
)


@@ -6,21 +6,37 @@ from typing import Literal, Optional, Union
import numpy as np
from torch import Tensor
from pydantic import Field
from pydantic import BaseModel, Field
from ..services.image_storage import ImageType
from .baseinvocation import BaseInvocation, InvocationContext
from .image import ImageField, ImageOutput
from invokeai.app.models.image import ImageField, ImageType
from invokeai.app.invocations.util.choose_model import choose_model
from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
from .image import ImageOutput, build_image_output
from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator
from ...backend.stable_diffusion import PipelineIntermediateState
from ..util.util import diffusers_step_callback_adapter, CanceledException
from ..models.exceptions import CanceledException
from ..util.step_callback import diffusers_step_callback_adapter
SAMPLER_NAME_VALUES = Literal[tuple(InvokeAIGenerator.schedulers())]
class SDImageInvocation(BaseModel):
"""Helper class to provide all Stable Diffusion raster image invocations with additional config"""
# Schema customisation
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["stable-diffusion", "image"],
"type_hints": {
"model": "model",
},
},
}
SAMPLER_NAME_VALUES = Literal[
tuple(InvokeAIGenerator.schedulers())
]
# Text to image
class TextToImageInvocation(BaseInvocation):
class TextToImageInvocation(BaseInvocation, SDImageInvocation):
"""Generates an image using text2img."""
type: Literal["txt2img"] = "txt2img"
@@ -34,7 +50,7 @@ class TextToImageInvocation(BaseInvocation):
width: int = Field(default=512, multiple_of=64, gt=0, description="The width of the resulting image", )
height: int = Field(default=512, multiple_of=64, gt=0, description="The height of the resulting image", )
cfg_scale: float = Field(default=7.5, gt=0, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", )
sampler_name: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The sampler to use" )
scheduler: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The scheduler to use" )
seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
model: str = Field(default="", description="The model to use (currently ignored)")
progress_images: bool = Field(default=False, description="Whether or not to produce progress images during generation", )
@@ -58,16 +74,10 @@ class TextToImageInvocation(BaseInvocation):
diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
def invoke(self, context: InvocationContext) -> ImageOutput:
# def step_callback(state: PipelineIntermediateState):
# if (context.services.queue.is_canceled(context.graph_execution_state_id)):
# raise CanceledException
# self.dispatch_progress(context, state.latents, state.step)
# Handle invalid model parameter
# TODO: figure out if this can be done via a validator that uses the model_cache
# TODO: How to get the default model name now?
# (right now uses whatever current model is set in model manager)
model= context.services.model_manager.get_model()
model = choose_model(context.services.model_manager, self.model)
self.model_name = model["model_name"]
outputs = Txt2Img(model).generate(
prompt=self.prompt,
step_callback=partial(self.dispatch_progress, context),
@@ -86,9 +96,22 @@ class TextToImageInvocation(BaseInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
context.services.images.save(image_type, image_name, generate_output.image)
return ImageOutput(
image=ImageField(image_type=image_type, image_name=image_name)
graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
source_id = graph_execution_state.prepared_source_mapping[self.id]
invocation = graph_execution_state.execution_graph.get_node(self.id)
metadata = {
"session": context.graph_execution_state_id,
"source_id": source_id,
"invocation": invocation.dict()
}
context.services.images.save(image_type, image_name, generate_output.image, metadata)
return build_image_output(
image_type=image_type,
image_name=image_name,
image=generate_output.image
)
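For orientation, the metadata dict assembled above serializes roughly as follows; all values here are hypothetical:
metadata = {
    "session": "7f3c9e42-...",    # context.graph_execution_state_id
    "source_id": "txt2img_1",     # id of the unprepared source node
    "invocation": {"type": "txt2img", "prompt": "a cat"},  # invocation.dict(), truncated
}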
@@ -134,9 +157,9 @@ class ImageToImageInvocation(TextToImageInvocation):
mask = None
# Handle invalid model parameter
# TODO: figure out if this can be done via a validator that uses the model_cache
# TODO: How to get the default model name now?
model = context.services.model_manager.get_model()
model = choose_model(context.services.model_manager, self.model)
self.model = model["model_name"]
outputs = Img2Img(model).generate(
prompt=self.prompt,
init_image=image,
@@ -160,9 +183,11 @@ class ImageToImageInvocation(TextToImageInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
context.services.images.save(image_type, image_name, result_image)
return ImageOutput(
image=ImageField(image_type=image_type, image_name=image_name)
context.services.images.save(image_type, image_name, result_image, self.dict())
return build_image_output(
image_type=image_type,
image_name=image_name,
image=result_image
)
class InpaintInvocation(ImageToImageInvocation):
@@ -210,9 +235,9 @@ class InpaintInvocation(ImageToImageInvocation):
)
# Handle invalid model parameter
# TODO: figure out if this can be done via a validator that uses the model_cache
# TODO: How to get the default model name now?
model = context.services.model_manager.get_model()
model = choose_model(context.services.model_manager, self.model)
self.model = model["model_name"]
outputs = Inpaint(model).generate(
prompt=self.prompt,
init_img=image,
@@ -236,7 +261,9 @@ class InpaintInvocation(ImageToImageInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
context.services.images.save(image_type, image_name, result_image)
return ImageOutput(
image=ImageField(image_type=image_type, image_name=image_name)
context.services.images.save(image_type, image_name, result_image, self.dict())
return build_image_output(
image_type=image_type,
image_name=image_name,
image=result_image
)

View File

@@ -7,65 +7,90 @@ import numpy
from PIL import Image, ImageFilter, ImageOps
from pydantic import BaseModel, Field
from ..services.image_storage import ImageType
from ..models.image import ImageField, ImageType
from ..services.invocation_services import InvocationServices
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
InvocationContext,
InvocationConfig,
)
class ImageField(BaseModel):
"""An image field used for passing image objects between invocations"""
class PILInvocationConfig(BaseModel):
"""Helper class to provide all PIL invocations with additional config"""
image_type: str = Field(
default=ImageType.RESULT, description="The type of the image"
)
image_name: Optional[str] = Field(default=None, description="The name of the image")
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["PIL", "image"],
},
}
class ImageOutput(BaseInvocationOutput):
"""Base class for invocations that output an image"""
#fmt: off
# fmt: off
type: Literal["image"] = "image"
image: ImageField = Field(default=None, description="The output image")
#fmt: on
width: int = Field(description="The width of the image in pixels")
height: int = Field(description="The height of the image in pixels")
# fmt: on
class Config:
schema_extra = {
'required': [
'type',
'image',
"required": [
"type",
"image",
"width",
"height",
]
}
def build_image_output(
image_type: ImageType, image_name: str, image: Image.Image
) -> ImageOutput:
image_field = ImageField(image_name=image_name, image_type=image_type)
return ImageOutput(image=image_field, width=image.width, height=image.height)
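A quick usage sketch of the helper above, assuming the imports from this module are in scope; the PIL image and file name are synthetic:
from PIL import Image
img = Image.new("RGB", (512, 512))
output = build_image_output(
    image_type=ImageType.RESULT, image_name="000001.png", image=img
)
assert (output.width, output.height) == (512, 512)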
class MaskOutput(BaseInvocationOutput):
"""Base class for invocations that output a mask"""
#fmt: off
# fmt: off
type: Literal["mask"] = "mask"
mask: ImageField = Field(default=None, description="The output mask")
#fmt: on
# fmt: on
class Config:
schema_extra = {
'required': [
'type',
'mask',
"required": [
"type",
"mask",
]
}
# TODO: this isn't really necessary anymore
class LoadImageInvocation(BaseInvocation):
"""Load an image from a filename and provide it as output."""
#fmt: off
type: Literal["load_image"] = "load_image"
# Inputs
image_type: ImageType = Field(description="The type of the image")
image_name: str = Field(description="The name of the image")
#fmt: on
# # TODO: this isn't really necessary anymore
# class LoadImageInvocation(BaseInvocation):
# """Load an image from a filename and provide it as output."""
# #fmt: off
# type: Literal["load_image"] = "load_image"
def invoke(self, context: InvocationContext) -> ImageOutput:
return ImageOutput(
image=ImageField(image_type=self.image_type, image_name=self.image_name)
)
# # Inputs
# image_type: ImageType = Field(description="The type of the image")
# image_name: str = Field(description="The name of the image")
# #fmt: on
# def invoke(self, context: InvocationContext) -> ImageOutput:
# return ImageOutput(
# image_type=self.image_type,
# image_name=self.image_name,
# image=result_image
# )
class ShowImageInvocation(BaseInvocation):
@@ -85,16 +110,17 @@ class ShowImageInvocation(BaseInvocation):
# TODO: how to handle failure?
return ImageOutput(
image=ImageField(
image_type=self.image.image_type, image_name=self.image.image_name
)
return build_image_output(
image_type=self.image.image_type,
image_name=self.image.image_name,
image=image,
)
class CropImageInvocation(BaseInvocation):
class CropImageInvocation(BaseInvocation, PILInvocationConfig):
"""Crops an image to a specified box. The box can be outside of the image."""
#fmt: off
# fmt: off
type: Literal["crop"] = "crop"
# Inputs
@@ -103,7 +129,7 @@ class CropImageInvocation(BaseInvocation):
y: int = Field(default=0, description="The top y coordinate of the crop rectangle")
width: int = Field(default=512, gt=0, description="The width of the crop rectangle")
height: int = Field(default=512, gt=0, description="The height of the crop rectangle")
#fmt: on
# fmt: on
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get(
@@ -119,15 +145,16 @@ class CropImageInvocation(BaseInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
context.services.images.save(image_type, image_name, image_crop)
return ImageOutput(
image=ImageField(image_type=image_type, image_name=image_name)
context.services.images.save(image_type, image_name, image_crop, self.dict())
return build_image_output(
image_type=image_type, image_name=image_name, image=image_crop
)
class PasteImageInvocation(BaseInvocation):
class PasteImageInvocation(BaseInvocation, PILInvocationConfig):
"""Pastes an image into another image."""
#fmt: off
# fmt: off
type: Literal["paste"] = "paste"
# Inputs
@@ -136,7 +163,7 @@ class PasteImageInvocation(BaseInvocation):
mask: Optional[ImageField] = Field(default=None, description="The mask to use when pasting")
x: int = Field(default=0, description="The left x coordinate at which to paste the image")
y: int = Field(default=0, description="The top y coordinate at which to paste the image")
#fmt: on
# fmt: on
def invoke(self, context: InvocationContext) -> ImageOutput:
base_image = context.services.images.get(
@@ -149,7 +176,7 @@ class PasteImageInvocation(BaseInvocation):
None
if self.mask is None
else ImageOps.invert(
services.images.get(self.mask.image_type, self.mask.image_name)
context.services.images.get(self.mask.image_type, self.mask.image_name)
)
)
# TODO: probably shouldn't invert mask here... should user be required to do it?
@@ -169,21 +196,22 @@ class PasteImageInvocation(BaseInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
context.services.images.save(image_type, image_name, new_image)
return ImageOutput(
image=ImageField(image_type=image_type, image_name=image_name)
context.services.images.save(image_type, image_name, new_image, self.dict())
return build_image_output(
image_type=image_type, image_name=image_name, image=new_image
)
class MaskFromAlphaInvocation(BaseInvocation):
class MaskFromAlphaInvocation(BaseInvocation, PILInvocationConfig):
"""Extracts the alpha channel of an image as a mask."""
#fmt: off
# fmt: off
type: Literal["tomask"] = "tomask"
# Inputs
image: ImageField = Field(default=None, description="The image to create the mask from")
invert: bool = Field(default=False, description="Whether or not to invert the mask")
#fmt: on
# fmt: on
def invoke(self, context: InvocationContext) -> MaskOutput:
image = context.services.images.get(
@@ -198,22 +226,22 @@ class MaskFromAlphaInvocation(BaseInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
context.services.images.save(image_type, image_name, image_mask)
context.services.images.save(image_type, image_name, image_mask, self.dict())
return MaskOutput(mask=ImageField(image_type=image_type, image_name=image_name))
class BlurInvocation(BaseInvocation):
class BlurInvocation(BaseInvocation, PILInvocationConfig):
"""Blurs an image"""
#fmt: off
# fmt: off
type: Literal["blur"] = "blur"
# Inputs
image: ImageField = Field(default=None, description="The image to blur")
radius: float = Field(default=8.0, ge=0, description="The blur radius")
blur_type: Literal["gaussian", "box"] = Field(default="gaussian", description="The type of blur")
#fmt: on
# fmt: on
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get(
self.image.image_type, self.image.image_name
@@ -230,22 +258,23 @@ class BlurInvocation(BaseInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
context.services.images.save(image_type, image_name, blur_image)
return ImageOutput(
image=ImageField(image_type=image_type, image_name=image_name)
context.services.images.save(image_type, image_name, blur_image, self.dict())
return build_image_output(
image_type=image_type, image_name=image_name, image=blur_image
)
class LerpInvocation(BaseInvocation):
class LerpInvocation(BaseInvocation, PILInvocationConfig):
"""Linear interpolation of all pixels of an image"""
#fmt: off
# fmt: off
type: Literal["lerp"] = "lerp"
# Inputs
image: ImageField = Field(default=None, description="The image to lerp")
min: int = Field(default=0, ge=0, le=255, description="The minimum output value")
max: int = Field(default=255, ge=0, le=255, description="The maximum output value")
#fmt: on
# fmt: on
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get(
@@ -261,23 +290,24 @@ class LerpInvocation(BaseInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
context.services.images.save(image_type, image_name, lerp_image)
return ImageOutput(
image=ImageField(image_type=image_type, image_name=image_name)
context.services.images.save(image_type, image_name, lerp_image, self.dict())
return build_image_output(
image_type=image_type, image_name=image_name, image=lerp_image
)
class InverseLerpInvocation(BaseInvocation):
class InverseLerpInvocation(BaseInvocation, PILInvocationConfig):
"""Inverse linear interpolation of all pixels of an image"""
#fmt: off
# fmt: off
type: Literal["ilerp"] = "ilerp"
# Inputs
image: ImageField = Field(default=None, description="The image to lerp")
min: int = Field(default=0, ge=0, le=255, description="The minimum input value")
max: int = Field(default=255, ge=0, le=255, description="The maximum input value")
#fmt: on
# fmt: on
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get(
self.image.image_type, self.image.image_name
@@ -297,7 +327,7 @@ class InverseLerpInvocation(BaseInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
context.services.images.save(image_type, image_name, ilerp_image)
return ImageOutput(
image=ImageField(image_type=image_type, image_name=image_name)
context.services.images.save(image_type, image_name, ilerp_image, self.dict())
return build_image_output(
image_type=image_type, image_name=image_name, image=ilerp_image
)

View File

@@ -2,24 +2,24 @@
from typing import Literal, Optional
from pydantic import BaseModel, Field
from torch import Tensor
import torch
from invokeai.app.models.exceptions import CanceledException
from invokeai.app.invocations.util.choose_model import choose_model
from invokeai.app.util.step_callback import diffusers_step_callback_adapter
from ...backend.model_management.model_manager import ModelManager
from ...backend.util.devices import CUDA_DEVICE, torch_dtype
from ...backend.util.devices import choose_torch_device, torch_dtype
from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings
from ...backend.image_util.seamless import configure_model_padding
from ...backend.prompting.conditioning import get_uc_and_c_and_ec
from ...backend.stable_diffusion.diffusers_pipeline import ConditioningData, StableDiffusionGeneratorPipeline
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig
import numpy as np
from accelerate.utils import set_seed
from ..services.image_storage import ImageType
from .baseinvocation import BaseInvocation, InvocationContext
from .image import ImageField, ImageOutput
from ...backend.generator import Generator
from .image import ImageField, ImageOutput, build_image_output
from ...backend.stable_diffusion import PipelineIntermediateState
from ...backend.util.util import image_to_dataURL
from diffusers.schedulers import SchedulerMixin as Scheduler
import diffusers
from diffusers import DiffusionPipeline
@@ -109,8 +109,17 @@ class NoiseInvocation(BaseInvocation):
width: int = Field(default=512, multiple_of=64, gt=0, description="The width of the resulting noise", )
height: int = Field(default=512, multiple_of=64, gt=0, description="The height of the resulting noise", )
# Schema customisation
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["latents", "noise"],
},
}
def invoke(self, context: InvocationContext) -> NoiseOutput:
device = torch.device(CUDA_DEVICE)
device = torch.device(choose_torch_device())
noise = get_noise(self.width, self.height, device, self.seed)
name = f'{context.graph_execution_state_id}__{self.id}'
@@ -136,46 +145,50 @@ class TextToLatentsInvocation(BaseInvocation):
width: int = Field(default=512, multiple_of=64, gt=0, description="The width of the resulting image", )
height: int = Field(default=512, multiple_of=64, gt=0, description="The height of the resulting image", )
cfg_scale: float = Field(default=7.5, gt=0, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", )
sampler_name: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The sampler to use" )
scheduler: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The scheduler to use" )
seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
seamless_axes: str = Field(default="", description="The axes to tile the image on, 'x' and/or 'y'")
model: str = Field(default="", description="The model to use (currently ignored)")
progress_images: bool = Field(default=False, description="Whether or not to produce progress images during generation", )
# fmt: on
# Schema customisation
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["latents", "image"],
"type_hints": {
"model": "model"
}
},
}
# TODO: pass this an emitter method or something? or a session for dispatching?
def dispatch_progress(
self, context: InvocationContext, sample: Tensor, step: int
self, context: InvocationContext, intermediate_state: PipelineIntermediateState
) -> None:
# TODO: only output a preview image when requested
image = Generator.sample_to_lowres_estimated_image(sample)
if (context.services.queue.is_canceled(context.graph_execution_state_id)):
raise CanceledException
(width, height) = image.size
width *= 8
height *= 8
step = intermediate_state.step
if intermediate_state.predicted_original is not None:
# Some schedulers report not only the noisy latents at the current timestep,
# but also their estimate so far of what the de-noised latents will be.
sample = intermediate_state.predicted_original
else:
sample = intermediate_state.latents
dataURL = image_to_dataURL(image, image_format="JPEG")
diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
context.services.events.emit_generator_progress(
context.graph_execution_state_id,
self.id,
{
"width": width,
"height": height,
"dataURL": dataURL
},
step,
self.steps,
)
def get_model(self, model_manager: ModelManager) -> StableDiffusionGeneratorPipeline:
model_info = model_manager.get_model(self.model)
model_info = choose_model(model_manager, self.model)
model_name = model_info['model_name']
model_hash = model_info['hash']
model: StableDiffusionGeneratorPipeline = model_info['model']
model.scheduler = get_scheduler(
model=model,
scheduler_name=self.sampler_name
scheduler_name=self.scheduler
)
if isinstance(model, DiffusionPipeline):
@@ -214,7 +227,7 @@ class TextToLatentsInvocation(BaseInvocation):
noise = context.services.latents.get(self.noise.latents_name)
def step_callback(state: PipelineIntermediateState):
self.dispatch_progress(context, state.latents, state.step)
self.dispatch_progress(context, state)
model = self.get_model(context.services.model_manager)
conditioning_data = self.get_conditioning_data(model)
@@ -244,6 +257,17 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
type: Literal["l2l"] = "l2l"
# Schema customisation
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["latents"],
"type_hints": {
"model": "model"
}
},
}
# Inputs
latents: Optional[LatentsField] = Field(description="The latents to use as a base image")
strength: float = Field(default=0.5, description="The strength of the latents to use")
@@ -253,7 +277,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
latent = context.services.latents.get(self.latents.latents_name)
def step_callback(state: PipelineIntermediateState):
self.dispatch_progress(context, state.latents, state.step)
self.dispatch_progress(context, state)
model = self.get_model(context.services.model_manager)
conditioning_data = self.get_conditioning_data(model)
@@ -299,12 +323,23 @@ class LatentsToImageInvocation(BaseInvocation):
latents: Optional[LatentsField] = Field(description="The latents to generate an image from")
model: str = Field(default="", description="The model to use")
# Schema customisation
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["latents", "image"],
"type_hints": {
"model": "model"
}
},
}
@torch.no_grad()
def invoke(self, context: InvocationContext) -> ImageOutput:
latents = context.services.latents.get(self.latents.latents_name)
# TODO: this only really needs the vae
model_info = context.services.model_manager.get_model(self.model)
model_info = choose_model(context.services.model_manager, self.model)
model: StableDiffusionGeneratorPipeline = model_info['model']
with torch.inference_mode():
@@ -315,7 +350,9 @@ class LatentsToImageInvocation(BaseInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
context.services.images.save(image_type, image_name, image)
return ImageOutput(
image=ImageField(image_type=image_type, image_name=image_name)
context.services.images.save(image_type, image_name, image, self.dict())
return build_image_output(
image_type=image_type,
image_name=image_name,
image=image
)

View File

@@ -1,15 +1,22 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
from datetime import datetime, timezone
from typing import Literal, Optional
from typing import Literal
import numpy
from PIL import Image, ImageFilter, ImageOps
from pydantic import BaseModel, Field
from ..services.image_storage import ImageType
from ..services.invocation_services import InvocationServices
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig
class MathInvocationConfig(BaseModel):
"""Helper class to provide all math invocations with additional config"""
# Schema customisation
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["math"],
}
}
class IntOutput(BaseInvocationOutput):
@@ -20,7 +27,7 @@ class IntOutput(BaseInvocationOutput):
#fmt: on
class AddInvocation(BaseInvocation):
class AddInvocation(BaseInvocation, MathInvocationConfig):
"""Adds two numbers"""
#fmt: off
type: Literal["add"] = "add"
@@ -32,7 +39,7 @@ class AddInvocation(BaseInvocation):
return IntOutput(a=self.a + self.b)
class SubtractInvocation(BaseInvocation):
class SubtractInvocation(BaseInvocation, MathInvocationConfig):
"""Subtracts two numbers"""
#fmt: off
type: Literal["sub"] = "sub"
@@ -44,7 +51,7 @@ class SubtractInvocation(BaseInvocation):
return IntOutput(a=self.a - self.b)
class MultiplyInvocation(BaseInvocation):
class MultiplyInvocation(BaseInvocation, MathInvocationConfig):
"""Multiplies two numbers"""
#fmt: off
type: Literal["mul"] = "mul"
@@ -56,7 +63,7 @@ class MultiplyInvocation(BaseInvocation):
return IntOutput(a=self.a * self.b)
class DivideInvocation(BaseInvocation):
class DivideInvocation(BaseInvocation, MathInvocationConfig):
"""Divides two numbers"""
#fmt: off
type: Literal["div"] = "div"

View File

@@ -3,10 +3,10 @@ from typing import Literal, Union
from pydantic import Field
from ..services.image_storage import ImageType
from invokeai.app.models.image import ImageField, ImageType
from ..services.invocation_services import InvocationServices
from .baseinvocation import BaseInvocation, InvocationContext
from .image import ImageField, ImageOutput
from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
from .image import ImageOutput, build_image_output
class RestoreFaceInvocation(BaseInvocation):
"""Restores faces in an image."""
@@ -18,6 +18,14 @@ class RestoreFaceInvocation(BaseInvocation):
strength: float = Field(default=0.75, gt=0, le=1, description="The strength of the restoration" )
#fmt: on
# Schema customisation
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["restoration", "image"],
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get(
self.image.image_type, self.image.image_name
@@ -36,7 +44,9 @@ class RestoreFaceInvocation(BaseInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
context.services.images.save(image_type, image_name, results[0][0])
return ImageOutput(
image=ImageField(image_type=image_type, image_name=image_name)
)
context.services.images.save(image_type, image_name, results[0][0], self.dict())
return build_image_output(
image_type=image_type,
image_name=image_name,
image=results[0][0]
)

View File

@@ -5,10 +5,10 @@ from typing import Literal, Union
from pydantic import Field
from ..services.image_storage import ImageType
from invokeai.app.models.image import ImageField, ImageType
from ..services.invocation_services import InvocationServices
from .baseinvocation import BaseInvocation, InvocationContext
from .image import ImageField, ImageOutput
from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
from .image import ImageOutput, build_image_output
class UpscaleInvocation(BaseInvocation):
@@ -22,6 +22,15 @@ class UpscaleInvocation(BaseInvocation):
level: Literal[2, 4] = Field(default=2, description="The upscale level")
#fmt: on
# Schema customisation
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["upscaling", "image"],
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get(
self.image.image_type, self.image.image_name
@@ -40,7 +49,9 @@ class UpscaleInvocation(BaseInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
context.services.images.save(image_type, image_name, results[0][0])
return ImageOutput(
image=ImageField(image_type=image_type, image_name=image_name)
)
context.services.images.save(image_type, image_name, results[0][0], self.dict())
return build_image_output(
image_type=image_type,
image_name=image_name,
image=results[0][0]
)

View File

@@ -0,0 +1,14 @@
from invokeai.backend.model_management.model_manager import ModelManager
def choose_model(model_manager: ModelManager, model_name: str):
"""Returns the default model if the `model_name` not a valid model, else returns the selected model."""
if model_manager.valid_model(model_name):
model = model_manager.get_model(model_name)
else:
model = model_manager.get_model()
print(
f"* Warning: '{model_name}' is not a valid model name. Using default model \'{model['model_name']}\' instead."
)
return model
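Hedged usage sketch of the fallback behavior; `model_manager` is assumed to be an initialized ModelManager:
model = choose_model(model_manager, "not-a-real-model")
# prints the warning above, then falls back to the default model
print(model["model_name"])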

View File

@@ -0,0 +1,3 @@
class CanceledException(Exception):
"""Execution canceled by user."""
pass

View File

@@ -0,0 +1,26 @@
from enum import Enum
from typing import Optional
from pydantic import BaseModel, Field
class ImageType(str, Enum):
RESULT = "results"
INTERMEDIATE = "intermediates"
UPLOAD = "uploads"
class ImageField(BaseModel):
"""An image field used for passing image objects between invocations"""
image_type: ImageType = Field(
default=ImageType.RESULT, description="The type of the image"
)
image_name: Optional[str] = Field(default=None, description="The name of the image")
class Config:
schema_extra = {
"required": [
"image_type",
"image_name",
]
}
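A minimal sketch of the new shared field in use; the file name is illustrative:
field = ImageField(image_type=ImageType.RESULT, image_name="000001.png")
print(field.json())
# {"image_type": "results", "image_name": "000001.png"}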

View File

@@ -0,0 +1,26 @@
from typing import Any, Optional, Dict
from pydantic import BaseModel, Field
class InvokeAIMetadata(BaseModel):
"""An image's InvokeAI-specific metadata"""
session: Optional[str] = Field(description="The session that generated this image")
source_id: Optional[str] = Field(
description="The source id of the invocation that generated this image"
)
# TODO: figure out metadata
invocation: Optional[Dict[str, Any]] = Field(
default={}, description="The prepared invocation that generated this image"
)
class ImageMetadata(BaseModel):
"""An image's general metadata"""
created: int = Field(description="The creation timestamp of the image")
width: int = Field(description="The width of the image in pixels")
height: int = Field(description="The height of the image in pixels")
invokeai: Optional[InvokeAIMetadata] = Field(
default={}, description="The image's InvokeAI-specific metadata"
)
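Illustrative construction of the two models above; all values are made up:
meta = ImageMetadata(
    created=1681267200,  # unix timestamp
    width=512,
    height=512,
    invokeai=InvokeAIMetadata(
        session="7f3c9e42-...",
        source_id="txt2img_1",
        invocation={"type": "txt2img"},
    ),
)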

View File

@@ -25,7 +25,8 @@ class EventServiceBase:
def emit_generator_progress(
self,
graph_execution_state_id: str,
invocation_id: str,
invocation_dict: dict,
source_id: str,
progress_image: ProgressImage | None,
step: int,
total_steps: int,
@@ -35,7 +36,8 @@ class EventServiceBase:
event_name="generator_progress",
payload=dict(
graph_execution_state_id=graph_execution_state_id,
invocation_id=invocation_id,
invocation=invocation_dict,
source_id=source_id,
progress_image=progress_image,
step=step,
total_steps=total_steps,
@@ -43,40 +45,43 @@ class EventServiceBase:
)
def emit_invocation_complete(
self, graph_execution_state_id: str, invocation_id: str, result: Dict
self, graph_execution_state_id: str, result: Dict, invocation_dict: Dict, source_id: str,
) -> None:
"""Emitted when an invocation has completed"""
self.__emit_session_event(
event_name="invocation_complete",
payload=dict(
graph_execution_state_id=graph_execution_state_id,
invocation_id=invocation_id,
invocation=invocation_dict,
source_id=source_id,
result=result,
),
)
def emit_invocation_error(
self, graph_execution_state_id: str, invocation_id: str, error: str
self, graph_execution_state_id: str, invocation_dict: Dict, source_id: str, error: str
) -> None:
"""Emitted when an invocation has completed"""
self.__emit_session_event(
event_name="invocation_error",
payload=dict(
graph_execution_state_id=graph_execution_state_id,
invocation_id=invocation_id,
invocation=invocation_dict,
source_id=source_id,
error=error,
),
)
def emit_invocation_started(
self, graph_execution_state_id: str, invocation_id: str
self, graph_execution_state_id: str, invocation_dict: Dict, source_id: str
) -> None:
"""Emitted when an invocation has started"""
self.__emit_session_event(
event_name="invocation_started",
payload=dict(
graph_execution_state_id=graph_execution_state_id,
invocation_id=invocation_id,
invocation=invocation_dict,
source_id=source_id,
),
)
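Hedged sketch of the reshaped event calls; `events` stands in for any EventServiceBase subclass and the ids are illustrative:
events.emit_invocation_started(
    graph_execution_state_id="7f3c9e42-...",
    invocation_dict=invocation.dict(),  # prepared node, serialized
    source_id="txt2img_1",              # id of the unprepared source node
)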

View File

@@ -794,9 +794,6 @@ class GraphExecutionState(BaseModel):
default_factory=dict,
)
# Declare all fields as required; necessary for OpenAPI schema generation build.
# Technically only fields without a `default_factory` need to be listed here.
# See: https://github.com/pydantic/pydantic/discussions/4577
class Config:
schema_extra = {
'required': [

View File

@@ -2,24 +2,26 @@
import datetime
import os
import json
from glob import glob
from abc import ABC, abstractmethod
from enum import Enum
from pathlib import Path
from queue import Queue
from typing import Dict
from typing import Any, Callable, Dict, List, Union
from PIL.Image import Image
import PIL.Image as PILImage
from pydantic import BaseModel, Json
from invokeai.app.api.models.images import ImageResponse
from invokeai.app.models.image import ImageField, ImageType
from invokeai.app.models.metadata import ImageMetadata
from invokeai.app.services.item_storage import PaginatedResults
from invokeai.app.util.save_thumbnail import save_thumbnail
from invokeai.backend.image_util import PngWriter
class ImageType(str, Enum):
RESULT = "results"
INTERMEDIATE = "intermediates"
UPLOAD = "uploads"
class ImageStorageBase(ABC):
"""Responsible for storing and retrieving images."""
@@ -27,13 +29,21 @@ class ImageStorageBase(ABC):
def get(self, image_type: ImageType, image_name: str) -> Image:
pass
@abstractmethod
def list(
self, image_type: ImageType, page: int = 0, per_page: int = 10
) -> PaginatedResults[ImageResponse]:
pass
# TODO: make this a bit more flexible for e.g. cloud storage
@abstractmethod
def get_path(self, image_type: ImageType, image_name: str) -> str:
def get_path(
self, image_type: ImageType, image_name: str, is_thumbnail: bool = False
) -> str:
pass
@abstractmethod
def save(self, image_type: ImageType, image_name: str, image: Image) -> None:
def save(self, image_type: ImageType, image_name: str, image: Image, metadata: Dict[str, Any] | None = None) -> str:
pass
@abstractmethod
@@ -71,25 +81,84 @@ class DiskImageStorage(ImageStorageBase):
parents=True, exist_ok=True
)
def list(
self, image_type: ImageType, page: int = 0, per_page: int = 10
) -> PaginatedResults[ImageResponse]:
dir_path = os.path.join(self.__output_folder, image_type)
image_paths = glob(f"{dir_path}/*.png")
count = len(image_paths)
sorted_image_paths = sorted(
glob(f"{dir_path}/*.png"), key=os.path.getctime, reverse=True
)
page_of_image_paths = sorted_image_paths[
page * per_page : (page + 1) * per_page
]
page_of_images: List[ImageResponse] = []
for path in page_of_image_paths:
filename = os.path.basename(path)
img = PILImage.open(path)
invokeai_metadata = json.loads(img.info.get("invokeai", "{}"))
page_of_images.append(
ImageResponse(
image_type=image_type.value,
image_name=filename,
# TODO: DiskImageStorage should not be building URLs...?
image_url=f"api/v1/images/{image_type.value}/{filename}",
thumbnail_url=f"api/v1/images/{image_type.value}/thumbnails/{os.path.splitext(filename)[0]}.webp",
# TODO: Creation of this object should happen elsewhere, just making it fit here so it works
metadata=ImageMetadata(
created=int(os.path.getctime(path)),
width=img.width,
height=img.height,
invokeai=invokeai_metadata
),
)
)
page_count_trunc = int(count / per_page)
page_count_mod = count % per_page
page_count = page_count_trunc if page_count_mod == 0 else page_count_trunc + 1
return PaginatedResults[ImageResponse](
items=page_of_images,
page=page,
pages=page_count,
per_page=per_page,
total=count,
)
def get(self, image_type: ImageType, image_name: str) -> Image:
image_path = self.get_path(image_type, image_name)
cache_item = self.__get_cache(image_path)
if cache_item:
return cache_item
image = Image.open(image_path)
image = PILImage.open(image_path)
self.__set_cache(image_path, image)
return image
# TODO: make this a bit more flexible for e.g. cloud storage
def get_path(self, image_type: ImageType, image_name: str) -> str:
path = os.path.join(self.__output_folder, image_type, image_name)
def get_path(
self, image_type: ImageType, image_name: str, is_thumbnail: bool = False
) -> str:
if is_thumbnail:
path = os.path.join(
self.__output_folder, image_type, "thumbnails", image_name
)
else:
path = os.path.join(self.__output_folder, image_type, image_name)
return path
def save(self, image_type: ImageType, image_name: str, image: Image) -> None:
def save(self, image_type: ImageType, image_name: str, image: Image, metadata: Dict[str, Any] | None = None) -> str:
print(metadata)
image_subpath = os.path.join(image_type, image_name)
self.__pngWriter.save_image_and_prompt_to_png(
image, "", image_subpath, None
image, "", image_subpath, metadata
) # TODO: just pass full path to png writer
save_thumbnail(
image=image,
@@ -98,15 +167,23 @@ class DiskImageStorage(ImageStorageBase):
)
image_path = self.get_path(image_type, image_name)
self.__set_cache(image_path, image)
return image_path
def delete(self, image_type: ImageType, image_name: str) -> None:
image_path = self.get_path(image_type, image_name)
thumbnail_path = self.get_path(image_type, image_name, True)
if os.path.exists(image_path):
os.remove(image_path)
if image_path in self.__cache:
del self.__cache[image_path]
if os.path.exists(thumbnail_path):
os.remove(thumbnail_path)
if thumbnail_path in self.__cache:
del self.__cache[thumbnail_path]
def __get_cache(self, image_name: str) -> Image:
return None if image_name not in self.__cache else self.__cache[image_name]
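Hedged usage sketch of the extended storage API; the constructor argument, paths, and file names are assumptions for illustration:
from PIL import Image
image = Image.new("RGB", (512, 512))
storage = DiskImageStorage("outputs")  # output folder assumed as the sole ctor arg
saved_path = storage.save(
    ImageType.RESULT, "000001.png", image, metadata={"session": "7f3c9e42-..."}
)  # save() now returns the image path
thumb_path = storage.get_path(ImageType.RESULT, "000001.webp", is_thumbnail=True)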

View File

@@ -4,7 +4,7 @@ from threading import Event, Thread
from ..invocations.baseinvocation import InvocationContext
from .invocation_queue import InvocationQueueItem
from .invoker import InvocationProcessorABC, Invoker
from ..util.util import CanceledException
from ..models.exceptions import CanceledException
class DefaultInvocationProcessor(InvocationProcessorABC):
__invoker_thread: Thread
@@ -43,10 +43,14 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
queue_item.invocation_id
)
# get the source node to provide to clients (the prepared node is not as useful)
source_id = graph_execution_state.prepared_source_mapping[invocation.id]
# Send starting event
self.__invoker.services.events.emit_invocation_started(
graph_execution_state_id=graph_execution_state.id,
invocation_id=invocation.id,
invocation_dict=invocation.dict(),
source_id=source_id
)
# Invoke
@@ -75,7 +79,8 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
# Send complete event
self.__invoker.services.events.emit_invocation_complete(
graph_execution_state_id=graph_execution_state.id,
invocation_id=invocation.id,
invocation_dict=invocation.dict(),
source_id=source_id,
result=outputs.dict(),
)
@@ -99,7 +104,8 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
# Send error event
self.__invoker.services.events.emit_invocation_error(
graph_execution_state_id=graph_execution_state.id,
invocation_id=invocation.id,
invocation_dict=invocation.dict(),
source_id=source_id,
error=error,
)

View File

@@ -1,23 +1,25 @@
import sqlite3
from threading import Lock
from typing import Generic, TypeVar, Union, get_args
from pydantic import BaseModel, parse_raw_as
from .item_storage import ItemStorageABC, PaginatedResults
from sqlalchemy import create_engine, String, TEXT, Engine, select
from sqlalchemy.orm import DeclarativeBase, mapped_column, Session
T = TypeVar("T", bound=BaseModel)
sqlite_memory = ":memory:"
class Base(DeclarativeBase):
pass
class SqliteItemStorage(ItemStorageABC, Generic[T]):
_filename: str
_table_name: str
_conn: sqlite3.Connection
_cursor: sqlite3.Cursor
_id_field: str
_lock: Lock
_engine: Engine
# _table: ??? # TODO: figure out how to type this
def __init__(self, filename: str, table_name: str, id_field: str = "id"):
super().__init__()
@@ -25,86 +27,79 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
self._filename = filename
self._table_name = table_name
self._id_field = id_field # TODO: validate that T has this field
self._lock = Lock()
self._conn = sqlite3.connect(
self._filename, check_same_thread=False
) # TODO: figure out a better threading solution
self._cursor = self._conn.cursor()
self._engine = create_engine(f"sqlite+pysqlite:///{self._filename}")
self._create_table()
def _create_table(self):
try:
self._lock.acquire()
self._cursor.execute(
f"""CREATE TABLE IF NOT EXISTS {self._table_name} (
item TEXT,
id TEXT GENERATED ALWAYS AS (json_extract(item, '$.{self._id_field}')) VIRTUAL NOT NULL);"""
)
self._cursor.execute(
f"""CREATE UNIQUE INDEX IF NOT EXISTS {self._table_name}_id ON {self._table_name}(id);"""
)
finally:
self._lock.release()
# dynamically create the ORM model class to avoid name collisions
# cannot access `self.__orig_class__` in `__init__` or `__new__` so
# format the table name into the class name
pascal_table_name = self._table_name.replace("_", " ").title()
pascal_table_name = pascal_table_name.replace(" ", "")
table_dict = dict(
__tablename__=self._table_name,
id=mapped_column(String, primary_key=True),
item=mapped_column(TEXT, nullable=False),
)
self._table = type(pascal_table_name, (Base,), table_dict)
Base.metadata.create_all(self._engine)
def _parse_item(self, item: str) -> T:
item_type = get_args(self.__orig_class__)[0]
return parse_raw_as(item_type, item)
def set(self, item: T):
try:
self._lock.acquire()
self._cursor.execute(
f"""INSERT OR REPLACE INTO {self._table_name} (item) VALUES (?);""",
(item.json(),),
)
self._conn.commit()
finally:
self._lock.release()
session = Session(self._engine)
item_id = str(getattr(item, self._id_field))
new_item = self._table(id=item_id, item=item.json())
session.merge(new_item)
session.commit()
session.close()
self._on_changed(item)
def get(self, id: str) -> Union[T, None]:
try:
self._lock.acquire()
self._cursor.execute(
f"""SELECT item FROM {self._table_name} WHERE id = ?;""", (str(id),)
)
result = self._cursor.fetchone()
finally:
self._lock.release()
session = Session(self._engine)
if not result:
item = session.get(self._table, str(id))
session.close()
if not item:
return None
return self._parse_item(result[0])
return self._parse_item(item.item)
def delete(self, id: str):
try:
self._lock.acquire()
self._cursor.execute(
f"""DELETE FROM {self._table_name} WHERE id = ?;""", (str(id),)
)
self._conn.commit()
finally:
self._lock.release()
session = Session(self._engine)
item = session.get(self._table, id)
session.delete(item)
session.commit()
session.close()
self._on_deleted(id)
def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
try:
self._lock.acquire()
self._cursor.execute(
f"""SELECT item FROM {self._table_name} LIMIT ? OFFSET ?;""",
(per_page, page * per_page),
)
result = self._cursor.fetchall()
session = Session(self._engine)
items = list(map(lambda r: self._parse_item(r[0]), result))
stmt = select(self._table.item).limit(per_page).offset(page * per_page)
result = session.execute(stmt)
self._cursor.execute(f"""SELECT count(*) FROM {self._table_name};""")
count = self._cursor.fetchone()[0]
finally:
self._lock.release()
items = list(map(lambda r: self._parse_item(r[0]), result))
count = session.query(self._table.item).count()
session.commit()
session.close()
pageCount = int(count / per_page) + 1
@@ -115,23 +110,19 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
def search(
self, query: str, page: int = 0, per_page: int = 10
) -> PaginatedResults[T]:
try:
self._lock.acquire()
self._cursor.execute(
f"""SELECT item FROM {self._table_name} WHERE item LIKE ? LIMIT ? OFFSET ?;""",
(f"%{query}%", per_page, page * per_page),
)
result = self._cursor.fetchall()
session = Session(self._engine)
items = list(map(lambda r: self._parse_item(r[0]), result))
stmt = (
session.query(self._table)
.where(self._table.item.like(f"%{query}%"))
.limit(per_page)
.offset(page * per_page)
)
self._cursor.execute(
f"""SELECT count(*) FROM {self._table_name} WHERE item LIKE ?;""",
(f"%{query}%",),
)
count = self._cursor.fetchone()[0]
finally:
self._lock.release()
result = session.execute(stmt)
items = list(map(lambda r: self._parse_item(r[0].item), result))
count = session.query(self._table.item).count()
pageCount = int(count / per_page) + 1
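Hedged usage sketch of the sqlalchemy-backed store; the pydantic model is illustrative. The generic subscript supplies the type that _parse_item deserializes to:
from pydantic import BaseModel
class Widget(BaseModel):
    id: str
    name: str
store = SqliteItemStorage[Widget](filename=sqlite_memory, table_name="widgets")
store.set(Widget(id="w1", name="first"))
page = store.list(page=0, per_page=10)  # PaginatedResults[Widget]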

View File

@@ -1,14 +1,17 @@
from re import S
import torch
from PIL import Image
from ..invocations.baseinvocation import InvocationContext
from ...backend.util.util import image_to_dataURL
from ...backend.generator.base import Generator
from ...backend.stable_diffusion import PipelineIntermediateState
class CanceledException(Exception):
pass
def fast_latents_step_callback(sample: torch.Tensor, step: int, steps: int, id: str, context: InvocationContext, ):
def fast_latents_step_callback(
sample: torch.Tensor,
step: int,
steps: int,
id: str,
context: InvocationContext,
):
# TODO: only output a preview image when requested
image = Generator.sample_to_lowres_estimated_image(sample)
@@ -18,18 +21,21 @@ def fast_latents_step_callback(sample: torch.Tensor, step: int, steps: int, id:
dataURL = image_to_dataURL(image, image_format="JPEG")
graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
source_id = graph_execution_state.prepared_source_mapping[id]
invocation = graph_execution_state.execution_graph.get_node(id)
context.services.events.emit_generator_progress(
context.graph_execution_state_id,
id,
{
"width": width,
"height": height,
"dataURL": dataURL
},
step,
steps,
graph_execution_state_id=context.graph_execution_state_id,
invocation_dict=invocation.dict(),
source_id=source_id,
progress_image={"width": width, "height": height, "dataURL": dataURL},
step=step,
total_steps=steps,
)
def diffusers_step_callback_adapter(*cb_args, **kwargs):
"""
txt2img gives us a Tensor in the step_callback, while img2img gives us a PipelineIntermediateState.
@@ -37,6 +43,8 @@ def diffusers_step_callback_adapter(*cb_args, **kwargs):
"""
if isinstance(cb_args[0], PipelineIntermediateState):
progress_state: PipelineIntermediateState = cb_args[0]
return fast_latents_step_callback(progress_state.latents, progress_state.step, **kwargs)
return fast_latents_step_callback(
progress_state.latents, progress_state.step, **kwargs
)
else:
return fast_latents_step_callback(*cb_args, **kwargs)
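Hedged sketch of wiring the adapter up as a step callback; `node_id` and `context` are assumed to be in scope:
from functools import partial
callback = partial(
    diffusers_step_callback_adapter, steps=50, id=node_id, context=context
)
# txt2img invokes callback(sample, step); img2img invokes callback(intermediate_state)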

View File

@@ -561,7 +561,7 @@ class Args(object):
"--autoimport",
default=None,
type=str,
help="Check the indicated directory for .ckpt/.safetensors weights files at startup and import directly",
help="(DEPRECATED - NONFUNCTIONAL). Check the indicated directory for .ckpt/.safetensors weights files at startup and import directly",
)
model_group.add_argument(
"--autoconvert",

View File

@@ -67,7 +67,6 @@ def install_requested_models(
scan_directory: Path = None,
external_models: List[str] = None,
scan_at_startup: bool = False,
convert_to_diffusers: bool = False,
precision: str = "float16",
purge_deleted: bool = False,
config_file_path: Path = None,
@@ -113,7 +112,6 @@ def install_requested_models(
try:
model_manager.heuristic_import(
path_url_or_repo,
convert=convert_to_diffusers,
commit_to_conf=config_file_path,
)
except KeyboardInterrupt:
@@ -122,7 +120,7 @@ def install_requested_models(
pass
if scan_at_startup and scan_directory.is_dir():
argument = "--autoconvert" if convert_to_diffusers else "--autoimport"
argument = "--autoconvert"
initfile = Path(Globals.root, Globals.initfile)
replacement = Path(Globals.root, f"{Globals.initfile}.new")
directory = str(scan_directory).replace("\\", "/")

View File

@@ -41,7 +41,7 @@ class PngWriter:
info = PngImagePlugin.PngInfo()
info.add_text("Dream", dream_prompt)
if metadata:
info.add_text("sd-metadata", json.dumps(metadata))
info.add_text("invokeai", json.dumps(metadata))
image.save(path, "PNG", pnginfo=info, compress_level=compress_level)
return path
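Reading the renamed metadata key back out of a saved PNG, mirroring what the image-listing code above does; the path is illustrative:
import json
import PIL.Image
img = PIL.Image.open("outputs/results/000001.png")
invokeai_metadata = json.loads(img.info.get("invokeai", "{}"))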

View File

@@ -7,3 +7,4 @@ from .convert_ckpt_to_diffusers import (
)
from .model_manager import ModelManager

View File

@@ -1,4 +1,4 @@
"""
"""enum
Manage a cache of Stable Diffusion model files for fast switching.
They are moved between GPU and CPU as necessary. If CPU memory falls
below a preset minimum, the least recently used model will be
@@ -15,7 +15,7 @@ import sys
import textwrap
import time
import warnings
from enum import Enum
from enum import Enum, auto
from pathlib import Path
from shutil import move, rmtree
from typing import Any, Optional, Union, Callable
@@ -24,8 +24,12 @@ import safetensors
import safetensors.torch
import torch
import transformers
from diffusers import AutoencoderKL
from diffusers import logging as dlogging
from diffusers import (
AutoencoderKL,
UNet2DConditionModel,
SchedulerMixin,
logging as dlogging,
)
from huggingface_hub import scan_cache_dir
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
@@ -33,37 +37,58 @@ from picklescan.scanner import scan_file_path
from invokeai.backend.globals import Globals, global_cache_dir
from ..stable_diffusion import StableDiffusionGeneratorPipeline
from transformers import (
CLIPTextModel,
CLIPTokenizer,
CLIPFeatureExtractor,
)
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
from ..stable_diffusion import (
StableDiffusionGeneratorPipeline,
)
from ..util import CUDA_DEVICE, ask_user, download_with_resume
class SDLegacyType(Enum):
V1 = 1
V1_INPAINT = 2
V2 = 3
V2_e = 4
V2_v = 5
UNKNOWN = 99
class SDLegacyType(Enum):
V1 = auto()
V1_INPAINT = auto()
V2 = auto()
V2_e = auto()
V2_v = auto()
UNKNOWN = auto()
class SDModelComponent(Enum):
vae="vae"
text_encoder="text_encoder"
tokenizer="tokenizer"
unet="unet"
scheduler="scheduler"
safety_checker="safety_checker"
feature_extractor="feature_extractor"
DEFAULT_MAX_MODELS = 2
class ModelManager(object):
'''
"""
Model manager handles loading, caching, importing, deleting, converting, and editing models.
'''
"""
def __init__(
self,
config: OmegaConf|Path,
device_type: torch.device = CUDA_DEVICE,
precision: str = "float16",
max_loaded_models=DEFAULT_MAX_MODELS,
sequential_offload=False,
embedding_path: Path=None,
self,
config: OmegaConf | Path,
device_type: torch.device = CUDA_DEVICE,
precision: str = "float16",
max_loaded_models=DEFAULT_MAX_MODELS,
sequential_offload=False,
embedding_path: Path = None,
):
"""
Initialize with the path to the models.yaml config file or
an initialized OmegaConf dictionary. Optional parameters
are the torch device type, precision, max_loaded_models,
and sequential_offload boolean. Note that the default device
and sequential_offload boolean. Note that the default device
type and precision are set up for a CUDA system running at half precision.
"""
# prevent nasty-looking CLIP log message
@@ -87,15 +112,25 @@ class ModelManager(object):
"""
return model_name in self.config
def get_model(self, model_name: str=None)->dict:
"""
Given a model named identified in models.yaml, return
the model object. If in RAM will load into GPU VRAM.
If on disk, will load from there.
def get_model(self, model_name: str = None) -> dict:
"""Given a model named identified in models.yaml, return a dict
containing the model object and some of its key features. If
in RAM will load into GPU VRAM. If on disk, will load from
there.
The dict has the following keys:
'model': The StableDiffusionGeneratorPipeline object
'model_name': The name of the model in models.yaml
'width': The width of images trained by this model
'height': The height of images trained by this model
'hash': A unique hash of this model's files on disk.
"""
if not model_name:
return self.get_model(self.current_model) if self.current_model else self.get_model(self.default_model())
return (
self.get_model(self.current_model)
if self.current_model
else self.get_model(self.default_model())
)
if not self.valid_model(model_name):
print(
f'** "{model_name}" is not a known model name. Please check your models.yaml file'
@@ -135,6 +170,81 @@ class ModelManager(object):
"hash": hash,
}
def get_model_vae(self, model_name: str=None)->AutoencoderKL:
"""Given a model name identified in models.yaml, load the model into
GPU if necessary and return its assigned VAE as an
AutoencoderKL object. If no model name is provided, return the
vae from the model currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.vae)
def get_model_tokenizer(self, model_name: str=None)->CLIPTokenizer:
"""Given a model name identified in models.yaml, load the model into
GPU if necessary and return its assigned CLIPTokenizer. If no
model name is provided, return the tokenizer from the model
currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.tokenizer)
def get_model_unet(self, model_name: str=None)->UNet2DConditionModel:
"""Given a model name identified in models.yaml, load the model into
GPU if necessary and return its assigned UNet2DConditionModel. If no model
name is provided, return the UNet from the model
currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.unet)
def get_model_text_encoder(self, model_name: str=None)->CLIPTextModel:
"""Given a model name identified in models.yaml, load the model into
GPU if necessary and return its assigned CLIPTextModel. If no
model name is provided, return the text encoder from the model
currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.text_encoder)
def get_model_feature_extractor(self, model_name: str=None)->CLIPFeatureExtractor:
"""Given a model name identified in models.yaml, load the model into
GPU if necessary and return its assigned CLIPFeatureExtractor. If no
model name is provided, return the feature extractor from the model
currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.feature_extractor)
def get_model_scheduler(self, model_name: str=None)->SchedulerMixin:
"""Given a model name identified in models.yaml, load the model into
GPU if necessary and return its assigned scheduler. If no
model name is provided, return the scheduler from the model
currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.scheduler)
def _get_sub_model(
self,
model_name: str=None,
model_part: SDModelComponent=SDModelComponent.vae,
) -> Union[
AutoencoderKL,
CLIPTokenizer,
CLIPFeatureExtractor,
UNet2DConditionModel,
CLIPTextModel,
StableDiffusionSafetyChecker,
]:
"""Given a model name identified in models.yaml, and the part of the
model you wish to retrieve, return that part. Parts are in an Enum
class named SDModelComponent, and consist of:
SDModelComponent.vae
SDModelComponent.text_encoder
SDModelComponent.tokenizer
SDModelComponent.unet
SDModelComponent.scheduler
SDModelComponent.safety_checker
SDModelComponent.feature_extractor
"""
model_dict = self.get_model(model_name)
model = model_dict["model"]
return getattr(model, model_part.value)
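Hedged usage sketch of the new component getters; `manager` is an initialized ModelManager and the model name is illustrative:
vae = manager.get_model_vae()              # AutoencoderKL from the current model
unet = manager.get_model_unet("sd-1.5")    # loads/activates "sd-1.5" if needed
scheduler = manager.get_model_scheduler()  # SchedulerMixin from the current model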
def default_model(self) -> str | None:
"""
Returns the name of the default model, or None
@@ -360,7 +470,7 @@ class ModelManager(object):
f"Unknown model format {model_name}: {model_format}"
)
self._add_embeddings_to_model(model)
# usage statistics
toc = time.time()
print(">> Model loaded in", "%4.2fs" % (toc - tic))
@@ -433,7 +543,7 @@ class ModelManager(object):
width = pipeline.unet.config.sample_size * pipeline.vae_scale_factor
height = width
print(f" | Default image dimensions = {width} x {height}")
return pipeline, width, height, model_hash
def _load_ckpt_model(self, model_name, mconfig):
@@ -454,14 +564,18 @@ class ModelManager(object):
from . import load_pipeline_from_original_stable_diffusion_ckpt
try:
if self.list_models()[self.current_model]['status'] == 'active':
if self.list_models()[self.current_model]["status"] == "active":
self.offload_model(self.current_model)
except Exception as e:
pass
vae_path = None
if vae:
vae_path = vae if os.path.isabs(vae) else os.path.normpath(os.path.join(Globals.root, vae))
vae_path = (
vae
if os.path.isabs(vae)
else os.path.normpath(os.path.join(Globals.root, vae))
)
if self._has_cuda():
torch.cuda.empty_cache()
pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
@@ -571,9 +685,7 @@ class ModelManager(object):
models.yaml file.
"""
model_name = model_name or Path(repo_or_path).stem
model_description = (
description or f"Imported diffusers model {model_name}"
)
model_description = description or f"Imported diffusers model {model_name}"
new_config = dict(
description=model_description,
vae=vae,
@@ -602,7 +714,7 @@ class ModelManager(object):
SDLegacyType.V2_v (V2 using 'v_prediction' prediction type)
SDLegacyType.UNKNOWN
"""
global_step = checkpoint.get('global_step')
global_step = checkpoint.get("global_step")
state_dict = checkpoint.get("state_dict") or checkpoint
try:
@@ -628,13 +740,13 @@ class ModelManager(object):
return SDLegacyType.UNKNOWN
def heuristic_import(
self,
path_url_or_repo: str,
model_name: str = None,
description: str = None,
model_config_file: Path = None,
commit_to_conf: Path = None,
config_file_callback: Callable[[Path], Path] = None,
self,
path_url_or_repo: str,
model_name: str = None,
description: str = None,
model_config_file: Path = None,
commit_to_conf: Path = None,
config_file_callback: Callable[[Path], Path] = None,
) -> str:
"""Accept a string which could be:
- a HF diffusers repo_id
@@ -738,8 +850,8 @@ class ModelManager(object):
# another round of heuristics to guess the correct config file.
checkpoint = None
if model_path.suffix in [".ckpt",".pt"]:
self.scan_model(model_path,model_path)
if model_path.suffix in [".ckpt", ".pt"]:
self.scan_model(model_path, model_path)
checkpoint = torch.load(model_path)
else:
checkpoint = safetensors.torch.load_file(model_path)
@@ -761,19 +873,16 @@ class ModelManager(object):
elif model_type == SDLegacyType.V1_INPAINT:
print(" | SD-v1 inpainting model detected")
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml"
Globals.root,
"configs/stable-diffusion/v1-inpainting-inference.yaml",
)
elif model_type == SDLegacyType.V2_v:
print(
" | SD-v2-v model detected"
)
print(" | SD-v2-v model detected")
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
)
elif model_type == SDLegacyType.V2_e:
print(
" | SD-v2-e model detected"
)
print(" | SD-v2-e model detected")
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v2-inference.yaml"
)
@@ -820,16 +929,16 @@ class ModelManager(object):
return model_name
def convert_and_import(
self,
ckpt_path: Path,
diffusers_path: Path,
model_name=None,
model_description=None,
vae:dict=None,
vae_path:Path=None,
original_config_file: Path = None,
commit_to_conf: Path = None,
scan_needed: bool=True,
self,
ckpt_path: Path,
diffusers_path: Path,
model_name=None,
model_description=None,
vae: dict = None,
vae_path: Path = None,
original_config_file: Path = None,
commit_to_conf: Path = None,
scan_needed: bool = True,
) -> str:
"""
Convert a legacy ckpt weights file to diffuser model and import
@@ -857,10 +966,10 @@ class ModelManager(object):
try:
# By passing the specified VAE to the conversion function, the autoencoder
# will be built into the model rather than tacked on afterward via the config file
vae_model=None
vae_model = None
if vae:
vae_model=self._load_vae(vae)
vae_path=None
vae_model = self._load_vae(vae)
vae_path = None
convert_ckpt_to_diffusers(
ckpt_path,
diffusers_path,
@@ -976,16 +1085,16 @@ class ModelManager(object):
legacy_locations = [
Path(
models_dir,
"CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker"
"CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker",
),
Path(models_dir, "bert-base-uncased/models--bert-base-uncased"),
Path(
models_dir,
"openai/clip-vit-large-patch14/models--openai--clip-vit-large-patch14"
"openai/clip-vit-large-patch14/models--openai--clip-vit-large-patch14",
),
]
legacy_locations.extend(list(global_cache_dir("diffusers").glob('*')))
legacy_locations.extend(list(global_cache_dir("diffusers").glob("*")))
legacy_layout = False
for model in legacy_locations:
legacy_layout = legacy_layout or model.exists()
@@ -1003,7 +1112,7 @@ class ModelManager(object):
>> make adjustments, please press ctrl-C now to abort and relaunch InvokeAI when you are ready.
>> Otherwise press <enter> to continue."""
)
input('continue> ')
input("continue> ")
# transformer files get moved into the hub directory
if cls._is_huggingface_hub_directory_present():
@@ -1090,12 +1199,12 @@ class ModelManager(object):
print(
f'>> Textual inversion triggers: {", ".join(sorted(model.textual_inversion_manager.get_all_trigger_strings()))}'
)
def _has_cuda(self) -> bool:
return self.device.type == "cuda"
def _diffuser_sha256(
self, name_or_path: Union[str, Path], chunksize=4096
self, name_or_path: Union[str, Path], chunksize=16777216
) -> Union[str, bytes]:
path = None
if isinstance(name_or_path, Path):

View File

@@ -531,8 +531,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
run_id: str = None,
additional_guidance: List[Callable] = None,
):
# FIXME: do we still use any slicing now that PyTorch 2.0 has scaled dot-product attention on all platforms?
# self._adjust_memory_efficient_attention(latents)
self._adjust_memory_efficient_attention(latents)
if run_id is None:
run_id = secrets.token_urlsafe(self.ID_LENGTH)
if additional_guidance is None:

View File

@@ -158,14 +158,9 @@ def main():
report_model_error(opt, e)
# try to autoconvert new models
if path := opt.autoimport:
gen.model_manager.heuristic_import(
str(path), convert=False, commit_to_conf=opt.conf
)
if path := opt.autoconvert:
gen.model_manager.heuristic_import(
str(path), convert=True, commit_to_conf=opt.conf
str(path), commit_to_conf=opt.conf
)
# web server loops forever
@@ -581,6 +576,7 @@ def do_command(command: str, gen, opt: Args, completer) -> tuple:
elif command.startswith("!replay"):
file_path = command.replace("!replay", "", 1).strip()
file_path = os.path.join(opt.outdir, file_path)
if infile is None and os.path.isfile(file_path):
infile = open(file_path, "r", encoding="utf-8")
completer.add_history(command)

View File

@@ -199,17 +199,6 @@ class addModelsForm(npyscreen.FormMultiPage):
relx=4,
scroll_exit=True,
)
self.nextrely += 1
self.convert_models = self.add_widget_intelligent(
npyscreen.TitleSelectOne,
name="== CONVERT IMPORTED MODELS INTO DIFFUSERS==",
values=["Keep original format", "Convert to diffusers"],
value=0,
begin_entry_at=4,
max_height=4,
hidden=True, # will appear when imported models box is edited
scroll_exit=True,
)
self.cancel = self.add_widget_intelligent(
npyscreen.ButtonPress,
name="CANCEL",
@@ -244,8 +233,6 @@ class addModelsForm(npyscreen.FormMultiPage):
self.show_directory_fields.addVisibleWhenSelected(i)
self.show_directory_fields.when_value_edited = self._clear_scan_directory
self.import_model_paths.when_value_edited = self._show_hide_convert
self.autoload_directory.when_value_edited = self._show_hide_convert
def resize(self):
super().resize()
@@ -256,13 +243,6 @@ class addModelsForm(npyscreen.FormMultiPage):
if not self.show_directory_fields.value:
self.autoload_directory.value = ""
def _show_hide_convert(self):
model_paths = self.import_model_paths.value or ""
autoload_directory = self.autoload_directory.value or ""
self.convert_models.hidden = (
len(model_paths) == 0 and len(autoload_directory) == 0
)
def _get_starter_model_labels(self) -> List[str]:
window_width, window_height = get_terminal_size()
label_width = 25
@@ -322,7 +302,6 @@ class addModelsForm(npyscreen.FormMultiPage):
.scan_directory: Path to a directory of models to scan and import
.autoscan_on_startup: True if invokeai should scan and import at startup time
.import_model_paths: list of URLs, repo_ids and file paths to import
.convert_to_diffusers: if True, convert legacy checkpoints into diffusers
"""
# we're using a global here rather than storing the result in the parentapp
# due to some bug in npyscreen that is causing attributes to be lost
@@ -359,7 +338,6 @@ class addModelsForm(npyscreen.FormMultiPage):
# URLs and the like
selections.import_model_paths = self.import_model_paths.value.split()
selections.convert_to_diffusers = self.convert_models.value[0] == 1
class AddModelApplication(npyscreen.NPSAppManaged):
@@ -372,7 +350,6 @@ class AddModelApplication(npyscreen.NPSAppManaged):
scan_directory=None,
autoscan_on_startup=None,
import_model_paths=None,
convert_to_diffusers=None,
)
def onStart(self):
@@ -393,7 +370,6 @@ def process_and_execute(opt: Namespace, selections: Namespace):
directory_to_scan = selections.scan_directory
scan_at_startup = selections.autoscan_on_startup
potential_models_to_install = selections.import_model_paths
convert_to_diffusers = selections.convert_to_diffusers
install_requested_models(
install_initial_models=models_to_install,
@@ -401,7 +377,6 @@ def process_and_execute(opt: Namespace, selections: Namespace):
scan_directory=Path(directory_to_scan) if directory_to_scan else None,
external_models=potential_models_to_install,
scan_at_startup=scan_at_startup,
convert_to_diffusers=convert_to_diffusers,
precision="float32"
if opt.full_precision
else choose_precision(torch.device(choose_torch_device())),

View File

@@ -6,3 +6,5 @@ stats.html
index.html
.yarn/
*.scss
src/services/api/
src/services/fixtures/*

View File

@@ -3,4 +3,8 @@ dist/
node_modules/
patches/
stats.html
index.html
.yarn/
*.scss
src/services/api/
src/services/fixtures/*

View File

@@ -0,0 +1,87 @@
# Generated axios API client
- [Generated axios API client](#generated-axios-api-client)
- [Generation](#generation)
- [Generate the API client from the nodes web server](#generate-the-api-client-from-the-nodes-web-server)
- [Generate the API client from JSON](#generate-the-api-client-from-json)
- [Getting the JSON from the nodes web server](#getting-the-json-from-the-nodes-web-server)
- [Getting the JSON with a python script](#getting-the-json-with-a-python-script)
- [Generate the API client](#generate-the-api-client)
- [The generated client](#the-generated-client)
- [API client customisation](#api-client-customisation)
This API client is generated by an [openapi code generator](https://github.com/ferdikoomen/openapi-typescript-codegen).
All files in `invokeai/frontend/web/src/services/api/` are made by the generator.
## Generation
The axios client may be generated from the OpenAPI schema served by the nodes web server, or from a JSON file.
### Generate the API client from the nodes web server
We need to start the nodes web server, which serves the OpenAPI schema to the generator.
1. Start the nodes web server.
```bash
# from the repo root
python scripts/invoke-new.py --web
```
2. Generate the API client.
```bash
# from invokeai/frontend/web/
yarn api:web
```
### Generate the API client from JSON
The JSON can be acquired from the nodes web server, or with a python script.
#### Getting the JSON from the nodes web server
Start the nodes web server as described above, then download the file.
```bash
# from invokeai/frontend/web/
curl http://localhost:9090/openapi.json -o openapi.json
```
#### Getting the JSON with a python script
Run this python script from the repo root, so it can access the nodes server modules.
The script will output `openapi.json` in the repo root. Then we need to move it into `invokeai/frontend/web/src/services/fixtures/` so the `api:file` script can find it.
```bash
# from the repo root
python invokeai/app/util/generate_openapi_json.py
mv invokeai/app/util/openapi.json invokeai/frontend/web/src/services/fixtures/
```
#### Generate the API client
Now we can generate the API client from the JSON.
```bash
# from invokeai/frontend/web/
yarn api:file
```
## The generated client
The client will be written to `invokeai/frontend/web/src/services/api/`:
- `axios` client
- TS types
- An easily parseable schema, which we can use to generate UI
## API client customisation
The generator has a default `request.ts` file that implements a base `axios` client. The generated client uses this base client.
One shortcoming of this base client is that it does not provide response headers unless the response body is empty. To fix this, we provide our own lightly-patched `request.ts`.
To access the headers, call `getHeaders(response)` on any response from the generated api client. This function is exported from `invokeai/frontend/web/src/services/util/getHeaders.ts`.
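For example, a minimal sketch (the `SessionsService` name and its method are hypothetical stand-ins for whatever the generator emits from your schema):
```ts
// hypothetical generated service - real names depend on your OpenAPI schema
import { SessionsService } from 'services/api';
import { getHeaders } from 'services/util/getHeaders';

const logSessionHeaders = async () => {
  const response = await SessionsService.listSessions();
  // the patched request.ts attaches the raw headers to every response,
  // and getHeaders() reads them back out (header name is illustrative)
  const headers = getHeaders(response);
  console.log(headers['content-type']);
};
```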

View File

@@ -0,0 +1,21 @@
# Events
Events via `socket.io`
## `actions.ts`
Redux actions for all socket events. Payloads all include a timestamp, and optionally some other data.
Any reducer (or middleware) can respond to the actions.
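A rough sketch of the pattern (the event name and payload fields are illustrative, not the real ones):
```ts
import { createAction } from '@reduxjs/toolkit';
import dateFormat from 'dateformat';

// every socket action payload carries a timestamp plus the event's own data
export const invocationStarted = createAction(
  'socket/invocationStarted',
  (data: { sessionId: string }) => ({
    payload: { timestamp: dateFormat(new Date(), 'isoDateTime'), ...data },
  })
);
```
Any slice can then pick it up in `extraReducers` via `builder.addCase(invocationStarted, ...)`.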
## `middleware.ts`
Redux middleware for events.
Handles dispatching the event actions. Only put logic here if it can't really go anywhere else.
For example, on connect we want to load images into the gallery if it's not already populated. This requires dispatching a thunk, so we need to dispatch it directly in the middleware.
## `types.ts`
Hand-written types for the socket events. These cannot be generated from the server, but fortunately they are few and simple.
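They might look roughly like this (event and field names here are illustrative):
```ts
// illustrative only - the real event types mirror the server's socket.io payloads
export type GeneratorProgressEvent = {
  session_id: string;
  step: number;
  total_steps: number;
};

export type InvocationCompleteEvent = {
  session_id: string;
  invocation_id: string;
  result: Record<string, unknown>;
};
```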

View File

@@ -0,0 +1,17 @@
# Node Editor Design
WIP
Everything for the node editor lives in `src/features/nodes/`; have a look at `state.nodes.invocation`.
- on socket connect, if no schema saved, fetch `localhost:9090/openapi.json`, save JSON to `state.nodes.schema`
- on fulfilled schema fetch, `parseSchema()` the schema. this outputs a `Record<string, Invocation>` which is saved to `state.nodes.invocations` - `Invocation` is like a template for the node (see the sketch after this list)
- when you add a node, the `Invocation` template is passed to `InvocationComponent.tsx` to build the UI component for that node
- inputs/outputs have field types - and each field type gets a `FieldComponent` which includes a dispatcher to write state changes to redux `nodesSlice`
- `reactflow` sends changes to nodes/edges to redux
- to invoke, build the graph from state with `buildNodesGraph()`, then send it
- changed the Invoke button's onClick to build the graph; once the graph is built, it dispatches the actual network request to create the session - see `session.ts`
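A rough sketch of the template shape that `parseSchema()` produces (field names are illustrative; the real types live in `src/features/nodes/`):
```ts
// illustrative shapes only
type InputFieldTemplate = {
  name: string;
  type: string; // the field type, which selects the FieldComponent to render
};

type Invocation = {
  type: string; // e.g. 'txt2img'
  title: string;
  description: string;
  inputs: Record<string, InputFieldTemplate>;
  outputs: Record<string, InputFieldTemplate>;
};

// parseSchema() turns the fetched OpenAPI document into node templates
declare function parseSchema(openApiDoc: unknown): Record<string, Invocation>;
```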

View File

@@ -0,0 +1,29 @@
# Package Scripts
WIP walkthrough of `package.json` scripts.
## `theme` & `theme:watch`
These run the Chakra CLI to generate types for the theme, or watch for code changes and re-generate the types.
The CLI essentially monkeypatches Chakra's files in `node_modules`.
## `postinstall`
The `postinstall` script patches a few packages and runs the Chakra CLI to generate types for the theme.
### Patch `@chakra-ui/cli`
See: <https://github.com/chakra-ui/chakra-ui/issues/7394>
### Patch `redux-persist`
We want to persist the canvas state to `localStorage` but many canvas operations change data very quickly, so we need to debounce the writes to `localStorage`.
`redux-persist` is unfortunately unmaintained. The repo's current code is nonfunctional, and the last release's code depends on a package that was removed from `npm` for being malware, so we cannot simply fork it.
So, we have to patch it directly. Perhaps a better way would be to write a debounced storage adapter, but I couldn't figure out how to do that.
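For reference, a minimal sketch of what such a debounced storage adapter might look like (untested; a `redux-persist` storage engine only needs `getItem`/`setItem`/`removeItem`):
```ts
import storage from 'redux-persist/lib/storage';

// wraps the default localStorage engine so rapid writes are coalesced
const createDebouncedStorage = (delay = 300) => {
  const timers: Record<string, ReturnType<typeof setTimeout>> = {};
  return {
    getItem: (key: string) => storage.getItem(key),
    removeItem: (key: string) => storage.removeItem(key),
    setItem: (key: string, value: string) =>
      new Promise<void>((resolve) => {
        // cancel any pending write for this key, then schedule a new one
        clearTimeout(timers[key]);
        timers[key] = setTimeout(() => {
          storage.setItem(key, value);
          resolve();
        }, delay);
      }),
  };
};
```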
### Patch `redux-deep-persist`
This package makes blacklisting and whitelisting persist configs very simple, but we have to patch it to match `redux-persist` for the types to work.

View File

@@ -1,10 +1,16 @@
# InvokeAI Web UI
- [InvokeAI Web UI](#invokeai-web-ui)
- [Stack](#stack)
- [Contributing](#contributing)
- [Dev Environment](#dev-environment)
- [Production builds](#production-builds)
The UI is a fairly straightforward TypeScript React app. The only really fancy stuff is the Unified Canvas.
The code is in `invokeai/frontend/web/` if you want to have a look.
## Details
## Stack
State management is Redux via [Redux Toolkit](https://github.com/reduxjs/redux-toolkit). Communication with the server is a mix of HTTP and [socket.io](https://github.com/socketio/socket.io-client) (with a custom redux middleware to help).
@@ -32,7 +38,7 @@ Start everything in dev mode:
1. Start the dev server: `yarn dev`
2. Start the InvokeAI UI per usual: `invokeai --web`
3. Point your browser to the dev server address e.g. `http://localhost:5173/`
3. Point your browser to the dev server address e.g. <http://localhost:5173/>
### Production builds

View File

@@ -1,6 +1,7 @@
import React, { PropsWithChildren } from 'react';
import { IAIPopoverProps } from '../web/src/common/components/IAIPopover';
import { IAIIconButtonProps } from '../web/src/common/components/IAIIconButton';
import { InvokeTabName } from 'features/ui/store/tabMap';
export {};
@@ -64,9 +65,24 @@ declare module '@invoke-ai/invoke-ai-ui' {
declare class SettingsModal extends React.Component<SettingsModalProps> {
public constructor(props: SettingsModalProps);
}
declare class StatusIndicator extends React.Component<StatusIndicatorProps> {
public constructor(props: StatusIndicatorProps);
}
declare class ModelSelect extends React.Component<ModelSelectProps> {
public constructor(props: ModelSelectProps);
}
}
declare function Invoke(props: PropsWithChildren): JSX.Element;
interface InvokeProps extends PropsWithChildren {
apiUrl?: string;
disabledPanels?: string[];
disabledTabs?: InvokeTabName[];
token?: string;
}
declare function Invoke(props: InvokeProps): JSX.Element;
export {
ThemeChanger,
@@ -74,5 +90,7 @@ export {
IAIPopover,
IAIIconButton,
SettingsModal,
StatusIndicator,
ModelSelect,
};
export = Invoke;

View File

@@ -5,7 +5,10 @@
"scripts": {
"prepare": "cd ../../../ && husky install invokeai/frontend/web/.husky",
"dev": "concurrently \"vite dev\" \"yarn run theme:watch\"",
"dev:nodes": "concurrently \"vite dev --mode nodes\" \"yarn run theme:watch\"",
"build": "yarn run lint && vite build",
"api:web": "openapi -i http://localhost:9090/openapi.json -o src/services/api --client axios --useOptions --useUnionTypes --exportSchemas true --indent 2 --request src/services/fixtures/request.ts",
"api:file": "openapi -i src/services/fixtures/openapi.json -o src/services/api --client axios --useOptions --useUnionTypes --exportSchemas true --indent 2 --request src/services/fixtures/request.ts",
"preview": "vite preview",
"lint:madge": "madge --circular src/main.tsx",
"lint:eslint": "eslint --max-warnings=0 .",
@@ -41,9 +44,10 @@
"@chakra-ui/react": "^2.5.1",
"@chakra-ui/styled-system": "^2.6.1",
"@chakra-ui/theme-tools": "^2.0.16",
"@dagrejs/graphlib": "^2.1.12",
"@emotion/react": "^11.10.6",
"@emotion/styled": "^11.10.6",
"@reduxjs/toolkit": "^1.9.2",
"@reduxjs/toolkit": "^1.9.3",
"chakra-ui-contextmenu": "^1.0.5",
"dateformat": "^5.0.3",
"formik": "^2.2.9",
@@ -67,7 +71,9 @@
"react-redux": "^8.0.5",
"react-transition-group": "^4.4.5",
"react-zoom-pan-pinch": "^2.6.1",
"reactflow": "^11.7.0",
"redux-deep-persist": "^1.0.7",
"redux-dynamic-middlewares": "^2.2.0",
"redux-persist": "^6.0.0",
"socket.io-client": "^4.6.0",
"use-image": "^1.1.0",
@@ -83,6 +89,7 @@
"@typescript-eslint/eslint-plugin": "^5.52.0",
"@typescript-eslint/parser": "^5.52.0",
"@vitejs/plugin-react-swc": "^3.2.0",
"axios": "^1.3.4",
"babel-plugin-transform-imports": "^2.0.0",
"concurrently": "^7.6.0",
"eslint": "^8.34.0",
@@ -90,13 +97,17 @@
"eslint-plugin-prettier": "^4.2.1",
"eslint-plugin-react": "^7.32.2",
"eslint-plugin-react-hooks": "^4.6.0",
"form-data": "^4.0.0",
"husky": "^8.0.3",
"lint-staged": "^13.1.2",
"madge": "^6.0.0",
"openapi-types": "^12.1.0",
"openapi-typescript-codegen": "^0.23.0",
"postinstall-postinstall": "^2.1.0",
"prettier": "^2.8.4",
"rollup-plugin-visualizer": "^5.9.0",
"terser": "^5.16.4",
"typescript": "4.9.5",
"vite": "^4.1.2",
"vite-plugin-eslint": "^1.8.1",
"vite-tsconfig-paths": "^4.0.5",

View File

@@ -522,6 +522,10 @@
"resetComplete": "Web UI has been reset. Refresh the page to reload."
},
"toast": {
"serverError": "Server Error",
"disconnected": "Disconnected from Server",
"connected": "Connected to Server",
"canceled": "Processing Canceled",
"tempFoldersEmptied": "Temp Folder Emptied",
"uploadFailed": "Upload failed",
"uploadFailedMultipleImagesDesc": "Multiple images pasted, may only upload one image at a time",

View File

@@ -13,16 +13,42 @@ import { Box, Flex, Grid, Portal, useColorMode } from '@chakra-ui/react';
import { APP_HEIGHT, APP_WIDTH } from 'theme/util/constants';
import ImageGalleryPanel from 'features/gallery/components/ImageGalleryPanel';
import Lightbox from 'features/lightbox/components/Lightbox';
import { useAppSelector } from './storeHooks';
import { useAppDispatch, useAppSelector } from './storeHooks';
import { PropsWithChildren, useEffect } from 'react';
import { setDisabledPanels, setDisabledTabs } from 'features/ui/store/uiSlice';
import { InvokeTabName } from 'features/ui/store/tabMap';
import { shouldTransformUrlsChanged } from 'features/system/store/systemSlice';
keepGUIAlive();
const App = (props: PropsWithChildren) => {
interface Props extends PropsWithChildren {
options: {
disabledPanels: string[];
disabledTabs: InvokeTabName[];
shouldTransformUrls?: boolean;
};
}
const App = (props: Props) => {
useToastWatcher();
const currentTheme = useAppSelector((state) => state.ui.currentTheme);
const { setColorMode } = useColorMode();
const dispatch = useAppDispatch();
useEffect(() => {
dispatch(setDisabledPanels(props.options.disabledPanels));
}, [dispatch, props.options.disabledPanels]);
useEffect(() => {
dispatch(setDisabledTabs(props.options.disabledTabs));
}, [dispatch, props.options.disabledTabs]);
useEffect(() => {
dispatch(
shouldTransformUrlsChanged(Boolean(props.options.shouldTransformUrls))
);
}, [dispatch, props.options.shouldTransformUrls]);
useEffect(() => {
setColorMode(['light'].includes(currentTheme) ? 'light' : 'dark');

View File

@@ -14,6 +14,8 @@
import { InvokeTabName } from 'features/ui/store/tabMap';
import { IRect } from 'konva/lib/types';
import { ImageMetadata, ImageType } from 'services/api';
import { AnyInvocation } from 'services/events/types';
/**
* TODO:
@@ -113,7 +115,7 @@ export declare type Metadata = SystemGenerationMetadata & {
};
// An Image has a UUID, url, modified timestamp, width, height and maybe metadata
export declare type Image = {
export declare type _Image = {
uuid: string;
url: string;
thumbnail: string;
@@ -124,11 +126,23 @@ export declare type Image = {
category: GalleryCategory;
isBase64?: boolean;
dreamPrompt?: 'string';
name?: string;
};
/**
* ResultImage
*/
export declare type Image = {
name: string;
type: ImageType;
url: string;
thumbnail: string;
metadata: ImageMetadata;
};
// GalleryImages is an array of Image.
export declare type GalleryImages = {
images: Array<Image>;
images: Array<_Image>;
};
/**
@@ -275,7 +289,7 @@ export declare type SystemStatusResponse = SystemStatus;
export declare type SystemConfigResponse = SystemConfig;
export declare type ImageResultResponse = Omit<Image, 'uuid'> & {
export declare type ImageResultResponse = Omit<_Image, 'uuid'> & {
boundingBox?: IRect;
generationMode: InvokeTabName;
};
@@ -296,7 +310,7 @@ export declare type ErrorResponse = {
};
export declare type GalleryImagesResponse = {
images: Array<Omit<Image, 'uuid'>>;
images: Array<Omit<_Image, 'uuid'>>;
areMoreImagesAvailable: boolean;
category: GalleryCategory;
};

View File

@@ -13,9 +13,13 @@ import { InvokeTabName } from 'features/ui/store/tabMap';
export const generateImage = createAction<InvokeTabName>(
'socketio/generateImage'
);
export const runESRGAN = createAction<InvokeAI.Image>('socketio/runESRGAN');
export const runFacetool = createAction<InvokeAI.Image>('socketio/runFacetool');
export const deleteImage = createAction<InvokeAI.Image>('socketio/deleteImage');
export const runESRGAN = createAction<InvokeAI._Image>('socketio/runESRGAN');
export const runFacetool = createAction<InvokeAI._Image>(
'socketio/runFacetool'
);
export const deleteImage = createAction<InvokeAI._Image>(
'socketio/deleteImage'
);
export const requestImages = createAction<GalleryCategory>(
'socketio/requestImages'
);

View File

@@ -91,7 +91,7 @@ const makeSocketIOEmitters = (
})
);
},
emitRunESRGAN: (imageToProcess: InvokeAI.Image) => {
emitRunESRGAN: (imageToProcess: InvokeAI._Image) => {
dispatch(setIsProcessing(true));
const {
@@ -119,7 +119,7 @@ const makeSocketIOEmitters = (
})
);
},
emitRunFacetool: (imageToProcess: InvokeAI.Image) => {
emitRunFacetool: (imageToProcess: InvokeAI._Image) => {
dispatch(setIsProcessing(true));
const {
@@ -150,7 +150,7 @@ const makeSocketIOEmitters = (
})
);
},
emitDeleteImage: (imageToDelete: InvokeAI.Image) => {
emitDeleteImage: (imageToDelete: InvokeAI._Image) => {
const { url, uuid, category, thumbnail } = imageToDelete;
dispatch(removeImage(imageToDelete));
socketio.emit('deleteImage', url, thumbnail, uuid, category);

View File

@@ -34,8 +34,9 @@ import type { RootState } from 'app/store';
import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
import {
clearInitialImage,
initialImageSelected,
setInfillMethod,
setInitialImage,
// setInitialImage,
setMaskPath,
} from 'features/parameters/store/generationSlice';
import { tabMap } from 'features/ui/store/tabMap';
@@ -146,7 +147,8 @@ const makeSocketIOListeners = (
const activeTabName = tabMap[activeTab];
switch (activeTabName) {
case 'img2img': {
dispatch(setInitialImage(newImage));
dispatch(initialImageSelected(newImage.uuid));
// dispatch(setInitialImage(newImage));
break;
}
}
@@ -262,7 +264,7 @@ const makeSocketIOListeners = (
*/
// Generate a UUID for each image
const preparedImages = images.map((image): InvokeAI.Image => {
const preparedImages = images.map((image): InvokeAI._Image => {
return {
uuid: uuidv4(),
...image,
@@ -334,7 +336,7 @@ const makeSocketIOListeners = (
if (
initialImage === url ||
(initialImage as InvokeAI.Image)?.url === url
(initialImage as InvokeAI._Image)?.url === url
) {
dispatch(clearInitialImage());
}

View File

@@ -29,6 +29,8 @@ export const socketioMiddleware = () => {
path: `${window.location.pathname}socket.io`,
});
socketio.disconnect();
let areListenersSet = false;
const middleware: Middleware = (store) => (next) => (action) => {

View File

@@ -2,18 +2,35 @@ import { combineReducers, configureStore } from '@reduxjs/toolkit';
import { persistReducer } from 'redux-persist';
import storage from 'redux-persist/lib/storage'; // defaults to localStorage for web
import dynamicMiddlewares from 'redux-dynamic-middlewares';
import { getPersistConfig } from 'redux-deep-persist';
import canvasReducer from 'features/canvas/store/canvasSlice';
import galleryReducer from 'features/gallery/store/gallerySlice';
import lightboxReducer from 'features/lightbox/store/lightboxSlice';
import generationReducer from 'features/parameters/store/generationSlice';
import postprocessingReducer from 'features/parameters/store/postprocessingSlice';
import systemReducer from 'features/system/store/systemSlice';
import galleryReducer, {
GalleryState,
} from 'features/gallery/store/gallerySlice';
import resultsReducer, {
resultsAdapter,
ResultsState,
} from 'features/gallery/store/resultsSlice';
import uploadsReducer from 'features/gallery/store/uploadsSlice';
import lightboxReducer, {
LightboxState,
} from 'features/lightbox/store/lightboxSlice';
import generationReducer, {
GenerationState,
} from 'features/parameters/store/generationSlice';
import postprocessingReducer, {
PostprocessingState,
} from 'features/parameters/store/postprocessingSlice';
import systemReducer, { SystemState } from 'features/system/store/systemSlice';
import uiReducer from 'features/ui/store/uiSlice';
import modelsReducer from 'features/system/store/modelSlice';
import nodesReducer, { NodesState } from 'features/nodes/store/nodesSlice';
import { socketioMiddleware } from './socketio/middleware';
import { socketMiddleware } from 'services/events/middleware';
import { CanvasState } from 'features/canvas/store/canvasTypes';
/**
* redux-persist provides an easy and reliable way to persist state across reloads.
@@ -29,13 +46,21 @@ import { socketioMiddleware } from './socketio/middleware';
* The necesssary nested persistors with blacklists are configured below.
*/
const canvasBlacklist = [
/**
* Canvas slice persist blacklist
*/
const canvasBlacklist: (keyof CanvasState)[] = [
'cursorPosition',
'isCanvasInitialized',
'doesCanvasNeedScaling',
].map((blacklistItem) => `canvas.${blacklistItem}`);
];
const systemBlacklist = [
canvasBlacklist.map((blacklistItem) => `canvas.${blacklistItem}`);
/**
* System slice persist blacklist
*/
const systemBlacklist: (keyof SystemState)[] = [
'currentIteration',
'currentStatus',
'currentStep',
@@ -48,30 +73,101 @@ const systemBlacklist = [
'totalIterations',
'totalSteps',
'openModel',
'cancelOptions.cancelAfter',
].map((blacklistItem) => `system.${blacklistItem}`);
'isCancelScheduled',
'sessionId',
'progressImage',
];
const galleryBlacklist = [
systemBlacklist.map((blacklistItem) => `system.${blacklistItem}`);
/**
* Gallery slice persist blacklist
*/
const galleryBlacklist: (keyof GalleryState)[] = [
'categories',
'currentCategory',
'currentImage',
'currentImageUuid',
'shouldAutoSwitchToNewImages',
'intermediateImage',
].map((blacklistItem) => `gallery.${blacklistItem}`);
];
const lightboxBlacklist = ['isLightboxOpen'].map(
(blacklistItem) => `lightbox.${blacklistItem}`
galleryBlacklist.map((blacklistItem) => `gallery.${blacklistItem}`);
/**
* Lightbox slice persist blacklist
*/
const lightboxBlacklist: (keyof LightboxState)[] = ['isLightboxOpen'];
lightboxBlacklist.map((blacklistItem) => `lightbox.${blacklistItem}`);
/**
* Nodes slice persist blacklist
*/
const nodesBlacklist: (keyof NodesState)[] = ['schema', 'invocations'];
nodesBlacklist.map((blacklistItem) => `nodes.${blacklistItem}`);
/**
* Generation slice persist blacklist
*/
const generationBlacklist: (keyof GenerationState)[] = [];
generationBlacklist.map((blacklistItem) => `generation.${blacklistItem}`);
/**
* Postprocessing slice persist blacklist
*/
const postprocessingBlacklist: (keyof PostprocessingState)[] = [];
postprocessingBlacklist.map(
(blacklistItem) => `postprocessing.${blacklistItem}`
);
/**
* Results slice persist blacklist
*
* Currently blacklisting results slice entirely, see persist config below
*/
const resultsBlacklist: (keyof ResultsState)[] = [];
resultsBlacklist.map((blacklistItem) => `results.${blacklistItem}`);
/**
* Uploads slice persist blacklist
*
* Currently blacklisting uploads slice entirely, see persist config below
*/
const uploadsBlacklist: (keyof NodesState)[] = [];
uploadsBlacklist.map((blacklistItem) => `uploads.${blacklistItem}`);
/**
* Models slice persist blacklist
*/
const modelsBlacklist: (keyof NodesState)[] = [];
modelsBlacklist.map((blacklistItem) => `models.${blacklistItem}`);
/**
* UI slice persist blacklist
*/
const uiBlacklist: (keyof NodesState)[] = [];
uiBlacklist.map((blacklistItem) => `ui.${blacklistItem}`);
const rootReducer = combineReducers({
generation: generationReducer,
postprocessing: postprocessingReducer,
gallery: galleryReducer,
system: systemReducer,
canvas: canvasReducer,
ui: uiReducer,
gallery: galleryReducer,
generation: generationReducer,
lightbox: lightboxReducer,
models: modelsReducer,
nodes: nodesReducer,
postprocessing: postprocessingReducer,
results: resultsReducer,
system: systemReducer,
ui: uiReducer,
uploads: uploadsReducer,
});
const rootPersistConfig = getPersistConfig({
@@ -80,23 +176,40 @@ const rootPersistConfig = getPersistConfig({
rootReducer,
blacklist: [
...canvasBlacklist,
...systemBlacklist,
...galleryBlacklist,
...generationBlacklist,
...lightboxBlacklist,
...modelsBlacklist,
...nodesBlacklist,
...postprocessingBlacklist,
// ...resultsBlacklist,
'results',
...systemBlacklist,
...uiBlacklist,
// ...uploadsBlacklist,
'uploads',
],
debounce: 300,
});
const persistedReducer = persistReducer(rootPersistConfig, rootReducer);
// Continue with store setup
// TODO: rip the old middleware out when nodes is complete
export function buildMiddleware() {
if (import.meta.env.MODE === 'nodes' || import.meta.env.MODE === 'package') {
return socketMiddleware();
} else {
return socketioMiddleware();
}
}
export const store = configureStore({
reducer: persistedReducer,
middleware: (getDefaultMiddleware) =>
getDefaultMiddleware({
immutableCheck: false,
serializableCheck: false,
}).concat(socketioMiddleware()),
}).concat(dynamicMiddlewares),
devTools: {
// Uncommenting these very rapidly called actions makes the redux dev tools output much more readable
actionsDenylist: [

View File

@@ -0,0 +1,8 @@
import { createAsyncThunk } from '@reduxjs/toolkit';
import { AppDispatch, RootState } from './store';
// https://redux-toolkit.js.org/usage/usage-with-typescript#defining-a-pre-typed-createasyncthunk
export const createAppAsyncThunk = createAsyncThunk.withTypes<{
state: RootState;
dispatch: AppDispatch;
}>();

View File

@@ -2,7 +2,6 @@ import { Box, useToast } from '@chakra-ui/react';
import { ImageUploaderTriggerContext } from 'app/contexts/ImageUploaderTriggerContext';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import useImageUploader from 'common/hooks/useImageUploader';
import { uploadImage } from 'features/gallery/store/thunks/uploadImage';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { ResourceKey } from 'i18next';
import {
@@ -15,6 +14,7 @@ import {
} from 'react';
import { FileRejection, useDropzone } from 'react-dropzone';
import { useTranslation } from 'react-i18next';
import { imageUploaded } from 'services/thunks/image';
import ImageUploadOverlay from './ImageUploadOverlay';
type ImageUploaderProps = {
@@ -49,7 +49,7 @@ const ImageUploader = (props: ImageUploaderProps) => {
const fileAcceptedCallback = useCallback(
async (file: File) => {
dispatch(uploadImage({ imageFile: file }));
dispatch(imageUploaded({ formData: { file } }));
},
[dispatch]
);
@@ -124,7 +124,7 @@ const ImageUploader = (props: ImageUploaderProps) => {
return;
}
dispatch(uploadImage({ imageFile: file }));
dispatch(imageUploaded({ formData: { file } }));
};
document.addEventListener('paste', pasteImageListener);
return () => {

View File

@@ -1,27 +1,160 @@
import { Flex, Heading, Text, VStack } from '@chakra-ui/react';
import { useTranslation } from 'react-i18next';
import WorkInProgress from './WorkInProgress';
// import WorkInProgress from './WorkInProgress';
// import ReactFlow, {
// applyEdgeChanges,
// applyNodeChanges,
// Background,
// Controls,
// Edge,
// Handle,
// Node,
// NodeTypes,
// OnEdgesChange,
// OnNodesChange,
// Position,
// } from 'reactflow';
export default function NodesWIP() {
const { t } = useTranslation();
return (
<WorkInProgress>
<Flex
sx={{
flexDirection: 'column',
alignItems: 'center',
justifyContent: 'center',
w: '100%',
h: '100%',
gap: 4,
textAlign: 'center',
}}
>
<Heading>{t('common.nodes')}</Heading>
<VStack maxW="50rem" gap={4}>
<Text>{t('common.nodesDesc')}</Text>
</VStack>
</Flex>
</WorkInProgress>
);
}
// import 'reactflow/dist/style.css';
// import {
// Fragment,
// FunctionComponent,
// ReactNode,
// useCallback,
// useMemo,
// useState,
// } from 'react';
// import { OpenAPIV3 } from 'openapi-types';
// import { filter, map, reduce } from 'lodash';
// import {
// Box,
// Flex,
// FormControl,
// FormLabel,
// Input,
// Select,
// Switch,
// Text,
// NumberInput,
// NumberInputField,
// NumberInputStepper,
// NumberIncrementStepper,
// NumberDecrementStepper,
// Tooltip,
// chakra,
// Badge,
// Heading,
// VStack,
// HStack,
// Menu,
// MenuButton,
// MenuList,
// MenuItem,
// MenuItemOption,
// MenuGroup,
// MenuOptionGroup,
// MenuDivider,
// IconButton,
// } from '@chakra-ui/react';
// import { FaPlus } from 'react-icons/fa';
// import {
// FIELD_NAMES as FIELD_NAMES,
// FIELDS,
// INVOCATION_NAMES as INVOCATION_NAMES,
// INVOCATIONS,
// } from 'features/nodeEditor/constants';
// console.log('invocations', INVOCATIONS);
// const nodeTypes = reduce(
// INVOCATIONS,
// (acc, val, key) => {
// acc[key] = val.component;
// return acc;
// },
// {} as NodeTypes
// );
// console.log('nodeTypes', nodeTypes);
// // make initial nodes one of every node for now
// let n = 0;
// const initialNodes = map(INVOCATIONS, (i) => ({
// id: i.type,
// type: i.title,
// position: { x: (n += 20), y: (n += 20) },
// data: {},
// }));
// console.log('initialNodes', initialNodes);
// export default function NodesWIP() {
// const [nodes, setNodes] = useState<Node[]>([]);
// const [edges, setEdges] = useState<Edge[]>([]);
// const onNodesChange: OnNodesChange = useCallback(
// (changes) => setNodes((nds) => applyNodeChanges(changes, nds)),
// []
// );
// const onEdgesChange: OnEdgesChange = useCallback(
// (changes) => setEdges((eds: Edge[]) => applyEdgeChanges(changes, eds)),
// []
// );
// return (
// <Box
// sx={{
// position: 'relative',
// width: 'full',
// height: 'full',
// borderRadius: 'md',
// }}
// >
// <ReactFlow
// nodeTypes={nodeTypes}
// nodes={nodes}
// edges={edges}
// onNodesChange={onNodesChange}
// onEdgesChange={onEdgesChange}
// >
// <Background />
// <Controls />
// </ReactFlow>
// <HStack sx={{ position: 'absolute', top: 2, right: 2 }}>
// {FIELD_NAMES.map((field) => (
// <Badge
// key={field}
// colorScheme={FIELDS[field].color}
// sx={{ userSelect: 'none' }}
// >
// {field}
// </Badge>
// ))}
// </HStack>
// <Menu>
// <MenuButton
// as={IconButton}
// aria-label="Options"
// icon={<FaPlus />}
// sx={{ position: 'absolute', top: 2, left: 2 }}
// />
// <MenuList>
// {INVOCATION_NAMES.map((name) => {
// const invocation = INVOCATIONS[name];
// return (
// <Tooltip
// key={name}
// label={invocation.description}
// placement="end"
// hasArrow
// >
// <MenuItem>{invocation.title}</MenuItem>
// </Tooltip>
// );
// })}
// </MenuList>
// </Menu>
// </Box>
// );
// }
export default {};

View File

@@ -14,6 +14,8 @@ const WorkInProgress = (props: WorkInProgressProps) => {
width: '100%',
height: '100%',
bg: 'base.850',
borderRadius: 'base',
position: 'relative',
}}
>
{children}

View File

@@ -0,0 +1,72 @@
import { RootState } from 'app/store';
import { InvokeTabName, tabMap } from 'features/ui/store/tabMap';
import { find } from 'lodash';
import {
Graph,
ImageToImageInvocation,
TextToImageInvocation,
} from 'services/api';
import { buildHiResNode, buildImg2ImgNode } from './nodes/image2Image';
import { buildIteration } from './nodes/iteration';
import { buildTxt2ImgNode } from './nodes/text2Image';
function mapTabToFunction(activeTabName: InvokeTabName) {
switch (activeTabName) {
case 'txt2img':
return buildTxt2ImgNode;
case 'img2img':
return buildImg2ImgNode;
default:
return buildTxt2ImgNode;
}
}
const buildBaseNode = (
state: RootState
): Record<string, TextToImageInvocation | ImageToImageInvocation> => {
const { activeTab } = state.ui;
const activeTabName = tabMap[activeTab];
return mapTabToFunction(activeTabName)(state);
};
type BuildGraphOutput = {
graph: Graph;
nodeIdsToSubscribe: string[];
};
export const buildGraph = (state: RootState): BuildGraphOutput => {
const { generation, postprocessing } = state;
const { iterations } = generation;
const { hiresFix, hiresStrength } = postprocessing;
const baseNode = buildBaseNode(state);
let graph: Graph = { nodes: baseNode };
const nodeIdsToSubscribe: string[] = [];
if (iterations > 1) {
graph = buildIteration({ graph, iterations });
}
if (hiresFix) {
const { node, edge } = buildHiResNode(
baseNode as Record<string, TextToImageInvocation>,
hiresStrength
);
graph = {
nodes: {
...graph.nodes,
...node,
},
edges: [...(graph.edges || []), edge],
};
nodeIdsToSubscribe.push(Object.keys(node)[0]);
}
console.log('buildGraph: ', graph);
return { graph, nodeIdsToSubscribe };
};

View File

@@ -0,0 +1,6 @@
import dateFormat from 'dateformat';
/**
* Get a `now` timestamp with 1s precision, formatted as ISO datetime.
*/
export const getTimestamp = () => dateFormat(new Date(), 'isoDateTime');

View File

@@ -0,0 +1,28 @@
import { RootState } from 'app/store';
import { useAppSelector } from 'app/storeHooks';
import { OpenAPI } from 'services/api';
export const getUrlAlt = (url: string, shouldTransformUrls: boolean) => {
if (OpenAPI.BASE && shouldTransformUrls) {
return [OpenAPI.BASE, url].join('/');
}
return url;
};
export const useGetUrl = () => {
const shouldTransformUrls = useAppSelector(
(state: RootState) => state.system.shouldTransformUrls
);
return {
shouldTransformUrls,
getUrl: (url: string) => {
if (OpenAPI.BASE && shouldTransformUrls) {
return [OpenAPI.BASE, url].join('/');
}
return url;
},
};
};

View File

@@ -0,0 +1,98 @@
import { v4 as uuidv4 } from 'uuid';
import { RootState } from 'app/store';
import {
Edge,
ImageToImageInvocation,
TextToImageInvocation,
} from 'services/api';
import { _Image } from 'app/invokeai';
import { initialImageSelector } from 'features/parameters/store/generationSelectors';
export const buildImg2ImgNode = (
state: RootState
): Record<string, ImageToImageInvocation> => {
const nodeId = uuidv4();
const { generation, system, models } = state;
const { shouldDisplayInProgressType } = system;
const { currentModel: model } = models;
const {
prompt,
seed,
steps,
width,
height,
cfgScale,
sampler,
seamless,
img2imgStrength: strength,
shouldFitToWidthHeight: fit,
shouldRandomizeSeed,
} = generation;
const initialImage = initialImageSelector(state);
if (!initialImage) {
// TODO: handle this
throw 'no initial image';
}
return {
[nodeId]: {
id: nodeId,
type: 'img2img',
prompt,
seed: shouldRandomizeSeed ? -1 : seed,
steps,
width,
height,
cfg_scale: cfgScale,
scheduler: sampler as ImageToImageInvocation['scheduler'],
seamless,
model,
progress_images: shouldDisplayInProgressType === 'full-res',
image: {
image_name: initialImage.name,
image_type: initialImage.type,
},
strength,
fit,
},
};
};
type hiresReturnType = {
node: Record<string, ImageToImageInvocation>;
edge: Edge;
};
export const buildHiResNode = (
baseNode: Record<string, TextToImageInvocation>,
strength?: number
): hiresReturnType => {
const nodeId = uuidv4();
const baseNodeId = Object.keys(baseNode)[0];
const baseNodeValues = Object.values(baseNode)[0];
return {
node: {
[nodeId]: {
...baseNodeValues,
id: nodeId,
type: 'img2img',
strength,
},
},
edge: {
source: {
field: 'image',
node_id: baseNodeId,
},
destination: {
field: 'image',
node_id: nodeId,
},
},
};
};

View File

@@ -0,0 +1,81 @@
import { v4 as uuidv4 } from 'uuid';
import {
Edge,
Graph,
ImageToImageInvocation,
IterateInvocation,
RangeInvocation,
TextToImageInvocation,
} from 'services/api';
import { buildImg2ImgNode } from './image2Image';
type BuildIteration = {
graph: Graph;
iterations: number;
};
const buildRangeNode = (
iterations: number
): Record<string, RangeInvocation> => {
const nodeId = uuidv4();
return {
[nodeId]: {
id: nodeId,
type: 'range',
start: 0,
stop: iterations,
step: 1,
},
};
};
const buildIterateNode = (): Record<string, IterateInvocation> => {
const nodeId = uuidv4();
return {
[nodeId]: {
id: nodeId,
type: 'iterate',
collection: [],
index: 0,
},
};
};
export const buildIteration = ({
graph,
iterations,
}: BuildIteration): Graph => {
const rangeNode = buildRangeNode(iterations);
const iterateNode = buildIterateNode();
const baseNode: Graph['nodes'] = graph.nodes;
const edges: Edge[] = [
{
source: {
field: 'collection',
node_id: Object.keys(rangeNode)[0],
},
destination: {
field: 'collection',
node_id: Object.keys(iterateNode)[0],
},
},
{
source: {
field: 'item',
node_id: Object.keys(iterateNode)[0],
},
destination: {
field: 'seed',
node_id: Object.keys(baseNode!)[0],
},
},
];
return {
nodes: {
...rangeNode,
...iterateNode,
...graph.nodes,
},
edges,
};
};

View File

@@ -0,0 +1,43 @@
import { v4 as uuidv4 } from 'uuid';
import { RootState } from 'app/store';
import { TextToImageInvocation } from 'services/api';
export const buildTxt2ImgNode = (
state: RootState
): Record<string, TextToImageInvocation> => {
const nodeId = uuidv4();
const { generation, system, models } = state;
const { shouldDisplayInProgressType } = system;
const { currentModel: model } = models;
const {
prompt,
seed,
steps,
width,
height,
cfgScale: cfg_scale,
sampler,
seamless,
shouldRandomizeSeed,
} = generation;
// missing fields in TextToImageInvocation: strength, hires_fix
return {
[nodeId]: {
id: nodeId,
type: 'txt2img',
prompt,
seed: shouldRandomizeSeed ? -1 : seed,
steps,
width,
height,
cfg_scale,
scheduler: sampler as TextToImageInvocation['scheduler'],
seamless,
model,
progress_images: shouldDisplayInProgressType === 'full-res',
},
};
};

View File

@@ -1,8 +1,10 @@
import React, { lazy, PropsWithChildren } from 'react';
import React, { lazy, PropsWithChildren, useEffect, useState } from 'react';
import { Provider } from 'react-redux';
import { PersistGate } from 'redux-persist/integration/react';
import { store } from './app/store';
import { buildMiddleware, store } from './app/store';
import { persistor } from './persistor';
import { OpenAPI } from 'services/api';
import { InvokeTabName } from 'features/ui/store/tabMap';
import '@fontsource/inter/100.css';
import '@fontsource/inter/200.css';
import '@fontsource/inter/300.css';
@@ -17,18 +19,61 @@ import Loading from './Loading';
// Localization
import './i18n';
import { addMiddleware, resetMiddlewares } from 'redux-dynamic-middlewares';
const App = lazy(() => import('./app/App'));
const ThemeLocaleProvider = lazy(() => import('./app/ThemeLocaleProvider'));
export default function Component(props: PropsWithChildren) {
interface Props extends PropsWithChildren {
apiUrl?: string;
disabledPanels?: string[];
disabledTabs?: InvokeTabName[];
token?: string;
shouldTransformUrls?: boolean;
}
export default function Component({
apiUrl,
disabledPanels = [],
disabledTabs = [],
token,
children,
shouldTransformUrls,
}: Props) {
useEffect(() => {
// configure API client token
if (token) {
OpenAPI.TOKEN = token;
}
// configure API client base url
if (apiUrl) {
OpenAPI.BASE = apiUrl;
}
// reset dynamically added middlewares
resetMiddlewares();
// TODO: at this point, after resetting the middleware, we really ought to clean up the socket
// stuff by calling `dispatch(socketReset())`. but we cannot dispatch from here as we are
// outside the provider. it's not needed until there is the possibility that we will change
// the `apiUrl`/`token` dynamically.
// rebuild socket middleware with token and apiUrl
addMiddleware(buildMiddleware());
}, [apiUrl, token]);
return (
<React.StrictMode>
<Provider store={store}>
<PersistGate loading={<Loading />} persistor={persistor}>
<React.Suspense fallback={<Loading showText />}>
<ThemeLocaleProvider>
<App>{props.children}</App>
<App
options={{ disabledPanels, disabledTabs, shouldTransformUrls }}
>
{children}
</App>
</ThemeLocaleProvider>
</React.Suspense>
</PersistGate>

View File

@@ -5,6 +5,8 @@ import ThemeChanger from './features/system/components/ThemeChanger';
import IAIPopover from './common/components/IAIPopover';
import IAIIconButton from './common/components/IAIIconButton';
import SettingsModal from './features/system/components/SettingsModal/SettingsModal';
import StatusIndicator from './features/system/components/StatusIndicator';
import ModelSelect from 'features/system/components/ModelSelect';
export default Component;
export {
@@ -13,4 +15,6 @@ export {
IAIPopover,
IAIIconButton,
SettingsModal,
StatusIndicator,
ModelSelect,
};

View File

@@ -1,6 +1,7 @@
import { createSelector } from '@reduxjs/toolkit';
import { RootState } from 'app/store';
import { useAppSelector } from 'app/storeHooks';
import { useGetUrl } from 'common/util/getUrl';
import { GalleryState } from 'features/gallery/store/gallerySlice';
import { ImageConfig } from 'konva/lib/shapes/Image';
import { isEqual } from 'lodash';
@@ -25,7 +26,7 @@ type Props = Omit<ImageConfig, 'image'>;
const IAICanvasIntermediateImage = (props: Props) => {
const { ...rest } = props;
const intermediateImage = useAppSelector(selector);
const { getUrl } = useGetUrl();
const [loadedImageElement, setLoadedImageElement] =
useState<HTMLImageElement | null>(null);
@@ -36,8 +37,8 @@ const IAICanvasIntermediateImage = (props: Props) => {
tempImage.onload = () => {
setLoadedImageElement(tempImage);
};
tempImage.src = intermediateImage.url;
}, [intermediateImage]);
tempImage.src = getUrl(intermediateImage.url);
}, [intermediateImage, getUrl]);
if (!intermediateImage?.boundingBox) return null;

View File

@@ -1,5 +1,6 @@
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/storeHooks';
import { useGetUrl } from 'common/util/getUrl';
import { canvasSelector } from 'features/canvas/store/canvasSelectors';
import { rgbaColorToString } from 'features/canvas/util/colorToString';
import { isEqual } from 'lodash';
@@ -32,6 +33,7 @@ const selector = createSelector(
const IAICanvasObjectRenderer = () => {
const { objects } = useAppSelector(selector);
const { getUrl } = useGetUrl();
if (!objects) return null;
@@ -40,7 +42,12 @@ const IAICanvasObjectRenderer = () => {
{objects.map((obj, i) => {
if (isCanvasBaseImage(obj)) {
return (
<IAICanvasImage key={i} x={obj.x} y={obj.y} url={obj.image.url} />
<IAICanvasImage
key={i}
x={obj.x}
y={obj.y}
url={getUrl(obj.image.url)}
/>
);
} else if (isCanvasBaseLine(obj)) {
const line = (

View File

@@ -1,5 +1,6 @@
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/storeHooks';
import { useGetUrl } from 'common/util/getUrl';
import { canvasSelector } from 'features/canvas/store/canvasSelectors';
import { GroupConfig } from 'konva/lib/Group';
import { isEqual } from 'lodash';
@@ -53,11 +54,16 @@ const IAICanvasStagingArea = (props: Props) => {
width,
height,
} = useAppSelector(selector);
const { getUrl } = useGetUrl();
return (
<Group {...rest}>
{shouldShowStagingImage && currentStagingAreaImage && (
<IAICanvasImage url={currentStagingAreaImage.image.url} x={x} y={y} />
<IAICanvasImage
url={getUrl(currentStagingAreaImage.image.url)}
x={x}
y={y}
/>
)}
{shouldShowStagingOutline && (
<Group>

View File

@@ -156,7 +156,7 @@ export const canvasSlice = createSlice({
setCursorPosition: (state, action: PayloadAction<Vector2d | null>) => {
state.cursorPosition = action.payload;
},
setInitialCanvasImage: (state, action: PayloadAction<InvokeAI.Image>) => {
setInitialCanvasImage: (state, action: PayloadAction<InvokeAI._Image>) => {
const image = action.payload;
const { stageDimensions } = state;
@@ -291,7 +291,7 @@ export const canvasSlice = createSlice({
state,
action: PayloadAction<{
boundingBox: IRect;
image: InvokeAI.Image;
image: InvokeAI._Image;
}>
) => {
const { boundingBox, image } = action.payload;

View File

@@ -37,7 +37,7 @@ export type CanvasImage = {
y: number;
width: number;
height: number;
image: InvokeAI.Image;
image: InvokeAI._Image;
};
export type CanvasMaskLine = {
@@ -125,7 +125,7 @@ export interface CanvasState {
cursorPosition: Vector2d | null;
doesCanvasNeedScaling: boolean;
futureLayerStates: CanvasLayerState[];
intermediateImage?: InvokeAI.Image;
intermediateImage?: InvokeAI._Image;
isCanvasInitialized: boolean;
isDrawing: boolean;
isMaskEnabled: boolean;

View File

@@ -105,7 +105,7 @@ export const mergeAndUploadCanvas =
const { url, width, height } = image;
const newImage: InvokeAI.Image = {
const newImage: InvokeAI._Image = {
uuid: uuidv4(),
category: shouldSaveToGallery ? 'result' : 'user',
...image,

View File

@@ -14,8 +14,9 @@ import { setIsLightboxOpen } from 'features/lightbox/store/lightboxSlice';
import FaceRestoreSettings from 'features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreSettings';
import UpscaleSettings from 'features/parameters/components/AdvancedParameters/Upscale/UpscaleSettings';
import {
initialImageSelected,
setAllParameters,
setInitialImage,
// setInitialImage,
setSeed,
} from 'features/parameters/store/generationSlice';
import { postprocessingSelector } from 'features/parameters/store/postprocessingSelectors';
@@ -45,11 +46,15 @@ import {
FaShareAlt,
FaTrash,
} from 'react-icons/fa';
import { gallerySelector } from '../store/gallerySelectors';
import {
gallerySelector,
selectedImageSelector,
} from '../store/gallerySelectors';
import DeleteImageModal from './DeleteImageModal';
import { useCallback } from 'react';
import useSetBothPrompts from 'features/parameters/hooks/usePrompt';
import { requestCanvasRescale } from 'features/canvas/store/thunks/requestCanvasScale';
import { useGetUrl } from 'common/util/getUrl';
const currentImageButtonsSelector = createSelector(
[
@@ -59,6 +64,7 @@ const currentImageButtonsSelector = createSelector(
uiSelector,
lightboxSelector,
activeTabNameSelector,
selectedImageSelector,
],
(
system: SystemState,
@@ -66,7 +72,8 @@ const currentImageButtonsSelector = createSelector(
postprocessing,
ui,
lightbox,
activeTabName
activeTabName,
selectedImage
) => {
const { isProcessing, isConnected, isGFPGANAvailable, isESRGANAvailable } =
system;
@@ -91,6 +98,7 @@ const currentImageButtonsSelector = createSelector(
shouldShowImageDetails,
activeTabName,
isLightboxOpen,
selectedImage,
};
},
{
@@ -117,26 +125,32 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
facetoolStrength,
shouldDisableToolbarButtons,
shouldShowImageDetails,
currentImage,
// currentImage,
isLightboxOpen,
activeTabName,
selectedImage,
} = useAppSelector(currentImageButtonsSelector);
const { getUrl, shouldTransformUrls } = useGetUrl();
const toast = useToast();
const { t } = useTranslation();
const setBothPrompts = useSetBothPrompts();
const handleClickUseAsInitialImage = () => {
if (!currentImage) return;
if (!selectedImage) return;
if (isLightboxOpen) dispatch(setIsLightboxOpen(false));
dispatch(setInitialImage(currentImage));
dispatch(setActiveTab('img2img'));
dispatch(initialImageSelected(selectedImage.name));
// dispatch(setInitialImage(currentImage));
// dispatch(setActiveTab('img2img'));
};
const handleCopyImage = async () => {
if (!currentImage) return;
if (!selectedImage) return;
const blob = await fetch(currentImage.url).then((res) => res.blob());
const blob = await fetch(getUrl(selectedImage.url)).then((res) =>
res.blob()
);
const data = [new ClipboardItem({ [blob.type]: blob })];
await navigator.clipboard.write(data);
@@ -150,24 +164,26 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
};
const handleCopyImageLink = () => {
navigator.clipboard
.writeText(
currentImage ? window.location.toString() + currentImage.url : ''
)
.then(() => {
toast({
title: t('toast.imageLinkCopied'),
status: 'success',
duration: 2500,
isClosable: true,
});
const url = selectedImage
? shouldTransformUrls
? getUrl(selectedImage.url)
: window.location.toString() + selectedImage.url
: '';
navigator.clipboard.writeText(url).then(() => {
toast({
title: t('toast.imageLinkCopied'),
status: 'success',
duration: 2500,
isClosable: true,
});
});
};
useHotkeys(
'shift+i',
() => {
if (currentImage) {
if (selectedImage) {
handleClickUseAsInitialImage();
toast({
title: t('toast.sentToImageToImage'),
@@ -185,24 +201,27 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
});
}
},
[currentImage]
[selectedImage]
);
const handleClickUseAllParameters = () => {
if (!currentImage) return;
currentImage.metadata && dispatch(setAllParameters(currentImage.metadata));
if (currentImage.metadata?.image.type === 'img2img') {
dispatch(setActiveTab('img2img'));
} else if (currentImage.metadata?.image.type === 'txt2img') {
dispatch(setActiveTab('txt2img'));
}
if (!selectedImage) return;
// selectedImage.metadata &&
// dispatch(setAllParameters(selectedImage.metadata));
// if (selectedImage.metadata?.image.type === 'img2img') {
// dispatch(setActiveTab('img2img'));
// } else if (selectedImage.metadata?.image.type === 'txt2img') {
// dispatch(setActiveTab('txt2img'));
// }
};
useHotkeys(
'a',
() => {
if (
['txt2img', 'img2img'].includes(currentImage?.metadata?.image?.type)
['txt2img', 'img2img'].includes(
selectedImage?.metadata?.sd_metadata?.type
)
) {
handleClickUseAllParameters();
toast({
@@ -221,18 +240,18 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
});
}
},
[currentImage]
[selectedImage]
);
const handleClickUseSeed = () => {
currentImage?.metadata &&
dispatch(setSeed(currentImage.metadata.image.seed));
selectedImage?.metadata &&
dispatch(setSeed(selectedImage.metadata.sd_metadata.seed));
};
useHotkeys(
's',
() => {
if (currentImage?.metadata?.image?.seed) {
if (selectedImage?.metadata?.sd_metadata?.seed) {
handleClickUseSeed();
toast({
title: t('toast.seedSet'),
@@ -250,19 +269,19 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
});
}
},
[currentImage]
[selectedImage]
);
const handleClickUsePrompt = useCallback(() => {
if (currentImage?.metadata?.image?.prompt) {
setBothPrompts(currentImage?.metadata?.image?.prompt);
if (selectedImage?.metadata?.sd_metadata?.prompt) {
setBothPrompts(selectedImage?.metadata?.sd_metadata?.prompt);
}
}, [currentImage?.metadata?.image?.prompt, setBothPrompts]);
}, [selectedImage?.metadata?.sd_metadata?.prompt, setBothPrompts]);
useHotkeys(
'p',
() => {
if (currentImage?.metadata?.image?.prompt) {
if (selectedImage?.metadata?.sd_metadata?.prompt) {
handleClickUsePrompt();
toast({
title: t('toast.promptSet'),
@@ -280,11 +299,11 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
});
}
},
[currentImage]
[selectedImage]
);
const handleClickUpscale = () => {
currentImage && dispatch(runESRGAN(currentImage));
// selectedImage && dispatch(runESRGAN(selectedImage));
};
useHotkeys(
@@ -308,7 +327,7 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
}
},
[
currentImage,
selectedImage,
isESRGANAvailable,
shouldDisableToolbarButtons,
isConnected,
@@ -318,7 +337,7 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
);
const handleClickFixFaces = () => {
currentImage && dispatch(runFacetool(currentImage));
// selectedImage && dispatch(runFacetool(selectedImage));
};
useHotkeys(
@@ -342,7 +361,7 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
}
},
[
currentImage,
selectedImage,
isGFPGANAvailable,
shouldDisableToolbarButtons,
isConnected,
@@ -355,10 +374,10 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
dispatch(setShouldShowImageDetails(!shouldShowImageDetails));
const handleSendToCanvas = () => {
if (!currentImage) return;
if (!selectedImage) return;
if (isLightboxOpen) dispatch(setIsLightboxOpen(false));
dispatch(setInitialCanvasImage(currentImage));
// dispatch(setInitialCanvasImage(selectedImage));
dispatch(requestCanvasRescale());
if (activeTabName !== 'unifiedCanvas') {
@@ -376,7 +395,7 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
useHotkeys(
'i',
() => {
if (currentImage) {
if (selectedImage) {
handleClickShowImageDetails();
} else {
toast({
@@ -387,7 +406,7 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
});
}
},
[currentImage, shouldShowImageDetails]
[selectedImage, shouldShowImageDetails]
);
const handleLightBox = () => {
@@ -448,7 +467,7 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
{t('parameters.copyImageToLink')}
</IAIButton>
<Link download={true} href={currentImage?.url}>
<Link download={true} href={getUrl(selectedImage!.url)}>
<IAIButton leftIcon={<FaDownload />} size="sm" w="100%">
{t('parameters.downloadImage')}
</IAIButton>
@@ -477,7 +496,7 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
icon={<FaQuoteRight />}
tooltip={`${t('parameters.usePrompt')} (P)`}
aria-label={`${t('parameters.usePrompt')} (P)`}
isDisabled={!currentImage?.metadata?.image?.prompt}
isDisabled={!selectedImage?.metadata?.sd_metadata?.prompt}
onClick={handleClickUsePrompt}
/>
@@ -485,7 +504,7 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
icon={<FaSeedling />}
tooltip={`${t('parameters.useSeed')} (S)`}
aria-label={`${t('parameters.useSeed')} (S)`}
isDisabled={!currentImage?.metadata?.image?.seed}
isDisabled={!selectedImage?.metadata?.sd_metadata?.seed}
onClick={handleClickUseSeed}
/>
@@ -495,7 +514,7 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
aria-label={`${t('parameters.useAll')} (A)`}
isDisabled={
!['txt2img', 'img2img'].includes(
currentImage?.metadata?.image?.type
selectedImage?.metadata?.sd_metadata?.type
)
}
onClick={handleClickUseAllParameters}
@@ -521,7 +540,7 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
<IAIButton
isDisabled={
!isGFPGANAvailable ||
!currentImage ||
!selectedImage ||
!(isConnected && !isProcessing) ||
!facetoolStrength
}
@@ -550,7 +569,7 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
<IAIButton
isDisabled={
!isESRGANAvailable ||
!currentImage ||
!selectedImage ||
!(isConnected && !isProcessing) ||
!upscalingLevel
}
@@ -572,15 +591,15 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
/>
</ButtonGroup>
<DeleteImageModal image={currentImage}>
{/* <DeleteImageModal image={selectedImage}>
<IAIIconButton
icon={<FaTrash />}
tooltip={`${t('parameters.deleteImage')} (Del)`}
aria-label={`${t('parameters.deleteImage')} (Del)`}
isDisabled={!currentImage || !isConnected || isProcessing}
isDisabled={!selectedImage || !isConnected || isProcessing}
colorScheme="error"
/>
</DeleteImageModal>
</DeleteImageModal> */}
</Flex>
);
};
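The buttons above route every image URL through `getUrl` from `useGetUrl`. A minimal sketch of such a hook, assuming it simply prefixes relative image paths with a configured API host (the constant below is illustrative, not from this branch):
// A hypothetical stand-in for common/util/getUrl, whose implementation is
// not shown in this diff. Assumption: it prefixes relative image paths
// with the API host so the UI can be served from a different origin.
const ASSUMED_API_HOST = 'http://localhost:9090'; // illustrative value
export const useGetUrl = () => {
  const getUrl = (url?: string) => {
    if (!url) return '';
    // Absolute URLs and data URLs (e.g. progress images) pass through untouched.
    if (url.startsWith('http') || url.startsWith('data:')) {
      return url;
    }
    return `${ASSUMED_API_HOST}/${url}`;
  };
  return { getUrl };
};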

View File

@@ -4,17 +4,20 @@ import { useAppSelector } from 'app/storeHooks';
import { isEqual } from 'lodash';
import { MdPhoto } from 'react-icons/md';
import { gallerySelector } from '../store/gallerySelectors';
import {
gallerySelector,
selectedImageSelector,
} from '../store/gallerySelectors';
import CurrentImageButtons from './CurrentImageButtons';
import CurrentImagePreview from './CurrentImagePreview';
export const currentImageDisplaySelector = createSelector(
[gallerySelector],
(gallery) => {
[gallerySelector, selectedImageSelector],
(gallery, selectedImage) => {
const { currentImage, intermediateImage } = gallery;
return {
hasAnImageToDisplay: currentImage || intermediateImage,
hasAnImageToDisplay: selectedImage || intermediateImage,
};
},
{

View File

@@ -1,26 +1,46 @@
import { Box, Flex, Image } from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/storeHooks';
import { GalleryState } from 'features/gallery/store/gallerySlice';
import { useGetUrl } from 'common/util/getUrl';
import { systemSelector } from 'features/system/store/systemSelectors';
import { uiSelector } from 'features/ui/store/uiSelectors';
import { isEqual } from 'lodash';
import { ReactEventHandler } from 'react';
import { APP_METADATA_HEIGHT } from 'theme/util/constants';
import { gallerySelector } from '../store/gallerySelectors';
import { selectedImageSelector } from '../store/gallerySelectors';
import CurrentImageFallback from './CurrentImageFallback';
import ImageMetadataViewer from './ImageMetaDataViewer/ImageMetadataViewer';
import NextPrevImageButtons from './NextPrevImageButtons';
export const imagesSelector = createSelector(
[gallerySelector, uiSelector],
(gallery: GalleryState, ui) => {
const { currentImage, intermediateImage } = gallery;
[uiSelector, selectedImageSelector, systemSelector],
(ui, selectedImage, system) => {
const { shouldShowImageDetails } = ui;
const { progressImage } = system;
// TODO: Clean this up, this is really gross
const imageToDisplay = progressImage
? {
url: progressImage.dataURL,
width: progressImage.width,
height: progressImage.height,
isProgressImage: true,
image: progressImage,
}
: selectedImage
? {
url: selectedImage.url,
width: selectedImage.metadata.width,
height: selectedImage.metadata.height,
isProgressImage: false,
image: selectedImage,
}
: null;
return {
imageToDisplay: intermediateImage ? intermediateImage : currentImage,
isIntermediate: Boolean(intermediateImage),
shouldShowImageDetails,
imageToDisplay,
};
},
{
@@ -31,8 +51,9 @@ export const imagesSelector = createSelector(
);
export default function CurrentImagePreview() {
const { shouldShowImageDetails, imageToDisplay, isIntermediate } =
const { shouldShowImageDetails, imageToDisplay } =
useAppSelector(imagesSelector);
const { getUrl } = useGetUrl();
return (
<Flex
@@ -46,37 +67,49 @@ export default function CurrentImagePreview() {
>
{imageToDisplay && (
<Image
src={imageToDisplay.url}
src={
imageToDisplay.isProgressImage
? imageToDisplay.url
: getUrl(imageToDisplay.url)
}
width={imageToDisplay.width}
height={imageToDisplay.height}
fallback={!isIntermediate ? <CurrentImageFallback /> : undefined}
fallback={
!imageToDisplay.isProgressImage ? (
<CurrentImageFallback />
) : undefined
}
sx={{
objectFit: 'contain',
maxWidth: '100%',
maxHeight: '100%',
height: 'auto',
position: 'absolute',
imageRendering: isIntermediate ? 'pixelated' : 'initial',
imageRendering: imageToDisplay.isProgressImage
? 'pixelated'
: 'initial',
borderRadius: 'base',
}}
/>
)}
{!shouldShowImageDetails && <NextPrevImageButtons />}
{shouldShowImageDetails && imageToDisplay && (
<Box
sx={{
position: 'absolute',
top: '0',
width: '100%',
height: '100%',
borderRadius: 'base',
overflow: 'scroll',
maxHeight: APP_METADATA_HEIGHT,
}}
>
<ImageMetadataViewer image={imageToDisplay} />
</Box>
)}
{shouldShowImageDetails &&
imageToDisplay &&
'metadata' in imageToDisplay.image && (
<Box
sx={{
position: 'absolute',
top: '0',
width: '100%',
height: '100%',
borderRadius: 'base',
overflow: 'scroll',
maxHeight: APP_METADATA_HEIGHT,
}}
>
<ImageMetadataViewer image={imageToDisplay.image} />
</Box>
)}
</Flex>
);
}
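The ternary in `imagesSelector` builds a small discriminated union with `isProgressImage` as the discriminant, which is why the render path narrows with `'metadata' in imageToDisplay.image` before mounting the metadata viewer. An illustrative sketch of the two shapes (type names are mine, not from the source):
// Illustrative types only; the selector above builds these shapes inline.
// `ProgressImage` is assumed from the socket event payload (dataURL + dimensions).
type ProgressImage = { dataURL: string; width: number; height: number };
type FinishedImage = {
  name: string;
  url: string;
  metadata: { width: number; height: number };
};
type ImageToDisplay =
  | {
      url: string; // the progress image's data URL
      width: number;
      height: number;
      isProgressImage: true;
      image: ProgressImage; // no `metadata` key, hence the `'metadata' in ...` guard
    }
  | {
      url: string;
      width: number;
      height: number;
      isProgressImage: false;
      image: FinishedImage; // carries `metadata` for ImageMetadataViewer
    };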

View File

@@ -52,7 +52,7 @@ interface DeleteImageModalProps {
/**
* The image to delete.
*/
image?: InvokeAI.Image;
image?: InvokeAI._Image;
}
/**

View File

@@ -9,11 +9,14 @@ import {
useToast,
} from '@chakra-ui/react';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import { setCurrentImage } from 'features/gallery/store/gallerySlice';
import {
imageSelected,
setCurrentImage,
} from 'features/gallery/store/gallerySlice';
import {
initialImageSelected,
setAllImageToImageParameters,
setAllParameters,
setInitialImage,
setSeed,
} from 'features/parameters/store/generationSlice';
import { DragEvent, memo, useState } from 'react';
@@ -31,6 +34,7 @@ import { useTranslation } from 'react-i18next';
import useSetBothPrompts from 'features/parameters/hooks/usePrompt';
import { setIsLightboxOpen } from 'features/lightbox/store/lightboxSlice';
import IAIIconButton from 'common/components/IAIIconButton';
import { useGetUrl } from 'common/util/getUrl';
interface HoverableImageProps {
image: InvokeAI.Image;
@@ -40,7 +44,7 @@ interface HoverableImageProps {
const memoEqualityCheck = (
prev: HoverableImageProps,
next: HoverableImageProps
) => prev.image.uuid === next.image.uuid && prev.isSelected === next.isSelected;
) => prev.image.name === next.image.name && prev.isSelected === next.isSelected;
/**
* Gallery image component with delete/use all/use seed buttons on hover.
@@ -55,7 +59,8 @@ const HoverableImage = memo((props: HoverableImageProps) => {
shouldUseSingleGalleryColumn,
} = useAppSelector(hoverableImageSelector);
const { image, isSelected } = props;
const { url, thumbnail, uuid, metadata } = image;
const { url, thumbnail, name, metadata } = image;
const { getUrl } = useGetUrl();
const [isHovered, setIsHovered] = useState<boolean>(false);
@@ -69,10 +74,9 @@ const HoverableImage = memo((props: HoverableImageProps) => {
const handleMouseOut = () => setIsHovered(false);
const handleUsePrompt = () => {
if (image.metadata?.image?.prompt) {
setBothPrompts(image.metadata?.image?.prompt);
if (image.metadata?.sd_metadata?.prompt) {
setBothPrompts(image.metadata?.sd_metadata?.prompt);
}
toast({
title: t('toast.promptSet'),
status: 'success',
@@ -82,7 +86,8 @@ const HoverableImage = memo((props: HoverableImageProps) => {
};
const handleUseSeed = () => {
image.metadata && dispatch(setSeed(image.metadata.image.seed));
image.metadata.sd_metadata &&
dispatch(setSeed(image.metadata.sd_metadata.image.seed));
toast({
title: t('toast.seedSet'),
status: 'success',
@@ -92,20 +97,11 @@ const HoverableImage = memo((props: HoverableImageProps) => {
};
const handleSendToImageToImage = () => {
dispatch(setInitialImage(image));
if (activeTabName !== 'img2img') {
dispatch(setActiveTab('img2img'));
}
toast({
title: t('toast.sentToImageToImage'),
status: 'success',
duration: 2500,
isClosable: true,
});
dispatch(initialImageSelected(image.name));
};
const handleSendToCanvas = () => {
dispatch(setInitialCanvasImage(image));
// dispatch(setInitialCanvasImage(image));
dispatch(resizeAndScaleCanvas());
@@ -122,7 +118,7 @@ const HoverableImage = memo((props: HoverableImageProps) => {
};
const handleUseAllParameters = () => {
metadata && dispatch(setAllParameters(metadata));
metadata.sd_metadata && dispatch(setAllParameters(metadata.sd_metadata));
toast({
title: t('toast.parametersSet'),
status: 'success',
@@ -132,11 +128,13 @@ const HoverableImage = memo((props: HoverableImageProps) => {
};
const handleUseInitialImage = async () => {
if (metadata?.image?.init_image_path) {
const response = await fetch(metadata.image.init_image_path);
if (metadata.sd_metadata?.image?.init_image_path) {
const response = await fetch(
metadata.sd_metadata?.image?.init_image_path
);
if (response.ok) {
dispatch(setActiveTab('img2img'));
dispatch(setAllImageToImageParameters(metadata));
dispatch(setAllImageToImageParameters(metadata?.sd_metadata));
toast({
title: t('toast.initialImageSet'),
status: 'success',
@@ -155,16 +153,18 @@ const HoverableImage = memo((props: HoverableImageProps) => {
});
};
const handleSelectImage = () => dispatch(setCurrentImage(image));
const handleSelectImage = () => {
dispatch(imageSelected(image.name));
};
const handleDragStart = (e: DragEvent<HTMLDivElement>) => {
e.dataTransfer.setData('invokeai/imageUuid', uuid);
e.dataTransfer.effectAllowed = 'move';
// e.dataTransfer.setData('invokeai/imageUuid', uuid);
// e.dataTransfer.effectAllowed = 'move';
};
const handleLightBox = () => {
dispatch(setCurrentImage(image));
dispatch(setIsLightboxOpen(true));
// dispatch(setCurrentImage(image));
// dispatch(setIsLightboxOpen(true));
};
return (
@@ -177,28 +177,30 @@ const HoverableImage = memo((props: HoverableImageProps) => {
</MenuItem>
<MenuItem
onClickCapture={handleUsePrompt}
isDisabled={image?.metadata?.image?.prompt === undefined}
isDisabled={image?.metadata?.sd_metadata?.prompt === undefined}
>
{t('parameters.usePrompt')}
</MenuItem>
<MenuItem
onClickCapture={handleUseSeed}
isDisabled={image?.metadata?.image?.seed === undefined}
isDisabled={image?.metadata?.sd_metadata?.seed === undefined}
>
{t('parameters.useSeed')}
</MenuItem>
<MenuItem
onClickCapture={handleUseAllParameters}
isDisabled={
!['txt2img', 'img2img'].includes(image?.metadata?.image?.type)
!['txt2img', 'img2img'].includes(
image?.metadata?.sd_metadata?.type
)
}
>
{t('parameters.useAll')}
</MenuItem>
<MenuItem
onClickCapture={handleUseInitialImage}
isDisabled={image?.metadata?.image?.type !== 'img2img'}
isDisabled={image?.metadata?.sd_metadata?.type !== 'img2img'}
>
{t('parameters.useInitImg')}
</MenuItem>
@@ -209,9 +211,9 @@ const HoverableImage = memo((props: HoverableImageProps) => {
{t('parameters.sendToUnifiedCanvas')}
</MenuItem>
<MenuItem data-warning>
<DeleteImageModal image={image}>
{/* <DeleteImageModal image={image}>
<p>{t('parameters.deleteImage')}</p>
</DeleteImageModal>
</DeleteImageModal> */}
</MenuItem>
</MenuList>
)}
@@ -219,7 +221,7 @@ const HoverableImage = memo((props: HoverableImageProps) => {
{(ref) => (
<Box
position="relative"
key={uuid}
key={name}
onMouseOver={handleMouseOver}
onMouseOut={handleMouseOut}
userSelect="none"
@@ -244,7 +246,7 @@ const HoverableImage = memo((props: HoverableImageProps) => {
shouldUseSingleGalleryColumn ? 'contain' : galleryImageObjectFit
}
rounded="md"
src={thumbnail || url}
src={getUrl(thumbnail || url)}
loading="lazy"
sx={{
position: 'absolute',
@@ -290,7 +292,7 @@ const HoverableImage = memo((props: HoverableImageProps) => {
insetInlineEnd: 1,
}}
>
<DeleteImageModal image={image}>
{/* <DeleteImageModal image={image}>
<IAIIconButton
aria-label={t('parameters.deleteImage')}
icon={<FaTrashAlt />}
@@ -298,7 +300,7 @@ const HoverableImage = memo((props: HoverableImageProps) => {
fontSize={14}
isDisabled={!mayDeleteImage}
/>
</DeleteImageModal>
</DeleteImageModal> */}
</Box>
)}
</Box>

View File

@@ -1,4 +1,4 @@
import { ButtonGroup, Flex, Grid, Icon, Text } from '@chakra-ui/react';
import { ButtonGroup, Flex, Grid, Icon, Image, Text } from '@chakra-ui/react';
import { requestImages } from 'app/socketio/actions';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAIButton from 'common/components/IAIButton';
@@ -25,9 +25,44 @@ import HoverableImage from './HoverableImage';
import Scrollable from 'features/ui/components/common/Scrollable';
import { requestCanvasRescale } from 'features/canvas/store/thunks/requestCanvasScale';
import {
resultsAdapter,
selectResultsAll,
selectResultsTotal,
} from '../store/resultsSlice';
import {
receivedResultImagesPage,
receivedUploadImagesPage,
} from 'services/thunks/gallery';
import { selectUploadsAll, uploadsAdapter } from '../store/uploadsSlice';
import { createSelector } from '@reduxjs/toolkit';
import { RootState } from 'app/store';
const GALLERY_SHOW_BUTTONS_MIN_WIDTH = 290;
const gallerySelector = createSelector(
[
(state: RootState) => state.uploads,
(state: RootState) => state.results,
(state: RootState) => state.gallery,
],
(uploads, results, gallery) => {
const { currentCategory } = gallery;
return currentCategory === 'result'
? {
images: resultsAdapter.getSelectors().selectAll(results),
isLoading: results.isLoading,
areMoreImagesAvailable: results.page < results.pages - 1,
}
: {
images: uploadsAdapter.getSelectors().selectAll(uploads),
isLoading: uploads.isLoading,
areMoreImagesAvailable: uploads.page < uploads.pages - 1,
};
}
);
const ImageGalleryContent = () => {
const dispatch = useAppDispatch();
const { t } = useTranslation();
@@ -35,7 +70,7 @@ const ImageGalleryContent = () => {
const [shouldShouldIconButtons, setShouldShouldIconButtons] = useState(true);
const {
images,
// images,
currentCategory,
currentImageUuid,
shouldPinGallery,
@@ -43,12 +78,24 @@ const ImageGalleryContent = () => {
galleryGridTemplateColumns,
galleryImageObjectFit,
shouldAutoSwitchToNewImages,
areMoreImagesAvailable,
// areMoreImagesAvailable,
shouldUseSingleGalleryColumn,
} = useAppSelector(imageGallerySelector);
const { images, areMoreImagesAvailable, isLoading } =
useAppSelector(gallerySelector);
// const handleClickLoadMore = () => {
// dispatch(requestImages(currentCategory));
// };
const handleClickLoadMore = () => {
dispatch(requestImages(currentCategory));
if (currentCategory === 'result') {
dispatch(receivedResultImagesPage());
}
if (currentCategory === 'user') {
dispatch(receivedUploadImagesPage());
}
};
const handleChangeGalleryImageMinimumWidth = (v: number) => {
@@ -203,11 +250,11 @@ const ImageGalleryContent = () => {
style={{ gridTemplateColumns: galleryGridTemplateColumns }}
>
{images.map((image) => {
const { uuid } = image;
const isSelected = currentImageUuid === uuid;
const { name } = image;
const isSelected = currentImageUuid === name;
return (
<HoverableImage
key={uuid}
key={name}
image={image}
isSelected={isSelected}
/>
@@ -217,6 +264,7 @@ const ImageGalleryContent = () => {
<IAIButton
onClick={handleClickLoadMore}
isDisabled={!areMoreImagesAvailable}
isLoading={isLoading}
flexShrink={0}
>
{areMoreImagesAvailable

View File

@@ -11,6 +11,7 @@ import {
} from '@chakra-ui/react';
import * as InvokeAI from 'app/invokeai';
import { useAppDispatch } from 'app/storeHooks';
import { useGetUrl } from 'common/util/getUrl';
import promptToString from 'common/util/promptToString';
import { seedWeightsToString } from 'common/util/seedWeightPairs';
import useSetBothPrompts from 'features/parameters/hooks/usePrompt';
@@ -18,7 +19,7 @@ import {
setCfgScale,
setHeight,
setImg2imgStrength,
setInitialImage,
// setInitialImage,
setMaskPath,
setPerlin,
setSampler,
@@ -45,6 +46,7 @@ import { useHotkeys } from 'react-hotkeys-hook';
import { useTranslation } from 'react-i18next';
import { FaCopy } from 'react-icons/fa';
import { IoArrowUndoCircleOutline } from 'react-icons/io5';
import * as png from '@stevebel/png';
type MetadataItemProps = {
isLink?: boolean;
@@ -120,7 +122,7 @@ type ImageMetadataViewerProps = {
const memoEqualityCheck = (
prev: ImageMetadataViewerProps,
next: ImageMetadataViewerProps
) => prev.image.uuid === next.image.uuid;
) => prev.image.name === next.image.name;
// TODO: Show more interesting information in this component.
@@ -137,8 +139,8 @@ const ImageMetadataViewer = memo(({ image }: ImageMetadataViewerProps) => {
dispatch(setShouldShowImageDetails(false));
});
const metadata = image?.metadata?.image || {};
const dreamPrompt = image?.dreamPrompt;
const metadata = image?.metadata.sd_metadata || {};
const dreamPrompt = image?.metadata.sd_metadata?.dreamPrompt;
const {
cfg_scale,
@@ -160,11 +162,23 @@ const ImageMetadataViewer = memo(({ image }: ImageMetadataViewerProps) => {
type,
variations,
width,
model_weights,
} = metadata;
const { t } = useTranslation();
const { getUrl } = useGetUrl();
const metadataJSON = JSON.stringify(image.metadata, null, 2);
const metadataJSON = JSON.stringify(image, null, 2);
// fetch(getUrl(image.url))
// .then((r) => r.arrayBuffer())
// .then((buffer) => {
// const { text } = png.decode(buffer);
// const metadata = text?.['sd-metadata']
// ? JSON.parse(text['sd-metadata'] ?? {})
// : {};
// console.log(metadata);
// });
return (
<Flex
@@ -183,18 +197,49 @@ const ImageMetadataViewer = memo(({ image }: ImageMetadataViewerProps) => {
>
<Flex gap={2}>
<Text fontWeight="semibold">File:</Text>
<Link href={image.url} isExternal maxW="calc(100% - 3rem)">
<Link href={getUrl(image.url)} isExternal maxW="calc(100% - 3rem)">
{image.url.length > 64
? image.url.substring(0, 64).concat('...')
: image.url}
<ExternalLinkIcon mx="2px" />
</Link>
</Flex>
<Flex gap={2} direction="column">
<Flex gap={2}>
<Tooltip label="Copy metadata JSON">
<IconButton
aria-label={t('accessibility.copyMetadataJson')}
icon={<FaCopy />}
size="xs"
variant="ghost"
fontSize={14}
onClick={() => navigator.clipboard.writeText(metadataJSON)}
/>
</Tooltip>
<Text fontWeight="semibold">Metadata JSON:</Text>
</Flex>
<Box
sx={{
mt: 0,
mr: 2,
mb: 4,
ml: 2,
padding: 4,
borderRadius: 'base',
overflowX: 'scroll',
wordBreak: 'break-all',
bg: 'whiteAlpha.500',
_dark: { bg: 'blackAlpha.500' },
}}
>
<pre>{metadataJSON}</pre>
</Box>
</Flex>
{Object.keys(metadata).length > 0 ? (
<>
{type && <MetadataItem label="Generation type" value={type} />}
{image.metadata?.model_weights && (
<MetadataItem label="Model" value={image.metadata.model_weights} />
{model_weights && (
<MetadataItem label="Model" value={model_weights} />
)}
{['esrgan', 'gfpgan'].includes(type) && (
<MetadataItem label="Original image" value={orig_path} />
@@ -288,14 +333,14 @@ const ImageMetadataViewer = memo(({ image }: ImageMetadataViewerProps) => {
onClick={() => dispatch(setHeight(height))}
/>
)}
{init_image_path && (
{/* {init_image_path && (
<MetadataItem
label="Initial image"
value={init_image_path}
isLink
onClick={() => dispatch(setInitialImage(init_image_path))}
/>
)}
)} */}
{mask_image_path && (
<MetadataItem
label="Mask image"
@@ -408,37 +453,6 @@ const ImageMetadataViewer = memo(({ image }: ImageMetadataViewerProps) => {
{dreamPrompt && (
<MetadataItem withCopy label="Dream Prompt" value={dreamPrompt} />
)}
<Flex gap={2} direction="column">
<Flex gap={2}>
<Tooltip label="Copy metadata JSON">
<IconButton
aria-label={t('accessibility.copyMetadataJson')}
icon={<FaCopy />}
size="xs"
variant="ghost"
fontSize={14}
onClick={() => navigator.clipboard.writeText(metadataJSON)}
/>
</Tooltip>
<Text fontWeight="semibold">Metadata JSON:</Text>
</Flex>
<Box
sx={{
mt: 0,
mr: 2,
mb: 4,
ml: 2,
padding: 4,
borderRadius: 'base',
overflowX: 'scroll',
wordBreak: 'break-all',
bg: 'whiteAlpha.500',
_dark: { bg: 'blackAlpha.500' },
}}
>
<pre>{metadataJSON}</pre>
</Box>
</Flex>
</>
) : (
<Center width="100%" pt={10}>

View File

@@ -7,6 +7,16 @@ import {
uiSelector,
} from 'features/ui/store/uiSelectors';
import { isEqual } from 'lodash';
import {
selectResultsAll,
selectResultsById,
selectResultsEntities,
} from './resultsSlice';
import {
selectUploadsAll,
selectUploadsById,
selectUploadsEntities,
} from './uploadsSlice';
export const gallerySelector = (state: RootState) => state.gallery;
@@ -75,3 +85,18 @@ export const hoverableImageSelector = createSelector(
},
}
);
export const selectedImageSelector = createSelector(
[gallerySelector, selectResultsEntities, selectUploadsEntities],
(gallery, allResults, allUploads) => {
const selectedImageName = gallery.selectedImageName;
if (selectedImageName in allResults) {
return allResults[selectedImageName];
}
if (selectedImageName in allUploads) {
return allUploads[selectedImageName];
}
}
);
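A brief usage sketch for the new selector (the consuming component is illustrative): it resolves the selected name against whichever entity slice, results or uploads, actually owns the image, and yields `undefined` when neither does.
import { useAppSelector } from 'app/storeHooks';
import { selectedImageSelector } from 'features/gallery/store/gallerySelectors';
// Illustrative consumer -- not part of this branch.
const SelectedImageName = () => {
  const selectedImage = useAppSelector(selectedImageSelector);
  // The selector returns undefined when the stored name matches neither
  // the results nor the uploads entities, so handle the empty case.
  if (!selectedImage) return null;
  return <span>{selectedImage.name}</span>;
};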

View File

@@ -1,14 +1,17 @@
import type { PayloadAction } from '@reduxjs/toolkit';
import { createSlice } from '@reduxjs/toolkit';
import * as InvokeAI from 'app/invokeai';
import { invocationComplete } from 'services/events/actions';
import { InvokeTabName } from 'features/ui/store/tabMap';
import { IRect } from 'konva/lib/types';
import { clamp } from 'lodash';
import { isImageOutput } from 'services/types/guards';
import { imageUploaded } from 'services/thunks/image';
export type GalleryCategory = 'user' | 'result';
export type AddImagesPayload = {
images: Array<InvokeAI.Image>;
images: Array<InvokeAI._Image>;
areMoreImagesAvailable: boolean;
category: GalleryCategory;
};
@@ -16,16 +19,33 @@ export type AddImagesPayload = {
type GalleryImageObjectFitType = 'contain' | 'cover';
export type Gallery = {
images: InvokeAI.Image[];
images: InvokeAI._Image[];
latest_mtime?: number;
earliest_mtime?: number;
areMoreImagesAvailable: boolean;
};
export interface GalleryState {
currentImage?: InvokeAI.Image;
/**
* The selected image's unique name
* Use `selectedImageSelector` to access the image
*/
selectedImageName: string;
/**
* The currently selected image
* @deprecated See `state.gallery.selectedImageName`
*/
currentImage?: InvokeAI._Image;
/**
* The currently selected image's uuid.
* @deprecated See `state.gallery.selectedImageName`, use `selectedImageSelector` to access the image
*/
currentImageUuid: string;
intermediateImage?: InvokeAI.Image & {
/**
* The current progress image
* @deprecated See `state.system.progressImage`
*/
intermediateImage?: InvokeAI._Image & {
boundingBox?: IRect;
generationMode?: InvokeTabName;
};
@@ -42,6 +62,7 @@ export interface GalleryState {
}
const initialState: GalleryState = {
selectedImageName: '',
currentImageUuid: '',
galleryImageMinimumWidth: 64,
galleryImageObjectFit: 'cover',
@@ -69,7 +90,10 @@ export const gallerySlice = createSlice({
name: 'gallery',
initialState,
reducers: {
setCurrentImage: (state, action: PayloadAction<InvokeAI.Image>) => {
imageSelected: (state, action: PayloadAction<string>) => {
state.selectedImageName = action.payload;
},
setCurrentImage: (state, action: PayloadAction<InvokeAI._Image>) => {
state.currentImage = action.payload;
state.currentImageUuid = action.payload.uuid;
},
@@ -124,7 +148,7 @@ export const gallerySlice = createSlice({
addImage: (
state,
action: PayloadAction<{
image: InvokeAI.Image;
image: InvokeAI._Image;
category: GalleryCategory;
}>
) => {
@@ -150,7 +174,10 @@ export const gallerySlice = createSlice({
setIntermediateImage: (
state,
action: PayloadAction<
InvokeAI.Image & { boundingBox?: IRect; generationMode?: InvokeTabName }
InvokeAI._Image & {
boundingBox?: IRect;
generationMode?: InvokeTabName;
}
>
) => {
state.intermediateImage = action.payload;
@@ -252,9 +279,31 @@ export const gallerySlice = createSlice({
state.shouldUseSingleGalleryColumn = action.payload;
},
},
extraReducers(builder) {
/**
* Invocation Complete
*/
builder.addCase(invocationComplete, (state, action) => {
const { data } = action.payload;
if (isImageOutput(data.result)) {
state.selectedImageName = data.result.image.image_name;
state.intermediateImage = undefined;
}
});
/**
* Upload Image - FULFILLED
*/
builder.addCase(imageUploaded.fulfilled, (state, action) => {
const { location } = action.payload;
const imageName = location.split('/').pop() || '';
state.selectedImageName = imageName;
});
},
});
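The `imageUploaded.fulfilled` case above keeps only the final path segment of the returned `location`; a tiny trace with an illustrative value:
// Illustrative trace of the name extraction in the upload reducer:
const location = 'api/v1/images/uploads/1681300000.123.png'; // example value
const imageName = location.split('/').pop() || '';
// imageName === '1681300000.123.png'; the fallback covers an empty location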
export const {
imageSelected,
addImage,
clearIntermediateImage,
removeImage,

View File

@@ -0,0 +1,149 @@
import { createEntityAdapter, createSlice } from '@reduxjs/toolkit';
import { Image } from 'app/invokeai';
import { invocationComplete } from 'services/events/actions';
import { RootState } from 'app/store';
import {
receivedResultImagesPage,
IMAGES_PER_PAGE,
} from 'services/thunks/gallery';
import { isImageOutput } from 'services/types/guards';
import {
buildImageUrls,
deserializeImageField,
extractTimestampFromImageName,
} from 'services/util/deserializeImageField';
import { deserializeImageResponse } from 'services/util/deserializeImageResponse';
import { getUrlAlt } from 'common/util/getUrl';
import { ImageMetadata } from 'services/api';
// import { deserializeImageField } from 'services/util/deserializeImageField';
// use `createEntityAdapter` to create a slice for results images
// https://redux-toolkit.js.org/api/createEntityAdapter#overview
// the "Entity" is InvokeAI.ResultImage, while the "entities" are instances of that type
export const resultsAdapter = createEntityAdapter<Image>({
// Provide a callback to get a stable, unique identifier for each entity. This defaults to
// `(item) => item.id`, but for our result images, the `name` is the unique identifier.
selectId: (image) => image.name,
// Order all images by their time (in descending order)
sortComparer: (a, b) => b.metadata.created - a.metadata.created,
});
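A minimal sketch of the adapter in action, assuming the import path and abbreviating the `Image` shape to the fields the adapter itself touches:
import { Image } from 'app/invokeai';
import {
  resultsAdapter,
  initialResultsState,
} from 'features/gallery/store/resultsSlice'; // assumed path
const exampleImage = {
  name: '1681300000.png', // hypothetical result image
  type: 'results',
  url: 'results/1681300000.png',
  thumbnail: 'thumbnails/1681300000.png',
  metadata: { created: 1681300000 },
} as Image; // abbreviated shape, cast for the sketch
// upsertOne is keyed by `name` (the selectId above), so applying the same
// image twice updates it in place rather than duplicating it.
const once = resultsAdapter.upsertOne(initialResultsState, exampleImage);
const twice = resultsAdapter.upsertOne(once, { ...exampleImage, url: 'changed.png' });
// twice.ids.length === 1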
// This type is intersected with the Entity type to create the shape of the state
type AdditionalResultsState = {
// these are a bit misleading; they refer to sessions, not results, but we don't have a route
// to list all images directly at this time...
page: number; // current page we are on
pages: number; // the total number of pages available
isLoading: boolean; // whether we are loading more images or not, mostly a placeholder
nextPage: number; // the next page to request
};
// export type ResultsState = ReturnType<
// typeof resultsAdapter.getInitialState<AdditionalResultsState>
// >;
export const initialResultsState =
resultsAdapter.getInitialState<AdditionalResultsState>({
// provide the additional initial state
page: 0,
pages: 0,
isLoading: false,
nextPage: 0,
});
export type ResultsState = typeof initialResultsState;
const resultsSlice = createSlice({
name: 'results',
initialState: initialResultsState,
reducers: {
// the adapter provides some helper reducers; see the docs for all of them
// can use them as helper functions within a reducer, or use the function itself as a reducer
// here we just use the function itself as the reducer. we'll call this on `invocation_complete`
// to add a single result
resultAdded: resultsAdapter.upsertOne,
},
extraReducers: (builder) => {
// here we can respond to a fulfilled call of the `getNextResultsPage` thunk
// because we pass in the fulfilled thunk action creator, everything is typed
/**
* Received Result Images Page - PENDING
*/
builder.addCase(receivedResultImagesPage.pending, (state) => {
state.isLoading = true;
});
/**
* Received Result Images Page - FULFILLED
*/
builder.addCase(receivedResultImagesPage.fulfilled, (state, action) => {
const { items, page, pages } = action.payload;
const resultImages = items.map((image) =>
deserializeImageResponse(image)
);
// use the adapter reducer to append all the results to state
resultsAdapter.addMany(state, resultImages);
state.page = page;
state.pages = pages;
state.nextPage = items.length < IMAGES_PER_PAGE ? page : page + 1;
state.isLoading = false;
});
/**
* Invocation Complete
*/
builder.addCase(invocationComplete, (state, action) => {
const { data } = action.payload;
const { result, invocation, graph_execution_state_id, source_id } = data;
if (isImageOutput(result)) {
const name = result.image.image_name;
const type = result.image.image_type;
const { url, thumbnail } = buildImageUrls(type, name);
const timestamp = extractTimestampFromImageName(name);
const image: Image = {
name,
type,
url,
thumbnail,
metadata: {
created: timestamp,
width: result.width, // TODO: add these dimensions
height: result.height,
invokeai: {
session: graph_execution_state_id,
source_id,
invocation,
},
},
};
// const resultImage = deserializeImageField(result.image, invocation);
resultsAdapter.addOne(state, image);
}
});
},
});
// Create a set of memoized selectors based on the location of this entity state
// to be used as selectors in a `useAppSelector()` call
export const {
selectAll: selectResultsAll,
selectById: selectResultsById,
selectEntities: selectResultsEntities,
selectIds: selectResultsIds,
selectTotal: selectResultsTotal,
} = resultsAdapter.getSelectors<RootState>((state) => state.results);
export const { resultAdded } = resultsSlice.actions;
export default resultsSlice.reducer;
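The `nextPage` bookkeeping in the fulfilled case deserves a concrete trace. With an assumed IMAGES_PER_PAGE of 20, a full page advances the cursor while a short page re-requests the same page, so results that arrive later are still picked up:
const IMAGES_PER_PAGE = 20; // assumed value of the constant from services/thunks/gallery
const nextPageAfter = (page: number, itemCount: number) =>
  itemCount < IMAGES_PER_PAGE ? page : page + 1;
// nextPageAfter(0, 20) === 1  -- full page: move on to the next page
// nextPageAfter(1, 7)  === 1  -- short page: ask for the same page again later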

View File

@@ -1,54 +0,0 @@
import { AnyAction, ThunkAction } from '@reduxjs/toolkit';
import * as InvokeAI from 'app/invokeai';
import { RootState } from 'app/store';
import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice';
import { setInitialImage } from 'features/parameters/store/generationSlice';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { v4 as uuidv4 } from 'uuid';
import { addImage } from '../gallerySlice';
type UploadImageConfig = {
imageFile: File;
};
export const uploadImage =
(
config: UploadImageConfig
): ThunkAction<void, RootState, unknown, AnyAction> =>
async (dispatch, getState) => {
const { imageFile } = config;
const state = getState() as RootState;
const activeTabName = activeTabNameSelector(state);
const formData = new FormData();
formData.append('file', imageFile, imageFile.name);
formData.append(
'data',
JSON.stringify({
kind: 'init',
})
);
const response = await fetch(`${window.location.origin}/upload`, {
method: 'POST',
body: formData,
});
const image = (await response.json()) as InvokeAI.ImageUploadResponse;
const newImage: InvokeAI.Image = {
uuid: uuidv4(),
category: 'user',
...image,
};
dispatch(addImage({ image: newImage, category: 'user' }));
if (activeTabName === 'unifiedCanvas') {
dispatch(setInitialCanvasImage(newImage));
} else if (activeTabName === 'img2img') {
dispatch(setInitialImage(newImage));
}
};

View File

@@ -0,0 +1,95 @@
import { createEntityAdapter, createSlice } from '@reduxjs/toolkit';
import { Image } from 'app/invokeai';
import { RootState } from 'app/store';
import {
receivedUploadImagesPage,
IMAGES_PER_PAGE,
} from 'services/thunks/gallery';
import { imageUploaded } from 'services/thunks/image';
import { deserializeImageField } from 'services/util/deserializeImageField';
import { deserializeImageResponse } from 'services/util/deserializeImageResponse';
export const uploadsAdapter = createEntityAdapter<Image>({
selectId: (image) => image.name,
sortComparer: (a, b) => b.metadata.created - a.metadata.created,
});
type AdditionalUploadsState = {
page: number;
pages: number;
isLoading: boolean;
nextPage: number;
};
export type UploadsState = ReturnType<
typeof uploadsAdapter.getInitialState<AdditionalUploadsState>
>;
const uploadsSlice = createSlice({
name: 'uploads',
initialState: uploadsAdapter.getInitialState<AdditionalUploadsState>({
page: 0,
pages: 0,
nextPage: 0,
isLoading: false,
}),
reducers: {
uploadAdded: uploadsAdapter.addOne,
},
extraReducers: (builder) => {
/**
* Received Upload Images Page - PENDING
*/
builder.addCase(receivedUploadImagesPage.pending, (state) => {
state.isLoading = true;
});
/**
* Received Upload Images Page - FULFILLED
*/
builder.addCase(receivedUploadImagesPage.fulfilled, (state, action) => {
const { items, page, pages } = action.payload;
const images = items.map((image) => deserializeImageResponse(image));
uploadsAdapter.addMany(state, images);
state.page = page;
state.pages = pages;
state.nextPage = items.length < IMAGES_PER_PAGE ? page : page + 1;
state.isLoading = false;
});
/**
* Upload Image - FULFILLED
*/
builder.addCase(imageUploaded.fulfilled, (state, action) => {
const { location, response } = action.payload;
const { image_name, image_url, image_type, metadata, thumbnail_url } =
response;
const uploadedImage: Image = {
name: image_name,
url: image_url,
thumbnail: thumbnail_url,
type: 'uploads',
metadata,
};
uploadsAdapter.addOne(state, uploadedImage);
});
},
});
export const {
selectAll: selectUploadsAll,
selectById: selectUploadsById,
selectEntities: selectUploadsEntities,
selectIds: selectUploadsIds,
selectTotal: selectUploadsTotal,
} = uploadsAdapter.getSelectors<RootState>((state) => state.uploads);
export const { uploadAdded } = uploadsSlice.actions;
export default uploadsSlice.reducer;

View File

@@ -1,9 +1,10 @@
import * as React from 'react';
import { TransformComponent, useTransformContext } from 'react-zoom-pan-pinch';
import * as InvokeAI from 'app/invokeai';
import { useGetUrl } from 'common/util/getUrl';
type ReactPanZoomProps = {
image: InvokeAI.Image;
image: InvokeAI._Image;
styleClass?: string;
alt?: string;
ref?: React.Ref<HTMLImageElement>;
@@ -22,6 +23,7 @@ export default function ReactPanZoomImage({
scaleY,
}: ReactPanZoomProps) {
const { centerView } = useTransformContext();
const { getUrl } = useGetUrl();
return (
<TransformComponent
@@ -35,7 +37,7 @@ export default function ReactPanZoomImage({
transform: `rotate(${rotation}deg) scaleX(${scaleX}) scaleY(${scaleY})`,
width: '100%',
}}
src={image.url}
src={getUrl(image.url)}
alt={alt}
ref={ref}
className={styleClass ? styleClass : ''}

View File

@@ -0,0 +1,47 @@
import { v4 as uuidv4 } from 'uuid';
import 'reactflow/dist/style.css';
import { useCallback } from 'react';
import {
Tooltip,
Menu,
MenuButton,
MenuList,
MenuItem,
IconButton,
} from '@chakra-ui/react';
import { FaPlus } from 'react-icons/fa';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import { nodeAdded } from '../store/nodesSlice';
import { map } from 'lodash';
import { RootState } from 'app/store';
export const AddNodeMenu = () => {
const dispatch = useAppDispatch();
const invocations = useAppSelector(
(state: RootState) => state.nodes.invocations
);
const addNode = useCallback(
(nodeType: string) => {
dispatch(nodeAdded({ id: uuidv4(), invocation: invocations[nodeType] }));
},
[dispatch, invocations]
);
return (
<Menu>
<MenuButton as={IconButton} aria-label="Add Node" icon={<FaPlus />} />
<MenuList>
{map(invocations, ({ title, description, type }, key) => {
return (
<Tooltip key={key} label={description} placement="end" hasArrow>
<MenuItem onClick={() => addNode(type)}>{title}</MenuItem>
</Tooltip>
);
})}
</MenuList>
</Menu>
);
};

View File

@@ -0,0 +1,78 @@
import { Tooltip } from '@chakra-ui/react';
import { CSSProperties, useMemo } from 'react';
import {
Handle,
Position,
Connection,
HandleType,
useReactFlow,
} from 'reactflow';
import { FIELDS, HANDLE_TOOLTIP_OPEN_DELAY } from '../constants';
// import { useConnectionEventStyles } from '../hooks/useConnectionEventStyles';
import { InputField, OutputField } from '../types';
const handleBaseStyles: CSSProperties = {
position: 'absolute',
width: '1rem',
height: '1rem',
opacity: 0.5,
borderWidth: 0,
};
const inputHandleStyles: CSSProperties = {
left: '-1.7rem',
};
const outputHandleStyles: CSSProperties = {
right: '-1.7rem',
};
const requiredConnectionStyles: CSSProperties = {
opacity: 1,
};
type FieldHandleProps = {
nodeId: string;
field: InputField | OutputField;
isValidConnection: (connection: Connection) => boolean;
handleType: HandleType;
styles?: CSSProperties;
};
export const FieldHandle = (props: FieldHandleProps) => {
const { nodeId, field, isValidConnection, handleType, styles } = props;
const { name, title, type, description, connectionType } = field;
// this needs to iterate over every candidate target node, calculating graph cycles
// WIP
// const connectionEventStyles = useConnectionEventStyles(
// nodeId,
// type,
// handleType
// );
return (
<Tooltip
key={name}
label={`${title} (${type})`}
placement={handleType === 'target' ? 'start' : 'end'}
hasArrow
openDelay={HANDLE_TOOLTIP_OPEN_DELAY}
>
<Handle
type={handleType}
id={name}
isValidConnection={isValidConnection}
position={handleType === 'target' ? Position.Left : Position.Right}
style={{
backgroundColor: `var(--invokeai-colors-${FIELDS[type].color}-500)`,
...styles,
...handleBaseStyles,
...(handleType === 'target' ? inputHandleStyles : outputHandleStyles),
...(connectionType === 'always' ? requiredConnectionStyles : {}),
// ...connectionEventStyles,
}}
/>
</Tooltip>
);
};

View File

@@ -0,0 +1,18 @@
import 'reactflow/dist/style.css';
import { Tooltip, Badge, HStack } from '@chakra-ui/react';
import { map } from 'lodash';
import { FIELDS } from '../constants';
export const FieldTypeLegend = () => {
return (
<HStack>
{map(FIELDS, ({ title, description, color }, key) => (
<Tooltip key={key} label={description}>
<Badge colorScheme={color} sx={{ userSelect: 'none' }}>
{title}
</Badge>
</Tooltip>
))}
</HStack>
);
};

View File

@@ -0,0 +1,104 @@
import {
Background,
Controls,
MiniMap,
OnConnect,
OnEdgesChange,
OnNodesChange,
ReactFlow,
ConnectionLineType,
OnConnectStart,
OnConnectEnd,
Panel,
} from 'reactflow';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import { RootState } from 'app/store';
import {
connectionEnded,
connectionMade,
connectionStarted,
edgesChanged,
nodesChanged,
} from '../store/nodesSlice';
import { useCallback } from 'react';
import { InvocationComponent } from './InvocationComponent';
import { AddNodeMenu } from './AddNodeMenu';
import { FieldTypeLegend } from './FieldTypeLegend';
import { Button } from '@chakra-ui/react';
import { nodesGraphBuilt } from 'services/thunks/session';
const nodeTypes = { invocation: InvocationComponent };
export const Flow = () => {
const dispatch = useAppDispatch();
const nodes = useAppSelector((state: RootState) => state.nodes.nodes);
const edges = useAppSelector((state: RootState) => state.nodes.edges);
const onNodesChange: OnNodesChange = useCallback(
(changes) => {
dispatch(nodesChanged(changes));
},
[dispatch]
);
const onEdgesChange: OnEdgesChange = useCallback(
(changes) => {
dispatch(edgesChanged(changes));
},
[dispatch]
);
const onConnectStart: OnConnectStart = useCallback(
(event, params) => {
dispatch(connectionStarted(params));
},
[dispatch]
);
const onConnect: OnConnect = useCallback(
(connection) => {
dispatch(connectionMade(connection));
},
[dispatch]
);
const onConnectEnd: OnConnectEnd = useCallback(
(event) => {
dispatch(connectionEnded());
},
[dispatch]
);
const handleInvoke = useCallback(() => {
dispatch(nodesGraphBuilt());
}, [dispatch]);
return (
<ReactFlow
nodeTypes={nodeTypes}
nodes={nodes}
edges={edges}
onNodesChange={onNodesChange}
onEdgesChange={onEdgesChange}
onConnectStart={onConnectStart}
onConnect={onConnect}
onConnectEnd={onConnectEnd}
defaultEdgeOptions={{
style: { strokeWidth: 2 },
}}
>
<Panel position="top-left">
<AddNodeMenu />
</Panel>
<Panel position="top-center">
<Button onClick={handleInvoke}>Will it blend?</Button>
</Panel>
<Panel position="top-right">
<FieldTypeLegend />
</Panel>
<Background />
<Controls />
<MiniMap nodeStrokeWidth={3} zoomable pannable />
</ReactFlow>
);
};

View File

@@ -0,0 +1,50 @@
import { Box } from '@chakra-ui/react';
import { InputField } from '../types';
import { BooleanInputFieldComponent } from './fields/BooleanInputFieldComponent';
import { EnumInputFieldComponent } from './fields/EnumInputFieldComponent';
import { ImageInputFieldComponent } from './fields/ImageInputFieldComponent';
import { LatentsInputFieldComponent } from './fields/LatentsInputFieldComponent';
import { ModelInputFieldComponent } from './fields/ModelInputFieldComponent';
import { NumberInputFieldComponent } from './fields/NumberInputFieldComponent';
import { StringInputFieldComponent } from './fields/StringInputFieldComponent';
type InputFieldComponentProps = {
nodeId: string;
field: InputField;
};
// build an individual input element based on the schema
export const InputFieldComponent = (props: InputFieldComponentProps) => {
const { nodeId, field } = props;
const { type, value } = field;
if (type === 'string') {
return <StringInputFieldComponent nodeId={nodeId} field={field} />;
}
if (type === 'boolean') {
return <BooleanInputFieldComponent nodeId={nodeId} field={field} />;
}
if (type === 'integer' || type === 'float') {
return <NumberInputFieldComponent nodeId={nodeId} field={field} />;
}
if (type === 'enum') {
return <EnumInputFieldComponent nodeId={nodeId} field={field} />;
}
if (type === 'image') {
return <ImageInputFieldComponent nodeId={nodeId} field={field} />;
}
if (type === 'latents') {
return <LatentsInputFieldComponent nodeId={nodeId} field={field} />;
}
if (type === 'model') {
return <ModelInputFieldComponent nodeId={nodeId} field={field} />;
}
return <Box p={2}>Unknown field type: {type}</Box>;
};
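Supporting a new field type means one more branch here plus a component following the `fieldValueChanged` pattern; a sketch for a hypothetical `color` field (nothing below exists in this branch):
import { Input } from '@chakra-ui/react';
import { useAppDispatch } from 'app/storeHooks';
import { fieldValueChanged } from 'features/nodes/store/nodesSlice';
import { ChangeEvent } from 'react';
// Hypothetical field type -- 'color' is not in FIELD_TYPE_MAP in this branch.
type ColorInputField = { name: string; value: string };
export const ColorInputFieldComponent = (props: {
  nodeId: string;
  field: ColorInputField;
}) => {
  const { nodeId, field } = props;
  const dispatch = useAppDispatch();
  const handleValueChanged = (e: ChangeEvent<HTMLInputElement>) => {
    dispatch(
      fieldValueChanged({ nodeId, fieldId: field.name, value: e.target.value })
    );
  };
  // Chakra's Input renders a native color picker when given type="color"
  return <Input type="color" onChange={handleValueChanged} value={field.value} />;
};
// InputFieldComponent would then gain:
//   if (type === 'color') {
//     return <ColorInputFieldComponent nodeId={nodeId} field={field} />;
//   }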

View File

@@ -0,0 +1,145 @@
import { NodeProps, useReactFlow } from 'reactflow';
import {
Box,
Flex,
FormControl,
FormLabel,
Heading,
HStack,
Tooltip,
Icon,
Code,
Text,
} from '@chakra-ui/react';
import { FaInfoCircle } from 'react-icons/fa';
import { Invocation } from '../types';
import { InputFieldComponent } from './InputFieldComponent';
import { FieldHandle } from './FieldHandle';
import { isEqual, map, size } from 'lodash';
import { memo, useMemo } from 'react';
import { useIsValidConnection } from '../hooks/useIsValidConnection';
import { createSelector } from '@reduxjs/toolkit';
import { RootState } from 'app/store';
import { useAppSelector } from 'app/storeHooks';
const connectedInputFieldsSelector = createSelector(
(state: RootState) => state.nodes.edges,
(edges) => {
return edges.map((e) => e.targetHandle);
},
{
memoizeOptions: {
resultEqualityCheck: isEqual,
},
}
);
export const InvocationComponent = memo((props: NodeProps<Invocation>) => {
const { id, data, selected } = props;
const { type, title, description, inputs, outputs } = data;
const isValidConnection = useIsValidConnection();
const connectedInputs = useAppSelector(connectedInputFieldsSelector);
// TODO: determine if a field/handle is connected and disable the input if so
return (
<Box
sx={{
padding: 4,
bg: 'base.800',
borderRadius: 'md',
boxShadow: 'dark-lg',
borderWidth: 2,
borderColor: selected ? 'base.400' : 'transparent',
}}
>
<Flex flexDirection="column" gap={2}>
<>
<Code>{id}</Code>
<HStack justifyContent="space-between">
<Heading size="sm" fontWeight={500} color="base.100">
{title}
</Heading>
<Tooltip
label={description}
placement="top"
hasArrow
shouldWrapChildren
>
<Icon color="base.300" as={FaInfoCircle} />
</Tooltip>
</HStack>
{map(inputs, (input, i) => {
const isConnected = connectedInputs.includes(input.name);
return (
<Box
key={i}
position="relative"
p={2}
borderWidth={1}
borderRadius="md"
sx={{
borderColor:
!isConnected && input.connectionType === 'always'
? 'warning.400'
: undefined,
}}
>
<FormControl isDisabled={isConnected}>
<HStack justifyContent="space-between" alignItems="center">
<FormLabel>{input.title}</FormLabel>
<Tooltip
label={input.description}
placement="top"
hasArrow
shouldWrapChildren
>
<Icon color="base.400" as={FaInfoCircle} />
</Tooltip>
</HStack>
<InputFieldComponent nodeId={id} field={input} />
</FormControl>
{input.connectionType !== 'never' && (
<FieldHandle
nodeId={id}
field={input}
isValidConnection={isValidConnection}
handleType="target"
/>
)}
</Box>
);
})}
{map(outputs).map((output, i) => {
// const top = `${(100 / (size(outputs) + 1)) * (i + 1)}%`;
const { name, title } = output;
return (
<Box
key={name}
position="relative"
p={2}
borderWidth={1}
borderRadius="md"
>
<FormControl>
<FormLabel textAlign="end">{title} Output</FormLabel>
</FormControl>
<FieldHandle
key={name}
nodeId={id}
field={output}
isValidConnection={isValidConnection}
handleType="source"
/>
</Box>
);
})}
</>
</Flex>
<Flex></Flex>
</Box>
);
});
InvocationComponent.displayName = 'InvocationComponent';
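One caveat worth noting in `connectedInputFieldsSelector`: `targetHandle` values are only unique per node, so an input named `image` connected on any node would flag every node's `image` input as connected. A hedged sketch of a per-node variant (not part of this branch):
import { createSelector } from '@reduxjs/toolkit';
import { isEqual } from 'lodash';
import { RootState } from 'app/store';
// Scope connectedness to one node by also matching edge.target; the
// component would build this selector with its own `id` prop.
export const makeConnectedInputFieldsSelector = (nodeId: string) =>
  createSelector(
    (state: RootState) => state.nodes.edges,
    (edges) =>
      edges.filter((e) => e.target === nodeId).map((e) => e.targetHandle),
    {
      memoizeOptions: {
        resultEqualityCheck: isEqual,
      },
    }
  );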

View File

@@ -0,0 +1,46 @@
import 'reactflow/dist/style.css';
import { Box } from '@chakra-ui/react';
import { ReactFlowProvider } from 'reactflow';
import { Flow } from './Flow';
import { useAppSelector } from 'app/storeHooks';
import { RootState } from 'app/store';
import { buildNodesGraph } from '../util/buildNodesGraph';
const NodeEditor = () => {
const state = useAppSelector((state: RootState) => state);
const graph = buildNodesGraph(state);
return (
<Box
sx={{
position: 'relative',
width: 'full',
height: 'full',
borderRadius: 'md',
bg: 'base.850',
}}
>
<ReactFlowProvider>
<Flow />
</ReactFlowProvider>
<Box
as="pre"
fontFamily="monospace"
position="absolute"
top={2}
left={2}
width="full"
height="full"
userSelect="none"
pointerEvents="none"
opacity={0.7}
>
<Box w="50%">{JSON.stringify(graph, null, 2)}</Box>
</Box>
</Box>
);
};
export default NodeEditor;

View File

@@ -0,0 +1,28 @@
import { Switch } from '@chakra-ui/react';
import { useAppDispatch } from 'app/storeHooks';
import { fieldValueChanged } from 'features/nodes/store/nodesSlice';
import { BooleanInputField } from 'features/nodes/types';
import { ChangeEvent } from 'react';
import { FieldComponentProps } from './types';
export const BooleanInputFieldComponent = (
props: FieldComponentProps<BooleanInputField>
) => {
const { nodeId, field } = props;
const dispatch = useAppDispatch();
const handleValueChanged = (e: ChangeEvent<HTMLInputElement>) => {
dispatch(
fieldValueChanged({
nodeId,
fieldId: field.name,
value: e.target.checked,
})
);
};
return (
<Switch onChange={handleValueChanged} isChecked={field.value}></Switch>
);
};

View File

@@ -0,0 +1,32 @@
import { Select } from '@chakra-ui/react';
import { useAppDispatch } from 'app/storeHooks';
import { fieldValueChanged } from 'features/nodes/store/nodesSlice';
import { EnumInputField } from 'features/nodes/types';
import { ChangeEvent } from 'react';
import { FieldComponentProps } from './types';
export const EnumInputFieldComponent = (
props: FieldComponentProps<EnumInputField>
) => {
const { nodeId, field } = props;
const dispatch = useAppDispatch();
const handleValueChanged = (e: ChangeEvent<HTMLSelectElement>) => {
dispatch(
fieldValueChanged({
nodeId,
fieldId: field.name,
value: e.target.value,
})
);
};
return (
<Select onChange={handleValueChanged} value={field.value}>
{field.options.map((option) => (
<option key={option}>{option}</option>
))}
</Select>
);
};

View File

@@ -0,0 +1,11 @@
import { ImageInputField } from 'features/nodes/types';
import { FaImage } from 'react-icons/fa';
import { FieldComponentProps } from './types';
export const ImageInputFieldComponent = (
props: FieldComponentProps<ImageInputField>
) => {
const { nodeId, field } = props;
return <FaImage />;
};

View File

@@ -0,0 +1,11 @@
import { LatentsInputField } from 'features/nodes/types';
import { TbBrandMatrix } from 'react-icons/tb';
import { FieldComponentProps } from './types';
export const LatentsInputFieldComponent = (
props: FieldComponentProps<LatentsInputField>
) => {
const { nodeId, field } = props;
return <TbBrandMatrix />;
};

View File

@@ -0,0 +1,49 @@
import { Select } from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit';
import { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import { fieldValueChanged } from 'features/nodes/store/nodesSlice';
import { ModelInputField } from 'features/nodes/types';
import { isEqual, map } from 'lodash';
import { ChangeEvent } from 'react';
import { FieldComponentProps } from './types';
const availableModelsSelector = createSelector(
(state: RootState) => state.models.modelList,
(modelList) => {
return map(modelList, (_, name) => name);
},
{
memoizeOptions: {
resultEqualityCheck: isEqual,
},
}
);
export const ModelInputFieldComponent = (
props: FieldComponentProps<ModelInputField>
) => {
const { nodeId, field } = props;
const dispatch = useAppDispatch();
const availableModels = useAppSelector(availableModelsSelector);
const handleValueChanged = (e: ChangeEvent<HTMLSelectElement>) => {
dispatch(
fieldValueChanged({
nodeId,
fieldId: field.name,
value: e.target.value,
})
);
};
return (
<Select onChange={handleValueChanged} value={field.value}>
{availableModels.map((option) => (
<option key={option}>{option}</option>
))}
</Select>
);
};

View File

@@ -0,0 +1,33 @@
import {
NumberDecrementStepper,
NumberIncrementStepper,
NumberInput,
NumberInputField,
NumberInputStepper,
} from '@chakra-ui/react';
import { useAppDispatch } from 'app/storeHooks';
import { fieldValueChanged } from 'features/nodes/store/nodesSlice';
import { IntegerInputField, FloatInputField } from 'features/nodes/types';
import { FieldComponentProps } from './types';
export const NumberInputFieldComponent = (
props: FieldComponentProps<IntegerInputField | FloatInputField>
) => {
const { nodeId, field } = props;
const dispatch = useAppDispatch();
const handleValueChanged = (_: string, value: number) => {
dispatch(fieldValueChanged({ nodeId, fieldId: field.name, value }));
};
return (
<NumberInput onChange={handleValueChanged} value={field.value}>
<NumberInputField />
<NumberInputStepper>
<NumberIncrementStepper />
<NumberDecrementStepper />
</NumberInputStepper>
</NumberInput>
);
};

View File

@@ -0,0 +1,22 @@
import { Input } from '@chakra-ui/react';
import { useAppDispatch } from 'app/storeHooks';
import { fieldValueChanged } from 'features/nodes/store/nodesSlice';
import { StringInputField } from 'features/nodes/types';
import { ChangeEvent } from 'react';
import { FieldComponentProps } from './types';
export const StringInputFieldComponent = (
props: FieldComponentProps<StringInputField>
) => {
const { nodeId, field } = props;
const dispatch = useAppDispatch();
const handleValueChanged = (e: ChangeEvent<HTMLInputElement>) => {
dispatch(
fieldValueChanged({ nodeId, fieldId: field.name, value: e.target.value })
);
};
return <Input onChange={handleValueChanged} value={field.value}></Input>;
};

View File

@@ -0,0 +1,6 @@
import { InputField } from 'features/nodes/types';
export type FieldComponentProps<T extends InputField> = {
nodeId: string;
field: T;
};

View File

@@ -0,0 +1,57 @@
import { FieldType, FieldUIConfig } from './types';
export const HANDLE_TOOLTIP_OPEN_DELAY = 500;
export const FIELD_TYPE_MAP: Record<string, FieldType> = {
integer: 'integer',
number: 'float',
string: 'string',
boolean: 'boolean',
enum: 'enum',
ImageField: 'image',
LatentsField: 'latents',
model: 'model',
};
export const FIELDS: Record<FieldType, FieldUIConfig> = {
integer: {
color: 'red',
title: 'Integer',
description: 'Integers are whole numbers, without a decimal point.',
},
float: {
color: 'orange',
title: 'Float',
description: 'Floats are numbers with a decimal point.',
},
string: {
color: 'yellow',
title: 'String',
description: 'Strings are text.',
},
boolean: {
color: 'green',
title: 'Boolean',
description: 'Booleans are true or false.',
},
enum: {
color: 'blue',
title: 'Enum',
description: 'Enums are values that may be one of a number of options.',
},
image: {
color: 'purple',
title: 'Image',
description: 'Images may be passed between nodes.',
},
latents: {
color: 'pink',
title: 'Latents',
description: 'Latents may be passed between nodes.',
},
model: {
color: 'teal',
title: 'Model',
description: 'Models select the model weights used for generation.',
},
};
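FIELD_TYPE_MAP translates raw OpenAPI schema type names into the UI's field types, which FIELDS then decorates for display; a small lookup sketch (the helper is hypothetical):
import { FIELD_TYPE_MAP } from 'features/nodes/constants'; // assumed path
import { FieldType } from 'features/nodes/types';
// Hypothetical helper: resolve a schema type name to a UI field type,
// or undefined for types the editor does not know how to render.
const getFieldType = (schemaTypeName: string): FieldType | undefined =>
  schemaTypeName in FIELD_TYPE_MAP ? FIELD_TYPE_MAP[schemaTypeName] : undefined;
// e.g. getFieldType('number') === 'float'
//      getFieldType('ImageField') === 'image'
//      getFieldType('object') === undefined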
