Compare commits


93 Commits

Author SHA1 Message Date
psychedelicious
5189fd70a4 feat(ui, nodes): metadata wip 2023-04-14 12:52:07 +10:00
psychedelicious
c79e7d3bc1 fix(ui): fix sqlalchemy dynamic model instantiation 2023-04-13 09:27:46 +10:00
psychedelicious
f0268235ed feat(ui): rewrite SqliteItemStore in sqlalchemy 2023-04-12 22:34:32 +10:00
psychedelicious
a428c473ae feat(ui): handle already-connected fields 2023-04-12 12:32:58 +10:00
psychedelicious
832e5f66e1 feat(ui): add url host transformation 2023-04-12 12:29:17 +10:00
Mary Hipp
43e820c98c add redux-dynamic-middlewares as a dependency 2023-04-12 12:29:17 +10:00
psychedelicious
708b236769 chore(ui): rebuild api, update types 2023-04-12 12:29:17 +10:00
psychedelicious
c182f64620 feat(ui): wip node editor 2023-04-12 12:29:17 +10:00
psychedelicious
6e4c3a7127 docs(ui): update nodes doc 2023-04-12 12:29:17 +10:00
psychedelicious
7b690a8127 feat(ui): validation connections w/ graphlib 2023-04-12 12:29:17 +10:00
psychedelicious
fe7cf16547 feat(ui): wip model handling and graph topology validation 2023-04-12 12:29:17 +10:00
psychedelicious
451fe7abcd feat(ui): it blends 2023-04-12 12:29:17 +10:00
psychedelicious
ebc76a4785 feat(ui): increase edge width 2023-04-12 12:29:17 +10:00
psychedelicious
2d1b818824 feat(ui): add connection validation styling 2023-04-12 12:29:17 +10:00
psychedelicious
d93473eaae fix(ui): add basic node edges & connection validation 2023-04-12 12:29:17 +10:00
psychedelicious
f6714d74be fix(ui): fix handle 2023-04-12 12:29:17 +10:00
psychedelicious
d26a414560 feat(ui): hook up nodes to redux 2023-04-12 12:29:17 +10:00
psychedelicious
5aec29b25f feat(ui): cleanup nodes ui stuff 2023-04-12 12:29:17 +10:00
psychedelicious
abde52573e feat(ui): nodes before deleting stuff 2023-04-12 12:29:17 +10:00
psychedelicious
49ea838a3c feat(ui): remove extraneous field types 2023-04-12 12:29:17 +10:00
psychedelicious
9bd79c04a6 feat(ui): wip node editor 2023-04-12 12:29:17 +10:00
psychedelicious
b55b2a8947 fix(ui): disable event subscription
it is not fully baked just yet
2023-04-12 12:29:17 +10:00
psychedelicious
e3a8fceb5d feat(ui): first steps to node editor ui 2023-04-12 12:29:17 +10:00
psychedelicious
1e09fdc8be feat(ui): "subscribe" to particular nodes
feels like a dirty hack but oh well it works
2023-04-12 12:29:17 +10:00
Mary Hipp
d0e9ec267c feat(ui): add hi-res functionality for txt2img generations 2023-04-12 12:29:17 +10:00
Mary Hipp
880e1743ac feat(ui): update ModelSelect for nodes API 2023-04-12 12:29:17 +10:00
Mary Hipp
f59d4a0015 feat(ui): generate iterations graph 2023-04-12 12:29:17 +10:00
psychedelicious
152d4e76aa feat(ui): add exampleGraphs object w/ iterations example 2023-04-12 12:29:17 +10:00
psychedelicious
b829af7410 fix(ui): fix middleware order for multi-node graphs 2023-04-12 12:29:16 +10:00
psychedelicious
dce604b567 feat(ui): increase StatusIndicator font size 2023-04-12 12:29:16 +10:00
psychedelicious
1ed4354753 feat(ui): improve InvocationCompleteEvent types 2023-04-12 12:29:16 +10:00
psychedelicious
db8ba8b0bf chore(ui): regenerate api client 2023-04-12 12:29:16 +10:00
psychedelicious
3cd2695676 fix(ui): fix img2img type 2023-04-12 12:29:16 +10:00
psychedelicious
2787d32881 feat(ui): migrate cancelation
also updated action names to be event-like instead of declaration-like

sorry, i was scattered and this commit has a lot of unrelated stuff in it.
2023-04-12 12:29:16 +10:00
psychedelicious
96768078fa feat(ui): prep for socket jwt 2023-04-12 12:29:16 +10:00
psychedelicious
13c9639d7b feat(ui): dynamic middleware loading 2023-04-12 12:29:16 +10:00
Mary Hipp
f104f0a390 feat(ui) working on making socket URL dynamic 2023-04-12 12:29:16 +10:00
Mary Hipp
c49d2accb7 feat(ui): export StatusIndicator and ModelSelect for header use 2023-04-12 12:29:16 +10:00
Mary Hipp
749a0912c8 feat(ui): add optional token for auth 2023-04-12 12:29:16 +10:00
psychedelicious
759e5613cd feat(ui): wip events, comments, and general refactoring 2023-04-12 12:29:16 +10:00
psychedelicious
ac9b83722e lang(ui): add toast strings 2023-04-12 12:29:16 +10:00
psychedelicious
439a35e064 docs(ui): organise and update docs 2023-04-12 12:29:16 +10:00
Mary Hipp
7286843698 feat(ui): add support to disableTabs 2023-04-12 12:29:16 +10:00
Mary Hipp
77ba1b77d7 disable panels when app mounts 2023-04-12 12:29:16 +10:00
Mary Hipp
e749e7e915 feat(ui): invert logic to be disabled 2023-04-12 12:29:16 +10:00
Mary Hipp
e486559d8f feat(ui): disable panels based on app props 2023-04-12 12:29:16 +10:00
psychedelicious
2d8982c23d feat(ui): wip refactor socket events 2023-04-12 12:29:16 +10:00
psychedelicious
02d510ba17 chore(ui): regenerate api 2023-04-12 12:29:16 +10:00
psychedelicious
84d9ccb014 feat(ui): wip gallery migration 2023-04-12 12:29:16 +10:00
psychedelicious
b9fc136f25 feat(ui): wip gallery migration 2023-04-12 12:29:16 +10:00
psychedelicious
f6691dbf3b chore(ui): regenerate api 2023-04-12 12:29:16 +10:00
psychedelicious
cb11717b9c feat(ui): patch api generation for headers access 2023-04-12 12:29:16 +10:00
Mary Hipp
35c950c50d fix(ui): restore removed type 2023-04-12 12:29:16 +10:00
Mary Hipp
afb0b564e9 feat(ui): POST upload working 2023-04-12 12:29:16 +10:00
Mary Hipp
657efadffa fix(ui): separate thunk for initial gallery load so it properly gets index 0 2023-04-12 12:29:16 +10:00
psychedelicious
5b1ffc292f feat(ui): clean up & comment results slice 2023-04-12 12:29:16 +10:00
psychedelicious
cad289dfe5 feat(ui): begin migrating gallery to nodes
Along the way, migrate to use RTK `createEntityAdapter` for gallery images, and separate `results` and `uploads` into separate slices. Much cleaner this way.
2023-04-12 12:29:16 +10:00
psychedelicious
1df999d082 chore(ui): add typescript as dev dependency
I am having trouble with TS versions after vscode updated and now uses TS 5. `madge` has installed 3.9.10 and for whatever reason my vscode wants to use that. Manually specifying 4.9.5 and then setting vscode to use that as the workspace TS fixes the issue.
2023-04-12 12:29:16 +10:00
psychedelicious
1372536728 chore(ui): regenerate api client 2023-04-12 12:29:16 +10:00
psychedelicious
23a69ea7bf docs(ui): update readme 2023-04-12 12:29:16 +10:00
psychedelicious
5cff28aaf3 chore(ui): bump redux-toolkit 2023-04-12 12:29:16 +10:00
psychedelicious
21fba1aac6 feat(ui): load images on socket connect
Rudimentary
2023-04-12 12:29:16 +10:00
psychedelicious
c992c2fe7d feat(ui): add type guards for outputs 2023-04-12 12:29:16 +10:00
psychedelicious
3e76c1a3cd feat(ui): make thunk types more consistent 2023-04-12 12:29:16 +10:00
psychedelicious
5eb077accc feat(ui): fix parameters panel border color
This commit should be elsewhere but I don't want to break my flow
2023-04-12 12:29:16 +10:00
psychedelicious
007794f48b feat(ui): disable NodeAPITest
This was polluting the network/socket logs.
2023-04-12 12:29:16 +10:00
psychedelicious
95a336c26a feat(ui): add rtk action type guard 2023-04-12 12:29:16 +10:00
psychedelicious
6ca0798303 fix(ui): fix middleware types 2023-04-12 12:29:16 +10:00
psychedelicious
bb9986bfd2 feat(ui): handle random seeds 2023-04-12 12:29:16 +10:00
psychedelicious
11f34e0388 feat(ui): add nodes mode script 2023-04-12 12:29:16 +10:00
maryhipp
dea27f451a chore(ui): add support for package mode 2023-04-12 12:29:16 +10:00
maryhipp
be32f5639b feat(ui): get intermediate images working but types are stubbed out 2023-04-12 12:29:16 +10:00
maryhipp
6fd9840608 feat(ui): img2img implementation 2023-04-12 12:29:16 +10:00
maryhipp
158528cf12 feat(ui): write separate nodes socket layer, txt2img generating and rendering w single node 2023-04-12 12:29:16 +10:00
maryhipp
1401a26a41 feat(ui): start hooking up dynamic txt2img node generation, create middleware for session invocation 2023-04-12 12:29:16 +10:00
maryhipp
213a2dcdc8 add optional apiUrl prop 2023-04-12 12:29:16 +10:00
maryhipp
85019ab1b0 use reference to sampler_name 2023-04-12 12:29:16 +10:00
maryhipp
683f8b324e use reference to sampler_name 2023-04-12 12:29:16 +10:00
maryhipp
8a45efbaf3 start building out node translations from frontend state and add notes about missing features 2023-04-12 12:29:16 +10:00
psychedelicious
14a1871087 feat(ui): wip nodes
- extract api client method arg types instead of manually declaring them
- update example to display images
- general tidy up
2023-04-12 12:29:16 +10:00
psychedelicious
3e3ac329c8 feat(ui): add socketio types 2023-04-12 12:29:16 +10:00
psychedelicious
1db0940c67 fix(ui): fix scrollbar styles typing and prop
just noticed the typo, and made the types stronger.
2023-04-12 12:29:16 +10:00
psychedelicious
b7fa23be64 fix(ui): disable OG web server socket connection 2023-04-12 12:29:16 +10:00
psychedelicious
9be2c02d5e chore(ui): regenerate api client 2023-04-12 12:29:16 +10:00
psychedelicious
686f03d2cc feat(ui): nodes cancel 2023-04-12 12:29:16 +10:00
psychedelicious
2b6ca72b36 feat(ui): more nodes api prototyping 2023-04-12 12:29:16 +10:00
psychedelicious
bfc0c0b3f6 feat(ui): generate object args for api client 2023-04-12 12:29:16 +10:00
psychedelicious
e3c3ddc45b feat(backend): fixes for nodes/generator 2023-04-12 12:29:16 +10:00
psychedelicious
9436f8e81e chore(ui): update openapi.json 2023-04-12 12:29:16 +10:00
psychedelicious
68d1c35b6f chore(ui): update .eslintignore, .prettierignore 2023-04-12 12:29:16 +10:00
psychedelicious
46f54c81ed chore(ui): organize generated files 2023-04-12 12:29:16 +10:00
psychedelicious
860d495732 fix(ui): update client & nodes test code w/ new Edge type 2023-04-12 12:29:16 +10:00
psychedelicious
7c24706778 feat(ui): add axios client generator and simple example 2023-04-12 12:29:16 +10:00
253 changed files with 3040 additions and 7396 deletions

.github/CODEOWNERS (vendored, 14 changes) · View File

@@ -1,16 +1,16 @@
# continuous integration
/.github/workflows/ @lstein @blessedcoolant
/.github/workflows/ @mauwii @lstein @blessedcoolant
# documentation
/docs/ @lstein @tildebyte @blessedcoolant
/mkdocs.yml @lstein @blessedcoolant
/docs/ @lstein @mauwii @tildebyte @blessedcoolant
/mkdocs.yml @lstein @mauwii @blessedcoolant
# nodes
/invokeai/app/ @Kyle0654 @blessedcoolant
# installation and configuration
/pyproject.toml @lstein @blessedcoolant
/docker/ @lstein @blessedcoolant
/pyproject.toml @mauwii @lstein @blessedcoolant
/docker/ @mauwii @lstein @blessedcoolant
/scripts/ @ebr @lstein
/installer/ @lstein @ebr
/invokeai/assets @lstein @ebr
@@ -22,11 +22,11 @@
/invokeai/backend @blessedcoolant @psychedelicious @lstein
# generation, model management, postprocessing
/invokeai/backend @damian0815 @lstein @blessedcoolant @jpphoto @gregghelt2
/invokeai/backend @keturn @damian0815 @lstein @blessedcoolant @jpphoto
# front ends
/invokeai/frontend/CLI @lstein
/invokeai/frontend/install @lstein @ebr
/invokeai/frontend/install @lstein @ebr @mauwii
/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
/invokeai/frontend/web @psychedelicious @blessedcoolant

.gitignore (vendored, 2 changes) · View File

@@ -9,8 +9,6 @@ models/ldm/stable-diffusion-v1/model.ckpt
configs/models.user.yaml
config/models.user.yml
invokeai.init
.version
.last_model
# ignore the Anaconda/Miniconda installer used while building Docker image
anaconda.sh

View File

@@ -84,7 +84,7 @@ installing lots of models.
6. Wait while the installer does its thing. After installing the software,
the installer will launch a script that lets you configure InvokeAI and
select a set of starting image generation models.
select a set of starting image generaiton models.
7. Find the folder that InvokeAI was installed into (it is not the
same as the unpacked zip file directory!) The default location of this
@@ -148,11 +148,6 @@ not supported.
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
```
_For non-GPU systems:_
```terminal
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
```
_For Macintoshes, either Intel or M1/M2:_
```sh

View File

@@ -32,7 +32,7 @@ turned on and off on the command line using `--nsfw_checker` and
At installation time, InvokeAI will ask whether the checker should be
activated by default (neither argument given on the command line). The
response is stored in the InvokeAI initialization file (usually
`invokeai.init` in your home directory). You can change the default at any
`.invokeai` in your home directory). You can change the default at any
time by opening this file in a text editor and commenting or
uncommenting the line `--nsfw_checker`.

View File

@@ -3,16 +3,12 @@
import os
from argparse import Namespace
from invokeai.app.services.metadata import PngMetadataService, MetadataServiceBase
from ..services.default_graphs import create_system_graphs
from ..services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
from ...backend import Globals
from ..services.model_manager_initializer import get_model_manager
from ..services.restoration_services import RestorationServices
from ..services.graph import GraphExecutionState, LibraryGraph
from ..services.graph import GraphExecutionState
from ..services.image_storage import DiskImageStorage
from ..services.invocation_queue import MemoryInvocationQueue
from ..services.invocation_services import InvocationServices
@@ -62,9 +58,7 @@ class ApiDependencies:
latents = ForwardCacheLatentsStorage(DiskLatentsStorage(f'{output_folder}/latents'))
metadata = PngMetadataService()
images = DiskImageStorage(f'{output_folder}/images', metadata_service=metadata)
images = DiskImageStorage(f'{output_folder}/images')
# TODO: build a file/path manager?
db_location = os.path.join(output_folder, "invokeai.db")
@@ -74,11 +68,7 @@ class ApiDependencies:
events=events,
latents=latents,
images=images,
metadata=metadata,
queue=MemoryInvocationQueue(),
graph_library=SqliteItemStorage[LibraryGraph](
filename=db_location, table_name="graphs"
),
graph_execution_manager=SqliteItemStorage[GraphExecutionState](
filename=db_location, table_name="graph_executions"
),
@@ -86,8 +76,6 @@ class ApiDependencies:
restoration=RestorationServices(config),
)
create_system_graphs(services.graph_library)
ApiDependencies.invoker = Invoker(services)
@staticmethod
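
The hunk above is where one side of this compare wires up a `graph_library` backed by `SqliteItemStorage[LibraryGraph]` and seeds it with `create_system_graphs`. Below is a minimal sketch of that generic, sqlite-backed item-store pattern; the class and its behavior are illustrative assumptions, not InvokeAI's actual implementation.

```python
# Illustrative sketch of a generic sqlite-backed item store in the spirit of
# SqliteItemStorage[T] above. Names and behavior are assumptions.
import sqlite3
from typing import Generic, TypeVar

from pydantic import BaseModel

T = TypeVar("T", bound=BaseModel)


class SimpleSqliteItemStore(Generic[T]):
    def __init__(self, item_type: type[T], filename: str, table_name: str):
        self._item_type = item_type
        self._table = table_name
        self._conn = sqlite3.connect(filename)
        self._conn.execute(
            f"CREATE TABLE IF NOT EXISTS {self._table} (id TEXT PRIMARY KEY, item TEXT)"
        )

    def set(self, item_id: str, item: T) -> None:
        # Persist the whole pydantic model as a JSON blob keyed by id.
        self._conn.execute(
            f"INSERT OR REPLACE INTO {self._table} (id, item) VALUES (?, ?)",
            (item_id, item.json()),
        )
        self._conn.commit()

    def get(self, item_id: str) -> T:
        row = self._conn.execute(
            f"SELECT item FROM {self._table} WHERE id = ?", (item_id,)
        ).fetchone()
        return self._item_type.parse_raw(row[0])
```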

View File

@@ -45,7 +45,7 @@ class FastAPIEventService(EventServiceBase):
)
except Empty:
await asyncio.sleep(0.1)
await asyncio.sleep(0.001)
pass
except asyncio.CancelledError as e:
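
This hunk tunes the event service's polling loop. The sketch below shows the underlying pattern, assuming a thread-safe `queue.Queue` bridging synchronous producers into an asyncio task: drain events while any are pending, otherwise sleep briefly so other coroutines can run (the two intervals in the hunk are the knob being adjusted).

```python
# Sketch of the polling pattern above: an asyncio task drains a thread-safe
# queue and yields briefly when it is empty. The event payload and the print
# stand-in for dispatching are illustrative assumptions.
import asyncio
from queue import Empty, Queue


async def dispatch_events(event_queue: Queue) -> None:
    while True:
        try:
            event = event_queue.get(block=False)
            print("dispatching", event)  # stand-in for emitting to subscribers
        except Empty:
            # Nothing pending: yield to the event loop before polling again.
            await asyncio.sleep(0.001)
```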

View File

@@ -1,19 +1,7 @@
from typing import Optional
from pydantic import BaseModel, Field
from invokeai.app.models.image import ImageType
from invokeai.app.services.metadata import InvokeAIMetadata
class ImageResponseMetadata(BaseModel):
"""An image's metadata. Used only in HTTP responses."""
created: int = Field(description="The creation timestamp of the image")
width: int = Field(description="The width of the image in pixels")
height: int = Field(description="The height of the image in pixels")
invokeai: Optional[InvokeAIMetadata] = Field(
description="The image's InvokeAI-specific metadata"
)
from invokeai.app.models.metadata import ImageMetadata
class ImageResponse(BaseModel):
@@ -23,12 +11,4 @@ class ImageResponse(BaseModel):
image_name: str = Field(description="The name of the image")
image_url: str = Field(description="The url of the image")
thumbnail_url: str = Field(description="The url of the image's thumbnail")
metadata: ImageResponseMetadata = Field(description="The image's metadata")
class ProgressImage(BaseModel):
"""The progress image sent intermittently during processing"""
width: int = Field(description="The effective width of the image in pixels")
height: int = Field(description="The effective height of the image in pixels")
dataURL: str = Field(description="The image data as a b64 data URL")
metadata: ImageMetadata = Field(description="The image's metadata")
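
The models above trade the `ImageResponseMetadata` wrapper against a plain `ImageMetadata` field on `ImageResponse`. For reference, here is the response-metadata shape as a runnable pydantic v1 snippet, matching the `Field(...)` style of the hunk; typing the `invokeai` payload as a bare `dict` is a stand-in assumption for `InvokeAIMetadata`.

```python
# Runnable sketch of the response-metadata model above (pydantic v1 style).
# The `invokeai` field's dict type is an assumption standing in for
# InvokeAIMetadata.
from typing import Optional

from pydantic import BaseModel, Field


class ImageResponseMetadata(BaseModel):
    """An image's metadata. Used only in HTTP responses."""

    created: int = Field(description="The creation timestamp of the image")
    width: int = Field(description="The width of the image in pixels")
    height: int = Field(description="The height of the image in pixels")
    invokeai: Optional[dict] = Field(
        default=None, description="The image's InvokeAI-specific metadata"
    )


meta = ImageResponseMetadata(created=1681264157, width=512, height=512)
print(meta.json())
```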

View File

@@ -3,15 +3,14 @@ import io
from datetime import datetime, timezone
import json
import os
from typing import Any
import uuid
from fastapi import HTTPException, Path, Query, Request, UploadFile
from fastapi import Path, Query, Request, UploadFile
from fastapi.responses import FileResponse, Response
from fastapi.routing import APIRouter
from PIL import Image
from invokeai.app.api.models.images import ImageResponse, ImageResponseMetadata
from invokeai.app.services.metadata import InvokeAIMetadata
from invokeai.app.api.models.images import ImageResponse
from invokeai.app.models.metadata import ImageMetadata
from invokeai.app.services.item_storage import PaginatedResults
from ...services.image_storage import ImageType
@@ -19,110 +18,85 @@ from ..dependencies import ApiDependencies
images_router = APIRouter(prefix="/v1/images", tags=["images"])
@images_router.get("/{image_type}/{image_name}", operation_id="get_image")
async def get_image(
image_type: ImageType = Path(description="The type of image to get"),
image_name: str = Path(description="The name of the image to get"),
) -> FileResponse | Response:
):
"""Gets a result"""
# TODO: This is not really secure at all. At least make sure only output results are served
filename = ApiDependencies.invoker.services.images.get_path(image_type, image_name)
return FileResponse(filename)
path = ApiDependencies.invoker.services.images.get_path(
image_type=image_type, image_name=image_name
)
if ApiDependencies.invoker.services.images.validate_path(path):
return FileResponse(path)
else:
raise HTTPException(status_code=404)
@images_router.get(
"/{image_type}/thumbnails/{image_name}", operation_id="get_thumbnail"
)
@images_router.get("/{image_type}/thumbnails/{image_name}", operation_id="get_thumbnail")
async def get_thumbnail(
image_type: ImageType = Path(description="The type of image to get"),
image_name: str = Path(description="The name of the image to get"),
) -> FileResponse | Response:
):
"""Gets a thumbnail"""
path = ApiDependencies.invoker.services.images.get_path(
image_type=image_type, image_name=image_name, is_thumbnail=True
)
if ApiDependencies.invoker.services.images.validate_path(path):
return FileResponse(path)
else:
raise HTTPException(status_code=404)
# TODO: This is not really secure at all. At least make sure only output results are served
filename = ApiDependencies.invoker.services.images.get_path(image_type, 'thumbnails/' + image_name)
return FileResponse(filename)
@images_router.post(
"/uploads/",
operation_id="upload_image",
responses={
201: {
"description": "The image was uploaded successfully",
"model": ImageResponse,
},
415: {"description": "Image upload failed"},
201: {"description": "The image was uploaded successfully", "model": ImageResponse},
404: {"description": "Session not found"},
},
status_code=201,
status_code=201
)
async def upload_image(
file: UploadFile, request: Request, response: Response
) -> ImageResponse:
async def upload_image(file: UploadFile, request: Request, response: Response) -> ImageResponse:
if not file.content_type.startswith("image"):
raise HTTPException(status_code=415, detail="Not an image")
return Response(status_code=415)
contents = await file.read()
try:
img = Image.open(io.BytesIO(contents))
except:
# Error opening the image
raise HTTPException(status_code=415, detail="Failed to read image")
return Response(status_code=415)
filename = f"{uuid.uuid4()}_{str(int(datetime.now(timezone.utc).timestamp()))}.png"
(image_path, thumbnail_path, ctime) = ApiDependencies.invoker.services.images.save(
ImageType.UPLOAD, filename, img
)
invokeai_metadata = ApiDependencies.invoker.services.metadata.get_metadata(img)
image_path = ApiDependencies.invoker.services.images.save(ImageType.UPLOAD, filename, img)
invokeai_metadata = json.loads(img.info.get("invokeai", "{}"))
res = ImageResponse(
image_type=ImageType.UPLOAD,
image_name=filename,
# TODO: DiskImageStorage should not be building URLs...?
image_url=f"api/v1/images/{ImageType.UPLOAD.value}/{filename}",
thumbnail_url=f"api/v1/images/{ImageType.UPLOAD.value}/thumbnails/{os.path.splitext(filename)[0]}.webp",
metadata=ImageResponseMetadata(
created=ctime,
# TODO: Creation of this object should happen elsewhere, just making it fit here so it works
metadata=ImageMetadata(
created=int(os.path.getctime(image_path)),
width=img.width,
height=img.height,
invokeai=invokeai_metadata,
invokeai=invokeai_metadata
),
)
response.status_code = 201
response.headers["Location"] = request.url_for(
"get_image", image_type=ImageType.UPLOAD.value, image_name=filename
)
"get_image", image_type=ImageType.UPLOAD.value, image_name=filename
)
return res
@images_router.get(
"/",
operation_id="list_images",
responses={200: {"model": PaginatedResults[ImageResponse]}},
)
async def list_images(
image_type: ImageType = Query(
default=ImageType.RESULT, description="The type of images to get"
),
image_type: ImageType = Query(default=ImageType.RESULT, description="The type of images to get"),
page: int = Query(default=0, description="The page of images to get"),
per_page: int = Query(default=10, description="The number of images per page"),
) -> PaginatedResults[ImageResponse]:
"""Gets a list of images"""
result = ApiDependencies.invoker.services.images.list(image_type, page, per_page)
result = ApiDependencies.invoker.services.images.list(
image_type, page, per_page
)
return result
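
The router changes above center on validating a resolved path before serving it. A self-contained sketch of that guard follows; `DiskImages` and `IMAGE_ROOT` are stand-ins for the real image storage service, not its actual API.

```python
# Sketch of the validated file-serving pattern above: resolve a path through
# the storage service, then 404 unless the service vouches for it.
import os

from fastapi import FastAPI, HTTPException
from fastapi.responses import FileResponse

app = FastAPI()
IMAGE_ROOT = os.path.abspath("outputs/images")  # assumed layout


class DiskImages:
    def get_path(self, image_type: str, image_name: str) -> str:
        return os.path.join(IMAGE_ROOT, image_type, os.path.basename(image_name))

    def validate_path(self, path: str) -> bool:
        # Serve only existing files that live inside the image root.
        path = os.path.abspath(path)
        return os.path.commonpath([path, IMAGE_ROOT]) == IMAGE_ROOT and os.path.isfile(path)


images = DiskImages()


@app.get("/v1/images/{image_type}/{image_name}", operation_id="get_image")
async def get_image(image_type: str, image_name: str):
    """Gets a result, refusing paths the storage service does not validate."""
    path = images.get_path(image_type=image_type, image_name=image_name)
    if images.validate_path(path):
        return FileResponse(path)
    raise HTTPException(status_code=404)
```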

View File

@@ -7,40 +7,11 @@ from pydantic import BaseModel, Field
import networkx as nx
import matplotlib.pyplot as plt
from ..invocations.baseinvocation import BaseInvocation
from ..invocations.image import ImageField
from ..services.graph import GraphExecutionState, LibraryGraph, GraphInvocation, Edge
from ..models.image import ImageField
from ..services.graph import GraphExecutionState
from ..services.invoker import Invoker
def add_field_argument(command_parser, name: str, field, default_override = None):
default = default_override if default_override is not None else field.default if field.default_factory is None else field.default_factory()
if get_origin(field.type_) == Literal:
allowed_values = get_args(field.type_)
allowed_types = set()
for val in allowed_values:
allowed_types.add(type(val))
allowed_types_list = list(allowed_types)
field_type = allowed_types_list[0] if len(allowed_types) == 1 else Union[allowed_types_list] # type: ignore
command_parser.add_argument(
f"--{name}",
dest=name,
type=field_type,
default=default,
choices=allowed_values,
help=field.field_info.description,
)
else:
command_parser.add_argument(
f"--{name}",
dest=name,
type=field.type_,
default=default,
help=field.field_info.description,
)
def add_parsers(
subparsers,
commands: list[type],
@@ -65,26 +36,30 @@ def add_parsers(
if name in exclude_fields:
continue
add_field_argument(command_parser, name, field)
if get_origin(field.type_) == Literal:
allowed_values = get_args(field.type_)
allowed_types = set()
for val in allowed_values:
allowed_types.add(type(val))
allowed_types_list = list(allowed_types)
field_type = allowed_types_list[0] if len(allowed_types) == 1 else Union[allowed_types_list] # type: ignore
def add_graph_parsers(
subparsers,
graphs: list[LibraryGraph],
add_arguments: Callable[[argparse.ArgumentParser], None]|None = None
):
for graph in graphs:
command_parser = subparsers.add_parser(graph.name, help=graph.description)
if add_arguments is not None:
add_arguments(command_parser)
# Add arguments for inputs
for exposed_input in graph.exposed_inputs:
node = graph.graph.get_node(exposed_input.node_path)
field = node.__fields__[exposed_input.field]
default_override = getattr(node, exposed_input.field)
add_field_argument(command_parser, exposed_input.alias, field, default_override)
command_parser.add_argument(
f"--{name}",
dest=name,
type=field_type,
default=field.default if field.default_factory is None else field.default_factory(),
choices=allowed_values,
help=field.field_info.description,
)
else:
command_parser.add_argument(
f"--{name}",
dest=name,
type=field.type_,
default=field.default if field.default_factory is None else field.default_factory(),
help=field.field_info.description,
)
class CliContext:
@@ -92,38 +67,17 @@ class CliContext:
session: GraphExecutionState
parser: argparse.ArgumentParser
defaults: dict[str, Any]
graph_nodes: dict[str, str]
nodes_added: list[str]
def __init__(self, invoker: Invoker, session: GraphExecutionState, parser: argparse.ArgumentParser):
self.invoker = invoker
self.session = session
self.parser = parser
self.defaults = dict()
self.graph_nodes = dict()
self.nodes_added = list()
def get_session(self):
self.session = self.invoker.services.graph_execution_manager.get(self.session.id)
return self.session
def reset(self):
self.session = self.invoker.create_execution_state()
self.graph_nodes = dict()
self.nodes_added = list()
# Leave defaults unchanged
def add_node(self, node: BaseInvocation):
self.get_session()
self.session.graph.add_node(node)
self.nodes_added.append(node.id)
self.invoker.services.graph_execution_manager.set(self.session)
def add_edge(self, edge: Edge):
self.get_session()
self.session.add_edge(edge)
self.invoker.services.graph_execution_manager.set(self.session)
class ExitCli(Exception):
"""Exception to exit the CLI"""

View File

@@ -13,22 +13,17 @@ from typing import (
from pydantic import BaseModel
from pydantic.fields import Field
from invokeai.app.services.metadata import PngMetadataService
from .services.default_graphs import create_system_graphs
from .services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
from ..backend import Args
from .cli.commands import BaseCommand, CliContext, ExitCli, add_graph_parsers, add_parsers, get_graph_execution_history
from .cli.commands import BaseCommand, CliContext, ExitCli, add_parsers, get_graph_execution_history
from .cli.completer import set_autocompleter
from .invocations import *
from .invocations.baseinvocation import BaseInvocation
from .services.events import EventServiceBase
from .services.model_manager_initializer import get_model_manager
from .services.restoration_services import RestorationServices
from .services.graph import Edge, EdgeConnection, ExposedNodeInput, GraphExecutionState, GraphInvocation, LibraryGraph, are_connection_types_compatible
from .services.default_graphs import default_text_to_image_graph_id
from .services.graph import Edge, EdgeConnection, GraphExecutionState, are_connection_types_compatible
from .services.image_storage import DiskImageStorage
from .services.invocation_queue import MemoryInvocationQueue
from .services.invocation_services import InvocationServices
@@ -63,7 +58,7 @@ def add_invocation_args(command_parser):
)
def get_command_parser(services: InvocationServices) -> argparse.ArgumentParser:
def get_command_parser() -> argparse.ArgumentParser:
# Create invocation parser
parser = argparse.ArgumentParser()
@@ -81,72 +76,20 @@ def get_command_parser(services: InvocationServices) -> argparse.ArgumentParser:
commands = BaseCommand.get_all_subclasses()
add_parsers(subparsers, commands, exclude_fields=["type"])
# Create subparsers for exposed CLI graphs
# TODO: add a way to identify these graphs
text_to_image = services.graph_library.get(default_text_to_image_graph_id)
add_graph_parsers(subparsers, [text_to_image], add_arguments=add_invocation_args)
return parser
class NodeField():
alias: str
node_path: str
field: str
field_type: type
def __init__(self, alias: str, node_path: str, field: str, field_type: type):
self.alias = alias
self.node_path = node_path
self.field = field
self.field_type = field_type
def fields_from_type_hints(hints: dict[str, type], node_path: str) -> dict[str,NodeField]:
return {k:NodeField(alias=k, node_path=node_path, field=k, field_type=v) for k, v in hints.items()}
def get_node_input_field(graph: LibraryGraph, field_alias: str, node_id: str) -> NodeField:
"""Gets the node field for the specified field alias"""
exposed_input = next(e for e in graph.exposed_inputs if e.alias == field_alias)
node_type = type(graph.graph.get_node(exposed_input.node_path))
return NodeField(alias=exposed_input.alias, node_path=f'{node_id}.{exposed_input.node_path}', field=exposed_input.field, field_type=get_type_hints(node_type)[exposed_input.field])
def get_node_output_field(graph: LibraryGraph, field_alias: str, node_id: str) -> NodeField:
"""Gets the node field for the specified field alias"""
exposed_output = next(e for e in graph.exposed_outputs if e.alias == field_alias)
node_type = type(graph.graph.get_node(exposed_output.node_path))
node_output_type = node_type.get_output_type()
return NodeField(alias=exposed_output.alias, node_path=f'{node_id}.{exposed_output.node_path}', field=exposed_output.field, field_type=get_type_hints(node_output_type)[exposed_output.field])
def get_node_inputs(invocation: BaseInvocation, context: CliContext) -> dict[str, NodeField]:
"""Gets the inputs for the specified invocation from the context"""
node_type = type(invocation)
if node_type is not GraphInvocation:
return fields_from_type_hints(get_type_hints(node_type), invocation.id)
else:
graph: LibraryGraph = context.invoker.services.graph_library.get(context.graph_nodes[invocation.id])
return {e.alias: get_node_input_field(graph, e.alias, invocation.id) for e in graph.exposed_inputs}
def get_node_outputs(invocation: BaseInvocation, context: CliContext) -> dict[str, NodeField]:
"""Gets the outputs for the specified invocation from the context"""
node_type = type(invocation)
if node_type is not GraphInvocation:
return fields_from_type_hints(get_type_hints(node_type.get_output_type()), invocation.id)
else:
graph: LibraryGraph = context.invoker.services.graph_library.get(context.graph_nodes[invocation.id])
return {e.alias: get_node_output_field(graph, e.alias, invocation.id) for e in graph.exposed_outputs}
def generate_matching_edges(
a: BaseInvocation, b: BaseInvocation, context: CliContext
a: BaseInvocation, b: BaseInvocation
) -> list[Edge]:
"""Generates all possible edges between two invocations"""
afields = get_node_outputs(a, context)
bfields = get_node_inputs(b, context)
atype = type(a)
btype = type(b)
aoutputtype = atype.get_output_type()
afields = get_type_hints(aoutputtype)
bfields = get_type_hints(btype)
matching_fields = set(afields.keys()).intersection(bfields.keys())
@@ -155,14 +98,14 @@ def generate_matching_edges(
matching_fields = matching_fields.difference(invalid_fields)
# Validate types
matching_fields = [f for f in matching_fields if are_connection_types_compatible(afields[f].field_type, bfields[f].field_type)]
matching_fields = [f for f in matching_fields if are_connection_types_compatible(afields[f], bfields[f])]
edges = [
Edge(
source=EdgeConnection(node_id=afields[alias].node_path, field=afields[alias].field),
destination=EdgeConnection(node_id=bfields[alias].node_path, field=bfields[alias].field)
source=EdgeConnection(node_id=a.id, field=field),
destination=EdgeConnection(node_id=b.id, field=field)
)
for alias in matching_fields
for field in matching_fields
]
return edges
@@ -202,8 +145,6 @@ def invoke_cli():
events = EventServiceBase()
metadata = PngMetadataService()
output_folder = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../../outputs")
)
@@ -215,12 +156,8 @@ def invoke_cli():
model_manager=model_manager,
events=events,
latents = ForwardCacheLatentsStorage(DiskLatentsStorage(f'{output_folder}/latents')),
images=DiskImageStorage(f'{output_folder}/images', metadata_service=metadata),
metadata=metadata,
images=DiskImageStorage(f'{output_folder}/images'),
queue=MemoryInvocationQueue(),
graph_library=SqliteItemStorage[LibraryGraph](
filename=db_location, table_name="graphs"
),
graph_execution_manager=SqliteItemStorage[GraphExecutionState](
filename=db_location, table_name="graph_executions"
),
@@ -228,12 +165,9 @@ def invoke_cli():
restoration=RestorationServices(config),
)
system_graphs = create_system_graphs(services.graph_library)
system_graph_names = set([g.name for g in system_graphs])
invoker = Invoker(services)
session: GraphExecutionState = invoker.create_execution_state()
parser = get_command_parser(services)
parser = get_command_parser()
re_negid = re.compile('^-[0-9]+$')
@@ -251,12 +185,11 @@ def invoke_cli():
try:
# Refresh the state of the session
#history = list(get_graph_execution_history(context.session))
history = list(reversed(context.nodes_added))
history = list(get_graph_execution_history(context.session))
# Split the command for piping
cmds = cmd_input.split("|")
start_id = len(context.nodes_added)
start_id = len(history)
current_id = start_id
new_invocations = list()
for cmd in cmds:
@@ -272,24 +205,8 @@ def invoke_cli():
args[field_name] = field_default
# Parse invocation
command: CliCommand = None # type:ignore
system_graph: LibraryGraph|None = None
if args['type'] in system_graph_names:
system_graph = next(filter(lambda g: g.name == args['type'], system_graphs))
invocation = GraphInvocation(graph=system_graph.graph, id=str(current_id))
for exposed_input in system_graph.exposed_inputs:
if exposed_input.alias in args:
node = invocation.graph.get_node(exposed_input.node_path)
field = exposed_input.field
setattr(node, field, args[exposed_input.alias])
command = CliCommand(command = invocation)
context.graph_nodes[invocation.id] = system_graph.id
else:
args["id"] = current_id
command = CliCommand(command=args)
if command is None:
continue
args["id"] = current_id
command = CliCommand(command=args)
# Run any CLI commands immediately
if isinstance(command.command, BaseCommand):
@@ -300,7 +217,6 @@ def invoke_cli():
command.command.run(context)
continue
# TODO: handle linking with library graphs
# Pipe previous command output (if there was a previous command)
edges: list[Edge] = list()
if len(history) > 0 or current_id != start_id:
@@ -313,7 +229,7 @@ def invoke_cli():
else context.session.graph.get_node(from_id)
)
matching_edges = generate_matching_edges(
from_node, command.command, context
from_node, command.command
)
edges.extend(matching_edges)
@@ -326,7 +242,7 @@ def invoke_cli():
link_node = context.session.graph.get_node(node_id)
matching_edges = generate_matching_edges(
link_node, command.command, context
link_node, command.command
)
matching_destinations = [e.destination for e in matching_edges]
edges = [e for e in edges if e.destination not in matching_destinations]
@@ -340,14 +256,12 @@ def invoke_cli():
if re_negid.match(node_id):
node_id = str(current_id + int(node_id))
# TODO: handle missing input/output
node_output = get_node_outputs(context.session.graph.get_node(node_id), context)[link[1]]
node_input = get_node_inputs(command.command, context)[link[2]]
edges.append(
Edge(
source=EdgeConnection(node_id=node_output.node_path, field=node_output.field),
destination=EdgeConnection(node_id=node_input.node_path, field=node_input.field)
source=EdgeConnection(node_id=node_id, field=link[1]),
destination=EdgeConnection(
node_id=command.command.id, field=link[2]
)
)
)
@@ -356,10 +270,10 @@ def invoke_cli():
current_id = current_id + 1
# Add the node to the session
context.add_node(command.command)
context.session.add_node(command.command)
for edge in edges:
print(edge)
context.add_edge(edge)
context.session.add_edge(edge)
# Execute all remaining nodes
invoke_all(context)
@@ -371,7 +285,7 @@ def invoke_cli():
except SessionError:
# Start a new session
print("Session error: creating a new session")
context.reset()
context.session = context.invoker.create_execution_state()
except ExitCli:
break
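
One version of `generate_matching_edges` above intersects the type hints of node A's output with node B's inputs and emits an edge per type-compatible field; the other routes through exposed graph fields. A toy sketch of the type-hint variant follows; the node classes and the exact-match compatibility rule are simplifications.

```python
# Sketch of the edge-matching idea above: intersect the output field names of
# one node with the input field names of the next, keep the type-compatible
# ones, and emit one edge per surviving field. ImageOut/BlurIn are toys.
from dataclasses import dataclass
from typing import get_type_hints


@dataclass
class ImageOut:
    image: str
    width: int


@dataclass
class BlurIn:
    image: str
    radius: float


def generate_matching_edges(out_type: type, in_type: type) -> list[tuple[str, str]]:
    afields = get_type_hints(out_type)
    bfields = get_type_hints(in_type)
    matching = set(afields) & set(bfields)
    # Keep only fields whose types are compatible (exact match, in this toy).
    matching = [f for f in matching if afields[f] == bfields[f]]
    return [(f"a.{f}", f"b.{f}") for f in matching]


print(generate_matching_edges(ImageOut, BlurIn))  # [('a.image', 'b.image')]
```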

View File

@@ -95,7 +95,7 @@ class UIConfig(TypedDict, total=False):
],
]
tags: List[str]
title: str
class CustomisedSchemaExtra(TypedDict):
ui: UIConfig

View File

@@ -1,17 +1,16 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
from typing import Literal, Optional
from typing import Literal
import cv2 as cv
import numpy as np
import numpy.random
from PIL import Image, ImageOps
from pydantic import Field
from .baseinvocation import (
BaseInvocation,
InvocationConfig,
InvocationContext,
BaseInvocationOutput,
)
from ..services.image_storage import ImageType
from .baseinvocation import BaseInvocation, InvocationContext, BaseInvocationOutput
from .image import ImageField, ImageOutput
class IntCollectionOutput(BaseInvocationOutput):
@@ -34,9 +33,7 @@ class RangeInvocation(BaseInvocation):
step: int = Field(default=1, description="The step of the range")
def invoke(self, context: InvocationContext) -> IntCollectionOutput:
return IntCollectionOutput(
collection=list(range(self.start, self.stop, self.step))
)
return IntCollectionOutput(collection=list(range(self.start, self.stop, self.step)))
class RandomRangeInvocation(BaseInvocation):
@@ -46,19 +43,8 @@ class RandomRangeInvocation(BaseInvocation):
# Inputs
low: int = Field(default=0, description="The inclusive low value")
high: int = Field(
default=np.iinfo(np.int32).max, description="The exclusive high value"
)
high: int = Field(default=np.iinfo(np.int32).max, description="The exclusive high value")
size: int = Field(default=1, description="The number of values to generate")
seed: Optional[int] = Field(
ge=0,
le=np.iinfo(np.int32).max,
description="The seed for the RNG",
default_factory=lambda: numpy.random.randint(0, np.iinfo(np.int32).max),
)
def invoke(self, context: InvocationContext) -> IntCollectionOutput:
rng = np.random.default_rng(self.seed)
return IntCollectionOutput(
collection=list(rng.integers(low=self.low, high=self.high, size=self.size))
)
return IntCollectionOutput(collection=list(numpy.random.randint(self.low, self.high, size=self.size)))
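
The `RandomRangeInvocation` hunk above hinges on seeding: giving `seed` a random `default_factory` and drawing from `np.random.default_rng(self.seed)` makes a run reproducible whenever the same seed is supplied. A self-contained sketch of that pattern (pydantic v1 style; `RandomRange` is a toy stand-in):

```python
# Sketch of the reproducible-random-range pattern above: seed defaults to a
# fresh random value, and the same seed replays the same collection.
from typing import Optional

import numpy as np
from pydantic import BaseModel, Field


class RandomRange(BaseModel):
    low: int = Field(default=0, description="The inclusive low value")
    high: int = Field(default=np.iinfo(np.int32).max, description="The exclusive high value")
    size: int = Field(default=1, description="The number of values to generate")
    seed: Optional[int] = Field(
        ge=0,
        le=np.iinfo(np.int32).max,
        default_factory=lambda: np.random.randint(0, np.iinfo(np.int32).max),
    )

    def invoke(self) -> list:
        rng = np.random.default_rng(self.seed)
        return list(rng.integers(low=self.low, high=self.high, size=self.size))


a, b = RandomRange(seed=42, size=3), RandomRange(seed=42, size=3)
assert a.invoke() == b.invoke()  # same seed, same values
```

By contrast, the `numpy.random.randint(self.low, self.high, ...)` form shown in the hunk draws from NumPy's global state, so it cannot be replayed from a stored seed.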

View File

@@ -56,14 +56,9 @@ class CvInpaintInvocation(BaseInvocation, CvInvocationConfig):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
metadata = context.services.metadata.build_metadata(
session_id=context.graph_execution_state_id, node=self
)
context.services.images.save(image_type, image_name, image_inpainted, metadata)
context.services.images.save(image_type, image_name, image_inpainted, self.dict())
return build_image_output(
image_type=image_type,
image_name=image_name,
image=image_inpainted,
)
)

View File

@@ -4,9 +4,7 @@ from functools import partial
from typing import Literal, Optional, Union
import numpy as np
from diffusers import ControlNetModel
from torch import Tensor
import torch
from pydantic import BaseModel, Field
@@ -16,7 +14,8 @@ from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
from .image import ImageOutput, build_image_output
from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator
from ...backend.stable_diffusion import PipelineIntermediateState
from ..util.step_callback import stable_diffusion_step_callback
from ..models.exceptions import CanceledException
from ..util.step_callback import diffusers_step_callback_adapter
SAMPLER_NAME_VALUES = Literal[tuple(InvokeAIGenerator.schedulers())]
@@ -55,56 +54,35 @@ class TextToImageInvocation(BaseInvocation, SDImageInvocation):
seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
model: str = Field(default="", description="The model to use (currently ignored)")
progress_images: bool = Field(default=False, description="Whether or not to produce progress images during generation", )
control_model: Optional[str] = Field(default=None, description="The control model to use")
control_image: Optional[ImageField] = Field(default=None, description="The processed control image")
# control_strength: Optional[float] = Field(default=1.0, ge=0, le=1, description="The strength of the controlnet")
# fmt: on
# TODO: pass this an emitter method or something? or a session for dispatching?
def dispatch_progress(
self,
context: InvocationContext,
source_node_id: str,
intermediate_state: PipelineIntermediateState,
self, context: InvocationContext, intermediate_state: PipelineIntermediateState
) -> None:
stable_diffusion_step_callback(
context=context,
intermediate_state=intermediate_state,
node=self.dict(),
source_node_id=source_node_id,
)
if (context.services.queue.is_canceled(context.graph_execution_state_id)):
raise CanceledException
step = intermediate_state.step
if intermediate_state.predicted_original is not None:
# Some schedulers report not only the noisy latents at the current timestep,
# but also their estimate so far of what the de-noised latents will be.
sample = intermediate_state.predicted_original
else:
sample = intermediate_state.latents
diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
def invoke(self, context: InvocationContext) -> ImageOutput:
# Handle invalid model parameter
model = choose_model(context.services.model_manager, self.model)
self.model_name = model["model_name"]
# loading controlnet image (currently requires pre-processed image)
control_image = (
None if self.control_image is None
else context.services.images.get(
self.control_image.image_type, self.control_image.image_name
)
)
# loading controlnet model
if (self.control_model is None or self.control_model==''):
control_model = None
else:
# FIXME: change this to dropdown menu?
control_model = ControlNetModel.from_pretrained(self.control_model,
torch_dtype=torch.float16).to("cuda")
# Get the source node id (we are invoking the prepared node)
graph_execution_state = context.services.graph_execution_manager.get(
context.graph_execution_state_id
)
source_node_id = graph_execution_state.prepared_source_mapping[self.id]
txt2img = Txt2Img(model, control_model=control_model)
outputs = txt2img.generate(
outputs = Txt2Img(model).generate(
prompt=self.prompt,
step_callback=partial(self.dispatch_progress, context, source_node_id),
control_image=control_image,
step_callback=partial(self.dispatch_progress, context),
**self.dict(
exclude={"prompt", "control_image" }
exclude={"prompt"}
), # Shorthand for passing all of the parameters above manually
)
# Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
@@ -119,17 +97,21 @@ class TextToImageInvocation(BaseInvocation, SDImageInvocation):
context.graph_execution_state_id, self.id
)
metadata = context.services.metadata.build_metadata(
session_id=context.graph_execution_state_id, node=self
)
graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
source_id = graph_execution_state.prepared_source_mapping[self.id]
invocation = graph_execution_state.execution_graph.get_node(self.id)
metadata = {
"session": context.graph_execution_state_id,
"source_id": source_id,
"invocation": invocation.dict()
}
context.services.images.save(
image_type, image_name, generate_output.image, metadata
)
context.services.images.save(image_type, image_name, generate_output.image, metadata)
return build_image_output(
image_type=image_type,
image_name=image_name,
image=generate_output.image,
image=generate_output.image
)
@@ -149,17 +131,20 @@ class ImageToImageInvocation(TextToImageInvocation):
)
def dispatch_progress(
self,
context: InvocationContext,
source_node_id: str,
intermediate_state: PipelineIntermediateState,
) -> None:
stable_diffusion_step_callback(
context=context,
intermediate_state=intermediate_state,
node=self.dict(),
source_node_id=source_node_id,
)
self, context: InvocationContext, intermediate_state: PipelineIntermediateState
) -> None:
if (context.services.queue.is_canceled(context.graph_execution_state_id)):
raise CanceledException
step = intermediate_state.step
if intermediate_state.predicted_original is not None:
# Some schedulers report not only the noisy latents at the current timestep,
# but also their estimate so far of what the de-noised latents will be.
sample = intermediate_state.predicted_original
else:
sample = intermediate_state.latents
diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = (
@@ -173,22 +158,17 @@ class ImageToImageInvocation(TextToImageInvocation):
# Handle invalid model parameter
model = choose_model(context.services.model_manager, self.model)
# Get the source node id (we are invoking the prepared node)
graph_execution_state = context.services.graph_execution_manager.get(
context.graph_execution_state_id
)
source_node_id = graph_execution_state.prepared_source_mapping[self.id]
self.model = model["model_name"]
outputs = Img2Img(model).generate(
prompt=self.prompt,
init_image=image,
init_mask=mask,
step_callback=partial(self.dispatch_progress, context, source_node_id),
**self.dict(
exclude={"prompt", "image", "mask"}
), # Shorthand for passing all of the parameters above manually
)
prompt=self.prompt,
init_image=image,
init_mask=mask,
step_callback=partial(self.dispatch_progress, context),
**self.dict(
exclude={"prompt", "image", "mask"}
), # Shorthand for passing all of the parameters above manually
)
# Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
# each time it is called. We only need the first one.
@@ -203,19 +183,13 @@ class ImageToImageInvocation(TextToImageInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
metadata = context.services.metadata.build_metadata(
session_id=context.graph_execution_state_id, node=self
)
context.services.images.save(image_type, image_name, result_image, metadata)
context.services.images.save(image_type, image_name, result_image, self.dict())
return build_image_output(
image_type=image_type,
image_name=image_name,
image=result_image,
image=result_image
)
class InpaintInvocation(ImageToImageInvocation):
"""Generates an image using inpaint."""
@@ -231,17 +205,20 @@ class InpaintInvocation(ImageToImageInvocation):
)
def dispatch_progress(
self,
context: InvocationContext,
source_node_id: str,
intermediate_state: PipelineIntermediateState,
) -> None:
stable_diffusion_step_callback(
context=context,
intermediate_state=intermediate_state,
node=self.dict(),
source_node_id=source_node_id,
)
self, context: InvocationContext, intermediate_state: PipelineIntermediateState
) -> None:
if (context.services.queue.is_canceled(context.graph_execution_state_id)):
raise CanceledException
step = intermediate_state.step
if intermediate_state.predicted_original is not None:
# Some schedulers report not only the noisy latents at the current timestep,
# but also their estimate so far of what the de-noised latents will be.
sample = intermediate_state.predicted_original
else:
sample = intermediate_state.latents
diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = (
@@ -259,22 +236,17 @@ class InpaintInvocation(ImageToImageInvocation):
# Handle invalid model parameter
model = choose_model(context.services.model_manager, self.model)
# Get the source node id (we are invoking the prepared node)
graph_execution_state = context.services.graph_execution_manager.get(
context.graph_execution_state_id
)
source_node_id = graph_execution_state.prepared_source_mapping[self.id]
self.model = model["model_name"]
outputs = Inpaint(model).generate(
prompt=self.prompt,
init_img=image,
init_mask=mask,
step_callback=partial(self.dispatch_progress, context, source_node_id),
**self.dict(
exclude={"prompt", "image", "mask"}
), # Shorthand for passing all of the parameters above manually
)
prompt=self.prompt,
init_img=image,
init_mask=mask,
step_callback=partial(self.dispatch_progress, context),
**self.dict(
exclude={"prompt", "image", "mask"}
), # Shorthand for passing all of the parameters above manually
)
# Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
# each time it is called. We only need the first one.
@@ -289,14 +261,9 @@ class InpaintInvocation(ImageToImageInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
metadata = context.services.metadata.build_metadata(
session_id=context.graph_execution_state_id, node=self
)
context.services.images.save(image_type, image_name, result_image, metadata)
context.services.images.save(image_type, image_name, result_image, self.dict())
return build_image_output(
image_type=image_type,
image_name=image_name,
image=result_image,
image=result_image
)
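
A recurring change in this file is how progress is dispatched: one version resolves the prepared node back to its source node via `graph_execution_state.prepared_source_mapping` and binds that id, together with the context, into the generator's `step_callback` using `functools.partial`. A toy sketch of that wiring follows; every class and dict here is a stand-in for the services in the diff.

```python
# Sketch of the progress wiring above: look up the source node id for the
# prepared node being executed, then bind it into the step callback.
from functools import partial


class FakeState:
    # Maps prepared (execution-graph) node ids back to source-graph node ids.
    prepared_source_mapping = {"prepared-1": "source-7"}


def dispatch_progress(context: dict, source_node_id: str, intermediate_state: dict) -> None:
    print(f"session={context['session']} node={source_node_id} step={intermediate_state['step']}")


def generate(step_callback) -> None:
    for step in range(3):  # pretend denoising loop
        step_callback({"step": step})


context = {"session": "abc"}
source_node_id = FakeState().prepared_source_mapping["prepared-1"]
generate(step_callback=partial(dispatch_progress, context, source_node_id))
```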

View File

@@ -1,5 +1,6 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from datetime import datetime, timezone
from typing import Literal, Optional
import numpy
@@ -7,6 +8,7 @@ from PIL import Image, ImageFilter, ImageOps
from pydantic import BaseModel, Field
from ..models.image import ImageField, ImageType
from ..services.invocation_services import InvocationServices
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
@@ -32,30 +34,27 @@ class ImageOutput(BaseInvocationOutput):
# fmt: off
type: Literal["image"] = "image"
image: ImageField = Field(default=None, description="The output image")
width: Optional[int] = Field(default=None, description="The width of the image in pixels")
height: Optional[int] = Field(default=None, description="The height of the image in pixels")
width: int = Field(description="The width of the image in pixels")
height: int = Field(description="The height of the image in pixels")
# fmt: on
class Config:
schema_extra = {
"required": ["type", "image", "width", "height", "mode"]
"required": [
"type",
"image",
"width",
"height",
]
}
def build_image_output(
image_type: ImageType, image_name: str, image: Image.Image
) -> ImageOutput:
"""Builds an ImageOutput and its ImageField"""
image_field = ImageField(
image_name=image_name,
image_type=image_type,
)
return ImageOutput(
image=image_field,
width=image.width,
height=image.height,
mode=image.mode,
)
image_field = ImageField(image_name=image_name, image_type=image_type)
return ImageOutput(image=image_field, width=image.width, height=image.height)
class MaskOutput(BaseInvocationOutput):
@@ -75,24 +74,23 @@ class MaskOutput(BaseInvocationOutput):
}
class LoadImageInvocation(BaseInvocation):
"""Load an image and provide it as output."""
# # TODO: this isn't really necessary anymore
# class LoadImageInvocation(BaseInvocation):
# """Load an image from a filename and provide it as output."""
# #fmt: off
# type: Literal["load_image"] = "load_image"
# fmt: off
type: Literal["load_image"] = "load_image"
# # Inputs
# image_type: ImageType = Field(description="The type of the image")
# image_name: str = Field(description="The name of the image")
# #fmt: on
# Inputs
image_type: ImageType = Field(description="The type of the image")
image_name: str = Field(description="The name of the image")
# fmt: on
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get(self.image_type, self.image_name)
return build_image_output(
image_type=self.image_type,
image_name=self.image_name,
image=image,
)
# def invoke(self, context: InvocationContext) -> ImageOutput:
# return ImageOutput(
# image_type=self.image_type,
# image_name=self.image_name,
# image=result_image
# )
class ShowImageInvocation(BaseInvocation):
@@ -147,16 +145,9 @@ class CropImageInvocation(BaseInvocation, PILInvocationConfig):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
metadata = context.services.metadata.build_metadata(
session_id=context.graph_execution_state_id, node=self
)
context.services.images.save(image_type, image_name, image_crop, metadata)
context.services.images.save(image_type, image_name, image_crop, self.dict())
return build_image_output(
image_type=image_type,
image_name=image_name,
image=image_crop,
image_type=image_type, image_name=image_name, image=image_crop
)
@@ -205,16 +196,9 @@ class PasteImageInvocation(BaseInvocation, PILInvocationConfig):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
metadata = context.services.metadata.build_metadata(
session_id=context.graph_execution_state_id, node=self
)
context.services.images.save(image_type, image_name, new_image, metadata)
context.services.images.save(image_type, image_name, new_image, self.dict())
return build_image_output(
image_type=image_type,
image_name=image_name,
image=new_image,
image_type=image_type, image_name=image_name, image=new_image
)
@@ -242,12 +226,7 @@ class MaskFromAlphaInvocation(BaseInvocation, PILInvocationConfig):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
metadata = context.services.metadata.build_metadata(
session_id=context.graph_execution_state_id, node=self
)
context.services.images.save(image_type, image_name, image_mask, metadata)
context.services.images.save(image_type, image_name, image_mask, self.dict())
return MaskOutput(mask=ImageField(image_type=image_type, image_name=image_name))
@@ -279,12 +258,7 @@ class BlurInvocation(BaseInvocation, PILInvocationConfig):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
metadata = context.services.metadata.build_metadata(
session_id=context.graph_execution_state_id, node=self
)
context.services.images.save(image_type, image_name, blur_image, metadata)
context.services.images.save(image_type, image_name, blur_image, self.dict())
return build_image_output(
image_type=image_type, image_name=image_name, image=blur_image
)
@@ -316,12 +290,7 @@ class LerpInvocation(BaseInvocation, PILInvocationConfig):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
metadata = context.services.metadata.build_metadata(
session_id=context.graph_execution_state_id, node=self
)
context.services.images.save(image_type, image_name, lerp_image, metadata)
context.services.images.save(image_type, image_name, lerp_image, self.dict())
return build_image_output(
image_type=image_type, image_name=image_name, image=lerp_image
)
@@ -358,12 +327,7 @@ class InverseLerpInvocation(BaseInvocation, PILInvocationConfig):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
metadata = context.services.metadata.build_metadata(
session_id=context.graph_execution_state_id, node=self
)
context.services.images.save(image_type, image_name, ilerp_image, metadata)
context.services.images.save(image_type, image_name, ilerp_image, self.dict())
return build_image_output(
image_type=image_type, image_name=image_name, image=ilerp_image
)
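
The `build_image_output` helper above packages a saved image into an `ImageField` and records its dimensions. A minimal runnable version with stand-in pydantic models (the real classes carry more fields and schema config):

```python
# Sketch of build_image_output above, using minimal pydantic stand-ins for
# the real ImageField/ImageOutput classes.
from PIL import Image
from pydantic import BaseModel


class ImageField(BaseModel):
    image_type: str
    image_name: str


class ImageOutput(BaseModel):
    type: str = "image"
    image: ImageField
    width: int
    height: int


def build_image_output(image_type: str, image_name: str, image: Image.Image) -> ImageOutput:
    """Builds an ImageOutput and its ImageField."""
    image_field = ImageField(image_name=image_name, image_type=image_type)
    return ImageOutput(image=image_field, width=image.width, height=image.height)


print(build_image_output("results", "foo.png", Image.new("RGB", (512, 512))))
```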

View File

@@ -1,13 +1,12 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
import random
from typing import Literal, Optional
from pydantic import BaseModel, Field
import torch
from invokeai.app.models.exceptions import CanceledException
from invokeai.app.invocations.util.choose_model import choose_model
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.app.util.step_callback import diffusers_step_callback_adapter
from ...backend.model_management.model_manager import ModelManager
from ...backend.util.devices import choose_torch_device, torch_dtype
@@ -31,8 +30,6 @@ class LatentsField(BaseModel):
latents_name: Optional[str] = Field(default=None, description="The name of the latents")
class Config:
schema_extra = {"required": ["latents_name"]}
class LatentsOutput(BaseInvocationOutput):
"""Base class for invocations that output latents"""
@@ -102,17 +99,13 @@ def get_noise(width:int, height:int, device:torch.device, seed:int = 0, latent_c
return x
def random_seed():
return random.randint(0, np.iinfo(np.uint32).max)
class NoiseInvocation(BaseInvocation):
"""Generates latent noise."""
type: Literal["noise"] = "noise"
# Inputs
seed: int = Field(ge=0, le=np.iinfo(np.uint32).max, description="The seed to use", default_factory=random_seed)
seed: int = Field(default=0, ge=0, le=np.iinfo(np.uint32).max, description="The seed to use", )
width: int = Field(default=512, multiple_of=64, gt=0, description="The width of the resulting noise", )
height: int = Field(default=512, multiple_of=64, gt=0, description="The height of the resulting noise", )
@@ -172,15 +165,22 @@ class TextToLatentsInvocation(BaseInvocation):
# TODO: pass this an emitter method or something? or a session for dispatching?
def dispatch_progress(
self, context: InvocationContext, source_node_id: str, intermediate_state: PipelineIntermediateState
) -> None:
stable_diffusion_step_callback(
context=context,
intermediate_state=intermediate_state,
node=self.dict(),
source_node_id=source_node_id,
)
self, context: InvocationContext, intermediate_state: PipelineIntermediateState
) -> None:
if (context.services.queue.is_canceled(context.graph_execution_state_id)):
raise CanceledException
step = intermediate_state.step
if intermediate_state.predicted_original is not None:
# Some schedulers report not only the noisy latents at the current timestep,
# but also their estimate so far of what the de-noised latents will be.
sample = intermediate_state.predicted_original
else:
sample = intermediate_state.latents
diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
def get_model(self, model_manager: ModelManager) -> StableDiffusionGeneratorPipeline:
model_info = choose_model(model_manager, self.model)
model_name = model_info['model_name']
@@ -190,7 +190,7 @@ class TextToLatentsInvocation(BaseInvocation):
model=model,
scheduler_name=self.scheduler
)
if isinstance(model, DiffusionPipeline):
for component in [model.unet, model.vae]:
configure_model_padding(component,
@@ -226,12 +226,8 @@ class TextToLatentsInvocation(BaseInvocation):
def invoke(self, context: InvocationContext) -> LatentsOutput:
noise = context.services.latents.get(self.noise.latents_name)
# Get the source node id (we are invoking the prepared node)
graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
source_node_id = graph_execution_state.prepared_source_mapping[self.id]
def step_callback(state: PipelineIntermediateState):
self.dispatch_progress(context, source_node_id, state)
self.dispatch_progress(context, state)
model = self.get_model(context.services.model_manager)
conditioning_data = self.get_conditioning_data(model)
@@ -280,12 +276,8 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
noise = context.services.latents.get(self.noise.latents_name)
latent = context.services.latents.get(self.latents.latents_name)
# Get the source node id (we are invoking the prepared node)
graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
source_node_id = graph_execution_state.prepared_source_mapping[self.id]
def step_callback(state: PipelineIntermediateState):
self.dispatch_progress(context, source_node_id, state)
self.dispatch_progress(context, state)
model = self.get_model(context.services.model_manager)
conditioning_data = self.get_conditioning_data(model)
@@ -295,7 +287,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
initial_latents = latent if self.strength < 1.0 else torch.zeros_like(
latent, device=model.device, dtype=latent.dtype
)
timesteps, _ = model.get_img2img_timesteps(
self.steps,
self.strength,
@@ -358,12 +350,7 @@ class LatentsToImageInvocation(BaseInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
metadata = context.services.metadata.build_metadata(
session_id=context.graph_execution_state_id, node=self
)
context.services.images.save(image_type, image_name, image, metadata)
context.services.images.save(image_type, image_name, image, self.dict())
return build_image_output(
image_type=image_type,
image_name=image_name,
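One behavioral detail in the seed field above: default_factory=random_seed draws a fresh seed for every NoiseInvocation instance, while the plain default=0 variant reuses the same value each time. A standalone illustration with bare pydantic:

import random
from pydantic import BaseModel, Field

def random_seed():
    return random.randint(0, 2**32 - 1)  # same range as np.iinfo(np.uint32).max

class WithFactory(BaseModel):
    seed: int = Field(default_factory=random_seed)

class WithDefault(BaseModel):
    seed: int = Field(default=0)

print(WithFactory().seed, WithFactory().seed)  # almost certainly different
print(WithDefault().seed, WithDefault().seed)  # always 0 0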

View File

@@ -1,18 +0,0 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
from typing import Literal
from pydantic import Field
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
from .math import IntOutput
# Pass-through parameter nodes - used by subgraphs
class ParamIntInvocation(BaseInvocation):
"""An integer parameter"""
#fmt: off
type: Literal["param_int"] = "param_int"
a: int = Field(default=0, description="The integer value")
#fmt: on
def invoke(self, context: InvocationContext) -> IntOutput:
return IntOutput(a=self.a)
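Because invoke() ignores its context, a parameter node is a pure echo; that is what lets a single exposed subgraph input fan out to several consumers (see the t2i library graph later in this diff). A minimal sketch against the removed side of this file:

from invokeai.app.invocations.params import ParamIntInvocation  # path assumed

node = ParamIntInvocation(id="width", a=512)
print(node.invoke(None).a)  # -> 512; the context argument is unused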

View File

@@ -1,9 +1,10 @@
from datetime import datetime, timezone
from typing import Literal, Union
from pydantic import Field
from invokeai.app.models.image import ImageField, ImageType
from ..services.invocation_services import InvocationServices
from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
from .image import ImageOutput, build_image_output
@@ -43,12 +44,7 @@ class RestoreFaceInvocation(BaseInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
metadata = context.services.metadata.build_metadata(
session_id=context.graph_execution_state_id, node=self
)
context.services.images.save(image_type, image_name, results[0][0], metadata)
context.services.images.save(image_type, image_name, results[0][0], self.dict())
return build_image_output(
image_type=image_type,
image_name=image_name,

View File

@@ -1,10 +1,12 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from datetime import datetime, timezone
from typing import Literal, Union
from pydantic import Field
from invokeai.app.models.image import ImageField, ImageType
from ..services.invocation_services import InvocationServices
from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
from .image import ImageOutput, build_image_output
@@ -47,12 +49,7 @@ class UpscaleInvocation(BaseInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
metadata = context.services.metadata.build_metadata(
session_id=context.graph_execution_state_id, node=self
)
context.services.images.save(image_type, image_name, results[0][0], metadata)
context.services.images.save(image_type, image_name, results[0][0], self.dict())
return build_image_output(
image_type=image_type,
image_name=image_name,

View File

@@ -9,14 +9,6 @@ class ImageType(str, Enum):
UPLOAD = "uploads"
def is_image_type(obj):
try:
ImageType(obj)
except ValueError:
return False
return True
class ImageField(BaseModel):
"""An image field used for passing image objects between invocations"""
@@ -26,4 +18,9 @@ class ImageField(BaseModel):
image_name: Optional[str] = Field(default=None, description="The name of the image")
class Config:
schema_extra = {"required": ["image_type", "image_name"]}
schema_extra = {
"required": [
"image_type",
"image_name",
]
}

View File

@@ -0,0 +1,26 @@
from typing import Any, Optional, Dict
from pydantic import BaseModel, Field
class InvokeAIMetadata(BaseModel):
"""An image's InvokeAI-specific metadata"""
session: Optional[str] = Field(description="The session that generated this image")
source_id: Optional[str] = Field(
description="The source id of the invocation that generated this image"
)
# TODO: figure out metadata
invocation: Optional[Dict[str, Any]] = Field(
default={}, description="The prepared invocation that generated this image"
)
class ImageMetadata(BaseModel):
"""An image's general metadata"""
created: int = Field(description="The creation timestamp of the image")
width: int = Field(description="The width of the image in pixels")
height: int = Field(description="The height of the image in pixels")
invokeai: Optional[InvokeAIMetadata] = Field(
default={}, description="The image's InvokeAI-specific metadata"
)
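A quick sketch of a populated record under these new models; the values are illustrative, and the import path follows the invokeai.app.models.metadata usage elsewhere in this diff:

from invokeai.app.models.metadata import ImageMetadata, InvokeAIMetadata

meta = ImageMetadata(
    created=1681268957,
    width=512,
    height=512,
    invokeai=InvokeAIMetadata(
        session="some-session-id",
        source_id="5",
        invocation={"type": "noise", "seed": 123},
    ),
)
print(meta.json())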

View File

@@ -1,56 +0,0 @@
from ..invocations.latent import LatentsToImageInvocation, NoiseInvocation, TextToLatentsInvocation
from ..invocations.params import ParamIntInvocation
from .graph import Edge, EdgeConnection, ExposedNodeInput, ExposedNodeOutput, Graph, LibraryGraph
from .item_storage import ItemStorageABC
default_text_to_image_graph_id = '539b2af5-2b4d-4d8c-8071-e54a3255fc74'
def create_text_to_image() -> LibraryGraph:
return LibraryGraph(
id=default_text_to_image_graph_id,
name='t2i',
description='Converts text to an image',
graph=Graph(
nodes={
'width': ParamIntInvocation(id='width', a=512),
'height': ParamIntInvocation(id='height', a=512),
'3': NoiseInvocation(id='3'),
'4': TextToLatentsInvocation(id='4'),
'5': LatentsToImageInvocation(id='5')
},
edges=[
Edge(source=EdgeConnection(node_id='width', field='a'), destination=EdgeConnection(node_id='3', field='width')),
Edge(source=EdgeConnection(node_id='height', field='a'), destination=EdgeConnection(node_id='3', field='height')),
Edge(source=EdgeConnection(node_id='width', field='a'), destination=EdgeConnection(node_id='4', field='width')),
Edge(source=EdgeConnection(node_id='height', field='a'), destination=EdgeConnection(node_id='4', field='height')),
Edge(source=EdgeConnection(node_id='3', field='noise'), destination=EdgeConnection(node_id='4', field='noise')),
Edge(source=EdgeConnection(node_id='4', field='latents'), destination=EdgeConnection(node_id='5', field='latents')),
]
),
exposed_inputs=[
ExposedNodeInput(node_path='4', field='prompt', alias='prompt'),
ExposedNodeInput(node_path='width', field='a', alias='width'),
ExposedNodeInput(node_path='height', field='a', alias='height')
],
exposed_outputs=[
ExposedNodeOutput(node_path='5', field='image', alias='image')
])
def create_system_graphs(graph_library: ItemStorageABC[LibraryGraph]) -> list[LibraryGraph]:
"""Creates the default system graphs, or adds new versions if the old ones don't match"""
graphs: list[LibraryGraph] = list()
text_to_image = graph_library.get(default_text_to_image_graph_id)
# TODO: Check if the graph is the same as the default one, and if not, update it
#if text_to_image is None:
text_to_image = create_text_to_image()
graph_library.set(text_to_image)
graphs.append(text_to_image)
return graphs
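A hedged usage sketch against the removed side of this file: build the default graph and inspect its exposed aliases (the default_graphs module name is assumed from context):

from invokeai.app.services.default_graphs import create_text_to_image  # path assumed

g = create_text_to_image()
print([e.alias for e in g.exposed_inputs])   # ['prompt', 'width', 'height']
print([e.alias for e in g.exposed_outputs])  # ['image']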

View File

@@ -1,9 +1,10 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from typing import Any
from invokeai.app.api.models.images import ProgressImage
from invokeai.app.util.misc import get_timestamp
from typing import Any, Dict, TypedDict
ProgressImage = TypedDict(
"ProgressImage", {"dataURL": str, "width": int, "height": int}
)
class EventServiceBase:
session_event: str = "session_event"
@@ -13,8 +14,7 @@ class EventServiceBase:
def dispatch(self, event_name: str, payload: Any) -> None:
pass
def __emit_session_event(self, event_name: str, payload: dict) -> None:
payload["timestamp"] = get_timestamp()
def __emit_session_event(self, event_name: str, payload: Dict) -> None:
self.dispatch(
event_name=EventServiceBase.session_event,
payload=dict(event=event_name, data=payload),
@@ -25,8 +25,8 @@ class EventServiceBase:
def emit_generator_progress(
self,
graph_execution_state_id: str,
node: dict,
source_node_id: str,
invocation_dict: dict,
source_id: str,
progress_image: ProgressImage | None,
step: int,
total_steps: int,
@@ -36,60 +36,52 @@ class EventServiceBase:
event_name="generator_progress",
payload=dict(
graph_execution_state_id=graph_execution_state_id,
node=node,
source_node_id=source_node_id,
progress_image=progress_image.dict() if progress_image is not None else None,
invocation=invocation_dict,
source_id=source_id,
progress_image=progress_image,
step=step,
total_steps=total_steps,
),
)
def emit_invocation_complete(
self,
graph_execution_state_id: str,
result: dict,
node: dict,
source_node_id: str,
self, graph_execution_state_id: str, result: Dict, invocation_dict: Dict, source_id: str,
) -> None:
"""Emitted when an invocation has completed"""
self.__emit_session_event(
event_name="invocation_complete",
payload=dict(
graph_execution_state_id=graph_execution_state_id,
node=node,
source_node_id=source_node_id,
invocation=invocation_dict,
source_id=source_id,
result=result,
),
)
def emit_invocation_error(
self,
graph_execution_state_id: str,
node: dict,
source_node_id: str,
error: str,
self, graph_execution_state_id: str, invocation_dict: Dict, source_id: str, error: str
) -> None:
"""Emitted when an invocation has completed"""
self.__emit_session_event(
event_name="invocation_error",
payload=dict(
graph_execution_state_id=graph_execution_state_id,
node=node,
source_node_id=source_node_id,
invocation=invocation_dict,
source_id=source_id,
error=error,
),
)
def emit_invocation_started(
self, graph_execution_state_id: str, node: dict, source_node_id: str
self, graph_execution_state_id: str, invocation_dict: Dict, source_id: str
) -> None:
"""Emitted when an invocation has started"""
self.__emit_session_event(
event_name="invocation_started",
payload=dict(
graph_execution_state_id=graph_execution_state_id,
node=node,
source_node_id=source_node_id,
invocation=invocation_dict,
source_id=source_id,
),
)
@@ -97,7 +89,5 @@ class EventServiceBase:
"""Emitted when a session has completed all invocations"""
self.__emit_session_event(
event_name="graph_execution_state_complete",
payload=dict(
graph_execution_state_id=graph_execution_state_id,
),
payload=dict(graph_execution_state_id=graph_execution_state_id),
)
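EventServiceBase.dispatch is a no-op hook, so a concrete service only needs to override it; every emit_* helper funnels through the private session-event wrapper. A minimal sketch (import path assumed; the graph-complete emitter's signature is inferred from its payload):

from typing import Any
from invokeai.app.services.events import EventServiceBase  # path assumed

class PrintEventService(EventServiceBase):
    def dispatch(self, event_name: str, payload: Any) -> None:
        print(event_name, payload)

events = PrintEventService()
events.emit_graph_execution_state_complete(graph_execution_state_id="abc")
# -> session_event {'event': 'graph_execution_state_complete',
#                   'data': {'graph_execution_state_id': 'abc'}}
# (one side of this diff additionally stamps payload["timestamp"])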

View File

@@ -2,6 +2,7 @@
import copy
import itertools
import traceback
import uuid
from types import NoneType
from typing import (
@@ -16,7 +17,7 @@ from typing import (
)
import networkx as nx
from pydantic import BaseModel, root_validator, validator
from pydantic import BaseModel, validator
from pydantic.fields import Field
from ..invocations import *
@@ -25,6 +26,7 @@ from ..invocations.baseinvocation import (
BaseInvocationOutput,
InvocationContext,
)
from .invocation_services import InvocationServices
class EdgeConnection(BaseModel):
@@ -213,7 +215,7 @@ InvocationOutputsUnion = Union[BaseInvocationOutput.get_all_subclasses_tuple()]
class Graph(BaseModel):
id: str = Field(description="The id of this graph", default_factory=lambda: uuid.uuid4().__str__())
id: str = Field(description="The id of this graph", default_factory=uuid.uuid4)
# TODO: use a list (and never use dict in a BaseModel) because pydantic/fastapi hates me
nodes: dict[str, Annotated[InvocationsUnion, Field(discriminator="type")]] = Field(
description="The nodes in this graph", default_factory=dict
@@ -281,8 +283,7 @@ class Graph(BaseModel):
:raises InvalidEdgeError: the provided edge is invalid.
"""
self._validate_edge(edge)
if edge not in self.edges:
if self._is_edge_valid(edge) and edge not in self.edges:
self.edges.append(edge)
else:
raise InvalidEdgeError()
@@ -353,7 +354,7 @@ class Graph(BaseModel):
return True
def _validate_edge(self, edge: Edge):
def _is_edge_valid(self, edge: Edge) -> bool:
"""Validates that a new edge doesn't create a cycle in the graph"""
# Validate that the nodes exist (edges may contain node paths, so we can't just check for nodes directly)
@@ -361,53 +362,54 @@ class Graph(BaseModel):
from_node = self.get_node(edge.source.node_id)
to_node = self.get_node(edge.destination.node_id)
except NodeNotFoundError:
raise InvalidEdgeError("One or both nodes don't exist")
return False
# Validate that an edge to this node+field doesn't already exist
input_edges = self._get_input_edges(edge.destination.node_id, edge.destination.field)
if len(input_edges) > 0 and not isinstance(to_node, CollectInvocation):
raise InvalidEdgeError(f'Edge to node {edge.destination.node_id} field {edge.destination.field} already exists')
return False
# Validate that no cycles would be created
g = self.nx_graph_flat()
g.add_edge(edge.source.node_id, edge.destination.node_id)
if not nx.is_directed_acyclic_graph(g):
raise InvalidEdgeError(f'Edge creates a cycle in the graph')
return False
# Validate that the field types are compatible
if not are_connections_compatible(
from_node, edge.source.field, to_node, edge.destination.field
):
raise InvalidEdgeError(f'Fields are incompatible')
return False
# Validate if iterator output type matches iterator input type (if this edge results in both being set)
if isinstance(to_node, IterateInvocation) and edge.destination.field == "collection":
if not self._is_iterator_connection_valid(
edge.destination.node_id, new_input=edge.source
):
raise InvalidEdgeError(f'Iterator input type does not match iterator output type')
return False
# Validate if iterator input type matches output type (if this edge results in both being set)
if isinstance(from_node, IterateInvocation) and edge.source.field == "item":
if not self._is_iterator_connection_valid(
edge.source.node_id, new_output=edge.destination
):
raise InvalidEdgeError(f'Iterator output type does not match iterator input type')
return False
# Validate if collector input type matches output type (if this edge results in both being set)
if isinstance(to_node, CollectInvocation) and edge.destination.field == "item":
if not self._is_collector_connection_valid(
edge.destination.node_id, new_input=edge.source
):
raise InvalidEdgeError(f'Collector output type does not match collector input type')
return False
# Validate if collector output type matches input type (if this edge results in both being set)
if isinstance(from_node, CollectInvocation) and edge.source.field == "collection":
if not self._is_collector_connection_valid(
edge.source.node_id, new_output=edge.destination
):
raise InvalidEdgeError(f'Collector input type does not match collector output type')
return False
return True
def has_node(self, node_path: str) -> bool:
"""Determines whether or not a node exists in the graph."""
@@ -731,7 +733,7 @@ class Graph(BaseModel):
for sgn in (
gn for gn in self.nodes.values() if isinstance(gn, GraphInvocation)
):
g = sgn.graph.nx_graph_flat(g, self._get_node_path(sgn.id, prefix))
sgn.graph.nx_graph_flat(g, self._get_node_path(sgn.id, prefix))
# TODO: figure out if iteration nodes need to be expanded
@@ -748,7 +750,9 @@ class Graph(BaseModel):
class GraphExecutionState(BaseModel):
"""Tracks the state of a graph execution"""
id: str = Field(description="The id of the execution state", default_factory=lambda: uuid.uuid4().__str__())
id: str = Field(
description="The id of the execution state", default_factory=uuid.uuid4
)
# TODO: Store a reference to the graph instead of the actual graph?
graph: Graph = Field(description="The graph being executed")
@@ -854,8 +858,7 @@ class GraphExecutionState(BaseModel):
def is_complete(self) -> bool:
"""Returns true if the graph is complete"""
node_ids = set(self.graph.nx_graph_flat().nodes)
return self.has_error() or all((k in self.executed for k in node_ids))
return self.has_error() or all((k in self.executed for k in self.graph.nodes))
def has_error(self) -> bool:
"""Returns true if the graph has any errors"""
@@ -943,11 +946,11 @@ class GraphExecutionState(BaseModel):
def _iterator_graph(self) -> nx.DiGraph:
"""Gets a DiGraph with edges to collectors removed so an ancestor search produces all active iterators for any node"""
g = self.graph.nx_graph_flat()
g = self.graph.nx_graph()
collectors = (
n
for n in self.graph.nodes
if isinstance(self.graph.get_node(n), CollectInvocation)
if isinstance(self.graph.nodes[n], CollectInvocation)
)
for c in collectors:
g.remove_edges_from(list(g.in_edges(c)))
@@ -959,7 +962,7 @@ class GraphExecutionState(BaseModel):
iterators = [
n
for n in nx.ancestors(g, node_id)
if isinstance(self.graph.get_node(n), IterateInvocation)
if isinstance(self.graph.nodes[n], IterateInvocation)
]
return iterators
@@ -1095,9 +1098,7 @@ class GraphExecutionState(BaseModel):
# TODO: Add API for modifying underlying graph that checks if the change will be valid given the current execution state
def _is_edge_valid(self, edge: Edge) -> bool:
try:
self.graph._validate_edge(edge)
except InvalidEdgeError:
if not self._is_edge_valid(edge):
return False
# Invalid if destination has already been prepared or executed
@@ -1143,52 +1144,4 @@ class GraphExecutionState(BaseModel):
self.graph.delete_edge(edge)
class ExposedNodeInput(BaseModel):
node_path: str = Field(description="The node path to the node with the input")
field: str = Field(description="The field name of the input")
alias: str = Field(description="The alias of the input")
class ExposedNodeOutput(BaseModel):
node_path: str = Field(description="The node path to the node with the output")
field: str = Field(description="The field name of the output")
alias: str = Field(description="The alias of the output")
class LibraryGraph(BaseModel):
id: str = Field(description="The unique identifier for this library graph", default_factory=uuid.uuid4)
graph: Graph = Field(description="The graph")
name: str = Field(description="The name of the graph")
description: str = Field(description="The description of the graph")
exposed_inputs: list[ExposedNodeInput] = Field(description="The inputs exposed by this graph", default_factory=list)
exposed_outputs: list[ExposedNodeOutput] = Field(description="The outputs exposed by this graph", default_factory=list)
@validator('exposed_inputs', 'exposed_outputs')
def validate_exposed_aliases(cls, v):
if len(v) != len(set(i.alias for i in v)):
raise ValueError("Duplicate exposed alias")
return v
@root_validator
def validate_exposed_nodes(cls, values):
graph = values['graph']
# Validate exposed inputs
for exposed_input in values['exposed_inputs']:
if not graph.has_node(exposed_input.node_path):
raise ValueError(f"Exposed input node {exposed_input.node_path} does not exist")
node = graph.get_node(exposed_input.node_path)
if get_input_field(node, exposed_input.field) is None:
raise ValueError(f"Exposed input field {exposed_input.field} does not exist on node {exposed_input.node_path}")
# Validate exposed outputs
for exposed_output in values['exposed_outputs']:
if not graph.has_node(exposed_output.node_path):
raise ValueError(f"Exposed output node {exposed_output.node_path} does not exist")
node = graph.get_node(exposed_output.node_path)
if get_output_field(node, exposed_output.field) is None:
raise ValueError(f"Exposed output field {exposed_output.field} does not exist on node {exposed_output.node_path}")
return values
GraphInvocation.update_forward_refs()
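The caller-facing difference in the edge-validation change above: the _validate_edge variant surfaces a reason through InvalidEdgeError, while the _is_edge_valid variant makes add_edge raise a bare InvalidEdgeError when validation returns False. A hedged sketch against the raising variant (import paths assumed; ParamIntInvocation as defined elsewhere in this diff):

from invokeai.app.invocations.params import ParamIntInvocation  # path assumed
from invokeai.app.services.graph import (  # path assumed
    Edge, EdgeConnection, Graph, InvalidEdgeError,
)

g = Graph(nodes={"w": ParamIntInvocation(id="w", a=1)})
try:
    g.add_edge(Edge(
        source=EdgeConnection(node_id="w", field="a"),
        destination=EdgeConnection(node_id="missing", field="a"),
    ))
except InvalidEdgeError as e:
    print("rejected:", e)  # e.g. "One or both nodes don't exist"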

View File

@@ -1,24 +1,25 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
import datetime
import os
import json
from glob import glob
from abc import ABC, abstractmethod
from enum import Enum
from pathlib import Path
from queue import Queue
from typing import Dict, List, Tuple
from typing import Any, Callable, Dict, List, Union
from PIL.Image import Image
import PIL.Image as PILImage
from invokeai.app.api.models.images import ImageResponse, ImageResponseMetadata
from invokeai.app.models.image import ImageType
from invokeai.app.services.metadata import (
InvokeAIMetadata,
MetadataServiceBase,
build_invokeai_metadata_pnginfo,
)
from pydantic import BaseModel, Json
from invokeai.app.api.models.images import ImageResponse
from invokeai.app.models.image import ImageField, ImageType
from invokeai.app.models.metadata import ImageMetadata
from invokeai.app.services.item_storage import PaginatedResults
from invokeai.app.util.misc import get_timestamp
from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail
from invokeai.app.util.save_thumbnail import save_thumbnail
from invokeai.backend.image_util import PngWriter
class ImageStorageBase(ABC):
@@ -26,14 +27,12 @@ class ImageStorageBase(ABC):
@abstractmethod
def get(self, image_type: ImageType, image_name: str) -> Image:
"""Retrieves an image as PIL Image."""
pass
@abstractmethod
def list(
self, image_type: ImageType, page: int = 0, per_page: int = 10
) -> PaginatedResults[ImageResponse]:
"""Gets a paginated list of images."""
pass
# TODO: make this a bit more flexible for e.g. cloud storage
@@ -41,51 +40,35 @@ class ImageStorageBase(ABC):
def get_path(
self, image_type: ImageType, image_name: str, is_thumbnail: bool = False
) -> str:
"""Gets the path to an image or its thumbnail."""
pass
# TODO: make this a bit more flexible for e.g. cloud storage
@abstractmethod
def validate_path(self, path: str) -> bool:
"""Validates an image path."""
pass
@abstractmethod
def save(
self,
image_type: ImageType,
image_name: str,
image: Image,
metadata: InvokeAIMetadata | None = None,
) -> Tuple[str, str, int]:
"""Saves an image and a 256x256 WEBP thumbnail. Returns a tuple of the image path, thumbnail path, and created timestamp."""
def save(self, image_type: ImageType, image_name: str, image: Image, metadata: Dict[str, Any] | None = None) -> str:
pass
@abstractmethod
def delete(self, image_type: ImageType, image_name: str) -> None:
"""Deletes an image and its thumbnail (if one exists)."""
pass
def create_name(self, context_id: str, node_id: str) -> str:
"""Creates a unique contextual image filename."""
return f"{context_id}_{node_id}_{str(get_timestamp())}.png"
return f"{context_id}_{node_id}_{str(int(datetime.datetime.now(datetime.timezone.utc).timestamp()))}.png"
class DiskImageStorage(ImageStorageBase):
"""Stores images on disk"""
__output_folder: str
__pngWriter: PngWriter
__cache_ids: Queue # TODO: this is an incredibly naive cache
__cache: Dict[str, Image]
__max_cache_size: int
__metadata_service: MetadataServiceBase
def __init__(self, output_folder: str, metadata_service: MetadataServiceBase):
def __init__(self, output_folder: str):
self.__output_folder = output_folder
self.__pngWriter = PngWriter(output_folder)
self.__cache = dict()
self.__cache_ids = Queue()
self.__max_cache_size = 10 # TODO: get this from config
self.__metadata_service = metadata_service
Path(output_folder).mkdir(parents=True, exist_ok=True)
@@ -118,8 +101,7 @@ class DiskImageStorage(ImageStorageBase):
for path in page_of_image_paths:
filename = os.path.basename(path)
img = PILImage.open(path)
invokeai_metadata = self.__metadata_service.get_metadata(img)
invokeai_metadata = json.loads(img.info.get("invokeai", "{}"))
page_of_images.append(
ImageResponse(
@@ -128,12 +110,12 @@ class DiskImageStorage(ImageStorageBase):
# TODO: DiskImageStorage should not be building URLs...?
image_url=f"api/v1/images/{image_type.value}/{filename}",
thumbnail_url=f"api/v1/images/{image_type.value}/thumbnails/{os.path.splitext(filename)[0]}.webp",
# TODO: Creation of this object should happen elsewhere (?), just making it fit here so it works
metadata=ImageResponseMetadata(
# TODO: Creation of this object should happen elsewhere, just making it fit here so it works
metadata=ImageMetadata(
created=int(os.path.getctime(path)),
width=img.width,
height=img.height,
invokeai=invokeai_metadata,
invokeai=invokeai_metadata
),
)
)
@@ -164,50 +146,28 @@ class DiskImageStorage(ImageStorageBase):
def get_path(
self, image_type: ImageType, image_name: str, is_thumbnail: bool = False
) -> str:
# strip out any relative path shenanigans
basename = os.path.basename(image_name)
if is_thumbnail:
path = os.path.join(
self.__output_folder, image_type, "thumbnails", basename
self.__output_folder, image_type, "thumbnails", image_name
)
else:
path = os.path.join(self.__output_folder, image_type, basename)
path = os.path.join(self.__output_folder, image_type, image_name)
return path
def validate_path(self, path: str) -> bool:
try:
os.stat(path)
return True
except Exception:
return False
def save(
self,
image_type: ImageType,
image_name: str,
image: Image,
metadata: InvokeAIMetadata | None = None,
) -> Tuple[str, str, int]:
def save(self, image_type: ImageType, image_name: str, image: Image, metadata: Dict[str, Any] | None = None) -> str:
print(metadata)
image_subpath = os.path.join(image_type, image_name)
self.__pngWriter.save_image_and_prompt_to_png(
image, "", image_subpath, metadata
) # TODO: just pass full path to png writer
save_thumbnail(
image=image,
filename=image_name,
path=os.path.join(self.__output_folder, image_type, "thumbnails"),
)
image_path = self.get_path(image_type, image_name)
# TODO: Reading the image and then saving it strips the metadata...
if metadata:
pnginfo = build_invokeai_metadata_pnginfo(metadata=metadata)
image.save(image_path, "PNG", pnginfo=pnginfo)
else:
image.save(image_path) # this saved image has an empty info
thumbnail_name = get_thumbnail_name(image_name)
thumbnail_path = self.get_path(image_type, thumbnail_name, is_thumbnail=True)
thumbnail_image = make_thumbnail(image)
thumbnail_image.save(thumbnail_path)
self.__set_cache(image_path, image)
self.__set_cache(thumbnail_path, thumbnail_image)
return (image_path, thumbnail_path, int(os.path.getctime(image_path)))
return image_path
def delete(self, image_type: ImageType, image_name: str) -> None:
image_path = self.get_path(image_type, image_name)
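A hedged usage sketch for the tuple-returning save() variant, which writes the PNG (with metadata embedded as a pnginfo text chunk when provided) plus a 256px WEBP thumbnail. Module paths are assumed, and the per-type subfolders are assumed to exist (the elided parts of __init__ appear to create them):

from PIL import Image
from invokeai.app.models.image import ImageType
from invokeai.app.services.image_storage import DiskImageStorage  # path assumed
from invokeai.app.services.metadata import PngMetadataService

storage = DiskImageStorage("outputs", metadata_service=PngMetadataService())
img = Image.new("RGB", (64, 64))
image_path, thumb_path, created = storage.save(ImageType.UPLOAD, "example.png", img)
print(image_path, thumb_path, created)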

View File

@@ -1,17 +1,30 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
import time
from abc import ABC, abstractmethod
from queue import Queue
from pydantic import BaseModel, Field
import time
class InvocationQueueItem(BaseModel):
graph_execution_state_id: str = Field(description="The ID of the graph execution state")
invocation_id: str = Field(description="The ID of the node being invoked")
invoke_all: bool = Field(default=False)
timestamp: float = Field(default_factory=time.time)
# TODO: make this serializable
class InvocationQueueItem:
# session_id: str
graph_execution_state_id: str
invocation_id: str
invoke_all: bool
timestamp: float
def __init__(
self,
# session_id: str,
graph_execution_state_id: str,
invocation_id: str,
invoke_all: bool = False,
):
# self.session_id = session_id
self.graph_execution_state_id = graph_execution_state_id
self.invocation_id = invocation_id
self.invoke_all = invoke_all
self.timestamp = time.time()
class InvocationQueueABC(ABC):
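The pydantic variant makes queue items serializable, which is exactly what the "# TODO: make this serializable" note on the plain-class variant asks for. Quick sketch (import path assumed):

from invokeai.app.services.invocation_queue import InvocationQueueItem  # path assumed

item = InvocationQueueItem(
    graph_execution_state_id="abc",
    invocation_id="4",
    invoke_all=True,
)
print(item.json())  # timestamp is filled in by default_factory=time.time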

View File

@@ -1,5 +1,4 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from invokeai.app.services.metadata import MetadataServiceBase
from invokeai.backend import ModelManager
from .events import EventServiceBase
@@ -15,13 +14,11 @@ class InvocationServices:
events: EventServiceBase
latents: LatentsStorageBase
images: ImageStorageBase
metadata: MetadataServiceBase
queue: InvocationQueueABC
model_manager: ModelManager
restoration: RestorationServices
# NOTE: we must forward-declare any types that include invocations, since invocations can use services
graph_library: ItemStorageABC["LibraryGraph"]
graph_execution_manager: ItemStorageABC["GraphExecutionState"]
processor: "InvocationProcessorABC"
@@ -31,9 +28,7 @@ class InvocationServices:
events: EventServiceBase,
latents: LatentsStorageBase,
images: ImageStorageBase,
metadata: MetadataServiceBase,
queue: InvocationQueueABC,
graph_library: ItemStorageABC["LibraryGraph"],
graph_execution_manager: ItemStorageABC["GraphExecutionState"],
processor: "InvocationProcessorABC",
restoration: RestorationServices,
@@ -42,9 +37,7 @@ class InvocationServices:
self.events = events
self.latents = latents
self.images = images
self.metadata = metadata
self.queue = queue
self.graph_library = graph_library
self.graph_execution_manager = graph_execution_manager
self.processor = processor
self.restoration = restoration

View File

@@ -1,96 +0,0 @@
import json
from abc import ABC, abstractmethod
from typing import Any, Dict, Optional, TypedDict
from PIL import Image, PngImagePlugin
from pydantic import BaseModel
from invokeai.app.models.image import ImageType, is_image_type
class MetadataImageField(TypedDict):
"""Pydantic-less ImageField, used for metadata parsing."""
image_type: ImageType
image_name: str
class MetadataLatentsField(TypedDict):
"""Pydantic-less LatentsField, used for metadata parsing."""
latents_name: str
# TODO: This is a placeholder for `InvocationsUnion` pending resolution of circular imports
NodeMetadata = Dict[
str, str | int | float | bool | MetadataImageField | MetadataLatentsField
]
class InvokeAIMetadata(TypedDict, total=False):
"""InvokeAI-specific metadata format."""
session_id: Optional[str]
node: Optional[NodeMetadata]
def build_invokeai_metadata_pnginfo(
metadata: InvokeAIMetadata | None,
) -> PngImagePlugin.PngInfo:
"""Builds a PngInfo object with key `"invokeai"` and value `metadata`"""
pnginfo = PngImagePlugin.PngInfo()
if metadata is not None:
pnginfo.add_text("invokeai", json.dumps(metadata))
return pnginfo
class MetadataServiceBase(ABC):
@abstractmethod
def get_metadata(self, image: Image.Image) -> InvokeAIMetadata | None:
"""Gets the InvokeAI metadata from a PIL Image, skipping invalid values"""
pass
@abstractmethod
def build_metadata(
self, session_id: str, node: BaseModel
) -> InvokeAIMetadata | None:
"""Builds an InvokeAIMetadata object"""
pass
class PngMetadataService(MetadataServiceBase):
"""Handles loading and building metadata for images."""
# TODO: Use `InvocationsUnion` to **validate** metadata as representing a fully-functioning node
def _load_metadata(self, image: Image.Image) -> dict | None:
"""Loads a specific info entry from a PIL Image."""
try:
info = image.info.get("invokeai")
if type(info) is not str:
return None
loaded_metadata = json.loads(info)
if type(loaded_metadata) is not dict:
return None
if len(loaded_metadata.items()) == 0:
return None
return loaded_metadata
except:
return None
def get_metadata(self, image: Image.Image) -> dict | None:
"""Retrieves an image's metadata as a dict"""
loaded_metadata = self._load_metadata(image)
return loaded_metadata
def build_metadata(self, session_id: str, node: BaseModel) -> InvokeAIMetadata:
metadata = InvokeAIMetadata(session_id=session_id, node=node.dict())
return metadata
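A runnable round-trip sketch for the metadata service deleted in this file: build metadata from a node, embed it as a pnginfo text chunk, and read it back. FakeNode is a hypothetical stand-in for an invocation:

from PIL import Image
from pydantic import BaseModel
from invokeai.app.services.metadata import (
    PngMetadataService, build_invokeai_metadata_pnginfo,
)

class FakeNode(BaseModel):
    type: str = "noise"
    seed: int = 123

svc = PngMetadataService()
meta = svc.build_metadata(session_id="abc", node=FakeNode())
img = Image.new("RGB", (8, 8))
img.save("example.png", "PNG", pnginfo=build_invokeai_metadata_pnginfo(meta))
print(svc.get_metadata(Image.open("example.png")))
# -> {'session_id': 'abc', 'node': {'type': 'noise', 'seed': 123}}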

View File

@@ -43,14 +43,14 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
queue_item.invocation_id
)
# get the source node id to provide to clients (the prepared node id is not as useful)
source_node_id = graph_execution_state.prepared_source_mapping[invocation.id]
# get the source node to provide to clients (the prepared node is not as useful)
source_id = graph_execution_state.prepared_source_mapping[invocation.id]
# Send starting event
self.__invoker.services.events.emit_invocation_started(
graph_execution_state_id=graph_execution_state.id,
node=invocation.dict(),
source_node_id=source_node_id
invocation_dict=invocation.dict(),
source_id=source_id
)
# Invoke
@@ -79,8 +79,8 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
# Send complete event
self.__invoker.services.events.emit_invocation_complete(
graph_execution_state_id=graph_execution_state.id,
node=invocation.dict(),
source_node_id=source_node_id,
invocation_dict=invocation.dict(),
source_id=source_id,
result=outputs.dict(),
)
@@ -104,8 +104,8 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
# Send error event
self.__invoker.services.events.emit_invocation_error(
graph_execution_state_id=graph_execution_state.id,
node=invocation.dict(),
source_node_id=source_node_id,
invocation_dict=invocation.dict(),
source_id=source_id,
error=error,
)

View File

@@ -1,23 +1,25 @@
import sqlite3
from threading import Lock
from typing import Generic, TypeVar, Union, get_args
from pydantic import BaseModel, parse_raw_as
from .item_storage import ItemStorageABC, PaginatedResults
from sqlalchemy import create_engine, String, TEXT, Engine, select
from sqlalchemy.orm import DeclarativeBase, mapped_column, Session
T = TypeVar("T", bound=BaseModel)
sqlite_memory = ":memory:"
class Base(DeclarativeBase):
pass
class SqliteItemStorage(ItemStorageABC, Generic[T]):
_filename: str
_table_name: str
_conn: sqlite3.Connection
_cursor: sqlite3.Cursor
_id_field: str
_lock: Lock
_engine: Engine
# _table: ??? # TODO: figure out how to type this
def __init__(self, filename: str, table_name: str, id_field: str = "id"):
super().__init__()
@@ -25,86 +27,79 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
self._filename = filename
self._table_name = table_name
self._id_field = id_field # TODO: validate that T has this field
self._lock = Lock()
self._conn = sqlite3.connect(
self._filename, check_same_thread=False
) # TODO: figure out a better threading solution
self._cursor = self._conn.cursor()
self._engine = create_engine(f"sqlite+pysqlite:///{self._filename}")
self._create_table()
def _create_table(self):
try:
self._lock.acquire()
self._cursor.execute(
f"""CREATE TABLE IF NOT EXISTS {self._table_name} (
item TEXT,
id TEXT GENERATED ALWAYS AS (json_extract(item, '$.{self._id_field}')) VIRTUAL NOT NULL);"""
)
self._cursor.execute(
f"""CREATE UNIQUE INDEX IF NOT EXISTS {self._table_name}_id ON {self._table_name}(id);"""
)
finally:
self._lock.release()
# dynamically create the ORM model class to avoid name collisions
# cannot access `self.__orig_class__` in `__init__` or `__new__` so
# format the table name into the class name
pascal_table_name = self._table_name.replace("_", " ").title()
pascal_table_name = pascal_table_name.replace(" ", "")
table_dict = dict(
__tablename__=self._table_name,
id=mapped_column(String, primary_key=True),
item=mapped_column(TEXT, nullable=False),
)
self._table = type(pascal_table_name, (Base,), table_dict)
Base.metadata.create_all(self._engine)
def _parse_item(self, item: str) -> T:
item_type = get_args(self.__orig_class__)[0]
return parse_raw_as(item_type, item)
def set(self, item: T):
try:
self._lock.acquire()
self._cursor.execute(
f"""INSERT OR REPLACE INTO {self._table_name} (item) VALUES (?);""",
(item.json(),),
)
self._conn.commit()
finally:
self._lock.release()
session = Session(self._engine)
item_id = str(getattr(item, self._id_field))
new_item = self._table(id=item_id, item=item.json())
session.merge(new_item)
session.commit()
session.close()
self._on_changed(item)
def get(self, id: str) -> Union[T, None]:
try:
self._lock.acquire()
self._cursor.execute(
f"""SELECT item FROM {self._table_name} WHERE id = ?;""", (str(id),)
)
result = self._cursor.fetchone()
finally:
self._lock.release()
session = Session(self._engine)
if not result:
item = session.get(self._table, str(id))
session.close()
if not item:
return None
return self._parse_item(result[0])
return self._parse_item(item.item)
def delete(self, id: str):
try:
self._lock.acquire()
self._cursor.execute(
f"""DELETE FROM {self._table_name} WHERE id = ?;""", (str(id),)
)
self._conn.commit()
finally:
self._lock.release()
session = Session(self._engine)
item = session.get(self._table, id)
session.delete(item)
session.commit()
session.close()
self._on_deleted(id)
def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
try:
self._lock.acquire()
self._cursor.execute(
f"""SELECT item FROM {self._table_name} LIMIT ? OFFSET ?;""",
(per_page, page * per_page),
)
result = self._cursor.fetchall()
session = Session(self._engine)
items = list(map(lambda r: self._parse_item(r[0]), result))
stmt = select(self._table.item).limit(per_page).offset(page * per_page)
result = session.execute(stmt)
self._cursor.execute(f"""SELECT count(*) FROM {self._table_name};""")
count = self._cursor.fetchone()[0]
finally:
self._lock.release()
items = list(map(lambda r: self._parse_item(r[0]), result))
count = session.query(self._table.item).count()
session.commit()
session.close()
pageCount = int(count / per_page) + 1
@@ -115,23 +110,19 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
def search(
self, query: str, page: int = 0, per_page: int = 10
) -> PaginatedResults[T]:
try:
self._lock.acquire()
self._cursor.execute(
f"""SELECT item FROM {self._table_name} WHERE item LIKE ? LIMIT ? OFFSET ?;""",
(f"%{query}%", per_page, page * per_page),
)
result = self._cursor.fetchall()
session = Session(self._engine)
items = list(map(lambda r: self._parse_item(r[0]), result))
stmt = (
session.query(self._table)
.where(self._table.item.like(f"%{query}%"))
.limit(per_page)
.offset(page * per_page)
)
self._cursor.execute(
f"""SELECT count(*) FROM {self._table_name} WHERE item LIKE ?;""",
(f"%{query}%",),
)
count = self._cursor.fetchone()[0]
finally:
self._lock.release()
result = session.execute(stmt)
items = list(map(lambda r: self._parse_item(r[0].item), result))
count = session.query(self._table.item).count()
pageCount = int(count / per_page) + 1
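A hedged usage sketch for the sqlalchemy-backed store. Thing is a hypothetical item type; note the subscripted-generic instantiation, which is what lets _parse_item recover the item type from __orig_class__ (module path assumed):

from pydantic import BaseModel
from invokeai.app.services.sqlite import SqliteItemStorage, sqlite_memory  # path assumed

class Thing(BaseModel):
    id: str
    value: int

store = SqliteItemStorage[Thing](sqlite_memory, table_name="things")
store.set(Thing(id="a", value=1))
print(store.get("a"))                         # Thing(id='a', value=1)
print(store.list(page=0, per_page=10).items)  # PaginatedResults page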

View File

@@ -1,5 +0,0 @@
import datetime
def get_timestamp():
return int(datetime.datetime.now(datetime.timezone.utc).timestamp())

View File

@@ -0,0 +1,25 @@
import os
from PIL import Image
def save_thumbnail(
image: Image.Image,
filename: str,
path: str,
size: int = 256,
) -> str:
"""
Saves a thumbnail of an image, returning its path.
"""
base_filename = os.path.splitext(filename)[0]
thumbnail_path = os.path.join(path, base_filename + ".webp")
if os.path.exists(thumbnail_path):
return thumbnail_path
image_copy = image.copy()
image_copy.thumbnail(size=(size, size))
image_copy.save(thumbnail_path, "WEBP")
return thumbnail_path
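Usage sketch for the helper added here; note it neither creates the target directory nor mutates the input image (thumbnail() runs on a copy and caps the longest side at size):

import os
from PIL import Image
from invokeai.app.util.save_thumbnail import save_thumbnail

os.makedirs("outputs/thumbnails", exist_ok=True)
img = Image.new("RGB", (512, 512))
print(save_thumbnail(img, "example.png", path="outputs/thumbnails"))
# -> outputs/thumbnails/example.webp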

View File

@@ -1,41 +1,17 @@
from invokeai.app.api.models.images import ProgressImage
from invokeai.app.models.exceptions import CanceledException
from re import S
import torch
from ..invocations.baseinvocation import InvocationContext
from ...backend.util.util import image_to_dataURL
from ...backend.generator.base import Generator
from ...backend.stable_diffusion import PipelineIntermediateState
def stable_diffusion_step_callback(
def fast_latents_step_callback(
sample: torch.Tensor,
step: int,
steps: int,
id: str,
context: InvocationContext,
intermediate_state: PipelineIntermediateState,
node: dict,
source_node_id: str,
):
if context.services.queue.is_canceled(context.graph_execution_state_id):
raise CanceledException
# Some schedulers report not only the noisy latents at the current timestep,
# but also their estimate so far of what the de-noised latents will be. Use
# that estimate if it is available.
if intermediate_state.predicted_original is not None:
sample = intermediate_state.predicted_original
else:
sample = intermediate_state.latents
# TODO: This does not seem to be needed any more?
# # txt2img provides a Tensor in the step_callback
# # img2img provides a PipelineIntermediateState
# if isinstance(sample, PipelineIntermediateState):
# # this was an img2img
# print('img2img')
# latents = sample.latents
# step = sample.step
# else:
# print('txt2img')
# latents = sample
# step = intermediate_state.step
# TODO: only output a preview image when requested
image = Generator.sample_to_lowres_estimated_image(sample)
@@ -45,11 +21,30 @@ def stable_diffusion_step_callback(
dataURL = image_to_dataURL(image, image_format="JPEG")
graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
source_id = graph_execution_state.prepared_source_mapping[id]
invocation = graph_execution_state.execution_graph.get_node(id)
context.services.events.emit_generator_progress(
graph_execution_state_id=context.graph_execution_state_id,
node=node,
source_node_id=source_node_id,
progress_image=ProgressImage(width=width, height=height, dataURL=dataURL),
step=intermediate_state.step,
total_steps=node["steps"],
invocation_dict=invocation.dict(),
source_id=source_id,
progress_image={"width": width, "height": height, "dataURL": dataURL},
step=step,
total_steps=steps,
)
def diffusers_step_callback_adapter(*cb_args, **kwargs):
"""
txt2img gives us a Tensor in the step_callback, while img2img gives us a PipelineIntermediateState.
This adapter grabs the needed data and passes it along to the callback function.
"""
if isinstance(cb_args[0], PipelineIntermediateState):
progress_state: PipelineIntermediateState = cb_args[0]
return fast_latents_step_callback(
progress_state.latents, progress_state.step, **kwargs
)
else:
return fast_latents_step_callback(*cb_args, **kwargs)

View File

@@ -1,15 +0,0 @@
import os
from PIL import Image
def get_thumbnail_name(image_name: str) -> str:
"""Formats given an image name, returns the appropriate thumbnail image name"""
thumbnail_name = os.path.splitext(image_name)[0] + ".webp"
return thumbnail_name
def make_thumbnail(image: Image.Image, size: int = 256) -> Image.Image:
"""Makes a thumbnail from a PIL Image"""
thumbnail = image.copy()
thumbnail.thumbnail(size=(size, size))
return thumbnail

View File

@@ -10,7 +10,7 @@ from .generator import (
Img2Img,
Inpaint
)
from .model_management import ModelManager, SDModelComponent
from .model_management import ModelManager
from .safety_checker import SafetyChecker
from .args import Args
from .globals import Globals

View File

@@ -86,11 +86,9 @@ class InvokeAIGenerator(metaclass=ABCMeta):
def __init__(self,
model_info: dict,
params: InvokeAIGeneratorBasicParams=InvokeAIGeneratorBasicParams(),
**kwargs,
):
self.model_info=model_info
self.params=params
self.kwargs = kwargs
def generate(self,
prompt: str='',
@@ -131,12 +129,9 @@ class InvokeAIGenerator(metaclass=ABCMeta):
model=model,
scheduler_name=generator_args.get('scheduler')
)
# get conditioning from prompt via Compel package
uc, c, extra_conditioning_info = get_uc_and_c_and_ec(prompt, model=model)
uc, c, extra_conditioning_info = get_uc_and_c_and_ec(prompt,model=model)
gen_class = self._generator_class()
generator = gen_class(model, self.params.precision, **self.kwargs)
generator = gen_class(model, self.params.precision)
if self.params.variation_amount > 0:
generator.set_variation(generator_args.get('seed'),
generator_args.get('variation_amount'),
@@ -286,7 +281,7 @@ class Generator:
precision: str
model: DiffusionPipeline
def __init__(self, model: DiffusionPipeline, precision: str, **kwargs):
def __init__(self, model: DiffusionPipeline, precision: str):
self.model = model
self.precision = precision
self.seed = None
@@ -359,6 +354,7 @@ class Generator:
seed = seed if seed is not None and seed >= 0 else self.new_seed()
first_seed = seed
seed, initial_noise = self.generate_initial_noise(seed, width, height)
# There used to be an additional self.model.ema_scope() here, but it breaks
# the inpaint-1.5 model. Not sure what it did.... ?
with scope(self.model.device.type):

View File

@@ -4,10 +4,6 @@ invokeai.backend.generator.txt2img inherits from invokeai.backend.generator
import PIL.Image
import torch
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from diffusers.models.controlnet import ControlNetModel, ControlNetOutput
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from ..stable_diffusion import (
ConditioningData,
PostprocessingSettings,
@@ -17,13 +13,8 @@ from .base import Generator
class Txt2Img(Generator):
def __init__(self, model, precision,
control_model: Optional[Union[ControlNetModel, List[ControlNetModel]]] = None,
**kwargs):
self.control_model = control_model
if isinstance(self.control_model, list):
self.control_model = MultiControlNetModel(self.control_model)
super().__init__(model, precision, **kwargs)
def __init__(self, model, precision):
super().__init__(model, precision)
@torch.no_grad()
def get_make_image(
@@ -51,12 +42,9 @@ class Txt2Img(Generator):
kwargs are 'width' and 'height'
"""
self.perlin = perlin
control_image = kwargs.get("control_image", None)
do_classifier_free_guidance = cfg_scale > 1.0
# noinspection PyTypeChecker
pipeline: StableDiffusionGeneratorPipeline = self.model
pipeline.control_model = self.control_model
pipeline.scheduler = sampler
uc, c, extra_conditioning_info = conditioning
@@ -73,37 +61,6 @@ class Txt2Img(Generator):
),
).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)
# FIXME: still need to test with different widths, heights, devices, dtypes
# and add in batch_size, num_images_per_prompt?
if control_image is not None:
if isinstance(self.control_model, ControlNetModel):
control_image = pipeline.prepare_control_image(
image=control_image,
do_classifier_free_guidance=do_classifier_free_guidance,
width=width,
height=height,
# batch_size=batch_size * num_images_per_prompt,
# num_images_per_prompt=num_images_per_prompt,
device=self.control_model.device,
dtype=self.control_model.dtype,
)
elif isinstance(self.control_model, MultiControlNetModel):
images = []
for image_ in control_image:
image_ = self.model.prepare_control_image(
image=image_,
do_classifier_free_guidance=do_classifier_free_guidance,
width=width,
height=height,
# batch_size=batch_size * num_images_per_prompt,
# num_images_per_prompt=num_images_per_prompt,
device=self.control_model.device,
dtype=self.control_model.dtype,
)
images.append(image_)
control_image = images
kwargs["control_image"] = control_image
def make_image(x_T: torch.Tensor, _: int) -> PIL.Image.Image:
pipeline_output = pipeline.image_from_embeddings(
latents=torch.zeros_like(x_T, dtype=self.torch_dtype()),
@@ -111,7 +68,6 @@ class Txt2Img(Generator):
num_inference_steps=steps,
conditioning_data=conditioning_data,
callback=step_callback,
**kwargs,
)
if (

View File

@@ -41,7 +41,7 @@ class PngWriter:
info = PngImagePlugin.PngInfo()
info.add_text("Dream", dream_prompt)
if metadata:
info.add_text("sd-metadata", json.dumps(metadata))
info.add_text("invokeai", json.dumps(metadata))
image.save(path, "PNG", pnginfo=info, compress_level=compress_level)
return path

View File

@@ -5,7 +5,6 @@ from .convert_ckpt_to_diffusers import (
convert_ckpt_to_diffusers,
load_pipeline_from_original_stable_diffusion_ckpt,
)
from .model_manager import ModelManager,SDModelComponent
from .model_manager import ModelManager

View File

@@ -57,7 +57,7 @@ class HuggingFaceConceptsLibrary(object):
self.concept_list.extend(list(local_concepts_to_add))
return self.concept_list
return self.concept_list
elif Globals.internet_available is True:
else:
try:
models = self.hf_api.list_models(
filter=ModelFilter(model_name="sd-concepts-library/")
@@ -73,8 +73,6 @@ class HuggingFaceConceptsLibrary(object):
" ** You may load .bin and .pt file(s) manually using the --embedding_directory argument."
)
return self.concept_list
else:
return self.concept_list
def get_concept_model_path(self, concept_name: str) -> str:
"""

View File

@@ -9,20 +9,16 @@ from typing import Any, Callable, Generic, List, Optional, Type, TypeVar, Union
import einops
import PIL.Image
import numpy as np
from accelerate.utils import set_seed
import psutil
import torch
import torchvision.transforms as T
from compel import EmbeddingsProvider
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.controlnet import ControlNetModel, ControlNetOutput
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import (
StableDiffusionPipeline,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import (
StableDiffusionImg2ImgPipeline,
)
@@ -31,7 +27,6 @@ from diffusers.pipelines.stable_diffusion.safety_checker import (
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput
from diffusers.utils import PIL_INTERPOLATION
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.outputs import BaseOutput
from torchvision.transforms.functional import resize as tv_resize
@@ -309,7 +304,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
feature_extractor: Optional[CLIPFeatureExtractor],
requires_safety_checker: bool = False,
precision: str = "float32",
control_model: ControlNetModel = None,
):
super().__init__(
vae,
@@ -330,8 +324,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
# FIXME: can't currently register control module
# control_model=control_model,
)
self.invokeai_diffuser = InvokeAIDiffuserComponent(
self.unet, self._unet_forward, is_running_diffusers=True
@@ -351,7 +343,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
self._model_group = FullyLoadedModelGroup(self.unet.device)
self._model_group.install(*self._submodels)
self.control_model = control_model
def _adjust_memory_efficient_attention(self, latents: torch.Tensor):
"""
@@ -454,15 +445,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
@property
def _submodels(self) -> Sequence[torch.nn.Module]:
module_names, _, _ = self.extract_init_dict(dict(self.config))
submodels = []
for name in module_names.keys():
if hasattr(self, name):
value = getattr(self, name)
else:
value = getattr(self.config, name)
if isinstance(value, torch.nn.Module):
submodels.append(value)
return submodels
values = [getattr(self, name) for name in module_names.keys()]
return [m for m in values if isinstance(m, torch.nn.Module)]
def image_from_embeddings(
self,
@@ -473,7 +457,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
noise: torch.Tensor,
callback: Callable[[PipelineIntermediateState], None] = None,
run_id=None,
**kwargs,
) -> InvokeAIStableDiffusionPipelineOutput:
r"""
Function invoked when calling the pipeline for generation.
@@ -494,7 +477,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
noise=noise,
run_id=run_id,
callback=callback,
**kwargs,
)
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
torch.cuda.empty_cache()
@@ -519,7 +501,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
additional_guidance: List[Callable] = None,
run_id=None,
callback: Callable[[PipelineIntermediateState], None] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]:
if timesteps is None:
self.scheduler.set_timesteps(
@@ -537,7 +518,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
additional_guidance=additional_guidance,
run_id=run_id,
callback=callback,
**kwargs,
)
return result.latents, result.attention_map_saver
@@ -550,7 +530,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
noise: torch.Tensor,
run_id: str = None,
additional_guidance: List[Callable] = None,
**kwargs,
):
self._adjust_memory_efficient_attention(latents)
if run_id is None:
@@ -565,7 +544,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
yield PipelineIntermediateState(
run_id=run_id,
step=-1,
timestep=self.scheduler.config.num_train_timesteps,
timestep=self.scheduler.num_train_timesteps,
latents=latents,
)
@@ -589,7 +568,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
step_index=i,
total_step_count=len(timesteps),
additional_guidance=additional_guidance,
**kwargs,
)
latents = step_output.prev_sample
@@ -630,7 +608,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
step_index: int,
total_step_count: int,
additional_guidance: List[Callable] = None,
**kwargs,
):
# invokeai_diffuser has batched timesteps, but diffusers schedulers expect a single value
timestep = t[0]
@@ -642,33 +619,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
# i.e. before or after passing it to InvokeAIDiffuserComponent
latent_model_input = self.scheduler.scale_model_input(latents, timestep)
if (self.control_model is not None) and (kwargs.get("control_image") is not None):
control_image = kwargs.get("control_image") # should be a processed tensor derived from the control image(s)
control_scale = kwargs.get("control_scale", 1.0) # control_scale default is 1.0
# handling case where using multiple control models but only specifying single control_scale
# so reshape control_scale to match number of control models
if isinstance(self.control_model, MultiControlNetModel) and isinstance(control_scale, float):
control_scale = [control_scale] * len(self.control_model.nets)
if conditioning_data.guidance_scale > 1.0:
# expand the latents input to control model if doing classifier free guidance
# (which I think for now is always true, there is conditional elsewhere that stops execution if
# classifier_free_guidance is <= 1.0 ?)
latent_control_input = torch.cat([latent_model_input] * 2)
else:
latent_control_input = latent_model_input
# controlnet inference
down_block_res_samples, mid_block_res_sample = self.control_model(
latent_control_input,
timestep,
encoder_hidden_states=torch.cat([conditioning_data.unconditioned_embeddings,
conditioning_data.text_embeddings]),
controlnet_cond=control_image,
conditioning_scale=control_scale,
return_dict=False,
)
else:
down_block_res_samples, mid_block_res_sample = None, None
# predict the noise residual
noise_pred = self.invokeai_diffuser.do_diffusion_step(
latent_model_input,
@@ -678,8 +628,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
conditioning_data.guidance_scale,
step_index=step_index,
total_step_count=total_step_count,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
)
# compute the previous noisy sample x_t -> x_t-1
@@ -701,7 +649,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
t,
text_embeddings,
cross_attention_kwargs: Optional[dict[str, Any]] = None,
**kwargs,
):
"""predict the noise residual"""
if is_inpainting_model(self.unet) and latents.size(1) == 4:
@@ -721,8 +668,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
# First three args should be positional, not keywords, so torch hooks can see them.
return self.unet(
latents, t, text_embeddings, cross_attention_kwargs=cross_attention_kwargs,
**kwargs,
latents, t, text_embeddings, cross_attention_kwargs=cross_attention_kwargs
).sample
def img2img_from_embeddings(
@@ -969,7 +915,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
@property
def channels(self) -> int:
"""Compatible with DiffusionWrapper"""
return self.unet.config.in_channels
return self.unet.in_channels
def decode_latents(self, latents):
# Explicit call to get the vae loaded, since `decode` isn't the forward method.
@@ -984,48 +930,3 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
debug_image(
img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True
)
# Copied from diffusers pipeline_stable_diffusion_controlnet.py
# Returns torch.Tensor of shape (batch_size, 3, height, width)
def prepare_control_image(
self,
image,
width=512,
height=512,
batch_size=1,
num_images_per_prompt=1,
device="cuda",
dtype=torch.float16,
do_classifier_free_guidance=True,
):
if not isinstance(image, torch.Tensor):
if isinstance(image, PIL.Image.Image):
image = [image]
if isinstance(image[0], PIL.Image.Image):
images = []
for image_ in image:
image_ = image_.convert("RGB")
image_ = image_.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
image_ = np.array(image_)
image_ = image_[None, :]
images.append(image_)
image = images
image = np.concatenate(image, axis=0)
image = np.array(image).astype(np.float32) / 255.0
image = image.transpose(0, 3, 1, 2)
image = torch.from_numpy(image)
elif isinstance(image[0], torch.Tensor):
image = torch.cat(image, dim=0)
image_batch_size = image.shape[0]
if image_batch_size == 1:
repeat_by = batch_size
else:
# image batch size is the same as prompt batch size
repeat_by = num_images_per_prompt
image = image.repeat_interleave(repeat_by, dim=0)
image = image.to(device=device, dtype=dtype)
if do_classifier_free_guidance:
image = torch.cat([image] * 2)
return image
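For orientation, a hedged usage sketch of prepare_control_image as it would be called from the pipeline above (the `pipeline` variable and the file name are illustrative assumptions, not code from this diff):

import PIL.Image
import torch

pil_image = PIL.Image.open("control.png")  # hypothetical input image
control_image = pipeline.prepare_control_image(
    image=pil_image,
    width=512,
    height=512,
    batch_size=1,
    device="cuda",
    dtype=torch.float16,
    do_classifier_free_guidance=True,
)
# resulting shape is (2, 3, 512, 512): the batch is doubled so it lines up with
# torch.cat([latent_model_input] * 2) in the classifier-free-guidance branch above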


@@ -1,6 +1,7 @@
# adapted from bloc97's CrossAttentionControl colab
# https://github.com/bloc97/CrossAttentionControl
import enum
import math
from typing import Callable, Optional
@@ -9,7 +10,8 @@ import diffusers
import psutil
import torch
from compel.cross_attention_control import Arguments
from diffusers.models.attention_processor import AttentionProcessor
from diffusers.models.cross_attention import AttnProcessor
from diffusers.models.unet_2d_condition import UNet2DConditionModel
from torch import nn
from ...util import torch_dtype
@@ -186,7 +188,7 @@ class Context:
class InvokeAICrossAttentionMixin:
"""
Enable InvokeAI-flavoured Attention calculation, which does aggressive low-memory slicing and calls
Enable InvokeAI-flavoured CrossAttention calculation, which does aggressive low-memory slicing and calls
through both to an attention_slice_wrangler and a slicing_strategy_getter for custom attention map wrangling
and dynamic slicing strategy selection.
"""
@@ -207,7 +209,7 @@ class InvokeAICrossAttentionMixin:
Set custom attention calculator to be called when attention is calculated
:param wrangler: Callback, with args (module, suggested_attention_slice, dim, offset, slice_size),
which returns either the suggested_attention_slice or an adjusted equivalent.
`module` is the current Attention module for which the callback is being invoked.
`module` is the current CrossAttention module for which the callback is being invoked.
`suggested_attention_slice` is the default-calculated attention slice
`dim` is -1 if the attention map has not been sliced, or 0 or 1 for dimension-0 or dimension-1 slicing.
If `dim` is >= 0, `offset` and `slice_size` specify the slice start and length.
@@ -343,11 +345,11 @@ class InvokeAICrossAttentionMixin:
def restore_default_cross_attention(
model,
is_running_diffusers: bool,
restore_attention_processor: Optional[AttentionProcessor] = None,
restore_attention_processor: Optional[AttnProcessor] = None,
):
if is_running_diffusers:
unet = model
unet.set_attn_processor(restore_attention_processor or AttnProcessor())
unet.set_attn_processor(restore_attention_processor or CrossAttnProcessor())
else:
remove_attention_function(model)
@@ -406,9 +408,12 @@ def override_cross_attention(model, context: Context, is_running_diffusers=False
def get_cross_attention_modules(
model, which: CrossAttentionType
) -> list[tuple[str, InvokeAICrossAttentionMixin]]:
from ldm.modules.attention import CrossAttention # avoid circular import
cross_attention_class: type = (
InvokeAIDiffusersCrossAttention
if isinstance(model, UNet2DConditionModel)
else CrossAttention
)
which_attn = "attn1" if which is CrossAttentionType.SELF else "attn2"
attention_module_tuples = [
@@ -423,10 +428,10 @@ def get_cross_attention_modules(
print(
f"Error! CrossAttentionControl found an unexpected number of {cross_attention_class} modules in the model "
+ f"(expected {expected_count}, found {cross_attention_modules_in_model_count}). Either monkey-patching failed "
+ "or some assumption has changed about the structure of the model itself. Please fix the monkey-patching, "
+ f"or some assumption has changed about the structure of the model itself. Please fix the monkey-patching, "
+ f"and/or update the {expected_count} above to an appropriate number, and/or find and inform someone who knows "
+ "what it means. This error is non-fatal, but it is likely that .swap() and attention map display will not "
+ "work properly until it is fixed."
+ f"what it means. This error is non-fatal, but it is likely that .swap() and attention map display will not "
+ f"work properly until it is fixed."
)
return attention_module_tuples
@@ -545,7 +550,7 @@ def get_mem_free_total(device):
class InvokeAIDiffusersCrossAttention(
diffusers.models.attention.Attention, InvokeAICrossAttentionMixin
diffusers.models.attention.CrossAttention, InvokeAICrossAttentionMixin
):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@@ -567,8 +572,8 @@ class InvokeAIDiffusersCrossAttention(
"""
# base implementation
class AttnProcessor:
def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):
class CrossAttnProcessor:
def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None):
batch_size, sequence_length, _ = hidden_states.shape
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length)
@@ -596,9 +601,9 @@ class AttnProcessor:
from dataclasses import dataclass, field
import torch
from diffusers.models.attention_processor import (
Attention,
AttnProcessor,
from diffusers.models.cross_attention import (
CrossAttention,
CrossAttnProcessor,
SlicedAttnProcessor,
)
@@ -648,7 +653,7 @@ class SlicedSwapCrossAttnProcesser(SlicedAttnProcessor):
def __call__(
self,
attn: Attention,
attn: CrossAttention,
hidden_states,
encoder_hidden_states=None,
attention_mask=None,
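The renames running through this file track a diffusers API change: older releases expose CrossAttention and CrossAttnProcessor under diffusers.models.cross_attention, while newer releases expose Attention and AttnProcessor under diffusers.models.attention_processor. A minimal compatibility sketch (an assumption about how one might bridge the two layouts, not code from this repository):

try:
    # newer diffusers layout
    from diffusers.models.attention_processor import Attention, AttnProcessor
except ImportError:
    # older diffusers layout: the same classes under their previous names
    from diffusers.models.cross_attention import (
        CrossAttention as Attention,
        CrossAttnProcessor as AttnProcessor,
    )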


@@ -5,7 +5,7 @@ from typing import Any, Callable, Dict, Optional, Union
import numpy as np
import torch
from diffusers.models.attention_processor import AttentionProcessor
from diffusers.models.cross_attention import AttnProcessor
from typing_extensions import TypeAlias
from invokeai.backend.globals import Globals
@@ -101,7 +101,7 @@ class InvokeAIDiffuserComponent:
def override_cross_attention(
self, conditioning: ExtraConditioningInfo, step_count: int
) -> Dict[str, AttentionProcessor]:
) -> Dict[str, AttnProcessor]:
"""
setup cross attention .swap control. for diffusers this replaces the attention processor, so
the previous attention processor is returned so that the caller can restore it later.
@@ -118,7 +118,7 @@ class InvokeAIDiffuserComponent:
)
def restore_default_cross_attention(
self, restore_attention_processor: Optional["AttentionProcessor"] = None
self, restore_attention_processor: Optional["AttnProcessor"] = None
):
self.conditioning = None
self.cross_attention_control_context = None
@@ -168,7 +168,6 @@ class InvokeAIDiffuserComponent:
unconditional_guidance_scale: float,
step_index: Optional[int] = None,
total_step_count: Optional[int] = None,
**kwargs,
):
"""
:param x: current latents
@@ -197,7 +196,7 @@ class InvokeAIDiffuserComponent:
if wants_hybrid_conditioning:
unconditioned_next_x, conditioned_next_x = self._apply_hybrid_conditioning(
x, sigma, unconditioning, conditioning, **kwargs,
x, sigma, unconditioning, conditioning
)
elif wants_cross_attention_control:
(
@@ -209,14 +208,13 @@ class InvokeAIDiffuserComponent:
unconditioning,
conditioning,
cross_attention_control_types_to_do,
**kwargs,
)
elif self.sequential_guidance:
(
unconditioned_next_x,
conditioned_next_x,
) = self._apply_standard_conditioning_sequentially(
x, sigma, unconditioning, conditioning, **kwargs,
x, sigma, unconditioning, conditioning
)
else:
@@ -224,7 +222,7 @@ class InvokeAIDiffuserComponent:
unconditioned_next_x,
conditioned_next_x,
) = self._apply_standard_conditioning(
x, sigma, unconditioning, conditioning, **kwargs,
x, sigma, unconditioning, conditioning
)
combined_next_x = self._combine(
@@ -264,20 +262,20 @@ class InvokeAIDiffuserComponent:
# TODO remove when compvis codepath support is dropped
if step_index is None and sigma is None:
raise ValueError(
"Either step_index or sigma is required when doing cross attention control, but both are None."
f"Either step_index or sigma is required when doing cross attention control, but both are None."
)
percent_through = self.estimate_percent_through(step_index, sigma)
return percent_through
# methods below are called from do_diffusion_step and should be considered private to this class.
def _apply_standard_conditioning(self, x, sigma, unconditioning, conditioning, **kwargs):
def _apply_standard_conditioning(self, x, sigma, unconditioning, conditioning):
# fast batched path
x_twice = torch.cat([x] * 2)
sigma_twice = torch.cat([sigma] * 2)
both_conditionings = torch.cat([unconditioning, conditioning])
both_results = self.model_forward_callback(
x_twice, sigma_twice, both_conditionings, **kwargs,
x_twice, sigma_twice, both_conditionings
)
unconditioned_next_x, conditioned_next_x = both_results.chunk(2)
if conditioned_next_x.device.type == "mps":
@@ -291,17 +289,16 @@ class InvokeAIDiffuserComponent:
sigma,
unconditioning: torch.Tensor,
conditioning: torch.Tensor,
**kwargs,
):
# low-memory sequential path
unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning, **kwargs)
conditioned_next_x = self.model_forward_callback(x, sigma, conditioning, **kwargs)
unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning)
conditioned_next_x = self.model_forward_callback(x, sigma, conditioning)
if conditioned_next_x.device.type == "mps":
# prevent a result filled with zeros. seems to be a torch bug.
conditioned_next_x = conditioned_next_x.clone()
return unconditioned_next_x, conditioned_next_x
def _apply_hybrid_conditioning(self, x, sigma, unconditioning, conditioning, **kwargs):
def _apply_hybrid_conditioning(self, x, sigma, unconditioning, conditioning):
assert isinstance(conditioning, dict)
assert isinstance(unconditioning, dict)
x_twice = torch.cat([x] * 2)
@@ -316,7 +313,7 @@ class InvokeAIDiffuserComponent:
else:
both_conditionings[k] = torch.cat([unconditioning[k], conditioning[k]])
unconditioned_next_x, conditioned_next_x = self.model_forward_callback(
x_twice, sigma_twice, both_conditionings, **kwargs,
x_twice, sigma_twice, both_conditionings
).chunk(2)
return unconditioned_next_x, conditioned_next_x
@@ -327,7 +324,6 @@ class InvokeAIDiffuserComponent:
unconditioning,
conditioning,
cross_attention_control_types_to_do,
**kwargs,
):
if self.is_running_diffusers:
return self._apply_cross_attention_controlled_conditioning__diffusers(
@@ -336,7 +332,6 @@ class InvokeAIDiffuserComponent:
unconditioning,
conditioning,
cross_attention_control_types_to_do,
**kwargs,
)
else:
return self._apply_cross_attention_controlled_conditioning__compvis(
@@ -345,7 +340,6 @@ class InvokeAIDiffuserComponent:
unconditioning,
conditioning,
cross_attention_control_types_to_do,
**kwargs,
)
def _apply_cross_attention_controlled_conditioning__diffusers(
@@ -355,7 +349,6 @@ class InvokeAIDiffuserComponent:
unconditioning,
conditioning,
cross_attention_control_types_to_do,
**kwargs,
):
context: Context = self.cross_attention_control_context
@@ -371,7 +364,6 @@ class InvokeAIDiffuserComponent:
sigma,
unconditioning,
{"swap_cross_attn_context": cross_attn_processor_context},
**kwargs,
)
# do requested cross attention types for conditioning (positive prompt)
@@ -383,7 +375,6 @@ class InvokeAIDiffuserComponent:
sigma,
conditioning,
{"swap_cross_attn_context": cross_attn_processor_context},
**kwargs,
)
return unconditioned_next_x, conditioned_next_x
@@ -394,7 +385,6 @@ class InvokeAIDiffuserComponent:
unconditioning,
conditioning,
cross_attention_control_types_to_do,
**kwargs,
):
# print('pct', percent_through, ': doing cross attention control on', cross_attention_control_types_to_do)
# slower non-batched path (20% slower on mac MPS)
@@ -408,13 +398,13 @@ class InvokeAIDiffuserComponent:
context: Context = self.cross_attention_control_context
try:
unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning, **kwargs)
unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning)
# process x using the original prompt, saving the attention maps
# print("saving attention maps for", cross_attention_control_types_to_do)
for ca_type in cross_attention_control_types_to_do:
context.request_save_attention_maps(ca_type)
_ = self.model_forward_callback(x, sigma, conditioning, **kwargs,)
_ = self.model_forward_callback(x, sigma, conditioning)
context.clear_requests(cleanup=False)
# process x again, using the saved attention maps to control where self.edited_conditioning will be applied
@@ -425,7 +415,7 @@ class InvokeAIDiffuserComponent:
self.conditioning.cross_attention_control_args.edited_conditioning
)
conditioned_next_x = self.model_forward_callback(
x, sigma, edited_conditioning, **kwargs,
x, sigma, edited_conditioning
)
context.clear_requests(cleanup=True)
@@ -609,6 +599,7 @@ class InvokeAIDiffuserComponent:
)
# NOTE: the logic below is convoluted and overdue for a cleanup
num_actual_conditionings = len(c_or_weighted_c_list)
conditionings = [uc] + [c for c, weight in weighted_cond_list]
weights = [1] + [weight for c, weight in weighted_cond_list]
chunk_count = ceil(len(conditionings) / 2)
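For orientation, the fast batched path above is standard classifier-free guidance: one forward pass over a doubled batch, a chunk(2), then a weighted combine. A minimal sketch of that step (model_forward stands in for self.model_forward_callback; the combine rule is the standard CFG formula, not shown verbatim in this diff):

import torch

def cfg_step(model_forward, x, sigma, unconditioning, conditioning, guidance_scale):
    # one doubled-batch forward pass covers the unconditioned and conditioned predictions
    both = model_forward(
        torch.cat([x] * 2),
        torch.cat([sigma] * 2),
        torch.cat([unconditioning, conditioning]),
    )
    unconditioned_next_x, conditioned_next_x = both.chunk(2)
    # standard classifier-free-guidance combine
    return unconditioned_next_x + guidance_scale * (conditioned_next_x - unconditioned_next_x)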


@@ -1,9 +1,10 @@
'''
"""
Minimalist updater script. Prompts user for the tag or branch to update to and runs
pip install <path_to_git_source>.
'''
"""
import os
import platform
import requests
from rich import box, print
from rich.console import Console, Group, group
@@ -15,10 +16,8 @@ from rich.text import Text
from invokeai.version import __version__
INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive"
INVOKE_AI_TAG="https://github.com/invoke-ai/InvokeAI/archive/refs/tags"
INVOKE_AI_BRANCH="https://github.com/invoke-ai/InvokeAI/archive/refs/heads"
INVOKE_AI_REL="https://api.github.com/repos/invoke-ai/InvokeAI/releases"
INVOKE_AI_SRC = "https://github.com/invoke-ai/InvokeAI/archive"
INVOKE_AI_REL = "https://api.github.com/repos/invoke-ai/InvokeAI/releases"
OS = platform.uname().system
ARCH = platform.uname().machine
@@ -29,22 +28,22 @@ if OS == "Windows":
else:
console = Console(style=Style(color="grey74", bgcolor="grey19"))
def get_versions()->dict:
def get_versions() -> dict:
return requests.get(url=INVOKE_AI_REL).json()
def welcome(versions: dict):
@group()
def text():
yield f'InvokeAI Version: [bold yellow]{__version__}'
yield ''
yield 'This script will update InvokeAI to the latest release, or to a development version of your choice.'
yield ''
yield '[bold yellow]Options:'
yield f'''[1] Update to the latest official release ([italic]{versions[0]['tag_name']}[/italic])
yield f"InvokeAI Version: [bold yellow]{__version__}"
yield ""
yield "This script will update InvokeAI to the latest release, or to a development version of your choice."
yield ""
yield "[bold yellow]Options:"
yield f"""[1] Update to the latest official release ([italic]{versions[0]['tag_name']}[/italic])
[2] Update to the bleeding-edge development version ([italic]main[/italic])
[3] Manually enter the [bold]tag name[/bold] for the version you wish to update to
[4] Manually enter the [bold]branch name[/bold] for the version you wish to update to'''
[3] Manually enter the tag or branch name you wish to update"""
console.rule()
print(
@@ -60,41 +59,33 @@ def welcome(versions: dict):
)
console.line()
def main():
versions = get_versions()
welcome(versions)
tag = None
branch = None
release = None
choice = Prompt.ask('Choice:',choices=['1','2','3','4'],default='1')
if choice=='1':
release = versions[0]['tag_name']
elif choice=='2':
release = 'main'
elif choice=='3':
tag = Prompt.ask('Enter an InvokeAI tag name')
elif choice=='4':
branch = Prompt.ask('Enter an InvokeAI branch name')
choice = Prompt.ask("Choice:", choices=["1", "2", "3"], default="1")
print(f':crossed_fingers: Upgrading to [yellow]{tag if tag else release}[/yellow]')
if release:
cmd = f'pip install {INVOKE_AI_SRC}/{release}.zip --use-pep517 --upgrade'
elif tag:
cmd = f'pip install {INVOKE_AI_TAG}/{tag}.zip --use-pep517 --upgrade'
if choice == "1":
tag = versions[0]["tag_name"]
elif choice == "2":
tag = "main"
elif choice == "3":
tag = Prompt.ask("Enter an InvokeAI tag or branch name")
print(f":crossed_fingers: Upgrading to [yellow]{tag}[/yellow]")
cmd = f"pip install {INVOKE_AI_SRC}/{tag}.zip --use-pep517"
print("")
print("")
if os.system(cmd) == 0:
print(f":heavy_check_mark: Upgrade successful")
else:
cmd = f'pip install {INVOKE_AI_BRANCH}/{branch}.zip --use-pep517 --upgrade'
print('')
print('')
if os.system(cmd)==0:
print(f':heavy_check_mark: Upgrade successful')
else:
print(f':exclamation: [bold red]Upgrade failed[/red bold]')
print(f":exclamation: [bold red]Upgrade failed[/red bold]")
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
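The collapse from four menu options to three works because GitHub's archive endpoint resolves tags and branch names through the same URL scheme, so the separate INVOKE_AI_TAG and INVOKE_AI_BRANCH bases become redundant. An illustration (the tag value is an example, not a pinned release):

INVOKE_AI_SRC = "https://github.com/invoke-ai/InvokeAI/archive"
# both of these resolve through the same archive endpoint:
cmd_for_tag = f"pip install {INVOKE_AI_SRC}/v2.3.4.zip --use-pep517"   # a release tag
cmd_for_branch = f"pip install {INVOKE_AI_SRC}/main.zip --use-pep517"  # a branch name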

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -1,4 +1,4 @@
import{j as y,cO as Ie,r as _,cP as bt,q as Lr,cQ as o,cR as b,cS as v,cT as S,cU as Vr,cV as ut,cW as vt,cN as ft,cX as mt,n as gt,cY as ht,E as pt}from"./index-e53e8108.js";import{d as yt,i as St,T as xt,j as $t,h as kt}from"./storeHooks-5cde7d31.js";var Or=`
import{j as y,cN as Ie,r as _,cO as bt,q as Lr,cP as o,cQ as b,cR as v,cS as S,cT as Vr,cU as ut,cV as vt,cM as ft,cW as mt,n as gt,cX as ht,E as pt}from"./index-f7f41e1f.js";import{d as yt,i as St,T as xt,j as $t,h as kt}from"./storeHooks-eaf47ae3.js";var Or=`
:root {
--chakra-vh: 100vh;
}


@@ -12,7 +12,7 @@
margin: 0;
}
</style>
<script type="module" crossorigin src="./assets/index-e53e8108.js"></script>
<script type="module" crossorigin src="./assets/index-f7f41e1f.js"></script>
<link rel="stylesheet" href="./assets/index-5483945c.css">
</head>


@@ -8,6 +8,7 @@
"darkTheme": "داكن",
"lightTheme": "فاتح",
"greenTheme": "أخضر",
"text2img": "نص إلى صورة",
"img2img": "صورة إلى صورة",
"unifiedCanvas": "لوحة موحدة",
"nodes": "عقد",


@@ -7,6 +7,7 @@
"darkTheme": "Dunkel",
"lightTheme": "Hell",
"greenTheme": "Grün",
"text2img": "Text zu Bild",
"img2img": "Bild zu Bild",
"nodes": "Knoten",
"langGerman": "Deutsch",


@@ -505,9 +505,7 @@
"info": "Info",
"deleteImage": "Delete Image",
"initialImage": "Initial Image",
"showOptionsPanel": "Show Options Panel",
"hidePreview": "Hide Preview",
"showPreview": "Show Preview"
"showOptionsPanel": "Show Options Panel"
},
"settings": {
"models": "Models",


@@ -8,6 +8,7 @@
"darkTheme": "Oscuro",
"lightTheme": "Claro",
"greenTheme": "Verde",
"text2img": "Texto a Imagen",
"img2img": "Imagen a Imagen",
"unifiedCanvas": "Lienzo Unificado",
"nodes": "Nodos",
@@ -69,11 +70,7 @@
"langHebrew": "Hebreo",
"pinOptionsPanel": "Pin del panel de opciones",
"loading": "Cargando",
"loadingInvokeAI": "Cargando invocar a la IA",
"postprocessing": "Tratamiento posterior",
"txt2img": "De texto a imagen",
"accept": "Aceptar",
"cancel": "Cancelar"
"loadingInvokeAI": "Cargando invocar a la IA"
},
"gallery": {
"generations": "Generaciones",
@@ -407,8 +404,7 @@
"none": "ninguno",
"pickModelType": "Elige el tipo de modelo",
"v2_768": "v2 (768px)",
"addDifference": "Añadir una diferencia",
"scanForModels": "Buscar modelos"
"addDifference": "Añadir una diferencia"
},
"parameters": {
"images": "Imágenes",
@@ -578,7 +574,7 @@
"autoSaveToGallery": "Guardar automáticamente en galería",
"saveBoxRegionOnly": "Guardar solo región dentro de la caja",
"limitStrokesToBox": "Limitar trazos a la caja",
"showCanvasDebugInfo": "Mostrar la información adicional del lienzo",
"showCanvasDebugInfo": "Mostrar información de depuración de lienzo",
"clearCanvasHistory": "Limpiar historial de lienzo",
"clearHistory": "Limpiar historial",
"clearCanvasHistoryMessage": "Limpiar el historial de lienzo también restablece completamente el lienzo unificado. Esto incluye todo el historial de deshacer/rehacer, las imágenes en el área de preparación y la capa base del lienzo.",


@@ -8,6 +8,7 @@
"darkTheme": "Sombre",
"lightTheme": "Clair",
"greenTheme": "Vert",
"text2img": "Texte en image",
"img2img": "Image en image",
"unifiedCanvas": "Canvas unifié",
"nodes": "Nœuds",
@@ -46,19 +47,7 @@
"statusLoadingModel": "Chargement du modèle",
"statusModelChanged": "Modèle changé",
"discordLabel": "Discord",
"githubLabel": "Github",
"accept": "Accepter",
"statusMergingModels": "Mélange des modèles",
"loadingInvokeAI": "Chargement de Invoke AI",
"cancel": "Annuler",
"langEnglish": "Anglais",
"statusConvertingModel": "Conversion du modèle",
"statusModelConverted": "Modèle converti",
"loading": "Chargement",
"pinOptionsPanel": "Épingler la page d'options",
"statusMergedModels": "Modèles mélangés",
"txt2img": "Texte vers image",
"postprocessing": "Post-Traitement"
"githubLabel": "Github"
},
"gallery": {
"generations": "Générations",
@@ -529,15 +518,5 @@
"betaDarkenOutside": "Assombrir à l'extérieur",
"betaLimitToBox": "Limiter à la boîte",
"betaPreserveMasked": "Conserver masqué"
},
"accessibility": {
"uploadImage": "Charger une image",
"reset": "Réinitialiser",
"nextImage": "Image suivante",
"previousImage": "Image précédente",
"useThisParameter": "Utiliser ce paramètre",
"zoomIn": "Zoom avant",
"zoomOut": "Zoom arrière",
"showOptionsPanel": "Montrer la page d'options"
}
}


@@ -125,6 +125,7 @@
"langSimplifiedChinese": "סינית",
"langUkranian": "אוקראינית",
"langSpanish": "ספרדית",
"text2img": "טקסט לתמונה",
"img2img": "תמונה לתמונה",
"unifiedCanvas": "קנבס מאוחד",
"nodes": "צמתים",


@@ -8,6 +8,7 @@
"darkTheme": "Scuro",
"lightTheme": "Chiaro",
"greenTheme": "Verde",
"text2img": "Testo a Immagine",
"img2img": "Immagine a Immagine",
"unifiedCanvas": "Tela unificata",
"nodes": "Nodi",
@@ -69,11 +70,7 @@
"loading": "Caricamento in corso",
"oceanTheme": "Oceano",
"langHebrew": "Ebraico",
"loadingInvokeAI": "Caricamento Invoke AI",
"postprocessing": "Post Elaborazione",
"txt2img": "Testo a Immagine",
"accept": "Accetta",
"cancel": "Annulla"
"loadingInvokeAI": "Caricamento Invoke AI"
},
"gallery": {
"generations": "Generazioni",
@@ -407,8 +404,7 @@
"v2_768": "v2 (768px)",
"none": "niente",
"addDifference": "Aggiungi differenza",
"pickModelType": "Scegli il tipo di modello",
"scanForModels": "Cerca modelli"
"pickModelType": "Scegli il tipo di modello"
},
"parameters": {
"images": "Immagini",
@@ -578,7 +574,7 @@
"autoSaveToGallery": "Salvataggio automatico nella Galleria",
"saveBoxRegionOnly": "Salva solo l'area di selezione",
"limitStrokesToBox": "Limita i tratti all'area di selezione",
"showCanvasDebugInfo": "Mostra ulteriori informazioni sulla Tela",
"showCanvasDebugInfo": "Mostra informazioni di debug della Tela",
"clearCanvasHistory": "Cancella cronologia Tela",
"clearHistory": "Cancella la cronologia",
"clearCanvasHistoryMessage": "La cancellazione della cronologia della tela lascia intatta la tela corrente, ma cancella in modo irreversibile la cronologia degli annullamenti e dei ripristini.",
@@ -616,7 +612,7 @@
"copyMetadataJson": "Copia i metadati JSON",
"exitViewer": "Esci dal visualizzatore",
"zoomIn": "Zoom avanti",
"zoomOut": "Zoom indietro",
"zoomOut": "Zoom Indietro",
"rotateCounterClockwise": "Ruotare in senso antiorario",
"rotateClockwise": "Ruotare in senso orario",
"flipHorizontally": "Capovolgi orizzontalmente",


@@ -11,6 +11,7 @@
"langArabic": "العربية",
"langEnglish": "English",
"langDutch": "Nederlands",
"text2img": "텍스트->이미지",
"unifiedCanvas": "통합 캔버스",
"langFrench": "Français",
"langGerman": "Deutsch",


@@ -8,6 +8,7 @@
"darkTheme": "Donker",
"lightTheme": "Licht",
"greenTheme": "Groen",
"text2img": "Tekst naar afbeelding",
"img2img": "Afbeelding naar afbeelding",
"unifiedCanvas": "Centraal canvas",
"nodes": "Knooppunten",


@@ -8,6 +8,7 @@
"darkTheme": "Ciemny",
"lightTheme": "Jasny",
"greenTheme": "Zielony",
"text2img": "Tekst na obraz",
"img2img": "Obraz na obraz",
"unifiedCanvas": "Tryb uniwersalny",
"nodes": "Węzły",


@@ -20,6 +20,7 @@
"langSpanish": "Espanhol",
"langRussian": "Русский",
"langUkranian": "Украї́нська",
"text2img": "Texto para Imagem",
"img2img": "Imagem para Imagem",
"unifiedCanvas": "Tela Unificada",
"nodes": "Nós",


@@ -8,6 +8,7 @@
"darkTheme": "Noite",
"lightTheme": "Dia",
"greenTheme": "Verde",
"text2img": "Texto Para Imagem",
"img2img": "Imagem Para Imagem",
"unifiedCanvas": "Tela Unificada",
"nodes": "Nódulos",


@@ -8,6 +8,7 @@
"darkTheme": "Темная",
"lightTheme": "Светлая",
"greenTheme": "Зеленая",
"text2img": "Изображение из текста (text2img)",
"img2img": "Изображение в изображение (img2img)",
"unifiedCanvas": "Универсальный холст",
"nodes": "Ноды",


@@ -8,6 +8,7 @@
"darkTheme": "Темна",
"lightTheme": "Світла",
"greenTheme": "Зелена",
"text2img": "Зображення із тексту (text2img)",
"img2img": "Зображення із зображення (img2img)",
"unifiedCanvas": "Універсальне полотно",
"nodes": "Вузли",


@@ -8,6 +8,7 @@
"darkTheme": "暗色",
"lightTheme": "亮色",
"greenTheme": "绿色",
"text2img": "文字到图像",
"img2img": "图像到图像",
"unifiedCanvas": "统一画布",
"nodes": "节点",


@@ -33,6 +33,7 @@
"langBrPortuguese": "巴西葡萄牙語",
"langRussian": "俄語",
"langSpanish": "西班牙語",
"text2img": "文字到圖像",
"unifiedCanvas": "統一畫布"
}
}


@@ -80,8 +80,6 @@ interface InvokeProps extends PropsWithChildren {
disabledPanels?: string[];
disabledTabs?: InvokeTabName[];
token?: string;
shouldTransformUrls?: boolean;
shouldFetchImages?: boolean;
}
declare function Invoke(props: InvokeProps): JSX.Element;


@@ -6,7 +6,6 @@
"prepare": "cd ../../../ && husky install invokeai/frontend/web/.husky",
"dev": "concurrently \"vite dev\" \"yarn run theme:watch\"",
"dev:nodes": "concurrently \"vite dev --mode nodes\" \"yarn run theme:watch\"",
"dev:host": "concurrently \"vite dev --host\" \"yarn run theme:watch\"",
"build": "yarn run lint && vite build",
"api:web": "openapi -i http://localhost:9090/openapi.json -o src/services/api --client axios --useOptions --useUnionTypes --exportSchemas true --indent 2 --request src/services/fixtures/request.ts",
"api:file": "openapi -i src/services/fixtures/openapi.json -o src/services/api --client axios --useOptions --useUnionTypes --exportSchemas true --indent 2 --request src/services/fixtures/request.ts",
@@ -48,13 +47,11 @@
"@dagrejs/graphlib": "^2.1.12",
"@emotion/react": "^11.10.6",
"@emotion/styled": "^11.10.6",
"@fontsource/inter": "^4.5.15",
"@reduxjs/toolkit": "^1.9.3",
"chakra-ui-contextmenu": "^1.0.5",
"dateformat": "^5.0.3",
"formik": "^2.2.9",
"framer-motion": "^9.0.4",
"fuse.js": "^6.6.2",
"i18next": "^22.4.10",
"i18next-browser-languagedetector": "^7.0.1",
"i18next-http-backend": "^2.1.1",
@@ -83,8 +80,8 @@
"uuid": "^9.0.0"
},
"devDependencies": {
"@fontsource/inter": "^4.5.15",
"@types/dateformat": "^5.0.0",
"@types/lodash": "^4.14.194",
"@types/react": "^18.0.28",
"@types/react-dom": "^18.0.11",
"@types/react-transition-group": "^4.4.5",


@@ -18,7 +18,7 @@
"training": "Training",
"trainingDesc1": "Ein spezieller Arbeitsablauf zum Trainieren Ihrer eigenen Embeddings und Checkpoints mit Textual Inversion und Dreambooth über die Weboberfläche.",
"trainingDesc2": "InvokeAI unterstützt bereits das Training von benutzerdefinierten Embeddings mit Textual Inversion unter Verwendung des Hauptskripts.",
"upload": "Hochladen",
"upload": "Upload",
"close": "Schließen",
"load": "Laden",
"statusConnected": "Verbunden",
@@ -41,34 +41,12 @@
"statusUpscaling": "Hochskalierung",
"statusUpscalingESRGAN": "Hochskalierung (ESRGAN)",
"statusLoadingModel": "Laden des Modells",
"statusModelChanged": "Modell Geändert",
"cancel": "Abbruch",
"accept": "Annehmen",
"back": "Zurück",
"langEnglish": "Englisch",
"langDutch": "Niederländisch",
"langFrench": "Französisch",
"oceanTheme": "Ozean",
"langItalian": "Italienisch",
"langPortuguese": "Portogisisch",
"langRussian": "Russisch",
"langUkranian": "Ukrainisch",
"hotkeysLabel": "Tastenkombinationen",
"githubLabel": "Github",
"discordLabel": "Discord",
"txt2img": "Text zu Bild",
"postprocessing": "Nachbearbeitung",
"langPolish": "Polnisch",
"langJapanese": "Japanisch",
"langArabic": "Arabisch",
"langKorean": "Koreanisch",
"langHebrew": "Hebräisch",
"langSpanish": "Spanisch"
"statusModelChanged": "Modell Geändert"
},
"gallery": {
"generations": "Erzeugungen",
"showGenerations": "Zeige Erzeugnisse",
"uploads": "Hochgelades",
"uploads": "Uploads",
"showUploads": "Zeige Uploads",
"galleryImageSize": "Bildgröße",
"galleryImageResetSize": "Größe zurücksetzen",
@@ -334,11 +312,7 @@
"deleteModel": "Model löschen",
"deleteConfig": "Konfiguration löschen",
"deleteMsg1": "Möchten Sie diesen Model-Eintrag wirklich aus InvokeAI löschen?",
"deleteMsg2": "Dadurch wird die Modellprüfpunktdatei nicht von Ihrer Festplatte gelöscht. Sie können sie bei Bedarf erneut hinzufügen.",
"customConfig": "Benutzerdefinierte Konfiguration",
"invokeRoot": "InvokeAI Ordner",
"formMessageDiffusersVAELocationDesc": "Falls nicht angegeben, sucht InvokeAI nach der VAE-Datei innerhalb des oben angegebenen Modell Speicherortes.",
"checkpointModels": "Kontrollpunkte"
"deleteMsg2": "Dadurch wird die Modellprüfpunktdatei nicht von Ihrer Festplatte gelöscht. Sie können sie bei Bedarf erneut hinzufügen."
},
"parameters": {
"images": "Bilder",
@@ -396,10 +370,7 @@
"useInitImg": "Ausgangsbild verwenden",
"deleteImage": "Bild löschen",
"initialImage": "Ursprüngliches Bild",
"showOptionsPanel": "Optionsleiste zeigen",
"cancel": {
"setType": "Abbruchart festlegen"
}
"showOptionsPanel": "Optionsleiste zeigen"
},
"settings": {
"displayInProgress": "Bilder in Bearbeitung anzeigen",
@@ -518,25 +489,5 @@
"betaDarkenOutside": "Außen abdunkeln",
"betaLimitToBox": "Begrenzung auf das Feld",
"betaPreserveMasked": "Maskiertes bewahren"
},
"accessibility": {
"modelSelect": "Model Auswahl",
"uploadImage": "Bild hochladen",
"previousImage": "Voriges Bild",
"useThisParameter": "Benutze diesen Parameter",
"copyMetadataJson": "Kopiere metadata JSON",
"zoomIn": "Vergrößern",
"rotateClockwise": "Im Uhrzeigersinn drehen",
"flipHorizontally": "Horizontal drehen",
"flipVertically": "Vertikal drehen",
"modifyConfig": "Optionen einstellen",
"toggleAutoscroll": "Auroscroll ein/ausschalten",
"toggleLogViewer": "Log Betrachter ein/ausschalten",
"showGallery": "Zeige Galerie",
"showOptionsPanel": "Zeige Optionen",
"reset": "Zurücksetzen",
"nextImage": "Nächstes Bild",
"zoomOut": "Verkleinern",
"rotateCounterClockwise": "Gegen den Uhrzeigersinn verdrehen"
}
}


@@ -8,7 +8,7 @@
"nextImage": "Next Image",
"useThisParameter": "Use this parameter",
"copyMetadataJson": "Copy metadata JSON",
"exitViewer": "Exit Viewer",
"exitViewer": "ExitViewer",
"zoomIn": "Zoom In",
"zoomOut": "Zoom Out",
"rotateCounterClockwise": "Rotate Counter-Clockwise",
@@ -19,8 +19,7 @@
"toggleAutoscroll": "Toggle autoscroll",
"toggleLogViewer": "Toggle Log Viewer",
"showGallery": "Show Gallery",
"showOptionsPanel": "Show Options Panel",
"menu": "Menu"
"showOptionsPanel": "Show Options Panel"
},
"common": {
"hotkeysLabel": "Hotkeys",
@@ -53,7 +52,6 @@
"txt2img": "Text To Image",
"img2img": "Image To Image",
"unifiedCanvas": "Unified Canvas",
"linear": "Linear",
"nodes": "Nodes",
"postprocessing": "Post Processing",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
@@ -507,9 +505,7 @@
"info": "Info",
"deleteImage": "Delete Image",
"initialImage": "Initial Image",
"showOptionsPanel": "Show Options Panel",
"hidePreview": "Hide Preview",
"showPreview": "Show Preview"
"showOptionsPanel": "Show Options Panel"
},
"settings": {
"models": "Models",


@@ -73,8 +73,7 @@
"postprocessing": "Tratamiento posterior",
"txt2img": "De texto a imagen",
"accept": "Aceptar",
"cancel": "Cancelar",
"linear": "Lineal"
"cancel": "Cancelar"
},
"gallery": {
"generations": "Generaciones",
@@ -484,9 +483,7 @@
"negativePrompts": "Preguntas negativas",
"imageToImage": "Imagen a imagen",
"denoisingStrength": "Intensidad de la eliminación del ruido",
"hiresStrength": "Alta resistencia",
"showPreview": "Mostrar la vista previa",
"hidePreview": "Ocultar la vista previa"
"hiresStrength": "Alta resistencia"
},
"settings": {
"models": "Modelos",
@@ -532,11 +529,7 @@
"metadataLoadFailed": "Error al cargar metadatos",
"initialImageSet": "Imágen inicial establecida",
"initialImageNotSet": "Imagen inicial no establecida",
"initialImageNotSetDesc": "Error al establecer la imágen inicial",
"serverError": "Error en el servidor",
"disconnected": "Desconectado del servidor",
"canceled": "Procesando la cancelación",
"connected": "Conectado al servidor"
"initialImageNotSetDesc": "Error al establecer la imágen inicial"
},
"tooltip": {
"feature": {
@@ -632,7 +625,6 @@
"toggleAutoscroll": "Activar el autodesplazamiento",
"toggleLogViewer": "Alternar el visor de registros",
"showGallery": "Mostrar galería",
"showOptionsPanel": "Mostrar el panel de opciones",
"menu": "Menú"
"showOptionsPanel": "Mostrar el panel de opciones"
}
}


@@ -1,122 +0,0 @@
{
"accessibility": {
"reset": "Resetoi",
"useThisParameter": "Käytä tätä parametria",
"modelSelect": "Mallin Valinta",
"exitViewer": "Poistu katselimesta",
"uploadImage": "Lataa kuva",
"copyMetadataJson": "Kopioi metadata JSON:iin",
"invokeProgressBar": "Invoken edistymispalkki",
"nextImage": "Seuraava kuva",
"previousImage": "Edellinen kuva",
"zoomIn": "Lähennä",
"flipHorizontally": "Käännä vaakasuoraan",
"zoomOut": "Loitonna",
"rotateCounterClockwise": "Kierrä vastapäivään",
"rotateClockwise": "Kierrä myötäpäivään",
"flipVertically": "Käännä pystysuoraan",
"showGallery": "Näytä galleria",
"modifyConfig": "Muokkaa konfiguraatiota",
"toggleAutoscroll": "Kytke automaattinen vieritys",
"toggleLogViewer": "Kytke lokin katselutila",
"showOptionsPanel": "Näytä asetukset"
},
"common": {
"postProcessDesc2": "Erillinen käyttöliittymä tullaan julkaisemaan helpottaaksemme työnkulkua jälkikäsittelyssä.",
"training": "Kouluta",
"statusLoadingModel": "Ladataan mallia",
"statusModelChanged": "Malli vaihdettu",
"statusConvertingModel": "Muunnetaan mallia",
"statusModelConverted": "Malli muunnettu",
"langFrench": "Ranska",
"langItalian": "Italia",
"languagePickerLabel": "Kielen valinta",
"hotkeysLabel": "Pikanäppäimet",
"reportBugLabel": "Raportoi Bugista",
"langPolish": "Puola",
"themeLabel": "Teema",
"langDutch": "Hollanti",
"settingsLabel": "Asetukset",
"githubLabel": "Github",
"darkTheme": "Tumma",
"lightTheme": "Vaalea",
"greenTheme": "Vihreä",
"langGerman": "Saksa",
"langPortuguese": "Portugali",
"discordLabel": "Discord",
"langEnglish": "Englanti",
"oceanTheme": "Meren sininen",
"langRussian": "Venäjä",
"langUkranian": "Ukraina",
"langSpanish": "Espanja",
"upload": "Lataa",
"statusMergedModels": "Mallit yhdistelty",
"img2img": "Kuva kuvaksi",
"nodes": "Solmut",
"nodesDesc": "Solmupohjainen järjestelmä kuvien generoimiseen on parhaillaan kehitteillä. Pysy kuulolla päivityksistä tähän uskomattomaan ominaisuuteen liittyen.",
"postProcessDesc1": "Invoke AI tarjoaa monenlaisia jälkikäsittelyominaisuukisa. Kuvan laadun skaalaus sekä kasvojen korjaus ovat jo saatavilla WebUI:ssä. Voit ottaa ne käyttöön lisäasetusten valikosta teksti kuvaksi sekä kuva kuvaksi -välilehdiltä. Voit myös suoraan prosessoida kuvia käyttämällä kuvan toimintapainikkeita nykyisen kuvan yläpuolella tai tarkastelussa.",
"postprocessing": "Jälkikäsitellään",
"postProcessing": "Jälkikäsitellään",
"cancel": "Peruuta",
"close": "Sulje",
"accept": "Hyväksy",
"statusConnected": "Yhdistetty",
"statusError": "Virhe",
"statusProcessingComplete": "Prosessointi valmis",
"load": "Lataa",
"back": "Takaisin",
"statusGeneratingTextToImage": "Generoidaan tekstiä kuvaksi",
"trainingDesc2": "InvokeAI tukee jo mukautettujen upotusten kouluttamista tekstin inversiolla käyttäen pääskriptiä.",
"statusDisconnected": "Yhteys katkaistu",
"statusPreparing": "Valmistellaan",
"statusIterationComplete": "Iteraatio valmis",
"statusMergingModels": "Yhdistellään malleja",
"statusProcessingCanceled": "Valmistelu peruutettu",
"statusSavingImage": "Tallennetaan kuvaa",
"statusGeneratingImageToImage": "Generoidaan kuvaa kuvaksi",
"statusRestoringFacesGFPGAN": "Korjataan kasvoja (GFPGAN)",
"statusRestoringFacesCodeFormer": "Korjataan kasvoja (CodeFormer)",
"statusGeneratingInpainting": "Generoidaan sisällemaalausta",
"statusGeneratingOutpainting": "Generoidaan ulosmaalausta",
"statusRestoringFaces": "Korjataan kasvoja",
"pinOptionsPanel": "Kiinnitä asetukset -paneeli",
"loadingInvokeAI": "Ladataan Invoke AI:ta",
"loading": "Ladataan",
"statusGenerating": "Generoidaan",
"txt2img": "Teksti kuvaksi",
"trainingDesc1": "Erillinen työnkulku omien upotusten ja tarkastuspisteiden kouluttamiseksi käyttäen tekstin inversiota ja dreamboothia selaimen käyttöliittymässä.",
"postProcessDesc3": "Invoke AI:n komentorivi tarjoaa paljon muita ominaisuuksia, kuten esimerkiksi Embiggenin.",
"unifiedCanvas": "Yhdistetty kanvas",
"statusGenerationComplete": "Generointi valmis"
},
"gallery": {
"uploads": "Lataukset",
"showUploads": "Näytä lataukset",
"galleryImageResetSize": "Resetoi koko",
"maintainAspectRatio": "Säilytä kuvasuhde",
"galleryImageSize": "Kuvan koko",
"pinGallery": "Kiinnitä galleria",
"showGenerations": "Näytä generaatiot",
"singleColumnLayout": "Yhden sarakkeen asettelu",
"generations": "Generoinnit",
"gallerySettings": "Gallerian asetukset",
"autoSwitchNewImages": "Vaihda uusiin kuviin automaattisesti",
"allImagesLoaded": "Kaikki kuvat ladattu",
"noImagesInGallery": "Ei kuvia galleriassa",
"loadMore": "Lataa lisää"
},
"hotkeys": {
"keyboardShortcuts": "näppäimistön pikavalinnat",
"appHotkeys": "Sovelluksen pikanäppäimet",
"generalHotkeys": "Yleiset pikanäppäimet",
"galleryHotkeys": "Gallerian pikanäppäimet",
"unifiedCanvasHotkeys": "Yhdistetyn kanvaan pikanäppäimet",
"cancel": {
"desc": "Peruuta kuvan luominen",
"title": "Peruuta"
},
"invoke": {
"desc": "Luo kuva"
}
}
}


@@ -73,8 +73,7 @@
"postprocessing": "Post Elaborazione",
"txt2img": "Testo a Immagine",
"accept": "Accetta",
"cancel": "Annulla",
"linear": "Lineare"
"cancel": "Annulla"
},
"gallery": {
"generations": "Generazioni",
@@ -484,9 +483,7 @@
},
"hSymmetryStep": "Passi Simmetria Orizzontale",
"vSymmetryStep": "Passi Simmetria Verticale",
"symmetry": "Simmetria",
"hidePreview": "Nascondi l'anteprima",
"showPreview": "Mostra l'anteprima"
"symmetry": "Simmetria"
},
"settings": {
"models": "Modelli",
@@ -532,11 +529,7 @@
"metadataLoadFailed": "Impossibile caricare i metadati",
"initialImageSet": "Immagine iniziale impostata",
"initialImageNotSet": "Immagine iniziale non impostata",
"initialImageNotSetDesc": "Impossibile caricare l'immagine iniziale",
"serverError": "Errore del Server",
"disconnected": "Disconnesso dal Server",
"connected": "Connesso al Server",
"canceled": "Elaborazione annullata"
"initialImageNotSetDesc": "Impossibile caricare l'immagine iniziale"
},
"tooltip": {
"feature": {
@@ -632,7 +625,6 @@
"showOptionsPanel": "Mostra il pannello opzioni",
"flipVertically": "Capovolgi verticalmente",
"toggleAutoscroll": "Attiva/disattiva lo scorrimento automatico",
"modifyConfig": "Modifica configurazione",
"menu": "Menu"
"modifyConfig": "Modifica configurazione"
}
}


@@ -37,43 +37,7 @@
"statusUpscaling": "アップスケーリング",
"statusUpscalingESRGAN": "アップスケーリング (ESRGAN)",
"statusLoadingModel": "モデルを読み込む",
"statusModelChanged": "モデルを変更",
"cancel": "キャンセル",
"accept": "同意",
"langBrPortuguese": "Português do Brasil",
"langRussian": "Русский",
"langSimplifiedChinese": "简体中文",
"langUkranian": "Украї́нська",
"langSpanish": "Español",
"img2img": "img2img",
"unifiedCanvas": "Unified Canvas",
"statusMergingModels": "モデルのマージ",
"statusModelConverted": "変換済モデル",
"statusGeneratingInpainting": "Inpaintingを生成",
"statusIterationComplete": "Iteration Complete",
"statusGeneratingOutpainting": "Outpaintingを生成",
"loading": "ロード中",
"loadingInvokeAI": "Invoke AIをロード中",
"statusConvertingModel": "モデルの変換",
"statusMergedModels": "マージ済モデル",
"pinOptionsPanel": "オプションパネルを固定",
"githubLabel": "Github",
"hotkeysLabel": "ホットキー",
"langHebrew": "עברית",
"discordLabel": "Discord",
"langItalian": "Italiano",
"langEnglish": "English",
"oceanTheme": "オーシャン",
"langArabic": "アラビア語",
"langDutch": "Nederlands",
"langFrench": "Français",
"langGerman": "Deutsch",
"langPortuguese": "Português",
"nodes": "ノード",
"langKorean": "한국어",
"langPolish": "Polski",
"txt2img": "txt2img",
"postprocessing": "Post Processing"
"statusModelChanged": "モデルを変更"
},
"gallery": {
"uploads": "アップロード",
@@ -82,14 +46,11 @@
"galleryImageResetSize": "サイズをリセット",
"gallerySettings": "ギャラリーの設定",
"maintainAspectRatio": "アスペクト比を維持",
"singleColumnLayout": "1カラムレイアウト",
"singleColumnLayout": "シングルカラムレイアウト",
"pinGallery": "ギャラリーにピン留め",
"allImagesLoaded": "すべての画像を読み込む",
"loadMore": "さらに読み込む",
"noImagesInGallery": "ギャラリーに画像がありません",
"generations": "生成",
"showGenerations": "生成過程を見る",
"autoSwitchNewImages": "新しい画像に自動切替"
"noImagesInGallery": "ギャラリーに画像がありません"
},
"hotkeys": {
"keyboardShortcuts": "キーボードショートカット",
@@ -98,16 +59,14 @@
"galleryHotkeys": "ギャラリーのホットキー",
"unifiedCanvasHotkeys": "Unified Canvasのホットキー",
"invoke": {
"desc": "画像を生成",
"title": "Invoke"
"desc": "画像を生成"
},
"cancel": {
"title": "キャンセル",
"desc": "画像の生成をキャンセル"
},
"focusPrompt": {
"desc": "プロンプトテキストボックスにフォーカス",
"title": "プロジェクトにフォーカス"
"desc": "プロンプトテキストボックスにフォーカス"
},
"toggleOptions": {
"title": "オプションパネルのトグル",
@@ -451,27 +410,5 @@
"accept": "同意",
"showHide": "表示/非表示",
"discardAll": "すべて破棄"
},
"accessibility": {
"modelSelect": "モデルを選択",
"invokeProgressBar": "進捗バー",
"reset": "リセット",
"uploadImage": "画像をアップロード",
"previousImage": "前の画像",
"nextImage": "次の画像",
"useThisParameter": "このパラメータを使用する",
"copyMetadataJson": "メタデータをコピー(JSON)",
"zoomIn": "ズームイン",
"exitViewer": "ExitViewer",
"zoomOut": "ズームアウト",
"rotateCounterClockwise": "反時計回りに回転",
"rotateClockwise": "時計回りに回転",
"flipHorizontally": "水平方向に反転",
"flipVertically": "垂直方向に反転",
"toggleAutoscroll": "自動スクロールの切替",
"modifyConfig": "Modify Config",
"toggleLogViewer": "Log Viewerの切替",
"showGallery": "ギャラリーを表示",
"showOptionsPanel": "オプションパネルを表示"
}
}


@@ -1 +0,0 @@
{}


@@ -62,18 +62,7 @@
"statusConvertingModel": "Omzetten van model",
"statusModelConverted": "Model omgezet",
"statusMergingModels": "Samenvoegen van modellen",
"statusMergedModels": "Modellen samengevoegd",
"cancel": "Annuleer",
"accept": "Akkoord",
"langPortuguese": "Português",
"pinOptionsPanel": "Zet deelscherm Opties vast",
"loading": "Bezig met laden",
"loadingInvokeAI": "Bezig met laden van Invoke AI",
"oceanTheme": "Oceaan",
"langHebrew": "עברית",
"langKorean": "한국어",
"txt2img": "Tekst naar afbeelding",
"postprocessing": "Nabewerking"
"statusMergedModels": "Modellen samengevoegd"
},
"gallery": {
"generations": "Gegenereerde afbeeldingen",
@@ -312,7 +301,7 @@
"name": "Naam",
"nameValidationMsg": "Geef een naam voor je model",
"description": "Beschrijving",
"descriptionValidationMsg": "Voeg een beschrijving toe voor je model",
"descriptionValidationMsg": "Voeg een beschrijving toe voor je model.",
"config": "Configuratie",
"configValidationMsg": "Pad naar het configuratiebestand van je model.",
"modelLocation": "Locatie model",
@@ -402,13 +391,7 @@
"modelMergeInterpAddDifferenceHelp": "In deze stand wordt model 3 eerst van model 2 afgehaald. Wat daar uitkomt wordt gemengd met model 1, gebruikmakend van de hierboven ingestelde alfawaarde.",
"inverseSigmoid": "Keer Sigmoid om",
"sigmoid": "Sigmoid",
"weightedSum": "Gewogen som",
"v2_base": "v2 (512px)",
"v2_768": "v2 (768px)",
"none": "geen",
"addDifference": "Voeg verschil toe",
"scanForModels": "Scan naar modellen",
"pickModelType": "Kies modelsoort"
"weightedSum": "Gewogen som"
},
"parameters": {
"images": "Afbeeldingen",
@@ -578,7 +561,7 @@
"autoSaveToGallery": "Bewaar automatisch naar galerij",
"saveBoxRegionOnly": "Bewaar alleen tekengebied",
"limitStrokesToBox": "Beperk streken tot tekenvak",
"showCanvasDebugInfo": "Toon aanvullende canvasgegevens",
"showCanvasDebugInfo": "Toon foutopsporingsgegevens canvas",
"clearCanvasHistory": "Wis canvasgeschiedenis",
"clearHistory": "Wis geschiedenis",
"clearCanvasHistoryMessage": "Het wissen van de canvasgeschiedenis laat het huidige canvas ongemoeid, maar wist onherstelbaar de geschiedenis voor het ongedaan maken en herhalen.",
@@ -604,27 +587,5 @@
"betaDarkenOutside": "Verduister buiten tekenvak",
"betaLimitToBox": "Beperk tot tekenvak",
"betaPreserveMasked": "Behoud masker"
},
"accessibility": {
"exitViewer": "Stop viewer",
"zoomIn": "Zoom in",
"rotateCounterClockwise": "Draai tegen de klok in",
"modelSelect": "Modelkeuze",
"invokeProgressBar": "Voortgangsbalk Invoke",
"reset": "Herstel",
"uploadImage": "Upload afbeelding",
"previousImage": "Vorige afbeelding",
"nextImage": "Volgende afbeelding",
"useThisParameter": "Gebruik deze parameter",
"copyMetadataJson": "Kopieer metagegevens-JSON",
"zoomOut": "Zoom uit",
"rotateClockwise": "Draai met de klok mee",
"flipHorizontally": "Spiegel horizontaal",
"flipVertically": "Spiegel verticaal",
"modifyConfig": "Wijzig configuratie",
"toggleAutoscroll": "Autom. scrollen aan/uit",
"toggleLogViewer": "Logboekviewer aan/uit",
"showGallery": "Toon galerij",
"showOptionsPanel": "Toon deelscherm Opties"
}
}


@@ -9,7 +9,7 @@
"lightTheme": "Светлая",
"greenTheme": "Зеленая",
"img2img": "Изображение в изображение (img2img)",
"unifiedCanvas": "Единый холст",
"unifiedCanvas": "Универсальный холст",
"nodes": "Ноды",
"langRussian": "Русский",
"nodesDesc": "Cистема генерации изображений на основе нодов (узлов) уже разрабатывается. Следите за новостями об этой замечательной функции.",
@@ -53,28 +53,7 @@
"loading": "Загрузка",
"loadingInvokeAI": "Загрузка Invoke AI",
"back": "Назад",
"statusConvertingModel": "Конвертация модели",
"cancel": "Отменить",
"accept": "Принять",
"oceanTheme": "Океан",
"langUkranian": "Украинский",
"langEnglish": "Английский",
"postprocessing": "Постобработка",
"langArabic": "Арабский",
"langSpanish": "Испанский",
"langSimplifiedChinese": "Китайский (упрощенный)",
"langDutch": "Нидерландский",
"langFrench": "Французский",
"langGerman": "Немецкий",
"langHebrew": "Иврит",
"langItalian": "Итальянский",
"langJapanese": "Японский",
"langKorean": "Корейский",
"langPolish": "Польский",
"langPortuguese": "Португальский",
"txt2img": "Текст в изображение (txt2img)",
"langBrPortuguese": "Португальский (Бразилия)",
"linear": "Линейная обработка"
"statusConvertingModel": "Конвертация модели"
},
"gallery": {
"generations": "Генерации",
@@ -93,11 +72,11 @@
"noImagesInGallery": "Изображений нет"
},
"hotkeys": {
"keyboardShortcuts": "Горячие клавиши",
"keyboardShortcuts": "Клавиатурные сокращения",
"appHotkeys": "Горячие клавиши приложения",
"generalHotkeys": "Общие горячие клавиши",
"galleryHotkeys": "Горячие клавиши галереи",
"unifiedCanvasHotkeys": "Горячие клавиши Единого холста",
"unifiedCanvasHotkeys": "Горячие клавиши универсального холста",
"invoke": {
"title": "Invoke",
"desc": "Сгенерировать изображение"
@@ -287,12 +266,12 @@
"desc": "Сбросить вид холста"
},
"previousStagingImage": {
"title": "Предыдущее изображение",
"desc": "Предыдущая область изображения"
"title": "Previous Staging Image",
"desc": "Предыдущее изображение"
},
"nextStagingImage": {
"title": "Следующее изображение",
"desc": "Следующая область изображения"
"title": "Next Staging Image",
"desc": "Следующее изображение"
},
"acceptStagingImage": {
"title": "Принять изображение",
@@ -374,42 +353,7 @@
"modelConverted": "Модель преобразована",
"invokeRoot": "Каталог InvokeAI",
"modelsMerged": "Модели объединены",
"mergeModels": "Объединить модели",
"scanForModels": "Просканировать модели",
"sigmoid": "Сигмоид",
"formMessageDiffusersModelLocation": "Расположение Diffusers-модели",
"modelThree": "Модель 3",
"modelMergeHeaderHelp2": "Только Diffusers-модели доступны для объединения. Если вы хотите объединить checkpoint-модели, сначала преобразуйте их в Diffusers.",
"pickModelType": "Выбрать тип модели",
"formMessageDiffusersVAELocation": "Расположение VAE",
"v1": "v1",
"convertToDiffusersSaveLocation": "Путь сохранения",
"customSaveLocation": "Пользовательский путь сохранения",
"alpha": "Альфа",
"diffusersModels": "Diffusers",
"customConfig": "Пользовательский конфиг",
"pathToCustomConfig": "Путь к пользовательскому конфигу",
"inpainting": "v1 Inpainting",
"sameFolder": "В ту же папку",
"modelOne": "Модель 1",
"mergedModelCustomSaveLocation": "Пользовательский путь",
"none": "пусто",
"addDifference": "Добавить разницу",
"vaeRepoIDValidationMsg": "Онлайн репозиторий VAE",
"convertToDiffusersHelpText2": "Этот процесс заменит вашу запись в Model Manager на версию той же модели в Diffusers.",
"custom": "Пользовательский",
"modelTwo": "Модель 2",
"mergedModelSaveLocation": "Путь сохранения",
"merge": "Объединить",
"interpolationType": "Тип интерполяции",
"modelMergeInterpAddDifferenceHelp": "В этом режиме Модель 3 сначала вычитается из Модели 2. Результирующая версия смешивается с Моделью 1 с установленным выше коэффициентом Альфа.",
"modelMergeHeaderHelp1": "Вы можете объединить до трех разных моделей, чтобы создать смешанную, соответствующую вашим потребностям.",
"modelMergeAlphaHelp": "Альфа влияет на силу смешивания моделей. Более низкие значения альфа приводят к меньшему влиянию второй модели.",
"inverseSigmoid": "Обратный Сигмоид",
"weightedSum": "Взвешенная сумма",
"safetensorModels": "SafeTensors",
"v2_768": "v2 (768px)",
"v2_base": "v2 (512px)"
"mergeModels": "Объединить модели"
},
"parameters": {
"images": "Изображения",
@@ -436,7 +380,7 @@
"scale": "Масштаб",
"otherOptions": "Другие параметры",
"seamlessTiling": "Бесшовный узор",
"hiresOptim": "Оптимизация High Res",
"hiresOptim": "Высокое разрешение",
"imageFit": "Уместить изображение",
"codeformerFidelity": "Точность",
"seamSize": "Размер шва",
@@ -453,11 +397,11 @@
"infillScalingHeader": "Заполнение и масштабирование",
"img2imgStrength": "Сила обработки img2img",
"toggleLoopback": "Зациклить обработку",
"invoke": "Invoke",
"invoke": "Вызвать",
"promptPlaceholder": "Введите запрос здесь (на английском). [исключенные токены], (более значимые)++, (менее значимые)--, swap и blend тоже доступны (смотрите Github)",
"sendTo": "Отправить",
"sendToImg2Img": "Отправить в img2img",
"sendToUnifiedCanvas": "Отправить на Единый холст",
"sendToUnifiedCanvas": "Отправить на холст",
"copyImageToLink": "Скопировать ссылку",
"downloadImage": "Скачать",
"openInViewer": "Открыть в просмотрщике",
@@ -469,24 +413,7 @@
"info": "Метаданные",
"deleteImage": "Удалить изображение",
"initialImage": "Исходное изображение",
"showOptionsPanel": "Показать панель настроек",
"vSymmetryStep": "Шаг верт. симметрии",
"cancel": {
"immediate": "Отменить немедленно",
"schedule": "Отменить после текущей итерации",
"isScheduled": "Отмена",
"setType": "Установить тип отмены"
},
"general": "Основное",
"hiresStrength": "Сила High Res",
"symmetry": "Симметрия",
"hSymmetryStep": "Шаг гор. симметрии",
"hidePreview": "Скрыть предпросмотр",
"imageToImage": "Изображение в изображение",
"denoisingStrength": "Сила шумоподавления",
"copyImage": "Скопировать изображение",
"negativePrompts": "Исключающий запрос",
"showPreview": "Показать предпросмотр"
"showOptionsPanel": "Показать панель настроек"
},
"settings": {
"models": "Модели",
@@ -496,11 +423,10 @@
"displayHelpIcons": "Показывать значки подсказок",
"useCanvasBeta": "Показывать инструменты слева (Beta UI)",
"enableImageDebugging": "Включить отладку",
"resetWebUI": "Сброс настроек Web UI",
"resetWebUI": "Вернуть умолчания",
"resetWebUIDesc1": "Сброс настроек веб-интерфейса удаляет только локальный кэш браузера с вашими изображениями и настройками. Он не удаляет изображения с диска.",
"resetWebUIDesc2": "Если изображения не отображаются в галерее или не работает что-то еще, пожалуйста, попробуйте сбросить настройки, прежде чем сообщать о проблеме на GitHub.",
"resetComplete": "Интерфейс сброшен. Обновите эту страницу.",
"useSlidersForAll": "Использовать ползунки для всех параметров"
"resetComplete": "Интерфейс сброшен. Обновите эту страницу."
},
"toast": {
"tempFoldersEmptied": "Временная папка очищена",
@@ -515,7 +441,7 @@
"imageSavedToGallery": "Изображение сохранено в галерею",
"canvasMerged": "Холст объединен",
"sentToImageToImage": "Отправить в img2img",
"sentToUnifiedCanvas": "Отправлено на Единый холст",
"sentToUnifiedCanvas": "Отправить на холст",
"parametersSet": "Параметры заданы",
"parametersNotSet": "Параметры не заданы",
"parametersNotSetDesc": "Не найдены метаданные изображения.",
@@ -532,11 +458,7 @@
"metadataLoadFailed": "Не удалось загрузить метаданные",
"initialImageSet": "Исходное изображение задано",
"initialImageNotSet": "Исходное изображение не задано",
"initialImageNotSetDesc": "Не получилось загрузить исходное изображение",
"serverError": "Ошибка сервера",
"disconnected": "Отключено от сервера",
"connected": "Подключено к серверу",
"canceled": "Обработка отменена"
"initialImageNotSetDesc": "Не получилось загрузить исходное изображение"
},
"tooltip": {
"feature": {
@@ -585,7 +507,7 @@
"autoSaveToGallery": "Автосохранение в галерее",
"saveBoxRegionOnly": "Сохранять только выделение",
"limitStrokesToBox": "Ограничить штрихи выделением",
"showCanvasDebugInfo": "Показать доп. информацию о холсте",
"showCanvasDebugInfo": "Показать отладку холста",
"clearCanvasHistory": "Очистить историю холста",
"clearHistory": "Очистить историю",
"clearCanvasHistoryMessage": "Очистка истории холста оставляет текущий холст нетронутым, но удаляет историю отмен и повторов.",
@@ -613,26 +535,6 @@
"betaPreserveMasked": "Сохранять маскируемую область"
},
"accessibility": {
"modelSelect": "Выбор модели",
"uploadImage": "Загрузить изображение",
"nextImage": "Следующее изображение",
"previousImage": "Предыдущее изображение",
"zoomIn": "Приблизить",
"zoomOut": "Отдалить",
"rotateClockwise": "Повернуть по часовой стрелке",
"rotateCounterClockwise": "Повернуть против часовой стрелки",
"flipVertically": "Перевернуть вертикально",
"flipHorizontally": "Отразить горизонтально",
"toggleAutoscroll": "Включить автопрокрутку",
"toggleLogViewer": "Показать или скрыть просмотрщик логов",
"showOptionsPanel": "Показать опции",
"showGallery": "Показать галерею",
"invokeProgressBar": "Индикатор выполнения",
"reset": "Сброс",
"modifyConfig": "Изменить конфиг",
"useThisParameter": "Использовать этот параметр",
"copyMetadataJson": "Скопировать метаданные JSON",
"exitViewer": "Закрыть просмотрщик",
"menu": "Меню"
"modelSelect": "Выбор модели"
}
}


@@ -1,254 +0,0 @@
{
"accessibility": {
"copyMetadataJson": "Kopiera metadata JSON",
"zoomIn": "Zooma in",
"exitViewer": "Avslutningsvisare",
"modelSelect": "Välj modell",
"uploadImage": "Ladda upp bild",
"invokeProgressBar": "Invoke förloppsmätare",
"nextImage": "Nästa bild",
"toggleAutoscroll": "Växla automatisk rullning",
"flipHorizontally": "Vänd vågrätt",
"flipVertically": "Vänd lodrätt",
"zoomOut": "Zooma ut",
"toggleLogViewer": "Växla logvisare",
"reset": "Starta om",
"previousImage": "Föregående bild",
"useThisParameter": "Använd denna parametern",
"showGallery": "Visa galleri",
"rotateCounterClockwise": "Rotera moturs",
"rotateClockwise": "Rotera medurs",
"modifyConfig": "Ändra konfiguration",
"showOptionsPanel": "Visa inställningspanelen"
},
"common": {
"hotkeysLabel": "Snabbtangenter",
"reportBugLabel": "Rapportera bugg",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Inställningar",
"darkTheme": "Mörk",
"lightTheme": "Ljus",
"greenTheme": "Grön",
"oceanTheme": "Hav",
"langEnglish": "Engelska",
"langDutch": "Nederländska",
"langFrench": "Franska",
"langGerman": "Tyska",
"langItalian": "Italienska",
"langArabic": "العربية",
"langHebrew": "עברית",
"langPolish": "Polski",
"langPortuguese": "Português",
"langBrPortuguese": "Português do Brasil",
"langSimplifiedChinese": "简体中文",
"langJapanese": "日本語",
"langKorean": "한국어",
"langRussian": "Русский",
"unifiedCanvas": "Förenad kanvas",
"nodesDesc": "Ett nodbaserat system för bildgenerering är under utveckling. Håll utkik för uppdateringar om denna fantastiska funktion.",
"langUkranian": "Украї́нська",
"langSpanish": "Español",
"postProcessDesc2": "Ett dedikerat användargränssnitt kommer snart att släppas för att underlätta mer avancerade arbetsflöden av efterbehandling.",
"trainingDesc1": "Ett dedikerat arbetsflöde för träning av dina egna inbäddningar och kontrollpunkter genom Textual Inversion eller Dreambooth från webbgränssnittet.",
"trainingDesc2": "InvokeAI stöder redan träning av anpassade inbäddningar med hjälp av Textual Inversion genom huvudscriptet.",
"upload": "Ladda upp",
"close": "Stäng",
"cancel": "Avbryt",
"accept": "Acceptera",
"statusDisconnected": "Frånkopplad",
"statusGeneratingTextToImage": "Genererar text till bild",
"statusGeneratingImageToImage": "Genererar Bild till bild",
"statusGeneratingInpainting": "Genererar Måla i",
"statusGenerationComplete": "Generering klar",
"statusModelConverted": "Modell konverterad",
"statusMergingModels": "Sammanfogar modeller",
"pinOptionsPanel": "Nåla fast inställningspanelen",
"loading": "Laddar",
"loadingInvokeAI": "Laddar Invoke AI",
"statusRestoringFaces": "Återskapar ansikten",
"languagePickerLabel": "Språkväljare",
"themeLabel": "Tema",
"txt2img": "Text till bild",
"nodes": "Noder",
"img2img": "Bild till bild",
"postprocessing": "Efterbehandling",
"postProcessing": "Efterbehandling",
"load": "Ladda",
"training": "Träning",
"postProcessDesc1": "Invoke AI erbjuder ett brett utbud av efterbehandlingsfunktioner. Uppskalning och ansiktsåterställning finns redan tillgängligt i webbgränssnittet. Du kommer åt dem ifrån Avancerade inställningar-menyn under Bild till bild-fliken. Du kan också behandla bilder direkt genom att använda knappen bildåtgärder ovanför nuvarande bild eller i bildvisaren.",
"postProcessDesc3": "Invoke AI's kommandotolk erbjuder många olika funktioner, bland annat \"Förstora\".",
"statusGenerating": "Genererar",
"statusError": "Fel",
"back": "Bakåt",
"statusConnected": "Ansluten",
"statusPreparing": "Förbereder",
"statusProcessingCanceled": "Bearbetning avbruten",
"statusProcessingComplete": "Bearbetning färdig",
"statusGeneratingOutpainting": "Genererar Fyll ut",
"statusIterationComplete": "Itterering klar",
"statusSavingImage": "Sparar bild",
"statusRestoringFacesGFPGAN": "Återskapar ansikten (GFPGAN)",
"statusRestoringFacesCodeFormer": "Återskapar ansikten (CodeFormer)",
"statusUpscaling": "Skala upp",
"statusUpscalingESRGAN": "Uppskalning (ESRGAN)",
"statusModelChanged": "Modell ändrad",
"statusLoadingModel": "Laddar modell",
"statusConvertingModel": "Konverterar modell",
"statusMergedModels": "Modeller sammanfogade"
},
"gallery": {
"generations": "Generationer",
"showGenerations": "Visa generationer",
"uploads": "Uppladdningar",
"showUploads": "Visa uppladdningar",
"galleryImageSize": "Bildstorlek",
"allImagesLoaded": "Alla bilder laddade",
"loadMore": "Ladda mer",
"galleryImageResetSize": "Återställ storlek",
"gallerySettings": "Galleriinställningar",
"maintainAspectRatio": "Behåll bildförhållande",
"pinGallery": "Nåla fast galleri",
"noImagesInGallery": "Inga bilder i galleriet",
"autoSwitchNewImages": "Ändra automatiskt till nya bilder",
"singleColumnLayout": "Enkolumnslayout"
},
"hotkeys": {
"generalHotkeys": "Allmänna snabbtangenter",
"galleryHotkeys": "Gallerisnabbtangenter",
"unifiedCanvasHotkeys": "Snabbtangenter för sammanslagskanvas",
"invoke": {
"title": "Anropa",
"desc": "Genererar en bild"
},
"cancel": {
"title": "Avbryt",
"desc": "Avbryt bildgenerering"
},
"focusPrompt": {
"desc": "Fokusera området för promptinmatning",
"title": "Fokusprompt"
},
"pinOptions": {
"desc": "Nåla fast alternativpanelen",
"title": "Nåla fast alternativ"
},
"toggleOptions": {
"title": "Växla inställningar",
"desc": "Öppna och stäng alternativpanelen"
},
"toggleViewer": {
"title": "Växla visaren",
"desc": "Öppna och stäng bildvisaren"
},
"toggleGallery": {
"title": "Växla galleri",
"desc": "Öppna eller stäng galleribyrån"
},
"maximizeWorkSpace": {
"title": "Maximera arbetsyta",
"desc": "Stäng paneler och maximera arbetsyta"
},
"changeTabs": {
"title": "Växla flik",
"desc": "Byt till en annan arbetsyta"
},
"consoleToggle": {
"title": "Växla konsol",
"desc": "Öppna och stäng konsol"
},
"setSeed": {
"desc": "Använd seed för nuvarande bild",
"title": "välj seed"
},
"setParameters": {
"title": "Välj parametrar",
"desc": "Använd alla parametrar från nuvarande bild"
},
"setPrompt": {
"desc": "Använd prompt för nuvarande bild",
"title": "Välj prompt"
},
"restoreFaces": {
"title": "Återskapa ansikten",
"desc": "Återskapa nuvarande bild"
},
"upscale": {
"title": "Skala upp",
"desc": "Skala upp nuvarande bild"
},
"showInfo": {
"title": "Visa info",
"desc": "Visa metadata för nuvarande bild"
},
"sendToImageToImage": {
"title": "Skicka till Bild till bild",
"desc": "Skicka nuvarande bild till Bild till bild"
},
"deleteImage": {
"title": "Radera bild",
"desc": "Radera nuvarande bild"
},
"closePanels": {
"title": "Stäng paneler",
"desc": "Stäng öppna paneler"
},
"previousImage": {
"title": "Föregående bild",
"desc": "Visa föregående bild"
},
"nextImage": {
"title": "Nästa bild",
"desc": "Visa nästa bild"
},
"toggleGalleryPin": {
"title": "Växla gallerinål",
"desc": "Nålar fast eller nålar av galleriet i gränssnittet"
},
"increaseGalleryThumbSize": {
"title": "Förstora galleriets bildstorlek",
"desc": "Förstora miniatyrbildernas storlek"
},
"decreaseGalleryThumbSize": {
"title": "Minska gelleriets bildstorlek",
"desc": "Minska miniatyrbildernas storlek i galleriet"
},
"decreaseBrushSize": {
"desc": "Förminska storleken på kanvas- pensel eller suddgummi",
"title": "Minska penselstorlek"
},
"increaseBrushSize": {
"title": "Öka penselstorlek",
"desc": "Öka stoleken på kanvas- pensel eller suddgummi"
},
"increaseBrushOpacity": {
"title": "Öka penselns opacitet",
"desc": "Öka opaciteten för kanvaspensel"
},
"decreaseBrushOpacity": {
"desc": "Minska kanvaspenselns opacitet",
"title": "Minska penselns opacitet"
},
"moveTool": {
"title": "Flytta",
"desc": "Tillåt kanvasnavigation"
},
"fillBoundingBox": {
"title": "Fyll ram",
"desc": "Fyller ramen med pensels färg"
},
"keyboardShortcuts": "Snabbtangenter",
"appHotkeys": "Appsnabbtangenter",
"selectBrush": {
"desc": "Välj kanvaspensel",
"title": "Välj pensel"
},
"selectEraser": {
"desc": "Välj kanvassuddgummi",
"title": "Välj suddgummi"
},
"eraseBoundingBox": {
"title": "Ta bort ram"
}
}
}

View File

@@ -1,64 +0,0 @@
{
"accessibility": {
"invokeProgressBar": "Invoke ilerleme durumu",
"nextImage": "Sonraki Resim",
"useThisParameter": "Kullanıcı parametreleri",
"copyMetadataJson": "Metadata verilerini kopyala (JSON)",
"exitViewer": "Görüntüleme Modundan Çık",
"zoomIn": "Yakınlaştır",
"zoomOut": "Uzaklaştır",
"rotateCounterClockwise": "Döndür (Saat yönünün tersine)",
"rotateClockwise": "Döndür (Saat yönünde)",
"flipHorizontally": "Yatay Çevir",
"flipVertically": "Dikey Çevir",
"modifyConfig": "Ayarları Değiştir",
"toggleAutoscroll": "Otomatik kaydırmayı aç/kapat",
"toggleLogViewer": "Günlük Görüntüleyici Aç/Kapa",
"showOptionsPanel": "Ayarlar Panelini Göster",
"modelSelect": "Model Seçin",
"reset": "Sıfırla",
"uploadImage": "Resim Yükle",
"previousImage": "Önceki Resim",
"menu": "Menü",
"showGallery": "Galeriyi Göster"
},
"common": {
"hotkeysLabel": "Kısayol Tuşları",
"themeLabel": "Tema",
"languagePickerLabel": "Dil Seçimi",
"reportBugLabel": "Hata Bildir",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Ayarlar",
"darkTheme": "Karanlık Tema",
"lightTheme": "Aydınlık Tema",
"greenTheme": "Yeşil Tema",
"oceanTheme": "Okyanus Tema",
"langArabic": "Arapça",
"langEnglish": "İngilizce",
"langDutch": "Hollandaca",
"langFrench": "Fransızca",
"langGerman": "Almanca",
"langItalian": "İtalyanca",
"langJapanese": "Japonca",
"langPolish": "Lehçe",
"langPortuguese": "Portekizce",
"langBrPortuguese": "Portekizcr (Brezilya)",
"langRussian": "Rusça",
"langSimplifiedChinese": "Çince (Basit)",
"langUkranian": "Ukraynaca",
"langSpanish": "İspanyolca",
"txt2img": "Metinden Resime",
"img2img": "Resimden Metine",
"linear": "Çizgisel",
"nodes": "Düğümler",
"postprocessing": "İşlem Sonrası",
"postProcessing": "İşlem Sonrası",
"postProcessDesc2": "Daha gelişmiş özellikler için ve iş akışını kolaylaştırmak için özel bir kullanıcı arayüzü çok yakında yayınlanacaktır.",
"postProcessDesc3": "Invoke AI komut satırı arayüzü, bir çok yeni özellik sunmaktadır.",
"langKorean": "Korece",
"unifiedCanvas": "Akıllı Tuval",
"nodesDesc": "Görüntülerin oluşturulmasında hazırladığımız yeni bir sistem geliştirme aşamasındadır. Bu harika özellikler ve çok daha fazlası için bizi takip etmeye devam edin.",
"postProcessDesc1": "Invoke AI son kullanıcıya yönelik bir çok özellik sunar. Görüntü kalitesi yükseltme, yüz restorasyonu WebUI üzerinden kullanılabilir. Metinden resime ve resimden metne araçlarına gelişmiş seçenekler menüsünden ulaşabilirsiniz. İsterseniz mevcut görüntü ekranının üzerindeki veya görüntüleyicideki görüntüyü doğrudan düzenleyebilirsiniz."
}
}

View File

@@ -16,9 +16,9 @@
"postProcessing": "Постобробка",
"postProcessDesc1": "Invoke AI пропонує широкий спектр функцій постобробки. Збільшення зображення (upscale) та відновлення облич вже доступні в інтерфейсі. Отримайте доступ до них з меню 'Додаткові параметри' на вкладках 'Зображення із тексту' та 'Зображення із зображення'. Обробляйте зображення безпосередньо, використовуючи кнопки дій із зображеннями над поточним зображенням або в режимі перегляду.",
"postProcessDesc2": "Найближчим часом буде випущено спеціальний інтерфейс для більш сучасних процесів постобробки.",
"postProcessDesc3": "Інтерфейс командного рядка Invoke AI пропонує різні інші функції, включаючи збільшення Embiggen.",
"postProcessDesc3": "Інтерфейс командного рядка Invoke AI пропонує різні інші функції, включаючи збільшення Embiggen",
"training": "Навчання",
"trainingDesc1": "Спеціальний інтерфейс для навчання власних моделей з використанням Textual Inversion та Dreambooth.",
"trainingDesc1": "Спеціальний інтерфейс для навчання власних моделей з використанням Textual Inversion та Dreambooth",
"trainingDesc2": "InvokeAI вже підтримує навчання моделей за допомогою TI, через інтерфейс командного рядка.",
"upload": "Завантажити",
"close": "Закрити",
@@ -43,38 +43,7 @@
"statusUpscaling": "Збільшення",
"statusUpscalingESRGAN": "Збільшення (ESRGAN)",
"statusLoadingModel": "Завантаження моделі",
"statusModelChanged": "Модель змінено",
"cancel": "Скасувати",
"accept": "Підтвердити",
"back": "Назад",
"postprocessing": "Постобробка",
"statusModelConverted": "Модель сконвертована",
"statusMergingModels": "Злиття моделей",
"loading": "Завантаження",
"loadingInvokeAI": "Завантаження Invoke AI",
"langHebrew": "Іврит",
"langKorean": "Корейська",
"langPortuguese": "Португальська",
"pinOptionsPanel": "Закріпити панель налаштувань",
"oceanTheme": "Океан",
"langArabic": "Арабська",
"langSimplifiedChinese": "Китайська (спрощена)",
"langSpanish": "Іспанська",
"langEnglish": "Англійська",
"langGerman": "Німецька",
"langItalian": "Італійська",
"langJapanese": "Японська",
"langPolish": "Польська",
"langBrPortuguese": "Португальська (Бразилія)",
"langRussian": "Російська",
"githubLabel": "Github",
"txt2img": "Текст в зображення (txt2img)",
"discordLabel": "Discord",
"langDutch": "Голландська",
"langFrench": "Французька",
"statusMergedModels": "Моделі об'єднані",
"statusConvertingModel": "Конвертація моделі",
"linear": "Лінійна обробка"
"statusModelChanged": "Модель змінено"
},
"gallery": {
"generations": "Генерації",
@@ -315,15 +284,15 @@
"description": "Опис",
"descriptionValidationMsg": "Введіть опис моделі",
"config": "Файл конфігурації",
"configValidationMsg": "Шлях до файлу конфігурації.",
"configValidationMsg": "Шлях до файлу конфігурації",
"modelLocation": "Розташування моделі",
"modelLocationValidationMsg": "Шлях до файлу з моделлю.",
"modelLocationValidationMsg": "Шлях до файлу з моделлю",
"vaeLocation": "Розтышування VAE",
"vaeLocationValidationMsg": "Шлях до VAE.",
"vaeLocationValidationMsg": "Шлях до VAE",
"width": "Ширина",
"widthValidationMsg": "Початкова ширина зображень.",
"widthValidationMsg": "Початкова ширина зображень",
"height": "Висота",
"heightValidationMsg": "Початкова висота зображень.",
"heightValidationMsg": "Початкова висота зображень",
"addModel": "Додати модель",
"updateModel": "Оновити модель",
"availableModels": "Доступні моделі",
@@ -350,66 +319,7 @@
"deleteModel": "Видалити модель",
"deleteConfig": "Видалити конфігурацію",
"deleteMsg1": "Ви точно хочете видалити модель із InvokeAI?",
"deleteMsg2": "Це не призведе до видалення файлу моделі з диску. Позніше ви можете додати його знову.",
"allModels": "Усі моделі",
"diffusersModels": "Diffusers",
"scanForModels": "Сканувати моделі",
"convert": "Конвертувати",
"convertToDiffusers": "Конвертувати в Diffusers",
"formMessageDiffusersVAELocationDesc": "Якщо не надано, InvokeAI буде шукати файл VAE в розташуванні моделі, вказаній вище.",
"convertToDiffusersHelpText3": "Файл моделі на диску НЕ буде видалено або змінено. Ви можете знову додати його в Model Manager, якщо потрібно.",
"customConfig": "Користувальницький конфіг",
"invokeRoot": "Каталог InvokeAI",
"custom": "Користувальницький",
"modelTwo": "Модель 2",
"modelThree": "Модель 3",
"mergedModelName": "Назва об'єднаної моделі",
"alpha": "Альфа",
"interpolationType": "Тип інтерполяції",
"mergedModelSaveLocation": "Шлях збереження",
"mergedModelCustomSaveLocation": "Користувальницький шлях",
"invokeAIFolder": "Каталог InvokeAI",
"ignoreMismatch": "Ігнорувати невідповідності між вибраними моделями",
"modelMergeHeaderHelp2": "Тільки Diffusers-моделі доступні для об'єднання. Якщо ви хочете об'єднати checkpoint-моделі, спочатку перетворіть їх на Diffusers.",
"checkpointModels": "Checkpoints",
"repo_id": "ID репозиторію",
"v2_base": "v2 (512px)",
"repoIDValidationMsg": "Онлайн-репозиторій моделі",
"formMessageDiffusersModelLocationDesc": "Вкажіть хоча б одне.",
"formMessageDiffusersModelLocation": "Шлях до Diffusers-моделі",
"v2_768": "v2 (768px)",
"formMessageDiffusersVAELocation": "Шлях до VAE",
"convertToDiffusersHelpText5": "Переконайтеся, що у вас достатньо місця на диску. Моделі зазвичай займають від 4 до 7 Гб.",
"convertToDiffusersSaveLocation": "Шлях збереження",
"v1": "v1",
"convertToDiffusersHelpText6": "Ви хочете перетворити цю модель?",
"inpainting": "v1 Inpainting",
"modelConverted": "Модель перетворено",
"sameFolder": "У ту ж папку",
"statusConverting": "Перетворення",
"merge": "Об'єднати",
"mergeModels": "Об'єднати моделі",
"modelOne": "Модель 1",
"sigmoid": "Сігмоїд",
"weightedSum": "Зважена сума",
"none": "пусто",
"addDifference": "Додати різницю",
"pickModelType": "Вибрати тип моделі",
"convertToDiffusersHelpText4": "Це одноразова дія. Вона може зайняти від 30 до 60 секунд в залежності від характеристик вашого комп'ютера.",
"pathToCustomConfig": "Шлях до конфігу користувача",
"safetensorModels": "SafeTensors",
"addCheckpointModel": "Додати модель Checkpoint/Safetensor",
"addDiffuserModel": "Додати Diffusers",
"vaeRepoID": "ID репозиторію VAE",
"vaeRepoIDValidationMsg": "Онлайн-репозиторій VAE",
"modelMergeInterpAddDifferenceHelp": "У цьому режимі Модель 3 спочатку віднімається з Моделі 2. Результуюча версія змішується з Моделью 1 із встановленим вище коефіцієнтом Альфа.",
"customSaveLocation": "Користувальницький шлях збереження",
"modelMergeAlphaHelp": "Альфа впливає силу змішування моделей. Нижчі значення альфа призводять до меншого впливу другої моделі.",
"convertToDiffusersHelpText1": "Ця модель буде конвертована в формат 🧨 Diffusers.",
"convertToDiffusersHelpText2": "Цей процес замінить ваш запис в Model Manager на версію тієї ж моделі в Diffusers.",
"modelsMerged": "Моделі об'єднані",
"modelMergeHeaderHelp1": "Ви можете об'єднати до трьох різних моделей, щоб створити змішану, що відповідає вашим потребам.",
"inverseSigmoid": "Зворотній Сігмоїд"
"deleteMsg2": "Це не призведе до видалення файлу моделі з диску. Позніше ви можете додати його знову."
},
"parameters": {
"images": "Зображення",
@@ -436,7 +346,7 @@
"scale": "Масштаб",
"otherOptions": "інші параметри",
"seamlessTiling": "Безшовний узор",
"hiresOptim": "Оптимізація High Res",
"hiresOptim": "Висока роздільна здатність",
"imageFit": "Вмістити зображення",
"codeformerFidelity": "Точність",
"seamSize": "Размір шву",
@@ -469,24 +379,7 @@
"info": "Метадані",
"deleteImage": "Видалити зображення",
"initialImage": "Початкове зображення",
"showOptionsPanel": "Показати панель налаштувань",
"general": "Основне",
"cancel": {
"immediate": "Скасувати негайно",
"schedule": "Скасувати після поточної ітерації",
"isScheduled": "Відміна",
"setType": "Встановити тип скасування"
},
"vSymmetryStep": "Крок верт. симетрії",
"hiresStrength": "Сила High Res",
"hidePreview": "Сховати попередній перегляд",
"showPreview": "Показати попередній перегляд",
"imageToImage": "Зображення до зображення",
"denoisingStrength": "Сила шумоподавлення",
"copyImage": "Копіювати зображення",
"symmetry": "Симетрія",
"hSymmetryStep": "Крок гор. симетрії",
"negativePrompts": "Виключний запит"
"showOptionsPanel": "Показати панель налаштувань"
},
"settings": {
"models": "Моделі",
@@ -499,8 +392,7 @@
"resetWebUI": "Повернути початкові",
"resetWebUIDesc1": "Скидання настройок веб-інтерфейсу видаляє лише локальний кеш браузера з вашими зображеннями та налаштуваннями. Це не призводить до видалення зображень з диску.",
"resetWebUIDesc2": "Якщо зображення не відображаються в галереї або не працює ще щось, спробуйте скинути налаштування, перш ніж повідомляти про проблему на GitHub.",
"resetComplete": "Інтерфейс скинуто. Оновіть цю сторінку.",
"useSlidersForAll": "Використовувати повзунки для всіх параметрів"
"resetComplete": "Інтерфейс скинуто. Оновіть цю сторінку."
},
"toast": {
"tempFoldersEmptied": "Тимчасова папка очищена",
@@ -518,25 +410,21 @@
"sentToUnifiedCanvas": "Надіслати на полотно",
"parametersSet": "Параметри задані",
"parametersNotSet": "Параметри не задані",
"parametersNotSetDesc": "Не знайдені метадані цього зображення.",
"parametersNotSetDesc": "Не знайдені метадані цього зображення",
"parametersFailed": "Проблема із завантаженням параметрів",
"parametersFailedDesc": "Неможливо завантажити початкове зображення.",
"parametersFailedDesc": "Неможливо завантажити початкове зображення",
"seedSet": "Сід заданий",
"seedNotSet": "Сід не заданий",
"seedNotSetDesc": "Не вдалося знайти сід для зображення.",
"seedNotSetDesc": "Не вдалося знайти сід для зображення",
"promptSet": "Запит заданий",
"promptNotSet": "Запит не заданий",
"promptNotSetDesc": "Не вдалося знайти запит для зображення.",
"promptNotSetDesc": "Не вдалося знайти запит для зображення",
"upscalingFailed": "Збільшення не вдалося",
"faceRestoreFailed": "Відновлення облич не вдалося",
"metadataLoadFailed": "Не вдалося завантажити метадані",
"initialImageSet": "Початкове зображення задане",
"initialImageNotSet": "Початкове зображення не задане",
"initialImageNotSetDesc": "Не вдалося завантажити початкове зображення",
"serverError": "Помилка сервера",
"disconnected": "Відключено від сервера",
"connected": "Підключено до сервера",
"canceled": "Обробку скасовано"
"initialImageNotSetDesc": "Не вдалося завантажити початкове зображення"
},
"tooltip": {
"feature": {
@@ -585,10 +473,10 @@
"autoSaveToGallery": "Автозбереження до галереї",
"saveBoxRegionOnly": "Зберiгати тiльки видiлення",
"limitStrokesToBox": "Обмежити штрихи виділенням",
"showCanvasDebugInfo": "Показати дод. інформацію про полотно",
"showCanvasDebugInfo": "Показати налаштування полотна",
"clearCanvasHistory": "Очистити iсторiю полотна",
"clearHistory": "Очистити iсторiю",
"clearCanvasHistoryMessage": "Очищення історії полотна залишає поточне полотно незайманим, але видаляє історію скасування та повтору.",
"clearCanvasHistoryMessage": "Очищення історії полотна залишає поточне полотно незайманим, але видаляє історію скасування та повтору",
"clearCanvasHistoryConfirm": "Ви впевнені, що хочете очистити історію полотна?",
"emptyTempImageFolder": "Очистити тимчасову папку",
"emptyFolder": "Очистити папку",
@@ -611,28 +499,5 @@
"betaDarkenOutside": "Затемнити зовні",
"betaLimitToBox": "Обмежити виділенням",
"betaPreserveMasked": "Зберiгати замасковану область"
},
"accessibility": {
"nextImage": "Наступне зображення",
"modelSelect": "Вибір моделі",
"invokeProgressBar": "Індикатор виконання",
"reset": "Скинути",
"uploadImage": "Завантажити зображення",
"useThisParameter": "Використовувати цей параметр",
"exitViewer": "Вийти з переглядача",
"zoomIn": "Збільшити",
"zoomOut": "Зменшити",
"rotateCounterClockwise": "Обертати проти годинникової стрілки",
"rotateClockwise": "Обертати за годинниковою стрілкою",
"toggleAutoscroll": "Увімкнути автопрокручування",
"toggleLogViewer": "Показати або приховати переглядач журналів",
"showGallery": "Показати галерею",
"previousImage": "Попереднє зображення",
"copyMetadataJson": "Скопіювати метадані JSON",
"flipVertically": "Перевернути по вертикалі",
"flipHorizontally": "Відобразити по горизонталі",
"showOptionsPanel": "Показати опції",
"modifyConfig": "Змінити конфігурацію",
"menu": "Меню"
}
}

View File

@@ -1 +0,0 @@
{}

View File

@@ -481,22 +481,5 @@
"betaDarkenOutside": "暗化外部区域",
"betaLimitToBox": "限制在框内",
"betaPreserveMasked": "保留遮罩层"
},
"accessibility": {
"modelSelect": "模型选择",
"invokeProgressBar": "Invoke 进度条",
"reset": "重置",
"nextImage": "下一张图片",
"useThisParameter": "使用此参数",
"uploadImage": "上传图片",
"previousImage": "上一张图片",
"copyMetadataJson": "复制JSON元数据",
"exitViewer": "退出视口ExitViewer",
"zoomIn": "放大",
"zoomOut": "缩小",
"rotateCounterClockwise": "逆时针旋转",
"rotateClockwise": "顺时针旋转",
"flipHorizontally": "水平翻转",
"flipVertically": "垂直翻转"
}
}

View File

@@ -18,7 +18,6 @@ import { PropsWithChildren, useEffect } from 'react';
import { setDisabledPanels, setDisabledTabs } from 'features/ui/store/uiSlice';
import { InvokeTabName } from 'features/ui/store/tabMap';
import { shouldTransformUrlsChanged } from 'features/system/store/systemSlice';
import { setShouldFetchImages } from 'features/gallery/store/resultsSlice';
keepGUIAlive();
@@ -27,7 +26,6 @@ interface Props extends PropsWithChildren {
disabledPanels: string[];
disabledTabs: InvokeTabName[];
shouldTransformUrls?: boolean;
shouldFetchImages: boolean;
};
}
@@ -52,10 +50,6 @@ const App = (props: Props) => {
);
}, [dispatch, props.options.shouldTransformUrls]);
useEffect(() => {
dispatch(setShouldFetchImages(props.options.shouldFetchImages));
}, [dispatch, props.options.shouldFetchImages]);
useEffect(() => {
setColorMode(['light'].includes(currentTheme) ? 'light' : 'dark');
}, [setColorMode, currentTheme]);
@@ -73,12 +67,7 @@ const App = (props: Props) => {
h={APP_HEIGHT}
>
{props.children || <SiteHeader />}
<Flex
gap={4}
w={{ base: '100vw', xl: 'full' }}
h="full"
flexDir={{ base: 'column', xl: 'row' }}
>
<Flex gap={4} w="full" h="full">
<InvokeTabs />
<ImageGalleryPanel />
</Flex>

View File

@@ -31,13 +31,13 @@ export const DIFFUSERS_SAMPLERS: Array<string> = [
];
// Valid image widths
export const WIDTHS: Array<number> = Array.from(Array(64)).map(
(_x, i) => (i + 1) * 64
export const WIDTHS: Array<number> = Array.from(Array(65)).map(
(_x, i) => i * 64
);
// Valid image heights
export const HEIGHTS: Array<number> = Array.from(Array(64)).map(
(_x, i) => (i + 1) * 64
export const HEIGHTS: Array<number> = Array.from(Array(65)).map(
(_x, i) => i * 64
);
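
For reference, a minimal sketch (not part of the diff) of what the two generators evaluate to, assuming the usual unified-diff ordering with the removed lines first:

// Removed form: 64 entries, from 64 up to 4096 in steps of 64.
const removedWidths = Array.from(Array(64)).map((_x, i) => (i + 1) * 64);
// -> [64, 128, 192, ..., 4096]

// Added form: 65 entries, from 0 up to 4096; the same range plus a leading 0,
// which downstream consumers presumably filter out or never select.
const addedWidths = Array.from(Array(65)).map((_x, i) => i * 64);
// -> [0, 64, 128, ..., 4096]
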
// Valid upscaling levels
@@ -60,5 +60,3 @@ export const IN_PROGRESS_IMAGE_TYPES: Array<{
{ key: 'Fast', value: 'latents' },
{ key: 'Accurate', value: 'full-res' },
];
export const NODE_MIN_WIDTH = 250;

View File

@@ -20,7 +20,6 @@ export const readinessSelector = createSelector(
seedWeights,
initialImage,
seed,
isImageToImageEnabled,
} = generation;
const { isProcessing, isConnected } = system;
@@ -34,7 +33,7 @@ export const readinessSelector = createSelector(
reasonsWhyNotReady.push('Missing prompt');
}
if (isImageToImageEnabled && !initialImage) {
if (activeTabName === 'img2img' && !initialImage) {
isReady = false;
reasonsWhyNotReady.push('No initial image selected');
}

View File

@@ -143,17 +143,16 @@ const makeSocketIOListeners = (
}
}
// TODO: fix
// if (shouldLoopback) {
// const activeTabName = tabMap[activeTab];
// switch (activeTabName) {
// case 'img2img': {
// dispatch(initialImageSelected(newImage.uuid));
// // dispatch(setInitialImage(newImage));
// break;
// }
// }
// }
if (shouldLoopback) {
const activeTabName = tabMap[activeTab];
switch (activeTabName) {
case 'img2img': {
dispatch(initialImageSelected(newImage.uuid));
// dispatch(setInitialImage(newImage));
break;
}
}
}
dispatch(clearIntermediateImage());

View File

@@ -6,28 +6,31 @@ import dynamicMiddlewares from 'redux-dynamic-middlewares';
import { getPersistConfig } from 'redux-deep-persist';
import canvasReducer from 'features/canvas/store/canvasSlice';
import galleryReducer from 'features/gallery/store/gallerySlice';
import resultsReducer from 'features/gallery/store/resultsSlice';
import galleryReducer, {
GalleryState,
} from 'features/gallery/store/gallerySlice';
import resultsReducer, {
resultsAdapter,
ResultsState,
} from 'features/gallery/store/resultsSlice';
import uploadsReducer from 'features/gallery/store/uploadsSlice';
import lightboxReducer from 'features/lightbox/store/lightboxSlice';
import generationReducer from 'features/parameters/store/generationSlice';
import postprocessingReducer from 'features/parameters/store/postprocessingSlice';
import systemReducer from 'features/system/store/systemSlice';
import lightboxReducer, {
LightboxState,
} from 'features/lightbox/store/lightboxSlice';
import generationReducer, {
GenerationState,
} from 'features/parameters/store/generationSlice';
import postprocessingReducer, {
PostprocessingState,
} from 'features/parameters/store/postprocessingSlice';
import systemReducer, { SystemState } from 'features/system/store/systemSlice';
import uiReducer from 'features/ui/store/uiSlice';
import modelsReducer from 'features/system/store/modelSlice';
import nodesReducer from 'features/nodes/store/nodesSlice';
import nodesReducer, { NodesState } from 'features/nodes/store/nodesSlice';
import { socketioMiddleware } from './socketio/middleware';
import { socketMiddleware } from 'services/events/middleware';
import { canvasBlacklist } from 'features/canvas/store/canvasPersistBlacklist';
import { galleryBlacklist } from 'features/gallery/store/galleryPersistBlacklist';
import { generationBlacklist } from 'features/parameters/store/generationPersistBlacklist';
import { lightboxBlacklist } from 'features/lightbox/store/lightboxPersistBlacklist';
import { modelsBlacklist } from 'features/system/store/modelsPersistBlacklist';
import { nodesBlacklist } from 'features/nodes/store/nodesPersistBlacklist';
import { postprocessingBlacklist } from 'features/parameters/store/postprocessingPersistBlacklist';
import { systemBlacklist } from 'features/system/store/systemPersistsBlacklist';
import { uiBlacklist } from 'features/ui/store/uiPersistBlacklist';
import { CanvasState } from 'features/canvas/store/canvasTypes';
/**
* redux-persist provides an easy and reliable way to persist state across reloads.
@@ -43,6 +46,116 @@ import { uiBlacklist } from 'features/ui/store/uiPersistBlacklist';
* The necessary nested persistors with blacklists are configured below.
*/
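
A minimal sketch (not from the commit) of how dot-path blacklist entries like the ones built below are typically passed to redux-deep-persist's getPersistConfig; the store is trimmed to two slices for illustration:

import { combineReducers } from '@reduxjs/toolkit';
import storage from 'redux-persist/lib/storage';
import { getPersistConfig } from 'redux-deep-persist';
import canvasReducer from 'features/canvas/store/canvasSlice';
import systemReducer from 'features/system/store/systemSlice';

const exampleRootReducer = combineReducers({
  canvas: canvasReducer,
  system: systemReducer,
});

// getPersistConfig expands dot paths such as 'canvas.cursorPosition' into the
// nested persist configuration redux-persist needs, so transient keys are
// dropped on rehydration while the rest of the slice persists.
const examplePersistConfig = getPersistConfig({
  key: 'root',
  storage,
  rootReducer: exampleRootReducer,
  blacklist: ['canvas.cursorPosition', 'system.isProcessing'],
});
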
/**
* Canvas slice persist blacklist
*/
const canvasBlacklist: (keyof CanvasState)[] = [
'cursorPosition',
'isCanvasInitialized',
'doesCanvasNeedScaling',
];
canvasBlacklist.map((blacklistItem) => `canvas.${blacklistItem}`);
/**
* System slice persist blacklist
*/
const systemBlacklist: (keyof SystemState)[] = [
'currentIteration',
'currentStatus',
'currentStep',
'isCancelable',
'isConnected',
'isESRGANAvailable',
'isGFPGANAvailable',
'isProcessing',
'socketId',
'totalIterations',
'totalSteps',
'openModel',
'isCancelScheduled',
'sessionId',
'progressImage',
];
systemBlacklist.map((blacklistItem) => `system.${blacklistItem}`);
/**
* Gallery slice persist blacklist
*/
const galleryBlacklist: (keyof GalleryState)[] = [
'categories',
'currentCategory',
'currentImage',
'currentImageUuid',
'shouldAutoSwitchToNewImages',
'intermediateImage',
];
galleryBlacklist.map((blacklistItem) => `gallery.${blacklistItem}`);
/**
* Lightbox slice persist blacklist
*/
const lightboxBlacklist: (keyof LightboxState)[] = ['isLightboxOpen'];
lightboxBlacklist.map((blacklistItem) => `lightbox.${blacklistItem}`);
/**
* Nodes slice persist blacklist
*/
const nodesBlacklist: (keyof NodesState)[] = ['schema', 'invocations'];
nodesBlacklist.map((blacklistItem) => `nodes.${blacklistItem}`);
/**
* Generation slice persist blacklist
*/
const generationBlacklist: (keyof GenerationState)[] = [];
generationBlacklist.map((blacklistItem) => `generation.${blacklistItem}`);
/**
* Postprocessing slice persist blacklist
*/
const postprocessingBlacklist: (keyof PostprocessingState)[] = [];
postprocessingBlacklist.map(
(blacklistItem) => `postprocessing.${blacklistItem}`
);
/**
* Results slice persist blacklist
*
* Currently blacklisting results slice entirely, see persist config below
*/
const resultsBlacklist: (keyof ResultsState)[] = [];
resultsBlacklist.map((blacklistItem) => `results.${blacklistItem}`);
/**
* Uploads slice persist blacklist
*
* Currently blacklisting uploads slice entirely, see persist config below
*/
const uploadsBlacklist: (keyof NodesState)[] = [];
uploadsBlacklist.map((blacklistItem) => `uploads.${blacklistItem}`);
/**
* Models slice persist blacklist
*/
const modelsBlacklist: (keyof NodesState)[] = [];
modelsBlacklist.map((blacklistItem) => `models.${blacklistItem}`);
/**
* UI slice persist blacklist
*/
const uiBlacklist: (keyof NodesState)[] = [];
uiBlacklist.map((blacklistItem) => `ui.${blacklistItem}`);
const rootReducer = combineReducers({
canvas: canvasReducer,
gallery: galleryReducer,

View File

@@ -44,10 +44,12 @@ export type IAIFullSliderProps = {
inputReadOnly?: boolean;
withReset?: boolean;
handleReset?: () => void;
isResetDisabled?: boolean;
isSliderDisabled?: boolean;
isInputDisabled?: boolean;
tooltipSuffix?: string;
hideTooltip?: boolean;
isCompact?: boolean;
isDisabled?: boolean;
sliderFormControlProps?: FormControlProps;
sliderFormLabelProps?: FormLabelProps;
sliderMarkProps?: Omit<SliderMarkProps, 'value'>;
@@ -78,8 +80,10 @@ const IAISlider = (props: IAIFullSliderProps) => {
withReset = false,
hideTooltip = false,
isCompact = false,
isDisabled = false,
handleReset,
isResetDisabled,
isSliderDisabled,
isInputDisabled,
sliderFormControlProps,
sliderFormLabelProps,
sliderMarkProps,
@@ -145,7 +149,6 @@ const IAISlider = (props: IAIFullSliderProps) => {
}
: {}
}
isDisabled={isDisabled}
{...sliderFormControlProps}
>
<FormLabel {...sliderFormLabelProps} mb={-1}>
@@ -163,13 +166,15 @@ const IAISlider = (props: IAIFullSliderProps) => {
onMouseEnter={() => setShowTooltip(true)}
onMouseLeave={() => setShowTooltip(false)}
focusThumbOnChange={false}
isDisabled={isDisabled}
isDisabled={isSliderDisabled}
// width={width}
{...rest}
>
{withSliderMarks && (
<>
<SliderMark
value={min}
// insetInlineStart={0}
sx={{
insetInlineStart: '0 !important',
insetInlineEnd: 'unset !important',
@@ -180,6 +185,7 @@ const IAISlider = (props: IAIFullSliderProps) => {
</SliderMark>
<SliderMark
value={max}
// insetInlineEnd={0}
sx={{
insetInlineStart: 'unset !important',
insetInlineEnd: '0 !important',
@@ -215,6 +221,7 @@ const IAISlider = (props: IAIFullSliderProps) => {
value={localInputValue}
onChange={handleInputChange}
onBlur={handleInputBlur}
isDisabled={isInputDisabled}
{...sliderNumberInputProps}
>
<NumberInputField
@@ -239,8 +246,8 @@ const IAISlider = (props: IAIFullSliderProps) => {
aria-label={t('accessibility.reset')}
tooltip="Reset"
icon={<BiReset />}
isDisabled={isDisabled}
onClick={handleResetDisable}
isDisabled={isResetDisabled}
{...sliderIAIIconButtonProps}
/>
)}

View File

@@ -1,79 +0,0 @@
import { Badge, Box, ButtonGroup, Flex } from '@chakra-ui/react';
import { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import { clearInitialImage } from 'features/parameters/store/generationSlice';
import { useCallback } from 'react';
import IAIIconButton from 'common/components/IAIIconButton';
import { FaUndo, FaUpload } from 'react-icons/fa';
import { useTranslation } from 'react-i18next';
import { Image } from 'app/invokeai';
type ImageToImageOverlayProps = {
setIsLoaded: (isLoaded: boolean) => void;
image: Image;
};
const ImageToImageOverlay = ({
setIsLoaded,
image,
}: ImageToImageOverlayProps) => {
const isImageToImageEnabled = useAppSelector(
(state: RootState) => state.generation.isImageToImageEnabled
);
const dispatch = useAppDispatch();
const { t } = useTranslation();
const handleResetInitialImage = useCallback(() => {
dispatch(clearInitialImage());
setIsLoaded(false);
}, [dispatch, setIsLoaded]);
return (
<Box
sx={{
top: 0,
left: 0,
w: 'full',
h: 'full',
position: 'absolute',
}}
>
<ButtonGroup
sx={{
position: 'absolute',
top: 0,
right: 0,
p: 2,
}}
>
<IAIIconButton
size="sm"
isDisabled={!isImageToImageEnabled}
icon={<FaUndo />}
aria-label={t('accessibility.reset')}
onClick={handleResetInitialImage}
/>
<IAIIconButton
size="sm"
isDisabled={!isImageToImageEnabled}
icon={<FaUpload />}
aria-label={t('common.upload')}
/>
</ButtonGroup>
<Flex
sx={{
position: 'absolute',
bottom: 0,
left: 0,
p: 2,
alignItems: 'flex-start',
}}
>
<Badge variant="solid" colorScheme="base">
{image.metadata?.width} × {image.metadata?.height}
</Badge>
</Flex>
</Box>
);
};
export default ImageToImageOverlay;

View File

@@ -1,12 +0,0 @@
import { Flex, Icon } from '@chakra-ui/react';
import { FaImage } from 'react-icons/fa';
const SelectImagePlaceholder = () => {
return (
<Flex sx={{ h: 36, alignItems: 'center', justifyContent: 'center' }}>
<Icon color="base.400" boxSize={32} as={FaImage}></Icon>
</Flex>
);
};
export default SelectImagePlaceholder;

View File

@@ -1,18 +0,0 @@
import { useBreakpoint } from '@chakra-ui/react';
export default function useResolution():
| 'mobile'
| 'tablet'
| 'desktop'
| 'unknown' {
const breakpointValue = useBreakpoint();
const mobileResolutions = ['base', 'sm'];
const tabletResolutions = ['md', 'lg'];
const desktopResolutions = ['xl', '2xl'];
if (mobileResolutions.includes(breakpointValue)) return 'mobile';
if (tabletResolutions.includes(breakpointValue)) return 'tablet';
if (desktopResolutions.includes(breakpointValue)) return 'desktop';
return 'unknown';
}

View File

@@ -1,119 +0,0 @@
/**
* PARTIAL ZOD IMPLEMENTATION
*
* doesn't work well because, like most validators, zod is not built to skip invalid values.
* it mostly works, but manual parsing seems clearer and simpler for now.
*
* in the future it would be really nice if we could use zod for some things:
* - zodios (axios + zod): https://github.com/ecyrbe/zodios
* - openapi to zodios: https://github.com/astahmer/openapi-zod-client
*/
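
To make the tradeoff concrete, a tiny sketch (not part of the commit) of why an ordinary schema fails wholesale while a transform can salvage valid values; the commented-out code below explores the same idea:

import { z } from 'zod';

// A conventional schema rejects the whole object when any field is invalid.
const strict = z.object({ seed: z.number().int() });
console.log(strict.safeParse({ seed: 'oops' }).success); // false: nothing recovered

// A transform can instead map invalid values to undefined, letting the caller
// keep whatever was salvageable.
const lenient = z.record(
  z.any().transform((val) => (typeof val === 'number' ? val : undefined))
);
console.log(lenient.parse({ seed: 42, steps: 'oops' })); // { seed: 42, steps: undefined }
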
// import { z } from 'zod';
// const zMetadataStringField = z.string();
// export type MetadataStringField = z.infer<typeof zMetadataStringField>;
// const zMetadataIntegerField = z.number().int();
// export type MetadataIntegerField = z.infer<typeof zMetadataIntegerField>;
// const zMetadataFloatField = z.number();
// export type MetadataFloatField = z.infer<typeof zMetadataFloatField>;
// const zMetadataBooleanField = z.boolean();
// export type MetadataBooleanField = z.infer<typeof zMetadataBooleanField>;
// const zMetadataImageField = z.object({
// image_type: z.union([
// z.literal('results'),
// z.literal('uploads'),
// z.literal('intermediates'),
// ]),
// image_name: z.string().min(1),
// });
// export type MetadataImageField = z.infer<typeof zMetadataImageField>;
// const zMetadataLatentsField = z.object({
// latents_name: z.string().min(1),
// });
// export type MetadataLatentsField = z.infer<typeof zMetadataLatentsField>;
// /**
// * zod Schema for any node field. Use a `transform()` to manually parse, skipping invalid values.
// */
// const zAnyMetadataField = z.any().transform((val, ctx) => {
// // Grab the field name from the path
// const fieldName = String(ctx.path[ctx.path.length - 1]);
// // `id` and `type` must be strings if they exist
// if (['id', 'type'].includes(fieldName)) {
// const reservedStringPropertyResult = zMetadataStringField.safeParse(val);
// if (reservedStringPropertyResult.success) {
// return reservedStringPropertyResult.data;
// }
// return;
// }
// // Parse the rest of the fields, only returning the data if the parsing is successful
// const stringFieldResult = zMetadataStringField.safeParse(val);
// if (stringFieldResult.success) {
// return stringFieldResult.data;
// }
// const integerFieldResult = zMetadataIntegerField.safeParse(val);
// if (integerFieldResult.success) {
// return integerFieldResult.data;
// }
// const floatFieldResult = zMetadataFloatField.safeParse(val);
// if (floatFieldResult.success) {
// return floatFieldResult.data;
// }
// const booleanFieldResult = zMetadataBooleanField.safeParse(val);
// if (booleanFieldResult.success) {
// return booleanFieldResult.data;
// }
// const imageFieldResult = zMetadataImageField.safeParse(val);
// if (imageFieldResult.success) {
// return imageFieldResult.data;
// }
// const latentsFieldResult = zMetadataImageField.safeParse(val);
// if (latentsFieldResult.success) {
// return latentsFieldResult.data;
// }
// });
// /**
// * The node metadata schema.
// */
// const zNodeMetadata = z.object({
// session_id: z.string().min(1).optional(),
// node: z.record(z.string().min(1), zAnyMetadataField).optional(),
// });
// export type NodeMetadata = z.infer<typeof zNodeMetadata>;
// const zMetadata = z.object({
// invokeai: zNodeMetadata.optional(),
// 'sd-metadata': z.record(z.string().min(1), z.any()).optional(),
// });
// export type Metadata = z.infer<typeof zMetadata>;
// export const parseMetadata = (
// metadata: Record<string, any>
// ): Metadata | undefined => {
// const result = zMetadata.safeParse(metadata);
// if (!result.success) {
// console.log(result.error.issues);
// return;
// }
// return result.data;
// };
export default {};

View File

@@ -0,0 +1,72 @@
import { RootState } from 'app/store';
import { InvokeTabName, tabMap } from 'features/ui/store/tabMap';
import { find } from 'lodash';
import {
Graph,
ImageToImageInvocation,
TextToImageInvocation,
} from 'services/api';
import { buildHiResNode, buildImg2ImgNode } from './nodes/image2Image';
import { buildIteration } from './nodes/iteration';
import { buildTxt2ImgNode } from './nodes/text2Image';
function mapTabToFunction(activeTabName: InvokeTabName) {
switch (activeTabName) {
case 'txt2img':
return buildTxt2ImgNode;
case 'img2img':
return buildImg2ImgNode;
default:
return buildTxt2ImgNode;
}
}
const buildBaseNode = (
state: RootState
): Record<string, TextToImageInvocation | ImageToImageInvocation> => {
const { activeTab } = state.ui;
const activeTabName = tabMap[activeTab];
return mapTabToFunction(activeTabName)(state);
};
type BuildGraphOutput = {
graph: Graph;
nodeIdsToSubscribe: string[];
};
export const buildGraph = (state: RootState): BuildGraphOutput => {
const { generation, postprocessing } = state;
const { iterations } = generation;
const { hiresFix, hiresStrength } = postprocessing;
const baseNode = buildBaseNode(state);
let graph: Graph = { nodes: baseNode };
const nodeIdsToSubscribe: string[] = [];
if (iterations > 1) {
graph = buildIteration({ graph, iterations });
}
if (hiresFix) {
const { node, edge } = buildHiResNode(
baseNode as Record<string, TextToImageInvocation>,
hiresStrength
);
graph = {
nodes: {
...graph.nodes,
...node,
},
edges: [...(graph.edges || []), edge],
};
nodeIdsToSubscribe.push(Object.keys(node)[0]);
}
console.log('buildGraph: ', graph);
return { graph, nodeIdsToSubscribe };
};

View File

@@ -17,7 +17,7 @@ export const useGetUrl = () => {
return {
shouldTransformUrls,
getUrl: (url?: string) => {
getUrl: (url: string) => {
if (OpenAPI.BASE && shouldTransformUrls) {
return [OpenAPI.BASE, url].join('/');
}

View File

@@ -8,11 +8,14 @@ import {
import { _Image } from 'app/invokeai';
import { initialImageSelector } from 'features/parameters/store/generationSelectors';
export const buildImg2ImgNode = (state: RootState): ImageToImageInvocation => {
export const buildImg2ImgNode = (
state: RootState
): Record<string, ImageToImageInvocation> => {
const nodeId = uuidv4();
const { generation, system, models } = state;
const { selectedModelName } = models;
const { shouldDisplayInProgressType } = system;
const { currentModel: model } = models;
const {
prompt,
@@ -35,31 +38,28 @@ export const buildImg2ImgNode = (state: RootState): ImageToImageInvocation => {
throw new Error('no initial image');
}
const imageToImageNode: ImageToImageInvocation = {
id: nodeId,
type: 'img2img',
prompt,
steps,
width,
height,
cfg_scale: cfgScale,
scheduler: sampler as ImageToImageInvocation['scheduler'],
seamless,
model: selectedModelName,
progress_images: true,
image: {
image_name: initialImage.name,
image_type: initialImage.type,
return {
[nodeId]: {
id: nodeId,
type: 'img2img',
prompt,
seed: shouldRandomizeSeed ? -1 : seed,
steps,
width,
height,
cfg_scale: cfgScale,
scheduler: sampler as ImageToImageInvocation['scheduler'],
seamless,
model,
progress_images: shouldDisplayInProgressType === 'full-res',
image: {
image_name: initialImage.name,
image_type: initialImage.type,
},
strength,
fit,
},
strength,
fit,
};
if (!shouldRandomizeSeed) {
imageToImageNode.seed = seed;
}
return imageToImageNode;
};
type hiresReturnType = {
@@ -82,7 +82,6 @@ export const buildHiResNode = (
id: nodeId,
type: 'img2img',
strength,
fit: true,
},
},
edge: {

View File

@@ -0,0 +1,81 @@
import { v4 as uuidv4 } from 'uuid';
import {
Edge,
Graph,
ImageToImageInvocation,
IterateInvocation,
RangeInvocation,
TextToImageInvocation,
} from 'services/api';
import { buildImg2ImgNode } from './image2Image';
type BuildIteration = {
graph: Graph;
iterations: number;
};
const buildRangeNode = (
iterations: number
): Record<string, RangeInvocation> => {
const nodeId = uuidv4();
return {
[nodeId]: {
id: nodeId,
type: 'range',
start: 0,
stop: iterations,
step: 1,
},
};
};
const buildIterateNode = (): Record<string, IterateInvocation> => {
const nodeId = uuidv4();
return {
[nodeId]: {
id: nodeId,
type: 'iterate',
collection: [],
index: 0,
},
};
};
export const buildIteration = ({
graph,
iterations,
}: BuildIteration): Graph => {
const rangeNode = buildRangeNode(iterations);
const iterateNode = buildIterateNode();
const baseNode: Graph['nodes'] = graph.nodes;
const edges: Edge[] = [
{
source: {
field: 'collection',
node_id: Object.keys(rangeNode)[0],
},
destination: {
field: 'collection',
node_id: Object.keys(iterateNode)[0],
},
},
{
source: {
field: 'item',
node_id: Object.keys(iterateNode)[0],
},
destination: {
field: 'seed',
node_id: Object.keys(baseNode!)[0],
},
},
];
return {
nodes: {
...rangeNode,
...iterateNode,
...graph.nodes,
},
edges,
};
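
As a concrete illustration (node IDs invented for readability; real IDs are uuidv4() strings), wrapping a single txt2img node with iterations = 3 yields a graph shaped like this:

const exampleIterationGraph = {
  nodes: {
    'range-1': { id: 'range-1', type: 'range', start: 0, stop: 3, step: 1 },
    'iterate-1': { id: 'iterate-1', type: 'iterate', collection: [], index: 0 },
    'txt2img-1': { id: 'txt2img-1', type: 'txt2img', prompt: 'a painting of a cat' },
  },
  edges: [
    // the range node emits its collection into the iterate node...
    {
      source: { node_id: 'range-1', field: 'collection' },
      destination: { node_id: 'iterate-1', field: 'collection' },
    },
    // ...which feeds each item into the base node's seed, one execution per iteration.
    {
      source: { node_id: 'iterate-1', field: 'item' },
      destination: { node_id: 'txt2img-1', field: 'seed' },
    },
  ],
};
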
};

View File

@@ -0,0 +1,43 @@
import { v4 as uuidv4 } from 'uuid';
import { RootState } from 'app/store';
import { TextToImageInvocation } from 'services/api';
export const buildTxt2ImgNode = (
state: RootState
): Record<string, TextToImageInvocation> => {
const nodeId = uuidv4();
const { generation, system, models } = state;
const { shouldDisplayInProgressType } = system;
const { currentModel: model } = models;
const {
prompt,
seed,
steps,
width,
height,
cfgScale: cfg_scale,
sampler,
seamless,
shouldRandomizeSeed,
} = generation;
// missing fields in TextToImageInvocation: strength, hires_fix
return {
[nodeId]: {
id: nodeId,
type: 'txt2img',
prompt,
seed: shouldRandomizeSeed ? -1 : seed,
steps,
width,
height,
cfg_scale,
scheduler: sampler as TextToImageInvocation['scheduler'],
seamless,
model,
progress_images: shouldDisplayInProgressType === 'full-res',
},
};
};

View File

@@ -1,169 +0,0 @@
import { forEach, size } from 'lodash';
import { ImageField, LatentsField } from 'services/api';
const OBJECT_TYPESTRING = '[object Object]';
const STRING_TYPESTRING = '[object String]';
const NUMBER_TYPESTRING = '[object Number]';
const BOOLEAN_TYPESTRING = '[object Boolean]';
const ARRAY_TYPESTRING = '[object Array]';
const isObject = (obj: unknown): obj is Record<string | number, any> =>
Object.prototype.toString.call(obj) === OBJECT_TYPESTRING;
const isString = (obj: unknown): obj is string =>
Object.prototype.toString.call(obj) === STRING_TYPESTRING;
const isNumber = (obj: unknown): obj is number =>
Object.prototype.toString.call(obj) === NUMBER_TYPESTRING;
const isBoolean = (obj: unknown): obj is boolean =>
Object.prototype.toString.call(obj) === BOOLEAN_TYPESTRING;
const isArray = (obj: unknown): obj is Array<any> =>
Object.prototype.toString.call(obj) === ARRAY_TYPESTRING;
const parseImageField = (imageField: unknown): ImageField | undefined => {
// Must be an object
if (!isObject(imageField)) {
return;
}
// An ImageField must have both `image_name` and `image_type`
if (!('image_name' in imageField && 'image_type' in imageField)) {
return;
}
// An ImageField's `image_type` must be one of the allowed values
if (
!['results', 'uploads', 'intermediates'].includes(imageField.image_type)
) {
return;
}
// An ImageField's `image_name` must be a string
if (typeof imageField.image_name !== 'string') {
return;
}
// Build a valid ImageField
return {
image_type: imageField.image_type,
image_name: imageField.image_name,
};
};
const parseLatentsField = (latentsField: unknown): LatentsField | undefined => {
// Must be an object
if (!isObject(latentsField)) {
return;
}
// A LatentsField must have a `latents_name`
if (!('latents_name' in latentsField)) {
return;
}
// A LatentsField's `latents_name` must be a string
if (typeof latentsField.latents_name !== 'string') {
return;
}
// Build a valid LatentsField
return {
latents_name: latentsField.latents_name,
};
};
type NodeMetadata = {
[key: string]: string | number | boolean | ImageField | LatentsField;
};
type InvokeAIMetadata = {
session_id?: string;
node?: NodeMetadata;
};
export const parseNodeMetadata = (
nodeMetadata: Record<string | number, any>
): NodeMetadata | undefined => {
if (!isObject(nodeMetadata)) {
return;
}
const parsed: NodeMetadata = {};
forEach(nodeMetadata, (nodeItem, nodeKey) => {
// `id` and `type` must be strings if they are present
if (['id', 'type'].includes(nodeKey)) {
if (isString(nodeItem)) {
parsed[nodeKey] = nodeItem;
}
return;
}
// the only valid object types are ImageField and LatentsField
if (isObject(nodeItem)) {
if ('image_name' in nodeItem || 'image_type' in nodeItem) {
const imageField = parseImageField(nodeItem);
if (imageField) {
parsed[nodeKey] = imageField;
}
return;
}
if ('latents_name' in nodeItem) {
const latentsField = parseLatentsField(nodeItem);
if (latentsField) {
parsed[nodeKey] = latentsField;
}
return;
}
}
// otherwise we accept any string, number or boolean
if (isString(nodeItem) || isNumber(nodeItem) || isBoolean(nodeItem)) {
parsed[nodeKey] = nodeItem;
return;
}
});
if (size(parsed) === 0) {
return;
}
return parsed;
};
export const parseInvokeAIMetadata = (
metadata: Record<string | number, any> | undefined
): InvokeAIMetadata | undefined => {
if (metadata === undefined) {
return;
}
if (!isObject(metadata)) {
return;
}
const parsed: InvokeAIMetadata = {};
forEach(metadata, (item, key) => {
if (key === 'session_id' && isString(item)) {
parsed['session_id'] = item;
}
if (key === 'node' && isObject(item)) {
const nodeMetadata = parseNodeMetadata(item);
if (nodeMetadata) {
parsed['node'] = nodeMetadata;
}
}
});
if (size(parsed) === 0) {
return;
}
return parsed;
};
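
For reference, a quick usage sketch of the parser removed here, with an invented input; unrecognized shapes are skipped rather than failing the whole parse:

const exampleParsed = parseInvokeAIMetadata({
  session_id: 'abc123',
  node: {
    id: 'node-1',
    type: 'txt2img',
    seed: 42,
    image: { image_type: 'results', image_name: 'foo.png' },
    bogus: { nested: true }, // unrecognized object shape, silently skipped
  },
});
// -> {
//   session_id: 'abc123',
//   node: {
//     id: 'node-1',
//     type: 'txt2img',
//     seed: 42,
//     image: { image_type: 'results', image_name: 'foo.png' },
//   },
// }
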

View File

@@ -30,7 +30,6 @@ interface Props extends PropsWithChildren {
disabledTabs?: InvokeTabName[];
token?: string;
shouldTransformUrls?: boolean;
shouldFetchImages?: boolean;
}
export default function Component({
@@ -40,7 +39,6 @@ export default function Component({
token,
children,
shouldTransformUrls,
shouldFetchImages = false,
}: Props) {
useEffect(() => {
// configure API client token
@@ -72,12 +70,7 @@ export default function Component({
<React.Suspense fallback={<Loading showText />}>
<ThemeLocaleProvider>
<App
options={{
disabledPanels,
disabledTabs,
shouldTransformUrls,
shouldFetchImages,
}}
options={{ disabledPanels, disabledTabs, shouldTransformUrls }}
>
{children}
</App>

View File

@@ -1,14 +0,0 @@
import { CanvasState } from './canvasTypes';
/**
* Canvas slice persist blacklist
*/
const itemsToBlacklist: (keyof CanvasState)[] = [
'cursorPosition',
'isCanvasInitialized',
'doesCanvasNeedScaling',
];
export const canvasBlacklist = itemsToBlacklist.map(
(blacklistItem) => `canvas.${blacklistItem}`
);

Some files were not shown because too many files have changed in this diff Show More