Compare commits
121 Commits
Commit SHA1s in this comparison:

f787e9acf6, 5a24b89e54, 9b482e2a4f, df4dbe2d57, 713bd11177, 182571df4b, 29bfe492b6,
3fb4e3050c, 39c7ec3cd9, 26bfbdec7f, 7a3eaa8da9, 599db7296f, 042aab4295, 24f298283f,
68dac6349d, b675fc19e8, 659019cfd6, dcd61e1f82, f5c99b1488, 810be3e1d4, 60d754d1df,
bd07c86db9, bcbf8b6bd8, 356661459b, deb917825e, 15415c6d85, 76b0380b5f, 2d58754789,
9cdf1f599c, 268be97ba0, a9014673a0, d36c43a10f, 54a5c4e482, 5e09a244e3, 88648dca1a,
8840df2b00, af159acbdf, 471719bbbe, b126f2ffd5, 9938f12ef0, 982c266073, 5c37391883,
ddeafc6833, 41b2d5d013, 29d6f48901, d5c9f4e47f, 24d73387d8, e0d3927265, e5f7c2a9b7,
b0760710d5, 764accc921, 6a01fce9c1, 9c732ac3b1, b70891c661, 4dbf851741, 6c927a9fd4,
096f001634, 4837e578b2, 1e547ef912, f6b8970bd1, 29325a7214, 8ecf72838d, c3ab8a6aa8,
1931aa3e70, d3d8055055, 476b0a0403, f66584713c, 33624fc2fa, 41c3e73a3c, 97553a7de2,
12ba15bfa9, 09d1e190e7, 8eb5d08499, 9be6acde7d, 5f83bb0069, b138882abc, 0cd7cdb52e,
1d8b7e2bcf, 6461f4758d, 3189ab6863, 3f9a674d4b, 587f59b25b, 4952eada87, 581029ebaa,
42d68780de, 28032a2f80, e381e021e9, 641af64f93, a7b83c8b5b, 4cc41e0188, 442fc02429,
9a4d075074, 17ff8196cb, 68f993998a, 7da6120b39, 6cd40965c4, 408a1d6dbb, 0b0abfbe8f,
cc96dcf0ed, 2604fd9fde, 857d74bbfe, fd7a635777, af9110e964, a61209206b, e05cc62e5f,
4f8a4b0f22, a743f3c9b5, 217fe40d99, b76bf50b93, 332bc9da5b, 08def3da95, daf899f9c4,
13fb2d1f49, 95dde802ea, b4cf78a95d, 18f89ed5ed, f170697ebe, 556c6a1d84, e5d9ca013e,
4166c756ce, 4f0dfbd34d
.github/workflows/python-checks.yml (vendored, 2 changed lines):

```diff
@@ -62,7 +62,7 @@ jobs:

       - name: install ruff
         if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
-        run: pip install ruff
+        run: pip install ruff==0.6.0
         shell: bash

       - name: ruff check
```
```diff
@@ -17,7 +17,7 @@
 set -eu

 # Ensure we're in the correct folder in case user's CWD is somewhere else
-scriptdir=$(dirname "$0")
+scriptdir=$(dirname $(readlink -f "$0"))
 cd "$scriptdir"

 . .venv/bin/activate
```
```diff
@@ -1,5 +1,6 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

+import asyncio
 from logging import Logger

 import torch
@@ -31,6 +32,8 @@ from invokeai.app.services.session_processor.session_processor_default import (
 )
 from invokeai.app.services.session_queue.session_queue_sqlite import SqliteSessionQueue
 from invokeai.app.services.shared.sqlite.sqlite_util import init_db
+from invokeai.app.services.style_preset_images.style_preset_images_disk import StylePresetImageFileStorageDisk
+from invokeai.app.services.style_preset_records.style_preset_records_sqlite import SqliteStylePresetRecordsStorage
 from invokeai.app.services.urls.urls_default import LocalUrlService
 from invokeai.app.services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData
@@ -63,7 +66,12 @@ class ApiDependencies:
     invoker: Invoker

     @staticmethod
-    def initialize(config: InvokeAIAppConfig, event_handler_id: int, logger: Logger = logger) -> None:
+    def initialize(
+        config: InvokeAIAppConfig,
+        event_handler_id: int,
+        loop: asyncio.AbstractEventLoop,
+        logger: Logger = logger,
+    ) -> None:
         logger.info(f"InvokeAI version {__version__}")
         logger.info(f"Root directory = {str(config.root_path)}")

@@ -74,6 +82,7 @@ class ApiDependencies:
         image_files = DiskImageFileStorage(f"{output_folder}/images")

         model_images_folder = config.models_path
+        style_presets_folder = config.style_presets_path

         db = init_db(config=config, logger=logger, image_files=image_files)

@@ -84,7 +93,7 @@ class ApiDependencies:
         board_images = BoardImagesService()
         board_records = SqliteBoardRecordStorage(db=db)
         boards = BoardService()
-        events = FastAPIEventService(event_handler_id)
+        events = FastAPIEventService(event_handler_id, loop=loop)
         bulk_download = BulkDownloadService()
         image_records = SqliteImageRecordStorage(db=db)
         images = ImageService()
@@ -109,6 +118,8 @@ class ApiDependencies:
         session_queue = SqliteSessionQueue(db=db)
         urls = LocalUrlService()
         workflow_records = SqliteWorkflowRecordsStorage(db=db)
+        style_preset_records = SqliteStylePresetRecordsStorage(db=db)
+        style_preset_image_files = StylePresetImageFileStorageDisk(style_presets_folder / "images")

         services = InvocationServices(
             board_image_records=board_image_records,
@@ -134,6 +145,8 @@ class ApiDependencies:
             workflow_records=workflow_records,
             tensors=tensors,
             conditioning=conditioning,
+            style_preset_records=style_preset_records,
+            style_preset_image_files=style_preset_image_files,
         )

         ApiDependencies.invoker = Invoker(services)
```
```diff
@@ -218,9 +218,8 @@ async def get_image_workflow(
         raise HTTPException(status_code=404)


-@images_router.api_route(
+@images_router.get(
     "/i/{image_name}/full",
-    methods=["GET", "HEAD"],
     operation_id="get_image_full",
     response_class=Response,
     responses={
@@ -231,6 +230,18 @@ async def get_image_workflow(
         404: {"description": "Image not found"},
     },
 )
+@images_router.head(
+    "/i/{image_name}/full",
+    operation_id="get_image_full_head",
+    response_class=Response,
+    responses={
+        200: {
+            "description": "Return the full-resolution image",
+            "content": {"image/png": {}},
+        },
+        404: {"description": "Image not found"},
+    },
+)
 async def get_image_full(
     image_name: str = Path(description="The name of full-resolution image file to get"),
 ) -> Response:
@@ -242,6 +253,7 @@ async def get_image_full(
             content = f.read()
         response = Response(content, media_type="image/png")
         response.headers["Cache-Control"] = f"max-age={IMAGE_MAX_AGE}"
+        response.headers["Content-Disposition"] = f'inline; filename="{image_name}"'
         return response
     except Exception:
         raise HTTPException(status_code=404)
```
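Splitting the single `@images_router.api_route(..., methods=["GET", "HEAD"])` registration into stacked `get` and `head` decorators keeps one handler while giving each HTTP method its own operation id in the generated OpenAPI schema. A minimal, self-contained sketch of the same pattern (the route and names here are illustrative, not the InvokeAI router):

```python
from fastapi import FastAPI, Response

app = FastAPI()


# One handler serves both methods; each decorator registers a separate
# OpenAPI operation, so generated clients see distinct operation ids.
@app.get("/ping", operation_id="ping", response_class=Response)
@app.head("/ping", operation_id="ping_head", response_class=Response)
async def ping() -> Response:
    return Response(content="pong", media_type="text/plain")
```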
invokeai/app/api/routers/style_presets.py (new file, 276 lines):

```python
import csv
import io
import json
import traceback
from typing import Optional

import pydantic
from fastapi import APIRouter, File, Form, HTTPException, Path, Response, UploadFile
from fastapi.responses import FileResponse
from PIL import Image
from pydantic import BaseModel, Field

from invokeai.app.api.dependencies import ApiDependencies
from invokeai.app.api.routers.model_manager import IMAGE_MAX_AGE
from invokeai.app.services.style_preset_images.style_preset_images_common import StylePresetImageFileNotFoundException
from invokeai.app.services.style_preset_records.style_preset_records_common import (
    InvalidPresetImportDataError,
    PresetData,
    PresetType,
    StylePresetChanges,
    StylePresetNotFoundError,
    StylePresetRecordWithImage,
    StylePresetWithoutId,
    UnsupportedFileTypeError,
    parse_presets_from_file,
)


class StylePresetUpdateFormData(BaseModel):
    name: str = Field(description="Preset name")
    positive_prompt: str = Field(description="Positive prompt")
    negative_prompt: str = Field(description="Negative prompt")


class StylePresetCreateFormData(StylePresetUpdateFormData):
    type: PresetType = Field(description="Preset type")


style_presets_router = APIRouter(prefix="/v1/style_presets", tags=["style_presets"])


@style_presets_router.get(
    "/i/{style_preset_id}",
    operation_id="get_style_preset",
    responses={
        200: {"model": StylePresetRecordWithImage},
    },
)
async def get_style_preset(
    style_preset_id: str = Path(description="The style preset to get"),
) -> StylePresetRecordWithImage:
    """Gets a style preset"""
    try:
        image = ApiDependencies.invoker.services.style_preset_image_files.get_url(style_preset_id)
        style_preset = ApiDependencies.invoker.services.style_preset_records.get(style_preset_id)
        return StylePresetRecordWithImage(image=image, **style_preset.model_dump())
    except StylePresetNotFoundError:
        raise HTTPException(status_code=404, detail="Style preset not found")


@style_presets_router.patch(
    "/i/{style_preset_id}",
    operation_id="update_style_preset",
    responses={
        200: {"model": StylePresetRecordWithImage},
    },
)
async def update_style_preset(
    image: Optional[UploadFile] = File(description="The image file to upload", default=None),
    style_preset_id: str = Path(description="The id of the style preset to update"),
    data: str = Form(description="The data of the style preset to update"),
) -> StylePresetRecordWithImage:
    """Updates a style preset"""
    if image is not None:
        if not image.content_type or not image.content_type.startswith("image"):
            raise HTTPException(status_code=415, detail="Not an image")

        contents = await image.read()
        try:
            pil_image = Image.open(io.BytesIO(contents))

        except Exception:
            ApiDependencies.invoker.services.logger.error(traceback.format_exc())
            raise HTTPException(status_code=415, detail="Failed to read image")

        try:
            ApiDependencies.invoker.services.style_preset_image_files.save(style_preset_id, pil_image)
        except ValueError as e:
            raise HTTPException(status_code=409, detail=str(e))
    else:
        try:
            ApiDependencies.invoker.services.style_preset_image_files.delete(style_preset_id)
        except StylePresetImageFileNotFoundException:
            pass

    try:
        parsed_data = json.loads(data)
        validated_data = StylePresetUpdateFormData(**parsed_data)

        name = validated_data.name
        positive_prompt = validated_data.positive_prompt
        negative_prompt = validated_data.negative_prompt

    except pydantic.ValidationError:
        raise HTTPException(status_code=400, detail="Invalid preset data")

    preset_data = PresetData(positive_prompt=positive_prompt, negative_prompt=negative_prompt)
    changes = StylePresetChanges(name=name, preset_data=preset_data)

    style_preset_image = ApiDependencies.invoker.services.style_preset_image_files.get_url(style_preset_id)
    style_preset = ApiDependencies.invoker.services.style_preset_records.update(
        style_preset_id=style_preset_id, changes=changes
    )
    return StylePresetRecordWithImage(image=style_preset_image, **style_preset.model_dump())


@style_presets_router.delete(
    "/i/{style_preset_id}",
    operation_id="delete_style_preset",
)
async def delete_style_preset(
    style_preset_id: str = Path(description="The style preset to delete"),
) -> None:
    """Deletes a style preset"""
    try:
        ApiDependencies.invoker.services.style_preset_image_files.delete(style_preset_id)
    except StylePresetImageFileNotFoundException:
        pass

    ApiDependencies.invoker.services.style_preset_records.delete(style_preset_id)


@style_presets_router.post(
    "/",
    operation_id="create_style_preset",
    responses={
        200: {"model": StylePresetRecordWithImage},
    },
)
async def create_style_preset(
    image: Optional[UploadFile] = File(description="The image file to upload", default=None),
    data: str = Form(description="The data of the style preset to create"),
) -> StylePresetRecordWithImage:
    """Creates a style preset"""

    try:
        parsed_data = json.loads(data)
        validated_data = StylePresetCreateFormData(**parsed_data)

        name = validated_data.name
        type = validated_data.type
        positive_prompt = validated_data.positive_prompt
        negative_prompt = validated_data.negative_prompt

    except pydantic.ValidationError:
        raise HTTPException(status_code=400, detail="Invalid preset data")

    preset_data = PresetData(positive_prompt=positive_prompt, negative_prompt=negative_prompt)
    style_preset = StylePresetWithoutId(name=name, preset_data=preset_data, type=type)
    new_style_preset = ApiDependencies.invoker.services.style_preset_records.create(style_preset=style_preset)

    if image is not None:
        if not image.content_type or not image.content_type.startswith("image"):
            raise HTTPException(status_code=415, detail="Not an image")

        contents = await image.read()
        try:
            pil_image = Image.open(io.BytesIO(contents))

        except Exception:
            ApiDependencies.invoker.services.logger.error(traceback.format_exc())
            raise HTTPException(status_code=415, detail="Failed to read image")

        try:
            ApiDependencies.invoker.services.style_preset_image_files.save(new_style_preset.id, pil_image)
        except ValueError as e:
            raise HTTPException(status_code=409, detail=str(e))

    preset_image = ApiDependencies.invoker.services.style_preset_image_files.get_url(new_style_preset.id)
    return StylePresetRecordWithImage(image=preset_image, **new_style_preset.model_dump())


@style_presets_router.get(
    "/",
    operation_id="list_style_presets",
    responses={
        200: {"model": list[StylePresetRecordWithImage]},
    },
)
async def list_style_presets() -> list[StylePresetRecordWithImage]:
    """Gets a page of style presets"""
    style_presets_with_image: list[StylePresetRecordWithImage] = []
    style_presets = ApiDependencies.invoker.services.style_preset_records.get_many()
    for preset in style_presets:
        image = ApiDependencies.invoker.services.style_preset_image_files.get_url(preset.id)
        style_preset_with_image = StylePresetRecordWithImage(image=image, **preset.model_dump())
        style_presets_with_image.append(style_preset_with_image)

    return style_presets_with_image


@style_presets_router.get(
    "/i/{style_preset_id}/image",
    operation_id="get_style_preset_image",
    responses={
        200: {
            "description": "The style preset image was fetched successfully",
        },
        400: {"description": "Bad request"},
        404: {"description": "The style preset image could not be found"},
    },
    status_code=200,
)
async def get_style_preset_image(
    style_preset_id: str = Path(description="The id of the style preset image to get"),
) -> FileResponse:
    """Gets an image file that previews the model"""

    try:
        path = ApiDependencies.invoker.services.style_preset_image_files.get_path(style_preset_id)

        response = FileResponse(
            path,
            media_type="image/png",
            filename=style_preset_id + ".png",
            content_disposition_type="inline",
        )
        response.headers["Cache-Control"] = f"max-age={IMAGE_MAX_AGE}"
        return response
    except Exception:
        raise HTTPException(status_code=404)


@style_presets_router.get(
    "/export",
    operation_id="export_style_presets",
    responses={200: {"content": {"text/csv": {}}, "description": "A CSV file with the requested data."}},
    status_code=200,
)
async def export_style_presets():
    # Create an in-memory stream to store the CSV data
    output = io.StringIO()
    writer = csv.writer(output)

    # Write the header
    writer.writerow(["name", "prompt", "negative_prompt"])

    style_presets = ApiDependencies.invoker.services.style_preset_records.get_many(type=PresetType.User)

    for preset in style_presets:
        writer.writerow([preset.name, preset.preset_data.positive_prompt, preset.preset_data.negative_prompt])

    csv_data = output.getvalue()
    output.close()

    return Response(
        content=csv_data,
        media_type="text/csv",
        headers={"Content-Disposition": "attachment; filename=prompt_templates.csv"},
    )


@style_presets_router.post(
    "/import",
    operation_id="import_style_presets",
)
async def import_style_presets(file: UploadFile = File(description="The file to import")):
    try:
        style_presets = await parse_presets_from_file(file)
        ApiDependencies.invoker.services.style_preset_records.create_many(style_presets)
    except InvalidPresetImportDataError as e:
        ApiDependencies.invoker.services.logger.error(traceback.format_exc())
        raise HTTPException(status_code=400, detail=str(e))
    except UnsupportedFileTypeError as e:
        ApiDependencies.invoker.services.logger.error(traceback.format_exc())
        raise HTTPException(status_code=415, detail=str(e))
```
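For reference, a sketch of how a client might exercise the new endpoints. The `data` form field carries the preset JSON as a string and the image is an optional multipart file, exactly as defined in the router above; the base URL and the use of `requests` are assumptions for illustration, not part of this PR.

```python
import json

import requests

BASE = "http://127.0.0.1:9090/api/v1/style_presets"  # assumed local InvokeAI instance

# Create a preset: `data` is a JSON string, validated server-side as StylePresetCreateFormData.
payload = {
    "name": "Moody Portrait",
    "positive_prompt": "{prompt}. dramatic rim lighting, shallow depth of field",
    "negative_prompt": "blurry, overexposed",
    "type": "user",
}
with open("preview.png", "rb") as f:
    resp = requests.post(
        BASE + "/",
        data={"data": json.dumps(payload)},
        files={"image": ("preview.png", f, "image/png")},
    )
resp.raise_for_status()
preset = resp.json()

# List all presets, then export the user presets as CSV.
all_presets = requests.get(BASE + "/").json()
csv_bytes = requests.get(BASE + "/export").content
```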
```diff
@@ -30,6 +30,7 @@ from invokeai.app.api.routers import (
     images,
     model_manager,
     session_queue,
+    style_presets,
     utilities,
     workflows,
 )
@@ -55,11 +56,13 @@ mimetypes.add_type("text/css", ".css")
 torch_device_name = TorchDevice.get_torch_device_name()
 logger.info(f"Using torch device: {torch_device_name}")

+loop = asyncio.new_event_loop()
+

 @asynccontextmanager
 async def lifespan(app: FastAPI):
     # Add startup event to load dependencies
-    ApiDependencies.initialize(config=app_config, event_handler_id=event_handler_id, logger=logger)
+    ApiDependencies.initialize(config=app_config, event_handler_id=event_handler_id, loop=loop, logger=logger)
     yield
     # Shut down threads
     ApiDependencies.shutdown()
@@ -106,6 +109,7 @@ app.include_router(board_images.board_images_router, prefix="/api")
 app.include_router(app_info.app_router, prefix="/api")
 app.include_router(session_queue.session_queue_router, prefix="/api")
 app.include_router(workflows.workflows_router, prefix="/api")
+app.include_router(style_presets.style_presets_router, prefix="/api")

 app.openapi = get_openapi_func(app)

@@ -184,8 +188,6 @@ def invoke_api() -> None:

     check_cudnn(logger)

-    # Start our own event loop for eventing usage
-    loop = asyncio.new_event_loop()
     config = uvicorn.Config(
         app=app,
         host=app_config.host,
```
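Moving `loop = asyncio.new_event_loop()` to module scope means the same loop object can be handed to `ApiDependencies.initialize` (and from there to `FastAPIEventService`) before the server starts serving on it. A rough sketch of that shape, assuming uvicorn's standard `Server.serve()` coroutine; the parts of `invoke_api()` not shown in this hunk are not implied here.

```python
import asyncio

import uvicorn
from fastapi import FastAPI

app = FastAPI()
loop = asyncio.new_event_loop()  # created up front so services can schedule work on it


def run() -> None:
    # Hypothetical host/port; the point is that the app and its services
    # share the explicitly created loop rather than one uvicorn makes itself.
    config = uvicorn.Config(app=app, host="127.0.0.1", port=9090)
    server = uvicorn.Server(config)
    asyncio.set_event_loop(loop)
    loop.run_until_complete(server.serve())
```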
```diff
@@ -21,6 +21,8 @@ from controlnet_aux import (
 from controlnet_aux.util import HWC3, ade_palette
 from PIL import Image
 from pydantic import BaseModel, Field, field_validator, model_validator
+from transformers import pipeline
+from transformers.pipelines import DepthEstimationPipeline

 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
@@ -44,13 +46,12 @@ from invokeai.app.invocations.util import validate_begin_end_step, validate_weig
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES, heuristic_resize
 from invokeai.backend.image_util.canny import get_canny_edges
-from invokeai.backend.image_util.depth_anything import DEPTH_ANYTHING_MODELS, DepthAnythingDetector
+from invokeai.backend.image_util.depth_anything.depth_anything_pipeline import DepthAnythingPipeline
 from invokeai.backend.image_util.dw_openpose import DWPOSE_MODELS, DWOpenposeDetector
 from invokeai.backend.image_util.hed import HEDProcessor
 from invokeai.backend.image_util.lineart import LineartProcessor
 from invokeai.backend.image_util.lineart_anime import LineartAnimeProcessor
 from invokeai.backend.image_util.util import np_to_pil, pil_to_np
-from invokeai.backend.util.devices import TorchDevice


 class ControlField(BaseModel):
@@ -592,7 +593,14 @@ class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
         return color_map


-DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"]
+DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small", "small_v2"]
+# DepthAnything V2 Small model is licensed under Apache 2.0 but not the base and large models.
+DEPTH_ANYTHING_MODELS = {
+    "large": "LiheYoung/depth-anything-large-hf",
+    "base": "LiheYoung/depth-anything-base-hf",
+    "small": "LiheYoung/depth-anything-small-hf",
+    "small_v2": "depth-anything/Depth-Anything-V2-Small-hf",
+}


 @invocation(
@@ -600,28 +608,33 @@ DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"]
     title="Depth Anything Processor",
     tags=["controlnet", "depth", "depth anything"],
     category="controlnet",
-    version="1.1.2",
+    version="1.1.3",
 )
 class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
     """Generates a depth map based on the Depth Anything algorithm"""

     model_size: DEPTH_ANYTHING_MODEL_SIZES = InputField(
-        default="small", description="The size of the depth model to use"
+        default="small_v2", description="The size of the depth model to use"
     )
     resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

     def run_processor(self, image: Image.Image) -> Image.Image:
-        def loader(model_path: Path):
-            return DepthAnythingDetector.load_model(
-                model_path, model_size=self.model_size, device=TorchDevice.choose_torch_device()
-            )
+        def load_depth_anything(model_path: Path):
+            depth_anything_pipeline = pipeline(model=str(model_path), task="depth-estimation", local_files_only=True)
+            assert isinstance(depth_anything_pipeline, DepthEstimationPipeline)
+            return DepthAnythingPipeline(depth_anything_pipeline)

         with self._context.models.load_remote_model(
-            source=DEPTH_ANYTHING_MODELS[self.model_size], loader=loader
-        ) as model:
-            depth_anything_detector = DepthAnythingDetector(model, TorchDevice.choose_torch_device())
-            processed_image = depth_anything_detector(image=image, resolution=self.resolution)
-            return processed_image
+            source=DEPTH_ANYTHING_MODELS[self.model_size], loader=load_depth_anything
+        ) as depth_anything_detector:
+            assert isinstance(depth_anything_detector, DepthAnythingPipeline)
+            depth_map = depth_anything_detector.generate_depth(image)
+
+            # Resizing to user target specified size
+            new_height = int(image.size[1] * (self.resolution / image.size[0]))
+            depth_map = depth_map.resize((self.resolution, new_height))
+
+            return depth_map


 @invocation(
```
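The processor now defers to the Hugging Face `transformers` depth-estimation pipeline (wrapped in `DepthAnythingPipeline`) instead of the bundled `DepthAnythingDetector`. A standalone sketch of the underlying pipeline call plus the same resize-to-target-resolution step; the model id comes from the `DEPTH_ANYTHING_MODELS` table above, while the file names and target resolution are illustrative.

```python
from PIL import Image
from transformers import pipeline

# "small_v2" entry from DEPTH_ANYTHING_MODELS; downloads from the HF Hub on first use.
depth_estimator = pipeline(task="depth-estimation", model="depth-anything/Depth-Anything-V2-Small-hf")

image = Image.open("input.png").convert("RGB")
depth_map = depth_estimator(image)["depth"]  # PIL image, same size as the input

# Mirror the invocation's resize: scale to the requested width, keep aspect ratio.
resolution = 512
new_height = int(image.size[1] * (resolution / image.size[0]))
depth_map = depth_map.resize((resolution, new_height))
depth_map.save("depth.png")
```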
```diff
@@ -91,6 +91,7 @@ class InvokeAIAppConfig(BaseSettings):
         db_dir: Path to InvokeAI databases directory.
         outputs_dir: Path to directory for outputs.
         custom_nodes_dir: Path to directory for custom nodes.
+        style_presets_dir: Path to directory for style presets.
         log_handlers: Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>".
         log_format: Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.<br>Valid values: `plain`, `color`, `syslog`, `legacy`
         log_level: Emit logging messages at this level or higher.<br>Valid values: `debug`, `info`, `warning`, `error`, `critical`
@@ -153,6 +154,7 @@
     db_dir: Path = Field(default=Path("databases"), description="Path to InvokeAI databases directory.")
     outputs_dir: Path = Field(default=Path("outputs"), description="Path to directory for outputs.")
     custom_nodes_dir: Path = Field(default=Path("nodes"), description="Path to directory for custom nodes.")
+    style_presets_dir: Path = Field(default=Path("style_presets"), description="Path to directory for style presets.")

     # LOGGING
     log_handlers: list[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>".')
@@ -300,6 +302,11 @@
         """Path to the models directory, resolved to an absolute path.."""
         return self._resolve(self.models_dir)

+    @property
+    def style_presets_path(self) -> Path:
+        """Path to the style presets directory, resolved to an absolute path.."""
+        return self._resolve(self.style_presets_dir)
+
     @property
     def convert_cache_path(self) -> Path:
         """Path to the converted cache models directory, resolved to an absolute path.."""
```
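`style_presets_dir` defaults to a relative `style_presets` directory and, like the other `*_dir` settings, is resolved against the install root by `_resolve`. A toy sketch of that resolution logic, under the assumption that `_resolve` anchors relative settings at the root path (the real helper is not part of this diff):

```python
from pathlib import Path


def resolve(root_path: Path, dir_setting: Path) -> Path:
    # Assumed behaviour: relative settings are anchored at the install root,
    # absolute settings are used as-is.
    return dir_setting if dir_setting.is_absolute() else (root_path / dir_setting).resolve()


root = Path.home() / "invokeai"
print(resolve(root, Path("style_presets")))   # <root>/style_presets (the default)
print(resolve(root, Path("/data/presets")))   # an absolute override is left untouched
```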
```diff
@@ -1,46 +1,44 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

 import asyncio
 import threading
-from queue import Empty, Queue

 from fastapi_events.dispatcher import dispatch

 from invokeai.app.services.events.events_base import EventServiceBase
-from invokeai.app.services.events.events_common import (
-    EventBase,
-)
+from invokeai.app.services.events.events_common import EventBase


 class FastAPIEventService(EventServiceBase):
-    def __init__(self, event_handler_id: int) -> None:
+    def __init__(self, event_handler_id: int, loop: asyncio.AbstractEventLoop) -> None:
         self.event_handler_id = event_handler_id
-        self._queue = Queue[EventBase | None]()
+        self._queue = asyncio.Queue[EventBase | None]()
         self._stop_event = threading.Event()
-        asyncio.create_task(self._dispatch_from_queue(stop_event=self._stop_event))
+        self._loop = loop
+
+        # We need to store a reference to the task so it doesn't get GC'd
+        # See: https://docs.python.org/3/library/asyncio-task.html#creating-tasks
+        self._background_tasks: set[asyncio.Task[None]] = set()
+        task = self._loop.create_task(self._dispatch_from_queue(stop_event=self._stop_event))
+        self._background_tasks.add(task)
+        task.add_done_callback(self._background_tasks.remove)

         super().__init__()

     def stop(self, *args, **kwargs):
         self._stop_event.set()
-        self._queue.put(None)
+        self._loop.call_soon_threadsafe(self._queue.put_nowait, None)

     def dispatch(self, event: EventBase) -> None:
-        self._queue.put(event)
+        self._loop.call_soon_threadsafe(self._queue.put_nowait, event)

     async def _dispatch_from_queue(self, stop_event: threading.Event):
         """Get events on from the queue and dispatch them, from the correct thread"""
         while not stop_event.is_set():
             try:
-                event = self._queue.get(block=False)
+                event = await self._queue.get()
                 if not event:  # Probably stopping
                     continue
                 # Leave the payloads as live pydantic models
                 dispatch(event, middleware_id=self.event_handler_id, payload_schema_dump=False)

-            except Empty:
-                await asyncio.sleep(0.1)
-                pass
-
             except asyncio.CancelledError as e:
                 raise e  # Raise a proper error
```
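The service now owns an `asyncio.Queue` bound to the loop passed in from `api_app`, and producers running on other threads hand events over with `loop.call_soon_threadsafe`, which is the documented way to touch a loop from outside its thread; the awaiting consumer replaces the old poll-and-sleep on a thread-safe `queue.Queue`. A minimal standalone sketch of that handoff pattern (names and events here are illustrative, not InvokeAI APIs):

```python
import asyncio
import threading


async def consume(queue: asyncio.Queue[str | None]) -> None:
    while True:
        item = await queue.get()
        if item is None:  # sentinel: stop
            break
        print("dispatched:", item)


def main() -> None:
    # Requires Python 3.10+, where asyncio.Queue is not bound to a loop at construction,
    # mirroring how FastAPIEventService builds its queue in __init__ before the loop runs.
    loop = asyncio.new_event_loop()
    queue: asyncio.Queue[str | None] = asyncio.Queue()

    def producer() -> None:
        # Runs on a worker thread: the queue may only be touched via the loop.
        for name in ("invocation_started", "invocation_complete"):
            loop.call_soon_threadsafe(queue.put_nowait, name)
        loop.call_soon_threadsafe(queue.put_nowait, None)

    threading.Thread(target=producer).start()
    loop.run_until_complete(consume(queue))


if __name__ == "__main__":
    main()
```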
```diff
@@ -4,6 +4,8 @@ from __future__ import annotations
 from typing import TYPE_CHECKING

 from invokeai.app.services.object_serializer.object_serializer_base import ObjectSerializerBase
+from invokeai.app.services.style_preset_images.style_preset_images_base import StylePresetImageFileStorageBase
+from invokeai.app.services.style_preset_records.style_preset_records_base import StylePresetRecordsStorageBase

 if TYPE_CHECKING:
     from logging import Logger
@@ -61,6 +63,8 @@ class InvocationServices:
         workflow_records: "WorkflowRecordsStorageBase",
         tensors: "ObjectSerializerBase[torch.Tensor]",
         conditioning: "ObjectSerializerBase[ConditioningFieldData]",
+        style_preset_records: "StylePresetRecordsStorageBase",
+        style_preset_image_files: "StylePresetImageFileStorageBase",
     ):
         self.board_images = board_images
         self.board_image_records = board_image_records
@@ -85,3 +89,5 @@ class InvocationServices:
         self.workflow_records = workflow_records
         self.tensors = tensors
         self.conditioning = conditioning
+        self.style_preset_records = style_preset_records
+        self.style_preset_image_files = style_preset_image_files
```
```diff
@@ -16,6 +16,7 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_10 import
 from invokeai.app.services.shared.sqlite_migrator.migrations.migration_11 import build_migration_11
 from invokeai.app.services.shared.sqlite_migrator.migrations.migration_12 import build_migration_12
 from invokeai.app.services.shared.sqlite_migrator.migrations.migration_13 import build_migration_13
+from invokeai.app.services.shared.sqlite_migrator.migrations.migration_14 import build_migration_14
 from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator


@@ -49,6 +50,7 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
     migrator.register_migration(build_migration_11(app_config=config, logger=logger))
     migrator.register_migration(build_migration_12(app_config=config))
     migrator.register_migration(build_migration_13())
+    migrator.register_migration(build_migration_14())
     migrator.run_migrations()

     return db
```
New file (61 lines):

```python
import sqlite3

from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration


class Migration14Callback:
    def __call__(self, cursor: sqlite3.Cursor) -> None:
        self._create_style_presets(cursor)

    def _create_style_presets(self, cursor: sqlite3.Cursor) -> None:
        """Create the table used to store style presets."""
        tables = [
            """--sql
            CREATE TABLE IF NOT EXISTS style_presets (
                id TEXT NOT NULL PRIMARY KEY,
                name TEXT NOT NULL,
                preset_data TEXT NOT NULL,
                type TEXT NOT NULL DEFAULT "user",
                created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
                -- Updated via trigger
                updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW'))
            );
            """
        ]

        # Add trigger for `updated_at`.
        triggers = [
            """--sql
            CREATE TRIGGER IF NOT EXISTS style_presets
            AFTER UPDATE
            ON style_presets FOR EACH ROW
            BEGIN
                UPDATE style_presets SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
                    WHERE id = old.id;
            END;
            """
        ]

        # Add indexes for searchable fields
        indices = [
            "CREATE INDEX IF NOT EXISTS idx_style_presets_name ON style_presets(name);",
        ]

        for stmt in tables + indices + triggers:
            cursor.execute(stmt)


def build_migration_14() -> Migration:
    """
    Build the migration from database version 13 to 14..

    This migration does the following:
        - Create the table used to store style presets.
    """
    migration_14 = Migration(
        from_version=13,
        to_version=14,
        callback=Migration14Callback(),
    )

    return migration_14
```
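The table and its `updated_at` trigger can be checked in isolation with the standard library. A small sketch using trimmed-down versions of the same DDL (the trigger is renamed here purely for clarity; everything else follows the migration above):

```python
import sqlite3
import time

conn = sqlite3.connect(":memory:")
cur = conn.cursor()

# Essentials of the style_presets table from Migration14Callback.
cur.execute(
    """CREATE TABLE style_presets (
        id TEXT NOT NULL PRIMARY KEY,
        name TEXT NOT NULL,
        preset_data TEXT NOT NULL,
        type TEXT NOT NULL DEFAULT "user",
        created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
        updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW'))
    );"""
)
cur.execute(
    """CREATE TRIGGER style_presets_updated_at AFTER UPDATE ON style_presets FOR EACH ROW
       BEGIN
           UPDATE style_presets SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW') WHERE id = old.id;
       END;"""
)

cur.execute("INSERT INTO style_presets (id, name, preset_data) VALUES ('p1', 'Sketch', '{}');")
time.sleep(0.02)
cur.execute("UPDATE style_presets SET name = 'Sketch v2' WHERE id = 'p1';")
created, updated = cur.execute("SELECT created_at, updated_at FROM style_presets WHERE id = 'p1';").fetchone()
assert updated >= created  # the trigger bumped updated_at on the rename
```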
18 binary image files added (default style preset preview images, 46-160 KiB each).
New file (33 lines):

```python
from abc import ABC, abstractmethod
from pathlib import Path

from PIL.Image import Image as PILImageType


class StylePresetImageFileStorageBase(ABC):
    """Low-level service responsible for storing and retrieving image files."""

    @abstractmethod
    def get(self, style_preset_id: str) -> PILImageType:
        """Retrieves a style preset image as PIL Image."""
        pass

    @abstractmethod
    def get_path(self, style_preset_id: str) -> Path:
        """Gets the internal path to a style preset image."""
        pass

    @abstractmethod
    def get_url(self, style_preset_id: str) -> str | None:
        """Gets the URL to fetch a style preset image."""
        pass

    @abstractmethod
    def save(self, style_preset_id: str, image: PILImageType) -> None:
        """Saves a style preset image."""
        pass

    @abstractmethod
    def delete(self, style_preset_id: str) -> None:
        """Deletes a style preset image."""
        pass
```
New file (19 lines):

```python
class StylePresetImageFileNotFoundException(Exception):
    """Raised when an image file is not found in storage."""

    def __init__(self, message: str = "Style preset image file not found"):
        super().__init__(message)


class StylePresetImageFileSaveException(Exception):
    """Raised when an image cannot be saved."""

    def __init__(self, message: str = "Style preset image file not saved"):
        super().__init__(message)


class StylePresetImageFileDeleteException(Exception):
    """Raised when an image cannot be deleted."""

    def __init__(self, message: str = "Style preset image file not deleted"):
        super().__init__(message)
```
New file (88 lines):

```python
from pathlib import Path

from PIL import Image
from PIL.Image import Image as PILImageType

from invokeai.app.services.invoker import Invoker
from invokeai.app.services.style_preset_images.style_preset_images_base import StylePresetImageFileStorageBase
from invokeai.app.services.style_preset_images.style_preset_images_common import (
    StylePresetImageFileDeleteException,
    StylePresetImageFileNotFoundException,
    StylePresetImageFileSaveException,
)
from invokeai.app.services.style_preset_records.style_preset_records_common import PresetType
from invokeai.app.util.misc import uuid_string
from invokeai.app.util.thumbnails import make_thumbnail


class StylePresetImageFileStorageDisk(StylePresetImageFileStorageBase):
    """Stores images on disk"""

    def __init__(self, style_preset_images_folder: Path):
        self._style_preset_images_folder = style_preset_images_folder
        self._validate_storage_folders()

    def start(self, invoker: Invoker) -> None:
        self._invoker = invoker

    def get(self, style_preset_id: str) -> PILImageType:
        try:
            path = self.get_path(style_preset_id)

            return Image.open(path)
        except FileNotFoundError as e:
            raise StylePresetImageFileNotFoundException from e

    def save(self, style_preset_id: str, image: PILImageType) -> None:
        try:
            self._validate_storage_folders()
            image_path = self._style_preset_images_folder / (style_preset_id + ".webp")
            thumbnail = make_thumbnail(image, 256)
            thumbnail.save(image_path, format="webp")

        except Exception as e:
            raise StylePresetImageFileSaveException from e

    def get_path(self, style_preset_id: str) -> Path:
        style_preset = self._invoker.services.style_preset_records.get(style_preset_id)
        if style_preset.type is PresetType.Default:
            default_images_dir = Path(__file__).parent / Path("default_style_preset_images")
            path = default_images_dir / (style_preset.name + ".png")
        else:
            path = self._style_preset_images_folder / (style_preset_id + ".webp")

        return path

    def get_url(self, style_preset_id: str) -> str | None:
        path = self.get_path(style_preset_id)
        if not self._validate_path(path):
            return

        url = self._invoker.services.urls.get_style_preset_image_url(style_preset_id)

        # The image URL never changes, so we must add random query string to it to prevent caching
        url += f"?{uuid_string()}"

        return url

    def delete(self, style_preset_id: str) -> None:
        try:
            path = self.get_path(style_preset_id)

            if not self._validate_path(path):
                raise StylePresetImageFileNotFoundException

            path.unlink()

        except StylePresetImageFileNotFoundException as e:
            raise StylePresetImageFileNotFoundException from e
        except Exception as e:
            raise StylePresetImageFileDeleteException from e

    def _validate_path(self, path: Path) -> bool:
        """Validates the path given for an image."""
        return path.exists()

    def _validate_storage_folders(self) -> None:
        """Checks if the required folders exist and create them if they don't"""
        self._style_preset_images_folder.mkdir(parents=True, exist_ok=True)
```
New file (146 lines):

```json
[
    {
        "name": "Photography (General)",
        "type": "default",
        "preset_data": {
            "positive_prompt": "{prompt}. photography. f/2.8 macro photo, bokeh, photorealism",
            "negative_prompt": "painting, digital art. sketch, blurry"
        }
    },
    {
        "name": "Photography (Studio Lighting)",
        "type": "default",
        "preset_data": {
            "positive_prompt": "{prompt}, photography. f/8 photo. centered subject, studio lighting.",
            "negative_prompt": "painting, digital art. sketch, blurry"
        }
    },
    {
        "name": "Photography (Landscape)",
        "type": "default",
        "preset_data": {
            "positive_prompt": "{prompt}, landscape photograph, f/12, lifelike, highly detailed.",
            "negative_prompt": "painting, digital art. sketch, blurry"
        }
    },
    {
        "name": "Photography (Portrait)",
        "type": "default",
        "preset_data": {
            "positive_prompt": "{prompt}. photography. portraiture. catch light in eyes. one flash. rembrandt lighting. Soft box. dark shadows. High contrast. 80mm lens. F2.8.",
            "negative_prompt": "painting, digital art. sketch, blurry"
        }
    },
    {
        "name": "Photography (Black and White)",
        "type": "default",
        "preset_data": {
            "positive_prompt": "{prompt} photography. natural light. 80mm lens. F1.4. strong contrast, hard light. dark contrast. blurred background. black and white",
            "negative_prompt": "painting, digital art. sketch, colour+"
        }
    },
    {
        "name": "Architectural Visualization",
        "type": "default",
        "preset_data": {
            "positive_prompt": "{prompt}. architectural photography, f/12, luxury, aesthetically pleasing form and function.",
            "negative_prompt": "painting, digital art. sketch, blurry"
        }
    },
    {
        "name": "Concept Art (Fantasy)",
        "type": "default",
        "preset_data": {
            "positive_prompt": "concept artwork of a {prompt}. (digital painterly art style)++, mythological, (textured 2d dry media brushpack)++, glazed brushstrokes, otherworldly. painting+, illustration+",
            "negative_prompt": "photo. distorted, blurry, out of focus. sketch. (cgi, 3d.)++"
        }
    },
    {
        "name": "Concept Art (Sci-Fi)",
        "type": "default",
        "preset_data": {
            "positive_prompt": "(concept art)++, {prompt}, (sleek futurism)++, (textured 2d dry media)++, metallic highlights, digital painting style",
            "negative_prompt": "photo. distorted, blurry, out of focus. sketch. (cgi, 3d.)++"
        }
    },
    {
        "name": "Concept Art (Character)",
        "type": "default",
        "preset_data": {
            "positive_prompt": "(character concept art)++, stylized painterly digital painting of {prompt}, (painterly, impasto. Dry brush.)++",
            "negative_prompt": "photo. distorted, blurry, out of focus. sketch. (cgi, 3d.)++"
        }
    },
    {
        "name": "Concept Art (Painterly)",
        "type": "default",
        "preset_data": {
            "positive_prompt": "{prompt} oil painting. high contrast. impasto. sfumato. chiaroscuro. Palette knife.",
            "negative_prompt": "photo. smooth. border. frame"
        }
    },
    {
        "name": "Environment Art",
        "type": "default",
        "preset_data": {
            "positive_prompt": "{prompt} environment artwork, hyper-realistic digital painting style with cinematic composition, atmospheric, depth and detail, voluminous. textured dry brush 2d media",
            "negative_prompt": "photo, distorted, blurry, out of focus. sketch."
        }
    },
    {
        "name": "Interior Design (Visualization)",
        "type": "default",
        "preset_data": {
            "positive_prompt": "{prompt} interior design photo, gentle shadows, light mid-tones, dimension, mix of smooth and textured surfaces, focus on negative space and clean lines, focus",
            "negative_prompt": "photo, distorted. sketch."
        }
    },
    {
        "name": "Product Rendering",
        "type": "default",
        "preset_data": {
            "positive_prompt": "{prompt} high quality product photography, 3d rendering with key lighting, shallow depth of field, simple plain background, studio lighting.",
            "negative_prompt": "blurry, sketch, messy, dirty. unfinished."
        }
    },
    {
        "name": "Sketch",
        "type": "default",
        "preset_data": {
            "positive_prompt": "{prompt} black and white pencil drawing, off-center composition, cross-hatching for shadows, bold strokes, textured paper. sketch+++",
            "negative_prompt": "blurry, photo, painting, color. messy, dirty. unfinished. frame, borders."
        }
    },
    {
        "name": "Line Art",
        "type": "default",
        "preset_data": {
            "positive_prompt": "{prompt} Line art. bold outline. simplistic. white background. 2d",
            "negative_prompt": "photo. digital art. greyscale. solid black. painting"
        }
    },
    {
        "name": "Anime",
        "type": "default",
        "preset_data": {
            "positive_prompt": "{prompt} anime++, bold outline, cel-shaded coloring, shounen, seinen",
            "negative_prompt": "(photo)+++. greyscale. solid black. painting"
        }
    },
    {
        "name": "Illustration",
        "type": "default",
        "preset_data": {
            "positive_prompt": "{prompt} illustration, bold linework, illustrative details, vector art style, flat coloring",
            "negative_prompt": "(photo)+++. greyscale. painting, black and white."
        }
    },
    {
        "name": "Vehicles",
        "type": "default",
        "preset_data": {
            "positive_prompt": "A weird futuristic normal auto, {prompt} elegant design, nice color, nice wheels",
            "negative_prompt": "sketch. digital art. greyscale. painting"
        }
    }
]
```
New file (42 lines):

```python
from abc import ABC, abstractmethod

from invokeai.app.services.style_preset_records.style_preset_records_common import (
    PresetType,
    StylePresetChanges,
    StylePresetRecordDTO,
    StylePresetWithoutId,
)


class StylePresetRecordsStorageBase(ABC):
    """Base class for style preset storage services."""

    @abstractmethod
    def get(self, style_preset_id: str) -> StylePresetRecordDTO:
        """Get style preset by id."""
        pass

    @abstractmethod
    def create(self, style_preset: StylePresetWithoutId) -> StylePresetRecordDTO:
        """Creates a style preset."""
        pass

    @abstractmethod
    def create_many(self, style_presets: list[StylePresetWithoutId]) -> None:
        """Creates many style presets."""
        pass

    @abstractmethod
    def update(self, style_preset_id: str, changes: StylePresetChanges) -> StylePresetRecordDTO:
        """Updates a style preset."""
        pass

    @abstractmethod
    def delete(self, style_preset_id: str) -> None:
        """Deletes a style preset."""
        pass

    @abstractmethod
    def get_many(self, type: PresetType | None = None) -> list[StylePresetRecordDTO]:
        """Gets many workflows."""
        pass
```
New file (138 lines):

```python
import codecs
import csv
import json
from enum import Enum
from typing import Any, Optional

import pydantic
from fastapi import UploadFile
from pydantic import AliasChoices, BaseModel, ConfigDict, Field, TypeAdapter

from invokeai.app.util.metaenum import MetaEnum


class StylePresetNotFoundError(Exception):
    """Raised when a style preset is not found"""


class PresetData(BaseModel, extra="forbid"):
    positive_prompt: str = Field(description="Positive prompt")
    negative_prompt: str = Field(description="Negative prompt")


PresetDataValidator = TypeAdapter(PresetData)


class PresetType(str, Enum, metaclass=MetaEnum):
    User = "user"
    Default = "default"
    Project = "project"


class StylePresetChanges(BaseModel, extra="forbid"):
    name: Optional[str] = Field(default=None, description="The style preset's new name.")
    preset_data: Optional[PresetData] = Field(default=None, description="The updated data for style preset.")


class StylePresetWithoutId(BaseModel):
    name: str = Field(description="The name of the style preset.")
    preset_data: PresetData = Field(description="The preset data")
    type: PresetType = Field(description="The type of style preset")


class StylePresetRecordDTO(StylePresetWithoutId):
    id: str = Field(description="The style preset ID.")

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "StylePresetRecordDTO":
        data["preset_data"] = PresetDataValidator.validate_json(data.get("preset_data", ""))
        return StylePresetRecordDTOValidator.validate_python(data)


StylePresetRecordDTOValidator = TypeAdapter(StylePresetRecordDTO)


class StylePresetRecordWithImage(StylePresetRecordDTO):
    image: Optional[str] = Field(description="The path for image")


class StylePresetImportRow(BaseModel):
    name: str = Field(min_length=1, description="The name of the preset.")
    positive_prompt: str = Field(
        default="",
        description="The positive prompt for the preset.",
        validation_alias=AliasChoices("positive_prompt", "prompt"),
    )
    negative_prompt: str = Field(default="", description="The negative prompt for the preset.")

    model_config = ConfigDict(str_strip_whitespace=True, extra="forbid")


StylePresetImportList = list[StylePresetImportRow]
StylePresetImportListTypeAdapter = TypeAdapter(StylePresetImportList)


class UnsupportedFileTypeError(ValueError):
    """Raised when an unsupported file type is encountered"""

    pass


class InvalidPresetImportDataError(ValueError):
    """Raised when invalid preset import data is encountered"""

    pass


async def parse_presets_from_file(file: UploadFile) -> list[StylePresetWithoutId]:
    """Parses style presets from a file. The file must be a CSV or JSON file.

    If CSV, the file must have the following columns:
    - name
    - prompt (or positive_prompt)
    - negative_prompt

    If JSON, the file must be a list of objects with the following keys:
    - name
    - prompt (or positive_prompt)
    - negative_prompt

    Args:
        file (UploadFile): The file to parse.

    Returns:
        list[StylePresetWithoutId]: The parsed style presets.

    Raises:
        UnsupportedFileTypeError: If the file type is not supported.
        InvalidPresetImportDataError: If the data in the file is invalid.
    """
    if file.content_type not in ["text/csv", "application/json"]:
        raise UnsupportedFileTypeError()

    if file.content_type == "text/csv":
        csv_reader = csv.DictReader(codecs.iterdecode(file.file, "utf-8"))
        data = list(csv_reader)
    else:  # file.content_type == "application/json":
        json_data = await file.read()
        data = json.loads(json_data)

    try:
        imported_presets = StylePresetImportListTypeAdapter.validate_python(data)

        style_presets: list[StylePresetWithoutId] = []

        for imported in imported_presets:
            preset_data = PresetData(positive_prompt=imported.positive_prompt, negative_prompt=imported.negative_prompt)
            style_preset = StylePresetWithoutId(name=imported.name, preset_data=preset_data, type=PresetType.User)
            style_presets.append(style_preset)
    except pydantic.ValidationError as e:
        if file.content_type == "text/csv":
            msg = "Invalid CSV format: must include columns 'name', 'prompt', and 'negative_prompt' and name cannot be blank"
        else:  # file.content_type == "application/json":
            msg = "Invalid JSON format: must be a list of objects with keys 'name', 'prompt', and 'negative_prompt' and name cannot be blank"
        raise InvalidPresetImportDataError(msg) from e
    finally:
        file.file.close()

    return style_presets
```
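`parse_presets_from_file` accepts either a CSV (columns `name`, `prompt` or `positive_prompt`, `negative_prompt`) or a JSON list with the same keys, and funnels both through `StylePresetImportListTypeAdapter`. A sketch of the row validation on its own, using only the models defined above; the sample rows are made up.

```python
from invokeai.app.services.style_preset_records.style_preset_records_common import (
    StylePresetImportListTypeAdapter,
)

rows = [
    {"name": "Noir", "prompt": "{prompt}, film noir, hard shadows", "negative_prompt": "color"},
    {"name": "Pastel", "positive_prompt": "{prompt}, soft pastel palette", "negative_prompt": ""},
]

# Same validation step parse_presets_from_file performs after reading the file.
imported = StylePresetImportListTypeAdapter.validate_python(rows)
assert imported[0].positive_prompt.startswith("{prompt}")  # "prompt" accepted via AliasChoices
```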
@@ -0,0 +1,215 @@
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
|
||||
from invokeai.app.services.style_preset_records.style_preset_records_base import StylePresetRecordsStorageBase
|
||||
from invokeai.app.services.style_preset_records.style_preset_records_common import (
|
||||
PresetType,
|
||||
StylePresetChanges,
|
||||
StylePresetNotFoundError,
|
||||
StylePresetRecordDTO,
|
||||
StylePresetWithoutId,
|
||||
)
|
||||
from invokeai.app.util.misc import uuid_string
|
||||
|
||||
|
||||
class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
|
||||
def __init__(self, db: SqliteDatabase) -> None:
|
||||
super().__init__()
|
||||
self._lock = db.lock
|
||||
self._conn = db.conn
|
||||
self._cursor = self._conn.cursor()
|
||||
|
||||
def start(self, invoker: Invoker) -> None:
|
||||
self._invoker = invoker
|
||||
self._sync_default_style_presets()
|
||||
|
||||
def get(self, style_preset_id: str) -> StylePresetRecordDTO:
|
||||
"""Gets a style preset by ID."""
|
||||
try:
|
||||
self._lock.acquire()
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
SELECT *
|
||||
FROM style_presets
|
||||
WHERE id = ?;
|
||||
""",
|
||||
(style_preset_id,),
|
||||
)
|
||||
row = self._cursor.fetchone()
|
||||
if row is None:
|
||||
raise StylePresetNotFoundError(f"Style preset with id {style_preset_id} not found")
|
||||
return StylePresetRecordDTO.from_dict(dict(row))
|
||||
except Exception:
|
||||
self._conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
self._lock.release()
|
||||
|
||||
def create(self, style_preset: StylePresetWithoutId) -> StylePresetRecordDTO:
|
||||
style_preset_id = uuid_string()
|
||||
try:
|
||||
self._lock.acquire()
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
INSERT OR IGNORE INTO style_presets (
|
||||
id,
|
||||
name,
|
||||
preset_data,
|
||||
type
|
||||
)
|
||||
VALUES (?, ?, ?, ?);
|
||||
""",
|
||||
(
|
||||
style_preset_id,
|
||||
style_preset.name,
|
||||
style_preset.preset_data.model_dump_json(),
|
||||
style_preset.type,
|
||||
),
|
||||
)
|
||||
self._conn.commit()
|
||||
except Exception:
|
||||
self._conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
self._lock.release()
|
||||
return self.get(style_preset_id)
|
||||
|
||||
def create_many(self, style_presets: list[StylePresetWithoutId]) -> None:
|
||||
style_preset_ids = []
|
||||
try:
|
||||
self._lock.acquire()
|
||||
for style_preset in style_presets:
|
||||
style_preset_id = uuid_string()
|
||||
style_preset_ids.append(style_preset_id)
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
INSERT OR IGNORE INTO style_presets (
|
||||
id,
|
||||
name,
|
||||
preset_data,
|
||||
type
|
||||
)
|
||||
VALUES (?, ?, ?, ?);
|
||||
""",
|
||||
(
|
||||
style_preset_id,
|
||||
style_preset.name,
|
||||
style_preset.preset_data.model_dump_json(),
|
||||
style_preset.type,
|
||||
),
|
||||
)
|
||||
self._conn.commit()
|
||||
except Exception:
|
||||
self._conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
self._lock.release()
|
||||
|
||||
return None
|
||||
|
||||
    def update(self, style_preset_id: str, changes: StylePresetChanges) -> StylePresetRecordDTO:
        try:
            self._lock.acquire()
            # Change the name of a style preset
            if changes.name is not None:
                self._cursor.execute(
                    """--sql
                    UPDATE style_presets
                    SET name = ?
                    WHERE id = ?;
                    """,
                    (changes.name, style_preset_id),
                )

            # Change the preset data for a style preset
            if changes.preset_data is not None:
                self._cursor.execute(
                    """--sql
                    UPDATE style_presets
                    SET preset_data = ?
                    WHERE id = ?;
                    """,
                    (changes.preset_data.model_dump_json(), style_preset_id),
                )

            self._conn.commit()
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()
        return self.get(style_preset_id)

    def delete(self, style_preset_id: str) -> None:
        try:
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                DELETE from style_presets
                WHERE id = ?;
                """,
                (style_preset_id,),
            )
            self._conn.commit()
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()
        return None

    def get_many(self, type: PresetType | None = None) -> list[StylePresetRecordDTO]:
        try:
            self._lock.acquire()
            main_query = """
                SELECT
                    *
                FROM style_presets
                """

            if type is not None:
                main_query += "WHERE type = ? "

            main_query += "ORDER BY LOWER(name) ASC"

            if type is not None:
                self._cursor.execute(main_query, (type,))
            else:
                self._cursor.execute(main_query)

            rows = self._cursor.fetchall()
            style_presets = [StylePresetRecordDTO.from_dict(dict(row)) for row in rows]

            return style_presets
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()

    def _sync_default_style_presets(self) -> None:
        """Syncs default style presets to the database. Internal use only."""

        # First delete all existing default style presets
        try:
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                DELETE FROM style_presets
                WHERE type = "default";
                """
            )
            self._conn.commit()
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()
        # Next, parse and create the default style presets
        with self._lock, open(Path(__file__).parent / Path("default_style_presets.json"), "r") as file:
            presets = json.load(file)
            for preset in presets:
                style_preset = StylePresetWithoutId.model_validate(preset)
                self.create(style_preset)
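For orientation, a minimal usage sketch of the record-storage API in the hunk above. Only the create/get/update/delete/get_many methods are taken from the diff; the storage class name, its constructor, and the PresetData fields are assumptions for illustration.

# Hypothetical wiring -- class name and constructor are assumed, not taken from this diff.
storage = SqliteStylePresetRecordsStorage(db=db)

new_preset = storage.create(
    StylePresetWithoutId(
        name="Cinematic portrait",
        preset_data=PresetData(  # assumed fields: positive/negative prompt templates
            positive_prompt="{prompt}, cinematic lighting, 85mm",
            negative_prompt="blurry, low quality",
        ),
        type=PresetType.User,  # assumed enum member
    )
)

storage.update(new_preset.id, StylePresetChanges(name="Cinematic portrait v2"))  # .id assumed on the DTO
user_presets = storage.get_many(type=PresetType.User)  # sorted by name, per the query above
storage.delete(new_preset.id)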
@@ -13,3 +13,8 @@ class UrlServiceBase(ABC):
    def get_model_image_url(self, model_key: str) -> str:
        """Gets the URL for a model image"""
        pass

    @abstractmethod
    def get_style_preset_image_url(self, style_preset_id: str) -> str:
        """Gets the URL for a style preset image"""
        pass

@@ -19,3 +19,6 @@ class LocalUrlService(UrlServiceBase):

    def get_model_image_url(self, model_key: str) -> str:
        return f"{self._base_url_v2}/models/i/{model_key}/image"

    def get_style_preset_image_url(self, style_preset_id: str) -> str:
        return f"{self._base_url}/style_presets/i/{style_preset_id}/image"

@@ -81,7 +81,7 @@ def get_openapi_func(
        # Add the output map to the schema
        openapi_schema["components"]["schemas"]["InvocationOutputMap"] = {
            "type": "object",
            "properties": invocation_output_map_properties,
            "properties": dict(sorted(invocation_output_map_properties.items())),
            "required": invocation_output_map_required,
        }

@@ -1,90 +0,0 @@
|
||||
from pathlib import Path
|
||||
from typing import Literal
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
from einops import repeat
|
||||
from PIL import Image
|
||||
from torchvision.transforms import Compose
|
||||
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.backend.image_util.depth_anything.model.dpt import DPT_DINOv2
|
||||
from invokeai.backend.image_util.depth_anything.utilities.util import NormalizeImage, PrepareForNet, Resize
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
config = get_config()
|
||||
logger = InvokeAILogger.get_logger(config=config)
|
||||
|
||||
DEPTH_ANYTHING_MODELS = {
|
||||
"large": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitl14.pth?download=true",
|
||||
"base": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitb14.pth?download=true",
|
||||
"small": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vits14.pth?download=true",
|
||||
}
|
||||
|
||||
|
||||
transform = Compose(
|
||||
[
|
||||
Resize(
|
||||
width=518,
|
||||
height=518,
|
||||
resize_target=False,
|
||||
keep_aspect_ratio=True,
|
||||
ensure_multiple_of=14,
|
||||
resize_method="lower_bound",
|
||||
image_interpolation_method=cv2.INTER_CUBIC,
|
||||
),
|
||||
NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
|
||||
PrepareForNet(),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
class DepthAnythingDetector:
|
||||
def __init__(self, model: DPT_DINOv2, device: torch.device) -> None:
|
||||
self.model = model
|
||||
self.device = device
|
||||
|
||||
@staticmethod
|
||||
def load_model(
|
||||
model_path: Path, device: torch.device, model_size: Literal["large", "base", "small"] = "small"
|
||||
) -> DPT_DINOv2:
|
||||
match model_size:
|
||||
case "small":
|
||||
model = DPT_DINOv2(encoder="vits", features=64, out_channels=[48, 96, 192, 384])
|
||||
case "base":
|
||||
model = DPT_DINOv2(encoder="vitb", features=128, out_channels=[96, 192, 384, 768])
|
||||
case "large":
|
||||
model = DPT_DINOv2(encoder="vitl", features=256, out_channels=[256, 512, 1024, 1024])
|
||||
|
||||
model.load_state_dict(torch.load(model_path.as_posix(), map_location="cpu"))
|
||||
model.eval()
|
||||
|
||||
model.to(device)
|
||||
return model
|
||||
|
||||
def __call__(self, image: Image.Image, resolution: int = 512) -> Image.Image:
|
||||
if not self.model:
|
||||
logger.warn("DepthAnything model was not loaded. Returning original image")
|
||||
return image
|
||||
|
||||
np_image = np.array(image, dtype=np.uint8)
|
||||
np_image = np_image[:, :, ::-1] / 255.0
|
||||
|
||||
image_height, image_width = np_image.shape[:2]
|
||||
np_image = transform({"image": np_image})["image"]
|
||||
tensor_image = torch.from_numpy(np_image).unsqueeze(0).to(self.device)
|
||||
|
||||
with torch.no_grad():
|
||||
depth = self.model(tensor_image)
|
||||
depth = F.interpolate(depth[None], (image_height, image_width), mode="bilinear", align_corners=False)[0, 0]
|
||||
depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
|
||||
|
||||
depth_map = repeat(depth, "h w -> h w 3").cpu().numpy().astype(np.uint8)
|
||||
depth_map = Image.fromarray(depth_map)
|
||||
|
||||
new_height = int(image_height * (resolution / image_width))
|
||||
depth_map = depth_map.resize((resolution, new_height))
|
||||
|
||||
return depth_map
|
||||
@@ -0,0 +1,31 @@
from typing import Optional

import torch
from PIL import Image
from transformers.pipelines import DepthEstimationPipeline

from invokeai.backend.raw_model import RawModel


class DepthAnythingPipeline(RawModel):
    """Custom wrapper for the Depth Estimation pipeline from transformers adding compatibility
    for Invoke's Model Management System"""

    def __init__(self, pipeline: DepthEstimationPipeline) -> None:
        self._pipeline = pipeline

    def generate_depth(self, image: Image.Image) -> Image.Image:
        depth_map = self._pipeline(image)["depth"]
        assert isinstance(depth_map, Image.Image)
        return depth_map

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None):
        if device is not None and device.type not in {"cpu", "cuda"}:
            device = None
        self._pipeline.model.to(device=device, dtype=dtype)
        self._pipeline.device = self._pipeline.model.device

    def calc_size(self) -> int:
        from invokeai.backend.model_manager.load.model_util import calc_module_size

        return calc_module_size(self._pipeline.model)
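A brief, hedged sketch of how the wrapper added above might be exercised. The transformers depth-estimation pipeline call is standard, but the Hugging Face model id is an assumption and not part of this diff.

from PIL import Image
from transformers import pipeline

# Assumed model id -- any depth-estimation checkpoint compatible with the task should work.
hf_pipeline = pipeline("depth-estimation", model="LiheYoung/depth-anything-small-hf")
depth_anything = DepthAnythingPipeline(hf_pipeline)  # wrapper class added in the hunk above

image = Image.open("input.png").convert("RGB")
depth_map = depth_anything.generate_depth(image)
depth_map.save("depth.png")
print(f"estimated in-memory size: {depth_anything.calc_size()} bytes")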
@@ -1,145 +0,0 @@
|
||||
import torch.nn as nn
|
||||
|
||||
|
||||
def _make_scratch(in_shape, out_shape, groups=1, expand=False):
|
||||
scratch = nn.Module()
|
||||
|
||||
out_shape1 = out_shape
|
||||
out_shape2 = out_shape
|
||||
out_shape3 = out_shape
|
||||
if len(in_shape) >= 4:
|
||||
out_shape4 = out_shape
|
||||
|
||||
if expand:
|
||||
out_shape1 = out_shape
|
||||
out_shape2 = out_shape * 2
|
||||
out_shape3 = out_shape * 4
|
||||
if len(in_shape) >= 4:
|
||||
out_shape4 = out_shape * 8
|
||||
|
||||
scratch.layer1_rn = nn.Conv2d(
|
||||
in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
||||
)
|
||||
scratch.layer2_rn = nn.Conv2d(
|
||||
in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
||||
)
|
||||
scratch.layer3_rn = nn.Conv2d(
|
||||
in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
||||
)
|
||||
if len(in_shape) >= 4:
|
||||
scratch.layer4_rn = nn.Conv2d(
|
||||
in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
||||
)
|
||||
|
||||
return scratch
|
||||
|
||||
|
||||
class ResidualConvUnit(nn.Module):
|
||||
"""Residual convolution module."""
|
||||
|
||||
def __init__(self, features, activation, bn):
|
||||
"""Init.
|
||||
|
||||
Args:
|
||||
features (int): number of features
|
||||
"""
|
||||
super().__init__()
|
||||
|
||||
self.bn = bn
|
||||
|
||||
self.groups = 1
|
||||
|
||||
self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
|
||||
|
||||
self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
|
||||
|
||||
if self.bn:
|
||||
self.bn1 = nn.BatchNorm2d(features)
|
||||
self.bn2 = nn.BatchNorm2d(features)
|
||||
|
||||
self.activation = activation
|
||||
|
||||
self.skip_add = nn.quantized.FloatFunctional()
|
||||
|
||||
def forward(self, x):
|
||||
"""Forward pass.
|
||||
|
||||
Args:
|
||||
x (tensor): input
|
||||
|
||||
Returns:
|
||||
tensor: output
|
||||
"""
|
||||
|
||||
out = self.activation(x)
|
||||
out = self.conv1(out)
|
||||
if self.bn:
|
||||
out = self.bn1(out)
|
||||
|
||||
out = self.activation(out)
|
||||
out = self.conv2(out)
|
||||
if self.bn:
|
||||
out = self.bn2(out)
|
||||
|
||||
if self.groups > 1:
|
||||
out = self.conv_merge(out)
|
||||
|
||||
return self.skip_add.add(out, x)
|
||||
|
||||
|
||||
class FeatureFusionBlock(nn.Module):
|
||||
"""Feature fusion block."""
|
||||
|
||||
def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None):
|
||||
"""Init.
|
||||
|
||||
Args:
|
||||
features (int): number of features
|
||||
"""
|
||||
super(FeatureFusionBlock, self).__init__()
|
||||
|
||||
self.deconv = deconv
|
||||
self.align_corners = align_corners
|
||||
|
||||
self.groups = 1
|
||||
|
||||
self.expand = expand
|
||||
out_features = features
|
||||
if self.expand:
|
||||
out_features = features // 2
|
||||
|
||||
self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
|
||||
|
||||
self.resConfUnit1 = ResidualConvUnit(features, activation, bn)
|
||||
self.resConfUnit2 = ResidualConvUnit(features, activation, bn)
|
||||
|
||||
self.skip_add = nn.quantized.FloatFunctional()
|
||||
|
||||
self.size = size
|
||||
|
||||
def forward(self, *xs, size=None):
|
||||
"""Forward pass.
|
||||
|
||||
Returns:
|
||||
tensor: output
|
||||
"""
|
||||
output = xs[0]
|
||||
|
||||
if len(xs) == 2:
|
||||
res = self.resConfUnit1(xs[1])
|
||||
output = self.skip_add.add(output, res)
|
||||
|
||||
output = self.resConfUnit2(output)
|
||||
|
||||
if (size is None) and (self.size is None):
|
||||
modifier = {"scale_factor": 2}
|
||||
elif size is None:
|
||||
modifier = {"size": self.size}
|
||||
else:
|
||||
modifier = {"size": size}
|
||||
|
||||
output = nn.functional.interpolate(output, **modifier, mode="bilinear", align_corners=self.align_corners)
|
||||
|
||||
output = self.out_conv(output)
|
||||
|
||||
return output
|
||||
@@ -1,183 +0,0 @@
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
|
||||
from invokeai.backend.image_util.depth_anything.model.blocks import FeatureFusionBlock, _make_scratch
|
||||
|
||||
torchhub_path = Path(__file__).parent.parent / "torchhub"
|
||||
|
||||
|
||||
def _make_fusion_block(features, use_bn, size=None):
|
||||
return FeatureFusionBlock(
|
||||
features,
|
||||
nn.ReLU(False),
|
||||
deconv=False,
|
||||
bn=use_bn,
|
||||
expand=False,
|
||||
align_corners=True,
|
||||
size=size,
|
||||
)
|
||||
|
||||
|
||||
class DPTHead(nn.Module):
|
||||
def __init__(self, nclass, in_channels, features, out_channels, use_bn=False, use_clstoken=False):
|
||||
super(DPTHead, self).__init__()
|
||||
|
||||
self.nclass = nclass
|
||||
self.use_clstoken = use_clstoken
|
||||
|
||||
self.projects = nn.ModuleList(
|
||||
[
|
||||
nn.Conv2d(
|
||||
in_channels=in_channels,
|
||||
out_channels=out_channel,
|
||||
kernel_size=1,
|
||||
stride=1,
|
||||
padding=0,
|
||||
)
|
||||
for out_channel in out_channels
|
||||
]
|
||||
)
|
||||
|
||||
self.resize_layers = nn.ModuleList(
|
||||
[
|
||||
nn.ConvTranspose2d(
|
||||
in_channels=out_channels[0], out_channels=out_channels[0], kernel_size=4, stride=4, padding=0
|
||||
),
|
||||
nn.ConvTranspose2d(
|
||||
in_channels=out_channels[1], out_channels=out_channels[1], kernel_size=2, stride=2, padding=0
|
||||
),
|
||||
nn.Identity(),
|
||||
nn.Conv2d(
|
||||
in_channels=out_channels[3], out_channels=out_channels[3], kernel_size=3, stride=2, padding=1
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
if use_clstoken:
|
||||
self.readout_projects = nn.ModuleList()
|
||||
for _ in range(len(self.projects)):
|
||||
self.readout_projects.append(nn.Sequential(nn.Linear(2 * in_channels, in_channels), nn.GELU()))
|
||||
|
||||
self.scratch = _make_scratch(
|
||||
out_channels,
|
||||
features,
|
||||
groups=1,
|
||||
expand=False,
|
||||
)
|
||||
|
||||
self.scratch.stem_transpose = None
|
||||
|
||||
self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
|
||||
self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
|
||||
self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
|
||||
self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
|
||||
|
||||
head_features_1 = features
|
||||
head_features_2 = 32
|
||||
|
||||
if nclass > 1:
|
||||
self.scratch.output_conv = nn.Sequential(
|
||||
nn.Conv2d(head_features_1, head_features_1, kernel_size=3, stride=1, padding=1),
|
||||
nn.ReLU(True),
|
||||
nn.Conv2d(head_features_1, nclass, kernel_size=1, stride=1, padding=0),
|
||||
)
|
||||
else:
|
||||
self.scratch.output_conv1 = nn.Conv2d(
|
||||
head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1
|
||||
)
|
||||
|
||||
self.scratch.output_conv2 = nn.Sequential(
|
||||
nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1),
|
||||
nn.ReLU(True),
|
||||
nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0),
|
||||
nn.ReLU(True),
|
||||
nn.Identity(),
|
||||
)
|
||||
|
||||
def forward(self, out_features, patch_h, patch_w):
|
||||
out = []
|
||||
for i, x in enumerate(out_features):
|
||||
if self.use_clstoken:
|
||||
x, cls_token = x[0], x[1]
|
||||
readout = cls_token.unsqueeze(1).expand_as(x)
|
||||
x = self.readout_projects[i](torch.cat((x, readout), -1))
|
||||
else:
|
||||
x = x[0]
|
||||
|
||||
x = x.permute(0, 2, 1).reshape((x.shape[0], x.shape[-1], patch_h, patch_w))
|
||||
|
||||
x = self.projects[i](x)
|
||||
x = self.resize_layers[i](x)
|
||||
|
||||
out.append(x)
|
||||
|
||||
layer_1, layer_2, layer_3, layer_4 = out
|
||||
|
||||
layer_1_rn = self.scratch.layer1_rn(layer_1)
|
||||
layer_2_rn = self.scratch.layer2_rn(layer_2)
|
||||
layer_3_rn = self.scratch.layer3_rn(layer_3)
|
||||
layer_4_rn = self.scratch.layer4_rn(layer_4)
|
||||
|
||||
path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:])
|
||||
path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:])
|
||||
path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:])
|
||||
path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
|
||||
|
||||
out = self.scratch.output_conv1(path_1)
|
||||
out = F.interpolate(out, (int(patch_h * 14), int(patch_w * 14)), mode="bilinear", align_corners=True)
|
||||
out = self.scratch.output_conv2(out)
|
||||
|
||||
return out
|
||||
|
||||
|
||||
class DPT_DINOv2(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
features,
|
||||
out_channels,
|
||||
encoder="vitl",
|
||||
use_bn=False,
|
||||
use_clstoken=False,
|
||||
):
|
||||
super(DPT_DINOv2, self).__init__()
|
||||
|
||||
assert encoder in ["vits", "vitb", "vitl"]
|
||||
|
||||
# # in case the Internet connection is not stable, please load the DINOv2 locally
|
||||
# if use_local:
|
||||
# self.pretrained = torch.hub.load(
|
||||
# torchhub_path / "facebookresearch_dinov2_main",
|
||||
# "dinov2_{:}14".format(encoder),
|
||||
# source="local",
|
||||
# pretrained=False,
|
||||
# )
|
||||
# else:
|
||||
# self.pretrained = torch.hub.load(
|
||||
# "facebookresearch/dinov2",
|
||||
# "dinov2_{:}14".format(encoder),
|
||||
# )
|
||||
|
||||
self.pretrained = torch.hub.load(
|
||||
"facebookresearch/dinov2",
|
||||
"dinov2_{:}14".format(encoder),
|
||||
)
|
||||
|
||||
dim = self.pretrained.blocks[0].attn.qkv.in_features
|
||||
|
||||
self.depth_head = DPTHead(1, dim, features, out_channels=out_channels, use_bn=use_bn, use_clstoken=use_clstoken)
|
||||
|
||||
def forward(self, x):
|
||||
h, w = x.shape[-2:]
|
||||
|
||||
features = self.pretrained.get_intermediate_layers(x, 4, return_class_token=True)
|
||||
|
||||
patch_h, patch_w = h // 14, w // 14
|
||||
|
||||
depth = self.depth_head(features, patch_h, patch_w)
|
||||
depth = F.interpolate(depth, size=(h, w), mode="bilinear", align_corners=True)
|
||||
depth = F.relu(depth)
|
||||
|
||||
return depth.squeeze(1)
|
||||
@@ -1,227 +0,0 @@
|
||||
import math
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
|
||||
|
||||
def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
|
||||
"""Rezise the sample to ensure the given size. Keeps aspect ratio.
|
||||
|
||||
Args:
|
||||
sample (dict): sample
|
||||
size (tuple): image size
|
||||
|
||||
Returns:
|
||||
tuple: new size
|
||||
"""
|
||||
shape = list(sample["disparity"].shape)
|
||||
|
||||
if shape[0] >= size[0] and shape[1] >= size[1]:
|
||||
return sample
|
||||
|
||||
scale = [0, 0]
|
||||
scale[0] = size[0] / shape[0]
|
||||
scale[1] = size[1] / shape[1]
|
||||
|
||||
scale = max(scale)
|
||||
|
||||
shape[0] = math.ceil(scale * shape[0])
|
||||
shape[1] = math.ceil(scale * shape[1])
|
||||
|
||||
# resize
|
||||
sample["image"] = cv2.resize(sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method)
|
||||
|
||||
sample["disparity"] = cv2.resize(sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST)
|
||||
sample["mask"] = cv2.resize(
|
||||
sample["mask"].astype(np.float32),
|
||||
tuple(shape[::-1]),
|
||||
interpolation=cv2.INTER_NEAREST,
|
||||
)
|
||||
sample["mask"] = sample["mask"].astype(bool)
|
||||
|
||||
return tuple(shape)
|
||||
|
||||
|
||||
class Resize(object):
|
||||
"""Resize sample to given size (width, height)."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
width,
|
||||
height,
|
||||
resize_target=True,
|
||||
keep_aspect_ratio=False,
|
||||
ensure_multiple_of=1,
|
||||
resize_method="lower_bound",
|
||||
image_interpolation_method=cv2.INTER_AREA,
|
||||
):
|
||||
"""Init.
|
||||
|
||||
Args:
|
||||
width (int): desired output width
|
||||
height (int): desired output height
|
||||
resize_target (bool, optional):
|
||||
True: Resize the full sample (image, mask, target).
|
||||
False: Resize image only.
|
||||
Defaults to True.
|
||||
keep_aspect_ratio (bool, optional):
|
||||
True: Keep the aspect ratio of the input sample.
|
||||
Output sample might not have the given width and height, and
|
||||
resize behaviour depends on the parameter 'resize_method'.
|
||||
Defaults to False.
|
||||
ensure_multiple_of (int, optional):
|
||||
Output width and height is constrained to be multiple of this parameter.
|
||||
Defaults to 1.
|
||||
resize_method (str, optional):
|
||||
"lower_bound": Output will be at least as large as the given size.
|
||||
"upper_bound": Output will be at max as large as the given size. (Output size might be smaller
|
||||
than given size.)
|
||||
"minimal": Scale as least as possible. (Output size might be smaller than given size.)
|
||||
Defaults to "lower_bound".
|
||||
"""
|
||||
self.__width = width
|
||||
self.__height = height
|
||||
|
||||
self.__resize_target = resize_target
|
||||
self.__keep_aspect_ratio = keep_aspect_ratio
|
||||
self.__multiple_of = ensure_multiple_of
|
||||
self.__resize_method = resize_method
|
||||
self.__image_interpolation_method = image_interpolation_method
|
||||
|
||||
def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
|
||||
y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
|
||||
|
||||
if max_val is not None and y > max_val:
|
||||
y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
|
||||
|
||||
if y < min_val:
|
||||
y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
|
||||
|
||||
return y
|
||||
|
||||
def get_size(self, width, height):
|
||||
# determine new height and width
|
||||
scale_height = self.__height / height
|
||||
scale_width = self.__width / width
|
||||
|
||||
if self.__keep_aspect_ratio:
|
||||
if self.__resize_method == "lower_bound":
|
||||
# scale such that output size is lower bound
|
||||
if scale_width > scale_height:
|
||||
# fit width
|
||||
scale_height = scale_width
|
||||
else:
|
||||
# fit height
|
||||
scale_width = scale_height
|
||||
elif self.__resize_method == "upper_bound":
|
||||
# scale such that output size is upper bound
|
||||
if scale_width < scale_height:
|
||||
# fit width
|
||||
scale_height = scale_width
|
||||
else:
|
||||
# fit height
|
||||
scale_width = scale_height
|
||||
elif self.__resize_method == "minimal":
|
||||
# scale as least as possbile
|
||||
if abs(1 - scale_width) < abs(1 - scale_height):
|
||||
# fit width
|
||||
scale_height = scale_width
|
||||
else:
|
||||
# fit height
|
||||
scale_width = scale_height
|
||||
else:
|
||||
raise ValueError(f"resize_method {self.__resize_method} not implemented")
|
||||
|
||||
if self.__resize_method == "lower_bound":
|
||||
new_height = self.constrain_to_multiple_of(scale_height * height, min_val=self.__height)
|
||||
new_width = self.constrain_to_multiple_of(scale_width * width, min_val=self.__width)
|
||||
elif self.__resize_method == "upper_bound":
|
||||
new_height = self.constrain_to_multiple_of(scale_height * height, max_val=self.__height)
|
||||
new_width = self.constrain_to_multiple_of(scale_width * width, max_val=self.__width)
|
||||
elif self.__resize_method == "minimal":
|
||||
new_height = self.constrain_to_multiple_of(scale_height * height)
|
||||
new_width = self.constrain_to_multiple_of(scale_width * width)
|
||||
else:
|
||||
raise ValueError(f"resize_method {self.__resize_method} not implemented")
|
||||
|
||||
return (new_width, new_height)
|
||||
|
||||
def __call__(self, sample):
|
||||
width, height = self.get_size(sample["image"].shape[1], sample["image"].shape[0])
|
||||
|
||||
# resize sample
|
||||
sample["image"] = cv2.resize(
|
||||
sample["image"],
|
||||
(width, height),
|
||||
interpolation=self.__image_interpolation_method,
|
||||
)
|
||||
|
||||
if self.__resize_target:
|
||||
if "disparity" in sample:
|
||||
sample["disparity"] = cv2.resize(
|
||||
sample["disparity"],
|
||||
(width, height),
|
||||
interpolation=cv2.INTER_NEAREST,
|
||||
)
|
||||
|
||||
if "depth" in sample:
|
||||
sample["depth"] = cv2.resize(sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST)
|
||||
|
||||
if "semseg_mask" in sample:
|
||||
# sample["semseg_mask"] = cv2.resize(
|
||||
# sample["semseg_mask"], (width, height), interpolation=cv2.INTER_NEAREST
|
||||
# )
|
||||
sample["semseg_mask"] = F.interpolate(
|
||||
torch.from_numpy(sample["semseg_mask"]).float()[None, None, ...], (height, width), mode="nearest"
|
||||
).numpy()[0, 0]
|
||||
|
||||
if "mask" in sample:
|
||||
sample["mask"] = cv2.resize(
|
||||
sample["mask"].astype(np.float32),
|
||||
(width, height),
|
||||
interpolation=cv2.INTER_NEAREST,
|
||||
)
|
||||
# sample["mask"] = sample["mask"].astype(bool)
|
||||
|
||||
# print(sample['image'].shape, sample['depth'].shape)
|
||||
return sample
|
||||
|
||||
|
||||
class NormalizeImage(object):
|
||||
"""Normlize image by given mean and std."""
|
||||
|
||||
def __init__(self, mean, std):
|
||||
self.__mean = mean
|
||||
self.__std = std
|
||||
|
||||
def __call__(self, sample):
|
||||
sample["image"] = (sample["image"] - self.__mean) / self.__std
|
||||
|
||||
return sample
|
||||
|
||||
|
||||
class PrepareForNet(object):
|
||||
"""Prepare sample for usage as network input."""
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def __call__(self, sample):
|
||||
image = np.transpose(sample["image"], (2, 0, 1))
|
||||
sample["image"] = np.ascontiguousarray(image).astype(np.float32)
|
||||
|
||||
if "mask" in sample:
|
||||
sample["mask"] = sample["mask"].astype(np.float32)
|
||||
sample["mask"] = np.ascontiguousarray(sample["mask"])
|
||||
|
||||
if "depth" in sample:
|
||||
depth = sample["depth"].astype(np.float32)
|
||||
sample["depth"] = np.ascontiguousarray(depth)
|
||||
|
||||
if "semseg_mask" in sample:
|
||||
sample["semseg_mask"] = sample["semseg_mask"].astype(np.float32)
|
||||
sample["semseg_mask"] = np.ascontiguousarray(sample["semseg_mask"])
|
||||
|
||||
return sample
|
||||
@@ -220,11 +220,17 @@ class LoKRLayer(LoRALayerBase):
        if self.w1 is None:
            self.w1_a = values["lokr_w1_a"]
            self.w1_b = values["lokr_w1_b"]
        else:
            self.w1_b = None
            self.w1_a = None

        self.w2 = values.get("lokr_w2", None)
        if self.w2 is None:
            self.w2_a = values["lokr_w2_a"]
            self.w2_b = values["lokr_w2_b"]
        else:
            self.w2_a = None
            self.w2_b = None

        self.t2 = values.get("lokr_t2", None)

@@ -372,7 +378,39 @@ class IA3Layer(LoRALayerBase):
        self.on_input = self.on_input.to(device=device, dtype=dtype)


AnyLoRALayer = Union[LoRALayer, LoHALayer, LoKRLayer, FullLayer, IA3Layer]
class NormLayer(LoRALayerBase):
    # bias handled in LoRALayerBase(calc_size, to)
    # weight: torch.Tensor
    # bias: Optional[torch.Tensor]

    def __init__(
        self,
        layer_key: str,
        values: Dict[str, torch.Tensor],
    ):
        super().__init__(layer_key, values)

        self.weight = values["w_norm"]
        self.bias = values.get("b_norm", None)

        self.rank = None  # unscaled
        self.check_keys(values, {"w_norm", "b_norm"})

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        return self.weight

    def calc_size(self) -> int:
        model_size = super().calc_size()
        model_size += self.weight.nelement() * self.weight.element_size()
        return model_size

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        super().to(device=device, dtype=dtype)

        self.weight = self.weight.to(device=device, dtype=dtype)


AnyLoRALayer = Union[LoRALayer, LoHALayer, LoKRLayer, FullLayer, IA3Layer, NormLayer]


class LoRAModelRaw(RawModel):  # (torch.nn.Module):
@@ -513,6 +551,10 @@ class LoRAModelRaw(RawModel):  # (torch.nn.Module):
            elif "on_input" in values:
                layer = IA3Layer(layer_key, values)

            # norms
            elif "w_norm" in values:
                layer = NormLayer(layer_key, values)

            else:
                print(f">> Encountered unknown lora layer module in {model.name}: {layer_key} - {list(values.keys())}")
                raise Exception("Unknown lora format!")

@@ -11,6 +11,7 @@ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from transformers import CLIPTokenizer

from invokeai.backend.image_util.depth_anything.depth_anything_pipeline import DepthAnythingPipeline
from invokeai.backend.image_util.grounding_dino.grounding_dino_pipeline import GroundingDinoPipeline
from invokeai.backend.image_util.segment_anything.segment_anything_pipeline import SegmentAnythingPipeline
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
@@ -45,6 +46,7 @@ def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int:
            SpandrelImageToImageModel,
            GroundingDinoPipeline,
            SegmentAnythingPipeline,
            DepthAnythingPipeline,
        ),
    ):
        return model.calc_size()

@@ -53,64 +53,63 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@chakra-ui/react-use-size": "^2.1.0",
|
||||
"@dagrejs/dagre": "^1.1.2",
|
||||
"@dagrejs/graphlib": "^2.2.2",
|
||||
"@dagrejs/dagre": "^1.1.3",
|
||||
"@dagrejs/graphlib": "^2.2.3",
|
||||
"@dnd-kit/core": "^6.1.0",
|
||||
"@dnd-kit/sortable": "^8.0.0",
|
||||
"@dnd-kit/utilities": "^3.2.2",
|
||||
"@fontsource-variable/inter": "^5.0.18",
|
||||
"@invoke-ai/ui-library": "^0.0.25",
|
||||
"@nanostores/react": "^0.7.2",
|
||||
"@fontsource-variable/inter": "^5.0.20",
|
||||
"@invoke-ai/ui-library": "^0.0.29",
|
||||
"@nanostores/react": "^0.7.3",
|
||||
"@reduxjs/toolkit": "2.2.3",
|
||||
"@roarr/browser-log-writer": "^1.3.0",
|
||||
"chakra-react-select": "^4.7.6",
|
||||
"compare-versions": "^6.1.0",
|
||||
"chakra-react-select": "^4.9.1",
|
||||
"compare-versions": "^6.1.1",
|
||||
"dateformat": "^5.0.3",
|
||||
"fracturedjsonjs": "^4.0.1",
|
||||
"framer-motion": "^11.1.8",
|
||||
"i18next": "^23.11.3",
|
||||
"i18next-http-backend": "^2.5.1",
|
||||
"fracturedjsonjs": "^4.0.2",
|
||||
"framer-motion": "^11.3.24",
|
||||
"i18next": "^23.12.2",
|
||||
"i18next-http-backend": "^2.5.2",
|
||||
"idb-keyval": "^6.2.1",
|
||||
"jsondiffpatch": "^0.6.0",
|
||||
"konva": "^9.3.6",
|
||||
"konva": "^9.3.14",
|
||||
"lodash-es": "^4.17.21",
|
||||
"nanostores": "^0.10.3",
|
||||
"nanostores": "^0.11.2",
|
||||
"new-github-issue-url": "^1.0.0",
|
||||
"overlayscrollbars": "^2.7.3",
|
||||
"overlayscrollbars": "^2.10.0",
|
||||
"overlayscrollbars-react": "^0.5.6",
|
||||
"query-string": "^9.0.0",
|
||||
"query-string": "^9.1.0",
|
||||
"react": "^18.3.1",
|
||||
"react-colorful": "^5.6.1",
|
||||
"react-dom": "^18.3.1",
|
||||
"react-dropzone": "^14.2.3",
|
||||
"react-error-boundary": "^4.0.13",
|
||||
"react-hook-form": "^7.51.4",
|
||||
"react-hook-form": "^7.52.2",
|
||||
"react-hotkeys-hook": "4.5.0",
|
||||
"react-i18next": "^14.1.1",
|
||||
"react-icons": "^5.2.0",
|
||||
"react-i18next": "^14.1.3",
|
||||
"react-icons": "^5.2.1",
|
||||
"react-konva": "^18.2.10",
|
||||
"react-redux": "9.1.2",
|
||||
"react-resizable-panels": "^2.0.19",
|
||||
"react-resizable-panels": "^2.0.23",
|
||||
"react-select": "5.8.0",
|
||||
"react-use": "^17.5.0",
|
||||
"react-virtuoso": "^4.7.10",
|
||||
"reactflow": "^11.11.3",
|
||||
"react-use": "^17.5.1",
|
||||
"react-virtuoso": "^4.9.0",
|
||||
"reactflow": "^11.11.4",
|
||||
"redux-dynamic-middlewares": "^2.2.0",
|
||||
"redux-remember": "^5.1.0",
|
||||
"redux-undo": "^1.1.0",
|
||||
"rfdc": "^1.3.1",
|
||||
"rfdc": "^1.4.1",
|
||||
"roarr": "^7.21.1",
|
||||
"serialize-error": "^11.0.3",
|
||||
"socket.io-client": "^4.7.5",
|
||||
"use-debounce": "^10.0.0",
|
||||
"use-debounce": "^10.0.2",
|
||||
"use-device-pixel-ratio": "^1.1.2",
|
||||
"use-image": "^1.1.1",
|
||||
"uuid": "^9.0.1",
|
||||
"zod": "^3.23.6",
|
||||
"zod-validation-error": "^3.2.0"
|
||||
"uuid": "^10.0.0",
|
||||
"zod": "^3.23.8",
|
||||
"zod-validation-error": "^3.3.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@chakra-ui/react": "^2.8.2",
|
||||
"react": "^18.2.0",
|
||||
"react-dom": "^18.2.0",
|
||||
"ts-toolbelt": "^9.6.0"
|
||||
@@ -118,38 +117,38 @@
|
||||
"devDependencies": {
|
||||
"@invoke-ai/eslint-config-react": "^0.0.14",
|
||||
"@invoke-ai/prettier-config-react": "^0.0.7",
|
||||
"@storybook/addon-essentials": "^8.0.10",
|
||||
"@storybook/addon-interactions": "^8.0.10",
|
||||
"@storybook/addon-links": "^8.0.10",
|
||||
"@storybook/addon-storysource": "^8.0.10",
|
||||
"@storybook/manager-api": "^8.0.10",
|
||||
"@storybook/react": "^8.0.10",
|
||||
"@storybook/react-vite": "^8.0.10",
|
||||
"@storybook/theming": "^8.0.10",
|
||||
"@storybook/addon-essentials": "^8.2.8",
|
||||
"@storybook/addon-interactions": "^8.2.8",
|
||||
"@storybook/addon-links": "^8.2.8",
|
||||
"@storybook/addon-storysource": "^8.2.8",
|
||||
"@storybook/manager-api": "^8.2.8",
|
||||
"@storybook/react": "^8.2.8",
|
||||
"@storybook/react-vite": "^8.2.8",
|
||||
"@storybook/theming": "^8.2.8",
|
||||
"@types/dateformat": "^5.0.2",
|
||||
"@types/lodash-es": "^4.17.12",
|
||||
"@types/node": "^20.12.10",
|
||||
"@types/react": "^18.3.1",
|
||||
"@types/node": "^20.14.15",
|
||||
"@types/react": "^18.3.3",
|
||||
"@types/react-dom": "^18.3.0",
|
||||
"@types/uuid": "^9.0.8",
|
||||
"@vitejs/plugin-react-swc": "^3.6.0",
|
||||
"@types/uuid": "^10.0.0",
|
||||
"@vitejs/plugin-react-swc": "^3.7.0",
|
||||
"@vitest/coverage-v8": "^1.5.0",
|
||||
"@vitest/ui": "^1.5.0",
|
||||
"concurrently": "^8.2.2",
|
||||
"dpdm": "^3.14.0",
|
||||
"eslint": "^8.57.0",
|
||||
"eslint-plugin-i18next": "^6.0.3",
|
||||
"eslint-plugin-i18next": "^6.0.9",
|
||||
"eslint-plugin-path": "^1.3.0",
|
||||
"knip": "^5.12.3",
|
||||
"knip": "^5.27.2",
|
||||
"openapi-types": "^12.1.3",
|
||||
"openapi-typescript": "^6.7.5",
|
||||
"prettier": "^3.2.5",
|
||||
"openapi-typescript": "^7.3.0",
|
||||
"prettier": "^3.3.3",
|
||||
"rollup-plugin-visualizer": "^5.12.0",
|
||||
"storybook": "^8.0.10",
|
||||
"storybook": "^8.2.8",
|
||||
"ts-toolbelt": "^9.6.0",
|
||||
"tsafe": "^1.6.6",
|
||||
"typescript": "^5.4.5",
|
||||
"vite": "^5.2.11",
|
||||
"tsafe": "^1.7.2",
|
||||
"typescript": "^5.5.4",
|
||||
"vite": "^5.4.0",
|
||||
"vite-plugin-css-injected-by-js": "^3.5.1",
|
||||
"vite-plugin-dts": "^3.9.1",
|
||||
"vite-plugin-eslint": "^1.8.1",
|
||||
|
||||
5842 invokeai/frontend/web/pnpm-lock.yaml (generated)
@@ -200,6 +200,7 @@
|
||||
"delete": "Delete",
|
||||
"depthAnything": "Depth Anything",
|
||||
"depthAnythingDescription": "Depth map generation using the Depth Anything technique",
|
||||
"depthAnythingSmallV2": "Small V2",
|
||||
"depthMidas": "Depth (Midas)",
|
||||
"depthMidasDescription": "Depth map generation using Midas",
|
||||
"depthZoe": "Depth (Zoe)",
|
||||
@@ -1140,6 +1141,8 @@
|
||||
"imageSavingFailed": "Image Saving Failed",
|
||||
"imageUploaded": "Image Uploaded",
|
||||
"imageUploadFailed": "Image Upload Failed",
|
||||
"importFailed": "Import Failed",
|
||||
"importSuccessful": "Import Successful",
|
||||
"invalidUpload": "Invalid Upload",
|
||||
"loadedWithWarnings": "Workflow Loaded with Warnings",
|
||||
"maskSavedAssets": "Mask Saved to Assets",
|
||||
@@ -1688,6 +1691,52 @@
|
||||
"missingUpscaleModel": "Missing upscale model",
|
||||
"missingTileControlNetModel": "No valid tile ControlNet models installed"
|
||||
},
|
||||
"stylePresets": {
|
||||
"active": "Active",
|
||||
"choosePromptTemplate": "Choose Prompt Template",
|
||||
"clearTemplateSelection": "Clear Template Selection",
|
||||
"copyTemplate": "Copy Template",
|
||||
"createPromptTemplate": "Create Prompt Template",
|
||||
"defaultTemplates": "Default Templates",
|
||||
"deleteImage": "Delete Image",
|
||||
"deleteTemplate": "Delete Template",
|
||||
"deleteTemplate2": "Are you sure you want to delete this template? This cannot be undone.",
|
||||
"exportPromptTemplates": "Export My Prompt Templates (CSV)",
|
||||
"editTemplate": "Edit Template",
|
||||
"exportDownloaded": "Export Downloaded",
|
||||
"exportFailed": "Unable to generate and download CSV",
|
||||
"flatten": "Flatten selected template into current prompt",
|
||||
"importTemplates": "Import Prompt Templates (CSV/JSON)",
|
||||
"acceptedColumnsKeys": "Accepted columns/keys:",
|
||||
"nameColumn": "'name'",
|
||||
"positivePromptColumn": "'prompt' or 'positive_prompt'",
|
||||
"negativePromptColumn": "'negative_prompt'",
|
||||
"insertPlaceholder": "Insert placeholder",
|
||||
"myTemplates": "My Templates",
|
||||
"name": "Name",
|
||||
"negativePrompt": "Negative Prompt",
|
||||
"noTemplates": "No templates",
|
||||
"noMatchingTemplates": "No matching templates",
|
||||
"promptTemplatesDesc1": "Prompt templates add text to the prompts you write in the prompt box.",
|
||||
"promptTemplatesDesc2": "Use the placeholder string <Pre>{{placeholder}}</Pre> to specify where your prompt should be included in the template.",
|
||||
"promptTemplatesDesc3": "If you omit the placeholder, the template will be appended to the end of your prompt.",
|
||||
"positivePrompt": "Positive Prompt",
|
||||
"preview": "Preview",
|
||||
"private": "Private",
|
||||
"searchByName": "Search by name",
|
||||
"shared": "Shared",
|
||||
"sharedTemplates": "Shared Templates",
|
||||
"templateActions": "Template Actions",
|
||||
"templateDeleted": "Prompt template deleted",
|
||||
"toggleViewMode": "Toggle View Mode",
|
||||
"type": "Type",
|
||||
"unableToDeleteTemplate": "Unable to delete prompt template",
|
||||
"updatePromptTemplate": "Update Prompt Template",
|
||||
"uploadImage": "Upload Image",
|
||||
"useForTemplate": "Use For Prompt Template",
|
||||
"viewList": "View Template List",
|
||||
"viewModeTooltip": "This is how your prompt will look with your currently selected template. To edit your prompt, click anywhere in the text box."
|
||||
},
|
||||
"upsell": {
|
||||
"inviteTeammates": "Invite Teammates",
|
||||
"professional": "Professional",
|
||||
|
||||
@@ -90,7 +90,7 @@
|
||||
"disabled": "Disabilitato",
|
||||
"comparingDesc": "Confronta due immagini",
|
||||
"comparing": "Confronta",
|
||||
"dontShowMeThese": "Non mostrarmi questi"
|
||||
"dontShowMeThese": "Non mostrare più"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Dimensione dell'immagine",
|
||||
@@ -701,7 +701,9 @@
|
||||
"baseModelChanged": "Modello base modificato",
|
||||
"sessionRef": "Sessione: {{sessionId}}",
|
||||
"somethingWentWrong": "Qualcosa è andato storto",
|
||||
"outOfMemoryErrorDesc": "Le impostazioni della generazione attuale superano la capacità del sistema. Modifica le impostazioni e riprova."
|
||||
"outOfMemoryErrorDesc": "Le impostazioni della generazione attuale superano la capacità del sistema. Modifica le impostazioni e riprova.",
|
||||
"importFailed": "Importazione non riuscita",
|
||||
"importSuccessful": "Importazione riuscita"
|
||||
},
|
||||
"tooltip": {
|
||||
"feature": {
|
||||
@@ -1526,7 +1528,7 @@
|
||||
},
|
||||
"upscaleModel": {
|
||||
"paragraphs": [
|
||||
"Il modello di ampliamento ridimensiona l'immagine alle dimensioni di uscita prima che vengano aggiunti i dettagli. È possibile utilizzare qualsiasi modello di ampliamento supportato, ma alcuni sono specializzati per diversi tipi di immagini, come foto o disegni al tratto."
|
||||
"Il modello di ampliamento (Upscale), scala l'immagine alle dimensioni di uscita prima di aggiungere i dettagli. È possibile utilizzare qualsiasi modello di ampliamento supportato, ma alcuni sono specializzati per diversi tipi di immagini, come foto o disegni al tratto."
|
||||
],
|
||||
"heading": "Modello di ampliamento"
|
||||
},
|
||||
@@ -1735,12 +1737,52 @@
|
||||
"missingUpscaleModel": "Modello per l’ampliamento mancante",
|
||||
"missingTileControlNetModel": "Nessun modello ControlNet Tile valido installato",
|
||||
"postProcessingModel": "Modello di post-elaborazione",
|
||||
"postProcessingMissingModelWarning": "Visita <LinkComponent>Gestione modelli</LinkComponent> per installare un modello di post-elaborazione (da immagine a immagine)."
|
||||
"postProcessingMissingModelWarning": "Visita <LinkComponent>Gestione modelli</LinkComponent> per installare un modello di post-elaborazione (da immagine a immagine).",
|
||||
"exceedsMaxSize": "Le impostazioni di ampliamento superano il limite massimo delle dimensioni",
|
||||
"exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata."
|
||||
},
|
||||
"upsell": {
|
||||
"inviteTeammates": "Invita collaboratori",
|
||||
"shareAccess": "Condividi l'accesso",
|
||||
"professional": "Professionale",
|
||||
"professionalUpsell": "Disponibile nell'edizione Professional di Invoke. Fai clic qui o visita invoke.com/pricing per ulteriori dettagli."
|
||||
},
|
||||
"stylePresets": {
|
||||
"active": "Attivo",
|
||||
"choosePromptTemplate": "Scegli un modello di prompt",
|
||||
"clearTemplateSelection": "Cancella selezione modello",
|
||||
"copyTemplate": "Copia modello",
|
||||
"createPromptTemplate": "Crea modello di prompt",
|
||||
"defaultTemplates": "Modelli predefiniti",
|
||||
"deleteImage": "Elimina immagine",
|
||||
"deleteTemplate": "Elimina modello",
|
||||
"editTemplate": "Modifica modello",
|
||||
"flatten": "Unisci il modello selezionato al prompt corrente",
|
||||
"insertPlaceholder": "Inserisci segnaposto",
|
||||
"myTemplates": "I miei modelli",
|
||||
"name": "Nome",
|
||||
"negativePrompt": "Prompt Negativo",
|
||||
"noMatchingTemplates": "Nessun modello corrispondente",
|
||||
"promptTemplatesDesc1": "I modelli di prompt aggiungono testo ai prompt che scrivi nelle caselle dei prompt.",
|
||||
"promptTemplatesDesc3": "Se si omette il segnaposto, il modello verrà aggiunto alla fine del prompt.",
|
||||
"positivePrompt": "Prompt Positivo",
|
||||
"preview": "Anteprima",
|
||||
"private": "Privato",
|
||||
"searchByName": "Cerca per nome",
|
||||
"shared": "Condiviso",
|
||||
"sharedTemplates": "Modelli condivisi",
|
||||
"templateDeleted": "Modello di prompt eliminato",
|
||||
"toggleViewMode": "Attiva/disattiva visualizzazione",
|
||||
"uploadImage": "Carica immagine",
|
||||
"useForTemplate": "Usa per modello di prompt",
|
||||
"viewList": "Visualizza l'elenco dei modelli",
|
||||
"viewModeTooltip": "Ecco come apparirà il tuo prompt con il modello attualmente selezionato. Per modificare il tuo prompt, clicca in un punto qualsiasi della casella di testo.",
|
||||
"deleteTemplate2": "Vuoi davvero eliminare questo modello? Questa operazione non può essere annullata.",
|
||||
"unableToDeleteTemplate": "Impossibile eliminare il modello di prompt",
|
||||
"updatePromptTemplate": "Aggiorna il modello di prompt",
|
||||
"type": "Tipo",
|
||||
"promptTemplatesDesc2": "Utilizza la stringa segnaposto <Pre>{{placeholder}}</Pre> per specificare dove inserire il tuo prompt nel modello.",
|
||||
"importTemplates": "Importa modelli di prompt",
|
||||
"importTemplatesDesc": "Il formato deve essere un CSV con colonne 'name' e 'prompt' o 'positive_prompt' e 'negative_prompt' incluse, oppure un file JSON con chiavi 'name' e 'prompt' o 'positive_prompt' e 'negative_prompt"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -493,7 +493,8 @@
|
||||
"defaultSettingsSaved": "默认设置已保存",
|
||||
"huggingFacePlaceholder": "所有者或模型名称",
|
||||
"huggingFaceRepoID": "HuggingFace仓库ID",
|
||||
"loraTriggerPhrases": "LoRA 触发词"
|
||||
"loraTriggerPhrases": "LoRA 触发词",
|
||||
"ipAdapters": "IP适配器"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "图像",
|
||||
@@ -1702,7 +1703,9 @@
|
||||
"upscaleModelDesc": "图像放大(图像到图像转换)模型",
|
||||
"postProcessingMissingModelWarning": "请访问 <LinkComponent>模型管理器</LinkComponent>来安装一个后处理(图像到图像转换)模型.",
|
||||
"missingModelsWarning": "请访问<LinkComponent>模型管理器</LinkComponent> 安装所需的模型:",
|
||||
"mainModelDesc": "主模型(SD1.5或SDXL架构)"
|
||||
"mainModelDesc": "主模型(SD1.5或SDXL架构)",
|
||||
"exceedsMaxSize": "放大设置超出了最大尺寸限制",
|
||||
"exceedsMaxSizeDetails": "最大放大限制是 {{maxUpscaleDimension}}x{{maxUpscaleDimension}} 像素.请尝试一个较小的图像或减少您的缩放选择."
|
||||
},
|
||||
"upsell": {
|
||||
"inviteTeammates": "邀请团队成员",
|
||||
|
||||
@@ -1,26 +1,40 @@
/* eslint-disable no-console */
import fs from 'node:fs';

import openapiTS from 'openapi-typescript';
import openapiTS, { astToString } from 'openapi-typescript';
import ts from 'typescript';

const OPENAPI_URL = 'http://127.0.0.1:9090/openapi.json';
const OUTPUT_FILE = 'src/services/api/schema.ts';

async function generateTypes(schema) {
  process.stdout.write(`Generating types ${OUTPUT_FILE}...`);

  // Use https://ts-ast-viewer.com to figure out how to create these AST nodes - define a type and use the bottom-left pane's output
  // `Blob` type
  const BLOB = ts.factory.createTypeReferenceNode(ts.factory.createIdentifier('Blob'));
  // `null` type
  const NULL = ts.factory.createLiteralTypeNode(ts.factory.createNull());
  // `Record<string, unknown>` type
  const RECORD_STRING_UNKNOWN = ts.factory.createTypeReferenceNode(ts.factory.createIdentifier('Record'), [
    ts.factory.createKeywordTypeNode(ts.SyntaxKind.StringKeyword),
    ts.factory.createKeywordTypeNode(ts.SyntaxKind.UnknownKeyword),
  ]);

  const types = await openapiTS(schema, {
    exportType: true,
    transform: (schemaObject) => {
      if ('format' in schemaObject && schemaObject.format === 'binary') {
        return schemaObject.nullable ? 'Blob | null' : 'Blob';
        return schemaObject.nullable ? ts.factory.createUnionTypeNode([BLOB, NULL]) : BLOB;
      }
      if (schemaObject.title === 'MetadataField') {
        // This is `Record<string, never>` by default, but it actually accepts any a dict of any valid JSON value.
        return 'Record<string, unknown>';
        return RECORD_STRING_UNKNOWN;
      }
    },
    defaultNonNullable: false,
  });
  fs.writeFileSync(OUTPUT_FILE, types);
  fs.writeFileSync(OUTPUT_FILE, astToString(types));
  process.stdout.write(`\nOK!\r\n`);
}

@@ -13,11 +13,13 @@ import ChangeBoardModal from 'features/changeBoardModal/components/ChangeBoardMo
|
||||
import DeleteImageModal from 'features/deleteImageModal/components/DeleteImageModal';
|
||||
import { DynamicPromptsModal } from 'features/dynamicPrompts/components/DynamicPromptsPreviewModal';
|
||||
import { useStarterModelsToast } from 'features/modelManagerV2/hooks/useStarterModelsToast';
|
||||
import { StylePresetModal } from 'features/stylePresets/components/StylePresetForm/StylePresetModal';
|
||||
import { configChanged } from 'features/system/store/configSlice';
|
||||
import { languageSelector } from 'features/system/store/systemSelectors';
|
||||
import InvokeTabs from 'features/ui/components/InvokeTabs';
|
||||
import type { InvokeTabName } from 'features/ui/store/tabMap';
|
||||
import { setActiveTab } from 'features/ui/store/uiSlice';
|
||||
import { useGetAndLoadLibraryWorkflow } from 'features/workflowLibrary/hooks/useGetAndLoadLibraryWorkflow';
|
||||
import { AnimatePresence } from 'framer-motion';
|
||||
import i18n from 'i18n';
|
||||
import { size } from 'lodash-es';
|
||||
@@ -36,10 +38,11 @@ interface Props {
|
||||
imageName: string;
|
||||
action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters';
|
||||
};
|
||||
selectedWorkflowId?: string;
|
||||
destination?: InvokeTabName | undefined;
|
||||
}
|
||||
|
||||
const App = ({ config = DEFAULT_CONFIG, selectedImage, destination }: Props) => {
|
||||
const App = ({ config = DEFAULT_CONFIG, selectedImage, selectedWorkflowId, destination }: Props) => {
|
||||
const language = useAppSelector(languageSelector);
|
||||
const logger = useLogger('system');
|
||||
const dispatch = useAppDispatch();
|
||||
@@ -70,6 +73,14 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage, destination }: Props) =>
|
||||
}
|
||||
}, [dispatch, config, logger]);
|
||||
|
||||
const { getAndLoadWorkflow } = useGetAndLoadLibraryWorkflow();
|
||||
|
||||
useEffect(() => {
|
||||
if (selectedWorkflowId) {
|
||||
getAndLoadWorkflow(selectedWorkflowId);
|
||||
}
|
||||
}, [selectedWorkflowId, getAndLoadWorkflow]);
|
||||
|
||||
useEffect(() => {
|
||||
if (destination) {
|
||||
dispatch(setActiveTab(destination));
|
||||
@@ -104,6 +115,7 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage, destination }: Props) =>
|
||||
<DeleteImageModal />
|
||||
<ChangeBoardModal />
|
||||
<DynamicPromptsModal />
|
||||
<StylePresetModal />
|
||||
<PreselectedImage selectedImage={selectedImage} />
|
||||
</ErrorBoundary>
|
||||
);
|
||||
|
||||
@@ -44,6 +44,7 @@ interface Props extends PropsWithChildren {
|
||||
imageName: string;
|
||||
action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters';
|
||||
};
|
||||
selectedWorkflowId?: string;
|
||||
destination?: InvokeTabName;
|
||||
customStarUi?: CustomStarUi;
|
||||
socketOptions?: Partial<ManagerOptions & SocketOptions>;
|
||||
@@ -64,6 +65,7 @@ const InvokeAIUI = ({
|
||||
projectUrl,
|
||||
queueId,
|
||||
selectedImage,
|
||||
selectedWorkflowId,
|
||||
destination,
|
||||
customStarUi,
|
||||
socketOptions,
|
||||
@@ -221,7 +223,12 @@ const InvokeAIUI = ({
|
||||
<React.Suspense fallback={<Loading />}>
|
||||
<ThemeLocaleProvider>
|
||||
<AppDndContext>
|
||||
<App config={config} selectedImage={selectedImage} destination={destination} />
|
||||
<App
|
||||
config={config}
|
||||
selectedImage={selectedImage}
|
||||
selectedWorkflowId={selectedWorkflowId}
|
||||
destination={destination}
|
||||
/>
|
||||
</AppDndContext>
|
||||
</ThemeLocaleProvider>
|
||||
</React.Suspense>
|
||||
|
||||
@@ -11,6 +11,8 @@ import {
|
||||
promptsChanged,
|
||||
} from 'features/dynamicPrompts/store/dynamicPromptsSlice';
|
||||
import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
|
||||
import { getPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils';
|
||||
import { activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice';
|
||||
import { utilitiesApi } from 'services/api/endpoints/utilities';
|
||||
import { socketConnected } from 'services/events/actions';
|
||||
|
||||
@@ -19,7 +21,8 @@ const matcher = isAnyOf(
|
||||
combinatorialToggled,
|
||||
maxPromptsChanged,
|
||||
maxPromptsReset,
|
||||
socketConnected
|
||||
socketConnected,
|
||||
activeStylePresetIdChanged
|
||||
);
|
||||
|
||||
export const addDynamicPromptsListener = (startAppListening: AppStartListening) => {
|
||||
@@ -28,7 +31,7 @@ export const addDynamicPromptsListener = (startAppListening: AppStartListening)
|
||||
effect: async (action, { dispatch, getState, cancelActiveListeners, delay }) => {
|
||||
cancelActiveListeners();
|
||||
const state = getState();
|
||||
const { positivePrompt } = state.controlLayers.present;
|
||||
const { positivePrompt } = getPresetModifiedPrompts(state);
|
||||
const { maxPrompts } = state.dynamicPrompts;
|
||||
|
||||
if (state.config.disabledFeatures.includes('dynamicPrompting')) {
|
||||
|
||||
@@ -28,6 +28,7 @@ import { generationPersistConfig, generationSlice } from 'features/parameters/st
|
||||
import { upscalePersistConfig, upscaleSlice } from 'features/parameters/store/upscaleSlice';
|
||||
import { queueSlice } from 'features/queue/store/queueSlice';
|
||||
import { sdxlPersistConfig, sdxlSlice } from 'features/sdxl/store/sdxlSlice';
|
||||
import { stylePresetPersistConfig, stylePresetSlice } from 'features/stylePresets/store/stylePresetSlice';
|
||||
import { configSlice } from 'features/system/store/configSlice';
|
||||
import { systemPersistConfig, systemSlice } from 'features/system/store/systemSlice';
|
||||
import { uiPersistConfig, uiSlice } from 'features/ui/store/uiSlice';
|
||||
@@ -69,6 +70,7 @@ const allReducers = {
|
||||
[workflowSettingsSlice.name]: workflowSettingsSlice.reducer,
|
||||
[api.reducerPath]: api.reducer,
|
||||
[upscaleSlice.name]: upscaleSlice.reducer,
|
||||
[stylePresetSlice.name]: stylePresetSlice.reducer,
|
||||
};
|
||||
|
||||
const rootReducer = combineReducers(allReducers);
|
||||
@@ -114,6 +116,7 @@ const persistConfigs: { [key in keyof typeof allReducers]?: PersistConfig } = {
|
||||
[controlLayersPersistConfig.name]: controlLayersPersistConfig,
|
||||
[workflowSettingsPersistConfig.name]: workflowSettingsPersistConfig,
|
||||
[upscalePersistConfig.name]: upscalePersistConfig,
|
||||
[stylePresetPersistConfig.name]: stylePresetPersistConfig,
|
||||
};
|
||||
|
||||
const unserialize: UnserializeFunction = (data, key) => {
|
||||
@@ -164,8 +167,8 @@ export const createStore = (uniqueStoreKey?: string, persist = true) =>
|
||||
reducer: rememberedRootReducer,
|
||||
middleware: (getDefaultMiddleware) =>
|
||||
getDefaultMiddleware({
|
||||
serializableCheck: false,
|
||||
immutableCheck: false,
|
||||
serializableCheck: import.meta.env.MODE === 'development',
|
||||
immutableCheck: import.meta.env.MODE === 'development',
|
||||
})
|
||||
.concat(api.middleware)
|
||||
.concat(dynamicMiddlewares)
|
||||
|
||||
@@ -71,6 +71,7 @@ export type AppConfig = {
|
||||
*/
|
||||
maxUpscaleDimension?: number;
|
||||
allowPrivateBoards: boolean;
|
||||
allowPrivateStylePresets: boolean;
|
||||
disabledTabs: InvokeTabName[];
|
||||
disabledFeatures: AppFeature[];
|
||||
disabledSDFeatures: SDFeature[];
|
||||
|
||||
@@ -47,6 +47,7 @@ export const IAINoContentFallback = memo((props: IAINoImageFallbackProps) => {
|
||||
userSelect: 'none',
|
||||
opacity: 0.7,
|
||||
color: 'base.500',
|
||||
fontSize: 'md',
|
||||
...sx,
|
||||
}),
|
||||
[sx]
|
||||
@@ -55,11 +56,7 @@ export const IAINoContentFallback = memo((props: IAINoImageFallbackProps) => {
|
||||
return (
|
||||
<Flex sx={styles} {...rest}>
|
||||
{icon && <Icon as={icon} boxSize={boxSize} opacity={0.7} />}
|
||||
{props.label && (
|
||||
<Text textAlign="center" fontSize="md">
|
||||
{props.label}
|
||||
</Text>
|
||||
)}
|
||||
{props.label && <Text textAlign="center">{props.label}</Text>}
|
||||
</Flex>
|
||||
);
|
||||
});
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { useImageUrlToBlob } from 'common/hooks/useImageUrlToBlob';
|
||||
import { convertImageUrlToBlob } from 'common/util/convertImageUrlToBlob';
|
||||
import { copyBlobToClipboard } from 'features/system/util/copyBlobToClipboard';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { useCallback, useMemo } from 'react';
|
||||
@@ -6,7 +6,6 @@ import { useTranslation } from 'react-i18next';
|
||||
|
||||
export const useCopyImageToClipboard = () => {
|
||||
const { t } = useTranslation();
|
||||
const imageUrlToBlob = useImageUrlToBlob();
|
||||
|
||||
const isClipboardAPIAvailable = useMemo(() => {
|
||||
return Boolean(navigator.clipboard) && Boolean(window.ClipboardItem);
|
||||
@@ -23,7 +22,7 @@ export const useCopyImageToClipboard = () => {
|
||||
});
|
||||
}
|
||||
try {
|
||||
const blob = await imageUrlToBlob(image_url);
|
||||
const blob = await convertImageUrlToBlob(image_url);
|
||||
|
||||
if (!blob) {
|
||||
throw new Error('Unable to create Blob');
|
||||
@@ -45,7 +44,7 @@ export const useCopyImageToClipboard = () => {
|
||||
});
|
||||
}
|
||||
},
|
||||
[imageUrlToBlob, isClipboardAPIAvailable, t]
|
||||
[isClipboardAPIAvailable, t]
|
||||
);
|
||||
|
||||
return { isClipboardAPIAvailable, copyImageToClipboard };
|
||||
|
||||
@@ -1,40 +0,0 @@
|
||||
import { $authToken } from 'app/store/nanostores/authToken';
|
||||
import { useCallback } from 'react';
|
||||
|
||||
/**
|
||||
* Converts an image URL to a Blob by creating an <img /> element, drawing it to canvas
|
||||
* and then converting the canvas to a Blob.
|
||||
*
|
||||
* @returns A function that takes a URL and returns a Promise that resolves with a Blob
|
||||
*/
|
||||
export const useImageUrlToBlob = () => {
|
||||
const imageUrlToBlob = useCallback(
|
||||
async (url: string) =>
|
||||
new Promise<Blob | null>((resolve) => {
|
||||
const img = new Image();
|
||||
img.onload = () => {
|
||||
const canvas = document.createElement('canvas');
|
||||
canvas.width = img.width;
|
||||
canvas.height = img.height;
|
||||
|
||||
const context = canvas.getContext('2d');
|
||||
if (!context) {
|
||||
return;
|
||||
}
|
||||
context.drawImage(img, 0, 0);
|
||||
resolve(
|
||||
new Promise<Blob | null>((resolve) => {
|
||||
canvas.toBlob(function (blob) {
|
||||
resolve(blob);
|
||||
}, 'image/png');
|
||||
})
|
||||
);
|
||||
};
|
||||
img.crossOrigin = $authToken.get() ? 'use-credentials' : 'anonymous';
|
||||
img.src = url;
|
||||
}),
|
||||
[]
|
||||
);
|
||||
|
||||
return imageUrlToBlob;
|
||||
};
|
||||
@@ -0,0 +1,33 @@
import { $authToken } from 'app/store/nanostores/authToken';

/**
* Converts an image URL to a Blob by creating an <img /> element, drawing it to canvas
* and then converting the canvas to a Blob.
*
* @returns A function that takes a URL and returns a Promise that resolves with a Blob
*/

export const convertImageUrlToBlob = async (url: string) =>
new Promise<Blob | null>((resolve) => {
const img = new Image();
img.onload = () => {
const canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;

const context = canvas.getContext('2d');
if (!context) {
return;
}
context.drawImage(img, 0, 0);
resolve(
new Promise<Blob | null>((resolve) => {
canvas.toBlob(function (blob) {
resolve(blob);
}, 'image/png');
})
);
};
img.crossOrigin = $authToken.get() ? 'use-credentials' : 'anonymous';
img.src = url;
});
@@ -42,6 +42,7 @@ const DepthAnythingProcessor = (props: Props) => {

const options: { label: string; value: DepthAnythingModelSize }[] = useMemo(
() => [
{ label: t('controlnet.depthAnythingSmallV2'), value: 'small_v2' },
{ label: t('controlnet.small'), value: 'small' },
{ label: t('controlnet.base'), value: 'base' },
{ label: t('controlnet.large'), value: 'large' },

@@ -94,7 +94,7 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
buildDefaults: (baseModel?: BaseModelType) => ({
id: 'depth_anything_image_processor',
type: 'depth_anything_image_processor',
model_size: 'small',
model_size: 'small_v2',
resolution: baseModel === 'sdxl' ? 1024 : 512,
}),
},

@@ -84,7 +84,7 @@ export type RequiredDepthAnythingImageProcessorInvocation = O.Required<
'type' | 'model_size' | 'resolution' | 'offload'
>;

const zDepthAnythingModelSize = z.enum(['large', 'base', 'small']);
const zDepthAnythingModelSize = z.enum(['large', 'base', 'small', 'small_v2']);
export type DepthAnythingModelSize = z.infer<typeof zDepthAnythingModelSize>;
export const isDepthAnythingModelSize = (v: unknown): v is DepthAnythingModelSize =>
zDepthAnythingModelSize.safeParse(v).success;

@@ -24,6 +24,7 @@ export const DepthAnythingProcessor = memo(({ onChange, config }: Props) => {

const options: { label: string; value: DepthAnythingModelSize }[] = useMemo(
() => [
{ label: t('controlnet.depthAnythingSmallV2'), value: 'small_v2' },
{ label: t('controlnet.small'), value: 'small' },
{ label: t('controlnet.base'), value: 'base' },
{ label: t('controlnet.large'), value: 'large' },

@@ -36,7 +36,7 @@ const zContentShuffleProcessorConfig = z.object({
});
export type ContentShuffleProcessorConfig = z.infer<typeof zContentShuffleProcessorConfig>;

const zDepthAnythingModelSize = z.enum(['large', 'base', 'small']);
const zDepthAnythingModelSize = z.enum(['large', 'base', 'small', 'small_v2']);
export type DepthAnythingModelSize = z.infer<typeof zDepthAnythingModelSize>;
export const isDepthAnythingModelSize = (v: unknown): v is DepthAnythingModelSize =>
zDepthAnythingModelSize.safeParse(v).success;
@@ -298,7 +298,7 @@ export const CA_PROCESSOR_DATA: CAProcessorsData = {
buildDefaults: () => ({
id: 'depth_anything_image_processor',
type: 'depth_anything_image_processor',
model_size: 'small',
model_size: 'small_v2',
}),
buildNode: (image, config) => ({
...config,

@@ -30,6 +30,7 @@ import {
PiFlowArrowBold,
PiFoldersBold,
PiImagesBold,
PiPaintBrushBold,
PiPlantBold,
PiQuotesBold,
PiShareFatBold,
@@ -55,8 +56,17 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
const { downloadImage } = useDownloadImage();
const templates = useStore($templates);

const { recallAll, remix, recallSeed, recallPrompts, hasMetadata, hasSeed, hasPrompts, isLoadingMetadata } =
useImageActions(imageDTO?.image_name);
const {
recallAll,
remix,
recallSeed,
recallPrompts,
hasMetadata,
hasSeed,
hasPrompts,
isLoadingMetadata,
createAsPreset,
} = useImageActions(imageDTO?.image_name);

const { getAndLoadEmbeddedWorkflow, getAndLoadEmbeddedWorkflowResult } = useGetAndLoadEmbeddedWorkflow({});

@@ -182,6 +192,13 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
>
{t('parameters.useAll')}
</MenuItem>
<MenuItem
icon={isLoadingMetadata ? <SpinnerIcon /> : <PiPaintBrushBold />}
onClickCapture={createAsPreset}
isDisabled={isLoadingMetadata || !hasPrompts}
>
{t('stylePresets.useForTemplate')}
</MenuItem>
<MenuDivider />
<MenuItem icon={<PiShareFatBold />} onClickCapture={handleSendToImageToImage} id="send-to-img2img">
{t('parameters.sendToImg2Img')}

@@ -1,7 +1,10 @@
import { skipToken } from '@reduxjs/toolkit/query';
import { useAppSelector } from 'app/store/storeHooks';
import { handlers, parseAndRecallAllMetadata, parseAndRecallPrompts } from 'features/metadata/util/handlers';
import { $stylePresetModalState } from 'features/stylePresets/store/stylePresetModal';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { useCallback, useEffect, useState } from 'react';
import { useGetImageDTOQuery } from 'services/api/endpoints/images';
import { useDebouncedMetadata } from 'services/api/hooks/useDebouncedMetadata';

export const useImageActions = (image_name?: string) => {
@@ -10,6 +13,7 @@ export const useImageActions = (image_name?: string) => {
const [hasMetadata, setHasMetadata] = useState(false);
const [hasSeed, setHasSeed] = useState(false);
const [hasPrompts, setHasPrompts] = useState(false);
const { data: imageDTO } = useGetImageDTOQuery(image_name ?? skipToken);

useEffect(() => {
const parseMetadata = async () => {
@@ -61,5 +65,34 @@ export const useImageActions = (image_name?: string) => {
parseAndRecallPrompts(metadata);
}, [metadata]);

return { recallAll, remix, recallSeed, recallPrompts, hasMetadata, hasSeed, hasPrompts, isLoadingMetadata };
const createAsPreset = useCallback(async () => {
if (image_name && metadata && imageDTO) {
const positivePrompt = await handlers.positivePrompt.parse(metadata);
const negativePrompt = await handlers.negativePrompt.parse(metadata);

$stylePresetModalState.set({
prefilledFormData: {
name: '',
positivePrompt,
negativePrompt,
imageUrl: imageDTO.image_url,
type: 'user',
},
updatingStylePresetId: null,
isModalOpen: true,
});
}
}, [image_name, metadata, imageDTO]);

return {
recallAll,
remix,
recallSeed,
recallPrompts,
hasMetadata,
hasSeed,
hasPrompts,
isLoadingMetadata,
createAsPreset,
};
};

@@ -22,11 +22,10 @@ import {
} from './constants';
import { addLoRAs } from './generation/addLoRAs';
import { addSDXLLoRas } from './generation/addSDXLLoRAs';
import { getBoardField, getSDXLStylePrompts } from './graphBuilderUtils';
import { getBoardField, getPresetModifiedPrompts } from './graphBuilderUtils';

export const buildMultidiffusionUpscaleGraph = async (state: RootState): Promise<GraphType> => {
const { model, cfgScale: cfg_scale, scheduler, steps, vaePrecision, seed, vae } = state.generation;
const { positivePrompt, negativePrompt } = state.controlLayers.present;
const { upscaleModel, upscaleInitialImage, structure, creativity, tileControlnetModel, scale } = state.upscale;

assert(model, 'No model found in state');
@@ -99,7 +98,8 @@ export const buildMultidiffusionUpscaleGraph = async (state: RootState): Promise
let modelNode;

if (model.base === 'sdxl') {
const { positiveStylePrompt, negativeStylePrompt } = getSDXLStylePrompts(state);
const { positivePrompt, negativePrompt, positiveStylePrompt, negativeStylePrompt } =
getPresetModifiedPrompts(state);

posCondNode = g.addNode({
type: 'sdxl_compel_prompt',
@@ -132,6 +132,8 @@ export const buildMultidiffusionUpscaleGraph = async (state: RootState): Promise
negative_style_prompt: negativeStylePrompt,
});
} else {
const { positivePrompt, negativePrompt } = getPresetModifiedPrompts(state);

posCondNode = g.addNode({
type: 'compel',
id: POSITIVE_CONDITIONING,

@@ -16,7 +16,7 @@ import {
SDXL_REFINER_POSITIVE_CONDITIONING,
SDXL_REFINER_SEAMLESS,
} from 'features/nodes/util/graph/constants';
import { getSDXLStylePrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import { getPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import type { NonNullableGraph } from 'services/api/types';
import { isRefinerMainModelModelConfig } from 'services/api/types';

@@ -59,7 +59,7 @@ export const addSDXLRefinerToGraph = async (
const modelLoaderId = modelLoaderNodeId ? modelLoaderNodeId : SDXL_MODEL_LOADER;

// Construct Style Prompt
const { positiveStylePrompt, negativeStylePrompt } = getSDXLStylePrompts(state);
const { positiveStylePrompt, negativeStylePrompt } = getPresetModifiedPrompts(state);

// Unplug SDXL Latents Generation To Latents To Image
graph.edges = graph.edges.filter((e) => !(e.source.node_id === baseNodeId && ['latents'].includes(e.source.field)));

@@ -16,7 +16,11 @@ import {
POSITIVE_CONDITIONING,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import { getBoardField, getIsIntermediate } from 'features/nodes/util/graph/graphBuilderUtils';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageDTO, Invocation, NonNullableGraph } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';

@@ -51,7 +55,6 @@ export const buildCanvasImageToImageGraph = async (
seamlessXAxis,
seamlessYAxis,
} = state.generation;
const { positivePrompt, negativePrompt } = state.controlLayers.present;

// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
@@ -71,6 +74,8 @@ export const buildCanvasImageToImageGraph = async (

const use_cpu = shouldUseCpuNoise;

const { positivePrompt, negativePrompt } = getPresetModifiedPrompts(state);

/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
* full graph here as a template. Then use the parameters from app state and set friendlier node

@@ -19,7 +19,11 @@ import {
POSITIVE_CONDITIONING,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import { getBoardField, getIsIntermediate } from 'features/nodes/util/graph/graphBuilderUtils';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageDTO, Invocation, NonNullableGraph } from 'services/api/types';

import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
@@ -58,7 +62,6 @@ export const buildCanvasInpaintGraph = async (
canvasCoherenceEdgeSize,
maskBlur,
} = state.generation;
const { positivePrompt, negativePrompt } = state.controlLayers.present;

if (!model) {
log.error('No model found in state');
@@ -79,6 +82,8 @@ export const buildCanvasInpaintGraph = async (

const use_cpu = shouldUseCpuNoise;

const { positivePrompt, negativePrompt } = getPresetModifiedPrompts(state);

const graph: NonNullableGraph = {
id: CANVAS_INPAINT_GRAPH,
nodes: {

@@ -23,7 +23,11 @@ import {
POSITIVE_CONDITIONING,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import { getBoardField, getIsIntermediate } from 'features/nodes/util/graph/graphBuilderUtils';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageDTO, Invocation, NonNullableGraph } from 'services/api/types';

import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
@@ -70,7 +74,6 @@ export const buildCanvasOutpaintGraph = async (
canvasCoherenceEdgeSize,
maskBlur,
} = state.generation;
const { positivePrompt, negativePrompt } = state.controlLayers.present;

if (!model) {
log.error('No model found in state');
@@ -91,6 +94,8 @@ export const buildCanvasOutpaintGraph = async (

const use_cpu = shouldUseCpuNoise;

const { positivePrompt, negativePrompt } = getPresetModifiedPrompts(state);

const graph: NonNullableGraph = {
id: CANVAS_OUTPAINT_GRAPH,
nodes: {

@@ -16,7 +16,11 @@ import {
SDXL_REFINER_SEAMLESS,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import { getBoardField, getIsIntermediate, getSDXLStylePrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageDTO, Invocation, NonNullableGraph } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';

@@ -51,7 +55,6 @@ export const buildCanvasSDXLImageToImageGraph = async (
seamlessYAxis,
img2imgStrength: strength,
} = state.generation;
const { positivePrompt, negativePrompt } = state.controlLayers.present;

const { refinerModel, refinerStart } = state.sdxl;

@@ -75,7 +78,7 @@ export const buildCanvasSDXLImageToImageGraph = async (
const use_cpu = shouldUseCpuNoise;

// Construct Style Prompt
const { positiveStylePrompt, negativeStylePrompt } = getSDXLStylePrompts(state);
const { positivePrompt, negativePrompt, positiveStylePrompt, negativeStylePrompt } = getPresetModifiedPrompts(state);

/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the

@@ -19,7 +19,11 @@ import {
SDXL_REFINER_SEAMLESS,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import { getBoardField, getIsIntermediate, getSDXLStylePrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageDTO, Invocation, NonNullableGraph } from 'services/api/types';

import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
@@ -58,7 +62,6 @@ export const buildCanvasSDXLInpaintGraph = async (
canvasCoherenceEdgeSize,
maskBlur,
} = state.generation;
const { positivePrompt, negativePrompt } = state.controlLayers.present;

const { refinerModel, refinerStart } = state.sdxl;

@@ -83,7 +86,7 @@ export const buildCanvasSDXLInpaintGraph = async (
const use_cpu = shouldUseCpuNoise;

// Construct Style Prompt
const { positiveStylePrompt, negativeStylePrompt } = getSDXLStylePrompts(state);
const { positivePrompt, negativePrompt, positiveStylePrompt, negativeStylePrompt } = getPresetModifiedPrompts(state);

const graph: NonNullableGraph = {
id: SDXL_CANVAS_INPAINT_GRAPH,

@@ -23,7 +23,11 @@ import {
SDXL_REFINER_SEAMLESS,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import { getBoardField, getIsIntermediate, getSDXLStylePrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageDTO, Invocation, NonNullableGraph } from 'services/api/types';

import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
@@ -70,7 +74,6 @@ export const buildCanvasSDXLOutpaintGraph = async (
canvasCoherenceEdgeSize,
maskBlur,
} = state.generation;
const { positivePrompt, negativePrompt } = state.controlLayers.present;

const { refinerModel, refinerStart } = state.sdxl;

@@ -94,7 +97,7 @@ export const buildCanvasSDXLOutpaintGraph = async (
const use_cpu = shouldUseCpuNoise;

// Construct Style Prompt
const { positiveStylePrompt, negativeStylePrompt } = getSDXLStylePrompts(state);
const { positivePrompt, negativePrompt, positiveStylePrompt, negativeStylePrompt } = getPresetModifiedPrompts(state);

const graph: NonNullableGraph = {
id: SDXL_CANVAS_OUTPAINT_GRAPH,

@@ -14,7 +14,11 @@ import {
SDXL_REFINER_SEAMLESS,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import { getBoardField, getIsIntermediate, getSDXLStylePrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import { isNonRefinerMainModelConfig, type NonNullableGraph } from 'services/api/types';

import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
@@ -44,7 +48,6 @@ export const buildCanvasSDXLTextToImageGraph = async (state: RootState): Promise
seamlessXAxis,
seamlessYAxis,
} = state.generation;
const { positivePrompt, negativePrompt } = state.controlLayers.present;

// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
@@ -67,7 +70,7 @@ export const buildCanvasSDXLTextToImageGraph = async (state: RootState): Promise
let modelLoaderNodeId = SDXL_MODEL_LOADER;

// Construct Style Prompt
const { positiveStylePrompt, negativeStylePrompt } = getSDXLStylePrompts(state);
const { positivePrompt, negativePrompt, positiveStylePrompt, negativeStylePrompt } = getPresetModifiedPrompts(state);

/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the

@@ -14,7 +14,11 @@ import {
POSITIVE_CONDITIONING,
SEAMLESS,
} from 'features/nodes/util/graph/constants';
import { getBoardField, getIsIntermediate } from 'features/nodes/util/graph/graphBuilderUtils';
import {
getBoardField,
getIsIntermediate,
getPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import { isNonRefinerMainModelConfig, type NonNullableGraph } from 'services/api/types';

import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
@@ -44,7 +48,6 @@ export const buildCanvasTextToImageGraph = async (state: RootState): Promise<Non
seamlessXAxis,
seamlessYAxis,
} = state.generation;
const { positivePrompt, negativePrompt } = state.controlLayers.present;

// The bounding box determines width and height, not the width and height params
const { width, height } = state.canvas.boundingBoxDimensions;
@@ -64,6 +67,8 @@ export const buildCanvasTextToImageGraph = async (state: RootState): Promise<Non

let modelLoaderNodeId = MAIN_MODEL_LOADER;

const { positivePrompt, negativePrompt } = getPresetModifiedPrompts(state);

/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
* full graph here as a template. Then use the parameters from app state and set friendlier node

@@ -22,7 +22,7 @@ import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import type { GraphType } from 'features/nodes/util/graph/generation/Graph';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField } from 'features/nodes/util/graph/graphBuilderUtils';
import { getBoardField, getPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
@@ -40,11 +40,12 @@ export const buildGenerationTabGraph = async (state: RootState): Promise<GraphTy
seed,
vae,
} = state.generation;
const { positivePrompt, negativePrompt } = state.controlLayers.present;
const { width, height } = state.controlLayers.present.size;

assert(model, 'No model found in state');

const { positivePrompt, negativePrompt } = getPresetModifiedPrompts(state);

const g = new Graph(CONTROL_LAYERS_GRAPH);
const modelLoader = g.addNode({
type: 'main_model_loader',

@@ -19,7 +19,7 @@ import { addSDXLRefiner } from 'features/nodes/util/graph/generation/addSDXLRefi
import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getSDXLStylePrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import { getBoardField, getPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation, NonNullableGraph } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
@@ -36,14 +36,13 @@ export const buildGenerationTabSDXLGraph = async (state: RootState): Promise<Non
vaePrecision,
vae,
} = state.generation;
const { positivePrompt, negativePrompt } = state.controlLayers.present;
const { width, height } = state.controlLayers.present.size;

const { refinerModel, refinerStart } = state.sdxl;

assert(model, 'No model found in state');

const { positiveStylePrompt, negativeStylePrompt } = getSDXLStylePrompts(state);
const { positivePrompt, negativePrompt, positiveStylePrompt, negativeStylePrompt } = getPresetModifiedPrompts(state);

const g = new Graph(SDXL_CONTROL_LAYERS_GRAPH);
const modelLoader = g.addNode({

@@ -1,6 +1,8 @@
import type { RootState } from 'app/store/store';
import type { BoardField } from 'features/nodes/types/common';
import { buildPresetModifiedPrompt } from 'features/stylePresets/hooks/usePresetModifiedPrompts';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { stylePresetsApi } from 'services/api/endpoints/stylePresets';

/**
* Gets the board field, based on the autoAddBoardId setting.
@@ -14,13 +16,43 @@ export const getBoardField = (state: RootState): BoardField | undefined => {
};

/**
* Gets the SDXL style prompts, based on the concat setting.
* Gets the prompts, modified for the active style preset.
*/
export const getSDXLStylePrompts = (state: RootState): { positiveStylePrompt: string; negativeStylePrompt: string } => {
export const getPresetModifiedPrompts = (
state: RootState
): { positivePrompt: string; negativePrompt: string; positiveStylePrompt?: string; negativeStylePrompt?: string } => {
const { positivePrompt, negativePrompt, positivePrompt2, negativePrompt2, shouldConcatPrompts } =
state.controlLayers.present;
const { activeStylePresetId } = state.stylePreset;

if (activeStylePresetId) {
const { data } = stylePresetsApi.endpoints.listStylePresets.select()(state);

const activeStylePreset = data?.find((item) => item.id === activeStylePresetId);

if (activeStylePreset) {
const presetModifiedPositivePrompt = buildPresetModifiedPrompt(
activeStylePreset.preset_data.positive_prompt,
positivePrompt
);

const presetModifiedNegativePrompt = buildPresetModifiedPrompt(
activeStylePreset.preset_data.negative_prompt,
negativePrompt
);

return {
positivePrompt: presetModifiedPositivePrompt,
negativePrompt: presetModifiedNegativePrompt,
positiveStylePrompt: shouldConcatPrompts ? presetModifiedPositivePrompt : positivePrompt2,
negativeStylePrompt: shouldConcatPrompts ? presetModifiedNegativePrompt : negativePrompt2,
};
}
}

return {
positivePrompt,
negativePrompt,
positiveStylePrompt: shouldConcatPrompts ? positivePrompt : positivePrompt2,
negativeStylePrompt: shouldConcatPrompts ? negativePrompt : negativePrompt2,
};

@@ -1,16 +1,32 @@
import { Box, Textarea } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { negativePromptChanged } from 'features/controlLayers/store/controlLayersSlice';
import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
import { ViewModePrompt } from 'features/parameters/components/Prompts/ViewModePrompt';
import { AddPromptTriggerButton } from 'features/prompt/AddPromptTriggerButton';
import { PromptPopover } from 'features/prompt/PromptPopover';
import { usePrompt } from 'features/prompt/usePrompt';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { useListStylePresetsQuery } from 'services/api/endpoints/stylePresets';

export const ParamNegativePrompt = memo(() => {
const dispatch = useAppDispatch();
const prompt = useAppSelector((s) => s.controlLayers.present.negativePrompt);
const viewMode = useAppSelector((s) => s.stylePreset.viewMode);
const activeStylePresetId = useAppSelector((s) => s.stylePreset.activeStylePresetId);

const { activeStylePreset } = useListStylePresetsQuery(undefined, {
selectFromResult: ({ data }) => {
let activeStylePreset = null;
if (data) {
activeStylePreset = data.find((sp) => sp.id === activeStylePresetId);
}
return { activeStylePreset };
},
});

const textareaRef = useRef<HTMLTextAreaElement>(null);
const { t } = useTranslation();
const _onChange = useCallback(
@@ -27,22 +43,34 @@ export const ParamNegativePrompt = memo(() => {

return (
<PromptPopover isOpen={isOpen} onClose={onClose} onSelect={onSelect} width={textareaRef.current?.clientWidth}>
<Box pos="relative">
<Box pos="relative" w="full">
<Textarea
id="negativePrompt"
name="negativePrompt"
ref={textareaRef}
value={prompt}
placeholder={t('parameters.globalNegativePromptPlaceholder')}
onChange={onChange}
onKeyDown={onKeyDown}
fontSize="sm"
variant="darkFilled"
paddingRight={30}
minH={28}
borderTopWidth={24} // This prevents the prompt from being hidden behind the header
paddingInlineEnd={10}
paddingInlineStart={3}
paddingTop={0}
paddingBottom={3}
/>
<PromptOverlayButtonWrapper>
<AddPromptTriggerButton isOpen={isOpen} onOpen={onOpen} />
</PromptOverlayButtonWrapper>
<PromptLabel label={t('parameters.negativePromptPlaceholder')} />
{viewMode && (
<ViewModePrompt
prompt={prompt}
presetPrompt={activeStylePreset?.preset_data.negative_prompt || ''}
label={`${t('parameters.negativePromptPlaceholder')} (${t('stylePresets.preview')})`}
/>
)}
</Box>
</PromptPopover>
);

@@ -2,7 +2,9 @@ import { Box, Textarea } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { positivePromptChanged } from 'features/controlLayers/store/controlLayersSlice';
import { ShowDynamicPromptsPreviewButton } from 'features/dynamicPrompts/components/ShowDynamicPromptsPreviewButton';
import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
import { ViewModePrompt } from 'features/parameters/components/Prompts/ViewModePrompt';
import { AddPromptTriggerButton } from 'features/prompt/AddPromptTriggerButton';
import { PromptPopover } from 'features/prompt/PromptPopover';
import { usePrompt } from 'features/prompt/usePrompt';
@@ -11,11 +13,24 @@ import { memo, useCallback, useRef } from 'react';
import type { HotkeyCallback } from 'react-hotkeys-hook';
import { useHotkeys } from 'react-hotkeys-hook';
import { useTranslation } from 'react-i18next';
import { useListStylePresetsQuery } from 'services/api/endpoints/stylePresets';

export const ParamPositivePrompt = memo(() => {
const dispatch = useAppDispatch();
const prompt = useAppSelector((s) => s.controlLayers.present.positivePrompt);
const baseModel = useAppSelector((s) => s.generation.model)?.base;
const viewMode = useAppSelector((s) => s.stylePreset.viewMode);
const activeStylePresetId = useAppSelector((s) => s.stylePreset.activeStylePresetId);

const { activeStylePreset } = useListStylePresetsQuery(undefined, {
selectFromResult: ({ data }) => {
let activeStylePreset = null;
if (data) {
activeStylePreset = data.find((sp) => sp.id === activeStylePresetId);
}
return { activeStylePreset };
},
});

const textareaRef = useRef<HTMLTextAreaElement>(null);
const { t } = useTranslation();
@@ -49,18 +64,29 @@ export const ParamPositivePrompt = memo(() => {
name="prompt"
ref={textareaRef}
value={prompt}
placeholder={t('parameters.globalPositivePromptPlaceholder')}
onChange={onChange}
minH={28}
minH={40}
onKeyDown={onKeyDown}
variant="darkFilled"
paddingRight={30}
borderTopWidth={24} // This prevents the prompt from being hidden behind the header
paddingInlineEnd={10}
paddingInlineStart={3}
paddingTop={0}
paddingBottom={3}
/>
<PromptOverlayButtonWrapper>
<AddPromptTriggerButton isOpen={isOpen} onOpen={onOpen} />
{baseModel === 'sdxl' && <SDXLConcatButton />}
<ShowDynamicPromptsPreviewButton />
</PromptOverlayButtonWrapper>
<PromptLabel label={t('parameters.positivePromptPlaceholder')} />
{viewMode && (
<ViewModePrompt
prompt={prompt}
presetPrompt={activeStylePreset?.preset_data.positive_prompt || ''}
label={`${t('parameters.positivePromptPlaceholder')} (${t('stylePresets.preview')})`}
/>
)}
</Box>
</PromptPopover>
);

@@ -0,0 +1,9 @@
import { Text } from '@invoke-ai/ui-library';

export const PromptLabel = ({ label }: { label: string }) => {
return (
<Text variant="subtext" fontWeight="semibold" pos="absolute" top={1} left={2}>
{label}
</Text>
);
};
@@ -1,13 +1,29 @@
import { Flex } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice';
import { ParamNegativePrompt } from 'features/parameters/components/Core/ParamNegativePrompt';
import { ParamPositivePrompt } from 'features/parameters/components/Core/ParamPositivePrompt';
import { selectGenerationSlice } from 'features/parameters/store/generationSlice';
import { ParamSDXLNegativeStylePrompt } from 'features/sdxl/components/SDXLPrompts/ParamSDXLNegativeStylePrompt';
import { ParamSDXLPositiveStylePrompt } from 'features/sdxl/components/SDXLPrompts/ParamSDXLPositiveStylePrompt';
import { memo } from 'react';

const concatPromptsSelector = createSelector(
[selectGenerationSlice, selectControlLayersSlice],
(generation, controlLayers) => {
return generation.model?.base !== 'sdxl' || controlLayers.present.shouldConcatPrompts;
}
);

export const Prompts = memo(() => {
const shouldConcatPrompts = useAppSelector(concatPromptsSelector);
return (
<Flex flexDir="column" gap={2}>
<ParamPositivePrompt />
{!shouldConcatPrompts && <ParamSDXLPositiveStylePrompt />}
<ParamNegativePrompt />
{!shouldConcatPrompts && <ParamSDXLNegativeStylePrompt />}
</Flex>
);
});

@@ -0,0 +1,81 @@
import { Box, Flex, Icon, Text, Tooltip } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { viewModeChanged } from 'features/stylePresets/store/stylePresetSlice';
import { getViewModeChunks } from 'features/stylePresets/util/getViewModeChunks';
import { useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiEyeBold } from 'react-icons/pi';

import { PromptLabel } from './PromptLabel';

export const ViewModePrompt = ({
presetPrompt,
prompt,
label,
}: {
presetPrompt: string;
prompt: string;
label: string;
}) => {
const dispatch = useAppDispatch();
const { t } = useTranslation();

const presetChunks = useMemo(() => {
return getViewModeChunks(prompt, presetPrompt);
}, [presetPrompt, prompt]);

const handleExitViewMode = useCallback(() => {
dispatch(viewModeChanged(false));
}, [dispatch]);

return (
<Box position="absolute" top={0} bottom={0} left={0} right={0} layerStyle="second" borderRadius="base">
<Flex
flexDir="column"
onClick={handleExitViewMode}
justifyContent="space-between"
h="full"
borderWidth={1}
borderTopWidth={24} // This prevents the prompt from being hidden behind the header
borderColor="transparent"
paddingInlineEnd={10}
paddingInlineStart={3}
paddingTop={0}
paddingBottom={3}
>
<PromptLabel label={label} />
<Flex overflow="scroll">
<Text w="full" lineHeight="short">
{presetChunks.map((chunk, index) => (
<Text
as="span"
color={index === 1 ? 'white' : 'base.200'}
fontWeight={index === 1 ? 'semibold' : 'normal'}
key={index}
>
{chunk}
</Text>
))}
</Text>
</Flex>

<Tooltip label={t('stylePresets.viewModeTooltip')}>
<Flex
position="absolute"
insetInlineEnd={0}
insetBlockStart={0}
alignItems="center"
justifyContent="center"
p={2}
bg="base.900"
opacity={0.8}
backgroundClip="border-box"
borderBottomStartRadius="base"
>
<Icon as={PiEyeBold} color="base.500" boxSize={4} />
</Flex>
</Tooltip>
</Flex>
</Box>
);
};
@@ -1,6 +1,7 @@
import { Box, Textarea } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { negativePrompt2Changed } from 'features/controlLayers/store/controlLayersSlice';
import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
import { AddPromptTriggerButton } from 'features/prompt/AddPromptTriggerButton';
import { PromptPopover } from 'features/prompt/PromptPopover';
@@ -36,16 +37,21 @@ export const ParamSDXLNegativeStylePrompt = memo(() => {
name="prompt"
ref={textareaRef}
value={prompt}
placeholder={t('sdxl.negStylePrompt')}
onChange={onChange}
onKeyDown={onKeyDown}
fontSize="sm"
variant="darkFilled"
paddingRight={30}
minH={24}
borderTopWidth={24} // This prevents the prompt from being hidden behind the header
paddingInlineEnd={10}
paddingInlineStart={3}
paddingTop={0}
paddingBottom={3}
/>
<PromptOverlayButtonWrapper>
<AddPromptTriggerButton isOpen={isOpen} onOpen={onOpen} />
</PromptOverlayButtonWrapper>
<PromptLabel label={t('sdxl.negStylePrompt')} />
</Box>
</PromptPopover>
);

@@ -1,6 +1,7 @@
import { Box, Textarea } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { positivePrompt2Changed } from 'features/controlLayers/store/controlLayersSlice';
import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
import { AddPromptTriggerButton } from 'features/prompt/AddPromptTriggerButton';
import { PromptPopover } from 'features/prompt/PromptPopover';
@@ -33,16 +34,21 @@ export const ParamSDXLPositiveStylePrompt = memo(() => {
name="prompt"
ref={textareaRef}
value={prompt}
placeholder={t('sdxl.posStylePrompt')}
onChange={onChange}
onKeyDown={onKeyDown}
fontSize="sm"
variant="darkFilled"
paddingRight={30}
minH={24}
borderTopWidth={24} // This prevents the prompt from being hidden behind the header
paddingInlineEnd={10}
paddingInlineStart={3}
paddingTop={0}
paddingBottom={3}
/>
<PromptOverlayButtonWrapper>
<AddPromptTriggerButton isOpen={isOpen} onOpen={onOpen} />
</PromptOverlayButtonWrapper>
<PromptLabel label={t('sdxl.posStylePrompt')} />
</Box>
</PromptPopover>
);

@@ -1,20 +0,0 @@
import type { Meta, StoryObj } from '@storybook/react';

import { SDXLPrompts } from './SDXLPrompts';

const meta: Meta<typeof SDXLPrompts> = {
title: 'Feature/Prompt/SDXLPrompts',
tags: ['autodocs'],
component: SDXLPrompts,
};

export default meta;
type Story = StoryObj<typeof SDXLPrompts>;

const Component = () => {
return <SDXLPrompts />;
};

export const Default: Story = {
render: Component,
};
@@ -1,22 +0,0 @@
import { Flex } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { ParamNegativePrompt } from 'features/parameters/components/Core/ParamNegativePrompt';
import { ParamPositivePrompt } from 'features/parameters/components/Core/ParamPositivePrompt';
import { memo } from 'react';

import { ParamSDXLNegativeStylePrompt } from './ParamSDXLNegativeStylePrompt';
import { ParamSDXLPositiveStylePrompt } from './ParamSDXLPositiveStylePrompt';

export const SDXLPrompts = memo(() => {
const shouldConcatPrompts = useAppSelector((s) => s.controlLayers.present.shouldConcatPrompts);
return (
<Flex flexDir="column" gap={2} pos="relative">
<ParamPositivePrompt />
{!shouldConcatPrompts && <ParamSDXLPositiveStylePrompt />}
<ParamNegativePrompt />
{!shouldConcatPrompts && <ParamSDXLNegativeStylePrompt />}
</Flex>
);
});

SDXLPrompts.displayName = 'SDXLPrompts';
@@ -57,7 +57,11 @@ export const UpscaleWarning = () => {
$installModelsTab.set(3);
}, [dispatch]);

if ((!modelWarnings.length && !otherWarnings.length) || isLoading || !shouldShowButton) {
if (modelWarnings.length && !shouldShowButton) {
return null;
}

if ((!modelWarnings.length && !otherWarnings.length) || isLoading) {
return null;
}

@@ -0,0 +1,113 @@
import { Badge, Flex, IconButton, Text, Tooltip } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { negativePromptChanged, positivePromptChanged } from 'features/controlLayers/store/controlLayersSlice';
import { usePresetModifiedPrompts } from 'features/stylePresets/hooks/usePresetModifiedPrompts';
import { activeStylePresetIdChanged, viewModeChanged } from 'features/stylePresets/store/stylePresetSlice';
import type { MouseEventHandler } from 'react';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiEyeBold, PiStackSimpleBold, PiXBold } from 'react-icons/pi';
import { useListStylePresetsQuery } from 'services/api/endpoints/stylePresets';

import StylePresetImage from './StylePresetImage';

export const ActiveStylePreset = () => {
const viewMode = useAppSelector((s) => s.stylePreset.viewMode);

const activeStylePresetId = useAppSelector((s) => s.stylePreset.activeStylePresetId);

const { activeStylePreset } = useListStylePresetsQuery(undefined, {
selectFromResult: ({ data }) => {
let activeStylePreset = null;
if (data) {
activeStylePreset = data.find((sp) => sp.id === activeStylePresetId);
}
return { activeStylePreset };
},
});

const dispatch = useAppDispatch();
const { t } = useTranslation();

const { presetModifiedPositivePrompt, presetModifiedNegativePrompt } = usePresetModifiedPrompts();

const handleClearActiveStylePreset = useCallback<MouseEventHandler<HTMLButtonElement>>(
(e) => {
e.stopPropagation();
dispatch(viewModeChanged(false));
dispatch(activeStylePresetIdChanged(null));
},
[dispatch]
);

const handleFlattenPrompts = useCallback<MouseEventHandler<HTMLButtonElement>>(
(e) => {
e.stopPropagation();
dispatch(positivePromptChanged(presetModifiedPositivePrompt));
dispatch(negativePromptChanged(presetModifiedNegativePrompt));
dispatch(viewModeChanged(false));
dispatch(activeStylePresetIdChanged(null));
},
[dispatch, presetModifiedPositivePrompt, presetModifiedNegativePrompt]
);

const handleToggleViewMode = useCallback<MouseEventHandler<HTMLButtonElement>>(
(e) => {
e.stopPropagation();
dispatch(viewModeChanged(!viewMode));
},
[dispatch, viewMode]
);

if (!activeStylePreset) {
return (
<Flex h={25} alignItems="center">
<Text fontSize="sm" fontWeight="semibold" color="base.300">
{t('stylePresets.choosePromptTemplate')}
</Text>
</Flex>
);
}
return (
<Flex justifyContent="space-between" w="full" alignItems="center">
<Flex gap={2} alignItems="center">
<StylePresetImage imageWidth={25} presetImageUrl={activeStylePreset.image} />
<Flex flexDir="column">
<Badge colorScheme="invokeBlue" variant="subtle">
{activeStylePreset.name}
</Badge>
</Flex>
</Flex>
<Flex gap={1}>
<Tooltip label={t('stylePresets.toggleViewMode')}>
<IconButton
onClick={handleToggleViewMode}
variant="outline"
size="sm"
aria-label={t('stylePresets.toggleViewMode')}
colorScheme={viewMode ? 'invokeBlue' : 'base'}
icon={<PiEyeBold />}
/>
</Tooltip>
<Tooltip label={t('stylePresets.flatten')}>
<IconButton
onClick={handleFlattenPrompts}
variant="outline"
size="sm"
aria-label={t('stylePresets.flatten')}
icon={<PiStackSimpleBold />}
/>
</Tooltip>
<Tooltip label={t('stylePresets.clearTemplateSelection')}>
<IconButton
onClick={handleClearActiveStylePreset}
variant="outline"
size="sm"
aria-label={t('stylePresets.clearTemplateSelection')}
icon={<PiXBold />}
/>
</Tooltip>
</Flex>
</Flex>
);
};
@@ -0,0 +1,30 @@
import { IconButton } from '@invoke-ai/ui-library';
import { $stylePresetModalState } from 'features/stylePresets/store/stylePresetModal';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';

export const StylePresetCreateButton = () => {
const handleClickAddNew = useCallback(() => {
$stylePresetModalState.set({
prefilledFormData: null,
updatingStylePresetId: null,
isModalOpen: true,
});
}, []);

const { t } = useTranslation();

return (
<IconButton
icon={<PiPlusBold />}
tooltip={t('stylePresets.createPromptTemplate')}
aria-label={t('stylePresets.createPromptTemplate')}
onClick={handleClickAddNew}
size="md"
variant="ghost"
w={8}
h={8}
/>
);
};
@@ -0,0 +1,68 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { IconButton, spinAnimation } from '@invoke-ai/ui-library';
import { EMPTY_ARRAY } from 'app/store/constants';
import { toast } from 'features/toast/toast';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiDownloadSimpleBold, PiSpinner } from 'react-icons/pi';
import { useLazyExportStylePresetsQuery, useListStylePresetsQuery } from 'services/api/endpoints/stylePresets';

const loadingStyles: SystemStyleObject = {
svg: { animation: spinAnimation },
};

export const StylePresetExportButton = () => {
const [exportStylePresets, { isLoading }] = useLazyExportStylePresetsQuery();
const { t } = useTranslation();
const { presetCount } = useListStylePresetsQuery(undefined, {
selectFromResult: ({ data }) => {
const userPresets = data?.filter((preset) => preset.type === 'user') ?? EMPTY_ARRAY;
return {
presetCount: userPresets.length,
};
},
});
const handleClickDownloadCsv = useCallback(async () => {
let blob;
try {
const response = await exportStylePresets().unwrap();
blob = new Blob([response], { type: 'text/csv' });
} catch (error) {
toast({
status: 'error',
title: t('stylePresets.exportFailed'),
});
return;
}

if (blob) {
const url = window.URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = url;
a.download = 'data.csv';
document.body.appendChild(a);
a.click();
document.body.removeChild(a);
window.URL.revokeObjectURL(url);
toast({
status: 'success',
title: t('stylePresets.exportDownloaded'),
});
}
}, [exportStylePresets, t]);

return (
<IconButton
onClick={handleClickDownloadCsv}
icon={!isLoading ? <PiDownloadSimpleBold /> : <PiSpinner />}
tooltip={t('stylePresets.exportPromptTemplates')}
aria-label={t('stylePresets.exportPromptTemplates')}
size="md"
variant="link"
w={8}
h={8}
sx={isLoading ? loadingStyles : undefined}
isDisabled={isLoading || presetCount === 0}
/>
);
};
@@ -0,0 +1,124 @@
import { Box, Button, Flex, FormControl, FormLabel, Input, Spacer, Text } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { PRESET_PLACEHOLDER } from 'features/stylePresets/hooks/usePresetModifiedPrompts';
import { $stylePresetModalState } from 'features/stylePresets/store/stylePresetModal';
import { toast } from 'features/toast/toast';
import type { PropsWithChildren } from 'react';
import { useCallback } from 'react';
import type { SubmitHandler } from 'react-hook-form';
import { useForm } from 'react-hook-form';
import { Trans, useTranslation } from 'react-i18next';
import type { PresetType } from 'services/api/endpoints/stylePresets';
import { useCreateStylePresetMutation, useUpdateStylePresetMutation } from 'services/api/endpoints/stylePresets';

import { StylePresetImageField } from './StylePresetImageField';
import { StylePresetPromptField } from './StylePresetPromptField';
import { StylePresetTypeField } from './StylePresetTypeField';

export type StylePresetFormData = {
name: string;
positivePrompt: string;
negativePrompt: string;
image: File | null;
type: PresetType;
};

export const StylePresetForm = ({
updatingStylePresetId,
formData,
}: {
updatingStylePresetId: string | null;
formData: StylePresetFormData | null;
}) => {
const [createStylePreset] = useCreateStylePresetMutation();
const [updateStylePreset] = useUpdateStylePresetMutation();
const { t } = useTranslation();
const allowPrivateStylePresets = useAppSelector((s) => s.config.allowPrivateStylePresets);

const { handleSubmit, control, register, formState } = useForm<StylePresetFormData>({
defaultValues: formData || {
name: '',
positivePrompt: '',
negativePrompt: '',
image: null,
type: 'user',
},
mode: 'onChange',
});

const handleClickSave = useCallback<SubmitHandler<StylePresetFormData>>(
async (data) => {
const payload = {
data: {
name: data.name,
positive_prompt: data.positivePrompt,
negative_prompt: data.negativePrompt,
type: data.type,
},
image: data.image,
};

try {
if (updatingStylePresetId) {
await updateStylePreset({
id: updatingStylePresetId,
...payload,
}).unwrap();
} else {
await createStylePreset(payload).unwrap();
}
} catch (error) {
toast({
status: 'error',
title: 'Failed to save style preset',
});
}

$stylePresetModalState.set({
prefilledFormData: null,
updatingStylePresetId: null,
isModalOpen: false,
});
},
[updatingStylePresetId, updateStylePreset, createStylePreset]
);

return (
<Flex flexDir="column" gap={4}>
<Flex alignItems="center" gap={4}>
<StylePresetImageField control={control} name="image" />
<FormControl orientation="vertical">
<FormLabel>{t('stylePresets.name')}</FormLabel>
<Input size="md" {...register('name', { required: true, minLength: 1 })} />
</FormControl>
</Flex>

<StylePresetPromptField label="Positive Prompt" control={control} name="positivePrompt" />
<StylePresetPromptField label="Negative Prompt" control={control} name="negativePrompt" />
<Box>
<Text variant="subtext">{t('stylePresets.promptTemplatesDesc1')}</Text>
<Text variant="subtext">
<Trans
i18nKey="stylePresets.promptTemplatesDesc2"
components={{ Pre: <Pre /> }}
values={{ placeholder: PRESET_PLACEHOLDER }}
/>
</Text>
<Text variant="subtext">{t('stylePresets.promptTemplatesDesc3')}</Text>
</Box>

<Flex justifyContent="space-between" alignItems="flex-end" gap={10}>
{allowPrivateStylePresets ? <StylePresetTypeField control={control} name="type" /> : <Spacer />}
<Button onClick={handleSubmit(handleClickSave)} isDisabled={!formState.isValid}>
{t('common.save')}
</Button>
</Flex>
</Flex>
);
};

const Pre = (props: PropsWithChildren) => (
<Text as="span" fontFamily="monospace" fontWeight="semibold">
{props.children}
</Text>
);
@@ -0,0 +1,82 @@
import { Box, Button, Flex, Icon, IconButton, Image, Tooltip } from '@invoke-ai/ui-library';
import { useCallback } from 'react';
import { useDropzone } from 'react-dropzone';
import type { UseControllerProps } from 'react-hook-form';
import { useController } from 'react-hook-form';
import { useTranslation } from 'react-i18next';
import { PiArrowCounterClockwiseBold, PiUploadSimpleBold } from 'react-icons/pi';

import type { StylePresetFormData } from './StylePresetForm';

export const StylePresetImageField = (props: UseControllerProps<StylePresetFormData, 'image'>) => {
const { field } = useController(props);
const { t } = useTranslation();
const onDropAccepted = useCallback(
(files: File[]) => {
const file = files[0];
if (file) {
field.onChange(file);
}
},
[field]
);

const handleResetImage = useCallback(() => {
field.onChange(null);
}, [field]);

const { getInputProps, getRootProps } = useDropzone({
accept: { 'image/png': ['.png'], 'image/jpeg': ['.jpg', '.jpeg', '.png'] },
onDropAccepted,
noDrag: true,
multiple: false,
});

if (field.value) {
return (
<Box position="relative" flexShrink={0}>
<Image
src={URL.createObjectURL(field.value)}
objectFit="cover"
objectPosition="50% 50%"
w={65}
h={65}
minWidth={65}
borderRadius="base"
/>
<IconButton
position="absolute"
insetInlineEnd={0}
insetBlockStart={0}
onClick={handleResetImage}
aria-label={t('stylePresets.deleteImage')}
tooltip={t('stylePresets.deleteImage')}
icon={<PiArrowCounterClockwiseBold />}
size="md"
variant="ghost"
/>
</Box>
);
}

return (
<>
<Tooltip label={t('stylePresets.uploadImage')}>
<Flex
as={Button}
w={65}
h={65}
opacity={0.3}
borderRadius="base"
alignItems="center"
justifyContent="center"
flexShrink={0}
{...getRootProps()}
>
<Icon as={PiUploadSimpleBold} w={8} h={8} />
</Flex>
</Tooltip>
<input {...getInputProps()} />
</>
);
};
@@ -0,0 +1,82 @@
import {
Modal,
ModalBody,
ModalCloseButton,
ModalContent,
ModalFooter,
ModalHeader,
ModalOverlay,
Spinner,
} from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { convertImageUrlToBlob } from 'common/util/convertImageUrlToBlob';
import type { PrefilledFormData } from 'features/stylePresets/store/stylePresetModal';
import { $stylePresetModalState } from 'features/stylePresets/store/stylePresetModal';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';

import type { StylePresetFormData } from './StylePresetForm';
import { StylePresetForm } from './StylePresetForm';

export const StylePresetModal = () => {
const [formData, setFormData] = useState<StylePresetFormData | null>(null);
const { t } = useTranslation();
const stylePresetModalState = useStore($stylePresetModalState);

const modalTitle = useMemo(() => {
return stylePresetModalState.updatingStylePresetId
? t('stylePresets.updatePromptTemplate')
: t('stylePresets.createPromptTemplate');
}, [stylePresetModalState.updatingStylePresetId, t]);

const handleCloseModal = useCallback(() => {
$stylePresetModalState.set({
prefilledFormData: null,
updatingStylePresetId: null,
isModalOpen: false,
});
}, []);

useEffect(() => {
setFormData(null);
}, []);

useEffect(() => {
const convertImageToBlob = async (data: PrefilledFormData | null) => {
if (!data) {
setFormData(null);
} else {
let file = null;
if (data.imageUrl) {
const blob = await convertImageUrlToBlob(data.imageUrl);
if (blob) {
file = new File([blob], 'style_preset.png', { type: 'image/png' });
}
}
setFormData({
...data,
image: file,
});
}
};
convertImageToBlob(stylePresetModalState.prefilledFormData);
}, [stylePresetModalState.prefilledFormData]);

return (
<Modal isOpen={stylePresetModalState.isModalOpen} onClose={handleCloseModal} isCentered size="2xl">
<ModalOverlay />
<ModalContent>
<ModalHeader>{modalTitle}</ModalHeader>
<ModalCloseButton />
<ModalBody display="flex" flexDir="column" gap={4}>
{!stylePresetModalState.prefilledFormData || formData ? (
<StylePresetForm updatingStylePresetId={stylePresetModalState.updatingStylePresetId} formData={formData} />
) : (
<Spinner />
)}
</ModalBody>
<ModalFooter p={2} />
</ModalContent>
</Modal>
);
};