Compare commits


8 Commits

Author         SHA1        Message                                                    Date
Lincoln Stein  39900fed4f  blackify                                                   2023-11-08 17:05:54 -05:00
Lincoln Stein  6f7e384454  Merge branch 'main' into lstein/normalized-model-manager  2023-11-08 17:05:40 -05:00
Lincoln Stein  3cf2392a11  Merge branch 'main' into lstein/normalized-model-manager  2023-10-24 14:18:07 -04:00
Lincoln Stein  c36919bbdd  blackify                                                   2023-10-23 16:15:25 -04:00
Lincoln Stein  6a210104b7  update the README for the normalized model manager         2023-10-23 16:03:40 -04:00
Lincoln Stein  ef20c591c3  fully functional proof of principle - needs docs           2023-10-22 22:51:32 -04:00
Lincoln Stein  5d6b9d747e  ingestion of simple models and pipelines working           2023-10-22 16:52:39 -04:00
Lincoln Stein  7ac570ade1  first draft of normalized model manager p-o-p              2023-10-21 21:45:02 -04:00
127 changed files with 3509 additions and 2247 deletions

.github/workflows/pyflakes.yml (new file, 20 lines)

@@ -0,0 +1,20 @@
on:
pull_request:
push:
branches:
- main
- development
- 'release-candidate-*'
jobs:
pyflakes:
name: runner / pyflakes
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: pyflakes
uses: reviewdog/action-pyflakes@v1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
reporter: github-pr-review


@@ -18,7 +18,8 @@ jobs:
- name: Install dependencies with pip
run: |
pip install ruff
pip install black flake8 Flake8-pyproject isort
- run: ruff check --output-format=github .
- run: ruff format --check .
- run: isort --check-only .
- run: black --check .
- run: flake8


@@ -198,7 +198,6 @@ The list of schedulers has been completely revamped and brought up to date:
| **dpmpp_2m** | DPMSolverMultistepScheduler | original noise schedule |
| **dpmpp_2m_k** | DPMSolverMultistepScheduler | using karras noise schedule |
| **unipc** | UniPCMultistepScheduler | CPU only |
| **lcm** | LCMScheduler | |
Please see [3.0.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v3.0.0) for further details.
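As a rough sketch of how a table like this is typically wired up (the diffusers class names are real, but the map shape and helper below are illustrative, modeled on the SCHEDULER_MAP identifier that appears later in this compare):

from diffusers import DPMSolverMultistepScheduler, LCMScheduler, UniPCMultistepScheduler

# Hypothetical name -> (scheduler class, constructor overrides) table.
SCHEDULER_MAP = {
    "dpmpp_2m": (DPMSolverMultistepScheduler, {}),
    "dpmpp_2m_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True}),
    "unipc": (UniPCMultistepScheduler, {}),
    "lcm": (LCMScheduler, {}),
}

def make_scheduler(name: str, scheduler_config: dict):
    """Build the named scheduler from an existing pipeline's scheduler config."""
    cls, overrides = SCHEDULER_MAP[name]
    return cls.from_config(scheduler_config, **overrides)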


@@ -460,10 +460,10 @@ def get_torch_source() -> (Union[str, None], str):
url = "https://download.pytorch.org/whl/cpu"
if device == "cuda":
url = "https://download.pytorch.org/whl/cu118"
url = "https://download.pytorch.org/whl/cu121"
optional_modules = "[xformers,onnx-cuda]"
if device == "cuda_and_dml":
url = "https://download.pytorch.org/whl/cu118"
url = "https://download.pytorch.org/whl/cu121"
optional_modules = "[xformers,onnx-directml]"
# in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13


@@ -137,7 +137,7 @@ def dest_path(dest=None) -> Path:
path_completer = PathCompleter(
only_directories=True,
expanduser=True,
get_paths=lambda: [browse_start], # noqa: B023
get_paths=lambda: [browse_start],
# get_paths=lambda: [".."].extend(list(browse_start.iterdir()))
)
@@ -149,7 +149,7 @@ def dest_path(dest=None) -> Path:
completer=path_completer,
default=str(browse_start) + os.sep,
vi_mode=True,
complete_while_typing=True,
complete_while_typing=True
# Test that this is not needed on Windows
# complete_style=CompleteStyle.READLINE_LIKE,
)


@@ -28,7 +28,7 @@ class FastAPIEventService(EventServiceBase):
self.__queue.put(None)
def dispatch(self, event_name: str, payload: Any) -> None:
self.__queue.put({"event_name": event_name, "payload": payload})
self.__queue.put(dict(event_name=event_name, payload=payload))
async def __dispatch_from_queue(self, stop_event: threading.Event):
"""Get events on from the queue and dispatch them, from the correct thread"""


@@ -55,7 +55,7 @@ async def list_models(
) -> ModelsList:
"""Gets a list of models"""
if base_models and len(base_models) > 0:
models_raw = []
models_raw = list()
for base_model in base_models:
models_raw.extend(ApiDependencies.invoker.services.model_manager.list_models(base_model, model_type))
else:


@@ -34,4 +34,4 @@ class SocketIO:
async def _handle_unsub_queue(self, sid, data, *args, **kwargs):
if "queue_id" in data:
await self.__sio.leave_room(sid, data["queue_id"])
await self.__sio.enter_room(sid, data["queue_id"])


@@ -130,7 +130,7 @@ def custom_openapi() -> dict[str, Any]:
# Add all outputs
all_invocations = BaseInvocation.get_invocations()
output_types = set()
output_type_titles = {}
output_type_titles = dict()
for invoker in all_invocations:
output_type = signature(invoker.invoke).return_annotation
output_types.add(output_type)
@@ -171,12 +171,12 @@ def custom_openapi() -> dict[str, Any]:
# print(f"Config with name {name} already defined")
continue
openapi_schema["components"]["schemas"][name] = {
"title": name,
"description": "An enumeration.",
"type": "string",
"enum": [v.value for v in model_config_format_enum],
}
openapi_schema["components"]["schemas"][name] = dict(
title=name,
description="An enumeration.",
type="string",
enum=list(v.value for v in model_config_format_enum),
)
app.openapi_schema = openapi_schema
return app.openapi_schema


@@ -25,4 +25,4 @@ spec.loader.exec_module(module)
# add core nodes to __all__
python_files = filter(lambda f: not f.name.startswith("_"), Path(__file__).parent.glob("*.py"))
__all__ = [f.stem for f in python_files] # type: ignore
__all__ = list(f.stem for f in python_files) # type: ignore


@@ -16,7 +16,6 @@ from pydantic.fields import FieldInfo, _Unset
from pydantic_core import PydanticUndefined
from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.misc import uuid_string
if TYPE_CHECKING:
@@ -31,6 +30,74 @@ class InvalidFieldError(TypeError):
pass
class FieldDescriptions:
denoising_start = "When to start denoising, expressed a percentage of total steps"
denoising_end = "When to stop denoising, expressed a percentage of total steps"
cfg_scale = "Classifier-Free Guidance scale"
scheduler = "Scheduler to use during inference"
positive_cond = "Positive conditioning tensor"
negative_cond = "Negative conditioning tensor"
noise = "Noise tensor"
clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
unet = "UNet (scheduler, LoRAs)"
vae = "VAE"
cond = "Conditioning tensor"
controlnet_model = "ControlNet model to load"
vae_model = "VAE model to load"
lora_model = "LoRA model to load"
main_model = "Main model (UNet, VAE, CLIP) to load"
sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
sdxl_refiner_model = "SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load"
onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
lora_weight = "The weight at which the LoRA is applied to each model"
compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor"
raw_prompt = "Raw prompt text (no parsing)"
sdxl_aesthetic = "The aesthetic score to apply to the conditioning tensor"
skipped_layers = "Number of layers to skip in text encoder"
seed = "Seed for random number generation"
steps = "Number of steps to run"
width = "Width of output (px)"
height = "Height of output (px)"
control = "ControlNet(s) to apply"
ip_adapter = "IP-Adapter to apply"
t2i_adapter = "T2I-Adapter(s) to apply"
denoised_latents = "Denoised latents tensor"
latents = "Latents tensor"
strength = "Strength of denoising (proportional to steps)"
metadata = "Optional metadata to be saved with the image"
metadata_collection = "Collection of Metadata"
metadata_item_polymorphic = "A single metadata item or collection of metadata items"
metadata_item_label = "Label for this metadata item"
metadata_item_value = "The value for this metadata item (may be any type)"
workflow = "Optional workflow to be saved with the image"
interp_mode = "Interpolation mode"
torch_antialias = "Whether or not to apply antialiasing (bilinear or bicubic only)"
fp32 = "Whether or not to use full float32 precision"
precision = "Precision to use"
tiled = "Processing using overlapping tiles (reduce memory consumption)"
detect_res = "Pixel resolution for detection"
image_res = "Pixel resolution for output image"
safe_mode = "Whether or not to use safe mode"
scribble_mode = "Whether or not to use scribble mode"
scale_factor = "The factor by which to scale"
blend_alpha = (
"Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B."
)
num_1 = "The first number"
num_2 = "The second number"
mask = "The mask to use for the operation"
board = "The board to save the image to"
image = "The image to process"
tile_size = "Tile size"
inclusive_low = "The inclusive low value"
exclusive_high = "The exclusive high value"
decimal_places = "The number of decimal places to round to"
freeu_s1 = 'Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
freeu_s2 = 'Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
freeu_b1 = "Scaling factor for stage 1 to amplify the contributions of backbone features."
freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features."
class Input(str, Enum):
"""
The type of input a field accepts.
@@ -236,35 +303,35 @@ def InputField(
Ignored for non-collection fields.
"""
json_schema_extra_: dict[str, Any] = {
"input": input,
"ui_type": ui_type,
"ui_component": ui_component,
"ui_hidden": ui_hidden,
"ui_order": ui_order,
"item_default": item_default,
"ui_choice_labels": ui_choice_labels,
"_field_kind": "input",
}
json_schema_extra_: dict[str, Any] = dict(
input=input,
ui_type=ui_type,
ui_component=ui_component,
ui_hidden=ui_hidden,
ui_order=ui_order,
item_default=item_default,
ui_choice_labels=ui_choice_labels,
_field_kind="input",
)
field_args = {
"default": default,
"default_factory": default_factory,
"title": title,
"description": description,
"pattern": pattern,
"strict": strict,
"gt": gt,
"ge": ge,
"lt": lt,
"le": le,
"multiple_of": multiple_of,
"allow_inf_nan": allow_inf_nan,
"max_digits": max_digits,
"decimal_places": decimal_places,
"min_length": min_length,
"max_length": max_length,
}
field_args = dict(
default=default,
default_factory=default_factory,
title=title,
description=description,
pattern=pattern,
strict=strict,
gt=gt,
ge=ge,
lt=lt,
le=le,
multiple_of=multiple_of,
allow_inf_nan=allow_inf_nan,
max_digits=max_digits,
decimal_places=decimal_places,
min_length=min_length,
max_length=max_length,
)
"""
Invocation definitions have their fields typed correctly for their `invoke()` functions.
@@ -299,24 +366,24 @@ def InputField(
# because we are manually making fields optional, we need to store the original required bool for reference later
if default is PydanticUndefined and default_factory is PydanticUndefined:
json_schema_extra_.update({"orig_required": True})
json_schema_extra_.update(dict(orig_required=True))
else:
json_schema_extra_.update({"orig_required": False})
json_schema_extra_.update(dict(orig_required=False))
# make Input.Any and Input.Connection fields optional, providing None as a default if the field doesn't already have one
if (input is Input.Any or input is Input.Connection) and default_factory is PydanticUndefined:
default_ = None if default is PydanticUndefined else default
provided_args.update({"default": default_})
provided_args.update(dict(default=default_))
if default is not PydanticUndefined:
# before invoking, we'll grab the original default value and set it on the field if the field wasn't provided a value
json_schema_extra_.update({"default": default})
json_schema_extra_.update({"orig_default": default})
json_schema_extra_.update(dict(default=default))
json_schema_extra_.update(dict(orig_default=default))
elif default is not PydanticUndefined and default_factory is PydanticUndefined:
default_ = default
provided_args.update({"default": default_})
json_schema_extra_.update({"orig_default": default_})
provided_args.update(dict(default=default_))
json_schema_extra_.update(dict(orig_default=default_))
elif default_factory is not PydanticUndefined:
provided_args.update({"default_factory": default_factory})
provided_args.update(dict(default_factory=default_factory))
# TODO: cannot serialize default_factory...
# json_schema_extra_.update(dict(orig_default_factory=default_factory))
@@ -383,12 +450,12 @@ def OutputField(
decimal_places=decimal_places,
min_length=min_length,
max_length=max_length,
json_schema_extra={
"ui_type": ui_type,
"ui_hidden": ui_hidden,
"ui_order": ui_order,
"_field_kind": "output",
},
json_schema_extra=dict(
ui_type=ui_type,
ui_hidden=ui_hidden,
ui_order=ui_order,
_field_kind="output",
),
)
@@ -460,14 +527,14 @@ class BaseInvocationOutput(BaseModel):
@classmethod
def get_output_types(cls) -> Iterable[str]:
return (get_type(i) for i in BaseInvocationOutput.get_outputs())
return map(lambda i: get_type(i), BaseInvocationOutput.get_outputs())
@staticmethod
def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None:
# Because we use a pydantic Literal field with default value for the invocation type,
# it will be typed as optional in the OpenAPI schema. Make it required manually.
if "required" not in schema or not isinstance(schema["required"], list):
schema["required"] = []
schema["required"] = list()
schema["required"].extend(["type"])
model_config = ConfigDict(
@@ -527,11 +594,16 @@ class BaseInvocation(ABC, BaseModel):
@classmethod
def get_invocations_map(cls) -> dict[str, BaseInvocation]:
# Get the type strings out of the literals and into a dictionary
return {get_type(i): i for i in BaseInvocation.get_invocations()}
return dict(
map(
lambda i: (get_type(i), i),
BaseInvocation.get_invocations(),
)
)
@classmethod
def get_invocation_types(cls) -> Iterable[str]:
return (get_type(i) for i in BaseInvocation.get_invocations())
return map(lambda i: get_type(i), BaseInvocation.get_invocations())
@classmethod
def get_output_type(cls) -> BaseInvocationOutput:
@@ -550,7 +622,7 @@ class BaseInvocation(ABC, BaseModel):
if uiconfig and hasattr(uiconfig, "version"):
schema["version"] = uiconfig.version
if "required" not in schema or not isinstance(schema["required"], list):
schema["required"] = []
schema["required"] = list()
schema["required"].extend(["type", "id"])
@abstractmethod
@@ -604,15 +676,15 @@ class BaseInvocation(ABC, BaseModel):
id: str = Field(
default_factory=uuid_string,
description="The id of this instance of an invocation. Must be unique among all instances of invocations.",
json_schema_extra={"_field_kind": "internal"},
json_schema_extra=dict(_field_kind="internal"),
)
is_intermediate: bool = Field(
default=False,
description="Whether or not this is an intermediate invocation.",
json_schema_extra={"ui_type": UIType.IsIntermediate, "_field_kind": "internal"},
json_schema_extra=dict(ui_type=UIType.IsIntermediate, _field_kind="internal"),
)
use_cache: bool = Field(
default=True, description="Whether or not to use the cache", json_schema_extra={"_field_kind": "internal"}
default=True, description="Whether or not to use the cache", json_schema_extra=dict(_field_kind="internal")
)
UIConfig: ClassVar[Type[UIConfigBase]]
@@ -646,7 +718,7 @@ class _Model(BaseModel):
# Get all pydantic model attrs, methods, etc
RESERVED_PYDANTIC_FIELD_NAMES = {m[0] for m in inspect.getmembers(_Model())}
RESERVED_PYDANTIC_FIELD_NAMES = set(map(lambda m: m[0], inspect.getmembers(_Model())))
def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None:
@@ -661,7 +733,9 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None
field_kind = (
# _field_kind is defined via InputField(), OutputField() or by one of the internal fields defined in this file
field.json_schema_extra.get("_field_kind", None) if field.json_schema_extra else None
field.json_schema_extra.get("_field_kind", None)
if field.json_schema_extra
else None
)
# must have a field_kind
@@ -722,7 +796,7 @@ def invocation(
# Add OpenAPI schema extras
uiconf_name = cls.__qualname__ + ".UIConfig"
if not hasattr(cls, "UIConfig") or cls.UIConfig.__qualname__ != uiconf_name:
cls.UIConfig = type(uiconf_name, (UIConfigBase,), {})
cls.UIConfig = type(uiconf_name, (UIConfigBase,), dict())
if title is not None:
cls.UIConfig.title = title
if tags is not None:
@@ -749,7 +823,7 @@ def invocation(
invocation_type_annotation = Literal[invocation_type] # type: ignore
invocation_type_field = Field(
title="type", default=invocation_type, json_schema_extra={"_field_kind": "internal"}
title="type", default=invocation_type, json_schema_extra=dict(_field_kind="internal")
)
docstring = cls.__doc__
@@ -795,7 +869,7 @@ def invocation_output(
# Add the output type to the model.
output_type_annotation = Literal[output_type] # type: ignore
output_type_field = Field(title="type", default=output_type, json_schema_extra={"_field_kind": "internal"})
output_type_field = Field(title="type", default=output_type, json_schema_extra=dict(_field_kind="internal"))
docstring = cls.__doc__
cls = create_model(
@@ -827,7 +901,7 @@ WorkflowFieldValidator = TypeAdapter(WorkflowField)
class WithWorkflow(BaseModel):
workflow: Optional[WorkflowField] = Field(
default=None, description=FieldDescriptions.workflow, json_schema_extra={"_field_kind": "internal"}
default=None, description=FieldDescriptions.workflow, json_schema_extra=dict(_field_kind="internal")
)
@@ -845,5 +919,5 @@ MetadataFieldValidator = TypeAdapter(MetadataField)
class WithMetadata(BaseModel):
metadata: Optional[MetadataField] = Field(
default=None, description=FieldDescriptions.metadata, json_schema_extra={"_field_kind": "internal"}
default=None, description=FieldDescriptions.metadata, json_schema_extra=dict(_field_kind="internal")
)


@@ -7,7 +7,6 @@ from compel import Compel, ReturnedEmbeddingsType
from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment
from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
BasicConditioningInfo,
ExtraConditioningInfo,
@@ -20,6 +19,7 @@ from ...backend.util.devices import torch_dtype
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,


@@ -28,12 +28,12 @@ from pydantic import BaseModel, ConfigDict, Field, field_validator
from invokeai.app.invocations.primitives import ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from ...backend.model_management import BaseModelType
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,


@@ -131,7 +131,7 @@ def prepare_faces_list(
deduped_faces: list[FaceResultData] = []
if len(face_result_list) == 0:
return []
return list()
for candidate in face_result_list:
should_add = True
@@ -210,7 +210,7 @@ def generate_face_box_mask(
# Check if any face is detected.
if results.multi_face_landmarks: # type: ignore # these are via protobuf and not typed
# Search for the face_id in the detected faces.
for _face_id, face_landmarks in enumerate(results.multi_face_landmarks): # type: ignore # these are via protobuf and not typed
for face_id, face_landmarks in enumerate(results.multi_face_landmarks): # type: ignore # these are via protobuf and not typed
# Get the bounding box of the face mesh.
x_coordinates = [landmark.x for landmark in face_landmarks.landmark]
y_coordinates = [landmark.y for landmark in face_landmarks.landmark]


@@ -9,11 +9,19 @@ from PIL import Image, ImageChops, ImageFilter, ImageOps
from invokeai.app.invocations.primitives import BoardField, ColorField, ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
from invokeai.backend.image_util.safety_checker import SafetyChecker
from .baseinvocation import BaseInvocation, Input, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation
from .baseinvocation import (
BaseInvocation,
FieldDescriptions,
Input,
InputField,
InvocationContext,
WithMetadata,
WithWorkflow,
invocation,
)
@invocation("show_image", title="Show Image", tags=["image"], category="image", version="1.0.0")


@@ -7,6 +7,7 @@ from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,
@@ -16,7 +17,6 @@ from invokeai.app.invocations.baseinvocation import (
invocation_output,
)
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.model_management.models.base import BaseModelType, ModelType
from invokeai.backend.model_management.models.ip_adapter import get_ip_adapter_image_encoder_model_id


@@ -10,7 +10,7 @@ import torch
import torchvision.transforms as T
from diffusers import AutoencoderKL, AutoencoderTiny
from diffusers.image_processor import VaeImageProcessor
from diffusers.models.adapter import T2IAdapter
from diffusers.models.adapter import FullAdapterXL, T2IAdapter
from diffusers.models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
@@ -34,7 +34,6 @@ from invokeai.app.invocations.primitives import (
)
from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.controlnet_utils import prepare_control_image
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus
@@ -58,6 +57,7 @@ from ...backend.util.devices import choose_precision, choose_torch_device
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,
@@ -77,7 +77,7 @@ if choose_torch_device() == torch.device("mps"):
DEFAULT_PRECISION = choose_precision(choose_torch_device())
SAMPLER_NAME_VALUES = Literal[tuple(SCHEDULER_MAP.keys())]
SAMPLER_NAME_VALUES = Literal[tuple(list(SCHEDULER_MAP.keys()))]
@invocation_output("scheduler_output")
@@ -562,6 +562,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
t2i_adapter_model: T2IAdapter
with t2i_adapter_model_info as t2i_adapter_model:
total_downscale_factor = t2i_adapter_model.total_downscale_factor
if isinstance(t2i_adapter_model.adapter, FullAdapterXL):
# HACK(ryand): Work around a bug in FullAdapterXL. This is being addressed upstream in diffusers by
# this PR: https://github.com/huggingface/diffusers/pull/5134.
total_downscale_factor = total_downscale_factor // 2
# Resize the T2I-Adapter input image.
# We select the resize dimensions so that after the T2I-Adapter's total_downscale_factor is applied, the
@@ -1105,7 +1109,7 @@ class BlendLatentsInvocation(BaseInvocation):
latents_b = context.services.latents.get(self.latents_b.latents_name)
if latents_a.shape != latents_b.shape:
raise Exception("Latents to blend must be the same size.")
raise "Latents to blend must be the same size."
# TODO:
device = choose_torch_device()
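One note on the raise statements in the hunk above: in Python 3, raising a bare string is itself an error, so only the Exception(...) form can actually signal the size mismatch. A quick demonstration:

try:
    raise "Latents to blend must be the same size."
except TypeError as err:
    print(err)  # exceptions must derive from BaseException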


@@ -6,9 +6,8 @@ import numpy as np
from pydantic import ValidationInfo, field_validator
from invokeai.app.invocations.primitives import FloatOutput, IntegerOutput
from invokeai.app.shared.fields import FieldDescriptions
from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation
from .baseinvocation import BaseInvocation, FieldDescriptions, InputField, InvocationContext, invocation
@invocation("add", title="Add Integers", tags=["math", "add"], category="math", version="1.0.0")
@@ -145,17 +144,17 @@ INTEGER_OPERATIONS = Literal[
]
INTEGER_OPERATIONS_LABELS = {
"ADD": "Add A+B",
"SUB": "Subtract A-B",
"MUL": "Multiply A*B",
"DIV": "Divide A/B",
"EXP": "Exponentiate A^B",
"MOD": "Modulus A%B",
"ABS": "Absolute Value of A",
"MIN": "Minimum(A,B)",
"MAX": "Maximum(A,B)",
}
INTEGER_OPERATIONS_LABELS = dict(
ADD="Add A+B",
SUB="Subtract A-B",
MUL="Multiply A*B",
DIV="Divide A/B",
EXP="Exponentiate A^B",
MOD="Modulus A%B",
ABS="Absolute Value of A",
MIN="Minimum(A,B)",
MAX="Maximum(A,B)",
)
@invocation(
@@ -231,17 +230,17 @@ FLOAT_OPERATIONS = Literal[
]
FLOAT_OPERATIONS_LABELS = {
"ADD": "Add A+B",
"SUB": "Subtract A-B",
"MUL": "Multiply A*B",
"DIV": "Divide A/B",
"EXP": "Exponentiate A^B",
"ABS": "Absolute Value of A",
"SQRT": "Square Root of A",
"MIN": "Minimum(A,B)",
"MAX": "Maximum(A,B)",
}
FLOAT_OPERATIONS_LABELS = dict(
ADD="Add A+B",
SUB="Subtract A-B",
MUL="Multiply A*B",
DIV="Divide A/B",
EXP="Exponentiate A^B",
ABS="Absolute Value of A",
SQRT="Square Root of A",
MIN="Minimum(A,B)",
MAX="Maximum(A,B)",
)
@invocation(
@@ -266,7 +265,7 @@ class FloatMathInvocation(BaseInvocation):
raise ValueError("Cannot divide by zero")
elif info.data["operation"] == "EXP" and info.data["a"] == 0 and v < 0:
raise ValueError("Cannot raise zero to a negative power")
elif info.data["operation"] == "EXP" and isinstance(info.data["a"] ** v, complex):
elif info.data["operation"] == "EXP" and type(info.data["a"] ** v) is complex:
raise ValueError("Root operation resulted in a complex number")
return v


@@ -5,6 +5,7 @@ from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
InputField,
InvocationContext,
MetadataField,
@@ -18,7 +19,6 @@ from invokeai.app.invocations.ip_adapter import IPAdapterModelField
from invokeai.app.invocations.model import LoRAModelField, MainModelField, VAEModelField
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.shared.fields import FieldDescriptions
from ...version import __version__
@@ -160,14 +160,13 @@ class CoreMetadataInvocation(BaseInvocation):
)
# High resolution fix metadata.
hrf_enabled: Optional[float] = InputField(
hrf_width: Optional[int] = InputField(
default=None,
description="Whether or not high resolution fix was enabled.",
description="The high resolution fix height and width multipler.",
)
# TODO: should this be stricter or do we just let the UI handle it?
hrf_method: Optional[str] = InputField(
hrf_height: Optional[int] = InputField(
default=None,
description="The high resolution fix upscale method.",
description="The high resolution fix height and width multipler.",
)
hrf_strength: Optional[float] = InputField(
default=None,


@@ -3,13 +3,11 @@ from typing import List, Optional
from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.shared.models import FreeUConfig
from ...backend.model_management import BaseModelType, ModelType, SubModelType
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,
@@ -19,6 +17,22 @@ from .baseinvocation import (
invocation_output,
)
# TODO: Permanent fix for this
# from invokeai.app.invocations.shared import FreeUConfig
class FreeUConfig(BaseModel):
"""
Configuration for the FreeU hyperparameters.
- https://huggingface.co/docs/diffusers/main/en/using-diffusers/freeu
- https://github.com/ChenyangSi/FreeU
"""
s1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s1)
s2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s2)
b1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b1)
b2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b2)
class ModelInfo(BaseModel):
model_name: str = Field(description="Info to load submodel")
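For context on the FreeUConfig fields above: diffusers pipelines expose an enable_freeu() toggle that takes the same four hyperparameters. A minimal sketch (the checkpoint ID and the concrete s/b values are illustrative, loosely following the FreeU project's SD 1.5 suggestions, and are not taken from this diff):

from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# b1/b2 amplify backbone features; s1/s2 attenuate skip features to reduce
# the "oversmoothing effect" the FreeU paper describes.
pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.5, b2=1.6)
image = pipe("an astronaut riding a horse").images[0]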


@@ -5,13 +5,13 @@ import torch
from pydantic import field_validator
from invokeai.app.invocations.latent import LatentsField
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.misc import SEED_MAX, get_random_seed
from ...backend.util.devices import choose_torch_device, torch_dtype
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
InputField,
InvocationContext,
OutputField,


@@ -14,7 +14,6 @@ from tqdm import tqdm
from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput, ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend import BaseModelType, ModelType, SubModelType
@@ -24,6 +23,7 @@ from ...backend.util import choose_torch_device
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,
@@ -54,7 +54,7 @@ ORT_TO_NP_TYPE = {
"tensor(double)": np.float64,
}
PRECISION_VALUES = Literal[tuple(ORT_TO_NP_TYPE.keys())]
PRECISION_VALUES = Literal[tuple(list(ORT_TO_NP_TYPE.keys()))]
@invocation("prompt_onnx", title="ONNX Prompt (Raw)", tags=["prompt", "onnx"], category="conditioning", version="1.0.0")
@@ -252,7 +252,7 @@ class ONNXTextToLatentsInvocation(BaseInvocation):
scheduler.set_timesteps(self.steps)
latents = latents * np.float64(scheduler.init_noise_sigma)
extra_step_kwargs = {}
extra_step_kwargs = dict()
if "eta" in set(inspect.signature(scheduler.step).parameters.keys()):
extra_step_kwargs.update(
eta=0.0,


@@ -100,7 +100,7 @@ EASING_FUNCTIONS_MAP = {
"BounceInOut": BounceEaseInOut,
}
EASING_FUNCTION_KEYS = Literal[tuple(EASING_FUNCTIONS_MAP.keys())]
EASING_FUNCTION_KEYS = Literal[tuple(list(EASING_FUNCTIONS_MAP.keys()))]
# actually I think for now could just use CollectionOutput (which is list[Any])
@@ -161,7 +161,7 @@ class StepParamEasingInvocation(BaseInvocation):
easing_class = EASING_FUNCTIONS_MAP[self.easing]
if log_diagnostics:
context.services.logger.debug("easing class: " + str(easing_class))
easing_list = []
easing_list = list()
if self.mirror: # "expected" mirroring
# if number of steps is even, squeeze duration down to (number_of_steps)/2
# and create reverse copy of list to append
@@ -178,7 +178,7 @@ class StepParamEasingInvocation(BaseInvocation):
end=self.end_value,
duration=base_easing_duration - 1,
)
base_easing_vals = []
base_easing_vals = list()
for step_index in range(base_easing_duration):
easing_val = easing_function.ease(step_index)
base_easing_vals.append(easing_val)


@@ -5,11 +5,10 @@ from typing import Optional, Tuple
import torch
from pydantic import BaseModel, Field
from invokeai.app.shared.fields import FieldDescriptions
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,


@@ -1,9 +1,8 @@
from invokeai.app.shared.fields import FieldDescriptions
from ...backend.model_management import ModelType, SubModelType
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,


@@ -1,6 +1,6 @@
from pydantic import BaseModel, Field
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.invocations.baseinvocation import FieldDescriptions
class FreeUConfig(BaseModel):


@@ -5,6 +5,7 @@ from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,
@@ -15,7 +16,6 @@ from invokeai.app.invocations.baseinvocation import (
)
from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_RESIZE_VALUES
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.model_management.models.base import BaseModelType


@@ -139,7 +139,7 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
(board_id,),
)
result = cast(list[sqlite3.Row], self._cursor.fetchall())
images = [deserialize_image_record(dict(r)) for r in result]
images = list(map(lambda r: deserialize_image_record(dict(r)), result))
self._cursor.execute(
"""--sql
@@ -167,7 +167,7 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
(board_id,),
)
result = cast(list[sqlite3.Row], self._cursor.fetchall())
image_names = [r[0] for r in result]
image_names = list(map(lambda r: r[0], result))
return image_names
except sqlite3.Error as e:
self._conn.rollback()


@@ -199,7 +199,7 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
)
result = cast(list[sqlite3.Row], self._cursor.fetchall())
boards = [deserialize_board_record(dict(r)) for r in result]
boards = list(map(lambda r: deserialize_board_record(dict(r)), result))
# Get the total number of boards
self._cursor.execute(
@@ -236,7 +236,7 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
)
result = cast(list[sqlite3.Row], self._cursor.fetchall())
boards = [deserialize_board_record(dict(r)) for r in result]
boards = list(map(lambda r: deserialize_board_record(dict(r)), result))
return boards


@@ -55,7 +55,7 @@ class InvokeAISettings(BaseSettings):
"""
cls = self.__class__
type = get_args(get_type_hints(cls)["type"])[0]
field_dict = {type: {}}
field_dict = dict({type: dict()})
for name, field in self.model_fields.items():
if name in cls._excluded_from_yaml():
continue
@@ -64,7 +64,7 @@ class InvokeAISettings(BaseSettings):
)
value = getattr(self, name)
if category not in field_dict[type]:
field_dict[type][category] = {}
field_dict[type][category] = dict()
# keep paths as strings to make it easier to read
field_dict[type][category][name] = str(value) if isinstance(value, Path) else value
conf = OmegaConf.create(field_dict)
@@ -89,7 +89,7 @@ class InvokeAISettings(BaseSettings):
# create an upcase version of the environment in
# order to achieve case-insensitive environment
# variables (the way Windows does)
upcase_environ = {}
upcase_environ = dict()
for key, value in os.environ.items():
upcase_environ[key.upper()] = value


@@ -188,18 +188,18 @@ DEFAULT_MAX_VRAM = 0.5
class Categories(object):
WebServer = {"category": "Web Server"}
Features = {"category": "Features"}
Paths = {"category": "Paths"}
Logging = {"category": "Logging"}
Development = {"category": "Development"}
Other = {"category": "Other"}
ModelCache = {"category": "Model Cache"}
Device = {"category": "Device"}
Generation = {"category": "Generation"}
Queue = {"category": "Queue"}
Nodes = {"category": "Nodes"}
MemoryPerformance = {"category": "Memory/Performance"}
WebServer = dict(category="Web Server")
Features = dict(category="Features")
Paths = dict(category="Paths")
Logging = dict(category="Logging")
Development = dict(category="Development")
Other = dict(category="Other")
ModelCache = dict(category="Model Cache")
Device = dict(category="Device")
Generation = dict(category="Generation")
Queue = dict(category="Queue")
Nodes = dict(category="Nodes")
MemoryPerformance = dict(category="Memory/Performance")
class InvokeAIAppConfig(InvokeAISettings):
@@ -482,7 +482,7 @@ def _find_root() -> Path:
venv = Path(os.environ.get("VIRTUAL_ENV") or ".")
if os.environ.get("INVOKEAI_ROOT"):
root = Path(os.environ["INVOKEAI_ROOT"])
elif any((venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]):
elif any([(venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]]):
root = (venv.parent).resolve()
else:
root = Path("~/invokeai").expanduser().resolve()
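The any(...) hunk above swaps a generator expression for a list comprehension. Both return the same result here, but any() short-circuits when fed a generator, while the list form evaluates every element before the test, as this small sketch with a hypothetical probe() helper shows:

def probe(path):
    print("checking", path)
    return True

any(probe(p) for p in ["a", "b", "c"])    # prints "checking a" only
any([probe(p) for p in ["a", "b", "c"]])  # prints all three, then tests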


@@ -27,7 +27,7 @@ class EventServiceBase:
payload["timestamp"] = get_timestamp()
self.dispatch(
event_name=EventServiceBase.queue_event,
payload={"event": event_name, "data": payload},
payload=dict(event=event_name, data=payload),
)
# Define events here for every event in the system.
@@ -48,18 +48,18 @@ class EventServiceBase:
"""Emitted when there is generation progress"""
self.__emit_queue_event(
event_name="generator_progress",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"node_id": node.get("id"),
"source_node_id": source_node_id,
"progress_image": progress_image.model_dump() if progress_image is not None else None,
"step": step,
"order": order,
"total_steps": total_steps,
},
payload=dict(
queue_id=queue_id,
queue_item_id=queue_item_id,
queue_batch_id=queue_batch_id,
graph_execution_state_id=graph_execution_state_id,
node_id=node.get("id"),
source_node_id=source_node_id,
progress_image=progress_image.model_dump() if progress_image is not None else None,
step=step,
order=order,
total_steps=total_steps,
),
)
def emit_invocation_complete(
@@ -75,15 +75,15 @@ class EventServiceBase:
"""Emitted when an invocation has completed"""
self.__emit_queue_event(
event_name="invocation_complete",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"node": node,
"source_node_id": source_node_id,
"result": result,
},
payload=dict(
queue_id=queue_id,
queue_item_id=queue_item_id,
queue_batch_id=queue_batch_id,
graph_execution_state_id=graph_execution_state_id,
node=node,
source_node_id=source_node_id,
result=result,
),
)
def emit_invocation_error(
@@ -100,16 +100,16 @@ class EventServiceBase:
"""Emitted when an invocation has completed"""
self.__emit_queue_event(
event_name="invocation_error",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"node": node,
"source_node_id": source_node_id,
"error_type": error_type,
"error": error,
},
payload=dict(
queue_id=queue_id,
queue_item_id=queue_item_id,
queue_batch_id=queue_batch_id,
graph_execution_state_id=graph_execution_state_id,
node=node,
source_node_id=source_node_id,
error_type=error_type,
error=error,
),
)
def emit_invocation_started(
@@ -124,14 +124,14 @@ class EventServiceBase:
"""Emitted when an invocation has started"""
self.__emit_queue_event(
event_name="invocation_started",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"node": node,
"source_node_id": source_node_id,
},
payload=dict(
queue_id=queue_id,
queue_item_id=queue_item_id,
queue_batch_id=queue_batch_id,
graph_execution_state_id=graph_execution_state_id,
node=node,
source_node_id=source_node_id,
),
)
def emit_graph_execution_complete(
@@ -140,12 +140,12 @@ class EventServiceBase:
"""Emitted when a session has completed all invocations"""
self.__emit_queue_event(
event_name="graph_execution_state_complete",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
},
payload=dict(
queue_id=queue_id,
queue_item_id=queue_item_id,
queue_batch_id=queue_batch_id,
graph_execution_state_id=graph_execution_state_id,
),
)
def emit_model_load_started(
@@ -162,16 +162,16 @@ class EventServiceBase:
"""Emitted when a model is requested"""
self.__emit_queue_event(
event_name="model_load_started",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"model_name": model_name,
"base_model": base_model,
"model_type": model_type,
"submodel": submodel,
},
payload=dict(
queue_id=queue_id,
queue_item_id=queue_item_id,
queue_batch_id=queue_batch_id,
graph_execution_state_id=graph_execution_state_id,
model_name=model_name,
base_model=base_model,
model_type=model_type,
submodel=submodel,
),
)
def emit_model_load_completed(
@@ -189,19 +189,19 @@ class EventServiceBase:
"""Emitted when a model is correctly loaded (returns model info)"""
self.__emit_queue_event(
event_name="model_load_completed",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"model_name": model_name,
"base_model": base_model,
"model_type": model_type,
"submodel": submodel,
"hash": model_info.hash,
"location": str(model_info.location),
"precision": str(model_info.precision),
},
payload=dict(
queue_id=queue_id,
queue_item_id=queue_item_id,
queue_batch_id=queue_batch_id,
graph_execution_state_id=graph_execution_state_id,
model_name=model_name,
base_model=base_model,
model_type=model_type,
submodel=submodel,
hash=model_info.hash,
location=str(model_info.location),
precision=str(model_info.precision),
),
)
def emit_session_retrieval_error(
@@ -216,14 +216,14 @@ class EventServiceBase:
"""Emitted when session retrieval fails"""
self.__emit_queue_event(
event_name="session_retrieval_error",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"error_type": error_type,
"error": error,
},
payload=dict(
queue_id=queue_id,
queue_item_id=queue_item_id,
queue_batch_id=queue_batch_id,
graph_execution_state_id=graph_execution_state_id,
error_type=error_type,
error=error,
),
)
def emit_invocation_retrieval_error(
@@ -239,15 +239,15 @@ class EventServiceBase:
"""Emitted when invocation retrieval fails"""
self.__emit_queue_event(
event_name="invocation_retrieval_error",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"node_id": node_id,
"error_type": error_type,
"error": error,
},
payload=dict(
queue_id=queue_id,
queue_item_id=queue_item_id,
queue_batch_id=queue_batch_id,
graph_execution_state_id=graph_execution_state_id,
node_id=node_id,
error_type=error_type,
error=error,
),
)
def emit_session_canceled(
@@ -260,12 +260,12 @@ class EventServiceBase:
"""Emitted when a session is canceled"""
self.__emit_queue_event(
event_name="session_canceled",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
},
payload=dict(
queue_id=queue_id,
queue_item_id=queue_item_id,
queue_batch_id=queue_batch_id,
graph_execution_state_id=graph_execution_state_id,
),
)
def emit_queue_item_status_changed(
@@ -277,39 +277,39 @@ class EventServiceBase:
"""Emitted when a queue item's status changes"""
self.__emit_queue_event(
event_name="queue_item_status_changed",
payload={
"queue_id": queue_status.queue_id,
"queue_item": {
"queue_id": session_queue_item.queue_id,
"item_id": session_queue_item.item_id,
"status": session_queue_item.status,
"batch_id": session_queue_item.batch_id,
"session_id": session_queue_item.session_id,
"error": session_queue_item.error,
"created_at": str(session_queue_item.created_at) if session_queue_item.created_at else None,
"updated_at": str(session_queue_item.updated_at) if session_queue_item.updated_at else None,
"started_at": str(session_queue_item.started_at) if session_queue_item.started_at else None,
"completed_at": str(session_queue_item.completed_at) if session_queue_item.completed_at else None,
},
"batch_status": batch_status.model_dump(),
"queue_status": queue_status.model_dump(),
},
payload=dict(
queue_id=queue_status.queue_id,
queue_item=dict(
queue_id=session_queue_item.queue_id,
item_id=session_queue_item.item_id,
status=session_queue_item.status,
batch_id=session_queue_item.batch_id,
session_id=session_queue_item.session_id,
error=session_queue_item.error,
created_at=str(session_queue_item.created_at) if session_queue_item.created_at else None,
updated_at=str(session_queue_item.updated_at) if session_queue_item.updated_at else None,
started_at=str(session_queue_item.started_at) if session_queue_item.started_at else None,
completed_at=str(session_queue_item.completed_at) if session_queue_item.completed_at else None,
),
batch_status=batch_status.model_dump(),
queue_status=queue_status.model_dump(),
),
)
def emit_batch_enqueued(self, enqueue_result: EnqueueBatchResult) -> None:
"""Emitted when a batch is enqueued"""
self.__emit_queue_event(
event_name="batch_enqueued",
payload={
"queue_id": enqueue_result.queue_id,
"batch_id": enqueue_result.batch.batch_id,
"enqueued": enqueue_result.enqueued,
},
payload=dict(
queue_id=enqueue_result.queue_id,
batch_id=enqueue_result.batch.batch_id,
enqueued=enqueue_result.enqueued,
),
)
def emit_queue_cleared(self, queue_id: str) -> None:
"""Emitted when the queue is cleared"""
self.__emit_queue_event(
event_name="queue_cleared",
payload={"queue_id": queue_id},
payload=dict(queue_id=queue_id),
)


@@ -25,7 +25,7 @@ class DiskImageFileStorage(ImageFileStorageBase):
__invoker: Invoker
def __init__(self, output_folder: Union[str, Path]):
self.__cache = {}
self.__cache = dict()
self.__cache_ids = Queue()
self.__max_cache_size = 10 # TODO: get this from config


@@ -90,23 +90,25 @@ class ImageRecordDeleteException(Exception):
IMAGE_DTO_COLS = ", ".join(
[
"images." + c
for c in [
"image_name",
"image_origin",
"image_category",
"width",
"height",
"session_id",
"node_id",
"is_intermediate",
"created_at",
"updated_at",
"deleted_at",
"starred",
]
]
list(
map(
lambda c: "images." + c,
[
"image_name",
"image_origin",
"image_category",
"width",
"height",
"session_id",
"node_id",
"is_intermediate",
"created_at",
"updated_at",
"deleted_at",
"starred",
],
)
)
)
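Both versions of the IMAGE_DTO_COLS construction above yield the same comma-joined, table-qualified column string for SQL; condensed:

cols = ["image_name", "image_origin", "width"]
print(", ".join("images." + c for c in cols))
# -> images.image_name, images.image_origin, images.width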


@@ -263,7 +263,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
if categories is not None:
# Convert the enum values to unique list of strings
category_strings = [c.value for c in set(categories)]
category_strings = list(map(lambda c: c.value, set(categories)))
# Create the correct length of placeholders
placeholders = ",".join("?" * len(category_strings))
@@ -307,7 +307,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
# Build the list of images, deserializing each row
self._cursor.execute(images_query, images_params)
result = cast(list[sqlite3.Row], self._cursor.fetchall())
images = [deserialize_image_record(dict(r)) for r in result]
images = list(map(lambda r: deserialize_image_record(dict(r)), result))
# Set up and execute the count query, without pagination
count_query += query_conditions + ";"
@@ -386,7 +386,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
"""
)
result = cast(list[sqlite3.Row], self._cursor.fetchall())
image_names = [r[0] for r in result]
image_names = list(map(lambda r: r[0], result))
self._cursor.execute(
"""--sql
DELETE FROM images


@@ -21,8 +21,8 @@ class ImageServiceABC(ABC):
_on_deleted_callbacks: list[Callable[[str], None]]
def __init__(self) -> None:
self._on_changed_callbacks = []
self._on_deleted_callbacks = []
self._on_changed_callbacks = list()
self._on_deleted_callbacks = list()
def on_changed(self, on_changed: Callable[[ImageDTO], None]) -> None:
"""Register a callback for when an image is changed"""


@@ -217,16 +217,18 @@ class ImageService(ImageServiceABC):
board_id,
)
image_dtos = [
image_record_to_dto(
image_record=r,
image_url=self.__invoker.services.urls.get_image_url(r.image_name),
thumbnail_url=self.__invoker.services.urls.get_image_url(r.image_name, True),
board_id=self.__invoker.services.board_image_records.get_board_for_image(r.image_name),
workflow_id=self.__invoker.services.workflow_image_records.get_workflow_for_image(r.image_name),
image_dtos = list(
map(
lambda r: image_record_to_dto(
image_record=r,
image_url=self.__invoker.services.urls.get_image_url(r.image_name),
thumbnail_url=self.__invoker.services.urls.get_image_url(r.image_name, True),
board_id=self.__invoker.services.board_image_records.get_board_for_image(r.image_name),
workflow_id=self.__invoker.services.workflow_image_records.get_workflow_for_image(r.image_name),
),
results.items,
)
for r in results.items
]
)
return OffsetPaginatedResults[ImageDTO](
items=image_dtos,


@@ -1,5 +1,5 @@
from abc import ABC
class InvocationProcessorABC(ABC): # noqa: B024
class InvocationProcessorABC(ABC):
pass


@@ -26,7 +26,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
self.__invoker_thread = Thread(
name="invoker_processor",
target=self.__process,
kwargs={"stop_event": self.__stop_event},
kwargs=dict(stop_event=self.__stop_event),
)
self.__invoker_thread.daemon = True # TODO: make async and do not use threads
self.__invoker_thread.start()


@@ -14,7 +14,7 @@ class MemoryInvocationQueue(InvocationQueueABC):
def __init__(self):
self.__queue = Queue()
self.__cancellations = {}
self.__cancellations = dict()
def get(self) -> InvocationQueueItem:
item = self.__queue.get()


@@ -122,7 +122,7 @@ class InvocationStatsService(InvocationStatsServiceBase):
def log_stats(self):
completed = set()
errored = set()
for graph_id, _node_log in self._stats.items():
for graph_id, node_log in self._stats.items():
try:
current_graph_state = self._invoker.services.graph_execution_manager.get(graph_id)
except Exception:
@@ -142,7 +142,7 @@ class InvocationStatsService(InvocationStatsServiceBase):
cache_stats = self._cache_stats[graph_id]
hwm = cache_stats.high_watermark / GIG
tot = cache_stats.cache_size / GIG
loaded = sum(list(cache_stats.loaded_model_sizes.values())) / GIG
loaded = sum([v for v in cache_stats.loaded_model_sizes.values()]) / GIG
logger.info(f"TOTAL GRAPH EXECUTION TIME: {total_time:7.3f}s")
logger.info("RAM used by InvokeAI process: " + "%4.2fG" % self.ram_used + f" ({self.ram_changed:+5.3f}G)")


@@ -15,8 +15,8 @@ class ItemStorageABC(ABC, Generic[T]):
_on_deleted_callbacks: list[Callable[[str], None]]
def __init__(self) -> None:
self._on_changed_callbacks = []
self._on_deleted_callbacks = []
self._on_changed_callbacks = list()
self._on_deleted_callbacks = list()
"""Base item storage class"""


@@ -112,7 +112,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
)
result = self._cursor.fetchall()
items = [self._parse_item(r[0]) for r in result]
items = list(map(lambda r: self._parse_item(r[0]), result))
self._cursor.execute(f"""SELECT count(*) FROM {self._table_name};""")
count = self._cursor.fetchone()[0]
@@ -132,7 +132,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
)
result = self._cursor.fetchall()
items = [self._parse_item(r[0]) for r in result]
items = list(map(lambda r: self._parse_item(r[0]), result))
self._cursor.execute(
f"""SELECT count(*) FROM {self._table_name} WHERE item LIKE ?;""",


@@ -13,8 +13,8 @@ class LatentsStorageBase(ABC):
_on_deleted_callbacks: list[Callable[[str], None]]
def __init__(self) -> None:
self._on_changed_callbacks = []
self._on_deleted_callbacks = []
self._on_changed_callbacks = list()
self._on_deleted_callbacks = list()
@abstractmethod
def get(self, name: str) -> torch.Tensor:


@@ -19,7 +19,7 @@ class ForwardCacheLatentsStorage(LatentsStorageBase):
def __init__(self, underlying_storage: LatentsStorageBase, max_cache_size: int = 20):
super().__init__()
self.__underlying_storage = underlying_storage
self.__cache = {}
self.__cache = dict()
self.__cache_ids = Queue()
self.__max_cache_size = max_cache_size


@@ -33,11 +33,9 @@ class DefaultSessionProcessor(SessionProcessorBase):
self.__thread = Thread(
name="session_processor",
target=self.__process,
kwargs={
"stop_event": self.__stop_event,
"poll_now_event": self.__poll_now_event,
"resume_event": self.__resume_event,
},
kwargs=dict(
stop_event=self.__stop_event, poll_now_event=self.__poll_now_event, resume_event=self.__resume_event
),
)
self.__thread.start()


@@ -129,12 +129,12 @@ class Batch(BaseModel):
return v
model_config = ConfigDict(
json_schema_extra={
"required": [
json_schema_extra=dict(
required=[
"graph",
"runs",
]
}
)
)
@@ -191,8 +191,8 @@ class SessionQueueItemWithoutGraph(BaseModel):
return SessionQueueItemDTO(**queue_item_dict)
model_config = ConfigDict(
json_schema_extra={
"required": [
json_schema_extra=dict(
required=[
"item_id",
"status",
"batch_id",
@@ -203,7 +203,7 @@ class SessionQueueItemWithoutGraph(BaseModel):
"created_at",
"updated_at",
]
}
)
)
@@ -222,8 +222,8 @@ class SessionQueueItem(SessionQueueItemWithoutGraph):
return SessionQueueItem(**queue_item_dict)
model_config = ConfigDict(
json_schema_extra={
"required": [
json_schema_extra=dict(
required=[
"item_id",
"status",
"batch_id",
@@ -235,7 +235,7 @@ class SessionQueueItem(SessionQueueItemWithoutGraph):
"created_at",
"updated_at",
]
}
)
)
@@ -355,7 +355,7 @@ def create_session_nfv_tuples(
for item in batch_datum.items
]
node_field_values_to_zip.append(node_field_values)
data.append(list(zip(*node_field_values_to_zip, strict=True))) # type: ignore [arg-type]
data.append(list(zip(*node_field_values_to_zip))) # type: ignore [arg-type]
# create generator to yield session,nfv tuples
count = 0
@@ -383,7 +383,7 @@ def calc_session_count(batch: Batch) -> int:
for batch_datum in batch_datum_list:
batch_data_items = range(len(batch_datum.items))
to_zip.append(batch_data_items)
data.append(list(zip(*to_zip, strict=True)))
data.append(list(zip(*to_zip)))
data_product = list(product(*data))
return len(data_product) * batch.runs
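On the strict=True removals in the two hunks above: zip() silently truncates to its shortest argument, whereas strict=True (available since Python 3.10) raises ValueError on a length mismatch. A short demonstration:

print(list(zip([1, 2, 3], ["a", "b"])))  # [(1, 'a'), (2, 'b')] - silent truncation
try:
    list(zip([1, 2, 3], ["a", "b"], strict=True))
except ValueError as err:
    print(err)  # zip() argument 2 is shorter than argument 1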


@@ -78,7 +78,7 @@ def create_system_graphs(graph_library: ItemStorageABC[LibraryGraph]) -> list[Li
"""Creates the default system graphs, or adds new versions if the old ones don't match"""
# TODO: Uncomment this when we are ready to fix this up to prevent breaking changes
graphs: list[LibraryGraph] = []
graphs: list[LibraryGraph] = list()
text_to_image = graph_library.get(default_text_to_image_graph_id)


@@ -352,7 +352,7 @@ class Graph(BaseModel):
# Validate that all node ids are unique
node_ids = [n.id for n in self.nodes.values()]
duplicate_node_ids = {node_id for node_id in node_ids if node_ids.count(node_id) >= 2}
duplicate_node_ids = set([node_id for node_id in node_ids if node_ids.count(node_id) >= 2])
if duplicate_node_ids:
raise DuplicateNodeIdError(f"Node ids must be unique, found duplicates {duplicate_node_ids}")
@@ -616,7 +616,7 @@ class Graph(BaseModel):
self, node_path: str, prefix: Optional[str] = None
) -> list[tuple["Graph", Union[str, None], Edge]]:
"""Gets all input edges for a node along with the graph they are in and the graph's path"""
edges = []
edges = list()
# Return any input edges that appear in this graph
edges.extend([(self, prefix, e) for e in self.edges if e.destination.node_id == node_path])
@@ -658,7 +658,7 @@ class Graph(BaseModel):
self, node_path: str, prefix: Optional[str] = None
) -> list[tuple["Graph", Union[str, None], Edge]]:
"""Gets all output edges for a node along with the graph they are in and the graph's path"""
edges = []
edges = list()
# Return any output edges that appear in this graph
edges.extend([(self, prefix, e) for e in self.edges if e.source.node_id == node_path])
@@ -680,8 +680,8 @@ class Graph(BaseModel):
new_input: Optional[EdgeConnection] = None,
new_output: Optional[EdgeConnection] = None,
) -> bool:
inputs = [e.source for e in self._get_input_edges(node_path, "collection")]
outputs = [e.destination for e in self._get_output_edges(node_path, "item")]
inputs = list([e.source for e in self._get_input_edges(node_path, "collection")])
outputs = list([e.destination for e in self._get_output_edges(node_path, "item")])
if new_input is not None:
inputs.append(new_input)
@@ -694,7 +694,7 @@ class Graph(BaseModel):
# Get input and output fields (the fields linked to the iterator's input/output)
input_field = get_output_field(self.get_node(inputs[0].node_id), inputs[0].field)
output_fields = [get_input_field(self.get_node(e.node_id), e.field) for e in outputs]
output_fields = list([get_input_field(self.get_node(e.node_id), e.field) for e in outputs])
# Input type must be a list
if get_origin(input_field) != list:
@@ -713,8 +713,8 @@ class Graph(BaseModel):
new_input: Optional[EdgeConnection] = None,
new_output: Optional[EdgeConnection] = None,
) -> bool:
inputs = [e.source for e in self._get_input_edges(node_path, "item")]
outputs = [e.destination for e in self._get_output_edges(node_path, "collection")]
inputs = list([e.source for e in self._get_input_edges(node_path, "item")])
outputs = list([e.destination for e in self._get_output_edges(node_path, "collection")])
if new_input is not None:
inputs.append(new_input)
@@ -722,16 +722,18 @@ class Graph(BaseModel):
outputs.append(new_output)
# Get input and output fields (the fields linked to the iterator's input/output)
input_fields = [get_output_field(self.get_node(e.node_id), e.field) for e in inputs]
output_fields = [get_input_field(self.get_node(e.node_id), e.field) for e in outputs]
input_fields = list([get_output_field(self.get_node(e.node_id), e.field) for e in inputs])
output_fields = list([get_input_field(self.get_node(e.node_id), e.field) for e in outputs])
# Validate that all inputs are derived from or match a single type
input_field_types = {
t
for input_field in input_fields
for t in ([input_field] if get_origin(input_field) is None else get_args(input_field))
if t != NoneType
} # Get unique types
input_field_types = set(
[
t
for input_field in input_fields
for t in ([input_field] if get_origin(input_field) is None else get_args(input_field))
if t != NoneType
]
) # Get unique types
type_tree = nx.DiGraph()
type_tree.add_nodes_from(input_field_types)
type_tree.add_edges_from([e for e in itertools.permutations(input_field_types, 2) if issubclass(e[1], e[0])])
@@ -759,15 +761,15 @@ class Graph(BaseModel):
"""Returns a NetworkX DiGraph representing the layout of this graph"""
# TODO: Cache this?
g = nx.DiGraph()
g.add_nodes_from(list(self.nodes.keys()))
g.add_edges_from({(e.source.node_id, e.destination.node_id) for e in self.edges})
g.add_nodes_from([n for n in self.nodes.keys()])
g.add_edges_from(set([(e.source.node_id, e.destination.node_id) for e in self.edges]))
return g
def nx_graph_with_data(self) -> nx.DiGraph:
"""Returns a NetworkX DiGraph representing the data and layout of this graph"""
g = nx.DiGraph()
g.add_nodes_from(list(self.nodes.items()))
g.add_edges_from({(e.source.node_id, e.destination.node_id) for e in self.edges})
g.add_nodes_from([n for n in self.nodes.items()])
g.add_edges_from(set([(e.source.node_id, e.destination.node_id) for e in self.edges]))
return g
def nx_graph_flat(self, nx_graph: Optional[nx.DiGraph] = None, prefix: Optional[str] = None) -> nx.DiGraph:
@@ -789,7 +791,7 @@ class Graph(BaseModel):
# TODO: figure out if iteration nodes need to be expanded
unique_edges = {(e.source.node_id, e.destination.node_id) for e in self.edges}
unique_edges = set([(e.source.node_id, e.destination.node_id) for e in self.edges])
g.add_edges_from([(self._get_node_path(e[0], prefix), self._get_node_path(e[1], prefix)) for e in unique_edges])
return g
@@ -841,8 +843,8 @@ class GraphExecutionState(BaseModel):
return v
model_config = ConfigDict(
json_schema_extra={
"required": [
json_schema_extra=dict(
required=[
"id",
"graph",
"execution_graph",
@@ -853,7 +855,7 @@ class GraphExecutionState(BaseModel):
"prepared_source_mapping",
"source_prepared_mapping",
]
}
)
)
def next(self) -> Optional[BaseInvocation]:
@@ -893,7 +895,7 @@ class GraphExecutionState(BaseModel):
source_node = self.prepared_source_mapping[node_id]
prepared_nodes = self.source_prepared_mapping[source_node]
if all(n in self.executed for n in prepared_nodes):
if all([n in self.executed for n in prepared_nodes]):
self.executed.add(source_node)
self.executed_history.append(source_node)
@@ -928,7 +930,7 @@ class GraphExecutionState(BaseModel):
input_collection = getattr(input_collection_prepared_node_output, input_collection_edge.source.field)
self_iteration_count = len(input_collection)
new_nodes: list[str] = []
new_nodes: list[str] = list()
if self_iteration_count == 0:
# TODO: should this raise a warning? It might just happen if an empty collection is input, and should be valid.
return new_nodes
@@ -938,7 +940,7 @@ class GraphExecutionState(BaseModel):
# Create new edges for this iteration
# For collect nodes, this may contain multiple inputs to the same field
new_edges: list[Edge] = []
new_edges: list[Edge] = list()
for edge in input_edges:
for input_node_id in (n[1] for n in iteration_node_map if n[0] == edge.source.node_id):
new_edge = Edge(
@@ -1032,7 +1034,7 @@ class GraphExecutionState(BaseModel):
# Create execution nodes
next_node = self.graph.get_node(next_node_id)
new_node_ids = []
new_node_ids = list()
if isinstance(next_node, CollectInvocation):
# Collapse all iterator input mappings and create a single execution node for the collect invocation
all_iteration_mappings = list(
@@ -1053,10 +1055,7 @@ class GraphExecutionState(BaseModel):
# For every iterator, the parent must either not be a child of that iterator, or must match the prepared iteration for that iterator
# TODO: Handle a node mapping to none
eg = self.execution_graph.nx_graph_flat()
prepared_parent_mappings = [
[(n, self._get_iteration_node(n, g, eg, it)) for n in next_node_parents]
for it in iterator_node_prepared_combinations
] # type: ignore
prepared_parent_mappings = [[(n, self._get_iteration_node(n, g, eg, it)) for n in next_node_parents] for it in iterator_node_prepared_combinations] # type: ignore
# Create execution node for each iteration
for iteration_mappings in prepared_parent_mappings:
@@ -1122,7 +1121,7 @@ class GraphExecutionState(BaseModel):
for edge in input_edges
if edge.destination.field == "item"
]
node.collection = output_collection
setattr(node, "collection", output_collection)
else:
for edge in input_edges:
output_value = getattr(self.results[edge.source.node_id], edge.source.field)
@@ -1202,7 +1201,7 @@ class LibraryGraph(BaseModel):
@field_validator("exposed_inputs", "exposed_outputs")
def validate_exposed_aliases(cls, v: list[Union[ExposedNodeInput, ExposedNodeOutput]]):
if len(v) != len({i.alias for i in v}):
if len(v) != len(set(i.alias for i in v)):
raise ValueError("Duplicate exposed alias")
return v

View File

@@ -1,5 +0,0 @@
"""
This module contains various classes, functions and models which are shared across the app, particularly by invocations.
Lifting these classes, functions and models into this shared module helps to reduce circular imports.
"""

View File

@@ -1,66 +0,0 @@
class FieldDescriptions:
denoising_start = "When to start denoising, expressed a percentage of total steps"
denoising_end = "When to stop denoising, expressed a percentage of total steps"
cfg_scale = "Classifier-Free Guidance scale"
scheduler = "Scheduler to use during inference"
positive_cond = "Positive conditioning tensor"
negative_cond = "Negative conditioning tensor"
noise = "Noise tensor"
clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
unet = "UNet (scheduler, LoRAs)"
vae = "VAE"
cond = "Conditioning tensor"
controlnet_model = "ControlNet model to load"
vae_model = "VAE model to load"
lora_model = "LoRA model to load"
main_model = "Main model (UNet, VAE, CLIP) to load"
sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
sdxl_refiner_model = "SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load"
onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
lora_weight = "The weight at which the LoRA is applied to each model"
compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor"
raw_prompt = "Raw prompt text (no parsing)"
sdxl_aesthetic = "The aesthetic score to apply to the conditioning tensor"
skipped_layers = "Number of layers to skip in text encoder"
seed = "Seed for random number generation"
steps = "Number of steps to run"
width = "Width of output (px)"
height = "Height of output (px)"
control = "ControlNet(s) to apply"
ip_adapter = "IP-Adapter to apply"
t2i_adapter = "T2I-Adapter(s) to apply"
denoised_latents = "Denoised latents tensor"
latents = "Latents tensor"
strength = "Strength of denoising (proportional to steps)"
metadata = "Optional metadata to be saved with the image"
metadata_collection = "Collection of Metadata"
metadata_item_polymorphic = "A single metadata item or collection of metadata items"
metadata_item_label = "Label for this metadata item"
metadata_item_value = "The value for this metadata item (may be any type)"
workflow = "Optional workflow to be saved with the image"
interp_mode = "Interpolation mode"
torch_antialias = "Whether or not to apply antialiasing (bilinear or bicubic only)"
fp32 = "Whether or not to use full float32 precision"
precision = "Precision to use"
tiled = "Processing using overlapping tiles (reduce memory consumption)"
detect_res = "Pixel resolution for detection"
image_res = "Pixel resolution for output image"
safe_mode = "Whether or not to use safe mode"
scribble_mode = "Whether or not to use scribble mode"
scale_factor = "The factor by which to scale"
blend_alpha = (
"Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B."
)
num_1 = "The first number"
num_2 = "The second number"
mask = "The mask to use for the operation"
board = "The board to save the image to"
image = "The image to process"
tile_size = "Tile size"
inclusive_low = "The inclusive low value"
exclusive_high = "The exclusive high value"
decimal_places = "The number of decimal places to round to"
freeu_s1 = 'Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
freeu_s2 = 'Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
freeu_b1 = "Scaling factor for stage 1 to amplify the contributions of backbone features."
freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features."

View File

@@ -59,7 +59,7 @@ def thin_one_time(x, kernels):
def lvmin_thin(x, prunings=True):
y = x
for _i in range(32):
for i in range(32):
y, is_done = thin_one_time(y, lvmin_kernels)
if is_done:
break

View File

@@ -21,11 +21,11 @@ def get_metadata_graph_from_raw_session(session_raw: str) -> Optional[dict]:
# sanity check make sure the graph is at least reasonably shaped
if (
not isinstance(graph, dict)
type(graph) is not dict
or "nodes" not in graph
or not isinstance(graph["nodes"], dict)
or type(graph["nodes"]) is not dict
or "edges" not in graph
or not isinstance(graph["edges"], list)
or type(graph["edges"]) is not list
):
# something has gone terribly awry, return an empty dict
return None

View File

@@ -88,7 +88,7 @@ class PromptFormatter:
t2i = self.t2i
opt = self.opt
switches = []
switches = list()
switches.append(f'"{opt.prompt}"')
switches.append(f"-s{opt.steps or t2i.steps}")
switches.append(f"-W{opt.width or t2i.width}")

View File

@@ -88,7 +88,7 @@ class Txt2Mask(object):
provided image and returns a SegmentedGrayscale object in which the brighter
pixels indicate where the object is inferred to be.
"""
if isinstance(image, str):
if type(image) is str:
image = Image.open(image).convert("RGB")
image = ImageOps.exif_transpose(image)

View File

@@ -40,7 +40,7 @@ class InitImageResizer:
(rw, rh) = (int(scale * im.width), int(scale * im.height))
# round everything to multiples of 64
width, height, rw, rh = (x - x % 64 for x in (width, height, rw, rh))
width, height, rw, rh = map(lambda x: x - x % 64, (width, height, rw, rh))
# no resize necessary, but return a copy
if im.width == width and im.height == height:

View File

@@ -32,7 +32,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS
from huggingface_hub import HfFolder
from huggingface_hub import login as hf_hub_login
from omegaconf import OmegaConf
from pydantic import ValidationError
from pydantic.error_wrappers import ValidationError
from tqdm import tqdm
from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextConfig, CLIPTextModel, CLIPTokenizer
@@ -197,7 +197,7 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
def download_conversion_models():
target_dir = config.models_path / "core/convert"
kwargs = {} # for future use
kwargs = dict() # for future use
try:
logger.info("Downloading core tokenizers and text encoders")
@@ -252,26 +252,26 @@ def download_conversion_models():
def download_realesrgan():
logger.info("Installing ESRGAN Upscaling models...")
URLs = [
{
"url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
"dest": "core/upscaling/realesrgan/RealESRGAN_x4plus.pth",
"description": "RealESRGAN_x4plus.pth",
},
{
"url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
"dest": "core/upscaling/realesrgan/RealESRGAN_x4plus_anime_6B.pth",
"description": "RealESRGAN_x4plus_anime_6B.pth",
},
{
"url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
"dest": "core/upscaling/realesrgan/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
"description": "ESRGAN_SRx4_DF2KOST_official.pth",
},
{
"url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
"dest": "core/upscaling/realesrgan/RealESRGAN_x2plus.pth",
"description": "RealESRGAN_x2plus.pth",
},
dict(
url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
dest="core/upscaling/realesrgan/RealESRGAN_x4plus.pth",
description="RealESRGAN_x4plus.pth",
),
dict(
url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
dest="core/upscaling/realesrgan/RealESRGAN_x4plus_anime_6B.pth",
description="RealESRGAN_x4plus_anime_6B.pth",
),
dict(
url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
dest="core/upscaling/realesrgan/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
description="ESRGAN_SRx4_DF2KOST_official.pth",
),
dict(
url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
dest="core/upscaling/realesrgan/RealESRGAN_x2plus.pth",
description="RealESRGAN_x2plus.pth",
),
]
for model in URLs:
download_with_progress_bar(model["url"], config.models_path / model["dest"], model["description"])
@@ -680,7 +680,7 @@ def default_user_selections(program_opts: Namespace) -> InstallSelections:
if program_opts.default_only
else [models[x].path or models[x].repo_id for x in installer.recommended_models()]
if program_opts.yes_to_all
else [],
else list(),
)

View File

@@ -38,7 +38,6 @@ SAMPLER_CHOICES = [
"k_heun",
"k_lms",
"plms",
"lcm",
]
PRECISION_CHOICES = [

View File

@@ -123,6 +123,8 @@ class MigrateTo3(object):
logger.error(str(e))
except KeyboardInterrupt:
raise
except Exception as e:
logger.error(str(e))
for f in files:
# don't copy raw learned_embeds.bin or pytorch_lora_weights.bin
# let them be copied as part of a tree copy operation
@@ -141,6 +143,8 @@ class MigrateTo3(object):
logger.error(str(e))
except KeyboardInterrupt:
raise
except Exception as e:
logger.error(str(e))
def migrate_support_models(self):
"""
@@ -178,10 +182,10 @@ class MigrateTo3(object):
"""
dest_directory = self.dest_models
kwargs = {
"cache_dir": self.root_directory / "models/hub",
kwargs = dict(
cache_dir=self.root_directory / "models/hub",
# local_files_only = True
}
)
try:
logger.info("Migrating core tokenizers and text encoders")
target_dir = dest_directory / "core" / "convert"
@@ -312,11 +316,11 @@ class MigrateTo3(object):
dest_dir = self.dest_models
cache = self.root_directory / "models/hub"
kwargs = {
"cache_dir": cache,
"safety_checker": None,
kwargs = dict(
cache_dir=cache,
safety_checker=None,
# local_files_only = True,
}
)
owner, repo_name = repo_id.split("/")
model_name = model_name or repo_name

View File

@@ -120,7 +120,7 @@ class ModelInstall(object):
be treated uniformly. It also sorts the models alphabetically
by their name, to improve the display somewhat.
"""
model_dict = {}
model_dict = dict()
# first populate with the entries in INITIAL_MODELS.yaml
for key, value in self.datasets.items():
@@ -134,7 +134,7 @@ class ModelInstall(object):
model_dict[key] = model_info
# supplement with entries in models.yaml
installed_models = list(self.mgr.list_models())
installed_models = [x for x in self.mgr.list_models()]
for md in installed_models:
base = md["base_model"]
@@ -176,7 +176,7 @@ class ModelInstall(object):
# logic here a little reversed to maintain backward compatibility
def starter_models(self, all_models: bool = False) -> Set[str]:
models = set()
for key, _value in self.datasets.items():
for key, value in self.datasets.items():
name, base, model_type = ModelManager.parse_key(key)
if all_models or model_type in [ModelType.Main, ModelType.Vae]:
models.add(key)
@@ -184,7 +184,7 @@ class ModelInstall(object):
def recommended_models(self) -> Set[str]:
starters = self.starter_models(all_models=True)
return {x for x in starters if self.datasets[x].get("recommended", False)}
return set([x for x in starters if self.datasets[x].get("recommended", False)])
def default_model(self) -> str:
starters = self.starter_models()
@@ -234,7 +234,7 @@ class ModelInstall(object):
"""
if not models_installed:
models_installed = {}
models_installed = dict()
model_path_id_or_url = str(model_path_id_or_url).strip("\"' ")
@@ -252,14 +252,10 @@ class ModelInstall(object):
# folders style or similar
elif path.is_dir() and any(
(path / x).exists()
for x in {
"config.json",
"model_index.json",
"learned_embeds.bin",
"pytorch_lora_weights.bin",
"pytorch_lora_weights.safetensors",
}
[
(path / x).exists()
for x in {"config.json", "model_index.json", "learned_embeds.bin", "pytorch_lora_weights.bin"}
]
):
models_installed.update({str(model_path_id_or_url): self._install_path(path)})
@@ -361,7 +357,7 @@ class ModelInstall(object):
for suffix in ["safetensors", "bin"]:
if f"{prefix}pytorch_lora_weights.{suffix}" in files:
location = self._download_hf_model(
repo_id, [f"pytorch_lora_weights.{suffix}"], staging, subfolder=subfolder
repo_id, ["pytorch_lora_weights.bin"], staging, subfolder=subfolder
) # LoRA
break
elif (
@@ -431,17 +427,17 @@ class ModelInstall(object):
rel_path = self.relative_to_root(path, self.config.models_path)
attributes = {
"path": str(rel_path),
"description": str(description),
"model_format": info.format,
}
attributes = dict(
path=str(rel_path),
description=str(description),
model_format=info.format,
)
legacy_conf = None
if info.model_type == ModelType.Main or info.model_type == ModelType.ONNX:
attributes.update(
{
"variant": info.variant_type,
}
dict(
variant=info.variant_type,
)
)
if info.format == "checkpoint":
try:
@@ -472,7 +468,7 @@ class ModelInstall(object):
)
if legacy_conf:
attributes.update({"config": str(legacy_conf)})
attributes.update(dict(config=str(legacy_conf)))
return attributes
def relative_to_root(self, path: Path, root: Optional[Path] = None) -> Path:
@@ -517,7 +513,7 @@ class ModelInstall(object):
def _download_hf_model(self, repo_id: str, files: List[str], staging: Path, subfolder: None) -> Path:
_, name = repo_id.split("/")
location = staging / name
paths = []
paths = list()
for filename in files:
filePath = Path(filename)
p = hf_download_with_resume(

View File

@@ -130,9 +130,7 @@ class IPAttnProcessor2_0(torch.nn.Module):
assert ip_adapter_image_prompt_embeds is not None
assert len(ip_adapter_image_prompt_embeds) == len(self._weights)
for ipa_embed, ipa_weights, scale in zip(
ip_adapter_image_prompt_embeds, self._weights, self._scales, strict=True
):
for ipa_embed, ipa_weights, scale in zip(ip_adapter_image_prompt_embeds, self._weights, self._scales):
# The batch dimensions should match.
assert ipa_embed.shape[0] == encoder_hidden_states.shape[0]
# The token_len dimensions should match.

View File

@@ -56,7 +56,7 @@ class PerceiverAttention(nn.Module):
x = self.norm1(x)
latents = self.norm2(latents)
b, L, _ = latents.shape
b, l, _ = latents.shape
q = self.to_q(latents)
kv_input = torch.cat((x, latents), dim=-2)
@@ -72,7 +72,7 @@ class PerceiverAttention(nn.Module):
weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
out = weight @ v
out = out.permute(0, 2, 1, 3).reshape(b, L, -1)
out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
return self.to_out(out)

View File

@@ -269,7 +269,7 @@ def create_unet_diffusers_config(original_config, image_size: int, controlnet=Fa
resolution *= 2
up_block_types = []
for _i in range(len(block_out_channels)):
for i in range(len(block_out_channels)):
block_type = "CrossAttnUpBlock2D" if resolution in unet_params.attention_resolutions else "UpBlock2D"
up_block_types.append(block_type)
resolution //= 2
@@ -1223,7 +1223,7 @@ def download_from_original_stable_diffusion_ckpt(
# scan model
scan_result = scan_file_path(checkpoint_path)
if scan_result.infected_files != 0:
raise Exception("The model {checkpoint_path} is potentially infected by malware. Aborting import.")
raise "The model {checkpoint_path} is potentially infected by malware. Aborting import."
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
checkpoint = torch.load(checkpoint_path, map_location=device)
@@ -1664,7 +1664,7 @@ def download_controlnet_from_original_ckpt(
# scan model
scan_result = scan_file_path(checkpoint_path)
if scan_result.infected_files != 0:
raise Exception("The model {checkpoint_path} is potentially infected by malware. Aborting import.")
raise "The model {checkpoint_path} is potentially infected by malware. Aborting import."
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
checkpoint = torch.load(checkpoint_path, map_location=device)

View File

@@ -12,7 +12,7 @@ from diffusers.models import UNet2DConditionModel
from safetensors.torch import load_file
from transformers import CLIPTextModel, CLIPTokenizer
from invokeai.app.shared.models import FreeUConfig
from invokeai.app.invocations.shared import FreeUConfig
from .models.lora import LoRAModel
@@ -104,7 +104,7 @@ class ModelPatcher:
loras: List[Tuple[LoRAModel, float]],
prefix: str,
):
original_weights = {}
original_weights = dict()
try:
with torch.no_grad():
for lora, lora_weight in loras:
@@ -166,15 +166,6 @@ class ModelPatcher:
init_tokens_count = None
new_tokens_added = None
# TODO: This is required since Transformers 4.32 see
# https://github.com/huggingface/transformers/pull/25088
# More information by NVIDIA:
# https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc
# This value might need to be changed in the future and take the GPUs model into account as there seem
# to be ideal values for different GPUS. This value is temporary!
# For references to the current discussion please see https://github.com/invoke-ai/InvokeAI/pull/4817
pad_to_multiple_of = 8
try:
# HACK: The CLIPTokenizer API does not include a way to remove tokens after calling add_tokens(...). As a
# workaround, we create a full copy of `tokenizer` so that its original behavior can be restored after
@@ -184,7 +175,7 @@ class ModelPatcher:
# but a pickle roundtrip was found to be much faster (1 sec vs. 0.05 secs).
ti_tokenizer = pickle.loads(pickle.dumps(tokenizer))
ti_manager = TextualInversionManager(ti_tokenizer)
init_tokens_count = text_encoder.resize_token_embeddings(None, pad_to_multiple_of).num_embeddings
init_tokens_count = text_encoder.resize_token_embeddings(None).num_embeddings
def _get_trigger(ti_name, index):
trigger = ti_name
@@ -199,7 +190,7 @@ class ModelPatcher:
new_tokens_added += ti_tokenizer.add_tokens(_get_trigger(ti_name, i))
# modify text_encoder
text_encoder.resize_token_embeddings(init_tokens_count + new_tokens_added, pad_to_multiple_of)
text_encoder.resize_token_embeddings(init_tokens_count + new_tokens_added)
model_embeddings = text_encoder.get_input_embeddings()
for ti_name, ti in ti_list:
@@ -231,7 +222,7 @@ class ModelPatcher:
finally:
if init_tokens_count and new_tokens_added:
text_encoder.resize_token_embeddings(init_tokens_count, pad_to_multiple_of)
text_encoder.resize_token_embeddings(init_tokens_count)
@classmethod
@contextmanager
@@ -242,7 +233,7 @@ class ModelPatcher:
):
skipped_layers = []
try:
for _i in range(clip_skip):
for i in range(clip_skip):
skipped_layers.append(text_encoder.text_model.encoder.layers.pop(-1))
yield
@@ -324,7 +315,7 @@ class TextualInversionManager(BaseTextualInversionManager):
tokenizer: CLIPTokenizer
def __init__(self, tokenizer: CLIPTokenizer):
self.pad_tokens = {}
self.pad_tokens = dict()
self.tokenizer = tokenizer
def expand_textual_inversion_token_ids_if_necessary(self, token_ids: list[int]) -> list[int]:
@@ -385,10 +376,10 @@ class ONNXModelPatcher:
if not isinstance(model, IAIOnnxRuntimeModel):
raise Exception("Only IAIOnnxRuntimeModel models supported")
orig_weights = {}
orig_weights = dict()
try:
blended_loras = {}
blended_loras = dict()
for lora, lora_weight in loras:
for layer_key, layer in lora.layers.items():
@@ -404,7 +395,7 @@ class ONNXModelPatcher:
else:
blended_loras[layer_key] = layer_weight
node_names = {}
node_names = dict()
for node in model.nodes.values():
node_names[node.name.replace("/", "_").replace(".", "_").lstrip("_")] = node.name

View File

@@ -66,13 +66,11 @@ class CacheStats(object):
class ModelLocker(object):
"Forward declaration"
pass
class ModelCache(object):
"Forward declaration"
pass
@@ -134,7 +132,7 @@ class ModelCache(object):
snapshots, so it is recommended to disable this feature unless you are actively inspecting the model cache's
behaviour.
"""
self.model_infos: Dict[str, ModelBase] = {}
self.model_infos: Dict[str, ModelBase] = dict()
# allow lazy offloading only when vram cache enabled
self.lazy_offloading = lazy_offloading and max_vram_cache_size > 0
self.precision: torch.dtype = precision
@@ -149,8 +147,8 @@ class ModelCache(object):
# used for stats collection
self.stats = None
self._cached_models = {}
self._cache_stack = []
self._cached_models = dict()
self._cache_stack = list()
def _capture_memory_snapshot(self) -> Optional[MemorySnapshot]:
if self._log_memory_usage:

View File

@@ -26,5 +26,5 @@ def skip_torch_weight_init():
yield None
finally:
for torch_module, saved_function in zip(torch_modules, saved_functions, strict=True):
for torch_module, saved_function in zip(torch_modules, saved_functions):
torch_module.reset_parameters = saved_function

View File

@@ -363,7 +363,7 @@ class ModelManager(object):
else:
return
self.models = {}
self.models = dict()
for model_key, model_config in config.items():
if model_key.startswith("_"):
continue
@@ -374,7 +374,7 @@ class ModelManager(object):
self.models[model_key] = model_class.create_config(**model_config)
# check config version number and update on disk/RAM if necessary
self.cache_keys = {}
self.cache_keys = dict()
# add controlnet, lora and textual_inversion models from disk
self.scan_models_directory()
@@ -655,7 +655,7 @@ class ModelManager(object):
"""
# TODO: redo
for model_dict in self.list_models():
for _model_name, model_info in model_dict.items():
for model_name, model_info in model_dict.items():
line = f'{model_info["name"]:25s} {model_info["type"]:10s} {model_info["description"]}'
print(line)
@@ -902,7 +902,7 @@ class ModelManager(object):
"""
Write current configuration out to the indicated file.
"""
data_to_save = {}
data_to_save = dict()
data_to_save["__metadata__"] = self.config_meta.model_dump()
for model_key, model_config in self.models.items():
@@ -1034,7 +1034,7 @@ class ModelManager(object):
self.ignore = ignore
def on_search_started(self):
self.new_models_found = {}
self.new_models_found = dict()
def on_model_found(self, model: Path):
if model not in self.ignore:
@@ -1106,7 +1106,7 @@ class ModelManager(object):
# avoid circular import here
from invokeai.backend.install.model_install_backend import ModelInstall
successfully_installed = {}
successfully_installed = dict()
installer = ModelInstall(
config=self.app_config, prediction_type_helper=prediction_type_helper, model_manager=self

View File

@@ -92,7 +92,7 @@ class ModelMerger(object):
**kwargs - the default DiffusionPipeline.get_config_dict kwargs:
cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map
"""
model_paths = []
model_paths = list()
config = self.manager.app_config
base_model = BaseModelType(base_model)
vae = None
@@ -124,13 +124,13 @@ class ModelMerger(object):
dump_path = (dump_path / merged_model_name).as_posix()
merged_pipe.save_pretrained(dump_path, safe_serialization=True)
attributes = {
"path": dump_path,
"description": f"Merge of models {', '.join(model_names)}",
"model_format": "diffusers",
"variant": ModelVariantType.Normal.value,
"vae": vae,
}
attributes = dict(
path=dump_path,
description=f"Merge of models {', '.join(model_names)}",
model_format="diffusers",
variant=ModelVariantType.Normal.value,
vae=vae,
)
return self.manager.add_model(
merged_model_name,
base_model=base_model,

View File

@@ -183,13 +183,12 @@ class ModelProbe(object):
if model:
class_name = model.__class__.__name__
else:
for suffix in ["bin", "safetensors"]:
if (folder_path / f"learned_embeds.{suffix}").exists():
return ModelType.TextualInversion
if (folder_path / f"pytorch_lora_weights.{suffix}").exists():
return ModelType.Lora
if (folder_path / "unet/model.onnx").exists():
return ModelType.ONNX
if (folder_path / "learned_embeds.bin").exists():
return ModelType.TextualInversion
if (folder_path / "pytorch_lora_weights.bin").exists():
return ModelType.Lora
if (folder_path / "image_encoder.txt").exists():
return ModelType.IPAdapter
@@ -237,7 +236,7 @@ class ModelProbe(object):
# scan model
scan_result = scan_file_path(checkpoint)
if scan_result.infected_files != 0:
raise Exception("The model {model_name} is potentially infected by malware. Aborting import.")
raise "The model {model_name} is potentially infected by malware. Aborting import."
# ##################################################3
@@ -454,9 +453,9 @@ class PipelineFolderProbe(FolderProbeBase):
else:
with open(self.folder_path / "scheduler" / "scheduler_config.json", "r") as file:
scheduler_conf = json.load(file)
if scheduler_conf["prediction_type"] == "v_prediction":
if scheduler_conf.get("prediction_type", "epsilon") == "v_prediction":
return SchedulerPredictionType.VPrediction
elif scheduler_conf["prediction_type"] == "epsilon":
elif scheduler_conf.get("prediction_type", "epsilon") == "epsilon":
return SchedulerPredictionType.Epsilon
else:
return None

View File

@@ -59,7 +59,7 @@ class ModelSearch(ABC):
for root, dirs, files in os.walk(path, followlinks=True):
if str(Path(root).name).startswith("."):
self._pruned_paths.add(root)
if any(Path(root).is_relative_to(x) for x in self._pruned_paths):
if any([Path(root).is_relative_to(x) for x in self._pruned_paths]):
continue
self._items_scanned += len(dirs) + len(files)
@@ -69,14 +69,16 @@ class ModelSearch(ABC):
self._scanned_dirs.add(path)
continue
if any(
(path / x).exists()
for x in {
"config.json",
"model_index.json",
"learned_embeds.bin",
"pytorch_lora_weights.bin",
"image_encoder.txt",
}
[
(path / x).exists()
for x in {
"config.json",
"model_index.json",
"learned_embeds.bin",
"pytorch_lora_weights.bin",
"image_encoder.txt",
}
]
):
try:
self.on_model_found(path)

View File

@@ -97,8 +97,8 @@ MODEL_CLASSES = {
# },
}
MODEL_CONFIGS = []
OPENAPI_MODEL_CONFIGS = []
MODEL_CONFIGS = list()
OPENAPI_MODEL_CONFIGS = list()
class OpenAPIModelInfoBase(BaseModel):
@@ -109,7 +109,7 @@ class OpenAPIModelInfoBase(BaseModel):
model_config = ConfigDict(protected_namespaces=())
for _base_model, models in MODEL_CLASSES.items():
for base_model, models in MODEL_CLASSES.items():
for model_type, model_class in models.items():
model_configs = set(model_class._get_configs().values())
model_configs.discard(None)
@@ -133,7 +133,7 @@ for _base_model, models in MODEL_CLASSES.items():
def get_model_config_enums():
enums = []
enums = list()
for model_config in MODEL_CONFIGS:
if hasattr(inspect, "get_annotations"):

View File

@@ -67,6 +67,7 @@ class SubModelType(str, Enum):
VaeEncoder = "vae_encoder"
Scheduler = "scheduler"
SafetyChecker = "safety_checker"
FeatureExtractor = "feature_extractor"
# MoVQ = "movq"
@@ -153,7 +154,7 @@ class ModelBase(metaclass=ABCMeta):
else:
res_type = sys.modules["diffusers"]
res_type = res_type.pipelines
res_type = getattr(res_type, "pipelines")
for subtype in subtypes:
res_type = getattr(res_type, subtype)
@@ -164,7 +165,7 @@ class ModelBase(metaclass=ABCMeta):
with suppress(Exception):
return cls.__configs
configs = {}
configs = dict()
for name in dir(cls):
if name.startswith("__"):
continue
@@ -246,8 +247,8 @@ class DiffusersModel(ModelBase):
def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType):
super().__init__(model_path, base_model, model_type)
self.child_types: Dict[str, Type] = {}
self.child_sizes: Dict[str, int] = {}
self.child_types: Dict[str, Type] = dict()
self.child_sizes: Dict[str, int] = dict()
try:
config_data = DiffusionPipeline.load_config(self.model_path)
@@ -326,8 +327,8 @@ def calc_model_size_by_fs(model_path: str, subfolder: Optional[str] = None, vari
all_files = os.listdir(model_path)
all_files = [f for f in all_files if os.path.isfile(os.path.join(model_path, f))]
fp16_files = {f for f in all_files if ".fp16." in f or ".fp16-" in f}
bit8_files = {f for f in all_files if ".8bit." in f or ".8bit-" in f}
fp16_files = set([f for f in all_files if ".fp16." in f or ".fp16-" in f])
bit8_files = set([f for f in all_files if ".8bit." in f or ".8bit-" in f])
other_files = set(all_files) - fp16_files - bit8_files
if variant is None:
@@ -413,7 +414,7 @@ def _calc_onnx_model_by_data(model) -> int:
def _fast_safetensors_reader(path: str):
checkpoint = {}
checkpoint = dict()
device = torch.device("meta")
with open(path, "rb") as f:
definition_len = int.from_bytes(f.read(8), "little")
@@ -483,7 +484,7 @@ class IAIOnnxRuntimeModel:
class _tensor_access:
def __init__(self, model):
self.model = model
self.indexes = {}
self.indexes = dict()
for idx, obj in enumerate(self.model.proto.graph.initializer):
self.indexes[obj.name] = idx
@@ -524,7 +525,7 @@ class IAIOnnxRuntimeModel:
class _access_helper:
def __init__(self, raw_proto):
self.indexes = {}
self.indexes = dict()
self.raw_proto = raw_proto
for idx, obj in enumerate(raw_proto):
self.indexes[obj.name] = idx
@@ -549,7 +550,7 @@ class IAIOnnxRuntimeModel:
return self.indexes.keys()
def values(self):
return list(self.raw_proto)
return [obj for obj in self.raw_proto]
def __init__(self, model_path: str, provider: Optional[str]):
self.path = model_path

View File

@@ -104,7 +104,7 @@ class ControlNetModel(ModelBase):
return ControlNetModelFormat.Diffusers
if os.path.isfile(path):
if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "pth"]):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "pth"]]):
return ControlNetModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {path}")

View File

@@ -68,12 +68,11 @@ class LoRAModel(ModelBase):
raise ModelNotFoundException()
if os.path.isdir(path):
for ext in ["safetensors", "bin"]:
if os.path.exists(os.path.join(path, f"pytorch_lora_weights.{ext}")):
return LoRAModelFormat.Diffusers
if os.path.exists(os.path.join(path, "pytorch_lora_weights.bin")):
return LoRAModelFormat.Diffusers
if os.path.isfile(path):
if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return LoRAModelFormat.LyCORIS
raise InvalidModelException(f"Not a valid model: {path}")
@@ -87,10 +86,8 @@ class LoRAModel(ModelBase):
base_model: BaseModelType,
) -> str:
if cls.detect_format(model_path) == LoRAModelFormat.Diffusers:
for ext in ["safetensors", "bin"]: # return path to the safetensors file inside the folder
path = Path(model_path, f"pytorch_lora_weights.{ext}")
if path.exists():
return path
# TODO: add diffusers lora when it stabilizes a bit
raise NotImplementedError("Diffusers lora not supported")
else:
return model_path
@@ -462,7 +459,7 @@ class LoRAModelRaw: # (torch.nn.Module):
dtype: Optional[torch.dtype] = None,
):
# TODO: try revert if exception?
for _key, layer in self.layers.items():
for key, layer in self.layers.items():
layer.to(device=device, dtype=dtype)
def calc_size(self) -> int:
@@ -499,7 +496,7 @@ class LoRAModelRaw: # (torch.nn.Module):
stability_unet_keys = list(SDXL_UNET_STABILITY_TO_DIFFUSERS_MAP)
stability_unet_keys.sort()
new_state_dict = {}
new_state_dict = dict()
for full_key, value in state_dict.items():
if full_key.startswith("lora_unet_"):
search_key = full_key.replace("lora_unet_", "")
@@ -545,7 +542,7 @@ class LoRAModelRaw: # (torch.nn.Module):
model = cls(
name=file_path.stem, # TODO:
layers={},
layers=dict(),
)
if file_path.suffix == ".safetensors":
@@ -593,12 +590,12 @@ class LoRAModelRaw: # (torch.nn.Module):
@staticmethod
def _group_state(state_dict: dict):
state_dict_groupped = {}
state_dict_groupped = dict()
for key, value in state_dict.items():
stem, leaf = key.split(".", 1)
if stem not in state_dict_groupped:
state_dict_groupped[stem] = {}
state_dict_groupped[stem] = dict()
state_dict_groupped[stem][leaf] = value
return state_dict_groupped

View File

@@ -110,7 +110,7 @@ class StableDiffusion1Model(DiffusersModel):
return StableDiffusion1ModelFormat.Diffusers
if os.path.isfile(model_path):
if any(model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]):
if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return StableDiffusion1ModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {model_path}")
@@ -221,7 +221,7 @@ class StableDiffusion2Model(DiffusersModel):
return StableDiffusion2ModelFormat.Diffusers
if os.path.isfile(model_path):
if any(model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]):
if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return StableDiffusion2ModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {model_path}")

View File

@@ -71,7 +71,7 @@ class TextualInversionModel(ModelBase):
return None # diffusers-ti
if os.path.isfile(path):
if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "bin"]):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "bin"]]):
return None
raise InvalidModelException(f"Not a valid model: {path}")

View File

@@ -89,7 +89,7 @@ class VaeModel(ModelBase):
return VaeModelFormat.Diffusers
if os.path.isfile(path):
if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return VaeModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {path}")

View File

@@ -0,0 +1,446 @@
# Normalized Model Manager
This is proof-of-principle code that refactors model storage to be
more space efficient and less dependent on the particulars of Stable
Diffusion models. The driving observation is that there is a
significant amount of redundancy in Stable Diffusion models. For
example, the VAE, tokenizer and safety checker are frequently the same
across multiple models derived from the same base models.
The way the normalized model manager works is that when a main
(pipeline) model is ingested, each of its submodels ("vae", "unet" and
so forth) is scanned and hashed using a fast sampling and hashing
algorithm. If the submodel has a hash that hasn't been seen before, it
is copied into a folder within INVOKEAI_ROOT, and we create a new
database entry recording the submodel's path and a reference count of 1.
If the submodel has a hash that has previously been seen, then we
update the database to bump up the submodel's reference count.
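A minimal sketch of this ingest-and-deduplicate flow (an illustration, not the PR's actual implementation) is shown below. It assumes that `db` is a sqlite3 connection to the normalized model database, that the table and column names match the SIMPLE_MODEL schema described later, and that `hash_fn` is the fast hashing step; the `FastModelHash.hash()` classmethod included in this PR plays that role.
```
import shutil
import uuid
from pathlib import Path

def ingest_submodel(db, submodel_path: Path, submodel_type: str, blobs_dir: Path, hash_fn) -> int:
    """Copy a submodel into the blob store, or bump its refcount if already known."""
    digest = hash_fn(submodel_path)
    row = db.execute("SELECT part_id FROM simple_model WHERE hash = ?", (digest,)).fetchone()
    if row:
        # previously seen: share the existing blob and bump its reference count
        db.execute("UPDATE simple_model SET refcount = refcount + 1 WHERE part_id = ?", (row[0],))
        return row[0]
    # first sighting: copy the submodel under a collision-proof random name
    blob = blobs_dir / f"{submodel_type}-{uuid.uuid4()}"
    if submodel_path.is_dir():
        shutil.copytree(submodel_path, blob)
    else:
        shutil.copy2(submodel_path, blob)
    cur = db.execute(
        "INSERT INTO simple_model (type, hash, refcount, path) VALUES (?, ?, 1, ?)",
        (submodel_type, digest, str(blob)),
    )
    return cur.lastrowid
```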
Checkpoint files (.bin, .ckpt and .safetensors) are converted into
diffusers format prior to ingestion. Simple models, such as LoRAs and
standalone VAEs, are imported directly and deduplicated against
previously seen copies. This pays off when a user ingests the same VAE
twice under different names.
Additional database tables map the relationship between main models
and their submodels, and record which base model(s) a submodel is
compatible with.
## Installation and Testing
To test, check out the PR and run `pip install -e .`. This will create
a command called `invokeai-nmm` (for "normalized model
manager"). To ingest a single model:
```
invokeai-nmm ingest my_model.safetensors
```
To ingest a whole directory of models:
```
invokeai-nmm ingest my_models/*
```
These commands will create a sqlite3 database of model data in
`INVOKEAI_ROOT/databases/normalized_models.db`, copy the model data
into a blobs directory under `INVOKEAI_ROOT/model_blobs`, and create
appropriate entries in the database. You can then use the API to
retrieve information on pipelines and submodels.
The `invokeai-nmm` tool has a number of other features, including
listing models and examining pipeline subparts. In addition, it has an
`export` command which will reconstitute a diffusers pipeline by
creating a directory containing symbolic links into the blobs
directory.
Use `invokeai-nmm --help` to get a summary of commands and their
flags.
## Benchmarking
To test the performance of the normalized model system, I ingested an
InvokeAI models directory of 117 different models (35 main models, 52
LoRAs, 9 controlnets, 8 embeddings and miscellaneous others). The
ingestion, which included converting multiple checkpoint models to
diffusers format, took about 2 minutes. Prior to ingestion, the
directory took up 189.5 GB. After ingestion, it was reduced to 160 GB,
an overall 16% reduction in size and a savings of 29 GB.
I was surprised at the relatively modest space savings and checked that
submodels were indeed being shared. They were:
```
sqlite> select part_id,type,refcount from simple_model order by refcount desc,type;
┌─────────┬───────────────────┬──────────┐
│ part_id │ type │ refcount │
├─────────┼───────────────────┼──────────┤
│ 28 │ tokenizer │ 9 │
│ 67 │ feature_extractor │ 7 │
│ 33 │ feature_extractor │ 5 │
│ 38 │ tokenizer │ 5 │
│ 26 │ safety_checker │ 4 │
│ 32 │ safety_checker │ 4 │
│ 37 │ scheduler │ 4 │
│ 29 │ vae │ 3 │
│ 30 │ feature_extractor │ 2 │
│ 72 │ safety_checker │ 2 │
│ 54 │ scheduler │ 2 │
│ 100 │ scheduler │ 2 │
│ 71 │ text_encoder │ 2 │
│ 90 │ text_encoder │ 2 │
│ 99 │ text_encoder_2 │ 2 │
│ 98 │ tokenizer_2 │ 2 │
│ 44 │ vae │ 2 │
│ 73 │ vae │ 2 │
│ 91 │ vae │ 2 │
│ 97 │ vae │ 2 │
│ 1 │ clip_vision │ 1 │
│ 2 │ clip_vision │ 1 │
...
```
As expected, submodels that don't change from model to model, such as
the tokenizer and safety checker, are frequently shared across main
models. So were the VAEs, but less frequently than I expected. On
further inspection, the spread of VAEs was explained by the following
formatting differences:
1. Whether the VAE weights are .bin or .safetensors
2. Whether it is an fp16 or fp32 VAE
3. Actual differences in the VAE's training
Ironically, checkpoint models downloaded from Civitai are more likely
to share submodels than diffusers pipelines directly downloaded from
HuggingFace. This is because the checkpoints pass through a uniform
conversion process, while diffusers downloaded directly from
HuggingFace are more likely to have format-related differences.
## Database tables
This illustrates the database schema.
### SIMPLE_MODEL
This provides the type and path of each fundamental model. The type
can be any of the `ModelType` enum values, including `clip_vision`.
```
┌─────────┬───────────────────┬──────────────────────────────────┬──────────┬──────────────────────────────────────────────────────────────────────────────────────────┐
│ part_id │ type │ hash │ refcount │ path │
├─────────┼───────────────────┼──────────────────────────────────┼──────────┼──────────────────────────────────────────────────────────────────────────────────────────┤
│ 26 │ safety_checker │ 76b420d8f641411021ec1dadca767cf7 │ 4 │ /opt/model_blobs/safety_checker-7214b322-1069-4753-a4d5-fe9e18915ca7 │
│ 28 │ tokenizer │ 44e42c7bf25b5e32e8d7de0b822cf012 │ 9 │ /opt/model_blobs/tokenizer-caeb7f7f-e3db-4d67-8f60-1a4831e1aef2 │
│ 29 │ vae │ c9aa45f52c5d4e15a22677f34436d373 │ 3 │ /opt/model_blobs/vae-7e7d96ee-074f-45dc-8c43-c9902b0d0671 │
│ 30 │ feature_extractor │ 3240f79383fdf6ea7f24bbd5569cb106 │ 2 │ /opt/model_blobs/feature_extractor-a5bb8ceb-2c15-4b7f-bd43-964396440f6c │
│ 32 │ safety_checker │ 2e2f7732cff3349350bc99f3e7ab3998 │ 4 │ /opt/model_blobs/safety_checker-ef70c446-e3a1-445c-b216-d7c4acfdbcda │
└─────────┴───────────────────┴──────────────────────────────────┴──────────┴──────────────────────────────────────────────────────────────────────────────────────────┘
```
The `refcount` field indicates how many pipelines share the fundamental
model. The `path` is where the submodel blob is stored, and uses a
randomly-assigned file/directory name to avoid collisions.
The `type` field is a SQLITE3 ENUM that maps to the values of the
`ModelType` enum.
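Concretely, the table's DDL might look something like this (a hypothetical rendering; SQLite emulates the enum with a TEXT column, and the PR's actual schema may differ):
```
import sqlite3

conn = sqlite3.connect("normalized_models.db")
conn.executescript(
    """
    CREATE TABLE IF NOT EXISTS simple_model (
        part_id  INTEGER PRIMARY KEY AUTOINCREMENT,
        type     TEXT NOT NULL,          -- one of the ModelType values
        hash     TEXT NOT NULL UNIQUE,   -- fast sampling hash of the model
        refcount INTEGER NOT NULL DEFAULT 1,
        path     TEXT NOT NULL           -- randomly named blob location
    );
    """
)
```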
### MODEL_NAME
The MODEL_NAME table stores the name and other metadata of a top-level
model. The same table is used for both simple models (one part only)
and pipeline models (multiple parts).
Note that in the current implementation, the model name is forced to
be unique and serves as the identifier for retrieving models from the
database. This is a simplifying implementation detail;
in a real system the name would be supplemented with some sort of
anonymous key.
Only top-level models are entered into the MODEL_NAME table. The
models contained in subfolders of a pipeline become unnamed anonymous
parts stored in SIMPLE_MODEL and associated with the named model(s)
that use them in the MODEL_PARTS table described next.
An interesting piece of behavior is that the same simple model can be
both anonymous and named. Consider a VAE that is first imported from
the 'vae' folder of a main model. Because it is part of a larger
pipeline, there will be an entry for the VAE in SIMPLE_MODEL with a
refcount of 1, but not in the MODEL_NAME table. However let's say
that, at a later date, the user ingests the same model as a named
standalone VAE. The system will detect that this is the same model,
and will create a named entry to the VAE in MODEL_NAME that identifies
the VAE as its sole part. In SIMPLE_MODEL, the VAE's refcount will be
bumped up to 2. Thus, the same simple model can be retrieved in two
ways: by requesting the "vae" submodel of the named pipeline, or by
requesting it via its standalone name.
The MODEL_NAME table has fields for the model's name, its source, and
description. The `is_pipeline` field is True if the named model is a
pipeline that contains subparts. In the case of a pipeline, then the
`table_of_contents` field will hold a copy of the contents of
`model_index.json`. This is used for the sole purpose of regenerating
a de-normalized diffusers folder from the database.
```
┌──────────┬────────────────────────────────┬────────────────────────────────────────────────────────────┬───────────────────────────────────────────────┬─────────────┬───────────────────┐
│ model_id │ name │ source │ description │ is_pipeline │ table_of_contents │
├──────────┼────────────────────────────────┼────────────────────────────────────────────────────────────┼───────────────────────────────────────────────┼─────────────┼───────────────────┤
│ 1 │ ip_adapter_sd_image_encoder │ /opt/models/any/clip_vision/ip_adapter_sd_image_encoder │ Imported model ip_adapter_sd_image_encoder │ 0 │ │
│ 2 │ ip_adapter_sd_image_encoder_01 │ /opt/models/any/clip_vision/ip_adapter_sd_image_encoder_01 │ Imported model ip_adapter_sd_image_encoder_01 │ 0 │ │
│ 3 │ ip_adapter_sdxl_image_encoder │ /opt/models/any/clip_vision/ip_adapter_sdxl_image_encoder │ Imported model ip_adapter_sdxl_image_encoder │ 0 │ │
│ 4 │ control_v11e_sd15_ip2p │ /opt/models/sd-1/controlnet/control_v11e_sd15_ip2p │ Imported model control_v11e_sd15_ip2p │ 0 │ │
│ 5 │ control_v11e_sd15_shuffle │ /opt/models/sd-1/controlnet/control_v11e_sd15_shuffle │ Imported model control_v11e_sd15_shuffle │ 0 │ │
│ 6 │ control_v11f1e_sd15_tile │ /opt/models/sd-1/controlnet/control_v11f1e_sd15_tile │ Imported model control_v11f1e_sd15_tile │ 0 │ │
│ 7 │ control_v11f1p_sd15_depth │ /opt/models/sd-1/controlnet/control_v11f1p_sd15_depth │ Imported model control_v11f1p_sd15_depth │ 0 │ │
│ 8 │ control_v11p_sd15_canny │ /opt/models/sd-1/controlnet/control_v11p_sd15_canny │ Imported model control_v11p_sd15_canny │ 0 │ │
│ 9 │ control_v11p_sd15_inpaint │ /opt/models/sd-1/controlnet/control_v11p_sd15_inpaint │ Imported model control_v11p_sd15_inpaint │ 0 │ │
│ 10 │ control_v11p_sd15_lineart │ /opt/models/sd-1/controlnet/control_v11p_sd15_lineart │ Imported model control_v11p_sd15_lineart │ 0 │ │
└──────────┴────────────────────────────────┴────────────────────────────────────────────────────────────┴───────────────────────────────────────────────┴─────────────┴───────────────────┘
```
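Resolving a name is then a single lookup (a sketch reusing the hypothetical `conn` from the SIMPLE_MODEL example; column names as shown above):
```
row = conn.execute(
    "SELECT model_id, is_pipeline FROM model_name WHERE name = ?",
    ("stable-diffusion-v1-5",),
).fetchone()
if row is not None:
    model_id, is_pipeline = row
    # if is_pipeline is true, the parts are gathered via MODEL_PARTS (next section)
```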
### MODEL_PARTS
The MODEL_PARTS table maps the `model_id` field from MODEL_NAME to the
`part_id` field of SIMPLE_MODEL, as shown below. The `part_name` field
contains the subfolder name that the part was located in at model
ingestion time.
There is not exactly a one-to-one correspondence between the
MODEL_PARTS `part_name` and the SIMPLE_MODEL `type` fields. For
example, SDXL models have part_names of `text_encoder` and
`text_encoder_2`, both of which point to a simple model of type
`text_encoder`.
For one-part models such as LoRAs, the `part_name` is `root`.
```
┌──────────┬─────────┬───────────────────┐
│ model_id │ part_id │ part_name │
├──────────┼─────────┼───────────────────┤
│ 6 │ 6 │ root │
│ 25 │ 25 │ unet │
│ 25 │ 26 │ safety_checker │
│ 25 │ 27 │ text_encoder │
│ 25 │ 28 │ tokenizer │
│ 25 │ 29 │ vae │
│ 25 │ 30 │ feature_extractor │
│ 25 │ 31 │ scheduler │
│ 26 │ 32 │ safety_checker │
│ 26 │ 33 │ feature_extractor │
│ 26 │ 34 │ unet │
└──────────┴─────────┴───────────────────┘
```
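Putting the three tables together, the join that recovers a pipeline's parts might look like this (hypothetical; the manager's internal queries may differ):
```
parts = conn.execute(
    """
    SELECT mp.part_name, sm.type, sm.refcount, sm.path
    FROM model_name mn
    JOIN model_parts mp ON mp.model_id = mn.model_id
    JOIN simple_model sm ON sm.part_id = mp.part_id
    WHERE mn.name = ?
    """,
    ("stable-diffusion-v1-5",),
).fetchall()
for part_name, part_type, refcount, path in parts:
    print(f"{part_name}: {part_type} (refcount={refcount}) -> {path}")
```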
### MODEL_BASE
The MODEL_BASE table maps simple models to the base models that they
are compatible with. A simple model may be compatible with one base
only (e.g. an SDXL-based `unet`); it may be compatible with multiple
bases (e.g. a VAE that works with either `sd-1` or `sd-2`); or it may
be compatible with all models (e.g. a `clip_vision` model).
This table has two fields, the `part_id` and the `base` it is
compatible with. The base is a SQLITE ENUM that corresponds to the
`BaseModelType` enum.
```
sqlite> select * from model_base limit 8;
┌─────────┬──────────────┐
│ part_id │ base │
├─────────┼──────────────┤
│ 1 │ sd-1 │
│ 1 │ sd-2 │
│ 1 │ sdxl │
│ 1 │ sdxl-refiner │
│ 2 │ sd-1 │
│ 2 │ sd-2 │
│ 2 │ sdxl │
│ 2 │ sdxl-refiner │
└─────────┴──────────────┘
```
At ingestion time, the MODEL_BASE table is populated using the
following algorithm (a sketch in code follows the list):
1. If the ingested model is a multi-part pipeline, then each of its
parts is assigned the base determined by probing the pipeline as a
whole.
2. If the ingested model is a single-part simple model, then its part
is assigned to the base returned by probing the simple model.
3. Any models that return `BaseModelType.Any` at probe time will be
assigned to all four of the base model types as shown in the
example above.
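Here is a sketch of these rules (assuming a `UNIQUE(part_id, base)` constraint on MODEL_BASE so that repeated insertions are ignored; `probed_base` stands for the value returned by the probe):
```
ALL_BASES = {"sd-1", "sd-2", "sdxl", "sdxl-refiner"}

def assign_bases(conn, part_ids, probed_base):
    # BaseModelType.Any expands to all four concrete bases
    bases = ALL_BASES if probed_base == "any" else {probed_base}
    for part_id in part_ids:
        for base in bases:
            conn.execute(
                "INSERT OR IGNORE INTO model_base (part_id, base) VALUES (?, ?)",
                (part_id, base),
            )
```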
Interestingly, the table will "learn" when the same simple model is
compatible with multiple bases. Consider a sequence of events in which
the user ingests an sd-1 model containing a VAE. The VAE will
initially get a single row in the MODEL_BASE table with base
"sd-1". Next the user ingests an sd-2 model that contains the same
VAE. The system will recognize that the same VAE is being used for a
model with a different base, and will add a new row to the table
indicating that this VAE is compatible with either sd-1 or sd-2.
When retrieving information about a multipart pipeline through the
API, the system intersects the base compatibilities of all of the
pipeline's components to find the set of base(s) that every subpart
supports.
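In Python terms, that retrieval step reduces to a set intersection, for example:
```
from functools import reduce

def pipeline_bases(part_bases):
    """Intersect the per-part base sets to get the pipeline's bases."""
    return reduce(set.intersection, part_bases) if part_bases else set()

# a unet compatible with {"sdxl"} and a vae compatible with {"sd-1", "sdxl"}
# leave the pipeline as a whole compatible with {"sdxl"} only
print(pipeline_bases([{"sdxl"}, {"sd-1", "sdxl"}]))
```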
## The API
Initialization will look something like this:
```
from pathlib import Path
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.normalized_mm.normalized_model_manager import NormalizedModelManager
config = InvokeAIAppConfig.get_config()
config.parse_args()
nmm = NormalizedModelManager(config)
```
At the current time, the InvokeAIAppConfig object is used only to
locate the root directory path and the location of the `databases`
subdirectory.
## Ingesting a model
Apply the `ingest()` method to a checkpoint or diffusers folder Path
and an optional model name. If the model name isn't provided, then it
will be derived from the stem of the ingested filename/folder.
```
model_config = nmm.ingest(
Path('/tmp/models/slick_anime.safetensors'),
name="Slick Anime",
)
```
Depending on what is being ingested, the call will return either a
`SimpleModelConfig` or a `PipelineConfig` object, which are slightly
different from each other:
```
@dataclass
class SimpleModelConfig:
"""Submodel name, description, type and path."""
name: str
description: str
base_models: Set[BaseModelType]
type: ExtendedModelType
path: Path
@dataclass
class PipelineConfig:
"""Pipeline model name, description, type and parts."""
name: str
description: str
base_models: Set[BaseModelType]
parts: Dict[str, ModelPart] # part_name -> ModelPart
@dataclass
class ModelPart:
"""Type and path of a pipeline submodel."""
type: ExtendedModelType
path: Path
refcount: int
```
For more control, you can directly call the `ingest_pipeline_model()`
or `ingest_simple_model()` methods, which operate on multi-part
pipelines and single-part models respectively.
Note that the `ExtendedModelType` class is an enum created from the
union of the current model manager's `ModelType` and
`SubModelType`. This was necessary to support the SIMPLE_MODEL table's
`type` field.
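One plausible way to build such a union enum (not necessarily how this PR constructs it) is the functional `Enum` API:
```
from enum import Enum

from invokeai.backend.model_management.models import ModelType, SubModelType

ExtendedModelType = Enum(
    "ExtendedModelType",
    {member.name: member.value for member in [*ModelType, *SubModelType]},
    type=str,
)
```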
## Fetching a model
To fetch a simple model, call `get_model()` with the name of the model
and optionally its part_name. This returns a `SimpleModelConfig` object.
```
model_info = nmm.get_model(name='stable-diffusion-v1-5', part='unet')
print(model_info.path)
print(model_info.description)
print(model_info.base_models)
```
If the model only has one part, leave out the `part` argument, or use
`part='root'`:
```
model_info = nmm.get_model(name='detail_slider_v1')
```
To fetch information about a pipeline model, call `get_pipeline()`:
```
model_info = nmm.get_pipeline('stable-diffusion-v1-5')
for part_name, part in model_info.parts.items():
print(f'{part_name} is located at {part.path}')
```
This returns a `PipelineConfig` object, which you can then interrogate
to get the model's name, description, list of base models it is
compatible with, and its parts. The latter is a dict mapping the
part_name (the original subfolder name) to a `ModelPart` object that
contains the part's type, refcount and path.
## Exporting a model
To export a model back into its native format (diffusers for main,
safetensors for other types), use `export_pipeline`:
```
nmm.export_pipeline(name='stable-diffusion-v1-5', destination='/path/to/export/folder')
```
The model will be exported as a folder at
`/path/to/export/folder/stable-diffusion-v1-5`. It will contain a copy
of the original `model_index.json` file, and a series of symbolic
links pointing into the model blobs directory for each of the
subfolders.
Despite its name, `export_pipeline()` works as expected with simple
models as well.
## Listing models in the database
There is currently a `list_models()` method that retrieves a list of
all the **named** models in the database. It doesn't yet provide any
way of filtering by name, type or base compatibility, but these would
be easy to add in the future.
`list_models()` returns a list of `ModelListing` objects:
```
@dataclass
class ModelListing:
"""Slightly simplified object for generating listings."""
name: str
description: str
source: str
type: ModelType
base_models: Set[BaseModelType]
```
An alternative implementation might return a list of
Union[SimpleModelConfig, PipelineConfig], but it seemed cleanest to
return a uniform list.
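Until filtering is built in, it is easy to filter the listing client-side, e.g. (assuming the `nmm` manager created earlier and the existing `ModelType` enum):
```
from invokeai.backend.model_management.models import ModelType

loras = [m for m in nmm.list_models() if m.type == ModelType.Lora]
for listing in loras:
    bases = ", ".join(base.value for base in listing.base_models)
    print(f"{listing.name:30s} {bases}")
```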
## Deleting models
Model deletion is not currently fully implemented. When implemented,
deletion of a named model will decrement the refcount of each of its
subparts and then delete parts whose refcount has reached zero. The
appropriate triggers for incrementing and decrementing the refcount
have already been implemented in the database schema.
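For illustration, a trigger along these lines would handle the decrement-and-reap bookkeeping (hypothetical; the schema shipped with this PR defines its own triggers, which may differ):
```
conn.executescript(
    """
    CREATE TRIGGER IF NOT EXISTS decrement_part_refcount
    AFTER DELETE ON model_parts
    BEGIN
        UPDATE simple_model
           SET refcount = refcount - 1
         WHERE part_id = old.part_id;
        DELETE FROM simple_model
         WHERE part_id = old.part_id AND refcount <= 0;
    END;
    """
)
```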

View File

@@ -0,0 +1,108 @@
#!/usr/bin/env python
import argparse
import sys
from pathlib import Path
from typing import Optional
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.normalized_mm.normalized_model_manager import (
DuplicateModelException,
InvalidModelException,
ModelNotFoundException,
NormalizedModelManager,
)
config: InvokeAIAppConfig = InvokeAIAppConfig.get_config()
model_manager: Optional[NormalizedModelManager] = None
def list_parts(args):
try:
model = model_manager.get_pipeline(args.model_name)
print(f"Components of model {args.model_name}:")
print(f" {'ROLE':20s} {'TYPE':20s} {'REFCOUNT':8} PATH")
for role, part in model.parts.items():
print(f" {role:20s} {part.type:20s} {part.refcount:4d} {part.path}")
except ModelNotFoundException:
print(f"{args.model_name}: model not found")
def list_models(args):
model_list = model_manager.list_models()
print(f"{'NAME':30s} {'TYPE':10s} {'BASE(S)':10s} {'DESCRIPTION':40s} ORIGINAL SOURCE")
for model in model_list:
print(
f"{model.name:30s} {model.type.value:10s} {', '.join([x.value for x in model.base_models]):10s} {model.description:40s} {model.source}"
)
def ingest_models(args):
for path in args.model_paths:
try:
print(f"ingesting {path}...", end="")
model_manager.ingest(path)
print("success.")
except (OSError, InvalidModelException, DuplicateModelException) as e:
print(f"FAILED: {e}")
def export_model(args):
print(f"exporting {args.model_name} to {args.destination}...", end="")
try:
model_manager.export_pipeline(args.model_name, args.destination)
print("success.")
except (OSError, ModelNotFoundException, InvalidModelException) as e:
print(f"FAILED: {e}")
def main():
global model_manager
global config
parser = argparse.ArgumentParser(description="Normalized model manager util")
parser.add_argument("--root_dir", dest="root", type=str, default=None, help="path to INVOKEAI_ROOT")
subparsers = parser.add_subparsers(help="commands")
parser_ingest = subparsers.add_parser("ingest", help="ingest checkpoint or diffusers models")
parser_ingest.add_argument("model_paths", type=Path, nargs="+", help="paths to one or more models to be ingested")
parser_ingest.set_defaults(func=ingest_models)
parser_export = subparsers.add_parser("export", help="export a pipeline to indicated directory")
parser_export.add_argument(
"model_name",
type=str,
help="name of model to export",
)
parser_export.add_argument(
"destination",
type=Path,
help="path to destination to export pipeline to",
)
parser_export.set_defaults(func=export_model)
parser_list = subparsers.add_parser("list", help="list models")
parser_list.set_defaults(func=list_models)
parser_listparts = subparsers.add_parser("list-parts", help="list the parts of a pipeline model")
parser_listparts.add_argument(
"model_name",
type=str,
help="name of pipeline model to list parts of",
)
parser_listparts.set_defaults(func=list_parts)
if len(sys.argv) <= 1:
sys.argv.append("--help")
args = parser.parse_args()
if args.root:
config.parse_args(["--root", args.root])
else:
config.parse_args([])
model_manager = NormalizedModelManager(config)
args.func(args)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,66 @@
# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Development Team
"""
Fast hashing of diffusers and checkpoint-style models.
Usage:
from invokeai.backend.normalized_mm.hash import FastModelHash
>>> FastModelHash.hash('/home/models/stable-diffusion-v1.5')
'a8e693a126ea5b831c96064dc569956f'
"""
import hashlib
import os
from pathlib import Path
from typing import Dict, Union
from imohash import hashfile
from invokeai.backend.model_management.models import InvalidModelException
class FastModelHash(object):
"""FastModelHash obect provides one public class method, hash()."""
@classmethod
def hash(cls, model_location: Union[str, Path]) -> str:
"""
Return hexdigest string for model located at model_location.
:param model_location: Path to the model
"""
model_location = Path(model_location)
if model_location.is_file():
return cls._hash_file(model_location)
elif model_location.is_dir():
return cls._hash_dir(model_location)
else:
raise InvalidModelException(f"Not a valid file or directory: {model_location}")
@classmethod
def _hash_file(cls, model_location: Union[str, Path]) -> str:
"""
Fasthash a single file and return its hexdigest.
:param model_location: Path to the model file
"""
# we return md5 hash of the filehash to make it shorter
# cryptographic security not needed here
return hashlib.md5(hashfile(model_location)).hexdigest()
@classmethod
def _hash_dir(cls, model_location: Union[str, Path]) -> str:
components: Dict[Path, str] = {}
for root, dirs, files in os.walk(model_location):
for file in files:
path = Path(root) / file
if path.name == "config.json": # don't use - varies according to diffusers version
continue
fast_hash = cls._hash_file(path.as_posix())
components.update({path: fast_hash})
# hash all the model hashes together, using alphabetic file order
md5 = hashlib.md5()
for path, fast_hash in sorted(components.items()):
md5.update(fast_hash.encode("utf-8"))
return md5.hexdigest()

View File

@@ -0,0 +1,601 @@
import sqlite3
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from shutil import copy, copytree
from tempfile import TemporaryDirectory
from typing import Dict, List, Optional, Set, Tuple, Union
from uuid import uuid4
from diffusers import StableDiffusionInpaintPipeline, StableDiffusionPipeline
from invokeai.app.services.config import InvokeAIAppConfig
from ..model_management import BaseModelType, DuplicateModelException, ModelNotFoundException, ModelType, SubModelType
from ..model_management.convert_ckpt_to_diffusers import convert_ckpt_to_diffusers
from ..model_management.model_probe import InvalidModelException, ModelProbe, ModelVariantType
from ..util.devices import choose_torch_device, torch_dtype
from .hash import FastModelHash
# We create a new enumeration for model types
model_types = {x.name: x.value for x in ModelType}
model_types.update({x.name: x.value for x in SubModelType})
ExtendedModelType = Enum("ExtendedModelType", model_types, type=str)
# Turn into a SQL enum
MODEL_TYPES = {x.value for x in ExtendedModelType}
MODEL_SQL_ENUM = ",".join([f'"{x}"' for x in MODEL_TYPES])
# And the same for base model types
BASE_TYPES = {x.value for x in BaseModelType}
BASE_SQL_ENUM = ",".join([f'"{x}"' for x in BASE_TYPES])
@dataclass
class ModelPart:
"""Type and path of a pipeline submodel."""
type: ExtendedModelType
path: Path
refcount: int
@dataclass
class SimpleModelConfig:
"""Submodel name, description, type and path."""
name: str
description: str
base_models: Set[BaseModelType]
type: ExtendedModelType
path: Path
@dataclass
class PipelineConfig:
"""Pipeline model name, description, type and parts."""
name: str
description: str
base_models: Set[BaseModelType]
parts: Dict[str, ModelPart]
@dataclass
class ModelListing:
"""Slightly simplified object for generating listings."""
name: str
description: str
source: str
type: ModelType
base_models: Set[BaseModelType]
class NormalizedModelManager:
_conn: sqlite3.Connection
_cursor: sqlite3.Cursor
_blob_directory: Path
def __init__(self, config: InvokeAIAppConfig):
database_file = config.db_path.parent / "normalized_models.db"
Path(database_file).parent.mkdir(parents=True, exist_ok=True)
self._conn = sqlite3.connect(database_file, check_same_thread=True)
self._conn.row_factory = sqlite3.Row
self._conn.isolation_level = "DEFERRED"
self._cursor = self._conn.cursor()
self._blob_directory = config.root_path / "model_blobs"
self._blob_directory.mkdir(parents=True, exist_ok=True)
self._conn.execute("PRAGMA foreign_keys = ON;")
self._create_tables()
self._conn.commit()
def ingest(self, model_path: Path, name: Optional[str] = None) -> Union[SimpleModelConfig, PipelineConfig]:
"""Ingest a simple or pipeline model into the normalized models database."""
model_path = model_path.absolute()
info = ModelProbe.probe(model_path)
if info.model_type == ModelType.Main:
return self.ingest_pipeline_model(model_path, name)
else:
return self.ingest_simple_model(model_path, name)
def ingest_simple_model(self, model_path: Path, name: Optional[str] = None) -> SimpleModelConfig:
"""Insert a simple one-part model, returning its config."""
model_name = name or model_path.stem
model_hash = FastModelHash.hash(model_path)
try:
# retrieve or create the single part that goes into this model
part_id = self._lookup_part_by_hash(model_hash) or self._install_part(model_hash, model_path)
# create the model name/source entry
self._cursor.execute(
"""--sql
INSERT INTO model_name (
name, source, description, is_pipeline
)
VALUES (?, ?, ?, 0);
""",
(model_name, model_path.as_posix(), f"Imported model {model_name}"),
)
# associate the part with the model
model_id = self._cursor.lastrowid
self._cursor.execute(
"""--sql
INSERT INTO model_parts (
model_id, part_id
)
VALUES (?, ?);
""",
(
model_id,
part_id,
),
)
self._conn.commit()
except sqlite3.Error as e:
self._conn.rollback()
if isinstance(e, sqlite3.IntegrityError):
raise DuplicateModelException(f"a model named {model_name} is already in the database") from e
else:
raise e
return self.get_model(model_name)
def ingest_pipeline_model(self, model_path: Path, name: Optional[str] = None) -> PipelineConfig:
"""Insert the components of a diffusers pipeline."""
if model_path.is_file(): # convert to diffusers before ingesting
name = name or model_path.stem
with TemporaryDirectory() as tmp_dir:
_convert_ckpt(model_path, Path(tmp_dir))
result = self._ingest_pipeline_model(Path(tmp_dir), name, source=model_path)
return result
else:
return self._ingest_pipeline_model(model_path, name)
def _ingest_pipeline_model(
self, model_path: Path, name: Optional[str] = None, source: Optional[Path] = None
) -> PipelineConfig:
"""Insert the components of a diffusers pipeline."""
model_name = name or model_path.stem
model_index = model_path / "model_index.json"
assert (
model_index.exists()
), f"{model_path} does not look like a diffusers model: model_index.json is missing"  # check that it is a diffusers folder
with open(model_index, "r") as file:
toc = file.read()
base_type = ModelProbe.probe(model_path).base_type
source = source or model_path
try:
# create a name entry for the pipeline and insert its table of contents
self._cursor.execute(
"""--sql
INSERT INTO model_name (
name, source, description, is_pipeline, table_of_contents
)
VALUES(?, ?, ?, "1", ?);
""",
(model_name, source.as_posix(), f"Normalized pipeline {model_name}", toc),
)
pipeline_id = self._cursor.lastrowid
# now we create or retrieve each of the parts
subdirectories = [x for x in model_path.iterdir() if x.is_dir()]
parts_to_insert = []
bases_to_insert = []
for submodel in subdirectories:
part_name = submodel.stem
part_path = submodel
part_hash = FastModelHash.hash(part_path)
part_id = self._lookup_part_by_hash(part_hash) or self._install_part(part_hash, part_path, {base_type})
parts_to_insert.append((pipeline_id, part_id, part_name))
bases_to_insert.append((part_id, base_type.value))
# insert the parts into the part list
self._cursor.executemany(
"""--sql
INSERT INTO model_parts (
model_id, part_id, part_name
)
VALUES(?, ?, ?);
""",
parts_to_insert,
)
# update the base types - over time each simple model will get tagged
# with all the base types of any pipelines that use it, which is a feature... I think?
self._cursor.executemany(
"""--sql
INSERT OR IGNORE INTO model_base (
part_id, base
)
VALUES(?, ?);
""",
bases_to_insert,
)
self._conn.commit()
except sqlite3.Error as e:
self._conn.rollback()
if isinstance(e, sqlite3.IntegrityError):
raise DuplicateModelException(f"a model named {model_name} is already in the database") from e
else:
raise e
return self.get_pipeline(model_name)
# in this p-o-p implementation, we assume that the model name is unique
def get_model(self, name: str, part: Optional[str] = "root") -> SimpleModelConfig:
"""Fetch a simple model. Use optional `part` to specify the diffusers subfolder."""
self._cursor.execute(
"""--sql
SELECT a.source, a.description, c.type, b.part_name, c.path, d.base
FROM model_name as a,
model_parts as b,
simple_model as c,
model_base as d
WHERE a.name=?
AND a.model_id=b.model_id
AND b.part_id=c.part_id
AND b.part_id=d.part_id
AND b.part_name=?
""",
(name, part),
)
rows = self._cursor.fetchall()
if len(rows) == 0:
raise ModelNotFoundException
bases: Set[BaseModelType] = {BaseModelType(x["base"]) for x in rows}
return SimpleModelConfig(
name=name,
description=rows[0]["description"],
base_models=bases,
type=ExtendedModelType(rows[0]["type"]),
path=Path(rows[0]["path"]),
)
# in this p-o-p implementation, we assume that the model name is unique
def get_pipeline(self, name: str) -> PipelineConfig:
"""Fetch a pipeline model."""
self._cursor.execute(
"""--sql
SELECT a.source, a.description, c.type, b.part_name, c.path, d.base, c.refcount
FROM model_name as a,
model_parts as b,
simple_model as c,
model_base as d
WHERE a.name=?
AND a.model_id=b.model_id
AND b.part_id=c.part_id
AND b.part_id=d.part_id
""",
(name,),
)
rows = self._cursor.fetchall()
if len(rows) == 0:
raise ModelNotFoundException
# Find the intersection of base models supported by each part.
# Need a more pythonic way of doing this!
bases: Dict[str, Set] = dict()
base_union: Set[BaseModelType] = set()
parts = dict()
for row in rows:
part_name = row["part_name"]
base = row["base"]
if not bases.get(part_name):
bases[part_name] = set()
bases[part_name].add(base)
base_union.add(base)
parts[part_name] = ModelPart(row["type"], row["path"], row["refcount"])
for base_set in bases.values():
base_union = base_union.intersection(base_set)
return PipelineConfig(
name=name,
description=rows[0]["description"],
base_models={BaseModelType(x) for x in base_union},
parts=parts,
)
def list_models(self) -> List[ModelListing]:
"""Get a listing of models. No filtering implemented yet."""
# get simple models first
self._cursor.execute(
"""--sql
SELECT name, source, is_pipeline
FROM model_name;
""",
(),
)
results: List[ModelListing] = []
for row in self._cursor.fetchall():
if row["is_pipeline"]:
pipeline = self.get_pipeline(row["name"])
results.append(
ModelListing(
name=pipeline.name,
description=pipeline.description,
source=row["source"],
type=ModelType.Main,
base_models=pipeline.base_models,
)
)
else:
model = self.get_model(row["name"])
results.append(
ModelListing(
name=model.name,
description=model.description,
source=row["source"],
type=model.type,
base_models=model.base_models,
)
)
return results
def export_pipeline(self, name: str, destination: Path) -> Path:
"""Reconstruction the pipeline as a set of symbolic links in folder indicated by destination."""
# get the model_index.json file (the "toc")
self._cursor.execute(
"""--sql
SELECT table_of_contents, is_pipeline
FROM model_name
WHERE name=?
""",
(name,),
)
row = self._cursor.fetchone()
if row is None:
raise ModelNotFoundException
# if the destination exists and is a directory, then we create
# a new subdirectory using the model name
if destination.exists() and destination.is_dir():
destination = destination / name
# now check that the (possibly new) destination doesn't already exist
if destination.exists():
raise OSError(f"{destination}: path or directory exists; won't overwrite")
if row["is_pipeline"]:
# write the toc
toc = row[0]
destination.mkdir(parents=True)
with open(destination / "model_index.json", "w") as model_index:
model_index.write(toc)
# symlink the subfolders
model = self.get_pipeline(name)
for part_name, part_config in model.parts.items():
source_path = destination / part_name
target_path = part_config.path
source_path.symlink_to(target_path)
else:
model = self.get_model(name)
destination = Path(destination.as_posix() + model.path.suffix)
destination.symlink_to(model.path)
return destination
def _lookup_part_by_hash(self, hash: str) -> Optional[int]:
self._cursor.execute(
"""--sql
SELECT part_id from simple_model
WHERE hash=?;
""",
(hash,),
)
row = self._cursor.fetchone()
if not row:
return None
return row[0]
# may raise an exception
def _install_part(self, model_hash: str, model_path: Path, base_types: Optional[Set[BaseModelType]] = None) -> int:
(model_type, model_base) = self._probe_model(model_path)
if model_base is None:
model_bases = base_types or set()
else:
# hack logic to test multiple base type compatibility
model_bases = set()
if model_type == ExtendedModelType("vae") and model_base == BaseModelType("sd-1"):
model_bases = {BaseModelType("sd-1"), BaseModelType("sd-2")}
elif model_base == BaseModelType("any"):
model_bases = {BaseModelType(x) for x in BASE_TYPES}
else:
model_bases = {BaseModelType(model_base)}
# make the storage name slightly easier to interpret
blob_name = model_type.value + "-" + str(uuid4())
if model_path.is_file() and model_path.suffix:
blob_name += model_path.suffix
destination = self._blob_directory / blob_name
assert not destination.exists(), f"a path named {destination} already exists"
if model_path.is_dir():
copytree(model_path, destination)
else:
copy(model_path, destination)
# create entry in the model_path table
self._cursor.execute(
"""--sql
INSERT INTO simple_model (
type, hash, path
)
VALUES (?, ?, ?);
""",
(model_type.value, model_hash, destination.as_posix()),
)
# id of the inserted row
part_id = self._cursor.lastrowid
# create base compatibility info
for base in model_bases:
self._cursor.execute(
"""--sql
INSERT INTO model_base (part_id, base)
VALUES (?, ?);
""",
(part_id, BaseModelType(base).value),
)
return part_id
def _create_tables(self):
self._cursor.execute(
f"""--sql
CREATE TABLE IF NOT EXISTS simple_model (
part_id INTEGER PRIMARY KEY,
type TEXT CHECK( type IN ({MODEL_SQL_ENUM}) ) NOT NULL,
hash TEXT UNIQUE,
refcount INTEGER NOT NULL DEFAULT '0',
path TEXT NOT NULL
);
"""
)
self._cursor.execute(
"""--sql
CREATE TABLE IF NOT EXISTS model_name (
model_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
source TEXT,
description TEXT,
is_pipeline BOOLEAN NOT NULL DEFAULT '0',
table_of_contents TEXT, -- this is the contents of model_index.json
UNIQUE(name)
);
"""
)
self._cursor.execute(
f"""--sql
CREATE TABLE IF NOT EXISTS model_base (
part_id TEXT NOT NULL,
base TEXT CHECK( base in ({BASE_SQL_ENUM}) ) NOT NULL,
FOREIGN KEY(part_id) REFERENCES simple_model(part_id),
UNIQUE(part_id,base)
);
"""
)
self._cursor.execute(
"""--sql
CREATE TABLE IF NOT EXISTS model_parts (
model_id INTEGER NOT NULL,
part_id INTEGER NOT NULL,
part_name TEXT DEFAULT 'root', -- to do: use enum
FOREIGN KEY(model_id) REFERENCES model_name(model_id),
FOREIGN KEY(part_id) REFERENCES simple_model(part_id),
UNIQUE(model_id, part_id, part_name)
);
"""
)
self._cursor.execute(
"""--sql
CREATE TRIGGER IF NOT EXISTS insert_model_refcount
AFTER INSERT
ON model_parts FOR EACH ROW
BEGIN
UPDATE simple_model SET refcount=refcount+1 WHERE simple_model.part_id=new.part_id;
END;
"""
)
self._cursor.execute(
"""--sql
CREATE TRIGGER IF NOT EXISTS delete_model_refcount
AFTER DELETE
ON model_parts FOR EACH ROW
BEGIN
UPDATE simple_model SET refcount=refcount-1 WHERE simple_model.part_id=old.part_id;
END;
"""
)
self._cursor.execute(
"""--sql
CREATE TRIGGER IF NOT EXISTS update_model_refcount
AFTER UPDATE
ON model_parts FOR EACH ROW
BEGIN
UPDATE simple_model SET refcount=refcount-1 WHERE simple_model.part_id=old.part_id;
UPDATE simple_model SET refcount=refcount+1 WHERE simple_model.part_id=new.part_id;
END;
"""
)
def _probe_model(self, model_path: Path) -> Tuple[ExtendedModelType, Optional[BaseModelType]]:
try:
model_info = ModelProbe.probe(model_path)
return (model_info.model_type, model_info.base_type)
except InvalidModelException:
return (ExtendedModelType(model_path.stem), None)
# Adapted from invokeai/backend/model_management/models/stable_diffusion.py
# This code should be moved into its own module
def _convert_ckpt(checkpoint_path: Path, output_path: Path) -> Path:
"""
Convert checkpoint model to diffusers format.
The converted model will be stored at output_path.
"""
app_config = InvokeAIAppConfig.get_config()
weights = checkpoint_path
model_info = ModelProbe.probe(checkpoint_path)
base_type = model_info.base_type
variant = model_info.variant_type
pipeline_class = StableDiffusionInpaintPipeline if variant == "inpaint" else StableDiffusionPipeline
config_file = app_config.legacy_conf_path / __select_ckpt_config(base_type, variant)
precision = torch_dtype(choose_torch_device())
model_base_to_model_type = {
BaseModelType.StableDiffusion1: "FrozenCLIPEmbedder",
BaseModelType.StableDiffusion2: "FrozenOpenCLIPEmbedder",
BaseModelType.StableDiffusionXL: "SDXL",
BaseModelType.StableDiffusionXLRefiner: "SDXL-Refiner",
}
convert_ckpt_to_diffusers(
weights.as_posix(),
output_path.as_posix(),
model_type=model_base_to_model_type[base_type],
model_version=base_type,
model_variant=variant,
original_config_file=config_file,
extract_ema=True,
scan_needed=True,
pipeline_class=pipeline_class,
from_safetensors=weights.suffix == ".safetensors",
precision=precision,
)
return output_path
def __select_ckpt_config(version: BaseModelType, variant: ModelVariantType):
ckpt_configs: Dict[BaseModelType, Dict[ModelVariantType, Optional[str]]] = {
BaseModelType.StableDiffusion1: {
ModelVariantType.Normal: "v1-inference.yaml",
ModelVariantType.Inpaint: "v1-inpainting-inference.yaml",
},
BaseModelType.StableDiffusion2: {
ModelVariantType.Normal: "v2-inference-v.yaml", # best guess, as we can't differentiate with base(512)
ModelVariantType.Inpaint: "v2-inpainting-inference.yaml",
ModelVariantType.Depth: "v2-midas-inference.yaml",
},
BaseModelType.StableDiffusionXL: {
ModelVariantType.Normal: "sd_xl_base.yaml",
ModelVariantType.Inpaint: None,
ModelVariantType.Depth: None,
},
BaseModelType.StableDiffusionXLRefiner: {
ModelVariantType.Normal: "sd_xl_refiner.yaml",
ModelVariantType.Inpaint: None,
ModelVariantType.Depth: None,
},
}
return ckpt_configs[version][variant]

View File

@@ -193,7 +193,6 @@ class InvokeAIStableDiffusionPipelineOutput(StableDiffusionPipelineOutput):
attention_map_saver (`AttentionMapSaver`): Object containing attention maps that can be displayed to the user
after generation completes. Optional.
"""
attention_map_saver: Optional[AttentionMapSaver]

View File

@@ -54,13 +54,13 @@ class Context:
self.clear_requests(cleanup=True)
def register_cross_attention_modules(self, model):
for name, _module in get_cross_attention_modules(model, CrossAttentionType.SELF):
for name, module in get_cross_attention_modules(model, CrossAttentionType.SELF):
if name in self.self_cross_attention_module_identifiers:
raise AssertionError(f"name {name} cannot appear more than once")
assert False, f"name {name} cannot appear more than once"
self.self_cross_attention_module_identifiers.append(name)
for name, _module in get_cross_attention_modules(model, CrossAttentionType.TOKENS):
for name, module in get_cross_attention_modules(model, CrossAttentionType.TOKENS):
if name in self.tokens_cross_attention_module_identifiers:
raise AssertionError(f"name {name} cannot appear more than once")
assert False, f"name {name} cannot appear more than once"
self.tokens_cross_attention_module_identifiers.append(name)
def request_save_attention_maps(self, cross_attention_type: CrossAttentionType):
@@ -170,7 +170,7 @@ class Context:
self.saved_cross_attention_maps = {}
def offload_saved_attention_slices_to_cpu(self):
for _key, map_dict in self.saved_cross_attention_maps.items():
for key, map_dict in self.saved_cross_attention_maps.items():
for offset, slice in map_dict["slices"].items():
map_dict[offset] = slice.to("cpu")
@@ -433,7 +433,7 @@ def inject_attention_function(unet, context: Context):
module.identifier = identifier
try:
module.set_attention_slice_wrangler(attention_slice_wrangler)
module.set_slicing_strategy_getter(lambda module: context.get_slicing_strategy(identifier)) # noqa: B023
module.set_slicing_strategy_getter(lambda module: context.get_slicing_strategy(identifier))
except AttributeError as e:
if is_attribute_error_about(e, "set_attention_slice_wrangler"):
print(f"TODO: implement set_attention_slice_wrangler for {type(module)}") # TODO
@@ -445,7 +445,7 @@ def remove_attention_function(unet):
cross_attention_modules = get_cross_attention_modules(
unet, CrossAttentionType.TOKENS
) + get_cross_attention_modules(unet, CrossAttentionType.SELF)
for _identifier, module in cross_attention_modules:
for identifier, module in cross_attention_modules:
try:
# clear wrangler callback
module.set_attention_slice_wrangler(None)

View File

@@ -56,7 +56,7 @@ class AttentionMapSaver:
merged = None
for _key, maps in self.collated_maps.items():
for key, maps in self.collated_maps.items():
# maps has shape [(H*W), N] for N tokens
# but we want [N, H, W]
this_scale_factor = math.sqrt(maps.shape[0] / (latents_width * latents_height))

View File

@@ -123,7 +123,7 @@ class InvokeAIDiffuserComponent:
# control_data should be type List[ControlNetData]
# this loop covers both ControlNet (one ControlNetData in list)
# and MultiControlNet (multiple ControlNetData in list)
for _i, control_datum in enumerate(control_data):
for i, control_datum in enumerate(control_data):
control_mode = control_datum.control_mode
# soft_injection and cfg_injection are the two ControlNet control_mode booleans
# that are combined at higher level to make control_mode enum
@@ -214,7 +214,7 @@ class InvokeAIDiffuserComponent:
# add controlnet outputs together if have multiple controlnets
down_block_res_samples = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(down_block_res_samples, down_samples, strict=True)
for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
]
mid_block_res_sample += mid_sample
@@ -642,9 +642,7 @@ class InvokeAIDiffuserComponent:
deltas = None
uncond_latents = None
weighted_cond_list = (
c_or_weighted_c_list if isinstance(c_or_weighted_c_list, list) else [(c_or_weighted_c_list, 1)]
)
weighted_cond_list = c_or_weighted_c_list if type(c_or_weighted_c_list) is list else [(c_or_weighted_c_list, 1)]
# below is fugly omg
conditionings = [uc] + [c for c, weight in weighted_cond_list]

View File

@@ -10,34 +10,32 @@ from diffusers import (
HeunDiscreteScheduler,
KDPM2AncestralDiscreteScheduler,
KDPM2DiscreteScheduler,
LCMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UniPCMultistepScheduler,
)
SCHEDULER_MAP = {
"ddim": (DDIMScheduler, {}),
"ddpm": (DDPMScheduler, {}),
"deis": (DEISMultistepScheduler, {}),
"lms": (LMSDiscreteScheduler, {"use_karras_sigmas": False}),
"lms_k": (LMSDiscreteScheduler, {"use_karras_sigmas": True}),
"pndm": (PNDMScheduler, {}),
"heun": (HeunDiscreteScheduler, {"use_karras_sigmas": False}),
"heun_k": (HeunDiscreteScheduler, {"use_karras_sigmas": True}),
"euler": (EulerDiscreteScheduler, {"use_karras_sigmas": False}),
"euler_k": (EulerDiscreteScheduler, {"use_karras_sigmas": True}),
"euler_a": (EulerAncestralDiscreteScheduler, {}),
"kdpm_2": (KDPM2DiscreteScheduler, {}),
"kdpm_2_a": (KDPM2AncestralDiscreteScheduler, {}),
"dpmpp_2s": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": False}),
"dpmpp_2s_k": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": True}),
"dpmpp_2m": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False}),
"dpmpp_2m_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True}),
"dpmpp_2m_sde": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False, "algorithm_type": "sde-dpmsolver++"}),
"dpmpp_2m_sde_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "algorithm_type": "sde-dpmsolver++"}),
"dpmpp_sde": (DPMSolverSDEScheduler, {"use_karras_sigmas": False, "noise_sampler_seed": 0}),
"dpmpp_sde_k": (DPMSolverSDEScheduler, {"use_karras_sigmas": True, "noise_sampler_seed": 0}),
"unipc": (UniPCMultistepScheduler, {"cpu_only": True}),
"lcm": (LCMScheduler, {}),
}
SCHEDULER_MAP = dict(
ddim=(DDIMScheduler, dict()),
ddpm=(DDPMScheduler, dict()),
deis=(DEISMultistepScheduler, dict()),
lms=(LMSDiscreteScheduler, dict(use_karras_sigmas=False)),
lms_k=(LMSDiscreteScheduler, dict(use_karras_sigmas=True)),
pndm=(PNDMScheduler, dict()),
heun=(HeunDiscreteScheduler, dict(use_karras_sigmas=False)),
heun_k=(HeunDiscreteScheduler, dict(use_karras_sigmas=True)),
euler=(EulerDiscreteScheduler, dict(use_karras_sigmas=False)),
euler_k=(EulerDiscreteScheduler, dict(use_karras_sigmas=True)),
euler_a=(EulerAncestralDiscreteScheduler, dict()),
kdpm_2=(KDPM2DiscreteScheduler, dict()),
kdpm_2_a=(KDPM2AncestralDiscreteScheduler, dict()),
dpmpp_2s=(DPMSolverSinglestepScheduler, dict(use_karras_sigmas=False)),
dpmpp_2s_k=(DPMSolverSinglestepScheduler, dict(use_karras_sigmas=True)),
dpmpp_2m=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=False)),
dpmpp_2m_k=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=True)),
dpmpp_2m_sde=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=False, algorithm_type="sde-dpmsolver++")),
dpmpp_2m_sde_k=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=True, algorithm_type="sde-dpmsolver++")),
dpmpp_sde=(DPMSolverSDEScheduler, dict(use_karras_sigmas=False, noise_sampler_seed=0)),
dpmpp_sde_k=(DPMSolverSDEScheduler, dict(use_karras_sigmas=True, noise_sampler_seed=0)),
unipc=(UniPCMultistepScheduler, dict(cpu_only=True)),
)

View File

@@ -615,7 +615,7 @@ def do_textual_inversion_training(
vae_info = model_manager.get_model(*model_meta, submodel=SubModelType.Vae)
unet_info = model_manager.get_model(*model_meta, submodel=SubModelType.UNet)
pipeline_args = {"local_files_only": True}
pipeline_args = dict(local_files_only=True)
if tokenizer_name:
tokenizer = CLIPTokenizer.from_pretrained(tokenizer_name, **pipeline_args)
else:

View File

@@ -732,9 +732,7 @@ class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
controlnet_down_block_res_samples = ()
for down_block_res_sample, controlnet_block in zip(
down_block_res_samples, self.controlnet_down_blocks, strict=True
):
for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
down_block_res_sample = controlnet_block(down_block_res_sample)
controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)
@@ -747,9 +745,7 @@ class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0
scales = scales * conditioning_scale
down_block_res_samples = [
sample * scale for sample, scale in zip(down_block_res_samples, scales, strict=True)
]
down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)]
mid_block_res_sample = mid_block_res_sample * scales[-1] # last one
else:
down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]

View File

@@ -225,34 +225,34 @@ def basicConfig(**kwargs):
_FACILITY_MAP = (
{
"LOG_KERN": syslog.LOG_KERN,
"LOG_USER": syslog.LOG_USER,
"LOG_MAIL": syslog.LOG_MAIL,
"LOG_DAEMON": syslog.LOG_DAEMON,
"LOG_AUTH": syslog.LOG_AUTH,
"LOG_LPR": syslog.LOG_LPR,
"LOG_NEWS": syslog.LOG_NEWS,
"LOG_UUCP": syslog.LOG_UUCP,
"LOG_CRON": syslog.LOG_CRON,
"LOG_SYSLOG": syslog.LOG_SYSLOG,
"LOG_LOCAL0": syslog.LOG_LOCAL0,
"LOG_LOCAL1": syslog.LOG_LOCAL1,
"LOG_LOCAL2": syslog.LOG_LOCAL2,
"LOG_LOCAL3": syslog.LOG_LOCAL3,
"LOG_LOCAL4": syslog.LOG_LOCAL4,
"LOG_LOCAL5": syslog.LOG_LOCAL5,
"LOG_LOCAL6": syslog.LOG_LOCAL6,
"LOG_LOCAL7": syslog.LOG_LOCAL7,
}
dict(
LOG_KERN=syslog.LOG_KERN,
LOG_USER=syslog.LOG_USER,
LOG_MAIL=syslog.LOG_MAIL,
LOG_DAEMON=syslog.LOG_DAEMON,
LOG_AUTH=syslog.LOG_AUTH,
LOG_LPR=syslog.LOG_LPR,
LOG_NEWS=syslog.LOG_NEWS,
LOG_UUCP=syslog.LOG_UUCP,
LOG_CRON=syslog.LOG_CRON,
LOG_SYSLOG=syslog.LOG_SYSLOG,
LOG_LOCAL0=syslog.LOG_LOCAL0,
LOG_LOCAL1=syslog.LOG_LOCAL1,
LOG_LOCAL2=syslog.LOG_LOCAL2,
LOG_LOCAL3=syslog.LOG_LOCAL3,
LOG_LOCAL4=syslog.LOG_LOCAL4,
LOG_LOCAL5=syslog.LOG_LOCAL5,
LOG_LOCAL6=syslog.LOG_LOCAL6,
LOG_LOCAL7=syslog.LOG_LOCAL7,
)
if SYSLOG_AVAILABLE
else {}
else dict()
)
_SOCK_MAP = {
"SOCK_STREAM": socket.SOCK_STREAM,
"SOCK_DGRAM": socket.SOCK_DGRAM,
}
_SOCK_MAP = dict(
SOCK_STREAM=socket.SOCK_STREAM,
SOCK_DGRAM=socket.SOCK_DGRAM,
)
class InvokeAIFormatter(logging.Formatter):
@@ -344,7 +344,7 @@ LOG_FORMATTERS = {
class InvokeAILogger(object):
loggers = {}
loggers = dict()
@classmethod
def get_logger(
@@ -364,7 +364,7 @@ class InvokeAILogger(object):
@classmethod
def get_loggers(cls, config: InvokeAIAppConfig) -> list[logging.Handler]:
handler_strs = config.log_handlers
handlers = []
handlers = list()
for handler in handler_strs:
handler_name, *args = handler.split("=", 2)
args = args[0] if len(args) > 0 else None
@@ -398,7 +398,7 @@ class InvokeAILogger(object):
raise ValueError("syslog is not available on this system")
if not args:
args = "/dev/log" if Path("/dev/log").exists() else "address:localhost:514"
syslog_args = {}
syslog_args = dict()
try:
for a in args.split(","):
arg_name, *arg_value = a.split(":", 2)
@@ -434,7 +434,7 @@ class InvokeAILogger(object):
path = url.path
port = url.port or 80
syslog_args = {}
syslog_args = dict()
for a in arg_list:
arg_name, *arg_value = a.split(":", 2)
if arg_name == "method":

View File

@@ -26,7 +26,7 @@ def log_txt_as_img(wh, xc, size=10):
# wh a tuple of (width, height)
# xc a list of captions to plot
b = len(xc)
txts = []
txts = list()
for bi in range(b):
txt = Image.new("RGB", wh, color="white")
draw = ImageDraw.Draw(txt)
@@ -90,7 +90,7 @@ def instantiate_from_config(config, **kwargs):
elif config == "__is_unconditional__":
return None
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", {}), **kwargs)
return get_obj_from_str(config["target"])(**config.get("params", dict()), **kwargs)
def get_obj_from_str(string, reload=False):
@@ -228,12 +228,11 @@ def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t**5 - 15 * t**4 + 10
angles = 2 * math.pi * rand_val
gradients = torch.stack((torch.cos(angles), torch.sin(angles)), dim=-1).to(device)
def tile_grads(slice1, slice2):
return (
gradients[slice1[0] : slice1[1], slice2[0] : slice2[1]]
.repeat_interleave(d[0], 0)
.repeat_interleave(d[1], 1)
)
tile_grads = (
lambda slice1, slice2: gradients[slice1[0] : slice1[1], slice2[0] : slice2[1]]
.repeat_interleave(d[0], 0)
.repeat_interleave(d[1], 1)
)
def dot(grad, shift):
return (

View File

@@ -341,19 +341,19 @@ class InvokeAIMetadataParser:
# this was more elegant as a case statement, but that's not available in python 3.9
if old_scheduler is None:
return None
scheduler_map = {
"ddim": "ddim",
"plms": "pnmd",
"k_lms": "lms",
"k_dpm_2": "kdpm_2",
"k_dpm_2_a": "kdpm_2_a",
"dpmpp_2": "dpmpp_2s",
"k_dpmpp_2": "dpmpp_2m",
"k_dpmpp_2_a": None, # invalid, in 2.3.x, selecting this sample would just fallback to last run or plms if new session
"k_euler": "euler",
"k_euler_a": "euler_a",
"k_heun": "heun",
}
scheduler_map = dict(
ddim="ddim",
plms="pnmd",
k_lms="lms",
k_dpm_2="kdpm_2",
k_dpm_2_a="kdpm_2_a",
dpmpp_2="dpmpp_2s",
k_dpmpp_2="dpmpp_2m",
k_dpmpp_2_a=None, # invalid, in 2.3.x, selecting this sample would just fallback to last run or plms if new session
k_euler="euler",
k_euler_a="euler_a",
k_heun="heun",
)
return scheduler_map.get(old_scheduler)
def split_prompt(self, raw_prompt: str):

View File

@@ -72,7 +72,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
def __init__(self, parentApp, name, multipage=False, *args, **keywords):
self.multipage = multipage
self.subprocess = None
super().__init__(parentApp=parentApp, name=name, *args, **keywords) # noqa: B026 # TODO: maybe this is bad?
super().__init__(parentApp=parentApp, name=name, *args, **keywords)
def create(self):
self.keypress_timeout = 10
@@ -203,14 +203,14 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
)
# This restores the selected page on return from an installation
for _i in range(1, self.current_tab + 1):
for i in range(1, self.current_tab + 1):
self.tabs.h_cursor_line_down(1)
self._toggle_tables([self.current_tab])
############# diffusers tab ##########
def add_starter_pipelines(self) -> dict[str, npyscreen.widget]:
"""Add widgets responsible for selecting diffusers models"""
widgets = {}
widgets = dict()
models = self.all_models
starters = self.starter_models
starter_model_labels = self.model_labels
@@ -258,12 +258,10 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
model_type: ModelType,
window_width: int = 120,
install_prompt: str = None,
exclude: set = None,
exclude: set = set(),
) -> dict[str, npyscreen.widget]:
"""Generic code to create model selection widgets"""
if exclude is None:
exclude = set()
widgets = {}
widgets = dict()
model_list = [x for x in self.all_models if self.all_models[x].model_type == model_type and x not in exclude]
model_labels = [self.model_labels[x] for x in model_list]
@@ -368,13 +366,13 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
]
for group in widgets:
for _k, v in group.items():
for k, v in group.items():
try:
v.hidden = True
v.editable = False
except Exception:
pass
for _k, v in widgets[selected_tab].items():
for k, v in widgets[selected_tab].items():
try:
v.hidden = False
if not isinstance(v, (npyscreen.FixedText, npyscreen.TitleFixedText, CenteredTitleText)):
@@ -393,7 +391,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
label_width = max([len(models[x].name) for x in models])
description_width = window_width - label_width - checkbox_width - spacing_width
result = {}
result = dict()
for x in models.keys():
description = models[x].description
description = (
@@ -435,11 +433,11 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
parent_conn, child_conn = Pipe()
p = Process(
target=process_and_execute,
kwargs={
"opt": app.program_opts,
"selections": app.install_selections,
"conn_out": child_conn,
},
kwargs=dict(
opt=app.program_opts,
selections=app.install_selections,
conn_out=child_conn,
),
)
p.start()
child_conn.close()
@@ -560,7 +558,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
for section in ui_sections:
if "models_selected" not in section:
continue
selected = {section["models"][x] for x in section["models_selected"].value}
selected = set([section["models"][x] for x in section["models_selected"].value])
models_to_install = [x for x in selected if not self.all_models[x].installed]
models_to_remove = [x for x in section["models"] if x not in selected and self.all_models[x].installed]
selections.remove_models.extend(models_to_remove)

View File

@@ -11,7 +11,6 @@ import sys
import textwrap
from curses import BUTTON2_CLICKED, BUTTON3_CLICKED
from shutil import get_terminal_size
from typing import Optional
import npyscreen
import npyscreen.wgmultiline as wgmultiline
@@ -244,9 +243,7 @@ class SelectColumnBase:
class MultiSelectColumns(SelectColumnBase, npyscreen.MultiSelect):
def __init__(self, screen, columns: int = 1, values: Optional[list] = None, **keywords):
if values is None:
values = []
def __init__(self, screen, columns: int = 1, values: list = [], **keywords):
self.columns = columns
self.value_cnt = len(values)
self.rows = math.ceil(self.value_cnt / self.columns)
@@ -270,9 +267,7 @@ class SingleSelectWithChanged(npyscreen.SelectOne):
class SingleSelectColumnsSimple(SelectColumnBase, SingleSelectWithChanged):
"""Row of radio buttons. Spacebar to select."""
def __init__(self, screen, columns: int = 1, values: list = None, **keywords):
if values is None:
values = []
def __init__(self, screen, columns: int = 1, values: list = [], **keywords):
self.columns = columns
self.value_cnt = len(values)
self.rows = math.ceil(self.value_cnt / self.columns)

View File

@@ -275,14 +275,14 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
interp = self.interpolations[self.merge_method.value[0]]
bases = ["sd-1", "sd-2", "sdxl"]
args = {
"model_names": models,
"base_model": BaseModelType(bases[self.base_select.value[0]]),
"alpha": self.alpha.value,
"interp": interp,
"force": self.force.value,
"merged_model_name": self.merged_model_name.value,
}
args = dict(
model_names=models,
base_model=BaseModelType(bases[self.base_select.value[0]]),
alpha=self.alpha.value,
interp=interp,
force=self.force.value,
merged_model_name=self.merged_model_name.value,
)
return args
def check_for_overwrite(self) -> bool:
@@ -297,7 +297,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
def validate_field_values(self) -> bool:
bad_fields = []
model_names = self.model_names
selected_models = {model_names[self.model1.value[0]], model_names[self.model2.value[0]]}
selected_models = set((model_names[self.model1.value[0]], model_names[self.model2.value[0]]))
if self.model3.value[0] > 0:
selected_models.add(model_names[self.model3.value[0] - 1])
if len(selected_models) < 2:

View File

@@ -276,13 +276,13 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
def get_model_names(self) -> Tuple[List[str], int]:
conf = OmegaConf.load(config.root_dir / "configs/models.yaml")
model_names = [idx for idx in sorted(conf.keys()) if conf[idx].get("format", None) == "diffusers"]
model_names = [idx for idx in sorted(list(conf.keys())) if conf[idx].get("format", None) == "diffusers"]
defaults = [idx for idx in range(len(model_names)) if "default" in conf[model_names[idx]]]
default = defaults[0] if len(defaults) > 0 else 0
return (model_names, default)
def marshall_arguments(self) -> dict:
args = {}
args = dict()
# the choices
args.update(

View File

@@ -54,35 +54,42 @@
]
},
"dependencies": {
"@chakra-ui/anatomy": "^2.2.2",
"@chakra-ui/anatomy": "^2.2.1",
"@chakra-ui/icons": "^2.1.1",
"@chakra-ui/react": "^2.8.2",
"@chakra-ui/styled-system": "^2.9.2",
"@chakra-ui/theme-tools": "^2.1.2",
"@chakra-ui/react": "^2.8.1",
"@chakra-ui/styled-system": "^2.9.1",
"@chakra-ui/theme-tools": "^2.1.1",
"@dagrejs/graphlib": "^2.1.13",
"@dnd-kit/core": "^6.1.0",
"@dnd-kit/utilities": "^3.2.2",
"@dnd-kit/core": "^6.0.8",
"@dnd-kit/modifiers": "^6.0.1",
"@dnd-kit/utilities": "^3.2.1",
"@emotion/react": "^11.11.1",
"@emotion/styled": "^11.11.0",
"@fontsource-variable/inter": "^5.0.15",
"@floating-ui/react-dom": "^2.0.2",
"@fontsource-variable/inter": "^5.0.13",
"@fontsource/inter": "^5.0.13",
"@mantine/core": "^6.0.19",
"@mantine/form": "^6.0.19",
"@mantine/hooks": "^6.0.19",
"@nanostores/react": "^0.7.1",
"@reduxjs/toolkit": "^1.9.7",
"@roarr/browser-log-writer": "^1.3.0",
"@stevebel/png": "^1.5.1",
"compare-versions": "^6.1.0",
"dateformat": "^5.0.3",
"formik": "^2.4.5",
"framer-motion": "^10.16.4",
"i18next": "^23.6.0",
"i18next-http-backend": "^2.3.1",
"konva": "^9.2.3",
"fuse.js": "^6.6.2",
"i18next": "^23.5.1",
"i18next-browser-languagedetector": "^7.0.2",
"i18next-http-backend": "^2.2.2",
"konva": "^9.2.2",
"lodash-es": "^4.17.21",
"nanostores": "^0.9.4",
"nanostores": "^0.9.2",
"new-github-issue-url": "^1.0.0",
"openapi-fetch": "^0.8.1",
"overlayscrollbars": "^2.4.4",
"overlayscrollbars-react": "^0.5.3",
"openapi-fetch": "^0.7.10",
"overlayscrollbars": "^2.3.2",
"overlayscrollbars-react": "^0.5.2",
"patch-package": "^8.0.0",
"query-string": "^8.1.0",
"react": "^18.2.0",
@@ -91,25 +98,26 @@
"react-dropzone": "^14.2.3",
"react-error-boundary": "^4.0.11",
"react-hotkeys-hook": "4.4.1",
"react-i18next": "^13.3.1",
"react-i18next": "^13.3.0",
"react-icons": "^4.11.0",
"react-konva": "^18.2.10",
"react-redux": "^8.1.3",
"react-resizable-panels": "^0.0.55",
"react-use": "^17.4.0",
"react-virtuoso": "^4.6.2",
"reactflow": "^11.9.4",
"react-virtuoso": "^4.6.1",
"react-zoom-pan-pinch": "^3.2.0",
"reactflow": "^11.9.3",
"redux-dynamic-middlewares": "^2.2.0",
"redux-remember": "^4.0.4",
"roarr": "^7.18.3",
"roarr": "^7.15.1",
"serialize-error": "^11.0.2",
"socket.io-client": "^4.7.2",
"type-fest": "^4.7.1",
"use-debounce": "^10.0.0",
"type-fest": "^4.4.0",
"use-debounce": "^9.0.4",
"use-image": "^1.1.1",
"uuid": "^9.0.1",
"zod": "^3.22.4",
"zod-validation-error": "^2.1.0"
"zod-validation-error": "^1.5.0"
},
"peerDependencies": {
"@chakra-ui/cli": "^2.4.0",
@@ -120,33 +128,39 @@
},
"devDependencies": {
"@chakra-ui/cli": "^2.4.1",
"@types/dateformat": "^5.0.2",
"@types/lodash-es": "^4.17.11",
"@types/node": "^20.9.0",
"@types/react": "^18.2.37",
"@types/react-dom": "^18.2.15",
"@types/react-redux": "^7.1.30",
"@types/uuid": "^9.0.7",
"@typescript-eslint/eslint-plugin": "^6.10.0",
"@typescript-eslint/parser": "^6.10.0",
"@vitejs/plugin-react-swc": "^3.4.1",
"concurrently": "^8.2.2",
"eslint": "^8.53.0",
"@types/dateformat": "^5.0.0",
"@types/lodash-es": "^4.17.9",
"@types/node": "^20.8.6",
"@types/react": "^18.2.28",
"@types/react-dom": "^18.2.13",
"@types/react-redux": "^7.1.27",
"@types/react-transition-group": "^4.4.7",
"@types/uuid": "^9.0.5",
"@typescript-eslint/eslint-plugin": "^6.7.5",
"@typescript-eslint/parser": "^6.7.5",
"@vitejs/plugin-react-swc": "^3.4.0",
"axios": "^1.5.1",
"babel-plugin-transform-imports": "^2.0.0",
"concurrently": "^8.2.1",
"eslint": "^8.51.0",
"eslint-config-prettier": "^9.0.0",
"eslint-plugin-prettier": "^5.0.1",
"eslint-plugin-react": "^7.33.2",
"eslint-plugin-react-hooks": "^4.6.0",
"form-data": "^4.0.0",
"husky": "^8.0.3",
"lint-staged": "^15.0.2",
"lint-staged": "^15.0.1",
"madge": "^6.1.0",
"openapi-types": "^12.1.3",
"openapi-typescript": "^6.7.0",
"postinstall-postinstall": "^2.1.0",
"prettier": "^3.0.3",
"rollup-plugin-visualizer": "^5.9.2",
"ts-toolbelt": "^9.6.0",
"typescript": "^5.2.2",
"vite": "^4.5.0",
"vite": "^4.4.11",
"vite-plugin-css-injected-by-js": "^3.3.0",
"vite-plugin-dts": "^3.6.3",
"vite-plugin-dts": "^3.6.0",
"vite-plugin-eslint": "^1.8.1",
"vite-tsconfig-paths": "^4.2.1",
"yarn": "^1.22.19"

View File

@@ -221,19 +221,6 @@
"resetIPAdapterImage": "Reset IP Adapter Image",
"ipAdapterImageFallback": "No IP Adapter Image Selected"
},
"hrf": {
"hrf": "High Resolution Fix",
"enableHrf": "Enable High Resolution Fix",
"enableHrfTooltip": "Generate with a lower initial resolution, upscale to the base resolution, then run Image-to-Image.",
"upscaleMethod": "Upscale Method",
"hrfStrength": "High Resolution Fix Strength",
"strengthTooltip": "Lower values result in fewer details, which may reduce potential artifacts.",
"metadata": {
"enabled": "High Resolution Fix Enabled",
"strength": "High Resolution Fix Strength",
"method": "High Resolution Fix Method"
}
},
"embedding": {
"addEmbedding": "Add Embedding",
"incompatibleModel": "Incompatible base model:",
@@ -583,7 +570,6 @@
"strength": "Image to image strength",
"Threshold": "Noise Threshold",
"variations": "Seed-weight pairs",
"vae": "VAE",
"width": "Width",
"workflow": "Workflow"
},
@@ -1272,11 +1258,15 @@
},
"compositingBlur": {
"heading": "Blur",
"paragraphs": ["The blur radius of the mask."]
"paragraphs": [
"The blur radius of the mask."
]
},
"compositingBlurMethod": {
"heading": "Blur Method",
"paragraphs": ["The method of blur applied to the masked area."]
"paragraphs": [
"The method of blur applied to the masked area."
]
},
"compositingCoherencePass": {
"heading": "Coherence Pass",
@@ -1286,7 +1276,9 @@
},
"compositingCoherenceMode": {
"heading": "Mode",
"paragraphs": ["The mode of the Coherence Pass."]
"paragraphs": [
"The mode of the Coherence Pass."
]
},
"compositingCoherenceSteps": {
"heading": "Steps",
@@ -1304,7 +1296,9 @@
},
"compositingMaskAdjustments": {
"heading": "Mask Adjustments",
"paragraphs": ["Adjust the mask."]
"paragraphs": [
"Adjust the mask."
]
},
"controlNetBeginEnd": {
"heading": "Begin / End Step Percentage",
@@ -1362,7 +1356,9 @@
},
"infillMethod": {
"heading": "Infill Method",
"paragraphs": ["Method to infill the selected area."]
"paragraphs": [
"Method to infill the selected area."
]
},
"lora": {
"heading": "LoRA Weight",

View File

@@ -1487,18 +1487,5 @@
"scheduler": "Campionatore",
"recallParameters": "Richiama i parametri",
"noRecallParameters": "Nessun parametro da richiamare trovato"
},
"hrf": {
"enableHrf": "Abilita Correzione Alta Risoluzione",
"upscaleMethod": "Metodo di ampliamento",
"enableHrfTooltip": "Genera con una risoluzione iniziale inferiore, esegue l'ampliamento alla risoluzione di base, quindi esegue Immagine a Immagine.",
"metadata": {
"strength": "Forza della Correzione Alta Risoluzione",
"enabled": "Correzione Alta Risoluzione Abilitata",
"method": "Metodo della Correzione Alta Risoluzione"
},
"hrf": "Correzione Alta Risoluzione",
"hrfStrength": "Forza della Correzione Alta Risoluzione",
"strengthTooltip": "Valori più bassi comportano meno dettagli, il che può ridurre potenziali artefatti."
}
}

View File

@@ -1231,9 +1231,7 @@
"noLoRAsAvailable": "无可用 LoRA",
"noModelsAvailable": "无可用模型",
"selectModel": "选择一个模型",
"selectLoRA": "选择一个 LoRA",
"noRefinerModelsInstalled": "无已安装的 SDXL Refiner 模型",
"noLoRAsInstalled": "无已安装的 LoRA"
"selectLoRA": "选择一个 LoRA"
},
"boards": {
"autoAddBoard": "自动添加面板",

Some files were not shown because too many files have changed in this diff.