Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-20 14:48:03 -05:00)

Compare commits (37 commits)
| SHA1 |
|---|
| 4bfa6439d4 |
| a8d7969a1d |
| 46bfa24af3 |
| a8cb8e128d |
| 8cef0f5bf5 |
| 911baeb58b |
| 312960645b |
| 50cf285efb |
| a214f4fff5 |
| 2981591c36 |
| b08f90c99f |
| ab8c739cd8 |
| 5c5108c28a |
| 3df7cfd605 |
| 1ff3d44dba |
| c80ad90f72 |
| 3b4d1b8786 |
| c66201c7e1 |
| 35c7c59455 |
| 85f98ab3eb |
| dac75685be |
| d7b5a8b298 |
| d3ecaa740f |
| b5a6765a3d |
| 3704573ef8 |
| 01fbf2ce4d |
| 96e7003449 |
| 80197b8856 |
| 0187bc671e |
| 31584daabe |
| a6cb522fed |
| f70be1e415 |
| a2901f2b46 |
| b61c66c3a9 |
| c77f9ec202 |
| 2c5c35647f |
| bf0fdbd10e |
@@ -1,8 +1,7 @@
import typing
from enum import Enum
from importlib.metadata import PackageNotFoundError, version
from importlib.metadata import distributions
from pathlib import Path
from platform import python_version
from typing import Optional

import torch

@@ -44,24 +43,6 @@ class AppVersion(BaseModel):
    highlights: Optional[list[str]] = Field(default=None, description="Highlights of release")


class AppDependencyVersions(BaseModel):
    """App depencency Versions Response"""

    accelerate: str = Field(description="accelerate version")
    compel: str = Field(description="compel version")
    cuda: Optional[str] = Field(description="CUDA version")
    diffusers: str = Field(description="diffusers version")
    numpy: str = Field(description="Numpy version")
    opencv: str = Field(description="OpenCV version")
    onnx: str = Field(description="ONNX version")
    pillow: str = Field(description="Pillow (PIL) version")
    python: str = Field(description="Python version")
    torch: str = Field(description="PyTorch version")
    torchvision: str = Field(description="PyTorch Vision version")
    transformers: str = Field(description="transformers version")
    xformers: Optional[str] = Field(description="xformers version")


class AppConfig(BaseModel):
    """App Config Response"""

@@ -76,27 +57,19 @@ async def get_version() -> AppVersion:
    return AppVersion(version=__version__)


@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=AppDependencyVersions)
async def get_app_deps() -> AppDependencyVersions:
@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=dict[str, str])
async def get_app_deps() -> dict[str, str]:
    deps: dict[str, str] = {dist.metadata["Name"]: dist.version for dist in distributions()}
    try:
        xformers = version("xformers")
    except PackageNotFoundError:
        xformers = None
    return AppDependencyVersions(
        accelerate=version("accelerate"),
        compel=version("compel"),
        cuda=torch.version.cuda,
        diffusers=version("diffusers"),
        numpy=version("numpy"),
        opencv=version("opencv-python"),
        onnx=version("onnx"),
        pillow=version("pillow"),
        python=python_version(),
        torch=torch.version.__version__,
        torchvision=version("torchvision"),
        transformers=version("transformers"),
        xformers=xformers,
    )
        cuda = torch.version.cuda or "N/A"
    except Exception:
        cuda = "N/A"

    deps["CUDA"] = cuda

    sorted_deps = dict(sorted(deps.items(), key=lambda item: item[0].lower()))

    return sorted_deps


@app_router.get("/config", operation_id="get_config", status_code=200, response_model=AppConfig)
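For reference, the refactored `/app_deps` handler above stops returning the fixed `AppDependencyVersions` model and instead reports every installed distribution. A minimal standalone sketch of the same logic, using only the standard library plus torch (the `collect_app_deps` name and bare-function form are illustrative, not InvokeAI's actual code):

```python
from importlib.metadata import distributions

import torch


def collect_app_deps() -> dict[str, str]:
    # Gather the name and version of every installed distribution.
    deps: dict[str, str] = {dist.metadata["Name"]: dist.version for dist in distributions()}
    # Record the CUDA version torch was built against, or "N/A" on non-CUDA builds.
    try:
        cuda = torch.version.cuda or "N/A"
    except Exception:
        cuda = "N/A"
    deps["CUDA"] = cuda
    # Sort case-insensitively so the mapping is stable and easy to scan in the API response.
    return dict(sorted(deps.items(), key=lambda item: item[0].lower()))


print(collect_app_deps())
```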
@@ -99,7 +99,9 @@ async def upload_image(
        raise HTTPException(status_code=400, detail="Invalid resize_to format or size")

    try:
        np_image = pil_to_np(pil_image)
        # heuristic_resize_fast expects an RGB or RGBA image
        pil_rgba = pil_image.convert("RGBA")
        np_image = pil_to_np(pil_rgba)
        np_image = heuristic_resize_fast(np_image, (resize_dims.width, resize_dims.height))
        pil_image = np_to_pil(np_image)
    except Exception:

@@ -158,7 +158,7 @@ web_root_path = Path(list(web_dir.__path__)[0])
try:
    app.mount("/", NoCacheStaticFiles(directory=Path(web_root_path, "dist"), html=True), name="ui")
except RuntimeError:
    logger.warn(f"No UI found at {web_root_path}/dist, skipping UI mount")
    logger.warning(f"No UI found at {web_root_path}/dist, skipping UI mount")
app.mount(
    "/static", NoCacheStaticFiles(directory=Path(web_root_path, "static/")), name="static"
) # docs favicon is in here

@@ -499,7 +499,7 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None

    ui_type = field.json_schema_extra.get("ui_type", None)
    if isinstance(ui_type, str) and ui_type.startswith("DEPRECATED_"):
        logger.warn(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
        logger.warning(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
        field.json_schema_extra.pop("ui_type")
    return None

@@ -613,7 +613,7 @@ def invocation(
            raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e
        uiconfig["version"] = version
    else:
        logger.warn(f'No version specified for node "{invocation_type}", using "1.0.0"')
        logger.warning(f'No version specified for node "{invocation_type}", using "1.0.0"')
        uiconfig["version"] = "1.0.0"

    cls.UIConfig = UIConfigBase(**uiconfig)

@@ -114,6 +114,13 @@ class CompelInvocation(BaseInvocation):

    c, _options = compel.build_conditioning_tensor_for_conjunction(conjunction)

    del compel
    del patched_tokenizer
    del tokenizer
    del ti_manager
    del text_encoder
    del text_encoder_info

    c = c.detach().to("cpu")

    conditioning_data = ConditioningFieldData(conditionings=[BasicConditioningInfo(embeds=c)])

@@ -222,7 +229,10 @@ class SDXLPromptInvocationBase:
    else:
        c_pooled = None

    del compel
    del patched_tokenizer
    del tokenizer
    del ti_manager
    del text_encoder
    del text_encoder_info


@@ -184,7 +184,7 @@ class CreateGradientMaskInvocation(BaseInvocation):
        main_model_config = context.models.get_config(self.unet.unet.key)
        assert isinstance(main_model_config, MainConfigBase)
        if main_model_config.variant is ModelVariantType.Inpaint:
            mask = mask_tensor
            mask = dilated_mask_tensor
        vae_info: LoadedModel = context.models.load(self.vae.vae)
        image = context.images.get_pil(self.image.image_name)
        image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))

@@ -437,7 +437,7 @@ class WithWorkflow:
    workflow = None

    def __init_subclass__(cls) -> None:
        logger.warn(
        logger.warning(
            f"{cls.__module__.split('.')[0]}.{cls.__name__}: WithWorkflow is deprecated. Use `context.workflow` to access the workflow."
        )
        super().__init_subclass__()

@@ -578,7 +578,7 @@ def InputField(

    if default_factory is not _Unset and default_factory is not None:
        default = default_factory()
        logger.warn('"default_factory" is not supported, calling it now to set "default"')
        logger.warning('"default_factory" is not supported, calling it now to set "default"')

    # These are the args we may wish pass to the pydantic `Field()` function
    field_args = {

@@ -24,7 +24,6 @@ from invokeai.frontend.cli.arg_parser import InvokeAIArgs
INIT_FILE = Path("invokeai.yaml")
DB_FILE = Path("invokeai.db")
LEGACY_INIT_FILE = Path("invokeai.init")
DEVICE = Literal["auto", "cpu", "cuda", "cuda:1", "mps"]
PRECISION = Literal["auto", "float16", "bfloat16", "float32"]
ATTENTION_TYPE = Literal["auto", "normal", "xformers", "sliced", "torch-sdp"]
ATTENTION_SLICE_SIZE = Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8]

@@ -93,7 +92,7 @@ class InvokeAIAppConfig(BaseSettings):
    vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
    lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
    pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
    device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
    device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
    precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
    sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
    attention_type: Attention type.<br>Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`

@@ -176,7 +175,7 @@ class InvokeAIAppConfig(BaseSettings):
    pytorch_cuda_alloc_conf: Optional[str] = Field(default=None, description="Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to \"backend:cudaMallocAsync\" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.")

    # DEVICE
    device: DEVICE = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.")
    device: str = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `mps`, `cuda:N` (where N is a device number)", pattern=r"^(auto|cpu|mps|cuda(:\d+)?)$")
    precision: PRECISION = Field(default="auto", description="Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.")

    # GENERATION

@@ -196,9 +196,13 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
        # Search term condition
        if search_term:
            query_conditions += """--sql
                AND images.metadata LIKE ?
                AND (
                    images.metadata LIKE ?
                    OR images.created_at LIKE ?
                )
            """
            query_params.append(f"%{search_term.lower()}%")
            query_params.append(f"%{search_term.lower()}%")

        if starred_first:
            query_pagination = f"""--sql

@@ -78,7 +78,7 @@ class ImageService(ImageServiceABC):
                    board_id=board_id, image_name=image_name
                )
            except Exception as e:
                self.__invoker.services.logger.warn(f"Failed to add image to board {board_id}: {str(e)}")
                self.__invoker.services.logger.warning(f"Failed to add image to board {board_id}: {str(e)}")
            self.__invoker.services.image_files.save(
                image_name=image_name, image=image, metadata=metadata, workflow=workflow, graph=graph
            )

@@ -148,7 +148,7 @@ class ModelInstallService(ModelInstallServiceBase):
    def _clear_pending_jobs(self) -> None:
        for job in self.list_jobs():
            if not job.in_terminal_state:
                self._logger.warning("Cancelling job {job.id}")
                self._logger.warning(f"Cancelling job {job.id}")
                self.cancel_job(job)
        while True:
            try:

@@ -1,3 +1,4 @@
import gc
import traceback
from contextlib import suppress
from threading import BoundedSemaphore, Thread

@@ -439,6 +440,12 @@ class DefaultSessionProcessor(SessionProcessorBase):
                    poll_now_event.wait(self._polling_interval)
                    continue

                # GC-ing here can reduce peak memory usage of the invoke process by freeing allocated memory blocks.
                # Most queue items take seconds to execute, so the relative cost of a GC is very small.
                # Python will never cede allocated memory back to the OS, so anything we can do to reduce the peak
                # allocation is well worth it.
                gc.collect()

                self._invoker.services.logger.info(
                    f"Executing queue item {self._queue_item.item_id}, session {self._queue_item.session_id}"
                )

@@ -104,11 +104,7 @@ class SqliteSessionQueue(SessionQueueBase):
        return cast(Union[int, None], cursor.fetchone()[0]) or 0

    async def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
        return await asyncio.to_thread(self._enqueue_batch, queue_id, batch, prepend)

    def _enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
        try:
            cursor = self._conn.cursor()
            # TODO: how does this work in a multi-user scenario?
            current_queue_size = self._get_current_queue_size(queue_id)
            max_queue_size = self.__invoker.services.configuration.max_queue_size

@@ -118,8 +114,12 @@ class SqliteSessionQueue(SessionQueueBase):
            if prepend:
                priority = self._get_highest_priority(queue_id) + 1

            requested_count = calc_session_count(batch)
            values_to_insert = prepare_values_to_insert(
            requested_count = await asyncio.to_thread(
                calc_session_count,
                batch=batch,
            )
            values_to_insert = await asyncio.to_thread(
                prepare_values_to_insert,
                queue_id=queue_id,
                batch=batch,
                priority=priority,

@@ -127,19 +127,16 @@ class SqliteSessionQueue(SessionQueueBase):
            )
            enqueued_count = len(values_to_insert)

            if requested_count > enqueued_count:
                values_to_insert = values_to_insert[:max_new_queue_items]

            cursor.executemany(
                """--sql
                INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """,
                values_to_insert,
            )
            self._conn.commit()
            with self._conn:
                cursor = self._conn.cursor()
                cursor.executemany(
                    """--sql
                    INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                    """,
                    values_to_insert,
                )
        except Exception:
            self._conn.rollback()
            raise
        enqueue_result = EnqueueBatchResult(
            queue_id=queue_id,
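The `enqueue_batch` changes above combine two patterns: the blocking SQLite work is pushed off the event loop with `asyncio.to_thread`, and the insert runs inside `with self._conn:` so the transaction commits or rolls back automatically. A rough self-contained sketch of that pattern (the `queue` table and helper names here are placeholders, not InvokeAI's schema):

```python
import asyncio
import sqlite3


def insert_rows(conn: sqlite3.Connection, rows: list[tuple[str, str]]) -> int:
    # Using the connection as a context manager commits on success and rolls back on error.
    with conn:
        conn.executemany("INSERT INTO queue (queue_id, session) VALUES (?, ?)", rows)
    return len(rows)


async def enqueue(conn: sqlite3.Connection, rows: list[tuple[str, str]]) -> int:
    # Run the blocking write in a worker thread so the asyncio event loop stays responsive.
    return await asyncio.to_thread(insert_rows, conn, rows)


async def main() -> None:
    # check_same_thread=False lets the worker thread reuse a connection created here.
    conn = sqlite3.connect(":memory:", check_same_thread=False)
    conn.execute("CREATE TABLE queue (queue_id TEXT, session TEXT)")
    print(await enqueue(conn, [("default", "sess-1"), ("default", "sess-2")]))


asyncio.run(main())
```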
@@ -42,4 +42,5 @@ IP-Adapters:
- [InvokeAI/ip_adapter_plus_sd15](https://huggingface.co/InvokeAI/ip_adapter_plus_sd15)
- [InvokeAI/ip_adapter_plus_face_sd15](https://huggingface.co/InvokeAI/ip_adapter_plus_face_sd15)
- [InvokeAI/ip_adapter_sdxl](https://huggingface.co/InvokeAI/ip_adapter_sdxl)
- [InvokeAI/ip_adapter_sdxl_vit_h](https://huggingface.co/InvokeAI/ip_adapter_sdxl_vit_h)
- [InvokeAI/ip_adapter_sdxl_vit_h](https://huggingface.co/InvokeAI/ip_adapter_sdxl_vit_h)
- [InvokeAI/ip-adapter-plus_sdxl_vit-h](https://huggingface.co/InvokeAI/ip-adapter-plus_sdxl_vit-h)

@@ -296,7 +296,7 @@ class LoRAConfigBase(ABC, BaseModel):
        from invokeai.backend.patches.lora_conversions.formats import flux_format_from_state_dict

        sd = mod.load_state_dict(mod.path)
        value = flux_format_from_state_dict(sd)
        value = flux_format_from_state_dict(sd, mod.metadata())
        mod.cache[key] = value
        return value


@@ -20,6 +20,10 @@ from invokeai.backend.model_manager.taxonomy import (
    ModelType,
    SubModelType,
)
from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
    is_state_dict_likely_in_flux_aitoolkit_format,
    lora_model_from_flux_aitoolkit_state_dict,
)
from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import (
    is_state_dict_likely_flux_control,
    lora_model_from_flux_control_state_dict,

@@ -92,6 +96,8 @@ class LoRALoader(ModelLoader):
                model = lora_model_from_flux_onetrainer_state_dict(state_dict=state_dict)
            elif is_state_dict_likely_flux_control(state_dict=state_dict):
                model = lora_model_from_flux_control_state_dict(state_dict=state_dict)
            elif is_state_dict_likely_in_flux_aitoolkit_format(state_dict=state_dict):
                model = lora_model_from_flux_aitoolkit_state_dict(state_dict=state_dict)
            else:
                raise ValueError(f"LoRA model is in unsupported FLUX format: {config.format}")
        else:

@@ -297,6 +297,15 @@ ip_adapter_sdxl = StarterModel(
    dependencies=[ip_adapter_sdxl_image_encoder],
    previous_names=["IP Adapter SDXL"],
)
ip_adapter_plus_sdxl = StarterModel(
    name="Precise Reference (IP Adapter Plus ViT-H)",
    base=BaseModelType.StableDiffusionXL,
    source="https://huggingface.co/InvokeAI/ip-adapter-plus_sdxl_vit-h/resolve/main/ip-adapter-plus_sdxl_vit-h.safetensors",
    description="References images with a higher degree of precision.",
    type=ModelType.IPAdapter,
    dependencies=[ip_adapter_sdxl_image_encoder],
    previous_names=["IP Adapter Plus SDXL"],
)
ip_adapter_flux = StarterModel(
    name="Standard Reference (XLabs FLUX IP-Adapter v2)",
    base=BaseModelType.Flux,

@@ -672,6 +681,7 @@ STARTER_MODELS: list[StarterModel] = [
    ip_adapter_plus_sd1,
    ip_adapter_plus_face_sd1,
    ip_adapter_sdxl,
    ip_adapter_plus_sdxl,
    ip_adapter_flux,
    qr_code_cnet_sd1,
    qr_code_cnet_sdxl,

@@ -744,6 +754,7 @@ sdxl_bundle: list[StarterModel] = [
    juggernaut_sdxl,
    sdxl_fp16_vae_fix,
    ip_adapter_sdxl,
    ip_adapter_plus_sdxl,
    canny_sdxl,
    depth_sdxl,
    softedge_sdxl,

@@ -137,6 +137,7 @@ class FluxLoRAFormat(str, Enum):
    Kohya = "flux.kohya"
    OneTrainer = "flux.onetrainer"
    Control = "flux.control"
    AIToolkit = "flux.aitoolkit"


AnyVariant: TypeAlias = Union[ModelVariantType, ClipVariantType, None]

@@ -46,6 +46,10 @@ class ModelPatcher:
        text_encoder: Union[CLIPTextModel, CLIPTextModelWithProjection],
        ti_list: List[Tuple[str, TextualInversionModelRaw]],
    ) -> Iterator[Tuple[CLIPTokenizer, TextualInversionManager]]:
        if len(ti_list) == 0:
            yield tokenizer, TextualInversionManager(tokenizer)
            return

        init_tokens_count = None
        new_tokens_added = None


@@ -0,0 +1,63 @@
import json
from dataclasses import dataclass, field
from typing import Any

import torch

from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import _group_by_layer
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.util import InvokeAILogger


def is_state_dict_likely_in_flux_aitoolkit_format(state_dict: dict[str, Any], metadata: dict[str, Any] = None) -> bool:
    if metadata:
        try:
            software = json.loads(metadata.get("software", "{}"))
        except json.JSONDecodeError:
            return False
        return software.get("name") == "ai-toolkit"
    # metadata got lost somewhere
    return any("diffusion_model" == k.split(".", 1)[0] for k in state_dict.keys())


@dataclass
class GroupedStateDict:
    transformer: dict[str, Any] = field(default_factory=dict)
    # might also grow CLIP and T5 submodels


def _group_state_by_submodel(state_dict: dict[str, Any]) -> GroupedStateDict:
    logger = InvokeAILogger.get_logger()
    grouped = GroupedStateDict()
    for key, value in state_dict.items():
        submodel_name, param_name = key.split(".", 1)
        match submodel_name:
            case "diffusion_model":
                grouped.transformer[param_name] = value
            case _:
                logger.warning(f"Unexpected submodel name: {submodel_name}")
    return grouped


def _rename_peft_lora_keys(state_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
    """Renames keys from the PEFT LoRA format to the InvokeAI format."""
    renamed_state_dict = {}
    for key, value in state_dict.items():
        renamed_key = key.replace(".lora_A.", ".lora_down.").replace(".lora_B.", ".lora_up.")
        renamed_state_dict[renamed_key] = value
    return renamed_state_dict


def lora_model_from_flux_aitoolkit_state_dict(state_dict: dict[str, torch.Tensor]) -> ModelPatchRaw:
    state_dict = _rename_peft_lora_keys(state_dict)
    by_layer = _group_by_layer(state_dict)
    by_model = _group_state_by_submodel(by_layer)

    layers: dict[str, BaseLayerPatch] = {}
    for layer_key, layer_state_dict in by_model.transformer.items():
        layers[FLUX_LORA_TRANSFORMER_PREFIX + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)

    return ModelPatchRaw(layers=layers)
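A short usage sketch of the new detection helper defined above. The metadata shape mirrors what ai-toolkit writes into the safetensors header, but the specific key and tensor below are fabricated for illustration:

```python
import json

import torch

from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
    is_state_dict_likely_in_flux_aitoolkit_format,
)

metadata = {"software": json.dumps({"name": "ai-toolkit"})}
state_dict = {"diffusion_model.double_blocks.0.img_attn.qkv.lora_A.weight": torch.zeros(4, 4)}

# With metadata, the check reads the "software" entry; without it, it falls back to
# looking for keys whose first segment is "diffusion_model".
print(is_state_dict_likely_in_flux_aitoolkit_format(state_dict, metadata))  # True
print(is_state_dict_likely_in_flux_aitoolkit_format(state_dict))  # True
```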
@@ -1,4 +1,7 @@
from invokeai.backend.model_manager.taxonomy import FluxLoRAFormat
from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
    is_state_dict_likely_in_flux_aitoolkit_format,
)
from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import is_state_dict_likely_flux_control
from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import (
    is_state_dict_likely_in_flux_diffusers_format,

@@ -11,7 +14,7 @@ from invokeai.backend.patches.lora_conversions.flux_onetrainer_lora_conversion_u
)


def flux_format_from_state_dict(state_dict):
def flux_format_from_state_dict(state_dict: dict, metadata: dict | None = None) -> FluxLoRAFormat | None:
    if is_state_dict_likely_in_flux_kohya_format(state_dict):
        return FluxLoRAFormat.Kohya
    elif is_state_dict_likely_in_flux_onetrainer_format(state_dict):

@@ -20,5 +23,7 @@ def flux_format_from_state_dict(state_dict):
        return FluxLoRAFormat.Diffusers
    elif is_state_dict_likely_flux_control(state_dict):
        return FluxLoRAFormat.Control
    elif is_state_dict_likely_in_flux_aitoolkit_format(state_dict, metadata):
        return FluxLoRAFormat.AIToolkit
    else:
        return None
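With the new signature, callers of `flux_format_from_state_dict` are expected to supply the safetensors header metadata so the ai-toolkit branch can fire. One plausible way to load both pieces, sketched under the assumption that the `safetensors` package is available and that the file path is a placeholder:

```python
from safetensors import safe_open
from safetensors.torch import load_file

from invokeai.backend.patches.lora_conversions.formats import flux_format_from_state_dict

path = "my_flux_lora.safetensors"  # placeholder path

# Load the tensors and, separately, the free-form header metadata.
state_dict = load_file(path)
with safe_open(path, framework="pt") as f:
    metadata = f.metadata() or {}

print(flux_format_from_state_dict(state_dict, metadata))
```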
@@ -68,7 +68,7 @@
    "cmdk": "^1.1.1",
    "compare-versions": "^6.1.1",
    "filesize": "^10.1.6",
    "fracturedjsonjs": "^4.0.2",
    "fracturedjsonjs": "^4.1.0",
    "framer-motion": "^11.10.0",
    "i18next": "^25.0.1",
    "i18next-http-backend": "^3.0.2",
invokeai/frontend/web/pnpm-lock.yaml (generated, 8 lines changed)
@@ -54,8 +54,8 @@ dependencies:
      specifier: ^10.1.6
      version: 10.1.6
    fracturedjsonjs:
      specifier: ^4.0.2
      version: 4.0.2
      specifier: ^4.1.0
      version: 4.1.0
    framer-motion:
      specifier: ^11.10.0
      version: 11.10.0(react-dom@18.3.1)(react@18.3.1)

@@ -5280,8 +5280,8 @@ packages:
      signal-exit: 4.1.0
    dev: true

  /fracturedjsonjs@4.0.2:
    resolution: {integrity: sha512-+vGJH9wK0EEhbbn50V2sOebLRaar1VL3EXr02kxchIwpkhQk0ItrPjIOtYPYuU9hNFpVzxjrPgzjtMJih+ae4A==}
  /fracturedjsonjs@4.1.0:
    resolution: {integrity: sha512-qy6LPA8OOiiyRHt5/sNKDayD7h5r3uHmHxSOLbBsgtU/hkt5vOVWOR51MdfDbeCNfj7k/dKCRbXYm8FBAJcgWQ==}
    dev: false

  /framer-motion@10.18.0(react-dom@18.3.1)(react@18.3.1):
@@ -2452,9 +2452,8 @@
|
||||
"watchRecentReleaseVideos": "Guarda i video su questa versione",
|
||||
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
|
||||
"items": [
|
||||
"GPU Nvidia 50xx: Invoke utilizza PyTorch 2.7.0, necessario per queste GPU.",
|
||||
"Relazioni tra modelli: collega i LoRA ai modelli principali e i LoRA verranno visualizzati per primi nell'elenco.",
|
||||
"Adattatore IP: nuovi metodi Style (Strong) e Style (Precise) per i modelli SDXL e SD1.5."
|
||||
"Inpainting: livelli di rumore per maschera e limiti di denoise.",
|
||||
"Canvas: proporzioni più intelligenti per SDXL e scorrimento e zoom migliorati."
|
||||
]
|
||||
},
|
||||
"system": {
|
||||
|
||||
@@ -392,7 +392,7 @@
|
||||
"title": "全選択"
|
||||
},
|
||||
"addNode": {
|
||||
"desc": "ノード追加メニューを開く.",
|
||||
"desc": "ノード追加メニューを開く。",
|
||||
"title": "ノードを追加"
|
||||
},
|
||||
"pasteSelectionWithEdges": {
|
||||
@@ -1156,11 +1156,11 @@
|
||||
"unknownField": "不明なフィールド",
|
||||
"unexpectedField_withName": "予期しないフィールド\"{{name}}\"",
|
||||
"loadingTemplates": "読み込み中 {{name}}",
|
||||
"validateConnectionsHelp": "無効な接続が行われたり,無効なグラフが呼び出されたりしないようにします.",
|
||||
"validateConnectionsHelp": "無効な接続が行われたり,無効なグラフが呼び出されたりしないようにします",
|
||||
"validateConnections": "接続とグラフを確認する",
|
||||
"saveToGallery": "ギャラリーに保存",
|
||||
"newWorkflowDesc": "新しいワークフローを作りますか?",
|
||||
"unknownFieldType": "$t(nodes.unknownField)型:{type}}",
|
||||
"unknownFieldType": "$t(nodes.unknownField)型: {{type}}",
|
||||
"unsupportedArrayItemType": "サポートされていない配列項目型です \"{{type}}\"",
|
||||
"unableToLoadWorkflow": "ワークフローが読み込めません",
|
||||
"unableToValidateWorkflow": "ワークフローを確認できません",
|
||||
@@ -1203,13 +1203,13 @@
|
||||
"downloadBoard": "ボードをダウンロード",
|
||||
"changeBoard": "ボードを変更",
|
||||
"loading": "ロード中...",
|
||||
"topMessage": "このボードには、以下の機能で使用されている画像が含まれています:",
|
||||
"bottomMessage": "このボードおよび画像を削除すると、現在これらを利用している機能はリセットされます。",
|
||||
"topMessage": "この選択には、次の機能で使用される画像が含まれています:",
|
||||
"bottomMessage": "この画像を削除すると、現在利用している機能はリセットされます。",
|
||||
"clearSearch": "検索をクリア",
|
||||
"deleteBoard": "ボードの削除",
|
||||
"deleteBoardAndImages": "ボードと画像の削除",
|
||||
"deleteBoardOnly": "ボードのみ削除",
|
||||
"deletedBoardsCannotbeRestored": "削除されたボードは復元できません。\"ボードのみ削除\"を選択すると画像は未分類に移動されます。",
|
||||
"deletedBoardsCannotbeRestored": "削除したボードと画像は復元できません。「ボードのみ削除」を選択すると、画像は未分類の状態になります。",
|
||||
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:",
|
||||
"hideBoards": "ボードを隠す",
|
||||
"assetsWithCount_other": "{{count}} のアセット",
|
||||
@@ -1224,9 +1224,12 @@
|
||||
"imagesWithCount_other": "{{count}} の画像",
|
||||
"updateBoardError": "ボード更新エラー",
|
||||
"selectedForAutoAdd": "自動追加に選択済み",
|
||||
"deletedPrivateBoardsCannotbeRestored": "削除されたボードは復元できません。\"ボードのみ削除\"を選択すると画像はその作成者のプライベートな未分類に移動されます。",
|
||||
"deletedPrivateBoardsCannotbeRestored": "削除されたボードと画像は復元できません。「ボードのみ削除」を選択すると、画像は作成者に対して非公開の未分類状態になります。",
|
||||
"noBoards": "{{boardType}} ボードがありません",
|
||||
"viewBoards": "ボードを表示"
|
||||
"viewBoards": "ボードを表示",
|
||||
"uncategorizedImages": "分類されていない画像",
|
||||
"deleteAllUncategorizedImages": "分類されていないすべての画像を削除",
|
||||
"deletedImagesCannotBeRestored": "削除した画像は復元できません."
|
||||
},
|
||||
"invocationCache": {
|
||||
"invocationCache": "呼び出しキャッシュ",
|
||||
@@ -1292,25 +1295,49 @@
|
||||
]
|
||||
},
|
||||
"paramUpscaleMethod": {
|
||||
"heading": "アップスケール手法"
|
||||
"heading": "アップスケール手法",
|
||||
"paragraphs": [
|
||||
"高解像度修正のために画像を拡大するために使用される方法。"
|
||||
]
|
||||
},
|
||||
"upscaleModel": {
|
||||
"heading": "アップスケールモデル"
|
||||
"heading": "アップスケールモデル",
|
||||
"paragraphs": [
|
||||
"アップスケールモデルは、ディテールを追加する前に画像を出力サイズに合わせて拡大縮小します。サポートされているアップスケールモデルであればどれでも使用できますが、写真や線画など、特定の種類の画像に特化したモデルもあります。"
|
||||
]
|
||||
},
|
||||
"paramAspect": {
|
||||
"heading": "縦横比"
|
||||
"heading": "縦横比",
|
||||
"paragraphs": [
|
||||
"生成される画像のアスペクト比。比率を変更すると、幅と高さもそれに応じて更新されます。",
|
||||
"「最適化」は、選択したモデルの幅と高さを最適な寸法に設定します。"
|
||||
]
|
||||
},
|
||||
"refinerSteps": {
|
||||
"heading": "ステップ"
|
||||
"heading": "ステップ",
|
||||
"paragraphs": [
|
||||
"生成プロセスのリファイナー部分で実行されるステップの数。",
|
||||
"生成ステップと似ています。"
|
||||
]
|
||||
},
|
||||
"paramVAE": {
|
||||
"heading": "VAE"
|
||||
"heading": "VAE",
|
||||
"paragraphs": [
|
||||
"AI 出力を最終画像に変換するために使用されるモデル。"
|
||||
]
|
||||
},
|
||||
"scale": {
|
||||
"heading": "スケール"
|
||||
"heading": "スケール",
|
||||
"paragraphs": [
|
||||
"スケールは出力画像のサイズを制御し、入力画像の解像度の倍数に基づいて決定されます。例えば、1024x1024の画像を2倍に拡大すると、2048x2048の出力が生成されます。"
|
||||
]
|
||||
},
|
||||
"refinerScheduler": {
|
||||
"heading": "スケジューラー"
|
||||
"heading": "スケジューラー",
|
||||
"paragraphs": [
|
||||
"生成プロセスのリファイナー部分で使用されるスケジューラ。",
|
||||
"生成スケジューラに似ています。"
|
||||
]
|
||||
},
|
||||
"compositingCoherenceMode": {
|
||||
"heading": "モード",
|
||||
@@ -1319,10 +1346,16 @@
|
||||
]
|
||||
},
|
||||
"paramModel": {
|
||||
"heading": "モデル"
|
||||
"heading": "モデル",
|
||||
"paragraphs": [
|
||||
"生成に使用されるモデル。異なるモデルは、異なる美的結果とコンテンツを生成するように特化するようにトレーニングされています。"
|
||||
]
|
||||
},
|
||||
"paramHeight": {
|
||||
"heading": "高さ"
|
||||
"heading": "高さ",
|
||||
"paragraphs": [
|
||||
"生成される画像の高さ。8の倍数にする必要があります。"
|
||||
]
|
||||
},
|
||||
"paramSteps": {
|
||||
"heading": "ステップ",
|
||||
@@ -1345,7 +1378,11 @@
|
||||
]
|
||||
},
|
||||
"paramIterations": {
|
||||
"heading": "生成回数"
|
||||
"heading": "生成回数",
|
||||
"paragraphs": [
|
||||
"生成する画像の数。",
|
||||
"動的プロンプトが有効になっている場合、各プロンプトはこの回数生成されます。"
|
||||
]
|
||||
},
|
||||
"controlNet": {
|
||||
"heading": "ControlNet",
|
||||
@@ -1354,7 +1391,10 @@
|
||||
]
|
||||
},
|
||||
"paramWidth": {
|
||||
"heading": "幅"
|
||||
"heading": "幅",
|
||||
"paragraphs": [
|
||||
"生成される画像の幅。8の倍数にする必要があります。"
|
||||
]
|
||||
},
|
||||
"lora": {
|
||||
"heading": "LoRA",
|
||||
@@ -1369,7 +1409,11 @@
|
||||
]
|
||||
},
|
||||
"patchmatchDownScaleSize": {
|
||||
"heading": "Downscale"
|
||||
"heading": "Downscale",
|
||||
"paragraphs": [
|
||||
"埋め込む前にどの程度のダウンスケーリングが行われるか。",
|
||||
"ダウンスケーリングを大きくするとパフォーマンスは向上しますが、品質は低下します。"
|
||||
]
|
||||
},
|
||||
"controlNetWeight": {
|
||||
"heading": "重み",
|
||||
@@ -1511,6 +1555,124 @@
|
||||
"paragraphs": [
|
||||
"アウトペインティングまたはインペインティングのプロセス中に埋め込む方法."
|
||||
]
|
||||
},
|
||||
"paramGuidance": {
|
||||
"paragraphs": [
|
||||
"プロンプトが生成プロセスにどの程度影響するかを制御します。",
|
||||
"ガイダンス値が高すぎると過飽和状態になる可能性があり、ガイダンス値が高すぎるか低すぎると生成結果に歪みが生じる可能性があります。ガイダンスはFLUX DEVモデルにのみ適用されます。"
|
||||
],
|
||||
"heading": "ガイダンス"
|
||||
},
|
||||
"paramDenoisingStrength": {
|
||||
"paragraphs": [
|
||||
"生成されたイメージがラスター レイヤーとどの程度異なるかを制御します。",
|
||||
"強度が低いほど、結合された表示ラスターレイヤーに近くなります。強度が高いほど、グローバルプロンプトに大きく依存します。",
|
||||
"表示されるコンテンツを持つラスター レイヤーがない場合、この設定は無視されます。"
|
||||
],
|
||||
"heading": "ディノイジングストレングス"
|
||||
},
|
||||
"refinerStart": {
|
||||
"heading": "リファイナースタート",
|
||||
"paragraphs": [
|
||||
"生成プロセスのどの時点でリファイナーが使用され始めるか。",
|
||||
"0 はリファイナーが生成プロセス全体で使用されることを意味し、0.8 は、リファイナーが生成プロセスの最後の 20% で使用されることを意味します。"
|
||||
]
|
||||
},
|
||||
"optimizedDenoising": {
|
||||
"heading": "イメージtoイメージの最適化",
|
||||
"paragraphs": [
|
||||
"「イメージtoイメージを最適化」を有効にすると、Fluxモデルを用いた画像間変換およびインペインティング変換において、より段階的なノイズ除去強度スケールが適用されます。この設定により、画像に適用される変化量を制御する能力が向上しますが、標準のノイズ除去強度スケールを使用したい場合はオフにすることができます。この設定は現在調整中で、ベータ版です。"
|
||||
]
|
||||
},
|
||||
"refinerPositiveAestheticScore": {
|
||||
"heading": "ポジティブ美的スコア",
|
||||
"paragraphs": [
|
||||
"トレーニング データに基づいて、美的スコアの高い画像に類似するように生成を重み付けします。"
|
||||
]
|
||||
},
|
||||
"paramCFGScale": {
|
||||
"paragraphs": [
|
||||
"プロンプトが生成プロセスにどの程度影響するかを制御します。",
|
||||
"CFG スケールの値が高すぎると、飽和しすぎて生成結果が歪む可能性があります。 "
|
||||
],
|
||||
"heading": "CFGスケール"
|
||||
},
|
||||
"paramVAEPrecision": {
|
||||
"paragraphs": [
|
||||
"VAE エンコードおよびデコード時に使用される精度。",
|
||||
"Fp16/Half 精度は、画像のわずかな変化を犠牲にして、より効率的です。"
|
||||
],
|
||||
"heading": "VAE精度"
|
||||
},
|
||||
"refinerModel": {
|
||||
"heading": "リファイナーモデル",
|
||||
"paragraphs": [
|
||||
"生成プロセスの精製部分で使用されるモデル。",
|
||||
"世代モデルに似ています。"
|
||||
]
|
||||
},
|
||||
"refinerCfgScale": {
|
||||
"heading": "CFGスケール",
|
||||
"paragraphs": [
|
||||
"プロンプトが生成プロセスに与える影響を制御する。",
|
||||
"生成CFG スケールに似ています。"
|
||||
]
|
||||
},
|
||||
"seamlessTilingYAxis": {
|
||||
"heading": "シームレスタイリングY軸",
|
||||
"paragraphs": [
|
||||
"画像を垂直軸に沿ってシームレスに並べます。"
|
||||
]
|
||||
},
|
||||
"scaleBeforeProcessing": {
|
||||
"heading": "プロセス前のスケール値",
|
||||
"paragraphs": [
|
||||
"「自動」は、画像生成プロセスの前に、選択した領域をモデルに最適なサイズに拡大縮小します。",
|
||||
"「手動」では、画像生成プロセスの前に、選択した領域を拡大縮小する幅と高さを選択できます。"
|
||||
]
|
||||
},
|
||||
"creativity": {
|
||||
"heading": "クリエイティビティ",
|
||||
"paragraphs": [
|
||||
"クリエイティビティは、ディテールを追加する際のモデルに与えられる自由度を制御します。クリエイティビティが低いと元のイメージに近いままになり、クリエイティビティが高いとより多くの変化を加えることができます。プロンプトを使用する場合、クリエイティビティが高いとプロンプトの影響が増します。"
|
||||
]
|
||||
},
|
||||
"paramHrf": {
|
||||
"heading": "高解像度修正を有効にする",
|
||||
"paragraphs": [
|
||||
"モデルに最適な解像度よりも高い解像度で、高品質な画像を生成します。通常、生成された画像内の重複を防ぐために使用されます。"
|
||||
]
|
||||
},
|
||||
"seamlessTilingXAxis": {
|
||||
"heading": "シームレスタイリングX軸",
|
||||
"paragraphs": [
|
||||
"画像を水平軸に沿ってシームレスに並べます。"
|
||||
]
|
||||
},
|
||||
"paramCFGRescaleMultiplier": {
|
||||
"paragraphs": [
|
||||
"ゼロ端末 SNR (ztsnr) を使用してトレーニングされたモデルに使用される、CFG ガイダンスのリスケールマルチプライヤー。",
|
||||
"これらのモデルの場合、推奨値は 0.7 です。"
|
||||
],
|
||||
"heading": "CFG リスケールマルチプライヤー"
|
||||
},
|
||||
"structure": {
|
||||
"heading": "ストラクチャ",
|
||||
"paragraphs": [
|
||||
"ストラクチャは、出力画像が元のレイアウトにどれだけ忠実に従うかを制御します。低いストラクチャでは大幅な変更が可能ですが、高いストラクチャでは元の構成とレイアウトが厳密に維持されます。"
|
||||
]
|
||||
},
|
||||
"refinerNegativeAestheticScore": {
|
||||
"paragraphs": [
|
||||
"トレーニング データに基づいて、美観スコアが低い画像に類似するように生成に重み付けします。"
|
||||
],
|
||||
"heading": "ネガティブ美的スコア"
|
||||
},
|
||||
"fluxDevLicense": {
|
||||
"heading": "非商用ライセンス",
|
||||
"paragraphs": [
|
||||
"FLUX.1 [dev]モデルは、FLUX [dev]非商用ライセンスに基づいてライセンスされています。Invokeでこのモデルタイプを商用目的で使用する場合は、当社のウェブサイトをご覧ください。"
|
||||
]
|
||||
}
|
||||
},
|
||||
"accordions": {
|
||||
@@ -1683,7 +1845,106 @@
|
||||
"workflows": "ワークフロー",
|
||||
"ascending": "昇順",
|
||||
"name": "名前",
|
||||
"descending": "降順"
|
||||
"descending": "降順",
|
||||
"searchPlaceholder": "名前、説明、タグで検索",
|
||||
"projectWorkflows": "プロジェクトワークフロー",
|
||||
"searchWorkflows": "ワークフローを検索",
|
||||
"updated": "アップデート",
|
||||
"published": "公表",
|
||||
"builder": {
|
||||
"label": "ラベル",
|
||||
"containerPlaceholder": "空のコンテナ",
|
||||
"showDescription": "説明を表示",
|
||||
"emptyRootPlaceholderEditMode": "開始するには、フォーム要素またはノード フィールドをここにドラッグします。",
|
||||
"divider": "仕切り",
|
||||
"deleteAllElements": "すべてのフォーム要素を削除",
|
||||
"heading": "見出し",
|
||||
"nodeField": "ノードフィールド",
|
||||
"zoomToNode": "ノードにズーム",
|
||||
"dropdown": "ドロップダウン",
|
||||
"resetOptions": "オプションをリセット",
|
||||
"both": "両方",
|
||||
"builder": "フォームビルダー",
|
||||
"text": "テキスト",
|
||||
"row": "行",
|
||||
"multiLine": "マルチライン",
|
||||
"resetAllNodeFields": "すべてのノードフィールドをリセット",
|
||||
"slider": "スライダー",
|
||||
"layout": "レイアウト",
|
||||
"addToForm": "フォームに追加",
|
||||
"headingPlaceholder": "空の見出し",
|
||||
"nodeFieldTooltip": "ノード フィールドを追加するには、ワークフロー エディターのフィールドにある小さなプラス記号ボタンをクリックするか、フィールド名をフォームにドラッグします。",
|
||||
"workflowBuilderAlphaWarning": "ワークフロービルダーは現在アルファ版です。安定版リリースまでに互換性に影響する変更が発生する可能性があります。",
|
||||
"component": "コンポーネント",
|
||||
"textPlaceholder": "空のテキスト",
|
||||
"emptyRootPlaceholderViewMode": "このワークフローのフォームの作成を開始するには、[編集] をクリックします。",
|
||||
"addOption": "オプションを追加",
|
||||
"singleLine": "単線",
|
||||
"numberInput": "数値入力",
|
||||
"column": "列",
|
||||
"container": "コンテナ",
|
||||
"containerRowLayout": "コンテナ(行レイアウト)",
|
||||
"containerColumnLayout": "コンテナ(列レイアウト)",
|
||||
"maximum": "最大",
|
||||
"published": "公開済み",
|
||||
"publishedWorkflowOutputs": "アウトプット",
|
||||
"minimum": "最小",
|
||||
"publish": "公開",
|
||||
"unpublish": "非公開",
|
||||
"publishedWorkflowInputs": "インプット"
|
||||
},
|
||||
"chooseWorkflowFromLibrary": "ライブラリからワークフローを選択",
|
||||
"unnamedWorkflow": "名前のないワークフロー",
|
||||
"download": "ダウンロード",
|
||||
"savingWorkflow": "ワークフローを保存しています...",
|
||||
"problemSavingWorkflow": "ワークフローの保存に関する問題",
|
||||
"convertGraph": "グラフを変換",
|
||||
"downloadWorkflow": "ファイルに保存",
|
||||
"saveWorkflow": "ワークフローを保存",
|
||||
"userWorkflows": "ユーザーワークフロー",
|
||||
"yourWorkflows": "あなたのワークフロー",
|
||||
"edit": "編集",
|
||||
"workflowLibrary": "ワークフローライブラリ",
|
||||
"workflowSaved": "ワークフローが保存されました",
|
||||
"clearWorkflowSearchFilter": "ワークフロー検索フィルタをクリア",
|
||||
"workflowCleared": "ワークフローが作成されました",
|
||||
"autoLayout": "オートレイアウト",
|
||||
"view": "ビュー",
|
||||
"saveChanges": "変更を保存",
|
||||
"noDescription": "説明なし",
|
||||
"recommended": "あなたへのおすすめ",
|
||||
"noRecentWorkflows": "最近のワークフローがありません",
|
||||
"problemLoading": "ワークフローのローディングに関する問題",
|
||||
"newWorkflowCreated": "新しいワークフローが作成されました",
|
||||
"noWorkflows": "ワークフローがありません",
|
||||
"copyShareLink": "共有リンクをコピー",
|
||||
"copyShareLinkForWorkflow": "ワークフローの共有リンクをコピー",
|
||||
"workflowThumbnail": "ワークフローサムネイル",
|
||||
"loadWorkflow": "$t(common.load) ワークフロー",
|
||||
"shared": "共有",
|
||||
"openWorkflow": "ワークフローを開く",
|
||||
"emptyStringPlaceholder": "<空の文字列>",
|
||||
"browseWorkflows": "ワークフローを閲覧する",
|
||||
"saveWorkflowAs": "ワークフローとして保存",
|
||||
"private": "プライベート",
|
||||
"deselectAll": "すべて選択解除",
|
||||
"delete": "削除",
|
||||
"openLibrary": "ライブラリを開く",
|
||||
"loadMore": "もっと読み込む",
|
||||
"saveWorkflowToProject": "ワークフローをプロジェクトに保存",
|
||||
"created": "作成されました",
|
||||
"workflowEditorMenu": "ワークフローエディターメニュー",
|
||||
"defaultWorkflows": "デフォルトワークフロー",
|
||||
"allLoaded": "すべてのワークフローが読み込まれました",
|
||||
"filterByTags": "タグでフィルター",
|
||||
"recentlyOpened": "最近開いた",
|
||||
"opened": "オープン",
|
||||
"deleteWorkflow": "ワークフローを削除",
|
||||
"deleteWorkflow2": "このワークフローを削除してもよろしいですか? 元に戻すことはできません。",
|
||||
"loadFromGraph": "グラフからワークフローをロード",
|
||||
"workflowName": "ワークフロー名",
|
||||
"loading": "ワークフローをロードしています",
|
||||
"uploadWorkflow": "ファイルからロードする"
|
||||
},
|
||||
"system": {
|
||||
"logNamespaces": {
|
||||
|
||||
@@ -2440,8 +2440,7 @@
|
||||
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
|
||||
"items": [
|
||||
"Nvidia 50xx GPUs: Invoke sử dụng PyTorch 2.7.0, thứ tối quan trọng cho những GPU trên.",
|
||||
"Mối Quan Hệ Model: Kết nối LoRA với model chính, và LoRA đó sẽ được hiển thị đầu danh sách.",
|
||||
"IP Adapter: Thủ thuật Phong Cách (Mạnh Mẽ) và Phong Cách (Chính Xác) mới cho model SDXL và SD1.5."
|
||||
"Mối Quan Hệ Model: Kết nối LoRA với model chính, và LoRA đó sẽ được hiển thị đầu danh sách."
|
||||
]
|
||||
},
|
||||
"upsell": {
|
||||
|
||||
@@ -0,0 +1,28 @@
|
||||
import { Spinner } from '@invoke-ai/ui-library';
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
|
||||
import { useAllEntityAdapters } from 'features/controlLayers/contexts/EntityAdapterContext';
|
||||
import { computed } from 'nanostores';
|
||||
import { memo, useMemo } from 'react';
|
||||
|
||||
export const CanvasBusySpinner = memo(() => {
|
||||
const canvasManager = useCanvasManager();
|
||||
const allEntityAdapters = useAllEntityAdapters();
|
||||
const $isPendingRectCalculation = useMemo(
|
||||
() =>
|
||||
computed(
|
||||
allEntityAdapters.map(({ transformer }) => transformer.$isPendingRectCalculation),
|
||||
(...values) => values.some((v) => v)
|
||||
),
|
||||
[allEntityAdapters]
|
||||
);
|
||||
const isPendingRectCalculation = useStore($isPendingRectCalculation);
|
||||
const isRasterizing = useStore(canvasManager.stateApi.$isRasterizing);
|
||||
const isCompositing = useStore(canvasManager.compositor.$isBusy);
|
||||
|
||||
if (isRasterizing || isCompositing || isPendingRectCalculation) {
|
||||
return <Spinner opacity={0.3} />;
|
||||
}
|
||||
return null;
|
||||
});
|
||||
CanvasBusySpinner.displayName = 'CanvasBusySpinner';
|
||||
@@ -12,6 +12,7 @@ import { FocusRegionWrapper } from 'common/components/FocusRegionWrapper';
|
||||
import { CanvasAlertsPreserveMask } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsPreserveMask';
|
||||
import { CanvasAlertsSelectedEntityStatus } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsSelectedEntityStatus';
|
||||
import { CanvasAlertsSendingToGallery } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsSendingTo';
|
||||
import { CanvasBusySpinner } from 'features/controlLayers/components/CanvasBusySpinner';
|
||||
import { CanvasContextMenuGlobalMenuItems } from 'features/controlLayers/components/CanvasContextMenu/CanvasContextMenuGlobalMenuItems';
|
||||
import { CanvasContextMenuSelectedEntityMenuItems } from 'features/controlLayers/components/CanvasContextMenu/CanvasContextMenuSelectedEntityMenuItems';
|
||||
import { CanvasDropArea } from 'features/controlLayers/components/CanvasDropArea';
|
||||
@@ -106,6 +107,9 @@ export const CanvasMainPanelContent = memo(() => {
|
||||
<MenuContent />
|
||||
</Menu>
|
||||
</Flex>
|
||||
<Flex position="absolute" bottom={4} insetInlineEnd={4}>
|
||||
<CanvasBusySpinner />
|
||||
</Flex>
|
||||
</CanvasManagerProviderGate>
|
||||
</Flex>
|
||||
)}
|
||||
|
||||
@@ -168,3 +168,33 @@ export const useEntityAdapter = (
|
||||
assert(adapter, 'useEntityAdapter must be used within a EntityAdapterContext');
|
||||
return adapter;
|
||||
};
|
||||
|
||||
export const useAllEntityAdapters = () => {
|
||||
const canvasManager = useCanvasManager();
|
||||
const regionalGuidanceAdapters = useSyncExternalStore(
|
||||
canvasManager.adapters.regionMasks.subscribe,
|
||||
canvasManager.adapters.regionMasks.getSnapshot
|
||||
);
|
||||
const rasterLayerAdapters = useSyncExternalStore(
|
||||
canvasManager.adapters.rasterLayers.subscribe,
|
||||
canvasManager.adapters.rasterLayers.getSnapshot
|
||||
);
|
||||
const controlLayerAdapters = useSyncExternalStore(
|
||||
canvasManager.adapters.controlLayers.subscribe,
|
||||
canvasManager.adapters.controlLayers.getSnapshot
|
||||
);
|
||||
const inpaintMaskAdapters = useSyncExternalStore(
|
||||
canvasManager.adapters.inpaintMasks.subscribe,
|
||||
canvasManager.adapters.inpaintMasks.getSnapshot
|
||||
);
|
||||
const allEntityAdapters = useMemo(() => {
|
||||
return [
|
||||
...Array.from(rasterLayerAdapters.values()),
|
||||
...Array.from(controlLayerAdapters.values()),
|
||||
...Array.from(inpaintMaskAdapters.values()),
|
||||
...Array.from(regionalGuidanceAdapters.values()),
|
||||
];
|
||||
}, [controlLayerAdapters, inpaintMaskAdapters, rasterLayerAdapters, regionalGuidanceAdapters]);
|
||||
|
||||
return allEntityAdapters;
|
||||
};
|
||||
|
||||
@@ -24,12 +24,13 @@ import {
|
||||
selectCanvasSlice,
|
||||
selectEntity,
|
||||
} from 'features/controlLayers/store/selectors';
|
||||
import {
|
||||
type CanvasEntityIdentifier,
|
||||
type CanvasRenderableEntityState,
|
||||
isRasterLayerEntityIdentifier,
|
||||
type Rect,
|
||||
import type {
|
||||
CanvasEntityIdentifier,
|
||||
CanvasRenderableEntityState,
|
||||
LifecycleCallback,
|
||||
Rect,
|
||||
} from 'features/controlLayers/store/types';
|
||||
import { isRasterLayerEntityIdentifier } from 'features/controlLayers/store/types';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import Konva from 'konva';
|
||||
import { atom } from 'nanostores';
|
||||
@@ -40,11 +41,6 @@ import stableHash from 'stable-hash';
|
||||
import { assert } from 'tsafe';
|
||||
import type { Jsonifiable, JsonObject } from 'type-fest';
|
||||
|
||||
// Ideally, we'd type `adapter` as `CanvasEntityAdapterBase`, but the generics make this tricky. `CanvasEntityAdapter`
|
||||
// is a union of all entity adapters and is functionally identical to `CanvasEntityAdapterBase`. We'll need to do a
|
||||
// type assertion below in the `onInit` method, which calls these callbacks.
|
||||
type InitCallback = (adapter: CanvasEntityAdapter) => Promise<boolean>;
|
||||
|
||||
export abstract class CanvasEntityAdapterBase<
|
||||
T extends CanvasRenderableEntityState,
|
||||
U extends string,
|
||||
@@ -118,7 +114,7 @@ export abstract class CanvasEntityAdapterBase<
|
||||
/**
|
||||
* Callbacks that are executed when the module is initialized.
|
||||
*/
|
||||
private static initCallbacks = new Set<InitCallback>();
|
||||
private static initCallbacks = new Set<LifecycleCallback>();
|
||||
|
||||
/**
|
||||
* Register a callback to be run when an entity adapter is initialized.
|
||||
@@ -165,7 +161,7 @@ export abstract class CanvasEntityAdapterBase<
|
||||
* return false;
|
||||
* });
|
||||
*/
|
||||
static registerInitCallback = (callback: InitCallback) => {
|
||||
static registerInitCallback = (callback: LifecycleCallback) => {
|
||||
const wrapped = async (adapter: CanvasEntityAdapter) => {
|
||||
const result = await callback(adapter);
|
||||
if (result) {
|
||||
|
||||
@@ -13,7 +13,7 @@ import {
|
||||
roundRect,
|
||||
} from 'features/controlLayers/konva/util';
|
||||
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
|
||||
import type { Coordinate, Rect, RectWithRotation } from 'features/controlLayers/store/types';
|
||||
import type { Coordinate, LifecycleCallback, Rect, RectWithRotation } from 'features/controlLayers/store/types';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import Konva from 'konva';
|
||||
import type { GroupConfig } from 'konva/lib/Group';
|
||||
@@ -123,7 +123,7 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
|
||||
/**
|
||||
* Whether the transformer is currently calculating the rect of the parent.
|
||||
*/
|
||||
$isPendingRectCalculation = atom<boolean>(true);
|
||||
$isPendingRectCalculation = atom<boolean>(false);
|
||||
|
||||
/**
|
||||
* A set of subscriptions that should be cleaned up when the transformer is destroyed.
|
||||
@@ -177,6 +177,11 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
|
||||
*/
|
||||
transformMutex = new Mutex();
|
||||
|
||||
/**
|
||||
* Callbacks that are executed when the bbox is updated.
|
||||
*/
|
||||
private static bboxUpdatedCallbacks = new Set<LifecycleCallback>();
|
||||
|
||||
konva: {
|
||||
transformer: Konva.Transformer;
|
||||
proxyRect: Konva.Rect;
|
||||
@@ -908,6 +913,8 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
|
||||
this.parent.renderer.konva.objectGroup.setAttrs(groupAttrs);
|
||||
this.parent.bufferRenderer.konva.group.setAttrs(groupAttrs);
|
||||
}
|
||||
|
||||
CanvasEntityTransformer.runBboxUpdatedCallbacks(this.parent);
|
||||
};
|
||||
|
||||
calculateRect = debounce(() => {
|
||||
@@ -1026,6 +1033,23 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
|
||||
this.konva.outlineRect.visible(false);
|
||||
};
|
||||
|
||||
static registerBboxUpdatedCallback = (callback: LifecycleCallback) => {
|
||||
const wrapped = async (adapter: CanvasEntityAdapter) => {
|
||||
const result = await callback(adapter);
|
||||
if (result) {
|
||||
this.bboxUpdatedCallbacks.delete(wrapped);
|
||||
}
|
||||
return result;
|
||||
};
|
||||
this.bboxUpdatedCallbacks.add(wrapped);
|
||||
};
|
||||
|
||||
private static runBboxUpdatedCallbacks = (adapter: CanvasEntityAdapter) => {
|
||||
for (const callback of this.bboxUpdatedCallbacks) {
|
||||
callback(adapter);
|
||||
}
|
||||
};
|
||||
|
||||
repr = () => {
|
||||
return {
|
||||
id: this.id,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import type { Property } from 'csstype';
|
||||
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
|
||||
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
|
||||
import { getKonvaNodeDebugAttrs, getPrefixedId } from 'features/controlLayers/konva/util';
|
||||
import { getKonvaNodeDebugAttrs, getPrefixedId, getRectUnion } from 'features/controlLayers/konva/util';
|
||||
import type { Coordinate, Dimensions, Rect, StageAttrs } from 'features/controlLayers/store/types';
|
||||
import Konva from 'konva';
|
||||
import type { KonvaEventObject } from 'konva/lib/Node';
|
||||
@@ -186,6 +186,18 @@ export class CanvasStageModule extends CanvasModuleBase {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Fits the bbox and layers to the stage. The union of the bbox and the visible layers will be centered and scaled
|
||||
* to fit the stage with some padding.
|
||||
*/
|
||||
fitBboxAndLayersToStage = (): void => {
|
||||
const layersRect = this.manager.compositor.getVisibleRectOfType();
|
||||
const bboxRect = this.manager.stateApi.getBbox().rect;
|
||||
const unionRect = getRectUnion(layersRect, bboxRect);
|
||||
this.log.trace({ bboxRect, layersRect, unionRect }, 'Fitting bbox and layers to stage');
|
||||
this.fitRect(unionRect);
|
||||
};
|
||||
|
||||
/**
|
||||
* Fits a rectangle to the stage. The rectangle will be centered and scaled to fit the stage with some padding.
|
||||
*
|
||||
@@ -218,14 +230,23 @@ export class CanvasStageModule extends CanvasModuleBase {
|
||||
this._intendedScale = scale;
|
||||
this._activeSnapPoint = null;
|
||||
|
||||
this.konva.stage.setAttrs({
|
||||
const tween = new Konva.Tween({
|
||||
node: this.konva.stage,
|
||||
duration: 0.15,
|
||||
x,
|
||||
y,
|
||||
scaleX: scale,
|
||||
scaleY: scale,
|
||||
easing: Konva.Easings.EaseInOut,
|
||||
onUpdate: () => {
|
||||
this.syncStageAttrs();
|
||||
},
|
||||
onFinish: () => {
|
||||
this.syncStageAttrs();
|
||||
tween.destroy();
|
||||
},
|
||||
});
|
||||
|
||||
this.syncStageAttrs({ x, y, scale });
|
||||
tween.play();
|
||||
};
|
||||
|
||||
/**
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import type { CanvasEntityAdapter } from 'features/controlLayers/konva/CanvasEntity/types';
|
||||
import { fetchModelConfigByIdentifier } from 'features/metadata/util/modelFetchingHelpers';
|
||||
import { zMainModelBase, zModelIdentifierField } from 'features/nodes/types/common';
|
||||
import type { ParameterLoRAModel } from 'features/parameters/types/parameterSchemas';
|
||||
@@ -611,3 +612,7 @@ export const isMaskEntityIdentifier = (
|
||||
): entityIdentifier is CanvasEntityIdentifier<'inpaint_mask' | 'regional_guidance'> => {
|
||||
return isInpaintMaskEntityIdentifier(entityIdentifier) || isRegionalGuidanceEntityIdentifier(entityIdentifier);
|
||||
};
|
||||
|
||||
// Ideally, we'd type `adapter` as `CanvasEntityAdapterBase`, but the generics make this tricky. `CanvasEntityAdapter`
|
||||
// is a union of all entity adapters and is functionally identical to `CanvasEntityAdapterBase`.
|
||||
export type LifecycleCallback = (adapter: CanvasEntityAdapter) => Promise<boolean>;
|
||||
|
||||
@@ -2,7 +2,7 @@ import type { FlexProps } from '@invoke-ai/ui-library';
|
||||
import { Box, chakra, Flex, IconButton, Tooltip, useShiftModifier } from '@invoke-ai/ui-library';
|
||||
import { getOverlayScrollbarsParams } from 'common/components/OverlayScrollbars/constants';
|
||||
import { useClipboard } from 'common/hooks/useClipboard';
|
||||
import { Formatter } from 'fracturedjsonjs';
|
||||
import { Formatter, TableCommaPlacement } from 'fracturedjsonjs';
|
||||
import { isString } from 'lodash-es';
|
||||
import { OverlayScrollbarsComponent } from 'overlayscrollbars-react';
|
||||
import type { CSSProperties } from 'react';
|
||||
@@ -11,6 +11,8 @@ import { useTranslation } from 'react-i18next';
|
||||
import { PiCopyBold, PiDownloadSimpleBold } from 'react-icons/pi';
|
||||
|
||||
const formatter = new Formatter();
|
||||
formatter.Options.TableCommaPlacement = TableCommaPlacement.BeforePadding;
|
||||
formatter.Options.OmitTrailingWhitespace = true;
|
||||
|
||||
type Props = {
|
||||
label: string;
|
||||
@@ -19,6 +21,7 @@ type Props = {
|
||||
withDownload?: boolean;
|
||||
withCopy?: boolean;
|
||||
extraCopyActions?: { label: string; getData: (data: unknown) => unknown }[];
|
||||
wrapData?: boolean;
|
||||
} & FlexProps;
|
||||
|
||||
const overlayscrollbarsOptions = getOverlayScrollbarsParams({
|
||||
@@ -29,7 +32,16 @@ const overlayscrollbarsOptions = getOverlayScrollbarsParams({
|
||||
const ChakraPre = chakra('pre');
|
||||
|
||||
const DataViewer = (props: Props) => {
|
||||
const { label, data, fileName, withDownload = true, withCopy = true, extraCopyActions, ...rest } = props;
|
||||
const {
|
||||
label,
|
||||
data,
|
||||
fileName,
|
||||
withDownload = true,
|
||||
withCopy = true,
|
||||
extraCopyActions,
|
||||
wrapData = true,
|
||||
...rest
|
||||
} = props;
|
||||
const dataString = useMemo(() => (isString(data) ? data : formatter.Serialize(data)) ?? '', [data]);
|
||||
const shift = useShiftModifier();
|
||||
const clipboard = useClipboard();
|
||||
@@ -53,7 +65,7 @@ const DataViewer = (props: Props) => {
|
||||
<Flex bg="base.800" borderRadius="base" flexGrow={1} w="full" h="full" position="relative" {...rest}>
|
||||
<Box position="absolute" top={0} left={0} right={0} bottom={0} overflow="auto" p={2} fontSize="sm">
|
||||
<OverlayScrollbarsComponent defer style={overlayScrollbarsStyles} options={overlayscrollbarsOptions}>
|
||||
<ChakraPre whiteSpace="pre-wrap">{dataString}</ChakraPre>
|
||||
<ChakraPre whiteSpace={wrapData ? 'pre-wrap' : undefined}>{dataString}</ChakraPre>
|
||||
</OverlayScrollbarsComponent>
|
||||
</Box>
|
||||
<Flex position="absolute" top={0} insetInlineEnd={0} p={2}>
|
||||
|
||||
@@ -1,6 +1,7 @@
import type { AppDispatch, RootState } from 'app/store/store';
import { deepClone } from 'common/util/deepClone';
import { selectDefaultIPAdapter, selectDefaultRefImageConfig } from 'features/controlLayers/hooks/addLayerHooks';
import { CanvasEntityTransformer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityTransformer';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { canvasReset } from 'features/controlLayers/store/actions';
import {
@@ -173,15 +174,24 @@ export const newCanvasFromImage = async (arg: {
imageObject = imageDTOToImageObject(imageDTO);
}

const { x, y } = selectBboxRect(state);
const addFitOnLayerInitCallback = (adapterId: string) => {
CanvasEntityTransformer.registerBboxUpdatedCallback((adapter) => {
// Skip the callback if the adapter is not the one we are creating
if (adapter.id !== adapterId) {
return Promise.resolve(false);
}
adapter.manager.stage.fitBboxAndLayersToStage();
return Promise.resolve(true);
});
};

switch (type) {
case 'raster_layer': {
const overrides = {
id: getPrefixedId('raster_layer'),
objects: [imageObject],
position: { x, y },
} satisfies Partial<CanvasRasterLayerState>;
addFitOnLayerInitCallback(overrides.id);
dispatch(canvasReset());
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
@@ -192,9 +202,9 @@ export const newCanvasFromImage = async (arg: {
const overrides = {
id: getPrefixedId('control_layer'),
objects: [imageObject],
position: { x, y },
controlAdapter: deepClone(initialControlNet),
} satisfies Partial<CanvasControlLayerState>;
addFitOnLayerInitCallback(overrides.id);
dispatch(canvasReset());
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
@@ -205,8 +215,8 @@ export const newCanvasFromImage = async (arg: {
const overrides = {
id: getPrefixedId('inpaint_mask'),
objects: [imageObject],
position: { x, y },
} satisfies Partial<CanvasInpaintMaskState>;
addFitOnLayerInitCallback(overrides.id);
dispatch(canvasReset());
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
@@ -217,8 +227,8 @@ export const newCanvasFromImage = async (arg: {
const overrides = {
id: getPrefixedId('regional_guidance'),
objects: [imageObject],
position: { x, y },
} satisfies Partial<CanvasRegionalGuidanceState>;
addFitOnLayerInitCallback(overrides.id);
dispatch(canvasReset());
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));

@@ -22,6 +22,7 @@ import { NodeFieldElementOverlay } from 'features/nodes/components/sidePanel/bui
import { useDoesWorkflowHaveUnsavedChanges } from 'features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher';
import {
$isInPublishFlow,
$isPublishing,
$isReadyToDoValidationRun,
$isSelectingOutputNode,
$outputNodeId,
@@ -183,13 +184,14 @@ SelectOutputNodeButton.displayName = 'SelectOutputNodeButton';

const CancelPublishButton = memo(() => {
const { t } = useTranslation();
const isPublishing = useStore($isPublishing);
const onClick = useCallback(() => {
$isInPublishFlow.set(false);
$isSelectingOutputNode.set(false);
$outputNodeId.set(null);
}, []);
return (
<Button leftIcon={<PiXBold />} onClick={onClick}>
<Button leftIcon={<PiXBold />} onClick={onClick} isDisabled={isPublishing}>
{t('common.cancel')}
</Button>
);
@@ -198,6 +200,7 @@ CancelPublishButton.displayName = 'CancelDeployButton';

const PublishWorkflowButton = memo(() => {
const { t } = useTranslation();
const isPublishing = useStore($isPublishing);
const isReadyToDoValidationRun = useStore($isReadyToDoValidationRun);
const isReadyToEnqueue = useStore($isReadyToEnqueue);
const doesWorkflowHaveUnsavedChanges = useDoesWorkflowHaveUnsavedChanges();
@@ -211,6 +214,7 @@ const PublishWorkflowButton = memo(() => {

const enqueue = useEnqueueWorkflows();
const onClick = useCallback(async () => {
$isPublishing.set(true);
const result = await withResultAsync(() => enqueue(true, true));
if (result.isErr()) {
toast({
@@ -244,8 +248,30 @@ const PublishWorkflowButton = memo(() => {
});
log.debug(parseify(result.value), 'Enqueued batch');
}
$isPublishing.set(false);
}, [enqueue, projectUrl, t]);

const isDisabled = useMemo(() => {
return (
!allowPublishWorkflows ||
!isReadyToEnqueue ||
doesWorkflowHaveUnsavedChanges ||
hasUnpublishableNodes ||
!isReadyToDoValidationRun ||
!(outputNodeId !== null && !isSelectingOutputNode) ||
isPublishing
);
}, [
allowPublishWorkflows,
doesWorkflowHaveUnsavedChanges,
hasUnpublishableNodes,
isReadyToDoValidationRun,
isReadyToEnqueue,
isSelectingOutputNode,
outputNodeId,
isPublishing,
]);

return (
<PublishTooltip
isWorkflowSaved={!doesWorkflowHaveUnsavedChanges}
@@ -255,19 +281,8 @@ const PublishWorkflowButton = memo(() => {
hasPublishableInputs={inputs.publishable.length > 0}
hasUnpublishableInputs={inputs.unpublishable.length > 0}
>
<Button
leftIcon={<PiLightningFill />}
isDisabled={
!allowPublishWorkflows ||
!isReadyToEnqueue ||
doesWorkflowHaveUnsavedChanges ||
hasUnpublishableNodes ||
!isReadyToDoValidationRun ||
!(outputNodeId !== null && !isSelectingOutputNode)
}
onClick={onClick}
>
{t('workflows.builder.publish')}
<Button leftIcon={<PiLightningFill />} isDisabled={isDisabled} onClick={onClick}>
{isPublishing ? t('workflows.builder.publishing') : t('workflows.builder.publish')}
</Button>
</PublishTooltip>
);
@@ -337,6 +352,10 @@ export const StartPublishFlowButton = memo(() => {
$isInPublishFlow.set(true);
}, []);

const isDisabled = useMemo(() => {
return !allowPublishWorkflows || !isReadyToEnqueue || doesWorkflowHaveUnsavedChanges || hasUnpublishableNodes;
}, [allowPublishWorkflows, doesWorkflowHaveUnsavedChanges, hasUnpublishableNodes, isReadyToEnqueue]);

return (
<PublishTooltip
isWorkflowSaved={!doesWorkflowHaveUnsavedChanges}
@@ -346,15 +365,7 @@ export const StartPublishFlowButton = memo(() => {
hasPublishableInputs={inputs.publishable.length > 0}
hasUnpublishableInputs={inputs.unpublishable.length > 0}
>
<Button
onClick={onClick}
leftIcon={<PiLightningFill />}
variant="ghost"
size="sm"
isDisabled={
!allowPublishWorkflows || !isReadyToEnqueue || doesWorkflowHaveUnsavedChanges || hasUnpublishableNodes
}
>
<Button onClick={onClick} leftIcon={<PiLightningFill />} variant="ghost" size="sm" isDisabled={isDisabled}>
{t('workflows.builder.publish')}
</Button>
</PublishTooltip>

@@ -19,6 +19,7 @@ import { useGetBatchStatusQuery } from 'services/api/endpoints/queue';
import { useGetWorkflowQuery } from 'services/api/endpoints/workflows';
import { assert } from 'tsafe';

export const $isPublishing = atom(false);
export const $isInPublishFlow = atom(false);
export const $outputNodeId = atom<string | null>(null);
export const $isSelectingOutputNode = atom(false);

@@ -58,7 +58,7 @@ const AboutModal = ({ children }: AboutModalProps) => {
{cloneElement(children, {
onClick: onOpen,
})}
<Modal isOpen={isOpen} onClose={onClose} isCentered size="2xl" useInert={false}>
<Modal isOpen={isOpen} onClose={onClose} isCentered size="5xl" useInert={false}>
<ModalOverlay />
<ModalContent maxH="80vh" h="34rem">
<ModalHeader>{t('accessibility.about')}</ModalHeader>
@@ -66,7 +66,7 @@ const AboutModal = ({ children }: AboutModalProps) => {
<ModalBody display="flex" flexDir="column" gap={4}>
<Grid templateColumns="repeat(2, 1fr)" h="full">
<GridItem backgroundColor="base.750" borderRadius="base" p="4" h="full">
<DataViewer label={t('common.systemInformation')} data={localData} />
<DataViewer label={t('common.systemInformation')} data={localData} wrapData={false} />
</GridItem>
<GridItem>
<Flex flexDir="column" gap={3} justifyContent="center" alignItems="center" h="full">

@@ -1,7 +1,7 @@
import { $openAPISchemaUrl } from 'app/store/nanostores/openAPISchemaUrl';
import type { OpenAPIV3_1 } from 'openapi-types';
import type { paths } from 'services/api/schema';
import type { AppConfig, AppDependencyVersions, AppVersion } from 'services/api/types';
import type { AppConfig, AppVersion } from 'services/api/types';

import { api, buildV1Url } from '..';

@@ -22,7 +22,10 @@ export const appInfoApi = api.injectEndpoints({
}),
providesTags: ['FetchOnReconnect'],
}),
getAppDeps: build.query<AppDependencyVersions, void>({
getAppDeps: build.query<
paths['/api/v1/app/app_deps']['get']['responses']['200']['content']['application/json'],
void
>({
query: () => ({
url: buildAppInfoUrl('app_deps'),
method: 'GET',

@@ -1925,77 +1925,6 @@ export type components = {
*/
watermarking_methods: string[];
};
/**
* AppDependencyVersions
* @description App depencency Versions Response
*/
AppDependencyVersions: {
/**
* Accelerate
* @description accelerate version
*/
accelerate: string;
/**
* Compel
* @description compel version
*/
compel: string;
/**
* Cuda
* @description CUDA version
*/
cuda: string | null;
/**
* Diffusers
* @description diffusers version
*/
diffusers: string;
/**
* Numpy
* @description Numpy version
*/
numpy: string;
/**
* Opencv
* @description OpenCV version
*/
opencv: string;
/**
* Onnx
* @description ONNX version
*/
onnx: string;
/**
* Pillow
* @description Pillow (PIL) version
*/
pillow: string;
/**
* Python
* @description Python version
*/
python: string;
/**
* Torch
* @description PyTorch version
*/
torch: string;
/**
* Torchvision
* @description PyTorch Vision version
*/
torchvision: string;
/**
* Transformers
* @description transformers version
*/
transformers: string;
/**
* Xformers
* @description xformers version
*/
xformers: string | null;
};
/**
* AppVersion
* @description App Version Response
@@ -12062,7 +11991,7 @@ export type components = {
* vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
* lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
* pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
* device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
* device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
* precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
* sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
* attention_type: Attention type.<br>Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`
@@ -12337,11 +12266,10 @@ export type components = {
pytorch_cuda_alloc_conf?: string | null;
/**
* Device
* @description Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.
* @description Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
* @default auto
* @enum {string}
*/
device?: "auto" | "cpu" | "cuda" | "cuda:1" | "mps";
device?: string;
/**
* Precision
* @description Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.
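With the enum dropped, `device` accepts any `cuda:N` index and the string is handed to torch unchanged. A minimal sketch of how torch parses such a value (illustration only, not part of this diff):

import torch

# Any "cuda:N" string is a valid torch device; the index is parsed from the string.
dev = torch.device("cuda:2")
print(dev.type, dev.index)  # -> cuda 2
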
@@ -24226,7 +24154,9 @@ export interface operations {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["AppDependencyVersions"];
"application/json": {
[key: string]: string;
};
};
};
};

@@ -31,7 +31,6 @@ export type InvocationJSONSchemaExtra = S['UIConfigBase'];
// App Info
export type AppVersion = S['AppVersion'];
export type AppConfig = S['AppConfig'];
export type AppDependencyVersions = S['AppDependencyVersions'];

// Images
export type ImageDTO = S['ImageDTO'];

@@ -1 +1 @@
__version__ = "5.13.0"
__version__ = "5.15.0"

@@ -109,6 +109,12 @@ dependencies = [
"humanize==4.12.1",
]

[tool.uv]
# Prevent opencv-python from ever being chosen during dependency resolution.
# This prevents conflicts with opencv-contrib-python, which Invoke requires.
override-dependencies = ["opencv-python; sys_platform=='never'"]

[project.scripts]
"invokeai-web" = "invokeai.app.run_app:run_app"

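A quick way to confirm the override holds in a resolved environment is to check which installed distributions provide the `cv2` module; only opencv-contrib-python should be listed. A small sketch (illustration only, not part of this change):

from importlib.metadata import packages_distributions

# Map of importable top-level names to the distributions that provide them.
providers = packages_distributions().get("cv2", [])
print(providers)  # expected: ['opencv-contrib-python']
assert "opencv-python" not in providers
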
@@ -0,0 +1,458 @@
|
||||
state_dict_keys = {
|
||||
"diffusion_model.double_blocks.0.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.0.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.0.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.0.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.0.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.0.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.0.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.0.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.0.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.0.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.0.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.0.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.0.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.0.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.0.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.0.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.1.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.1.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.1.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.1.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.1.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.1.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.1.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.1.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.1.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.1.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.1.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.1.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.1.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.1.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.1.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.1.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.10.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.10.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.10.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.10.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.10.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.10.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.10.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.10.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.10.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.10.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.10.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.10.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.10.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.10.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.10.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.10.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.11.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.11.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.11.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.11.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.11.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.11.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.11.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.11.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.11.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.11.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.11.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.11.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.11.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.11.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.11.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.11.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.12.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.12.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.12.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.12.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.12.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.12.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.12.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.12.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.12.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.12.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.12.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.12.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.12.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.12.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.12.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.12.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.13.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.13.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.13.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.13.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.13.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.13.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.13.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.13.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.13.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.13.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.13.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.13.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.13.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.13.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.13.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.13.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.14.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.14.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.14.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.14.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.14.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.14.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.14.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.14.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.14.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.14.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.14.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.14.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.14.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.14.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.14.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.14.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.15.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.15.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.15.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.15.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.15.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.15.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.15.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.15.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.15.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.15.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.15.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.15.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.15.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.15.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.15.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.15.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.16.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.16.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.16.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.16.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.16.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.16.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.16.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.16.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.16.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.16.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.16.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.16.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.16.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.16.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.16.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.16.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.17.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.17.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.17.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.17.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.17.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.17.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.17.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.17.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.17.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.17.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.17.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.17.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.17.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.17.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.17.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.17.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.18.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.18.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.18.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.18.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.18.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.18.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.18.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.18.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.18.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.18.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.18.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.18.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.18.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.18.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.18.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.18.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.2.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.2.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.2.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.2.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.2.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.2.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.2.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.2.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.2.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.2.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.2.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.2.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.2.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.2.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.2.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.2.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.3.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.3.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.3.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.3.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.3.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.3.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.3.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.3.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.3.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.3.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.3.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.3.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.3.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.3.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.3.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.3.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.4.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.4.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.4.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.4.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.4.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.4.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.4.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.4.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.4.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.4.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.4.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.4.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.4.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.4.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.4.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.4.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.5.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.5.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.5.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.5.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.5.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.5.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.5.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.5.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.5.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.5.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.5.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.5.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.5.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.5.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.5.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.5.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.6.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.6.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.6.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.6.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.6.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.6.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.6.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.6.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.6.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.6.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.6.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.6.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.6.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.6.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.6.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.6.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.7.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.7.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.7.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.7.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.7.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.7.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.7.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.7.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.7.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.7.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.7.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.7.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.7.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.7.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.7.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.7.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.8.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.8.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.8.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.8.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.8.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.8.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.8.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.8.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.8.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.8.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.8.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.8.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.8.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.8.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.8.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.8.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.9.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.9.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.9.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.9.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.9.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.9.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.9.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.9.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.9.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.9.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.9.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.9.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.9.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.9.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.9.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.9.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.0.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.0.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.0.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.0.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.1.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.1.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.1.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.1.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.10.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.10.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.10.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.10.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.11.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.11.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.11.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.11.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.12.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.12.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.12.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.12.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.13.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.13.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.13.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.13.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.14.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.14.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.14.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.14.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.15.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.15.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.15.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.15.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.16.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.16.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.16.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.16.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.17.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.17.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.17.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.17.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.18.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.18.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.18.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.18.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.19.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.19.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.19.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.19.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.2.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.2.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.2.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.2.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.20.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.20.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.20.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.20.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.21.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.21.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.21.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.21.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.22.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.22.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.22.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.22.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.23.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.23.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.23.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.23.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.24.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.24.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.24.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.24.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.25.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.25.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.25.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.25.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.26.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.26.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.26.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.26.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.27.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.27.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.27.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.27.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.28.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.28.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.28.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.28.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.29.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.29.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.29.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.29.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.3.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.3.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.3.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.3.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.30.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.30.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.30.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.30.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.31.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.31.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.31.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.31.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.32.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.32.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.32.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.32.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.33.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.33.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.33.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.33.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.34.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.34.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.34.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.34.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.35.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.35.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.35.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.35.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.36.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.36.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.36.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.36.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.37.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.37.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.37.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.37.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.4.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.4.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.4.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.4.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.5.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.5.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.5.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.5.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.6.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.6.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.6.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.6.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.7.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.7.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.7.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.7.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.8.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.8.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.8.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.8.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.9.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.9.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.9.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.9.linear2.lora_B.weight": [3072, 16],
|
||||
}
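For context on the fixture above: each key pair records rank-16 LoRA factors, and the weight delta is reconstructed as `lora_B @ lora_A`. A short illustration using the qkv shapes (not part of the fixture):

import torch

rank, d_in, d_out = 16, 3072, 9216  # qkv projection factors from the fixture
lora_A = torch.zeros(rank, d_in)    # [16, 3072]
lora_B = torch.zeros(d_out, rank)   # [9216, 16]
delta_w = lora_B @ lora_A           # full-rank delta applied to the base weight
assert delta_w.shape == (d_out, d_in)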
|
||||
@@ -0,0 +1,59 @@
import accelerate
import pytest

from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.util import params
from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
    _group_state_by_submodel,
    is_state_dict_likely_in_flux_aitoolkit_format,
    lora_model_from_flux_aitoolkit_state_dict,
)
from tests.backend.patches.lora_conversions.lora_state_dicts.flux_dora_onetrainer_format import (
    state_dict_keys as flux_onetrainer_state_dict_keys,
)
from tests.backend.patches.lora_conversions.lora_state_dicts.flux_lora_aitoolkit_format import (
    state_dict_keys as flux_aitoolkit_state_dict_keys,
)
from tests.backend.patches.lora_conversions.lora_state_dicts.flux_lora_diffusers_format import (
    state_dict_keys as flux_diffusers_state_dict_keys,
)
from tests.backend.patches.lora_conversions.lora_state_dicts.utils import keys_to_mock_state_dict


def test_is_state_dict_likely_in_flux_aitoolkit_format():
    state_dict = keys_to_mock_state_dict(flux_aitoolkit_state_dict_keys)
    assert is_state_dict_likely_in_flux_aitoolkit_format(state_dict)


@pytest.mark.parametrize("sd_keys", [flux_diffusers_state_dict_keys, flux_onetrainer_state_dict_keys])
def test_is_state_dict_likely_in_flux_kohya_format_false(sd_keys: dict[str, list[int]]):
    state_dict = keys_to_mock_state_dict(sd_keys)
    assert not is_state_dict_likely_in_flux_aitoolkit_format(state_dict)


def test_flux_aitoolkit_transformer_state_dict_is_in_invoke_format():
    state_dict = keys_to_mock_state_dict(flux_aitoolkit_state_dict_keys)
    converted_state_dict = _group_state_by_submodel(state_dict).transformer

    # Extract the prefixes from the converted state dict (without the lora suffixes)
    converted_key_prefixes: list[str] = []
    for k in converted_state_dict.keys():
        k = k.replace(".lora_A.weight", "")
        k = k.replace(".lora_B.weight", "")
        converted_key_prefixes.append(k)

    # Initialize a FLUX model on the meta device.
    with accelerate.init_empty_weights():
        model = Flux(params["flux-schnell"])
    model_keys = set(model.state_dict().keys())

    for converted_key_prefix in converted_key_prefixes:
        assert any(model_key.startswith(converted_key_prefix) for model_key in model_keys), (
            f"'{converted_key_prefix}' did not match any model keys."
        )


def test_lora_model_from_flux_aitoolkit_state_dict():
    state_dict = keys_to_mock_state_dict(flux_aitoolkit_state_dict_keys)

    assert lora_model_from_flux_aitoolkit_state_dict(state_dict)
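The test above builds FLUX on the meta device so that only parameter names are compared, never real weights. A standalone illustration of `accelerate.init_empty_weights()` (not part of the test):

import accelerate
import torch.nn as nn

# Parameters created inside this context live on the "meta" device: they have
# shapes and dtypes but allocate no storage, so building a large model is nearly free.
with accelerate.init_empty_weights():
    layer = nn.Linear(3072, 9216)

print(layer.weight.device)  # meta
print(layer.weight.shape)   # torch.Size([9216, 3072])
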
@@ -10,7 +10,7 @@ import torch
from invokeai.app.services.config import get_config
from invokeai.backend.util.devices import TorchDevice, choose_precision, choose_torch_device, torch_dtype

devices = ["cpu", "cuda:0", "cuda:1", "mps"]
devices = ["cpu", "cuda:0", "cuda:1", "cuda:2", "mps"]
device_types_cpu = [("cpu", torch.float32), ("cuda:0", torch.float32), ("mps", torch.float32)]
device_types_cuda = [("cpu", torch.float32), ("cuda:0", torch.float16), ("mps", torch.float32)]
device_types_mps = [("cpu", torch.float32), ("cuda:0", torch.float32), ("mps", torch.float16)]

uv.lock (generated, 15 lines changed)
@@ -13,6 +13,9 @@ resolution-markers = [
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
]

[manifest]
overrides = [{ name = "opencv-python", marker = "sys_platform == 'never'" }]

[[package]]
name = "absl-py"
version = "2.2.1"
@@ -948,7 +951,7 @@ version = "0.2.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "numpy" },
{ name = "opencv-python" },
{ name = "opencv-python", marker = "sys_platform == 'never'" },
{ name = "pillow" },
{ name = "pywavelets" },
{ name = "torch" },
@@ -2043,17 +2046,9 @@ name = "opencv-python"
version = "4.9.0.80"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "numpy" },
{ name = "numpy", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
]
sdist = { url = "https://files.pythonhosted.org/packages/25/72/da7c69a3542071bf1e8f65336721b8b2659194425438d988f79bc14ed9cc/opencv-python-4.9.0.80.tar.gz", hash = "sha256:1a9f0e6267de3a1a1db0c54213d022c7c8b5b9ca4b580e80bdc58516c922c9e1", size = 92896686 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/35/69/b657974ddcbba54d59d7d62b01e60a8b815e35f415b996e4d355be0ac7b4/opencv_python-4.9.0.80-cp37-abi3-macosx_10_16_x86_64.whl", hash = "sha256:7e5f7aa4486651a6ebfa8ed4b594b65bd2d2f41beeb4241a3e4b1b85acbbbadb", size = 55689340 },
{ url = "https://files.pythonhosted.org/packages/77/df/b56175c3fb5bc058774bdcf35f5a71cf9c3c5b909f98a1c688eb71cd3b1f/opencv_python-4.9.0.80-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:71dfb9555ccccdd77305fc3dcca5897fbf0cf28b297c51ee55e079c065d812a3", size = 35354525 },
{ url = "https://files.pythonhosted.org/packages/52/00/2adf376707c7965bb4569f28f73fafe303c404d01047b10e3b52761be086/opencv_python-4.9.0.80-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b34a52e9da36dda8c151c6394aed602e4b17fa041df0b9f5b93ae10b0fcca2a", size = 41289855 },
{ url = "https://files.pythonhosted.org/packages/d9/64/7fdfb9386511cd6805451e012c537073a79a958a58795c4e602e538c388c/opencv_python-4.9.0.80-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4088cab82b66a3b37ffc452976b14a3c599269c247895ae9ceb4066d8188a57", size = 62208946 },
{ url = "https://files.pythonhosted.org/packages/76/9e/db1c2d56c04b97981c06663384f45f28950a73d9acf840c4006d60d0a1ff/opencv_python-4.9.0.80-cp37-abi3-win32.whl", hash = "sha256:dcf000c36dd1651118a2462257e3a9e76db789a78432e1f303c7bac54f63ef6c", size = 28546907 },
{ url = "https://files.pythonhosted.org/packages/c7/ec/9dabb6a9abfdebb3c45b0cc52dec901caafef2b2c7e7d6a839ed86d81e91/opencv_python-4.9.0.80-cp37-abi3-win_amd64.whl", hash = "sha256:3f16f08e02b2a2da44259c7cc712e779eff1dd8b55fdb0323e8cab09548086c0", size = 38624911 },
]

[[package]]
name = "opt-einsum"