Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-18 05:08:16 -05:00)

Compare commits: v5.9.0rc2...ebr/less-t (45 commits)
| SHA1 |
|---|
| bb066f6c33 |
| 3f58c68c09 |
| e50c7e5947 |
| 4a83700fe4 |
| a53e1ccf08 |
| 1af9930951 |
| c6f96613fc |
| 258bf736da |
| 7004fde41b |
| c9dc27afbb |
| efd14ec0e4 |
| 21ee2b6251 |
| 82dd2d508f |
| ffb5f6c6a6 |
| 5c5fff9ecb |
| 9ca071819b |
| b14d8e8192 |
| 5a59f6e3b8 |
| 60b5aef16a |
| 35222a8835 |
| 0e8b5484d5 |
| 454506c83e |
| 8f6ab67376 |
| 5afcc7778f |
| 325e07d330 |
| a016bdc159 |
| a14f0b2864 |
| 721483318a |
| be04743649 |
| 92f0c28d6c |
| a6b94e8ca4 |
| 00b11ef795 |
| 182580ff69 |
| 8e9d5c1187 |
| 99aac5870e |
| c1b475c585 |
| ec44e68cbf |
| 73dbebbcc3 |
| 09f971467d |
| 2c71b0e873 |
| 92f69ac463 |
| 3b154df71a |
| 64aa965160 |
| d715c27d07 |
| 515084577c |
@@ -1,2 +1,5 @@
 b3dccfaeb636599c02effc377cdd8a87d658256c
 218b6d0546b990fc449c876fb99f44b50c4daa35
+182580ff6970caed400be178c5b888514b75d7f2
+8e9d5c1187b0d36da80571ce4c8ba9b3a37b6c46
+99aac5870e1092b182e6c5f21abcaab6936a4ad1
.github/workflows/python-checks.yml (vendored, 4 changes)
@@ -61,13 +61,13 @@ jobs:
         if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
         uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
+          python-version: '3.12'
           cache: pip
           cache-dependency-path: pyproject.toml

       - name: install ruff
         if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
-        run: pip install ruff==0.9.9
+        run: pip install ruff==0.11.2
         shell: bash

       - name: ruff check
.github/workflows/python-tests.yml (vendored, 23 changes)
@@ -39,26 +39,25 @@ jobs:
     strategy:
       matrix:
         python-version:
-          - '3.10'
           - '3.11'
         platform:
-          - linux-cuda-11_7
-          - linux-rocm-5_2
+          # - linux-cuda-12_6
+          # - linux-rocm-6_2
          - linux-cpu
          - macos-default
          - windows-cpu
        include:
-          - platform: linux-cuda-11_7
-            os: ubuntu-22.04
-            github-env: $GITHUB_ENV
-          - platform: linux-rocm-5_2
-            os: ubuntu-22.04
-            extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
-            github-env: $GITHUB_ENV
+          # - platform: linux-cuda-12_6
+          #   os: ubuntu-24.04
+          #   github-env: $GITHUB_ENV
+          # - platform: linux-rocm-6_2
+          #   os: ubuntu-24.04
+          #   extra-index-url: 'https://download.pytorch.org/whl/rocm6.2'
+          #   github-env: $GITHUB_ENV
          - platform: linux-cpu
-            os: ubuntu-22.04
-            extra-index-url: 'https://download.pytorch.org/whl/cpu'
+            os: ubuntu-24.04
            github-env: $GITHUB_ENV
+            extra-index-url: 'https://download.pytorch.org/whl/cpu'
          - platform: macos-default
            os: macOS-14
            github-env: $GITHUB_ENV
@@ -12,6 +12,7 @@ from pydantic import BaseModel, Field

 from invokeai.app.api.dependencies import ApiDependencies
 from invokeai.app.invocations.upscale import ESRGAN_MODELS
+from invokeai.app.services.config.config_default import InvokeAIAppConfig, get_config
 from invokeai.app.services.invocation_cache.invocation_cache_common import InvocationCacheStatus
 from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch
 from invokeai.backend.util.logging import logging

@@ -99,7 +100,7 @@ async def get_app_deps() -> AppDependencyVersions:

 @app_router.get("/config", operation_id="get_config", status_code=200, response_model=AppConfig)
-async def get_config() -> AppConfig:
+async def get_config_() -> AppConfig:
     infill_methods = ["lama", "tile", "cv2", "color"]  # TODO: add mosaic back
     if PatchMatch.patchmatch_available():
         infill_methods.append("patchmatch")

@@ -121,6 +122,21 @@ async def get_config() -> AppConfig:
     )


+class InvokeAIAppConfigWithSetFields(BaseModel):
+    """InvokeAI App Config with model fields set"""
+
+    set_fields: set[str] = Field(description="The set fields")
+    config: InvokeAIAppConfig = Field(description="The InvokeAI App Config")
+
+
+@app_router.get(
+    "/runtime_config", operation_id="get_runtime_config", status_code=200, response_model=InvokeAIAppConfigWithSetFields
+)
+async def get_runtime_config() -> InvokeAIAppConfigWithSetFields:
+    config = get_config()
+    return InvokeAIAppConfigWithSetFields(set_fields=config.model_fields_set, config=config)
+
+
 @app_router.get(
     "/logging",
     operation_id="get_log_level",
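The new route returns the resolved `InvokeAIAppConfig` together with Pydantic's `model_fields_set`, so a client can distinguish values the user explicitly set from defaults. A minimal client sketch (hedged: the host, port 9090, and the `/api/v1/app` prefix are assumptions, not confirmed by this diff):

```python
# Hedged sketch of calling the new endpoint; the URL layout is assumed.
import requests

resp = requests.get("http://localhost:9090/api/v1/app/runtime_config")
resp.raise_for_status()
payload = resp.json()
print(sorted(payload["set_fields"]))  # names of fields the user explicitly set
print(payload["config"])              # full InvokeAIAppConfig dump
```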
@@ -96,6 +96,22 @@ async def upload_image(
         raise HTTPException(status_code=500, detail="Failed to create image")


+class ImageUploadEntry(BaseModel):
+    image_dto: ImageDTO = Body(description="The image DTO")
+    presigned_url: str = Body(description="The URL to get the presigned URL for the image upload")
+
+
+@images_router.post("/", operation_id="create_image_upload_entry")
+async def create_image_upload_entry(
+    width: int = Body(description="The width of the image"),
+    height: int = Body(description="The height of the image"),
+    board_id: Optional[str] = Body(default=None, description="The board to add this image to, if any"),
+) -> ImageUploadEntry:
+    """Uploads an image from a URL, not implemented"""
+
+    raise HTTPException(status_code=501, detail="Not implemented")
+
+
 @images_router.delete("/i/{image_name}", operation_id="delete_image")
 async def delete_image(
     image_name: str = Path(description="The name of the image to delete"),
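This route is a stub: it registers `ImageUploadEntry` in the OpenAPI schema (so generated clients pick the type up) but always answers 501, reserving the path for a future presigned-upload flow. A hedged check (host, port, and prefix are assumptions):

```python
# Hedged sketch: the stub should reject every request with 501.
import requests

resp = requests.post(
    "http://localhost:9090/api/v1/images/",  # URL prefix assumed
    json={"width": 1024, "height": 1024, "board_id": None},
)
assert resp.status_code == 501  # "Not implemented"
```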
@@ -28,12 +28,10 @@ from invokeai.app.services.model_records import (
     UnknownModelException,
 )
 from invokeai.app.util.suppress_output import SuppressOutput
+from invokeai.backend.model_manager import BaseModelType, ModelFormat, ModelType
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
-    BaseModelType,
     MainCheckpointConfig,
-    ModelFormat,
-    ModelType,
 )
 from invokeai.backend.model_manager.load.model_cache.cache_stats import CacheStats
 from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch
@@ -19,7 +19,8 @@ from invokeai.app.invocations.image_to_latents import ImageToLatentsInvocation
 from invokeai.app.invocations.model import UNetField, VAEField
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.model_manager import LoadedModel
-from invokeai.backend.model_manager.config import MainConfigBase, ModelVariantType
+from invokeai.backend.model_manager.config import MainConfigBase
+from invokeai.backend.model_manager.taxonomy import ModelVariantType
 from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor

@@ -39,8 +39,8 @@ from invokeai.app.invocations.t2i_adapter import T2IAdapterField
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.controlnet_utils import prepare_control_image
 from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
-from invokeai.backend.model_manager import BaseModelType, ModelVariantType
 from invokeai.backend.model_manager.config import AnyModelConfig
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelVariantType
 from invokeai.backend.model_patcher import ModelPatcher
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
@@ -1,7 +1,6 @@
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )

@@ -25,7 +24,6 @@ class FluxControlLoRALoaderOutput(BaseInvocationOutput):
     tags=["lora", "model", "flux"],
     category="model",
     version="1.1.1",
-    classification=Classification.Prototype,
 )
 class FluxControlLoRALoaderInvocation(BaseInvocation):
     """LoRA model and Image to use with FLUX transformer generation."""
@@ -3,7 +3,6 @@ from pydantic import BaseModel, Field, field_validator, model_validator
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )

@@ -52,7 +51,6 @@ class FluxControlNetOutput(BaseInvocationOutput):
     tags=["controlnet", "flux"],
     category="controlnet",
     version="1.0.0",
-    classification=Classification.Prototype,
 )
 class FluxControlNetInvocation(BaseInvocation):
     """Collect FLUX ControlNet info to pass to other nodes."""
@@ -10,7 +10,7 @@ from PIL import Image
 from torchvision.transforms.functional import resize as tv_resize
 from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import (
     DenoiseMaskField,
     FieldDescriptions,

@@ -49,7 +49,7 @@ from invokeai.backend.flux.sampling_utils import (
     unpack,
 )
 from invokeai.backend.flux.text_conditioning import FluxReduxConditioning, FluxTextConditioning
-from invokeai.backend.model_manager.config import ModelFormat, ModelVariantType
+from invokeai.backend.model_manager.taxonomy import ModelFormat, ModelVariantType
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw

@@ -64,7 +64,6 @@ from invokeai.backend.util.devices import TorchDevice
     tags=["image", "flux"],
     category="image",
     version="3.3.0",
-    classification=Classification.Prototype,
 )
 class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Run denoising process with a FLUX transformer model."""
@@ -31,7 +31,7 @@ class FluxFillOutput(BaseInvocationOutput):
     tags=["inpaint"],
     category="inpaint",
     version="1.0.0",
-    classification=Classification.Prototype,
+    classification=Classification.Beta,
 )
 class FluxFillInvocation(BaseInvocation):
     """Prepare the FLUX Fill conditioning data."""
@@ -4,7 +4,7 @@ from typing import List, Literal, Union
 from pydantic import field_validator, model_validator
 from typing_extensions import Self

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import InputField, UIType
 from invokeai.app.invocations.ip_adapter import (
     CLIP_VISION_MODEL_MAP,

@@ -28,7 +28,6 @@ from invokeai.backend.model_manager.config import (
     tags=["ip_adapter", "control"],
     category="ip_adapter",
     version="1.0.0",
-    classification=Classification.Prototype,
 )
 class FluxIPAdapterInvocation(BaseInvocation):
     """Collects FLUX IP-Adapter info to pass to other nodes."""
@@ -3,14 +3,13 @@ from typing import Optional
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )
 from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
 from invokeai.app.invocations.model import CLIPField, LoRAField, ModelIdentifierField, T5EncoderField, TransformerField
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager.config import BaseModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType


 @invocation_output("flux_lora_loader_output")

@@ -32,7 +31,6 @@ class FluxLoRALoaderOutput(BaseInvocationOutput):
     tags=["lora", "model", "flux"],
     category="model",
     version="1.2.1",
-    classification=Classification.Prototype,
 )
 class FluxLoRALoaderInvocation(BaseInvocation):
     """Apply a LoRA model to a FLUX transformer and/or text encoder."""

@@ -111,7 +109,6 @@ class FluxLoRALoaderInvocation(BaseInvocation):
     tags=["lora", "model", "flux"],
     category="model",
     version="1.3.1",
-    classification=Classification.Prototype,
 )
 class FLUXLoRACollectionLoader(BaseInvocation):
     """Applies a collection of LoRAs to a FLUX transformer."""
@@ -3,7 +3,6 @@ from typing import Literal
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )

@@ -17,8 +16,8 @@ from invokeai.app.util.t5_model_identifier import (
 from invokeai.backend.flux.util import max_seq_lengths
 from invokeai.backend.model_manager.config import (
     CheckpointConfigBase,
-    SubModelType,
 )
+from invokeai.backend.model_manager.taxonomy import SubModelType


 @invocation_output("flux_model_loader_output")

@@ -41,7 +40,6 @@ class FluxModelLoaderOutput(BaseInvocationOutput):
     tags=["model", "flux"],
     category="model",
     version="1.0.6",
-    classification=Classification.Prototype,
 )
 class FluxModelLoaderInvocation(BaseInvocation):
     """Loads a flux base model, outputting its submodels."""
@@ -23,7 +23,8 @@ from invokeai.app.invocations.primitives import ImageField
 from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.flux.redux.flux_redux_model import FluxReduxModel
-from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType
+from invokeai.backend.model_manager import BaseModelType, ModelType
+from invokeai.backend.model_manager.config import AnyModelConfig
 from invokeai.backend.model_manager.starter_models import siglip
 from invokeai.backend.sig_lip.sig_lip_pipeline import SigLipPipeline
 from invokeai.backend.util.devices import TorchDevice

@@ -44,7 +45,7 @@ class FluxReduxOutput(BaseInvocationOutput):
     tags=["ip_adapter", "control"],
     category="ip_adapter",
     version="2.0.0",
-    classification=Classification.Prototype,
+    classification=Classification.Beta,
 )
 class FluxReduxInvocation(BaseInvocation):
     """Runs a FLUX Redux model to generate a conditioning tensor."""
@@ -4,7 +4,7 @@ from typing import Iterator, Literal, Optional, Tuple
 import torch
 from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer, T5TokenizerFast

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     FluxConditioningField,

@@ -17,7 +17,7 @@ from invokeai.app.invocations.model import CLIPField, T5EncoderField
 from invokeai.app.invocations.primitives import FluxConditioningOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.flux.modules.conditioner import HFEncoder
-from invokeai.backend.model_manager.config import ModelFormat
+from invokeai.backend.model_manager import ModelFormat
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX, FLUX_LORA_T5_PREFIX
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw

@@ -30,7 +30,6 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Condit
     tags=["prompt", "conditioning", "flux"],
     category="conditioning",
     version="1.1.2",
-    classification=Classification.Prototype,
 )
 class FluxTextEncoderInvocation(BaseInvocation):
     """Encodes and preps a prompt for a flux image."""
@@ -6,7 +6,7 @@ from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField
 from invokeai.app.invocations.model import UNetField
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager.config import BaseModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType


 @invocation_output("ideal_size_output")
@@ -355,7 +355,6 @@ class ImageBlurInvocation(BaseInvocation, WithMetadata, WithBoard):
     tags=["image", "unsharp_mask"],
     category="image",
     version="1.2.2",
-    classification=Classification.Beta,
 )
 class UnsharpMaskInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Applies an unsharp mask filter to an image"""

@@ -1096,6 +1095,7 @@ class ExpandMaskWithFadeInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Expands a mask with a fade effect. The mask uses black to indicate areas to keep from the generated image and white for areas to discard.
     The mask is thresholded to create a binary mask, and then a distance transform is applied to create a fade effect.
     The fade size is specified in pixels, and the mask is expanded by that amount. The result is a mask with a smooth transition from black to white.
+    If the fade size is 0, the mask is returned as-is.
     """

     mask: ImageField = InputField(description="The mask to expand")

@@ -1105,6 +1105,11 @@ class ExpandMaskWithFadeInvocation(BaseInvocation, WithMetadata, WithBoard):
     def invoke(self, context: InvocationContext) -> ImageOutput:
         pil_mask = context.images.get_pil(self.mask.image_name, mode="L")

+        if self.fade_size_px == 0:
+            # If the fade size is 0, just return the mask as-is.
+            image_dto = context.images.save(image=pil_mask, image_category=ImageCategory.MASK)
+            return ImageOutput.build(image_dto)
+
         np_mask = numpy.array(pil_mask)

         # Threshold the mask to create a binary mask - 0 for black, 255 for white

@@ -1265,7 +1270,6 @@ class ImageNoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
     category="image",
     version="1.0.0",
     tags=["image", "crop"],
-    classification=Classification.Beta,
 )
 class CropImageToBoundingBoxInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Crop an image to the given bounding box. If the bounding box is omitted, the image is cropped to the non-transparent pixels."""

@@ -1292,7 +1296,6 @@ class CropImageToBoundingBoxInvocation(BaseInvocation, WithMetadata, WithBoard):
     category="image",
     version="1.0.0",
     tags=["image", "crop"],
-    classification=Classification.Beta,
 )
 class PasteImageIntoBoundingBoxInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Paste the source image into the target image at the given bounding box.
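For orientation, the fade that the updated docstring describes can be sketched in a few lines. This is a simplified illustration using numpy and OpenCV, not the invocation's actual implementation:

```python
# Simplified sketch of a distance-transform fade, assuming numpy + OpenCV.
import cv2
import numpy as np

def expand_mask_with_fade(np_mask: np.ndarray, fade_size_px: int) -> np.ndarray:
    if fade_size_px == 0:
        return np_mask  # mirrors the early return added in the diff above
    # Threshold to a binary mask: 0 = keep (black), 255 = discard (white).
    binary = np.where(np_mask > 127, 255, 0).astype(np.uint8)
    # For each kept pixel, distance in pixels to the nearest discarded pixel.
    dist = cv2.distanceTransform(255 - binary, cv2.DIST_L2, 5)
    # Ramp from white at the old edge down to black over fade_size_px,
    # which expands the discard region with a smooth transition.
    fade = 1.0 - np.clip(dist / fade_size_px, 0.0, 1.0)
    return (fade * 255).astype(np.uint8)
```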
@@ -13,10 +13,8 @@ from invokeai.app.services.model_records.model_records_base import ModelRecordCh
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
-    BaseModelType,
     IPAdapterCheckpointConfig,
     IPAdapterInvokeAIConfig,
-    ModelType,
 )
 from invokeai.backend.model_manager.starter_models import (
     StarterModel,

@@ -24,6 +22,7 @@ from invokeai.backend.model_manager.starter_models import (
     ip_adapter_sd_image_encoder,
     ip_adapter_sdxl_image_encoder,
 )
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType


 class IPAdapterField(BaseModel):
@@ -4,7 +4,7 @@ import torch
 from PIL.Image import Image
 from pydantic import field_validator

-from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
 from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, UIComponent, UIType
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.primitives import StringOutput

@@ -13,7 +13,14 @@ from invokeai.backend.llava_onevision_model import LlavaOnevisionModel
 from invokeai.backend.util.devices import TorchDevice


-@invocation("llava_onevision_vllm", title="LLaVA OneVision VLLM", tags=["vllm"], category="vllm", version="1.0.0")
+@invocation(
+    "llava_onevision_vllm",
+    title="LLaVA OneVision VLLM",
+    tags=["vllm"],
+    category="vllm",
+    version="1.0.0",
+    classification=Classification.Beta,
+)
 class LlavaOnevisionVllmInvocation(BaseInvocation):
     """Run a LLaVA OneVision VLLM model."""
@@ -4,7 +4,6 @@ from PIL import Image

 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
-    Classification,
     InvocationContext,
     invocation,
 )

@@ -58,7 +57,6 @@ class RectangleMaskInvocation(BaseInvocation, WithMetadata):
     tags=["conditioning"],
     category="conditioning",
     version="1.0.0",
-    classification=Classification.Beta,
 )
 class AlphaMaskToTensorInvocation(BaseInvocation):
     """Convert a mask image to a tensor. Opaque regions are 1 and transparent regions are 0."""

@@ -87,7 +85,6 @@ class AlphaMaskToTensorInvocation(BaseInvocation):
     tags=["conditioning"],
     category="conditioning",
     version="1.1.0",
-    classification=Classification.Beta,
 )
 class InvertTensorMaskInvocation(BaseInvocation):
     """Inverts a tensor mask."""

@@ -234,7 +231,6 @@ WHITE = ColorField(r=255, g=255, b=255, a=255)
     tags=["mask"],
     category="mask",
     version="1.0.0",
-    classification=Classification.Beta,
 )
 class GetMaskBoundingBoxInvocation(BaseInvocation):
     """Gets the bounding box of the given mask image."""
@@ -43,7 +43,7 @@ from invokeai.app.invocations.primitives import BooleanOutput, FloatOutput, Inte
 from invokeai.app.invocations.scheduler import SchedulerOutput
 from invokeai.app.invocations.t2i_adapter import T2IAdapterField, T2IAdapterInvocation
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager.config import ModelType, SubModelType
+from invokeai.backend.model_manager.taxonomy import ModelType, SubModelType
 from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES
 from invokeai.version import __version__
@@ -6,7 +6,6 @@ from pydantic import BaseModel, Field
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )

@@ -15,10 +14,8 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.shared.models import FreeUConfig
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
-    BaseModelType,
-    ModelType,
-    SubModelType,
 )
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType


 class ModelIdentifierField(BaseModel):

@@ -126,7 +123,6 @@ class ModelIdentifierOutput(BaseInvocationOutput):
     tags=["model"],
     category="model",
     version="1.0.1",
-    classification=Classification.Prototype,
 )
 class ModelIdentifierInvocation(BaseInvocation):
     """Selects any model, outputting it its identifier. Be careful with this one! The identifier will be accepted as
@@ -6,7 +6,7 @@ from diffusers.models.transformers.transformer_sd3 import SD3Transformer2DModel
 from torchvision.transforms.functional import resize as tv_resize
 from tqdm import tqdm

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     DenoiseMaskField,

@@ -23,7 +23,7 @@ from invokeai.app.invocations.primitives import LatentsOutput
 from invokeai.app.invocations.sd3_text_encoder import SD3_T5_MAX_SEQ_LEN
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.flux.sampling_utils import clip_timestep_schedule_fractional
-from invokeai.backend.model_manager.config import BaseModelType
+from invokeai.backend.model_manager import BaseModelType
 from invokeai.backend.sd3.extensions.inpaint_extension import InpaintExtension
 from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import SD3ConditioningInfo

@@ -36,7 +36,6 @@ from invokeai.backend.util.devices import TorchDevice
     tags=["image", "sd3"],
     category="image",
     version="1.1.1",
-    classification=Classification.Prototype,
 )
 class SD3DenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Run denoising process with a SD3 model."""
@@ -2,7 +2,7 @@ import einops
 import torch
 from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     ImageField,

@@ -25,7 +25,6 @@ from invokeai.backend.util.devices import TorchDevice
     tags=["image", "latents", "vae", "i2l", "sd3"],
     category="image",
     version="1.0.1",
-    classification=Classification.Prototype,
 )
 class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Generates latents from an image."""
@@ -3,7 +3,6 @@ from typing import Optional
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )

@@ -14,7 +13,7 @@ from invokeai.app.util.t5_model_identifier import (
     preprocess_t5_encoder_model_identifier,
     preprocess_t5_tokenizer_model_identifier,
 )
-from invokeai.backend.model_manager.config import SubModelType
+from invokeai.backend.model_manager.taxonomy import SubModelType


 @invocation_output("sd3_model_loader_output")

@@ -34,7 +33,6 @@ class Sd3ModelLoaderOutput(BaseInvocationOutput):
     tags=["model", "sd3"],
     category="model",
     version="1.0.1",
-    classification=Classification.Prototype,
 )
 class Sd3ModelLoaderInvocation(BaseInvocation):
     """Loads a SD3 base model, outputting its submodels."""
@@ -11,12 +11,12 @@ from transformers import (
     T5TokenizerFast,
 )

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
 from invokeai.app.invocations.model import CLIPField, T5EncoderField
 from invokeai.app.invocations.primitives import SD3ConditioningOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager.config import ModelFormat
+from invokeai.backend.model_manager.taxonomy import ModelFormat
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw

@@ -33,7 +33,6 @@ SD3_T5_MAX_SEQ_LEN = 256
     tags=["prompt", "conditioning", "sd3"],
     category="conditioning",
     version="1.0.1",
-    classification=Classification.Prototype,
 )
 class Sd3TextEncoderInvocation(BaseInvocation):
     """Encodes and preps a prompt for a SD3 image."""
@@ -2,7 +2,7 @@ from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocati
 from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, UIType
 from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, UNetField, VAEField
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager import SubModelType
+from invokeai.backend.model_manager.taxonomy import SubModelType


 @invocation_output("sdxl_model_loader_output")
@@ -7,7 +7,7 @@ from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
 from diffusers.schedulers.scheduling_utils import SchedulerMixin
 from pydantic import field_validator

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.controlnet_image_processors import ControlField
 from invokeai.app.invocations.denoise_latents import DenoiseLatentsInvocation, get_scheduler

@@ -56,7 +56,6 @@ def crop_controlnet_data(control_data: ControlNetData, latent_region: TBLR) -> C
     title="Tiled Multi-Diffusion Denoise - SD1.5, SDXL",
     tags=["upscale", "denoise"],
     category="latents",
-    classification=Classification.Beta,
     version="1.0.1",
 )
 class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
@@ -7,7 +7,6 @@ from pydantic import BaseModel
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )

@@ -40,7 +39,6 @@ class CalculateImageTilesOutput(BaseInvocationOutput):
     tags=["tiles"],
     category="tiles",
     version="1.0.1",
-    classification=Classification.Beta,
 )
 class CalculateImageTilesInvocation(BaseInvocation):
     """Calculate the coordinates and overlaps of tiles that cover a target image shape."""

@@ -74,7 +72,6 @@ class CalculateImageTilesInvocation(BaseInvocation):
     tags=["tiles"],
     category="tiles",
     version="1.1.1",
-    classification=Classification.Beta,
 )
 class CalculateImageTilesEvenSplitInvocation(BaseInvocation):
     """Calculate the coordinates and overlaps of tiles that cover a target image shape."""

@@ -117,7 +114,6 @@ class CalculateImageTilesEvenSplitInvocation(BaseInvocation):
     tags=["tiles"],
     category="tiles",
     version="1.0.1",
-    classification=Classification.Beta,
 )
 class CalculateImageTilesMinimumOverlapInvocation(BaseInvocation):
     """Calculate the coordinates and overlaps of tiles that cover a target image shape."""

@@ -168,7 +164,6 @@ class TileToPropertiesOutput(BaseInvocationOutput):
     tags=["tiles"],
     category="tiles",
     version="1.0.1",
-    classification=Classification.Beta,
 )
 class TileToPropertiesInvocation(BaseInvocation):
     """Split a Tile into its individual properties."""

@@ -201,7 +196,6 @@ class PairTileImageOutput(BaseInvocationOutput):
     tags=["tiles"],
     category="tiles",
     version="1.0.1",
-    classification=Classification.Beta,
 )
 class PairTileImageInvocation(BaseInvocation):
     """Pair an image with its tile properties."""

@@ -230,7 +224,6 @@ BLEND_MODES = Literal["Linear", "Seam"]
     tags=["tiles"],
     category="tiles",
     version="1.1.1",
-    classification=Classification.Beta,
 )
 class MergeTilesToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Merge multiple tile images into a single image."""
@@ -41,9 +41,10 @@ def run_app() -> None:
     )

     # Find an open port, and modify the config accordingly.
-    orig_config_port = app_config.port
-    app_config.port = find_open_port(app_config.port)
-    if orig_config_port != app_config.port:
+    first_open_port = find_open_port(app_config.port)
+    if app_config.port != first_open_port:
+        orig_config_port = app_config.port
+        app_config.port = first_open_port
         logger.warning(f"Port {orig_config_port} is already in use. Using port {app_config.port}.")

     # Miscellaneous startup tasks.
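The reordering means `app_config.port` is only mutated when a different port was actually chosen, so the warning can no longer fire with identical old and new values. For reference, a hypothetical `find_open_port` with the semantics the call site relies on (illustrative only; the real helper is defined elsewhere in the codebase):

```python
# Hypothetical stand-in for find_open_port: return the requested port if it
# is free, otherwise probe upwards until a free one is found.
import socket

def find_open_port(start: int) -> int:
    port = start
    while True:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            # connect_ex returns a non-zero errno when nothing is listening.
            if s.connect_ex(("127.0.0.1", port)) != 0:
                return port
        port += 1
```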
@@ -44,7 +44,8 @@ if TYPE_CHECKING:
         SessionQueueItem,
         SessionQueueStatus,
     )
-    from invokeai.backend.model_manager.config import AnyModelConfig, SubModelType
+    from invokeai.backend.model_manager import SubModelType
+    from invokeai.backend.model_manager.config import AnyModelConfig


 class EventServiceBase:

@@ -16,7 +16,8 @@ from invokeai.app.services.session_queue.session_queue_common import (
 )
 from invokeai.app.services.shared.graph import AnyInvocation, AnyInvocationOutput
 from invokeai.app.util.misc import get_timestamp
-from invokeai.backend.model_manager.config import AnyModelConfig, SubModelType
+from invokeai.backend.model_manager import SubModelType
+from invokeai.backend.model_manager.config import AnyModelConfig

 if TYPE_CHECKING:
     from invokeai.app.services.download.download_base import DownloadJob
@@ -10,9 +10,9 @@ from typing_extensions import Annotated

 from invokeai.app.services.download import DownloadJob, MultiFileDownloadJob
 from invokeai.app.services.model_records import ModelRecordChanges
-from invokeai.backend.model_manager import AnyModelConfig, ModelRepoVariant
-from invokeai.backend.model_manager.config import ModelSourceType
+from invokeai.backend.model_manager.config import AnyModelConfig
 from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata
+from invokeai.backend.model_manager.taxonomy import ModelRepoVariant, ModelSourceType


 class InstallStatus(str, Enum):
@@ -39,8 +39,6 @@ from invokeai.backend.model_manager.config import (
     CheckpointConfigBase,
     InvalidModelConfigException,
     ModelConfigBase,
-    ModelRepoVariant,
-    ModelSourceType,
 )
 from invokeai.backend.model_manager.legacy_probe import ModelProbe
 from invokeai.backend.model_manager.metadata import (

@@ -52,6 +50,7 @@ from invokeai.backend.model_manager.metadata import (
 )
 from invokeai.backend.model_manager.metadata.metadata_base import HuggingFaceMetadata
 from invokeai.backend.model_manager.search import ModelSearch
+from invokeai.backend.model_manager.taxonomy import ModelRepoVariant, ModelSourceType
 from invokeai.backend.util import InvokeAILogger
 from invokeai.backend.util.catch_sigint import catch_sigint
 from invokeai.backend.util.devices import TorchDevice
@@ -5,9 +5,10 @@ from abc import ABC, abstractmethod
 from pathlib import Path
 from typing import Callable, Optional

-from invokeai.backend.model_manager import AnyModel, AnyModelConfig, SubModelType
+from invokeai.backend.model_manager.config import AnyModelConfig
 from invokeai.backend.model_manager.load import LoadedModel, LoadedModelWithoutConfig
 from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
+from invokeai.backend.model_manager.taxonomy import AnyModel, SubModelType


 class ModelLoadServiceBase(ABC):
@@ -11,7 +11,7 @@ from torch import load as torch_load
 from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.app.services.invoker import Invoker
 from invokeai.app.services.model_load.model_load_base import ModelLoadServiceBase
-from invokeai.backend.model_manager import AnyModel, AnyModelConfig, SubModelType
+from invokeai.backend.model_manager.config import AnyModelConfig
 from invokeai.backend.model_manager.load import (
     LoadedModel,
     LoadedModelWithoutConfig,

@@ -20,6 +20,7 @@ from invokeai.backend.model_manager.load import (
 )
 from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
 from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.taxonomy import AnyModel, SubModelType
 from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.logging import InvokeAILogger
@@ -1,16 +1,12 @@
 """Initialization file for model manager service."""

 from invokeai.app.services.model_manager.model_manager_default import ModelManagerService, ModelManagerServiceBase
-from invokeai.backend.model_manager import AnyModel, AnyModelConfig, BaseModelType, ModelType, SubModelType
+from invokeai.backend.model_manager import AnyModelConfig
 from invokeai.backend.model_manager.load import LoadedModel

 __all__ = [
     "ModelManagerServiceBase",
     "ModelManagerService",
-    "AnyModel",
     "AnyModelConfig",
-    "BaseModelType",
-    "ModelType",
-    "SubModelType",
     "LoadedModel",
 ]
@@ -14,10 +14,12 @@ from invokeai.app.services.shared.pagination import PaginatedResults
 from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
-    BaseModelType,
-    ClipVariantType,
     ControlAdapterDefaultSettings,
     MainModelDefaultSettings,
 )
+from invokeai.backend.model_manager.taxonomy import (
+    BaseModelType,
+    ClipVariantType,
+    ModelFormat,
+    ModelSourceType,
+    ModelType,
@@ -60,11 +60,9 @@ from invokeai.app.services.shared.pagination import PaginatedResults
 from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
-    BaseModelType,
     ModelConfigFactory,
-    ModelFormat,
-    ModelType,
 )
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType


 class ModelRecordServiceSQL(ModelRecordServiceBase):
@@ -20,14 +20,10 @@ from invokeai.app.services.session_processor.session_processor_common import Pro
 from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
 from invokeai.app.util.step_callback import flux_step_callback, stable_diffusion_step_callback
 from invokeai.backend.model_manager.config import (
-    AnyModel,
     AnyModelConfig,
-    BaseModelType,
-    ModelFormat,
-    ModelType,
-    SubModelType,
 )
 from invokeai.backend.model_manager.load.load_base import LoadedModel, LoadedModelWithoutConfig
+from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType
 from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData
@@ -34,9 +34,17 @@ def check_cudnn(logger: logging.Logger) -> None:
     )


+def invokeai_source_dir() -> Path:
+    # `invokeai.__file__` doesn't always work for editable installs
+    this_module_path = Path(__file__).resolve()
+    # https://youtrack.jetbrains.com/issue/PY-38382/Unresolved-reference-spec-but-this-is-standard-builtin
+    # noinspection PyUnresolvedReferences
+    depth = len(__spec__.parent.split("."))
+    return this_module_path.parents[depth - 1]
+
+
 def enable_dev_reload(custom_nodes_path=None) -> None:
     """Enable hot reloading on python file changes during development."""
     import invokeai
     from invokeai.backend.util.logging import InvokeAILogger

     try:

@@ -46,7 +54,7 @@ def enable_dev_reload(custom_nodes_path=None) -> None:
             'Can\'t start `--dev_reload` because jurigged is not found; `pip install -e ".[dev]"` to include development dependencies.'
         ) from e
     else:
-        paths = [str(Path(invokeai.__file__).with_name("*.py"))]
+        paths = [str(invokeai_source_dir() / "*.py")]
         if custom_nodes_path:
             paths.append(str(custom_nodes_path / "*.py"))
         jurigged.watch(pattern=paths, logger=InvokeAILogger.get_logger(name="jurigged").info)
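The `parents[depth - 1]` arithmetic works because `__spec__.parent` is the module's dotted package path, so its segment count says how many directories sit between this file and the directory that contains the `invokeai` package root. An illustrative check with a hypothetical module location:

```python
# Illustrative only: suppose the module lived at
# /src/InvokeAI/invokeai/backend/util/devices.py, so that
# __spec__.parent == "invokeai.backend.util" (three segments).
from pathlib import Path

module_file = Path("/src/InvokeAI/invokeai/backend/util/devices.py")
depth = len("invokeai.backend.util".split("."))  # 3
# parents[2] climbs util -> backend -> invokeai, yielding the package
# directory, the same place the old Path(invokeai.__file__) logic pointed,
# but without importing the package.
assert module_file.parents[depth - 1] == Path("/src/InvokeAI/invokeai")
```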
@@ -5,7 +5,7 @@ import torch
 from PIL import Image

 from invokeai.app.services.session_processor.session_processor_common import CanceledException
-from invokeai.backend.model_manager.config import BaseModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType
 from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState

 # fast latents preview matrix for sdxl
@@ -1,5 +1,5 @@
 from invokeai.app.invocations.model import ModelIdentifierField
-from invokeai.backend.model_manager.config import BaseModelType, SubModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType, SubModelType


 def preprocess_t5_encoder_model_identifier(model_identifier: ModelIdentifierField) -> ModelIdentifierField:
@@ -4,7 +4,7 @@ from typing import List, Tuple
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.model_records import UnknownModelException
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager.config import BaseModelType, ModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
 from invokeai.backend.textual_inversion import TextualInversionModelRaw
@@ -6,8 +6,8 @@ import torch
 from PIL import Image

 import invokeai.backend.util.logging as logger
-from invokeai.backend.model_manager.config import AnyModel
 from invokeai.backend.model_manager.load.model_cache.utils import get_effective_device
+from invokeai.backend.model_manager.taxonomy import AnyModel


 def norm_img(np_img):
@@ -16,7 +16,7 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F

-from .config import *
+from .config import is_exportable, is_scriptable


 # From PyTorch internals

@@ -5,8 +5,8 @@ Copyright 2020 Ross Wightman
 import re
 from copy import deepcopy

-from .conv2d_layers import *
-from geffnet.activations import *
+from .conv2d_layers import CondConv2d, get_condconv_initializer, math, partial, select_conv2d
+from geffnet.activations import F, get_act_layer, nn, sigmoid, torch

 __all__ = ['get_bn_args_tf', 'resolve_bn_args', 'resolve_se_args', 'resolve_act_layer', 'make_divisible',
            'round_channels', 'drop_connect', 'SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv',

@@ -32,7 +32,9 @@ import torch.nn.functional as F
 from .config import layer_config_kwargs, is_scriptable
 from .conv2d_layers import select_conv2d
 from .helpers import load_pretrained
-from .efficientnet_builder import *
+from .efficientnet_builder import (BN_EPS_TF_DEFAULT, EfficientNetBuilder, decode_arch_def,
+                                   initialize_weight_default, initialize_weight_goog,
+                                   resolve_act_layer, resolve_bn_args, round_channels)

 __all__ = ['GenEfficientNet', 'mnasnet_050', 'mnasnet_075', 'mnasnet_100', 'mnasnet_b1', 'mnasnet_140',
            'semnasnet_050', 'semnasnet_075', 'semnasnet_100', 'mnasnet_a1', 'semnasnet_140', 'mnasnet_small',

@@ -13,7 +13,9 @@ from .activations import get_act_fn, get_act_layer, HardSwish
 from .config import layer_config_kwargs
 from .conv2d_layers import select_conv2d
 from .helpers import load_pretrained
-from .efficientnet_builder import *
+from .efficientnet_builder import (BN_EPS_TF_DEFAULT, EfficientNetBuilder, decode_arch_def,
+                                   initialize_weight_default, initialize_weight_goog,
+                                   resolve_act_layer, resolve_bn_args, round_channels)

 __all__ = ['mobilenetv3_rw', 'mobilenetv3_large_075', 'mobilenetv3_large_100', 'mobilenetv3_large_minimal_100',
            'mobilenetv3_small_075', 'mobilenetv3_small_100', 'mobilenetv3_small_minimal_100',
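Note on the pattern in these vendored geffnet hunks: replacing `from module import *` with explicit names makes each file's real dependencies visible and lets linters such as ruff check for undefined names (F403/F405), which matters given the ruff version bump elsewhere in this compare. The oddity of importing `math`, `partial`, `F`, `nn`, and `torch` through the intermediate modules simply preserves names the star imports had been providing transitively, keeping the change behavior-neutral.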
@@ -10,7 +10,7 @@ from cv2.typing import MatLike
 from tqdm import tqdm

 from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet
-from invokeai.backend.model_manager.config import AnyModel
+from invokeai.backend.model_manager.taxonomy import AnyModel
 from invokeai.backend.util.devices import TorchDevice

 """
@@ -47,3 +47,10 @@ class LlavaOnevisionModel(RawModel):

     def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
         self._vllm_model.to(device=device, dtype=dtype)
+
+    def calc_size(self) -> int:
+        """Get size of the model in memory in bytes."""
+        # HACK(ryand): Fix this issue with circular imports.
+        from invokeai.backend.model_manager.load.model_util import calc_module_size
+
+        return calc_module_size(self._vllm_model)
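The function-local import in `calc_size` is the usual way to break a Python import cycle: `model_util` is only resolved the first time the method runs, after both modules have finished initializing, which is exactly what the `HACK(ryand)` comment is working around.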
@@ -1,37 +1,45 @@
 """Re-export frequently-used symbols from the Model Manager backend."""

 from invokeai.backend.model_manager.config import (
-    AnyModel,
     AnyModelConfig,
-    BaseModelType,
     InvalidModelConfigException,
     ModelConfigBase,
     ModelConfigFactory,
 )
-from invokeai.backend.model_manager.legacy_probe import ModelProbe
-from invokeai.backend.model_manager.load import LoadedModel
-from invokeai.backend.model_manager.search import ModelSearch
+from invokeai.backend.model_manager.taxonomy import (
+    AnyModel,
+    AnyVariant,
+    BaseModelType,
+    ClipVariantType,
+    ModelFormat,
+    ModelRepoVariant,
+    ModelSourceType,
+    ModelType,
+    ModelVariantType,
+    SchedulerPredictionType,
+    SubModelType,
+)
+from invokeai.backend.model_manager.legacy_probe import ModelProbe
+from invokeai.backend.model_manager.load import LoadedModel
+from invokeai.backend.model_manager.search import ModelSearch

 __all__ = [
-    "AnyModel",
     "AnyModelConfig",
-    "BaseModelType",
-    "ModelRepoVariant",
     "InvalidModelConfigException",
     "LoadedModel",
     "ModelConfigFactory",
-    "ModelFormat",
     "ModelProbe",
     "ModelSearch",
-    "ModelConfigBase",
+    "AnyModel",
+    "AnyVariant",
+    "BaseModelType",
+    "ClipVariantType",
+    "ModelFormat",
+    "ModelRepoVariant",
+    "ModelSourceType",
+    "ModelType",
+    "ModelVariantType",
+    "SchedulerPredictionType",
+    "SubModelType",
+    "ModelConfigBase",
 ]
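With this `__init__`, the taxonomy enums have a single home in `taxonomy.py`, while the package keeps re-exporting them for backwards compatibility, so both import spellings resolve to the same objects:

```python
# Both spellings work after the refactor; the second goes via the re-export.
from invokeai.backend.model_manager.taxonomy import BaseModelType
from invokeai.backend.model_manager import BaseModelType as ReExported

assert BaseModelType is ReExported
```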
@@ -30,11 +30,8 @@ from inspect import isabstract
 from pathlib import Path
 from typing import ClassVar, Literal, Optional, TypeAlias, Union

-import diffusers
-import onnxruntime as ort
 import safetensors.torch
 import torch
-from diffusers.models.modeling_utils import ModelMixin
 from picklescan.scanner import scan_file_path
 from pydantic import BaseModel, ConfigDict, Discriminator, Field, Tag, TypeAdapter
 from typing_extensions import Annotated, Any, Dict
@@ -42,139 +39,37 @@ from typing_extensions import Annotated, Any, Dict
 from invokeai.app.util.misc import uuid_string
 from invokeai.backend.model_hash.hash_validator import validate_hash
 from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
+from invokeai.backend.model_manager.taxonomy import (
+    AnyVariant,
+    BaseModelType,
+    ClipVariantType,
+    ModelFormat,
+    ModelRepoVariant,
+    ModelSourceType,
+    ModelType,
+    ModelVariantType,
+    SchedulerPredictionType,
+    SubModelType,
+)
 from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader
 from invokeai.backend.raw_model import RawModel
 from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES
 from invokeai.backend.util.silence_warnings import SilenceWarnings

+logger = logging.getLogger(__name__)

-# ModelMixin is the base class for all diffusers and transformers models
-# RawModel is the InvokeAI wrapper class for ip_adapters, loras, textual_inversion and onnx runtime
-AnyModel = Union[
-    ModelMixin, RawModel, torch.nn.Module, Dict[str, torch.Tensor], diffusers.DiffusionPipeline, ort.InferenceSession
-]
-

 class InvalidModelConfigException(Exception):
     """Exception for when config parser doesn't recognize this combination of model type and format."""


-class BaseModelType(str, Enum):
-    """Base model type."""
-
-    Any = "any"
-    StableDiffusion1 = "sd-1"
-    StableDiffusion2 = "sd-2"
-    StableDiffusion3 = "sd-3"
-    StableDiffusionXL = "sdxl"
-    StableDiffusionXLRefiner = "sdxl-refiner"
-    Flux = "flux"
-    # Kandinsky2_1 = "kandinsky-2.1"
-
-
-class ModelType(str, Enum):
-    """Model type."""
-
-    ONNX = "onnx"
-    Main = "main"
-    VAE = "vae"
-    LoRA = "lora"
-    ControlLoRa = "control_lora"
-    ControlNet = "controlnet"  # used by model_probe
-    TextualInversion = "embedding"
-    IPAdapter = "ip_adapter"
-    CLIPVision = "clip_vision"
-    CLIPEmbed = "clip_embed"
-    T2IAdapter = "t2i_adapter"
-    T5Encoder = "t5_encoder"
-    SpandrelImageToImage = "spandrel_image_to_image"
-    SigLIP = "siglip"
-    FluxRedux = "flux_redux"
-    LlavaOnevision = "llava_onevision"
-
-
-class SubModelType(str, Enum):
-    """Submodel type."""
-
-    UNet = "unet"
-    Transformer = "transformer"
-    TextEncoder = "text_encoder"
-    TextEncoder2 = "text_encoder_2"
-    TextEncoder3 = "text_encoder_3"
-    Tokenizer = "tokenizer"
-    Tokenizer2 = "tokenizer_2"
-    Tokenizer3 = "tokenizer_3"
-    VAE = "vae"
-    VAEDecoder = "vae_decoder"
-    VAEEncoder = "vae_encoder"
-    Scheduler = "scheduler"
-    SafetyChecker = "safety_checker"
-
-
-class ClipVariantType(str, Enum):
-    """Variant type."""
-
-    L = "large"
-    G = "gigantic"
-
-
-class ModelVariantType(str, Enum):
-    """Variant type."""
-
-    Normal = "normal"
-    Inpaint = "inpaint"
-    Depth = "depth"
-
-
-class ModelFormat(str, Enum):
-    """Storage format of model."""
-
-    Diffusers = "diffusers"
-    Checkpoint = "checkpoint"
-    LyCORIS = "lycoris"
-    ONNX = "onnx"
-    Olive = "olive"
-    EmbeddingFile = "embedding_file"
-    EmbeddingFolder = "embedding_folder"
-    InvokeAI = "invokeai"
-    T5Encoder = "t5_encoder"
-    BnbQuantizedLlmInt8b = "bnb_quantized_int8b"
-    BnbQuantizednf4b = "bnb_quantized_nf4b"
-    GGUFQuantized = "gguf_quantized"
-
-
-class SchedulerPredictionType(str, Enum):
-    """Scheduler prediction type."""
-
-    Epsilon = "epsilon"
-    VPrediction = "v_prediction"
-    Sample = "sample"
-
-
-class ModelRepoVariant(str, Enum):
-    """Various hugging face variants on the diffusers format."""
-
-    Default = ""  # model files without "fp16" or other qualifier
-    FP16 = "fp16"
-    FP32 = "fp32"
-    ONNX = "onnx"
-    OpenVINO = "openvino"
-    Flax = "flax"
-
-
-class ModelSourceType(str, Enum):
-    """Model source type."""
-
-    Path = "path"
-    Url = "url"
-    HFRepoID = "hf_repo_id"
-

 DEFAULTS_PRECISION = Literal["fp16", "fp32"]


-AnyVariant: TypeAlias = Union[ModelVariantType, ClipVariantType, None]
+class FSLayout(Enum):
+    FILE = "file"
+    DIRECTORY = "directory"


 class SubmodelDefinition(BaseModel):
@@ -212,29 +107,31 @@ class ModelOnDisk:

     def __init__(self, path: Path, hash_algo: HASHING_ALGORITHMS = "blake3_single"):
         self.path = path
-        self.format_type = ModelFormat.Diffusers if path.is_dir() else ModelFormat.Checkpoint
+        # TODO: Revisit checkpoint vs diffusers terminology
+        self.layout = FSLayout.DIRECTORY if path.is_dir() else FSLayout.FILE
         if self.path.suffix in {".safetensors", ".bin", ".pt", ".ckpt"}:
             self.name = path.stem
         else:
             self.name = path.name
         self.hash_algo = hash_algo
+        self._state_dict_cache = {}

-    def hash(self):
+    def hash(self) -> str:
         return ModelHash(algorithm=self.hash_algo).hash(self.path)

-    def size(self):
-        if self.format_type == ModelFormat.Checkpoint:
+    def size(self) -> int:
+        if self.layout == FSLayout.FILE:
             return self.path.stat().st_size
         return sum(file.stat().st_size for file in self.path.rglob("*"))

-    def component_paths(self):
-        if self.format_type == ModelFormat.Checkpoint:
+    def component_paths(self) -> set[Path]:
+        if self.layout == FSLayout.FILE:
             return {self.path}
         extensions = {".safetensors", ".pt", ".pth", ".ckpt", ".bin", ".gguf"}
         return {f for f in self.path.rglob("*") if f.suffix in extensions}

-    def repo_variant(self):
-        if self.format_type == ModelFormat.Checkpoint:
+    def repo_variant(self) -> Optional[ModelRepoVariant]:
+        if self.layout == FSLayout.FILE:
             return None

         weight_files = list(self.path.glob("**/*.safetensors"))
@@ -250,14 +147,30 @@ class ModelOnDisk:
|
||||
return ModelRepoVariant.ONNX
|
||||
return ModelRepoVariant.Default
|
||||
|
||||
@staticmethod
|
||||
def load_state_dict(path: Path):
|
||||
def load_state_dict(self, path: Optional[Path] = None) -> Dict[str | int, Any]:
|
||||
if path in self._state_dict_cache:
|
||||
return self._state_dict_cache[path]
|
||||
|
||||
if not path:
|
||||
components = list(self.component_paths())
|
||||
match components:
|
||||
case []:
|
||||
raise ValueError("No weight files found for this model")
|
||||
case [p]:
|
||||
path = p
|
||||
case ps if len(ps) >= 2:
|
||||
raise ValueError(
|
||||
f"Multiple weight files found for this model: {ps}. "
|
||||
f"Please specify the intended file using the 'path' argument"
|
||||
)
|
||||
|
||||
with SilenceWarnings():
|
||||
if path.suffix.endswith((".ckpt", ".pt", ".pth", ".bin")):
|
||||
scan_result = scan_file_path(path)
|
||||
if scan_result.infected_files != 0 or scan_result.scan_err:
|
||||
raise RuntimeError(f"The model {path.stem} is potentially infected by malware. Aborting import.")
|
||||
checkpoint = torch.load(path, map_location="cpu")
|
||||
assert isinstance(checkpoint, dict)
|
||||
elif path.suffix.endswith(".gguf"):
|
||||
checkpoint = gguf_sd_loader(path, compute_dtype=torch.float32)
|
||||
elif path.suffix.endswith(".safetensors"):
|
||||
@@ -266,6 +179,7 @@ class ModelOnDisk:
|
||||
raise ValueError(f"Unrecognized model extension: {path.suffix}")
|
||||
|
||||
state_dict = checkpoint.get("state_dict", checkpoint)
|
||||
self._state_dict_cache[path] = state_dict
|
||||
return state_dict
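
For orientation, a minimal sketch of how the cached loader above might be exercised. The import path and the file name are assumptions for illustration, not taken from this diff:

    from pathlib import Path

    from invokeai.backend.model_manager.config import ModelOnDisk  # module path assumed

    mod = ModelOnDisk(Path("/models/example.safetensors"))  # hypothetical single-file checkpoint
    first = mod.load_state_dict()   # 'path' may be omitted when there is exactly one weight file
    second = mod.load_state_dict()  # served from _state_dict_cache; no re-read, no re-scan
    assert first is second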
@@ -348,11 +262,13 @@ class ModelConfigBase(ABC, BaseModel):

         for config_cls in sorted_by_match_speed:
             try:
-                return config_cls.from_model_on_disk(mod, **overrides)
-            except InvalidModelConfigException:
-                logger.debug(f"ModelConfig '{config_cls.__name__}' failed to parse '{mod.path}', trying next config")
+                if not config_cls.matches(mod):
+                    continue
             except Exception as e:
-                logger.error(f"Unexpected exception while parsing '{config_cls.__name__}': {e}, trying next config")
+                logger.warning(f"Unexpected exception while matching {mod.name} to '{config_cls.__name__}': {e}")
                 continue
+            else:
+                return config_cls.from_model_on_disk(mod, **overrides)

         raise InvalidModelConfigException("No valid config found")
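
Note the try/except/else shape above: the else branch runs only when matches() returns without raising, so exceptions raised later by from_model_on_disk are no longer swallowed by the matching loop. A self-contained illustration of that control flow (names hypothetical):

    def first_match(candidates, probe):
        for candidate in candidates:
            try:
                if not probe(candidate):
                    continue  # probe says no; try the next candidate
            except Exception:
                continue  # probe itself failed; treat as a non-match
            else:
                return candidate  # probe succeeded and the candidate matched
        raise LookupError("No valid candidate found")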
@@ -395,9 +311,6 @@ class ModelConfigBase(ABC, BaseModel):
     @classmethod
     def from_model_on_disk(cls, mod: ModelOnDisk, **overrides):
         """Creates an instance of this config or raises InvalidModelConfigException."""
-        if not cls.matches(mod):
-            raise InvalidModelConfigException(f"Path {mod.path} does not match {cls.__name__} format")
-
         fields = cls.parse(mod)
         cls.cast_overrides(overrides)
         fields.update(overrides)
@@ -673,7 +586,7 @@ class LlavaOnevisionConfig(DiffusersConfigBase, ModelConfigBase):

     @classmethod
     def matches(cls, mod: ModelOnDisk) -> bool:
-        if mod.format_type == ModelFormat.Checkpoint:
+        if mod.layout == FSLayout.FILE:
             return False

         config_path = mod.path / "config.json"

@@ -19,22 +19,24 @@ from invokeai.backend.flux.redux.flux_redux_state_dict_utils import is_state_dic
 from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
-    AnyVariant,
-    BaseModelType,
     ControlAdapterDefaultSettings,
     InvalidModelConfigException,
     MainModelDefaultSettings,
     ModelConfigFactory,
-    SubmodelDefinition,
 )
-from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import ConfigLoader
+from invokeai.backend.model_manager.taxonomy import (
+    AnyVariant,
+    BaseModelType,
+    ModelFormat,
+    ModelRepoVariant,
+    ModelSourceType,
+    ModelType,
+    ModelVariantType,
+    SchedulerPredictionType,
+    SubmodelDefinition,
+    SubModelType,
+)
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import ConfigLoader
 from invokeai.backend.model_manager.util.model_util import (
     get_clip_variant_type,
     lora_token_vector_length,

@@ -13,12 +13,11 @@ import torch

 from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.backend.model_manager.config import (
-    AnyModel,
     AnyModelConfig,
-    SubModelType,
 )
 from invokeai.backend.model_manager.load.model_cache.cache_record import CacheRecord
 from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
+from invokeai.backend.model_manager.taxonomy import AnyModel, SubModelType


 class LoadedModelWithoutConfig:

@@ -6,18 +6,16 @@ from pathlib import Path
 from typing import Optional

 from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.model_manager import (
-    AnyModel,
-    AnyModelConfig,
-    InvalidModelConfigException,
-    SubModelType,
-)
-from invokeai.backend.model_manager.config import DiffusersConfigBase
+from invokeai.backend.model_manager.config import AnyModelConfig, DiffusersConfigBase, InvalidModelConfigException
 from invokeai.backend.model_manager.load.load_base import LoadedModel, ModelLoaderBase
 from invokeai.backend.model_manager.load.model_cache.cache_record import CacheRecord
 from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache, get_model_cache_key
 from invokeai.backend.model_manager.load.model_util import calc_model_size_by_fs
 from invokeai.backend.model_manager.load.optimizations import skip_torch_weight_init
+from invokeai.backend.model_manager.taxonomy import (
+    AnyModel,
+    SubModelType,
+)
 from invokeai.backend.util.devices import TorchDevice

@@ -9,7 +9,6 @@ from typing import Any, Callable, Dict, List, Optional
 import psutil
 import torch

-from invokeai.backend.model_manager import AnyModel, SubModelType
 from invokeai.backend.model_manager.load.memory_snapshot import MemorySnapshot
 from invokeai.backend.model_manager.load.model_cache.cache_record import CacheRecord
 from invokeai.backend.model_manager.load.model_cache.cache_stats import CacheStats
@@ -23,6 +22,7 @@ from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.torch
     apply_custom_layers_to_model,
 )
 from invokeai.backend.model_manager.load.model_util import calc_model_size_by_data
+from invokeai.backend.model_manager.taxonomy import AnyModel, SubModelType
 from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.backend.util.prefix_logger_adapter import PrefixedLoggerAdapter

@@ -20,13 +20,10 @@ from typing import Callable, Dict, Optional, Tuple, Type, TypeVar

 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
-    BaseModelType,
     ModelConfigBase,
-    ModelFormat,
-    ModelType,
-    SubModelType,
 )
 from invokeai.backend.model_manager.load import ModelLoaderBase
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType, SubModelType


 class ModelLoaderRegistryBase(ABC):

@@ -4,16 +4,12 @@ from typing import Optional
 from transformers import CLIPVisionModelWithProjection

 from invokeai.backend.model_manager.config import (
-    AnyModel,
     AnyModelConfig,
-    BaseModelType,
     DiffusersConfigBase,
-    ModelFormat,
-    ModelType,
-    SubModelType,
 )
 from invokeai.backend.model_manager.load.load_default import ModelLoader
 from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType


 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.CLIPVision, format=ModelFormat.Diffusers)

@@ -5,19 +5,19 @@ from typing import Optional

 from diffusers import ControlNetModel

-from invokeai.backend.model_manager import (
-    AnyModel,
-    AnyModelConfig,
-)
 from invokeai.backend.model_manager.config import (
-    BaseModelType,
+    AnyModelConfig,
     ControlNetCheckpointConfig,
 )
-from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
-from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.taxonomy import (
+    AnyModel,
+    BaseModelType,
+    ModelFormat,
+    ModelType,
+    SubModelType,
+)
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader


 @ModelLoaderRegistry.register(

@@ -27,15 +27,8 @@ from invokeai.backend.flux.model import Flux
 from invokeai.backend.flux.modules.autoencoder import AutoEncoder
 from invokeai.backend.flux.redux.flux_redux_model import FluxReduxModel
 from invokeai.backend.flux.util import ae_params, params
-from invokeai.backend.model_manager import (
-    AnyModel,
-    AnyModelConfig,
-    BaseModelType,
-    ModelFormat,
-    ModelType,
-    SubModelType,
-)
 from invokeai.backend.model_manager.config import (
+    AnyModelConfig,
     CheckpointConfigBase,
     CLIPEmbedDiffusersConfig,
     ControlNetCheckpointConfig,
@@ -51,6 +44,13 @@ from invokeai.backend.model_manager.config import (
 )
 from invokeai.backend.model_manager.load.load_default import ModelLoader
 from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.taxonomy import (
+    AnyModel,
+    BaseModelType,
+    ModelFormat,
+    ModelType,
+    SubModelType,
+)
 from invokeai.backend.model_manager.util.model_util import (
     convert_bundle_to_flux_transformer_checkpoint,
 )

@@ -8,18 +8,16 @@ from typing import Any, Optional
 from diffusers.configuration_utils import ConfigMixin
 from diffusers.models.modeling_utils import ModelMixin

-from invokeai.backend.model_manager import (
+from invokeai.backend.model_manager.config import AnyModelConfig, DiffusersConfigBase, InvalidModelConfigException
+from invokeai.backend.model_manager.load.load_default import ModelLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.taxonomy import (
     AnyModel,
-    AnyModelConfig,
     BaseModelType,
-    InvalidModelConfigException,
     ModelFormat,
     ModelType,
     SubModelType,
 )
-from invokeai.backend.model_manager.config import DiffusersConfigBase
-from invokeai.backend.model_manager.load.load_default import ModelLoader
-from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry


 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.T2IAdapter, format=ModelFormat.Diffusers)

@@ -7,8 +7,9 @@ from typing import Optional
 import torch

 from invokeai.backend.ip_adapter.ip_adapter import build_ip_adapter
-from invokeai.backend.model_manager import AnyModel, AnyModelConfig, BaseModelType, ModelFormat, ModelType, SubModelType
+from invokeai.backend.model_manager.config import AnyModelConfig
 from invokeai.backend.model_manager.load import ModelLoader, ModelLoaderRegistry
+from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType
 from invokeai.backend.raw_model import RawModel

@@ -3,15 +3,11 @@ from typing import Optional

 from invokeai.backend.llava_onevision_model import LlavaOnevisionModel
 from invokeai.backend.model_manager.config import (
-    AnyModel,
     AnyModelConfig,
-    BaseModelType,
-    ModelFormat,
-    ModelType,
-    SubModelType,
 )
 from invokeai.backend.model_manager.load.load_default import ModelLoader
 from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType


 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.LlavaOnevision, format=ModelFormat.Diffusers)

@@ -9,17 +9,17 @@ import torch
 from safetensors.torch import load_file

 from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.model_manager import (
+from invokeai.backend.model_manager.config import AnyModelConfig
+from invokeai.backend.model_manager.load.load_default import ModelLoader
+from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.taxonomy import (
     AnyModel,
-    AnyModelConfig,
     BaseModelType,
     ModelFormat,
     ModelType,
     SubModelType,
 )
-from invokeai.backend.model_manager.load.load_default import ModelLoader
-from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
-from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
 from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import (
     is_state_dict_likely_flux_control,
     lora_model_from_flux_control_state_dict,

@@ -5,16 +5,16 @@
 from pathlib import Path
 from typing import Optional

-from invokeai.backend.model_manager import (
+from invokeai.backend.model_manager.config import AnyModelConfig
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.taxonomy import (
     AnyModel,
-    AnyModelConfig,
     BaseModelType,
     ModelFormat,
     ModelType,
     SubModelType,
 )
-from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
-from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader


 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ONNX, format=ModelFormat.ONNX)

@@ -2,15 +2,11 @@ from pathlib import Path
 from typing import Optional

 from invokeai.backend.model_manager.config import (
-    AnyModel,
     AnyModelConfig,
-    BaseModelType,
-    ModelFormat,
-    ModelType,
-    SubModelType,
 )
 from invokeai.backend.model_manager.load.load_default import ModelLoader
 from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType
 from invokeai.backend.sig_lip.sig_lip_pipeline import SigLipPipeline

@@ -4,15 +4,11 @@ from typing import Optional
 import torch

 from invokeai.backend.model_manager.config import (
-    AnyModel,
     AnyModelConfig,
-    BaseModelType,
-    ModelFormat,
-    ModelType,
-    SubModelType,
 )
 from invokeai.backend.model_manager.load.load_default import ModelLoader
 from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType
 from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel

@@ -11,16 +11,8 @@ from diffusers import (
     StableDiffusionXLPipeline,
 )

-from invokeai.backend.model_manager import (
-    AnyModel,
-    AnyModelConfig,
-    BaseModelType,
-    ModelFormat,
-    ModelType,
-    ModelVariantType,
-    SubModelType,
-)
 from invokeai.backend.model_manager.config import (
+    AnyModelConfig,
     CheckpointConfigBase,
     DiffusersConfigBase,
     MainCheckpointConfig,
@@ -28,6 +20,14 @@ from invokeai.backend.model_manager.config import (
 from invokeai.backend.model_manager.load.model_cache.model_cache import get_model_cache_key
 from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
 from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.taxonomy import (
+    AnyModel,
+    BaseModelType,
+    ModelFormat,
+    ModelType,
+    ModelVariantType,
+    SubModelType,
+)
 from invokeai.backend.util.silence_warnings import SilenceWarnings

 VARIANT_TO_IN_CHANNEL_MAP = {

@@ -4,16 +4,16 @@
 from pathlib import Path
 from typing import Optional

-from invokeai.backend.model_manager import (
+from invokeai.backend.model_manager.config import AnyModelConfig
+from invokeai.backend.model_manager.load.load_default import ModelLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.taxonomy import (
     AnyModel,
-    AnyModelConfig,
     BaseModelType,
     ModelFormat,
     ModelType,
     SubModelType,
 )
-from invokeai.backend.model_manager.load.load_default import ModelLoader
-from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
 from invokeai.backend.textual_inversion import TextualInversionModelRaw


@@ -5,15 +5,16 @@ from typing import Optional

 from diffusers import AutoencoderKL

-from invokeai.backend.model_manager import (
-    AnyModelConfig,
+from invokeai.backend.model_manager.config import AnyModelConfig, VAECheckpointConfig
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.taxonomy import (
     AnyModel,
     BaseModelType,
     ModelFormat,
     ModelType,
     SubModelType,
 )
-from invokeai.backend.model_manager.config import AnyModel, SubModelType, VAECheckpointConfig
-from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
-from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader


 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.VAE, format=ModelFormat.Diffusers)

@@ -15,7 +15,8 @@ from invokeai.backend.image_util.depth_anything.depth_anything_pipeline import D
 from invokeai.backend.image_util.grounding_dino.grounding_dino_pipeline import GroundingDinoPipeline
 from invokeai.backend.image_util.segment_anything.segment_anything_pipeline import SegmentAnythingPipeline
 from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
-from invokeai.backend.model_manager.config import AnyModel
+from invokeai.backend.llava_onevision_model import LlavaOnevisionModel
+from invokeai.backend.model_manager.taxonomy import AnyModel
 from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
 from invokeai.backend.sig_lip.sig_lip_pipeline import SigLipPipeline
@@ -50,6 +51,7 @@ def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int:
             SegmentAnythingPipeline,
             DepthAnythingPipeline,
             SigLipPipeline,
+            LlavaOnevisionModel,
         ),
     ):
         return model.calc_size()
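
For reference, a hedged sketch of how a "size by data" figure is conventionally computed for a plain torch module; the exact fallback branch inside calc_model_size_by_data is not shown in this diff:

    import torch

    def module_size_bytes(model: torch.nn.Module) -> int:
        # Parameters plus registered buffers, counted in bytes.
        params = sum(t.numel() * t.element_size() for t in model.parameters())
        buffers = sum(b.numel() * b.element_size() for b in model.buffers())
        return params + buffers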
@@ -17,12 +17,12 @@ from typing import Optional
 from pydantic.networks import AnyHttpUrl
 from requests.sessions import Session

-from invokeai.backend.model_manager import ModelRepoVariant
 from invokeai.backend.model_manager.metadata.metadata_base import (
     AnyModelRepoMetadata,
     AnyModelRepoMetadataValidator,
     BaseMetadata,
 )
+from invokeai.backend.model_manager.taxonomy import ModelRepoVariant


 class ModelMetadataFetchBase(ABC):

@@ -24,7 +24,6 @@ from huggingface_hub.errors import RepositoryNotFoundError, RevisionNotFoundErro
 from pydantic.networks import AnyHttpUrl
 from requests.sessions import Session

-from invokeai.backend.model_manager.config import ModelRepoVariant
 from invokeai.backend.model_manager.metadata.fetch.fetch_base import ModelMetadataFetchBase
 from invokeai.backend.model_manager.metadata.metadata_base import (
     AnyModelRepoMetadata,
@@ -32,6 +31,7 @@ from invokeai.backend.model_manager.metadata.metadata_base import (
     RemoteModelFile,
     UnknownMetadataException,
 )
+from invokeai.backend.model_manager.taxonomy import ModelRepoVariant

 HF_MODEL_RE = r"https?://huggingface.co/([\w\-.]+/[\w\-.]+)"

@@ -23,7 +23,7 @@ from pydantic.networks import AnyHttpUrl
 from requests.sessions import Session
 from typing_extensions import Annotated

-from invokeai.backend.model_manager import ModelRepoVariant
+from invokeai.backend.model_manager.taxonomy import ModelRepoVariant
 from invokeai.backend.model_manager.util.select_hf_files import filter_files


@@ -2,7 +2,7 @@ from typing import Optional

 from pydantic import BaseModel

-from invokeai.backend.model_manager.config import BaseModelType, ModelFormat, ModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType


 class StarterModelWithoutDependencies(BaseModel):

129
invokeai/backend/model_manager/taxonomy.py
Normal file
@@ -0,0 +1,129 @@
+from enum import Enum
+from typing import Dict, TypeAlias, Union
+
+import diffusers
+import onnxruntime as ort
+import torch
+from diffusers import ModelMixin
+
+from invokeai.backend.raw_model import RawModel
+
+# ModelMixin is the base class for all diffusers and transformers models
+# RawModel is the InvokeAI wrapper class for ip_adapters, loras, textual_inversion and onnx runtime
+AnyModel = Union[
+    ModelMixin, RawModel, torch.nn.Module, Dict[str, torch.Tensor], diffusers.DiffusionPipeline, ort.InferenceSession
+]
+
+
+class BaseModelType(str, Enum):
+    """Base model type."""
+
+    Any = "any"
+    StableDiffusion1 = "sd-1"
+    StableDiffusion2 = "sd-2"
+    StableDiffusion3 = "sd-3"
+    StableDiffusionXL = "sdxl"
+    StableDiffusionXLRefiner = "sdxl-refiner"
+    Flux = "flux"
+    # Kandinsky2_1 = "kandinsky-2.1"
+
+
+class ModelType(str, Enum):
+    """Model type."""
+
+    ONNX = "onnx"
+    Main = "main"
+    VAE = "vae"
+    LoRA = "lora"
+    ControlLoRa = "control_lora"
+    ControlNet = "controlnet"  # used by model_probe
+    TextualInversion = "embedding"
+    IPAdapter = "ip_adapter"
+    CLIPVision = "clip_vision"
+    CLIPEmbed = "clip_embed"
+    T2IAdapter = "t2i_adapter"
+    T5Encoder = "t5_encoder"
+    SpandrelImageToImage = "spandrel_image_to_image"
+    SigLIP = "siglip"
+    FluxRedux = "flux_redux"
+    LlavaOnevision = "llava_onevision"
+
+
+class SubModelType(str, Enum):
+    """Submodel type."""
+
+    UNet = "unet"
+    Transformer = "transformer"
+    TextEncoder = "text_encoder"
+    TextEncoder2 = "text_encoder_2"
+    TextEncoder3 = "text_encoder_3"
+    Tokenizer = "tokenizer"
+    Tokenizer2 = "tokenizer_2"
+    Tokenizer3 = "tokenizer_3"
+    VAE = "vae"
+    VAEDecoder = "vae_decoder"
+    VAEEncoder = "vae_encoder"
+    Scheduler = "scheduler"
+    SafetyChecker = "safety_checker"
+
+
+class ClipVariantType(str, Enum):
+    """Variant type."""
+
+    L = "large"
+    G = "gigantic"
+
+
+class ModelVariantType(str, Enum):
+    """Variant type."""
+
+    Normal = "normal"
+    Inpaint = "inpaint"
+    Depth = "depth"
+
+
+class ModelFormat(str, Enum):
+    """Storage format of model."""
+
+    Diffusers = "diffusers"
+    Checkpoint = "checkpoint"
+    LyCORIS = "lycoris"
+    ONNX = "onnx"
+    Olive = "olive"
+    EmbeddingFile = "embedding_file"
+    EmbeddingFolder = "embedding_folder"
+    InvokeAI = "invokeai"
+    T5Encoder = "t5_encoder"
+    BnbQuantizedLlmInt8b = "bnb_quantized_int8b"
+    BnbQuantizednf4b = "bnb_quantized_nf4b"
+    GGUFQuantized = "gguf_quantized"
+
+
+class SchedulerPredictionType(str, Enum):
+    """Scheduler prediction type."""
+
+    Epsilon = "epsilon"
+    VPrediction = "v_prediction"
+    Sample = "sample"
+
+
+class ModelRepoVariant(str, Enum):
+    """Various hugging face variants on the diffusers format."""
+
+    Default = ""  # model files without "fp16" or other qualifier
+    FP16 = "fp16"
+    FP32 = "fp32"
+    ONNX = "onnx"
+    OpenVINO = "openvino"
+    Flax = "flax"
+
+
+class ModelSourceType(str, Enum):
+    """Model source type."""
+
+    Path = "path"
+    Url = "url"
+    HFRepoID = "hf_repo_id"
+
+
+AnyVariant: TypeAlias = Union[ModelVariantType, ClipVariantType, None]
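
With the shared vocabulary consolidated into taxonomy.py, downstream modules can import it from one place. A minimal sketch using only names defined in the listing above:

    from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType

    # The enums subclass str, so members compare and serialize as plain strings.
    assert BaseModelType.Flux == "flux"
    key = (BaseModelType.StableDiffusionXL, ModelType.Main, ModelFormat.Diffusers)
    print("/".join(member.value for member in key))  # sdxl/main/diffusers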
@@ -8,7 +8,7 @@ import picklescan.scanner as pscan
 import safetensors
 import torch

-from invokeai.backend.model_manager.config import ClipVariantType
+from invokeai.backend.model_manager.taxonomy import ClipVariantType
 from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader


@@ -17,7 +17,7 @@ from dataclasses import dataclass
 from pathlib import Path
 from typing import Dict, List, Optional, Set

-from invokeai.backend.model_manager.config import ModelRepoVariant
+from invokeai.backend.model_manager.taxonomy import ModelRepoVariant


 def filter_files(

@@ -8,7 +8,7 @@ from diffusers import T2IAdapter
 from PIL.Image import Image

 from invokeai.app.util.controlnet_utils import prepare_control_image
-from invokeai.backend.model_manager import BaseModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningMode
 from invokeai.backend.stable_diffusion.extension_callback_type import ExtensionCallbackType
 from invokeai.backend.stable_diffusion.extensions.base import ExtensionBase, callback

@@ -196,7 +196,8 @@
     "row": "Row",
     "column": "Column",
     "value": "Value",
-    "label": "Label"
+    "label": "Label",
+    "systemInformation": "System Information"
   },
   "hrf": {
     "hrf": "High Resolution Fix",
@@ -2343,8 +2344,9 @@
   "whatsNew": {
     "whatsNewInInvoke": "What's New in Invoke",
     "items": [
-      "Workflows: New and improved Workflow Library.",
-      "FLUX: Support for FLUX Redux & FLUX Fill in Workflows and Canvas."
+      "Workflows: Support for custom string drop-downs in Workflow Builder.",
+      "FLUX: Support for FLUX Fill in Workflows and Canvas.",
+      "LLaVA OneVision VLLM: Beta support in Workflows."
     ],
     "readReleaseNotes": "Read Release Notes",
     "watchRecentReleaseVideos": "Watch Recent Release Videos",

@@ -1,6 +1,8 @@
+import { isAnyOf } from '@reduxjs/toolkit';
 import { logger } from 'app/logging/logger';
 import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
 import type { RootState } from 'app/store/store';
+import { imageUploadedClientSide } from 'features/gallery/store/actions';
 import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
 import { boardIdSelected, galleryViewChanged } from 'features/gallery/store/gallerySlice';
 import { toast } from 'features/toast/toast';
@@ -8,7 +10,8 @@ import { t } from 'i18next';
 import { omit } from 'lodash-es';
 import { boardsApi } from 'services/api/endpoints/boards';
 import { imagesApi } from 'services/api/endpoints/images';
-
+import type { ImageDTO } from 'services/api/types';
 import { getCategories, getListImagesUrl } from 'services/api/util';

 const log = logger('gallery');

 /**
@@ -34,19 +37,56 @@ let lastUploadedToastTimeout: number | null = null;

 export const addImageUploadedFulfilledListener = (startAppListening: AppStartListening) => {
   startAppListening({
-    matcher: imagesApi.endpoints.uploadImage.matchFulfilled,
+    matcher: isAnyOf(imagesApi.endpoints.uploadImage.matchFulfilled, imageUploadedClientSide),
     effect: (action, { dispatch, getState }) => {
-      const imageDTO = action.payload;
+      let imageDTO: ImageDTO;
+      let silent;
+      let isFirstUploadOfBatch = true;
+
+      if (imageUploadedClientSide.match(action)) {
+        imageDTO = action.payload.imageDTO;
+        silent = action.payload.silent;
+        isFirstUploadOfBatch = action.payload.isFirstUploadOfBatch;
+      } else if (imagesApi.endpoints.uploadImage.matchFulfilled(action)) {
+        imageDTO = action.payload;
+        silent = action.meta.arg.originalArgs.silent;
+        isFirstUploadOfBatch = action.meta.arg.originalArgs.isFirstUploadOfBatch ?? true;
+      } else {
+        return;
+      }
+
+      if (silent || imageDTO.is_intermediate) {
+        // If the image is silent or intermediate, we don't want to show a toast
+        return;
+      }
+
+      if (imageUploadedClientSide.match(action)) {
+        const categories = getCategories(imageDTO);
+        const boardId = imageDTO.board_id ?? 'none';
+        dispatch(
+          imagesApi.util.invalidateTags([
+            {
+              type: 'ImageList',
+              id: getListImagesUrl({
+                board_id: boardId,
+                categories,
+              }),
+            },
+            {
+              type: 'Board',
+              id: boardId,
+            },
+            {
+              type: 'BoardImagesTotal',
+              id: boardId,
+            },
+          ])
+        );
+      }
       const state = getState();

       log.debug({ imageDTO }, 'Image uploaded');

-      if (action.meta.arg.originalArgs.silent || imageDTO.is_intermediate) {
-        // When a "silent" upload is requested, or the image is intermediate, we can skip all post-upload actions,
-        // like toasts and switching the gallery view
-        return;
-      }
-
       const boardId = imageDTO.board_id ?? 'none';

       const DEFAULT_UPLOADED_TOAST = {
@@ -80,7 +120,7 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
        *
        * Default to true to not require _all_ image upload handlers to set this value
        */
-      const isFirstUploadOfBatch = action.meta.arg.originalArgs.isFirstUploadOfBatch ?? true;
+
       if (isFirstUploadOfBatch) {
         dispatch(boardIdSelected({ boardId }));
         dispatch(galleryViewChanged('assets'));

@@ -73,6 +73,7 @@ export type AppConfig = {
   maxUpscaleDimension?: number;
   allowPrivateBoards: boolean;
   allowPrivateStylePresets: boolean;
+  allowClientSideUpload: boolean;
   disabledTabs: TabName[];
   disabledFeatures: AppFeature[];
   disabledSDFeatures: SDFeature[];
@@ -81,7 +82,6 @@ export type AppConfig = {
   metadataFetchDebounce?: number;
   workflowFetchDebounce?: number;
   isLocal?: boolean;
-  maxImageUploadCount?: number;
   sd: {
     defaultModel?: string;
     disabledControlNetModels: string[];

105
invokeai/frontend/web/src/common/hooks/useClientSideUpload.ts
Normal file
@@ -0,0 +1,105 @@
+import { useStore } from '@nanostores/react';
+import { $authToken } from 'app/store/nanostores/authToken';
+import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { imageUploadedClientSide } from 'features/gallery/store/actions';
+import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
+import { useCallback } from 'react';
+import { useCreateImageUploadEntryMutation } from 'services/api/endpoints/images';
+import type { ImageDTO } from 'services/api/types';
+export const useClientSideUpload = () => {
+  const dispatch = useAppDispatch();
+  const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
+  const authToken = useStore($authToken);
+  const [createImageUploadEntry] = useCreateImageUploadEntryMutation();
+
+  const clientSideUpload = useCallback(
+    async (file: File, i: number): Promise<ImageDTO> => {
+      const image = new Image();
+      const objectURL = URL.createObjectURL(file);
+      image.src = objectURL;
+      let width = 0;
+      let height = 0;
+      let thumbnail: Blob | undefined;
+
+      await new Promise<void>((resolve) => {
+        image.onload = () => {
+          width = image.naturalWidth;
+          height = image.naturalHeight;
+
+          // Calculate thumbnail dimensions maintaining aspect ratio
+          let thumbWidth = width;
+          let thumbHeight = height;
+          if (width > height && width > 256) {
+            thumbWidth = 256;
+            thumbHeight = Math.round((height * 256) / width);
+          } else if (height > 256) {
+            thumbHeight = 256;
+            thumbWidth = Math.round((width * 256) / height);
+          }
+
+          const canvas = document.createElement('canvas');
+          canvas.width = thumbWidth;
+          canvas.height = thumbHeight;
+          const ctx = canvas.getContext('2d');
+          ctx?.drawImage(image, 0, 0, thumbWidth, thumbHeight);
+
+          canvas.toBlob(
+            (blob) => {
+              if (blob) {
+                thumbnail = blob;
+                // Clean up resources
+                URL.revokeObjectURL(objectURL);
+                image.src = ''; // Clear image source
+                image.remove(); // Remove the image element
+                canvas.width = 0; // Clear canvas
+                canvas.height = 0;
+                resolve();
+              }
+            },
+            'image/webp',
+            0.8
+          );
+        };
+
+        // Handle load errors
+        image.onerror = () => {
+          URL.revokeObjectURL(objectURL);
+          image.remove();
+          resolve();
+        };
+      });
+      const { presigned_url, image_dto } = await createImageUploadEntry({
+        width,
+        height,
+        board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
+      }).unwrap();
+
+      await fetch(`${presigned_url}/?type=full`, {
+        method: 'PUT',
+        body: file,
+        ...(authToken && {
+          headers: {
+            Authorization: `Bearer ${authToken}`,
+          },
+        }),
+      });
+
+      await fetch(`${presigned_url}/?type=thumbnail`, {
+        method: 'PUT',
+        body: thumbnail,
+        ...(authToken && {
+          headers: {
+            Authorization: `Bearer ${authToken}`,
+          },
+        }),
+      });
+
+      dispatch(imageUploadedClientSide({ imageDTO: image_dto, silent: false, isFirstUploadOfBatch: i === 0 }));
+
+      return image_dto;
+    },
+    [autoAddBoardId, authToken, createImageUploadEntry, dispatch]
+  );
+
+  return clientSideUpload;
+};

@@ -3,7 +3,7 @@ import { IconButton } from '@invoke-ai/ui-library';
 import { logger } from 'app/logging/logger';
 import { useAppSelector } from 'app/store/storeHooks';
 import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
-import { selectMaxImageUploadCount } from 'features/system/store/configSlice';
+import { selectIsClientSideUploadEnabled } from 'features/system/store/configSlice';
 import { toast } from 'features/toast/toast';
 import { useCallback } from 'react';
 import type { FileRejection } from 'react-dropzone';
@@ -15,6 +15,7 @@ import type { ImageDTO } from 'services/api/types';
 import { assert } from 'tsafe';
 import type { SetOptional } from 'type-fest';

+import { useClientSideUpload } from './useClientSideUpload';
 type UseImageUploadButtonArgs =
   | {
       isDisabled?: boolean;
@@ -50,8 +51,9 @@ const log = logger('gallery');
  */
 export const useImageUploadButton = ({ onUpload, isDisabled, allowMultiple }: UseImageUploadButtonArgs) => {
   const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
+  const isClientSideUploadEnabled = useAppSelector(selectIsClientSideUploadEnabled);
   const [uploadImage, request] = useUploadImageMutation();
-  const maxImageUploadCount = useAppSelector(selectMaxImageUploadCount);
+  const clientSideUpload = useClientSideUpload();
   const { t } = useTranslation();

   const onDropAccepted = useCallback(
@@ -79,22 +81,27 @@ export const useImageUploadButton = ({ onUpload, isDisabled, allowMultiple }: Us
           onUpload(imageDTO);
         }
       } else {
-        const imageDTOs = await uploadImages(
-          files.map((file, i) => ({
-            file,
-            image_category: 'user',
-            is_intermediate: false,
-            board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
-            silent: false,
-            isFirstUploadOfBatch: i === 0,
-          }))
-        );
+        let imageDTOs: ImageDTO[] = [];
+        if (isClientSideUploadEnabled) {
+          imageDTOs = await Promise.all(files.map((file, i) => clientSideUpload(file, i)));
+        } else {
+          imageDTOs = await uploadImages(
+            files.map((file, i) => ({
+              file,
+              image_category: 'user',
+              is_intermediate: false,
+              board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
+              silent: false,
+              isFirstUploadOfBatch: i === 0,
+            }))
+          );
+        }
         if (onUpload) {
           onUpload(imageDTOs);
         }
       }
     },
-    [allowMultiple, autoAddBoardId, onUpload, uploadImage]
+    [allowMultiple, autoAddBoardId, onUpload, uploadImage, isClientSideUploadEnabled, clientSideUpload]
   );

   const onDropRejected = useCallback(
@@ -105,10 +112,7 @@ export const useImageUploadButton = ({ onUpload, isDisabled, allowMultiple }: Us
         file: rejection.file.path,
       }));
       log.error({ errors }, 'Invalid upload');
-      const description =
-        maxImageUploadCount === undefined
-          ? t('toast.uploadFailedInvalidUploadDesc')
-          : t('toast.uploadFailedInvalidUploadDesc_withCount', { count: maxImageUploadCount });
+      const description = t('toast.uploadFailedInvalidUploadDesc');

       toast({
         id: 'UPLOAD_FAILED',
@@ -120,7 +124,7 @@ export const useImageUploadButton = ({ onUpload, isDisabled, allowMultiple }: Us
       return;
     }
   },
-    [maxImageUploadCount, t]
+    [t]
   );

   const {
@@ -137,8 +141,7 @@ export const useImageUploadButton = ({ onUpload, isDisabled, allowMultiple }: Us
     onDropRejected,
     disabled: isDisabled,
     noDrag: true,
-    multiple: allowMultiple && (maxImageUploadCount === undefined || maxImageUploadCount > 1),
-    maxFiles: maxImageUploadCount,
+    multiple: allowMultiple,
   });

   return { getUploadButtonProps, getUploadInputProps, openUploader, request };

@@ -8,12 +8,13 @@ import { useStore } from '@nanostores/react';
 import { getStore } from 'app/store/nanostores/store';
 import { useAppSelector } from 'app/store/storeHooks';
 import { $focusedRegion } from 'common/hooks/focus';
+import { useClientSideUpload } from 'common/hooks/useClientSideUpload';
 import { setFileToPaste } from 'features/controlLayers/components/CanvasPasteModal';
 import { DndDropOverlay } from 'features/dnd/DndDropOverlay';
 import type { DndTargetState } from 'features/dnd/types';
 import { $imageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
 import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
-import { selectMaxImageUploadCount } from 'features/system/store/configSlice';
+import { selectIsClientSideUploadEnabled } from 'features/system/store/configSlice';
 import { toast } from 'features/toast/toast';
 import { selectActiveTab } from 'features/ui/store/uiSelectors';
 import { memo, useCallback, useEffect, useRef, useState } from 'react';
@@ -53,13 +54,6 @@ const zUploadFile = z
   (file) => ({ message: `File extension .${file.name.split('.').at(-1)} is not supported` })
 );

-const getFilesSchema = (max?: number) => {
-  if (max === undefined) {
-    return z.array(zUploadFile);
-  }
-  return z.array(zUploadFile).max(max);
-};
-
 const sx = {
   position: 'absolute',
   top: 2,
@@ -74,22 +68,19 @@ const sx = {
 export const FullscreenDropzone = memo(() => {
   const { t } = useTranslation();
   const ref = useRef<HTMLDivElement>(null);
-  const maxImageUploadCount = useAppSelector(selectMaxImageUploadCount);
   const [dndState, setDndState] = useState<DndTargetState>('idle');
   const activeTab = useAppSelector(selectActiveTab);
   const isImageViewerOpen = useStore($imageViewer);
+  const isClientSideUploadEnabled = useAppSelector(selectIsClientSideUploadEnabled);
+  const clientSideUpload = useClientSideUpload();

   const validateAndUploadFiles = useCallback(
-    (files: File[]) => {
+    async (files: File[]) => {
       const { getState } = getStore();
-      const uploadFilesSchema = getFilesSchema(maxImageUploadCount);
-      const parseResult = uploadFilesSchema.safeParse(files);
+      const parseResult = z.array(zUploadFile).safeParse(files);

       if (!parseResult.success) {
-        const description =
-          maxImageUploadCount === undefined
-            ? t('toast.uploadFailedInvalidUploadDesc')
-            : t('toast.uploadFailedInvalidUploadDesc_withCount', { count: maxImageUploadCount });
+        const description = t('toast.uploadFailedInvalidUploadDesc');

         toast({
           id: 'UPLOAD_FAILED',
@@ -118,17 +109,23 @@ export const FullscreenDropzone = memo(() => {

       const autoAddBoardId = selectAutoAddBoardId(getState());

-      const uploadArgs: UploadImageArg[] = files.map((file, i) => ({
-        file,
-        image_category: 'user',
-        is_intermediate: false,
-        board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
-        isFirstUploadOfBatch: i === 0,
-      }));
+      if (isClientSideUploadEnabled) {
+        for (const [i, file] of files.entries()) {
+          await clientSideUpload(file, i);
+        }
+      } else {
+        const uploadArgs: UploadImageArg[] = files.map((file, i) => ({
+          file,
+          image_category: 'user',
+          is_intermediate: false,
+          board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
+          isFirstUploadOfBatch: i === 0,
+        }));

-      uploadImages(uploadArgs);
+        uploadImages(uploadArgs);
+      }
     },
-    [activeTab, isImageViewerOpen, maxImageUploadCount, t]
+    [activeTab, isImageViewerOpen, t, isClientSideUploadEnabled, clientSideUpload]
   );

   const onPaste = useCallback(

@@ -1,31 +1,18 @@
 import { IconButton } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
 import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
-import { selectMaxImageUploadCount } from 'features/system/store/configSlice';
 import { t } from 'i18next';
-import { useMemo } from 'react';
 import { PiUploadBold } from 'react-icons/pi';

 export const GalleryUploadButton = () => {
-  const maxImageUploadCount = useAppSelector(selectMaxImageUploadCount);
-  const uploadOptions = useMemo(() => ({ allowMultiple: maxImageUploadCount !== 1 }), [maxImageUploadCount]);
-  const uploadApi = useImageUploadButton(uploadOptions);
+  const uploadApi = useImageUploadButton({ allowMultiple: true });
   return (
     <>
       <IconButton
         size="sm"
         alignSelf="stretch"
         variant="link"
-        aria-label={
-          maxImageUploadCount === undefined || maxImageUploadCount > 1
-            ? t('accessibility.uploadImages')
-            : t('accessibility.uploadImage')
-        }
-        tooltip={
-          maxImageUploadCount === undefined || maxImageUploadCount > 1
-            ? t('accessibility.uploadImages')
-            : t('accessibility.uploadImage')
-        }
+        aria-label={t('accessibility.uploadImages')}
+        tooltip={t('accessibility.uploadImages')}
         icon={<PiUploadBold />}
         {...uploadApi.getUploadButtonProps()}
       />

@@ -1,4 +1,5 @@
 import { createAction } from '@reduxjs/toolkit';
+import type { ImageDTO } from 'services/api/types';

 export const sentImageToCanvas = createAction('gallery/sentImageToCanvas');
@@ -7,3 +8,9 @@ export const imageDownloaded = createAction('gallery/imageDownloaded');
 export const imageCopiedToClipboard = createAction('gallery/imageCopiedToClipboard');

 export const imageOpenedInNewTab = createAction('gallery/imageOpenedInNewTab');
+
+export const imageUploadedClientSide = createAction<{
+  imageDTO: ImageDTO;
+  silent: boolean;
+  isFirstUploadOfBatch: boolean;
+}>('gallery/imageUploadedClientSide');

@@ -109,6 +109,7 @@ export const StringGeneratorFieldInputComponent = memo(
           fontFamily="monospace"
           userSelect="text"
           cursor="text"
+          whiteSpace="pre"
         >
           {resolvedValuesAsString}
         </Text>

@@ -56,6 +56,7 @@ const NodeTitle = ({ nodeId, title }: Props) => {
       fontWeight="semibold"
       color={batchGroupColorToken}
       onDoubleClick={editable.startEditing}
+      noOfLines={1}
     >
       {titleWithBatchGroupId}
     </Text>
@@ -4,7 +4,6 @@ import {
   Grid,
   GridItem,
-  Heading,
   IconButton,
   Image,
   Modal,
   ModalBody,
@@ -13,21 +12,17 @@ import {
   ModalFooter,
   ModalHeader,
   ModalOverlay,
-  Spacer,
   Text,
-  Tooltip,
   useDisclosure,
 } from '@invoke-ai/ui-library';
-import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
-import { useClipboard } from 'common/hooks/useClipboard';
+import { deepClone } from 'common/util/deepClone';
+import DataViewer from 'features/gallery/components/ImageMetadataViewer/DataViewer';
 import { discordLink, githubLink, websiteLink } from 'features/system/store/constants';
-import { map } from 'lodash-es';
 import InvokeLogoYellow from 'public/assets/images/invoke-tag-lrg.svg';
 import type { ReactElement } from 'react';
-import { cloneElement, memo, useCallback } from 'react';
+import { cloneElement, memo, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';
-import { PiCopyBold } from 'react-icons/pi';
-import { useGetAppDepsQuery, useGetAppVersionQuery } from 'services/api/endpoints/appInfo';
+import { useGetAppDepsQuery, useGetAppVersionQuery, useGetRuntimeConfigQuery } from 'services/api/endpoints/appInfo';

 type AboutModalProps = {
   /* The button to open the Settings Modal */
@@ -37,18 +32,26 @@ type AboutModalProps = {
 const AboutModal = ({ children }: AboutModalProps) => {
   const { isOpen, onOpen, onClose } = useDisclosure();
   const { t } = useTranslation();
-  const clipboard = useClipboard();
-  const { depsArray, depsObject } = useGetAppDepsQuery(undefined, {
-    selectFromResult: ({ data }) => ({
-      depsObject: data,
-      depsArray: data ? map(data, (version, name) => ({ name, version })) : [],
-    }),
-  });
+  const { data: runtimeConfig } = useGetRuntimeConfigQuery();
+  const { data: dependencies } = useGetAppDepsQuery();
   const { data: appVersion } = useGetAppVersionQuery();

-  const handleCopy = useCallback(() => {
-    clipboard.writeText(JSON.stringify(depsObject, null, 2));
-  }, [clipboard, depsObject]);
+  const localData = useMemo(() => {
+    const clonedRuntimeConfig = deepClone(runtimeConfig);
+
+    if (clonedRuntimeConfig && clonedRuntimeConfig.config.remote_api_tokens) {
+      clonedRuntimeConfig.config.remote_api_tokens.forEach((remote_api_token) => {
+        remote_api_token.token = 'REDACTED';
+      });
+    }
+
+    return {
+      version: appVersion?.version,
+      dependencies,
+      config: clonedRuntimeConfig?.config,
+      set_config_fields: clonedRuntimeConfig?.set_fields,
+    };
+  }, [appVersion, dependencies, runtimeConfig]);

   return (
     <>
@@ -63,27 +66,7 @@ const AboutModal = ({ children }: AboutModalProps) => {
         <ModalBody display="flex" flexDir="column" gap={4}>
           <Grid templateColumns="repeat(2, 1fr)" h="full">
             <GridItem backgroundColor="base.750" borderRadius="base" p="4" h="full">
-              <ScrollableContent>
-                <Flex position="sticky" top="0" backgroundColor="base.750" p={1} alignItems="center">
-                  <Heading size="md">{t('common.localSystem')}</Heading>
-                  <Spacer />
-                  <Tooltip label={t('common.copy')}>
-                    <IconButton
-                      onClick={handleCopy}
-                      isDisabled={!depsObject}
-                      aria-label={t('common.copy')}
-                      icon={<PiCopyBold />}
-                      variant="ghost"
-                    />
-                  </Tooltip>
-                </Flex>
-                {depsArray.map(({ name, version }, i) => (
-                  <Grid key={i} py="2" px="1" w="full" templateColumns="repeat(2, 1fr)">
-                    <Text>{name}</Text>
-                    <Text>{version ? version : t('common.notInstalled')}</Text>
-                  </Grid>
-                ))}
-              </ScrollableContent>
+              <DataViewer label={t('common.systemInformation')} data={localData} />
             </GridItem>
             <GridItem>
               <Flex flexDir="column" gap={3} justifyContent="center" alignItems="center" h="full">
@@ -20,6 +20,7 @@ const initialConfigState: AppConfig = {
|
||||
shouldFetchMetadataFromApi: false,
|
||||
allowPrivateBoards: false,
|
||||
allowPrivateStylePresets: false,
|
||||
allowClientSideUpload: false,
|
||||
disabledTabs: [],
|
||||
disabledFeatures: ['lightbox', 'faceRestore', 'batches'],
|
||||
disabledSDFeatures: ['variation', 'symmetry', 'hires', 'perlinNoise', 'noiseThreshold'],
|
||||
@@ -218,6 +219,5 @@ export const selectWorkflowFetchDebounce = createConfigSelector((config) => conf
|
||||
export const selectMetadataFetchDebounce = createConfigSelector((config) => config.metadataFetchDebounce ?? 300);
|
||||
|
||||
export const selectIsModelsTabDisabled = createConfigSelector((config) => config.disabledTabs.includes('models'));
|
||||
export const selectMaxImageUploadCount = createConfigSelector((config) => config.maxImageUploadCount);
|
||||
|
||||
export const selectIsClientSideUploadEnabled = createConfigSelector((config) => config.allowClientSideUpload);
|
||||
export const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLocal);
|
||||
|
||||
@@ -36,6 +36,15 @@ export const appInfoApi = api.injectEndpoints({
|
||||
}),
|
||||
providesTags: ['FetchOnReconnect'],
|
||||
}),
|
||||
getRuntimeConfig: build.query<
|
||||
paths['/api/v1/app/runtime_config']['get']['responses']['200']['content']['application/json'],
|
||||
void
|
||||
>({
|
||||
query: () => ({
|
||||
url: buildAppInfoUrl('runtime_config'),
|
||||
method: 'GET',
|
||||
}),
|
||||
}),
|
||||
getInvocationCacheStatus: build.query<
|
||||
paths['/api/v1/app/invocation_cache/status']['get']['responses']['200']['content']['application/json'],
|
||||
void
|
||||
@@ -82,6 +91,7 @@ export const {
|
||||
useGetAppVersionQuery,
|
||||
useGetAppDepsQuery,
|
||||
useGetAppConfigQuery,
|
||||
useGetRuntimeConfigQuery,
|
||||
useClearInvocationCacheMutation,
|
||||
useDisableInvocationCacheMutation,
|
||||
useEnableInvocationCacheMutation,
|
||||
|
||||
@@ -7,6 +7,8 @@ import type {
|
||||
DeleteBoardResult,
|
||||
GraphAndWorkflowResponse,
|
||||
ImageDTO,
|
||||
ImageUploadEntryRequest,
|
||||
ImageUploadEntryResponse,
|
||||
ListImagesArgs,
|
||||
ListImagesResponse,
|
||||
UploadImageArg,
|
||||
@@ -287,6 +289,7 @@ export const imagesApi = api.injectEndpoints({
|
||||
},
|
||||
};
|
||||
},
|
||||
|
||||
invalidatesTags: (result) => {
|
||||
if (!result || result.is_intermediate) {
|
||||
// Don't add it to anything
|
||||
@@ -314,7 +317,13 @@ export const imagesApi = api.injectEndpoints({
|
||||
];
|
||||
},
|
||||
}),
|
||||
|
||||
createImageUploadEntry: build.mutation<ImageUploadEntryResponse, ImageUploadEntryRequest>({
|
||||
query: ({ width, height, board_id }) => ({
|
||||
url: buildImagesUrl(),
|
||||
method: 'POST',
|
||||
body: { width, height, board_id },
|
||||
}),
|
||||
}),
|
||||
deleteBoard: build.mutation<DeleteBoardResult, string>({
|
||||
query: (board_id) => ({ url: buildBoardsUrl(board_id), method: 'DELETE' }),
|
||||
invalidatesTags: () => [
|
||||
@@ -549,6 +558,7 @@ export const {
|
||||
useGetImageWorkflowQuery,
|
||||
useLazyGetImageWorkflowQuery,
|
||||
useUploadImageMutation,
|
||||
useCreateImageUploadEntryMutation,
|
||||
useClearIntermediatesMutation,
|
||||
useAddImagesToBoardMutation,
|
||||
useRemoveImagesFromBoardMutation,
|
||||
|
||||
@@ -466,6 +466,30 @@ export type paths = {
|
||||
patch?: never;
|
||||
trace?: never;
|
||||
};
|
||||
"/api/v1/images/": {
|
||||
parameters: {
|
||||
query?: never;
|
||||
header?: never;
|
||||
path?: never;
|
||||
cookie?: never;
|
||||
};
|
||||
/**
|
||||
* List Image Dtos
|
||||
* @description Gets a list of image DTOs
|
||||
*/
|
||||
get: operations["list_image_dtos"];
|
||||
put?: never;
|
||||
/**
|
||||
* Create Image Upload Entry
|
||||
* @description Uploads an image from a URL, not implemented
|
||||
*/
|
||||
post: operations["create_image_upload_entry"];
|
||||
delete?: never;
|
||||
options?: never;
|
||||
head?: never;
|
||||
patch?: never;
|
||||
trace?: never;
|
||||
};
|
||||
"/api/v1/images/i/{image_name}": {
|
||||
parameters: {
|
||||
query?: never;
|
||||
@@ -619,26 +643,6 @@ export type paths = {
|
||||
patch?: never;
|
||||
trace?: never;
|
||||
};
|
||||
"/api/v1/images/": {
|
||||
parameters: {
|
||||
query?: never;
|
||||
header?: never;
|
||||
path?: never;
|
||||
cookie?: never;
|
||||
};
|
||||
/**
|
||||
* List Image Dtos
|
||||
* @description Gets a list of image DTOs
|
||||
*/
|
||||
get: operations["list_image_dtos"];
|
||||
put?: never;
|
||||
post?: never;
|
||||
delete?: never;
|
||||
options?: never;
|
||||
head?: never;
|
||||
patch?: never;
|
||||
trace?: never;
|
||||
};
|
||||
"/api/v1/images/delete": {
|
||||
parameters: {
|
||||
query?: never;
|
||||
@@ -904,7 +908,7 @@ export type paths = {
|
||||
path?: never;
|
||||
cookie?: never;
|
||||
};
|
||||
/** Get Config */
|
||||
/** Get Config */
|
||||
get: operations["get_config"];
|
||||
put?: never;
|
||||
post?: never;
|
||||
@@ -914,6 +918,23 @@ export type paths = {
|
||||
patch?: never;
|
||||
trace?: never;
|
||||
};
|
||||
"/api/v1/app/runtime_config": {
|
||||
parameters: {
|
||||
query?: never;
|
||||
header?: never;
|
||||
path?: never;
|
||||
cookie?: never;
|
||||
};
|
||||
/** Get Runtime Config */
|
||||
get: operations["get_runtime_config"];
|
||||
put?: never;
|
||||
post?: never;
|
||||
delete?: never;
|
||||
options?: never;
|
||||
head?: never;
|
||||
patch?: never;
|
||||
trace?: never;
|
||||
};
|
||||
"/api/v1/app/logging": {
|
||||
parameters: {
|
||||
query?: never;
|
||||
@@ -2341,6 +2362,24 @@ export type components = {
         */
        batch_ids: string[];
    };
    /** Body_create_image_upload_entry */
    Body_create_image_upload_entry: {
        /**
         * Width
         * @description The width of the image
         */
        width: number;
        /**
         * Height
         * @description The height of the image
         */
        height: number;
        /**
         * Board Id
         * @description The board to add this image to, if any
         */
        board_id?: string | null;
    };
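A literal request body typed against the new schema; the values are illustrative.

// Sketch only: values are placeholders.
import type { components } from 'services/api/schema';

const body: components['schemas']['Body_create_image_upload_entry'] = {
  width: 1024,
  height: 768,
  board_id: null, // optional; null or omitted means "no board"
};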
    /** Body_create_style_preset */
    Body_create_style_preset: {
        /**
@@ -6434,6 +6473,7 @@ export type components = {
     * @description Expands a mask with a fade effect. The mask uses black to indicate areas to keep from the generated image and white for areas to discard.
     * The mask is thresholded to create a binary mask, and then a distance transform is applied to create a fade effect.
     * The fade size is specified in pixels, and the mask is expanded by that amount. The result is a mask with a smooth transition from black to white.
     * If the fade size is 0, the mask is returned as-is.
     */
    ExpandMaskWithFadeInvocation: {
        /**
@@ -10736,6 +10776,16 @@ export type components = {
         */
        type: "i2l";
    };
    /** ImageUploadEntry */
    ImageUploadEntry: {
        /** @description The image DTO */
        image_dto: components["schemas"]["ImageDTO"];
        /**
         * Presigned Url
         * @description The URL to get the presigned URL for the image upload
         */
        presigned_url: string;
    };
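A sketch of the two-step upload flow that ImageUploadEntry implies: reserve an entry, then send the file bytes. Whether the second request is a direct PUT to presigned_url is an assumption; the schema description above suggests the URL may itself need to be fetched first.

// Sketch only: the presigned-URL upload step is an assumption.
import type { components } from 'services/api/schema';

const uploadViaPresignedUrl = async (file: File, width: number, height: number) => {
  const res = await fetch('/api/v1/images/', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ width, height }),
  });
  const entry: components['schemas']['ImageUploadEntry'] = await res.json();

  // Assumed: the storage backend accepts the raw file at the presigned URL.
  await fetch(entry.presigned_url, { method: 'PUT', body: file });

  return entry.image_dto;
};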
    /**
     * ImageUrlsDTO
     * @description The URLs for an image and its thumbnail.
@@ -11850,6 +11900,431 @@ export type components = {
         */
        invocation_source_id: string;
    };
    /**
     * InvokeAIAppConfig
     * @description Invoke's global app configuration.
     *
     * Typically, you won't need to interact with this class directly. Instead, use the `get_config` function from `invokeai.app.services.config` to get a singleton config object.
     *
     * Attributes:
     * host: IP address to bind to. Use `0.0.0.0` to serve to your local network.
     * port: Port to bind to.
     * allow_origins: Allowed CORS origins.
     * allow_credentials: Allow CORS credentials.
     * allow_methods: Methods allowed for CORS.
     * allow_headers: Headers allowed for CORS.
     * ssl_certfile: SSL certificate file for HTTPS. See https://www.uvicorn.org/settings/#https.
     * ssl_keyfile: SSL key file for HTTPS. See https://www.uvicorn.org/settings/#https.
     * log_tokenization: Enable logging of parsed prompt tokens.
     * patchmatch: Enable patchmatch inpaint code.
     * models_dir: Path to the models directory.
     * convert_cache_dir: Path to the converted models cache directory (DEPRECATED, but do not delete because it is needed for migration from previous versions).
     * download_cache_dir: Path to the directory that contains dynamically downloaded models.
     * legacy_conf_dir: Path to directory of legacy checkpoint config files.
     * db_dir: Path to InvokeAI databases directory.
     * outputs_dir: Path to directory for outputs.
     * custom_nodes_dir: Path to directory for custom nodes.
     * style_presets_dir: Path to directory for style presets.
     * workflow_thumbnails_dir: Path to directory for workflow thumbnails.
     * log_handlers: Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>".
     * log_format: Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.<br>Valid values: `plain`, `color`, `syslog`, `legacy`
     * log_level: Emit logging messages at this level or higher.<br>Valid values: `debug`, `info`, `warning`, `error`, `critical`
     * log_sql: Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.
     * log_level_network: Log level for network-related messages. 'info' and 'debug' are very verbose.<br>Valid values: `debug`, `info`, `warning`, `error`, `critical`
     * use_memory_db: Use in-memory database. Useful for development.
     * dev_reload: Automatically reload when Python sources are changed. Does not reload node definitions.
     * profile_graphs: Enable graph profiling using `cProfile`.
     * profile_prefix: An optional prefix for profile output files.
     * profiles_dir: Path to profiles output directory.
     * max_cache_ram_gb: The maximum amount of CPU RAM to use for model caching in GB. If unset, the limit will be configured based on the available RAM. In most cases, it is recommended to leave this unset.
     * max_cache_vram_gb: The amount of VRAM to use for model caching in GB. If unset, the limit will be configured based on the available VRAM and the device_working_mem_gb. In most cases, it is recommended to leave this unset.
     * log_memory_usage: If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.
     * device_working_mem_gb: The amount of working memory to keep available on the compute device (in GB). Has no effect if running on CPU. If you are experiencing OOM errors, try increasing this value.
     * enable_partial_loading: Enable partial loading of models. This enables models to run with reduced VRAM requirements (at the cost of slower speed) by streaming the model from RAM to VRAM as it's used. In some edge cases, partial loading can cause models to run more slowly if they were previously being fully loaded into VRAM.
     * keep_ram_copy_of_weights: Whether to keep a full RAM copy of a model's weights when the model is loaded in VRAM. Keeping a RAM copy increases average RAM usage, but speeds up model switching and LoRA patching (assuming there is sufficient RAM). Set this to False if RAM pressure is consistently high.
     * ram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_ram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
     * vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
     * lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
     * pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
     * device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
     * precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
     * sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
     * attention_type: Attention type.<br>Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`
     * attention_slice_size: Slice size, valid when attention_type=="sliced".<br>Valid values: `auto`, `balanced`, `max`, `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`
     * force_tiled_decode: Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty).
     * pil_compress_level: The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting.
     * max_queue_size: Maximum number of items in the session queue.
     * clear_queue_on_startup: Empties session queue on startup.
     * allow_nodes: List of nodes to allow. Omit to allow all.
     * deny_nodes: List of nodes to deny. Omit to deny none.
     * node_cache_size: How many cached nodes to keep in memory.
     * hashing_algorithm: Model hashing algorithm for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.<br>Valid values: `blake3_multi`, `blake3_single`, `random`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `blake2b`, `blake2s`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, `shake_128`, `shake_256`
     * remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided as a Bearer token.
     * scan_models_on_startup: Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.
     */
    InvokeAIAppConfig: {
        /**
         * Schema Version
         * @description Schema version of the config file. This is not a user-configurable setting.
         * @default 4.0.2
         */
        schema_version?: string;
        /**
         * Legacy Models Yaml Path
         * @description Path to the legacy models.yaml file. This is not a user-configurable setting.
         */
        legacy_models_yaml_path?: string | null;
        /**
         * Host
         * @description IP address to bind to. Use `0.0.0.0` to serve to your local network.
         * @default 127.0.0.1
         */
        host?: string;
        /**
         * Port
         * @description Port to bind to.
         * @default 9090
         */
        port?: number;
        /**
         * Allow Origins
         * @description Allowed CORS origins.
         * @default []
         */
        allow_origins?: string[];
        /**
         * Allow Credentials
         * @description Allow CORS credentials.
         * @default true
         */
        allow_credentials?: boolean;
        /**
         * Allow Methods
         * @description Methods allowed for CORS.
         * @default [
         *   "*"
         * ]
         */
        allow_methods?: string[];
        /**
         * Allow Headers
         * @description Headers allowed for CORS.
         * @default [
         *   "*"
         * ]
         */
        allow_headers?: string[];
        /**
         * Ssl Certfile
         * @description SSL certificate file for HTTPS. See https://www.uvicorn.org/settings/#https.
         */
        ssl_certfile?: string | null;
        /**
         * Ssl Keyfile
         * @description SSL key file for HTTPS. See https://www.uvicorn.org/settings/#https.
         */
        ssl_keyfile?: string | null;
        /**
         * Log Tokenization
         * @description Enable logging of parsed prompt tokens.
         * @default false
         */
        log_tokenization?: boolean;
        /**
         * Patchmatch
         * @description Enable patchmatch inpaint code.
         * @default true
         */
        patchmatch?: boolean;
        /**
         * Models Dir
         * Format: path
         * @description Path to the models directory.
         * @default models
         */
        models_dir?: string;
        /**
         * Convert Cache Dir
         * Format: path
         * @description Path to the converted models cache directory (DEPRECATED, but do not delete because it is needed for migration from previous versions).
         * @default models/.convert_cache
         */
        convert_cache_dir?: string;
        /**
         * Download Cache Dir
         * Format: path
         * @description Path to the directory that contains dynamically downloaded models.
         * @default models/.download_cache
         */
        download_cache_dir?: string;
        /**
         * Legacy Conf Dir
         * Format: path
         * @description Path to directory of legacy checkpoint config files.
         * @default configs
         */
        legacy_conf_dir?: string;
        /**
         * Db Dir
         * Format: path
         * @description Path to InvokeAI databases directory.
         * @default databases
         */
        db_dir?: string;
        /**
         * Outputs Dir
         * Format: path
         * @description Path to directory for outputs.
         * @default outputs
         */
        outputs_dir?: string;
        /**
         * Custom Nodes Dir
         * Format: path
         * @description Path to directory for custom nodes.
         * @default nodes
         */
        custom_nodes_dir?: string;
        /**
         * Style Presets Dir
         * Format: path
         * @description Path to directory for style presets.
         * @default style_presets
         */
        style_presets_dir?: string;
        /**
         * Workflow Thumbnails Dir
         * Format: path
         * @description Path to directory for workflow thumbnails.
         * @default workflow_thumbnails
         */
        workflow_thumbnails_dir?: string;
        /**
         * Log Handlers
         * @description Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>".
         * @default [
         *   "console"
         * ]
         */
        log_handlers?: string[];
        /**
         * Log Format
         * @description Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.
         * @default color
         * @enum {string}
         */
        log_format?: "plain" | "color" | "syslog" | "legacy";
        /**
         * Log Level
         * @description Emit logging messages at this level or higher.
         * @default info
         * @enum {string}
         */
        log_level?: "debug" | "info" | "warning" | "error" | "critical";
        /**
         * Log Sql
         * @description Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.
         * @default false
         */
        log_sql?: boolean;
        /**
         * Log Level Network
         * @description Log level for network-related messages. 'info' and 'debug' are very verbose.
         * @default warning
         * @enum {string}
         */
        log_level_network?: "debug" | "info" | "warning" | "error" | "critical";
        /**
         * Use Memory Db
         * @description Use in-memory database. Useful for development.
         * @default false
         */
        use_memory_db?: boolean;
        /**
         * Dev Reload
         * @description Automatically reload when Python sources are changed. Does not reload node definitions.
         * @default false
         */
        dev_reload?: boolean;
        /**
         * Profile Graphs
         * @description Enable graph profiling using `cProfile`.
         * @default false
         */
        profile_graphs?: boolean;
        /**
         * Profile Prefix
         * @description An optional prefix for profile output files.
         */
        profile_prefix?: string | null;
        /**
         * Profiles Dir
         * Format: path
         * @description Path to profiles output directory.
         * @default profiles
         */
        profiles_dir?: string;
        /**
         * Max Cache Ram Gb
         * @description The maximum amount of CPU RAM to use for model caching in GB. If unset, the limit will be configured based on the available RAM. In most cases, it is recommended to leave this unset.
         */
        max_cache_ram_gb?: number | null;
        /**
         * Max Cache Vram Gb
         * @description The amount of VRAM to use for model caching in GB. If unset, the limit will be configured based on the available VRAM and the device_working_mem_gb. In most cases, it is recommended to leave this unset.
         */
        max_cache_vram_gb?: number | null;
        /**
         * Log Memory Usage
         * @description If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.
         * @default false
         */
        log_memory_usage?: boolean;
        /**
         * Device Working Mem Gb
         * @description The amount of working memory to keep available on the compute device (in GB). Has no effect if running on CPU. If you are experiencing OOM errors, try increasing this value.
         * @default 3
         */
        device_working_mem_gb?: number;
        /**
         * Enable Partial Loading
         * @description Enable partial loading of models. This enables models to run with reduced VRAM requirements (at the cost of slower speed) by streaming the model from RAM to VRAM as it's used. In some edge cases, partial loading can cause models to run more slowly if they were previously being fully loaded into VRAM.
         * @default false
         */
        enable_partial_loading?: boolean;
        /**
         * Keep Ram Copy Of Weights
         * @description Whether to keep a full RAM copy of a model's weights when the model is loaded in VRAM. Keeping a RAM copy increases average RAM usage, but speeds up model switching and LoRA patching (assuming there is sufficient RAM). Set this to False if RAM pressure is consistently high.
         * @default true
         */
        keep_ram_copy_of_weights?: boolean;
        /**
         * Ram
         * @description DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_ram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
         */
        ram?: number | null;
        /**
         * Vram
         * @description DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
         */
        vram?: number | null;
        /**
         * Lazy Offload
         * @description DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
         * @default true
         */
        lazy_offload?: boolean;
        /**
         * Pytorch Cuda Alloc Conf
         * @description Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
         */
        pytorch_cuda_alloc_conf?: string | null;
        /**
         * Device
         * @description Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.
         * @default auto
         * @enum {string}
         */
        device?: "auto" | "cpu" | "cuda" | "cuda:1" | "mps";
        /**
         * Precision
         * @description Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.
         * @default auto
         * @enum {string}
         */
        precision?: "auto" | "float16" | "bfloat16" | "float32";
        /**
         * Sequential Guidance
         * @description Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
         * @default false
         */
        sequential_guidance?: boolean;
        /**
         * Attention Type
         * @description Attention type.
         * @default auto
         * @enum {string}
         */
        attention_type?: "auto" | "normal" | "xformers" | "sliced" | "torch-sdp";
        /**
         * Attention Slice Size
         * @description Slice size, valid when attention_type=="sliced".
         * @default auto
         * @enum {unknown}
         */
        attention_slice_size?: "auto" | "balanced" | "max" | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8;
        /**
         * Force Tiled Decode
         * @description Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty).
         * @default false
         */
        force_tiled_decode?: boolean;
        /**
         * Pil Compress Level
         * @description The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting.
         * @default 1
         */
        pil_compress_level?: number;
        /**
         * Max Queue Size
         * @description Maximum number of items in the session queue.
         * @default 10000
         */
        max_queue_size?: number;
        /**
         * Clear Queue On Startup
         * @description Empties session queue on startup.
         * @default false
         */
        clear_queue_on_startup?: boolean;
        /**
         * Allow Nodes
         * @description List of nodes to allow. Omit to allow all.
         */
        allow_nodes?: string[] | null;
        /**
         * Deny Nodes
         * @description List of nodes to deny. Omit to deny none.
         */
        deny_nodes?: string[] | null;
        /**
         * Node Cache Size
         * @description How many cached nodes to keep in memory.
         * @default 512
         */
        node_cache_size?: number;
        /**
         * Hashing Algorithm
         * @description Model hashing algorithm for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.
         * @default blake3_single
         * @enum {string}
         */
        hashing_algorithm?: "blake3_multi" | "blake3_single" | "random" | "md5" | "sha1" | "sha224" | "sha256" | "sha384" | "sha512" | "blake2b" | "blake2s" | "sha3_224" | "sha3_256" | "sha3_384" | "sha3_512" | "shake_128" | "shake_256";
        /**
         * Remote Api Tokens
         * @description List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided as a Bearer token.
         */
        remote_api_tokens?: components["schemas"]["URLRegexTokenPair"][] | null;
        /**
         * Scan Models On Startup
         * @description Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.
         * @default false
         */
        scan_models_on_startup?: boolean;
    };
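Since every field above is optional with a server-side default, a typed override object can stay sparse. A minimal sketch, with illustrative field choices:

// Sketch only: field choices are illustrative.
import type { components } from 'services/api/schema';

const overrides: components['schemas']['InvokeAIAppConfig'] = {
  host: '0.0.0.0', // serve on the local network
  device: 'cuda',
  precision: 'float16',
  enable_partial_loading: true,
};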
    /**
     * InvokeAIAppConfigWithSetFields
     * @description InvokeAI App Config with model fields set
     */
    InvokeAIAppConfigWithSetFields: {
        /**
         * Set Fields
         * @description The set fields
         */
        set_fields: string[];
        /** @description The InvokeAI App Config */
        config: components["schemas"]["InvokeAIAppConfig"];
    };
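A sketch of consuming the new runtime-config endpoint and splitting explicitly set fields from defaults. The path matches the diff; the helper names are illustrative.

// Sketch only: helper names are assumed; the path and types are from the diff.
import type { components } from 'services/api/schema';

type RuntimeConfig = components['schemas']['InvokeAIAppConfigWithSetFields'];

const logExplicitlySetFields = async () => {
  const res = await fetch('/api/v1/app/runtime_config');
  const { set_fields, config }: RuntimeConfig = await res.json();
  for (const field of set_fields) {
    console.log(`${field} =`, config[field as keyof typeof config]);
  }
};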
    /**
     * Adjust Image Hue Plus
     * @description Adjusts the Hue of an image by rotating it in the selected color space. Originally created by @dwringer
@@ -21013,6 +21488,19 @@ export type components = {
         */
        type: "url";
    };
    /** URLRegexTokenPair */
    URLRegexTokenPair: {
        /**
         * Url Regex
         * @description Regular expression to match against the URL
         */
        url_regex: string;
        /**
         * Token
         * @description Token to use when the URL matches the regex
         */
        token: string;
    };
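A sketch of a URLRegexTokenPair as it would appear in remote_api_tokens; the regex and token are placeholder values.

// Sketch only: regex and token are hypothetical placeholders.
import type { components } from 'services/api/schema';

const hfToken: components['schemas']['URLRegexTokenPair'] = {
  url_regex: 'huggingface\\.co',
  token: 'hf_xxxxxxxxxxxxxxxx', // hypothetical token value
};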
    /**
     * Unsharp Mask
     * @description Applies an unsharp mask filter to an image
@@ -22763,6 +23251,87 @@ export interface operations {
        };
    };
    };
    list_image_dtos: {
        parameters: {
            query?: {
                /** @description The origin of images to list. */
                image_origin?: components["schemas"]["ResourceOrigin"] | null;
                /** @description The categories of image to include. */
                categories?: components["schemas"]["ImageCategory"][] | null;
                /** @description Whether to list intermediate images. */
                is_intermediate?: boolean | null;
                /** @description The board id to filter by. Use 'none' to find images without a board. */
                board_id?: string | null;
                /** @description The page offset */
                offset?: number;
                /** @description The number of images per page */
                limit?: number;
                /** @description The order of sort */
                order_dir?: components["schemas"]["SQLiteDirection"];
                /** @description Whether to sort by starred images first */
                starred_first?: boolean;
                /** @description The term to search for */
                search_term?: string | null;
            };
            header?: never;
            path?: never;
            cookie?: never;
        };
        requestBody?: never;
        responses: {
            /** @description Successful Response */
            200: {
                headers: {
                    [name: string]: unknown;
                };
                content: {
                    "application/json": components["schemas"]["OffsetPaginatedResults_ImageDTO_"];
                };
            };
            /** @description Validation Error */
            422: {
                headers: {
                    [name: string]: unknown;
                };
                content: {
                    "application/json": components["schemas"]["HTTPValidationError"];
                };
            };
        };
    };
    create_image_upload_entry: {
        parameters: {
            query?: never;
            header?: never;
            path?: never;
            cookie?: never;
        };
        requestBody: {
            content: {
                "application/json": components["schemas"]["Body_create_image_upload_entry"];
            };
        };
        responses: {
            /** @description Successful Response */
            200: {
                headers: {
                    [name: string]: unknown;
                };
                content: {
                    "application/json": components["schemas"]["ImageUploadEntry"];
                };
            };
            /** @description Validation Error */
            422: {
                headers: {
                    [name: string]: unknown;
                };
                content: {
                    "application/json": components["schemas"]["HTTPValidationError"];
                };
            };
        };
    };
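A sketch of calling the new operation with openapi-fetch, a common companion to openapi-typescript. Using that client here is an assumption; the app's own code goes through the RTK Query endpoints shown earlier.

// Sketch only: openapi-fetch usage is an assumption, not part of this diff.
import createClient from 'openapi-fetch';
import type { paths } from 'services/api/schema';

const client = createClient<paths>({ baseUrl: 'http://127.0.0.1:9090' });

const reserveUploadEntry = async () => {
  const { data, error } = await client.POST('/api/v1/images/', {
    body: { width: 512, height: 512, board_id: null },
  });
  if (error) {
    throw new Error('create_image_upload_entry failed');
  }
  return data; // { image_dto, presigned_url }
};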
    get_image_dto: {
        parameters: {
            query?: never;
@@ -23116,54 +23685,6 @@ export interface operations {
        };
    };
    };
    list_image_dtos: {
        parameters: {
            query?: {
                /** @description The origin of images to list. */
                image_origin?: components["schemas"]["ResourceOrigin"] | null;
                /** @description The categories of image to include. */
                categories?: components["schemas"]["ImageCategory"][] | null;
                /** @description Whether to list intermediate images. */
                is_intermediate?: boolean | null;
                /** @description The board id to filter by. Use 'none' to find images without a board. */
                board_id?: string | null;
                /** @description The page offset */
                offset?: number;
                /** @description The number of images per page */
                limit?: number;
                /** @description The order of sort */
                order_dir?: components["schemas"]["SQLiteDirection"];
                /** @description Whether to sort by starred images first */
                starred_first?: boolean;
                /** @description The term to search for */
                search_term?: string | null;
            };
            header?: never;
            path?: never;
            cookie?: never;
        };
        requestBody?: never;
        responses: {
            /** @description Successful Response */
            200: {
                headers: {
                    [name: string]: unknown;
                };
                content: {
                    "application/json": components["schemas"]["OffsetPaginatedResults_ImageDTO_"];
                };
            };
            /** @description Validation Error */
            422: {
                headers: {
                    [name: string]: unknown;
                };
                content: {
                    "application/json": components["schemas"]["HTTPValidationError"];
                };
            };
        };
    };
    delete_images_from_list: {
        parameters: {
            query?: never;
@@ -23743,6 +24264,26 @@ export interface operations {
        };
    };
    };
    get_runtime_config: {
        parameters: {
            query?: never;
            header?: never;
            path?: never;
            cookie?: never;
        };
        requestBody?: never;
        responses: {
            /** @description Successful Response */
            200: {
                headers: {
                    [name: string]: unknown;
                };
                content: {
                    "application/json": components["schemas"]["InvokeAIAppConfigWithSetFields"];
                };
            };
        };
    };
    get_log_level: {
        parameters: {
            query?: never;

@@ -354,3 +354,6 @@ export type UploadImageArg = {
   */
  isFirstUploadOfBatch?: boolean;
};

export type ImageUploadEntryResponse = S['ImageUploadEntry'];
export type ImageUploadEntryRequest = paths['/api/v1/images/']['post']['requestBody']['content']['application/json'];

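The new aliases in use, a minimal sketch with illustrative values (assumes the same file scope, where `S` and `paths` are already imported):

// Sketch only: values are placeholders.
const req: ImageUploadEntryRequest = { width: 1024, height: 1024 };

const logEntry = (res: ImageUploadEntryResponse) => {
  console.log(res.image_dto.image_name, res.presigned_url);
};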
@@ -71,7 +71,6 @@ from invokeai.app.services.image_records.image_records_common import ImageCatego
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
from invokeai.app.util.misc import SEED_MAX, get_random_seed
from invokeai.backend.model_manager.config import BaseModelType, ModelType, SubModelType
from invokeai.backend.model_manager.load.load_base import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
@@ -164,9 +163,6 @@ __all__ = [
    # invokeai.backend.model_management.model_manager
    "LoadedModel",
    # invokeai.backend.model_management.models.base
    "BaseModelType",
    "ModelType",
    "SubModelType",
    # invokeai.backend.stable_diffusion.schedulers.schedulers
    "SCHEDULER_NAME_VALUES",
    # invokeai.version

@@ -1 +1 @@
__version__ = "5.9.0rc2"
__version__ = "5.9.0"

Some files were not shown because too many files have changed in this diff.