Compare commits


2 Commits

Author | SHA1 | Message | Date
Millun Atluri | 598ab1d596 | Update version to v3.4.0rc3 | 2023-11-07 17:10:43 +11:00
Millun Atluri | f511adb15f | Updated JS files for v3.4.0rc3 | 2023-11-07 17:05:34 +11:00
60 changed files with 1981 additions and 1842 deletions


@@ -198,7 +198,6 @@ The list of schedulers has been completely revamped and brought up to date:
| **dpmpp_2m** | DPMSolverMultistepScheduler | original noise schedule |
| **dpmpp_2m_k** | DPMSolverMultistepScheduler | using karras noise schedule |
| **unipc** | UniPCMultistepScheduler | CPU only |
| **lcm** | LCMScheduler | |
Please see [3.0.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v3.0.0) for further details.


@@ -460,10 +460,10 @@ def get_torch_source() -> (Union[str, None], str):
url = "https://download.pytorch.org/whl/cpu"
if device == "cuda":
url = "https://download.pytorch.org/whl/cu118"
url = "https://download.pytorch.org/whl/cu121"
optional_modules = "[xformers,onnx-cuda]"
if device == "cuda_and_dml":
url = "https://download.pytorch.org/whl/cu118"
url = "https://download.pytorch.org/whl/cu121"
optional_modules = "[xformers,onnx-directml]"
# in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13
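The installer hunk above switches the CUDA wheel index (cu118 on one side of this compare, cu121 on the other) and picks optional extras per device. For illustration, a minimal sketch of that selection logic; the URLs and extras strings are taken from the hunk, while the function shape, the empty default extras, and the pip command construction are assumptions.

```python
from typing import Optional, Tuple


def pick_torch_source(device: str, cuda_index: str = "cu121") -> Tuple[Optional[str], str]:
    """Return (extra index URL, optional pip extras) for the requested device.

    A None URL means the default PyPI index is fine (the "all other cases"
    branch above). `cuda_index` is "cu118" on one side of this compare and
    "cu121" on the other.
    """
    url: Optional[str] = None
    optional_modules = ""
    if device == "cpu":
        url = "https://download.pytorch.org/whl/cpu"
    elif device == "cuda":
        url = f"https://download.pytorch.org/whl/{cuda_index}"
        optional_modules = "[xformers,onnx-cuda]"
    elif device == "cuda_and_dml":
        url = f"https://download.pytorch.org/whl/{cuda_index}"
        optional_modules = "[xformers,onnx-directml]"
    return url, optional_modules


# Illustrative usage: build a pip command line from the result.
url, extras = pick_torch_source("cuda")
cmd = ["pip", "install", f"invokeai{extras}"]
if url:
    cmd += ["--extra-index-url", url]
print(" ".join(cmd))
```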


@@ -34,4 +34,4 @@ class SocketIO:
async def _handle_unsub_queue(self, sid, data, *args, **kwargs):
if "queue_id" in data:
await self.__sio.leave_room(sid, data["queue_id"])
await self.__sio.enter_room(sid, data["queue_id"])
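In the SocketIO hunk above, the unsubscribe handler calls `enter_room` on one side of the compare and `leave_room` on the other; only `leave_room` actually removes the client from the queue room. A minimal sketch of a matching subscribe/unsubscribe pair, assuming an AsyncServer-like object whose `enter_room`/`leave_room` can be awaited, as in the snippet above:

```python
class QueueRooms:
    """Hypothetical wrapper pairing the two handlers; only the room calls mirror the hunk."""

    def __init__(self, sio) -> None:
        self._sio = sio  # python-socketio AsyncServer (or compatible)

    async def handle_sub_queue(self, sid, data, *args, **kwargs) -> None:
        # Subscribing puts the client into the room named after the queue.
        if "queue_id" in data:
            await self._sio.enter_room(sid, data["queue_id"])

    async def handle_unsub_queue(self, sid, data, *args, **kwargs) -> None:
        # Unsubscribing must remove the client again: leave_room, not enter_room.
        if "queue_id" in data:
            await self._sio.leave_room(sid, data["queue_id"])
```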


@@ -16,7 +16,6 @@ from pydantic.fields import FieldInfo, _Unset
from pydantic_core import PydanticUndefined
from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.misc import uuid_string
if TYPE_CHECKING:
@@ -31,6 +30,74 @@ class InvalidFieldError(TypeError):
pass
class FieldDescriptions:
denoising_start = "When to start denoising, expressed as a percentage of total steps"
denoising_end = "When to stop denoising, expressed as a percentage of total steps"
cfg_scale = "Classifier-Free Guidance scale"
scheduler = "Scheduler to use during inference"
positive_cond = "Positive conditioning tensor"
negative_cond = "Negative conditioning tensor"
noise = "Noise tensor"
clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
unet = "UNet (scheduler, LoRAs)"
vae = "VAE"
cond = "Conditioning tensor"
controlnet_model = "ControlNet model to load"
vae_model = "VAE model to load"
lora_model = "LoRA model to load"
main_model = "Main model (UNet, VAE, CLIP) to load"
sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
sdxl_refiner_model = "SDXL Refiner Main Model (UNet, VAE, CLIP2) to load"
onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
lora_weight = "The weight at which the LoRA is applied to each model"
compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor"
raw_prompt = "Raw prompt text (no parsing)"
sdxl_aesthetic = "The aesthetic score to apply to the conditioning tensor"
skipped_layers = "Number of layers to skip in text encoder"
seed = "Seed for random number generation"
steps = "Number of steps to run"
width = "Width of output (px)"
height = "Height of output (px)"
control = "ControlNet(s) to apply"
ip_adapter = "IP-Adapter to apply"
t2i_adapter = "T2I-Adapter(s) to apply"
denoised_latents = "Denoised latents tensor"
latents = "Latents tensor"
strength = "Strength of denoising (proportional to steps)"
metadata = "Optional metadata to be saved with the image"
metadata_collection = "Collection of Metadata"
metadata_item_polymorphic = "A single metadata item or collection of metadata items"
metadata_item_label = "Label for this metadata item"
metadata_item_value = "The value for this metadata item (may be any type)"
workflow = "Optional workflow to be saved with the image"
interp_mode = "Interpolation mode"
torch_antialias = "Whether or not to apply antialiasing (bilinear or bicubic only)"
fp32 = "Whether or not to use full float32 precision"
precision = "Precision to use"
tiled = "Processing using overlapping tiles (reduce memory consumption)"
detect_res = "Pixel resolution for detection"
image_res = "Pixel resolution for output image"
safe_mode = "Whether or not to use safe mode"
scribble_mode = "Whether or not to use scribble mode"
scale_factor = "The factor by which to scale"
blend_alpha = (
"Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B."
)
num_1 = "The first number"
num_2 = "The second number"
mask = "The mask to use for the operation"
board = "The board to save the image to"
image = "The image to process"
tile_size = "Tile size"
inclusive_low = "The inclusive low value"
exclusive_high = "The exclusive high value"
decimal_places = "The number of decimal places to round to"
freeu_s1 = 'Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
freeu_s2 = 'Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
freeu_b1 = "Scaling factor for stage 1 to amplify the contributions of backbone features."
freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features."
class Input(str, Enum):
"""
The type of input a field accepts.
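The hunk above folds `FieldDescriptions` into `baseinvocation.py`; these constants exist purely to be reused as pydantic `Field` descriptions (the `FreeUConfig` hunk further down shows the pattern). A self-contained sketch of that usage, with a hypothetical model that is not part of InvokeAI:

```python
from pydantic import BaseModel, Field


class FieldDescriptions:
    # Two constants copied from the hunk above; the full class has many more.
    seed = "Seed for random number generation"
    steps = "Number of steps to run"


class GenerationParams(BaseModel):
    """Hypothetical model reusing the shared description strings."""

    seed: int = Field(default=0, ge=0, description=FieldDescriptions.seed)
    steps: int = Field(default=30, ge=1, description=FieldDescriptions.steps)


# The description ends up in the JSON schema (pydantic v2 API shown here),
# which is what downstream consumers such as a UI can read.
print(GenerationParams.model_json_schema()["properties"]["seed"]["description"])
```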


@@ -7,7 +7,6 @@ from compel import Compel, ReturnedEmbeddingsType
from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment
from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
BasicConditioningInfo,
ExtraConditioningInfo,
@@ -20,6 +19,7 @@ from ...backend.util.devices import torch_dtype
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,


@@ -28,12 +28,12 @@ from pydantic import BaseModel, ConfigDict, Field, field_validator
from invokeai.app.invocations.primitives import ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from ...backend.model_management import BaseModelType
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,


@@ -9,11 +9,19 @@ from PIL import Image, ImageChops, ImageFilter, ImageOps
from invokeai.app.invocations.primitives import BoardField, ColorField, ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
from invokeai.backend.image_util.safety_checker import SafetyChecker
from .baseinvocation import BaseInvocation, Input, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation
from .baseinvocation import (
BaseInvocation,
FieldDescriptions,
Input,
InputField,
InvocationContext,
WithMetadata,
WithWorkflow,
invocation,
)
@invocation("show_image", title="Show Image", tags=["image"], category="image", version="1.0.0")


@@ -7,6 +7,7 @@ from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,
@@ -16,7 +17,6 @@ from invokeai.app.invocations.baseinvocation import (
invocation_output,
)
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.model_management.models.base import BaseModelType, ModelType
from invokeai.backend.model_management.models.ip_adapter import get_ip_adapter_image_encoder_model_id


@@ -10,7 +10,7 @@ import torch
import torchvision.transforms as T
from diffusers import AutoencoderKL, AutoencoderTiny
from diffusers.image_processor import VaeImageProcessor
from diffusers.models.adapter import T2IAdapter
from diffusers.models.adapter import FullAdapterXL, T2IAdapter
from diffusers.models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
@@ -34,7 +34,6 @@ from invokeai.app.invocations.primitives import (
)
from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.controlnet_utils import prepare_control_image
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus
@@ -58,6 +57,7 @@ from ...backend.util.devices import choose_precision, choose_torch_device
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,
@@ -562,6 +562,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
t2i_adapter_model: T2IAdapter
with t2i_adapter_model_info as t2i_adapter_model:
total_downscale_factor = t2i_adapter_model.total_downscale_factor
if isinstance(t2i_adapter_model.adapter, FullAdapterXL):
# HACK(ryand): Work around a bug in FullAdapterXL. This is being addressed upstream in diffusers by
# this PR: https://github.com/huggingface/diffusers/pull/5134.
total_downscale_factor = total_downscale_factor // 2
# Resize the T2I-Adapter input image.
# We select the resize dimensions so that after the T2I-Adapter's total_downscale_factor is applied, the
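The `FullAdapterXL` branch above halves `total_downscale_factor` to work around the upstream diffusers bug, and the truncated comment describes resizing the T2I-Adapter input so that, after downscaling, it lines up with the latents. A minimal sketch of that arithmetic; the halving and the PR reference come from the hunk, while the x8 image-to-latent scale and the exact sizing rule are assumptions:

```python
def t2i_adapter_input_size(
    target_width: int,
    target_height: int,
    total_downscale_factor: int,
    is_full_adapter_xl: bool = False,
    latent_scale: int = 8,  # assumed SD latent downscale (image px -> latent px)
) -> tuple[int, int]:
    """Pick the pixel size to resize the T2I-Adapter input image to.

    After the adapter downscales its input by `total_downscale_factor`, the
    result should line up with the latents of a target_width x target_height
    generation (target / latent_scale in each dimension).
    """
    if is_full_adapter_xl:
        # Work around the FullAdapterXL issue noted in the hunk above
        # (https://github.com/huggingface/diffusers/pull/5134).
        total_downscale_factor //= 2
    latent_w = target_width // latent_scale
    latent_h = target_height // latent_scale
    return latent_w * total_downscale_factor, latent_h * total_downscale_factor


# e.g. a 1024x1024 SDXL generation where FullAdapterXL reports a factor of 16:
print(t2i_adapter_input_size(1024, 1024, 16, is_full_adapter_xl=True))  # -> (1024, 1024)
```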


@@ -6,9 +6,8 @@ import numpy as np
from pydantic import ValidationInfo, field_validator
from invokeai.app.invocations.primitives import FloatOutput, IntegerOutput
from invokeai.app.shared.fields import FieldDescriptions
from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation
from .baseinvocation import BaseInvocation, FieldDescriptions, InputField, InvocationContext, invocation
@invocation("add", title="Add Integers", tags=["math", "add"], category="math", version="1.0.0")


@@ -5,6 +5,7 @@ from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
InputField,
InvocationContext,
MetadataField,
@@ -18,7 +19,6 @@ from invokeai.app.invocations.ip_adapter import IPAdapterModelField
from invokeai.app.invocations.model import LoRAModelField, MainModelField, VAEModelField
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.shared.fields import FieldDescriptions
from ...version import __version__
@@ -160,14 +160,13 @@ class CoreMetadataInvocation(BaseInvocation):
)
# High resolution fix metadata.
hrf_enabled: Optional[float] = InputField(
hrf_width: Optional[int] = InputField(
default=None,
description="Whether or not high resolution fix was enabled.",
description="The high resolution fix height and width multipler.",
)
# TODO: should this be stricter or do we just let the UI handle it?
hrf_method: Optional[str] = InputField(
hrf_height: Optional[int] = InputField(
default=None,
description="The high resolution fix upscale method.",
description="The high resolution fix height and width multipler.",
)
hrf_strength: Optional[float] = InputField(
default=None,


@@ -3,13 +3,11 @@ from typing import List, Optional
from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.shared.models import FreeUConfig
from ...backend.model_management import BaseModelType, ModelType, SubModelType
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,
@@ -19,6 +17,22 @@ from .baseinvocation import (
invocation_output,
)
# TODO: Permanent fix for this
# from invokeai.app.invocations.shared import FreeUConfig
class FreeUConfig(BaseModel):
"""
Configuration for the FreeU hyperparameters.
- https://huggingface.co/docs/diffusers/main/en/using-diffusers/freeu
- https://github.com/ChenyangSi/FreeU
"""
s1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s1)
s2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s2)
b1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b1)
b2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b2)
class ModelInfo(BaseModel):
model_name: str = Field(description="Info to load submodel")
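`FreeUConfig` above only carries the four FreeU hyperparameters with pydantic range validation. A small sketch of constructing and validating it; the numeric values below are illustrative, and the `enable_freeu` mention in the comment is an assumption about recent diffusers releases, not something shown in this diff:

```python
from pydantic import BaseModel, Field, ValidationError


class FreeUConfig(BaseModel):
    """Trimmed copy of the class in the hunk above."""

    s1: float = Field(ge=-1, le=3, description="stage-1 skip-feature scaling")
    s2: float = Field(ge=-1, le=3, description="stage-2 skip-feature scaling")
    b1: float = Field(ge=-1, le=3, description="stage-1 backbone scaling")
    b2: float = Field(ge=-1, le=3, description="stage-2 backbone scaling")


cfg = FreeUConfig(s1=0.9, s2=0.2, b1=1.2, b2=1.4)  # illustrative values
# cfg could then be handed to whatever applies FreeU to the UNet, e.g. something
# along the lines of unet.enable_freeu(**cfg.model_dump()) in recent diffusers
# releases (an assumption, not shown in this diff).

try:
    FreeUConfig(s1=5.0, s2=0.2, b1=1.2, b2=1.4)  # out of range -> rejected
except ValidationError as e:
    print(e.error_count(), "validation error")
```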


@@ -5,13 +5,13 @@ import torch
from pydantic import field_validator
from invokeai.app.invocations.latent import LatentsField
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.misc import SEED_MAX, get_random_seed
from ...backend.util.devices import choose_torch_device, torch_dtype
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
InputField,
InvocationContext,
OutputField,


@@ -14,7 +14,6 @@ from tqdm import tqdm
from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput, ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend import BaseModelType, ModelType, SubModelType
@@ -24,6 +23,7 @@ from ...backend.util import choose_torch_device
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,


@@ -5,11 +5,10 @@ from typing import Optional, Tuple
import torch
from pydantic import BaseModel, Field
from invokeai.app.shared.fields import FieldDescriptions
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,


@@ -1,9 +1,8 @@
from invokeai.app.shared.fields import FieldDescriptions
from ...backend.model_management import ModelType, SubModelType
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,


@@ -1,6 +1,6 @@
from pydantic import BaseModel, Field
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.invocations.baseinvocation import FieldDescriptions
class FreeUConfig(BaseModel):


@@ -5,6 +5,7 @@ from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,
@@ -15,7 +16,6 @@ from invokeai.app.invocations.baseinvocation import (
)
from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_RESIZE_VALUES
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.model_management.models.base import BaseModelType


@@ -1,5 +0,0 @@
"""
This module contains various classes, functions and models which are shared across the app, particularly by invocations.
Lifting these classes, functions and models into this shared module helps to reduce circular imports.
"""


@@ -1,66 +0,0 @@
class FieldDescriptions:
denoising_start = "When to start denoising, expressed as a percentage of total steps"
denoising_end = "When to stop denoising, expressed as a percentage of total steps"
cfg_scale = "Classifier-Free Guidance scale"
scheduler = "Scheduler to use during inference"
positive_cond = "Positive conditioning tensor"
negative_cond = "Negative conditioning tensor"
noise = "Noise tensor"
clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
unet = "UNet (scheduler, LoRAs)"
vae = "VAE"
cond = "Conditioning tensor"
controlnet_model = "ControlNet model to load"
vae_model = "VAE model to load"
lora_model = "LoRA model to load"
main_model = "Main model (UNet, VAE, CLIP) to load"
sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
sdxl_refiner_model = "SDXL Refiner Main Model (UNet, VAE, CLIP2) to load"
onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
lora_weight = "The weight at which the LoRA is applied to each model"
compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor"
raw_prompt = "Raw prompt text (no parsing)"
sdxl_aesthetic = "The aesthetic score to apply to the conditioning tensor"
skipped_layers = "Number of layers to skip in text encoder"
seed = "Seed for random number generation"
steps = "Number of steps to run"
width = "Width of output (px)"
height = "Height of output (px)"
control = "ControlNet(s) to apply"
ip_adapter = "IP-Adapter to apply"
t2i_adapter = "T2I-Adapter(s) to apply"
denoised_latents = "Denoised latents tensor"
latents = "Latents tensor"
strength = "Strength of denoising (proportional to steps)"
metadata = "Optional metadata to be saved with the image"
metadata_collection = "Collection of Metadata"
metadata_item_polymorphic = "A single metadata item or collection of metadata items"
metadata_item_label = "Label for this metadata item"
metadata_item_value = "The value for this metadata item (may be any type)"
workflow = "Optional workflow to be saved with the image"
interp_mode = "Interpolation mode"
torch_antialias = "Whether or not to apply antialiasing (bilinear or bicubic only)"
fp32 = "Whether or not to use full float32 precision"
precision = "Precision to use"
tiled = "Processing using overlapping tiles (reduce memory consumption)"
detect_res = "Pixel resolution for detection"
image_res = "Pixel resolution for output image"
safe_mode = "Whether or not to use safe mode"
scribble_mode = "Whether or not to use scribble mode"
scale_factor = "The factor by which to scale"
blend_alpha = (
"Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B."
)
num_1 = "The first number"
num_2 = "The second number"
mask = "The mask to use for the operation"
board = "The board to save the image to"
image = "The image to process"
tile_size = "Tile size"
inclusive_low = "The inclusive low value"
exclusive_high = "The exclusive high value"
decimal_places = "The number of decimal places to round to"
freeu_s1 = 'Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
freeu_s2 = 'Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
freeu_b1 = "Scaling factor for stage 1 to amplify the contributions of backbone features."
freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features."


@@ -32,7 +32,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS
from huggingface_hub import HfFolder
from huggingface_hub import login as hf_hub_login
from omegaconf import OmegaConf
from pydantic import ValidationError
from pydantic.error_wrappers import ValidationError
from tqdm import tqdm
from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextConfig, CLIPTextModel, CLIPTokenizer


@@ -38,7 +38,6 @@ SAMPLER_CHOICES = [
"k_heun",
"k_lms",
"plms",
"lcm",
]
PRECISION_CHOICES = [


@@ -254,13 +254,7 @@ class ModelInstall(object):
elif path.is_dir() and any(
[
(path / x).exists()
for x in {
"config.json",
"model_index.json",
"learned_embeds.bin",
"pytorch_lora_weights.bin",
"pytorch_lora_weights.safetensors",
}
for x in {"config.json", "model_index.json", "learned_embeds.bin", "pytorch_lora_weights.bin"}
]
):
models_installed.update({str(model_path_id_or_url): self._install_path(path)})
@@ -363,7 +357,7 @@ class ModelInstall(object):
for suffix in ["safetensors", "bin"]:
if f"{prefix}pytorch_lora_weights.{suffix}" in files:
location = self._download_hf_model(
repo_id, [f"pytorch_lora_weights.{suffix}"], staging, subfolder=subfolder
repo_id, ["pytorch_lora_weights.bin"], staging, subfolder=subfolder
) # LoRA
break
elif (


@@ -12,7 +12,7 @@ from diffusers.models import UNet2DConditionModel
from safetensors.torch import load_file
from transformers import CLIPTextModel, CLIPTokenizer
from invokeai.app.shared.models import FreeUConfig
from invokeai.app.invocations.shared import FreeUConfig
from .models.lora import LoRAModel
@@ -166,15 +166,6 @@ class ModelPatcher:
init_tokens_count = None
new_tokens_added = None
# TODO: This is required since Transformers 4.32 see
# https://github.com/huggingface/transformers/pull/25088
# More information by NVIDIA:
# https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc
# This value might need to be changed in the future and take the GPUs model into account as there seem
# to be ideal values for different GPUS. This value is temporary!
# For references to the current discussion please see https://github.com/invoke-ai/InvokeAI/pull/4817
pad_to_multiple_of = 8
try:
# HACK: The CLIPTokenizer API does not include a way to remove tokens after calling add_tokens(...). As a
# workaround, we create a full copy of `tokenizer` so that its original behavior can be restored after
@@ -184,7 +175,7 @@ class ModelPatcher:
# but a pickle roundtrip was found to be much faster (1 sec vs. 0.05 secs).
ti_tokenizer = pickle.loads(pickle.dumps(tokenizer))
ti_manager = TextualInversionManager(ti_tokenizer)
init_tokens_count = text_encoder.resize_token_embeddings(None, pad_to_multiple_of).num_embeddings
init_tokens_count = text_encoder.resize_token_embeddings(None).num_embeddings
def _get_trigger(ti_name, index):
trigger = ti_name
@@ -199,7 +190,7 @@ class ModelPatcher:
new_tokens_added += ti_tokenizer.add_tokens(_get_trigger(ti_name, i))
# modify text_encoder
text_encoder.resize_token_embeddings(init_tokens_count + new_tokens_added, pad_to_multiple_of)
text_encoder.resize_token_embeddings(init_tokens_count + new_tokens_added)
model_embeddings = text_encoder.get_input_embeddings()
for ti_name, ti in ti_list:
@@ -231,7 +222,7 @@ class ModelPatcher:
finally:
if init_tokens_count and new_tokens_added:
text_encoder.resize_token_embeddings(init_tokens_count, pad_to_multiple_of)
text_encoder.resize_token_embeddings(init_tokens_count)
@classmethod
@contextmanager
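The ModelPatcher hunk above adds or removes `pad_to_multiple_of=8` around the textual-inversion token handling, and the deleted comment block explains why (tensor-core-friendly embedding sizes, Transformers >= 4.32). A condensed sketch of the same flow, assuming a Hugging Face tokenizer/text-encoder pair; the pickle-based tokenizer copy and the restore-in-`finally` mirror the hunk, and copying the learned embedding weights is omitted:

```python
import pickle
from contextlib import contextmanager


@contextmanager
def apply_ti_triggers(tokenizer, text_encoder, trigger_tokens, pad_to_multiple_of=8):
    """Temporarily add textual-inversion trigger tokens, restoring state on exit.

    Copying the actual learned embedding weights into
    text_encoder.get_input_embeddings() is omitted here for brevity.
    """
    init_tokens_count = None
    new_tokens_added = None
    try:
        # Copy the tokenizer so the caller's instance stays untouched; the hunk
        # above uses a pickle round-trip because it is faster than a deep copy.
        ti_tokenizer = pickle.loads(pickle.dumps(tokenizer))
        init_tokens_count = text_encoder.resize_token_embeddings(
            None, pad_to_multiple_of
        ).num_embeddings
        new_tokens_added = ti_tokenizer.add_tokens(list(trigger_tokens))
        # Grow the embedding table to cover the new trigger tokens.
        text_encoder.resize_token_embeddings(
            init_tokens_count + new_tokens_added, pad_to_multiple_of
        )
        yield ti_tokenizer
    finally:
        if init_tokens_count and new_tokens_added:
            # Shrink back to the original (padded) size.
            text_encoder.resize_token_embeddings(init_tokens_count, pad_to_multiple_of)
```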


@@ -183,13 +183,12 @@ class ModelProbe(object):
if model:
class_name = model.__class__.__name__
else:
for suffix in ["bin", "safetensors"]:
if (folder_path / f"learned_embeds.{suffix}").exists():
return ModelType.TextualInversion
if (folder_path / f"pytorch_lora_weights.{suffix}").exists():
return ModelType.Lora
if (folder_path / "unet/model.onnx").exists():
return ModelType.ONNX
if (folder_path / "learned_embeds.bin").exists():
return ModelType.TextualInversion
if (folder_path / "pytorch_lora_weights.bin").exists():
return ModelType.Lora
if (folder_path / "image_encoder.txt").exists():
return ModelType.IPAdapter
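Both sides of the probe hunk above classify a folder by its marker files; one side also accepts `.safetensors` variants. A standalone sketch of that check; the `ModelType` member names come from the hunk, but the enum values here are placeholders:

```python
from enum import Enum
from pathlib import Path
from typing import Optional


class ModelType(str, Enum):
    # Names from the hunk above; the string values are placeholders.
    TextualInversion = "textual_inversion"
    Lora = "lora"
    ONNX = "onnx"
    IPAdapter = "ip_adapter"


def probe_folder(folder_path: Path) -> Optional[ModelType]:
    """Guess a model type from marker files, checking both weight formats."""
    for suffix in ("bin", "safetensors"):
        if (folder_path / f"learned_embeds.{suffix}").exists():
            return ModelType.TextualInversion
        if (folder_path / f"pytorch_lora_weights.{suffix}").exists():
            return ModelType.Lora
    if (folder_path / "unet" / "model.onnx").exists():
        return ModelType.ONNX
    if (folder_path / "image_encoder.txt").exists():
        return ModelType.IPAdapter
    return None
```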


@@ -68,9 +68,8 @@ class LoRAModel(ModelBase):
raise ModelNotFoundException()
if os.path.isdir(path):
for ext in ["safetensors", "bin"]:
if os.path.exists(os.path.join(path, f"pytorch_lora_weights.{ext}")):
return LoRAModelFormat.Diffusers
if os.path.exists(os.path.join(path, "pytorch_lora_weights.bin")):
return LoRAModelFormat.Diffusers
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
@@ -87,10 +86,8 @@ class LoRAModel(ModelBase):
base_model: BaseModelType,
) -> str:
if cls.detect_format(model_path) == LoRAModelFormat.Diffusers:
for ext in ["safetensors", "bin"]: # return path to the safetensors file inside the folder
path = Path(model_path, f"pytorch_lora_weights.{ext}")
if path.exists():
return path
# TODO: add diffusers lora when it stabilizes a bit
raise NotImplementedError("Diffusers lora not supported")
else:
return model_path
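The LoRA hunk above resolves the weights file inside a Diffusers-format folder, with one side of the compare preferring `.safetensors` over `.bin`. A tiny sketch of that lookup (the error handling is an assumption):

```python
from pathlib import Path


def lora_weights_path(model_path: str) -> Path:
    """Return the weights file inside a Diffusers-format LoRA folder."""
    for ext in ("safetensors", "bin"):  # prefer safetensors when both exist
        candidate = Path(model_path, f"pytorch_lora_weights.{ext}")
        if candidate.exists():
            return candidate
    raise FileNotFoundError(f"no pytorch_lora_weights.* found in {model_path}")
```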


@@ -10,7 +10,6 @@ from diffusers import (
HeunDiscreteScheduler,
KDPM2AncestralDiscreteScheduler,
KDPM2DiscreteScheduler,
LCMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UniPCMultistepScheduler,
@@ -39,5 +38,4 @@ SCHEDULER_MAP = dict(
dpmpp_sde=(DPMSolverSDEScheduler, dict(use_karras_sigmas=False, noise_sampler_seed=0)),
dpmpp_sde_k=(DPMSolverSDEScheduler, dict(use_karras_sigmas=True, noise_sampler_seed=0)),
unipc=(UniPCMultistepScheduler, dict(cpu_only=True)),
lcm=(LCMScheduler, dict()),
)
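`SCHEDULER_MAP` above maps a short scheduler name to a `(class, extra kwargs)` pair, and the compare adds or removes the `lcm` entry. A small sketch of resolving such a map against an existing scheduler's config via diffusers' `from_config`; the usage example sticks to the `lcm` entry, whose extras are empty:

```python
from diffusers import DDIMScheduler, LCMScheduler, UniPCMultistepScheduler

# Trimmed-down version of the map in the hunk above.
SCHEDULER_MAP = dict(
    unipc=(UniPCMultistepScheduler, dict(cpu_only=True)),
    lcm=(LCMScheduler, dict()),
)


def build_scheduler(name: str, base_scheduler):
    """Swap in the named scheduler, carrying the existing config over."""
    scheduler_class, extra_kwargs = SCHEDULER_MAP[name]
    # ConfigMixin.from_config accepts a plain dict, so merge the extras in.
    config = {**base_scheduler.config, **extra_kwargs}
    return scheduler_class.from_config(config)


# Usage sketch: start from any scheduler config and switch to LCM.
base = DDIMScheduler()
lcm = build_scheduler("lcm", base)
print(type(lcm).__name__)  # LCMScheduler
```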


@@ -63,8 +63,8 @@ def welcome(latest_release: str, latest_prerelease: str):
yield "[bold yellow]Options:"
yield f"""[1] Update to the latest [bold]official release[/bold] ([italic]{latest_release}[/italic])
[2] Update to the latest [bold]pre-release[/bold] (may be buggy; caveat emptor!) ([italic]{latest_prerelease}[/italic])
[3] Manually enter the [bold]tag name[/bold] for the version you wish to update to
[4] Manually enter the [bold]branch name[/bold] for the version you wish to update to"""
[2] Manually enter the [bold]tag name[/bold] for the version you wish to update to
[3] Manually enter the [bold]branch name[/bold] for the version you wish to update to"""
console.rule()
print(

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -1,4 +1,4 @@
import{w as s,ib as T,v as l,a1 as I,ic as R,ad as V,id as z,ie as j,ig as D,ih as F,ii as G,ij as W,ik as K,aG as H,il as U,im as Y}from"./index-7ba6700b.js";import{M as Z}from"./MantineProvider-bf828180.js";var P=String.raw,E=P`
import{w as s,ia as T,v as l,a1 as I,ib as R,ad as V,ic as z,id as j,ie as D,ig as F,ih as G,ii as W,ij as K,aG as H,ik as U,il as Y}from"./index-ec1d7161.js";import{M as Z}from"./MantineProvider-7e0d59f3.js";var P=String.raw,E=P`
:root,
:host {
--chakra-vh: 100vh;
@@ -277,4 +277,4 @@ import{w as s,ib as T,v as l,a1 as I,ic as R,ad as V,id as z,ie as j,ig as D,ih
}
${E}
`}),g={light:"chakra-ui-light",dark:"chakra-ui-dark"};function Q(e={}){const{preventTransition:o=!0}=e,n={setDataset:r=>{const t=o?n.preventTransition():void 0;document.documentElement.dataset.theme=r,document.documentElement.style.colorScheme=r,t==null||t()},setClassName(r){document.body.classList.add(r?g.dark:g.light),document.body.classList.remove(r?g.light:g.dark)},query(){return window.matchMedia("(prefers-color-scheme: dark)")},getSystemTheme(r){var t;return((t=n.query().matches)!=null?t:r==="dark")?"dark":"light"},addListener(r){const t=n.query(),i=a=>{r(a.matches?"dark":"light")};return typeof t.addListener=="function"?t.addListener(i):t.addEventListener("change",i),()=>{typeof t.removeListener=="function"?t.removeListener(i):t.removeEventListener("change",i)}},preventTransition(){const r=document.createElement("style");return r.appendChild(document.createTextNode("*{-webkit-transition:none!important;-moz-transition:none!important;-o-transition:none!important;-ms-transition:none!important;transition:none!important}")),document.head.appendChild(r),()=>{window.getComputedStyle(document.body),requestAnimationFrame(()=>{requestAnimationFrame(()=>{document.head.removeChild(r)})})}}};return n}var X="chakra-ui-color-mode";function L(e){return{ssr:!1,type:"localStorage",get(o){if(!(globalThis!=null&&globalThis.document))return o;let n;try{n=localStorage.getItem(e)||o}catch{}return n||o},set(o){try{localStorage.setItem(e,o)}catch{}}}}var ee=L(X),M=()=>{};function S(e,o){return e.type==="cookie"&&e.ssr?e.get(o):o}function O(e){const{value:o,children:n,options:{useSystemColorMode:r,initialColorMode:t,disableTransitionOnChange:i}={},colorModeManager:a=ee}=e,d=t==="dark"?"dark":"light",[u,p]=l.useState(()=>S(a,d)),[y,b]=l.useState(()=>S(a)),{getSystemTheme:w,setClassName:k,setDataset:x,addListener:$}=l.useMemo(()=>Q({preventTransition:i}),[i]),v=t==="system"&&!u?y:u,c=l.useCallback(m=>{const f=m==="system"?w():m;p(f),k(f==="dark"),x(f),a.set(f)},[a,w,k,x]);I(()=>{t==="system"&&b(w())},[]),l.useEffect(()=>{const m=a.get();if(m){c(m);return}if(t==="system"){c("system");return}c(d)},[a,d,t,c]);const C=l.useCallback(()=>{c(v==="dark"?"light":"dark")},[v,c]);l.useEffect(()=>{if(r)return $(c)},[r,$,c]);const A=l.useMemo(()=>({colorMode:o??v,toggleColorMode:o?M:C,setColorMode:o?M:c,forced:o!==void 0}),[v,C,c,o]);return s.jsx(R.Provider,{value:A,children:n})}O.displayName="ColorModeProvider";var te=["borders","breakpoints","colors","components","config","direction","fonts","fontSizes","fontWeights","letterSpacings","lineHeights","radii","shadows","sizes","space","styles","transition","zIndices"];function re(e){return V(e)?te.every(o=>Object.prototype.hasOwnProperty.call(e,o)):!1}function h(e){return typeof e=="function"}function oe(...e){return o=>e.reduce((n,r)=>r(n),o)}var ne=e=>function(...n){let r=[...n],t=n[n.length-1];return re(t)&&r.length>1?r=r.slice(0,r.length-1):t=e,oe(...r.map(i=>a=>h(i)?i(a):ae(a,i)))(t)},ie=ne(j);function ae(...e){return z({},...e,_)}function _(e,o,n,r){if((h(e)||h(o))&&Object.prototype.hasOwnProperty.call(r,n))return(...t)=>{const i=h(e)?e(...t):e,a=h(o)?o(...t):o;return z({},i,a,_)}}var q=l.createContext({getDocument(){return document},getWindow(){return window}});q.displayName="EnvironmentContext";function N(e){const{children:o,environment:n,disabled:r}=e,t=l.useRef(null),i=l.useMemo(()=>n||{getDocument:()=>{var d,u;return(u=(d=t.current)==null?void 0:d.ownerDocument)!=null?u:document},getWindow:()=>{var d,u;return(u=(d=t.current)==null?void 
0:d.ownerDocument.defaultView)!=null?u:window}},[n]),a=!r||!n;return s.jsxs(q.Provider,{value:i,children:[o,a&&s.jsx("span",{id:"__chakra_env",hidden:!0,ref:t})]})}N.displayName="EnvironmentProvider";var se=e=>{const{children:o,colorModeManager:n,portalZIndex:r,resetScope:t,resetCSS:i=!0,theme:a={},environment:d,cssVarsRoot:u,disableEnvironment:p,disableGlobalStyle:y}=e,b=s.jsx(N,{environment:d,disabled:p,children:o});return s.jsx(D,{theme:a,cssVarsRoot:u,children:s.jsxs(O,{colorModeManager:n,options:a.config,children:[i?s.jsx(J,{scope:t}):s.jsx(B,{}),!y&&s.jsx(F,{}),r?s.jsx(G,{zIndex:r,children:b}):b]})})},le=e=>function({children:n,theme:r=e,toastOptions:t,...i}){return s.jsxs(se,{theme:r,...i,children:[s.jsx(W,{value:t==null?void 0:t.defaultOptions,children:n}),s.jsx(K,{...t})]})},de=le(j);const ue=()=>l.useMemo(()=>({colorScheme:"dark",fontFamily:"'Inter Variable', sans-serif",components:{ScrollArea:{defaultProps:{scrollbarSize:10},styles:{scrollbar:{"&:hover":{backgroundColor:"var(--invokeai-colors-baseAlpha-300)"}},thumb:{backgroundColor:"var(--invokeai-colors-baseAlpha-300)"}}}}}),[]),ce=L("@@invokeai-color-mode");function me({children:e}){const{i18n:o}=H(),n=o.dir(),r=l.useMemo(()=>ie({...U,direction:n}),[n]);l.useEffect(()=>{document.body.dir=n},[n]);const t=ue();return s.jsx(Z,{theme:t,children:s.jsx(de,{theme:r,colorModeManager:ce,toastOptions:Y,children:e})})}const ve=l.memo(me);export{ve as default};
`}),g={light:"chakra-ui-light",dark:"chakra-ui-dark"};function Q(e={}){const{preventTransition:o=!0}=e,n={setDataset:r=>{const t=o?n.preventTransition():void 0;document.documentElement.dataset.theme=r,document.documentElement.style.colorScheme=r,t==null||t()},setClassName(r){document.body.classList.add(r?g.dark:g.light),document.body.classList.remove(r?g.light:g.dark)},query(){return window.matchMedia("(prefers-color-scheme: dark)")},getSystemTheme(r){var t;return((t=n.query().matches)!=null?t:r==="dark")?"dark":"light"},addListener(r){const t=n.query(),i=a=>{r(a.matches?"dark":"light")};return typeof t.addListener=="function"?t.addListener(i):t.addEventListener("change",i),()=>{typeof t.removeListener=="function"?t.removeListener(i):t.removeEventListener("change",i)}},preventTransition(){const r=document.createElement("style");return r.appendChild(document.createTextNode("*{-webkit-transition:none!important;-moz-transition:none!important;-o-transition:none!important;-ms-transition:none!important;transition:none!important}")),document.head.appendChild(r),()=>{window.getComputedStyle(document.body),requestAnimationFrame(()=>{requestAnimationFrame(()=>{document.head.removeChild(r)})})}}};return n}var X="chakra-ui-color-mode";function L(e){return{ssr:!1,type:"localStorage",get(o){if(!(globalThis!=null&&globalThis.document))return o;let n;try{n=localStorage.getItem(e)||o}catch{}return n||o},set(o){try{localStorage.setItem(e,o)}catch{}}}}var ee=L(X),M=()=>{};function S(e,o){return e.type==="cookie"&&e.ssr?e.get(o):o}function O(e){const{value:o,children:n,options:{useSystemColorMode:r,initialColorMode:t,disableTransitionOnChange:i}={},colorModeManager:a=ee}=e,d=t==="dark"?"dark":"light",[u,p]=l.useState(()=>S(a,d)),[y,b]=l.useState(()=>S(a)),{getSystemTheme:w,setClassName:k,setDataset:x,addListener:$}=l.useMemo(()=>Q({preventTransition:i}),[i]),v=t==="system"&&!u?y:u,c=l.useCallback(h=>{const f=h==="system"?w():h;p(f),k(f==="dark"),x(f),a.set(f)},[a,w,k,x]);I(()=>{t==="system"&&b(w())},[]),l.useEffect(()=>{const h=a.get();if(h){c(h);return}if(t==="system"){c("system");return}c(d)},[a,d,t,c]);const C=l.useCallback(()=>{c(v==="dark"?"light":"dark")},[v,c]);l.useEffect(()=>{if(r)return $(c)},[r,$,c]);const A=l.useMemo(()=>({colorMode:o??v,toggleColorMode:o?M:C,setColorMode:o?M:c,forced:o!==void 0}),[v,C,c,o]);return s.jsx(R.Provider,{value:A,children:n})}O.displayName="ColorModeProvider";var te=["borders","breakpoints","colors","components","config","direction","fonts","fontSizes","fontWeights","letterSpacings","lineHeights","radii","shadows","sizes","space","styles","transition","zIndices"];function re(e){return V(e)?te.every(o=>Object.prototype.hasOwnProperty.call(e,o)):!1}function m(e){return typeof e=="function"}function oe(...e){return o=>e.reduce((n,r)=>r(n),o)}var ne=e=>function(...n){let r=[...n],t=n[n.length-1];return re(t)&&r.length>1?r=r.slice(0,r.length-1):t=e,oe(...r.map(i=>a=>m(i)?i(a):ae(a,i)))(t)},ie=ne(j);function ae(...e){return z({},...e,_)}function _(e,o,n,r){if((m(e)||m(o))&&Object.prototype.hasOwnProperty.call(r,n))return(...t)=>{const i=m(e)?e(...t):e,a=m(o)?o(...t):o;return z({},i,a,_)}}var q=l.createContext({getDocument(){return document},getWindow(){return window}});q.displayName="EnvironmentContext";function N(e){const{children:o,environment:n,disabled:r}=e,t=l.useRef(null),i=l.useMemo(()=>n||{getDocument:()=>{var d,u;return(u=(d=t.current)==null?void 0:d.ownerDocument)!=null?u:document},getWindow:()=>{var d,u;return(u=(d=t.current)==null?void 
0:d.ownerDocument.defaultView)!=null?u:window}},[n]),a=!r||!n;return s.jsxs(q.Provider,{value:i,children:[o,a&&s.jsx("span",{id:"__chakra_env",hidden:!0,ref:t})]})}N.displayName="EnvironmentProvider";var se=e=>{const{children:o,colorModeManager:n,portalZIndex:r,resetScope:t,resetCSS:i=!0,theme:a={},environment:d,cssVarsRoot:u,disableEnvironment:p,disableGlobalStyle:y}=e,b=s.jsx(N,{environment:d,disabled:p,children:o});return s.jsx(D,{theme:a,cssVarsRoot:u,children:s.jsxs(O,{colorModeManager:n,options:a.config,children:[i?s.jsx(J,{scope:t}):s.jsx(B,{}),!y&&s.jsx(F,{}),r?s.jsx(G,{zIndex:r,children:b}):b]})})},le=e=>function({children:n,theme:r=e,toastOptions:t,...i}){return s.jsxs(se,{theme:r,...i,children:[s.jsx(W,{value:t==null?void 0:t.defaultOptions,children:n}),s.jsx(K,{...t})]})},de=le(j);const ue=()=>l.useMemo(()=>({colorScheme:"dark",fontFamily:"'Inter Variable', sans-serif",components:{ScrollArea:{defaultProps:{scrollbarSize:10},styles:{scrollbar:{"&:hover":{backgroundColor:"var(--invokeai-colors-baseAlpha-300)"}},thumb:{backgroundColor:"var(--invokeai-colors-baseAlpha-300)"}}}}}),[]),ce=L("@@invokeai-color-mode");function he({children:e}){const{i18n:o}=H(),n=o.dir(),r=l.useMemo(()=>ie({...U,direction:n}),[n]);l.useEffect(()=>{document.body.dir=n},[n]);const t=ue();return s.jsx(Z,{theme:t,children:s.jsx(de,{theme:r,colorModeManager:ce,toastOptions:Y,children:e})})}const ve=l.memo(he);export{ve as default};

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -15,7 +15,7 @@
margin: 0;
}
</style>
<script type="module" crossorigin src="./assets/index-7ba6700b.js"></script>
<script type="module" crossorigin src="./assets/index-ec1d7161.js"></script>
</head>
<body dir="ltr">


@@ -221,19 +221,6 @@
"resetIPAdapterImage": "Reset IP Adapter Image",
"ipAdapterImageFallback": "No IP Adapter Image Selected"
},
"hrf": {
"hrf": "High Resolution Fix",
"enableHrf": "Enable High Resolution Fix",
"enableHrfTooltip": "Generate with a lower initial resolution, upscale to the base resolution, then run Image-to-Image.",
"upscaleMethod": "Upscale Method",
"hrfStrength": "High Resolution Fix Strength",
"strengthTooltip": "Lower values result in fewer details, which may reduce potential artifacts.",
"metadata": {
"enabled": "High Resolution Fix Enabled",
"strength": "High Resolution Fix Strength",
"method": "High Resolution Fix Method"
}
},
"embedding": {
"addEmbedding": "Add Embedding",
"incompatibleModel": "Incompatible base model:",
@@ -1271,11 +1258,15 @@
},
"compositingBlur": {
"heading": "Blur",
"paragraphs": ["The blur radius of the mask."]
"paragraphs": [
"The blur radius of the mask."
]
},
"compositingBlurMethod": {
"heading": "Blur Method",
"paragraphs": ["The method of blur applied to the masked area."]
"paragraphs": [
"The method of blur applied to the masked area."
]
},
"compositingCoherencePass": {
"heading": "Coherence Pass",
@@ -1285,7 +1276,9 @@
},
"compositingCoherenceMode": {
"heading": "Mode",
"paragraphs": ["The mode of the Coherence Pass."]
"paragraphs": [
"The mode of the Coherence Pass."
]
},
"compositingCoherenceSteps": {
"heading": "Steps",
@@ -1303,7 +1296,9 @@
},
"compositingMaskAdjustments": {
"heading": "Mask Adjustments",
"paragraphs": ["Adjust the mask."]
"paragraphs": [
"Adjust the mask."
]
},
"controlNetBeginEnd": {
"heading": "Begin / End Step Percentage",
@@ -1361,7 +1356,9 @@
},
"infillMethod": {
"heading": "Infill Method",
"paragraphs": ["Method to infill the selected area."]
"paragraphs": [
"Method to infill the selected area."
]
},
"lora": {
"heading": "LoRA Weight",


@@ -1231,9 +1231,7 @@
"noLoRAsAvailable": "无可用 LoRA",
"noModelsAvailable": "无可用模型",
"selectModel": "选择一个模型",
"selectLoRA": "选择一个 LoRA",
"noRefinerModelsInstalled": "无已安装的 SDXL Refiner 模型",
"noLoRAsInstalled": "无已安装的 LoRA"
"selectLoRA": "选择一个 LoRA"
},
"boards": {
"autoAddBoard": "自动添加面板",


@@ -54,35 +54,42 @@
]
},
"dependencies": {
"@chakra-ui/anatomy": "^2.2.2",
"@chakra-ui/anatomy": "^2.2.1",
"@chakra-ui/icons": "^2.1.1",
"@chakra-ui/react": "^2.8.2",
"@chakra-ui/styled-system": "^2.9.2",
"@chakra-ui/theme-tools": "^2.1.2",
"@chakra-ui/react": "^2.8.1",
"@chakra-ui/styled-system": "^2.9.1",
"@chakra-ui/theme-tools": "^2.1.1",
"@dagrejs/graphlib": "^2.1.13",
"@dnd-kit/core": "^6.1.0",
"@dnd-kit/utilities": "^3.2.2",
"@dnd-kit/core": "^6.0.8",
"@dnd-kit/modifiers": "^6.0.1",
"@dnd-kit/utilities": "^3.2.1",
"@emotion/react": "^11.11.1",
"@emotion/styled": "^11.11.0",
"@fontsource-variable/inter": "^5.0.15",
"@floating-ui/react-dom": "^2.0.2",
"@fontsource-variable/inter": "^5.0.13",
"@fontsource/inter": "^5.0.13",
"@mantine/core": "^6.0.19",
"@mantine/form": "^6.0.19",
"@mantine/hooks": "^6.0.19",
"@nanostores/react": "^0.7.1",
"@reduxjs/toolkit": "^1.9.7",
"@roarr/browser-log-writer": "^1.3.0",
"@stevebel/png": "^1.5.1",
"compare-versions": "^6.1.0",
"dateformat": "^5.0.3",
"formik": "^2.4.5",
"framer-motion": "^10.16.4",
"i18next": "^23.6.0",
"i18next-http-backend": "^2.3.1",
"konva": "^9.2.3",
"fuse.js": "^6.6.2",
"i18next": "^23.5.1",
"i18next-browser-languagedetector": "^7.0.2",
"i18next-http-backend": "^2.2.2",
"konva": "^9.2.2",
"lodash-es": "^4.17.21",
"nanostores": "^0.9.4",
"nanostores": "^0.9.2",
"new-github-issue-url": "^1.0.0",
"openapi-fetch": "^0.8.1",
"overlayscrollbars": "^2.4.4",
"overlayscrollbars-react": "^0.5.3",
"openapi-fetch": "^0.7.10",
"overlayscrollbars": "^2.3.2",
"overlayscrollbars-react": "^0.5.2",
"patch-package": "^8.0.0",
"query-string": "^8.1.0",
"react": "^18.2.0",
@@ -91,25 +98,26 @@
"react-dropzone": "^14.2.3",
"react-error-boundary": "^4.0.11",
"react-hotkeys-hook": "4.4.1",
"react-i18next": "^13.3.1",
"react-i18next": "^13.3.0",
"react-icons": "^4.11.0",
"react-konva": "^18.2.10",
"react-redux": "^8.1.3",
"react-resizable-panels": "^0.0.55",
"react-use": "^17.4.0",
"react-virtuoso": "^4.6.2",
"reactflow": "^11.9.4",
"react-virtuoso": "^4.6.1",
"react-zoom-pan-pinch": "^3.2.0",
"reactflow": "^11.9.3",
"redux-dynamic-middlewares": "^2.2.0",
"redux-remember": "^4.0.4",
"roarr": "^7.18.3",
"roarr": "^7.15.1",
"serialize-error": "^11.0.2",
"socket.io-client": "^4.7.2",
"type-fest": "^4.7.1",
"use-debounce": "^10.0.0",
"type-fest": "^4.4.0",
"use-debounce": "^9.0.4",
"use-image": "^1.1.1",
"uuid": "^9.0.1",
"zod": "^3.22.4",
"zod-validation-error": "^2.1.0"
"zod-validation-error": "^1.5.0"
},
"peerDependencies": {
"@chakra-ui/cli": "^2.4.0",
@@ -120,33 +128,39 @@
},
"devDependencies": {
"@chakra-ui/cli": "^2.4.1",
"@types/dateformat": "^5.0.2",
"@types/lodash-es": "^4.17.11",
"@types/node": "^20.9.0",
"@types/react": "^18.2.37",
"@types/react-dom": "^18.2.15",
"@types/react-redux": "^7.1.30",
"@types/uuid": "^9.0.7",
"@typescript-eslint/eslint-plugin": "^6.10.0",
"@typescript-eslint/parser": "^6.10.0",
"@vitejs/plugin-react-swc": "^3.4.1",
"concurrently": "^8.2.2",
"eslint": "^8.53.0",
"@types/dateformat": "^5.0.0",
"@types/lodash-es": "^4.17.9",
"@types/node": "^20.8.6",
"@types/react": "^18.2.28",
"@types/react-dom": "^18.2.13",
"@types/react-redux": "^7.1.27",
"@types/react-transition-group": "^4.4.7",
"@types/uuid": "^9.0.5",
"@typescript-eslint/eslint-plugin": "^6.7.5",
"@typescript-eslint/parser": "^6.7.5",
"@vitejs/plugin-react-swc": "^3.4.0",
"axios": "^1.5.1",
"babel-plugin-transform-imports": "^2.0.0",
"concurrently": "^8.2.1",
"eslint": "^8.51.0",
"eslint-config-prettier": "^9.0.0",
"eslint-plugin-prettier": "^5.0.1",
"eslint-plugin-react": "^7.33.2",
"eslint-plugin-react-hooks": "^4.6.0",
"form-data": "^4.0.0",
"husky": "^8.0.3",
"lint-staged": "^15.0.2",
"lint-staged": "^15.0.1",
"madge": "^6.1.0",
"openapi-types": "^12.1.3",
"openapi-typescript": "^6.7.0",
"postinstall-postinstall": "^2.1.0",
"prettier": "^3.0.3",
"rollup-plugin-visualizer": "^5.9.2",
"ts-toolbelt": "^9.6.0",
"typescript": "^5.2.2",
"vite": "^4.5.0",
"vite": "^4.4.11",
"vite-plugin-css-injected-by-js": "^3.3.0",
"vite-plugin-dts": "^3.6.3",
"vite-plugin-dts": "^3.6.0",
"vite-plugin-eslint": "^1.8.1",
"vite-tsconfig-paths": "^4.2.1",
"yarn": "^1.22.19"


@@ -221,19 +221,6 @@
"resetIPAdapterImage": "Reset IP Adapter Image",
"ipAdapterImageFallback": "No IP Adapter Image Selected"
},
"hrf": {
"hrf": "High Resolution Fix",
"enableHrf": "Enable High Resolution Fix",
"enableHrfTooltip": "Generate with a lower initial resolution, upscale to the base resolution, then run Image-to-Image.",
"upscaleMethod": "Upscale Method",
"hrfStrength": "High Resolution Fix Strength",
"strengthTooltip": "Lower values result in fewer details, which may reduce potential artifacts.",
"metadata": {
"enabled": "High Resolution Fix Enabled",
"strength": "High Resolution Fix Strength",
"method": "High Resolution Fix Method"
}
},
"embedding": {
"addEmbedding": "Add Embedding",
"incompatibleModel": "Incompatible base model:",
@@ -1271,11 +1258,15 @@
},
"compositingBlur": {
"heading": "Blur",
"paragraphs": ["The blur radius of the mask."]
"paragraphs": [
"The blur radius of the mask."
]
},
"compositingBlurMethod": {
"heading": "Blur Method",
"paragraphs": ["The method of blur applied to the masked area."]
"paragraphs": [
"The method of blur applied to the masked area."
]
},
"compositingCoherencePass": {
"heading": "Coherence Pass",
@@ -1285,7 +1276,9 @@
},
"compositingCoherenceMode": {
"heading": "Mode",
"paragraphs": ["The mode of the Coherence Pass."]
"paragraphs": [
"The mode of the Coherence Pass."
]
},
"compositingCoherenceSteps": {
"heading": "Steps",
@@ -1303,7 +1296,9 @@
},
"compositingMaskAdjustments": {
"heading": "Mask Adjustments",
"paragraphs": ["Adjust the mask."]
"paragraphs": [
"Adjust the mask."
]
},
"controlNetBeginEnd": {
"heading": "Begin / End Step Percentage",
@@ -1361,7 +1356,9 @@
},
"infillMethod": {
"heading": "Infill Method",
"paragraphs": ["Method to infill the selected area."]
"paragraphs": [
"Method to infill the selected area."
]
},
"lora": {
"heading": "LoRA Weight",


@@ -1231,9 +1231,7 @@
"noLoRAsAvailable": "无可用 LoRA",
"noModelsAvailable": "无可用模型",
"selectModel": "选择一个模型",
"selectLoRA": "选择一个 LoRA",
"noRefinerModelsInstalled": "无已安装的 SDXL Refiner 模型",
"noLoRAsInstalled": "无已安装的 LoRA"
"selectLoRA": "选择一个 LoRA"
},
"boards": {
"autoAddBoard": "自动添加面板",


@@ -35,9 +35,6 @@ const ImageMetadataActions = (props: Props) => {
recallWidth,
recallHeight,
recallStrength,
recallHrfEnabled,
recallHrfStrength,
recallHrfMethod,
recallLoRA,
recallControlNet,
recallIPAdapter,
@@ -84,18 +81,6 @@ const ImageMetadataActions = (props: Props) => {
recallStrength(metadata?.strength);
}, [metadata?.strength, recallStrength]);
const handleRecallHrfEnabled = useCallback(() => {
recallHrfEnabled(metadata?.hrf_enabled);
}, [metadata?.hrf_enabled, recallHrfEnabled]);
const handleRecallHrfStrength = useCallback(() => {
recallHrfStrength(metadata?.hrf_strength);
}, [metadata?.hrf_strength, recallHrfStrength]);
const handleRecallHrfMethod = useCallback(() => {
recallHrfMethod(metadata?.hrf_method);
}, [metadata?.hrf_method, recallHrfMethod]);
const handleRecallLoRA = useCallback(
(lora: LoRAMetadataItem) => {
recallLoRA(lora);
@@ -240,27 +225,6 @@ const ImageMetadataActions = (props: Props) => {
onClick={handleRecallStrength}
/>
)}
{metadata.hrf_enabled && (
<ImageMetadataItem
label={t('hrf.metadata.enabled')}
value={metadata.hrf_enabled}
onClick={handleRecallHrfEnabled}
/>
)}
{metadata.hrf_enabled && metadata.hrf_strength && (
<ImageMetadataItem
label={t('hrf.metadata.strength')}
value={metadata.hrf_strength}
onClick={handleRecallHrfStrength}
/>
)}
{metadata.hrf_enabled && metadata.hrf_method && (
<ImageMetadataItem
label={t('hrf.metadata.method')}
value={metadata.hrf_method}
onClick={handleRecallHrfMethod}
/>
)}
{metadata.loras &&
metadata.loras.map((lora, index) => {
if (isValidLoRAModel(lora.lora)) {


@@ -1424,9 +1424,6 @@ export const zCoreMetadata = z
loras: z.array(zLoRAMetadataItem).nullish().catch(null),
vae: zVaeModelField.nullish().catch(null),
strength: z.number().nullish().catch(null),
hrf_enabled: z.boolean().nullish().catch(null),
hrf_strength: z.number().nullish().catch(null),
hrf_method: z.string().nullish().catch(null),
init_image: z.string().nullish().catch(null),
positive_style_prompt: z.string().nullish().catch(null),
negative_style_prompt: z.string().nullish().catch(null),


@@ -1,26 +1,22 @@
import { logger } from 'app/logging/logger';
import { RootState } from 'app/store/store';
import { roundToMultiple } from 'common/util/roundDownToMultiple';
import { NonNullableGraph } from 'features/nodes/types/types';
import {
DenoiseLatentsInvocation,
ESRGANInvocation,
Edge,
LatentsToImageInvocation,
NoiseInvocation,
ResizeLatentsInvocation,
} from 'services/api/types';
import {
DENOISE_LATENTS,
DENOISE_LATENTS_HRF,
ESRGAN_HRF,
IMAGE_TO_LATENTS_HRF,
LATENTS_TO_IMAGE,
LATENTS_TO_IMAGE_HRF_HR,
LATENTS_TO_IMAGE_HRF_LR,
LATENTS_TO_IMAGE_HRF,
MAIN_MODEL_LOADER,
NOISE,
NOISE_HRF,
RESIZE_HRF,
RESCALE_LATENTS,
VAE_LOADER,
} from './constants';
import { upsertMetadata } from './metadata';
@@ -60,52 +56,6 @@ function copyConnectionsToDenoiseLatentsHrf(graph: NonNullableGraph): void {
graph.edges = graph.edges.concat(newEdges);
}
/**
* Calculates the new resolution for high-resolution features (HRF) based on base model type.
* Adjusts the width and height to maintain the aspect ratio and constrains them by the model's dimension limits,
* rounding down to the nearest multiple of 8.
*
* @param {string} baseModel The base model type, which determines the base dimension used in calculations.
* @param {number} width The current width to be adjusted for HRF.
* @param {number} height The current height to be adjusted for HRF.
* @return {{newWidth: number, newHeight: number}} The new width and height, adjusted and rounded as needed.
*/
function calculateHrfRes(
baseModel: string,
width: number,
height: number
): { newWidth: number; newHeight: number } {
const aspect = width / height;
let dimension;
if (baseModel == 'sdxl') {
dimension = 1024;
} else {
dimension = 512;
}
const minDimension = Math.floor(dimension * 0.5);
const modelArea = dimension * dimension; // Assuming square images for model_area
let initWidth;
let initHeight;
if (aspect > 1.0) {
initHeight = Math.max(minDimension, Math.sqrt(modelArea / aspect));
initWidth = initHeight * aspect;
} else {
initWidth = Math.max(minDimension, Math.sqrt(modelArea * aspect));
initHeight = initWidth / aspect;
}
// Cap initial height and width to final height and width.
initWidth = Math.min(width, initWidth);
initHeight = Math.min(height, initHeight);
const newWidth = roundToMultiple(Math.floor(initWidth), 8);
const newHeight = roundToMultiple(Math.floor(initHeight), 8);
return { newWidth, newHeight };
}
// Adds the high-res fix feature to the given graph.
export const addHrfToGraph = (
state: RootState,
@@ -121,140 +71,105 @@ export const addHrfToGraph = (
}
const log = logger('txt2img');
const { vae, hrfStrength, hrfEnabled, hrfMethod } = state.generation;
const { vae, hrfWidth, hrfHeight, hrfStrength } = state.generation;
const isAutoVae = !vae;
const width = state.generation.width;
const height = state.generation.height;
const baseModel = state.generation.model
? state.generation.model.base_model
: 'sd1';
const { newWidth: hrfWidth, newHeight: hrfHeight } = calculateHrfRes(
baseModel,
width,
height
);
// Pre-existing (original) graph nodes.
const originalDenoiseLatentsNode = graph.nodes[DENOISE_LATENTS] as
| DenoiseLatentsInvocation
| undefined;
const originalNoiseNode = graph.nodes[NOISE] as NoiseInvocation | undefined;
// Original latents to image should pick this up.
const originalLatentsToImageNode = graph.nodes[LATENTS_TO_IMAGE] as
| LatentsToImageInvocation
| undefined;
// Check if originalDenoiseLatentsNode is undefined and log an error
if (!originalDenoiseLatentsNode) {
log.error('originalDenoiseLatentsNode is undefined');
return;
}
// Check if originalNoiseNode is undefined and log an error
if (!originalNoiseNode) {
log.error('originalNoiseNode is undefined');
return;
}
// Check if originalLatentsToImageNode is undefined and log an error
if (!originalLatentsToImageNode) {
log.error('originalLatentsToImageNode is undefined');
return;
}
// Change height and width of original noise node to initial resolution.
if (originalNoiseNode) {
originalNoiseNode.width = hrfWidth;
originalNoiseNode.height = hrfHeight;
}
// Define new nodes and their connections, roughly in order of operations.
graph.nodes[LATENTS_TO_IMAGE_HRF_LR] = {
type: 'l2i',
id: LATENTS_TO_IMAGE_HRF_LR,
fp32: originalLatentsToImageNode?.fp32,
is_intermediate: true,
// Define new nodes.
// Denoise latents node to be run on upscaled latents.
const denoiseLatentsHrfNode: DenoiseLatentsInvocation = {
type: 'denoise_latents',
id: DENOISE_LATENTS_HRF,
is_intermediate: originalDenoiseLatentsNode?.is_intermediate,
cfg_scale: originalDenoiseLatentsNode?.cfg_scale,
scheduler: originalDenoiseLatentsNode?.scheduler,
steps: originalDenoiseLatentsNode?.steps,
denoising_start: 1 - hrfStrength,
denoising_end: 1,
};
// New base resolution noise node.
const hrfNoiseNode: NoiseInvocation = {
type: 'noise',
id: NOISE_HRF,
seed: originalNoiseNode?.seed,
use_cpu: originalNoiseNode?.use_cpu,
is_intermediate: originalNoiseNode?.is_intermediate,
};
const rescaleLatentsNode: ResizeLatentsInvocation = {
id: RESCALE_LATENTS,
type: 'lresize',
width: state.generation.width,
height: state.generation.height,
};
// New node to convert latents to image.
const latentsToImageHrfNode: LatentsToImageInvocation | undefined =
originalLatentsToImageNode
? {
type: 'l2i',
id: LATENTS_TO_IMAGE_HRF,
fp32: originalLatentsToImageNode?.fp32,
is_intermediate: originalLatentsToImageNode?.is_intermediate,
}
: undefined;
// Add new nodes to graph.
graph.nodes[LATENTS_TO_IMAGE_HRF] =
latentsToImageHrfNode as LatentsToImageInvocation;
graph.nodes[DENOISE_LATENTS_HRF] =
denoiseLatentsHrfNode as DenoiseLatentsInvocation;
graph.nodes[NOISE_HRF] = hrfNoiseNode as NoiseInvocation;
graph.nodes[RESCALE_LATENTS] = rescaleLatentsNode as ResizeLatentsInvocation;
// Connect nodes.
graph.edges.push(
{
// Set up rescale latents.
source: {
node_id: DENOISE_LATENTS,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_IMAGE_HRF_LR,
node_id: RESCALE_LATENTS,
field: 'latents',
},
},
// Set up new noise node
{
source: {
node_id: isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
field: 'vae',
},
destination: {
node_id: LATENTS_TO_IMAGE_HRF_LR,
field: 'vae',
},
}
);
graph.nodes[RESIZE_HRF] = {
id: RESIZE_HRF,
type: 'img_resize',
is_intermediate: true,
width: width,
height: height,
};
if (hrfMethod == 'ESRGAN') {
let model_name: ESRGANInvocation['model_name'] = 'RealESRGAN_x2plus.pth';
if ((width * height) / (hrfWidth * hrfHeight) > 2) {
model_name = 'RealESRGAN_x4plus.pth';
}
graph.nodes[ESRGAN_HRF] = {
id: ESRGAN_HRF,
type: 'esrgan',
model_name,
is_intermediate: true,
};
graph.edges.push(
{
source: {
node_id: LATENTS_TO_IMAGE_HRF_LR,
field: 'image',
},
destination: {
node_id: ESRGAN_HRF,
field: 'image',
},
},
{
source: {
node_id: ESRGAN_HRF,
field: 'image',
},
destination: {
node_id: RESIZE_HRF,
field: 'image',
},
}
);
} else {
graph.edges.push({
source: {
node_id: LATENTS_TO_IMAGE_HRF_LR,
field: 'image',
},
destination: {
node_id: RESIZE_HRF,
field: 'image',
},
});
}
graph.nodes[NOISE_HRF] = {
type: 'noise',
id: NOISE_HRF,
seed: originalNoiseNode?.seed,
use_cpu: originalNoiseNode?.use_cpu,
is_intermediate: true,
};
graph.edges.push(
{
source: {
node_id: RESIZE_HRF,
node_id: RESCALE_LATENTS,
field: 'height',
},
destination: {
@@ -264,58 +179,18 @@ export const addHrfToGraph = (
},
{
source: {
node_id: RESIZE_HRF,
node_id: RESCALE_LATENTS,
field: 'width',
},
destination: {
node_id: NOISE_HRF,
field: 'width',
},
}
);
graph.nodes[IMAGE_TO_LATENTS_HRF] = {
type: 'i2l',
id: IMAGE_TO_LATENTS_HRF,
is_intermediate: true,
};
graph.edges.push(
{
source: {
node_id: isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
field: 'vae',
},
destination: {
node_id: IMAGE_TO_LATENTS_HRF,
field: 'vae',
},
},
// Set up new denoise node.
{
source: {
node_id: RESIZE_HRF,
field: 'image',
},
destination: {
node_id: IMAGE_TO_LATENTS_HRF,
field: 'image',
},
}
);
graph.nodes[DENOISE_LATENTS_HRF] = {
type: 'denoise_latents',
id: DENOISE_LATENTS_HRF,
is_intermediate: true,
cfg_scale: originalDenoiseLatentsNode?.cfg_scale,
scheduler: originalDenoiseLatentsNode?.scheduler,
steps: originalDenoiseLatentsNode?.steps,
denoising_start: 1 - state.generation.hrfStrength,
denoising_end: 1,
};
graph.edges.push(
{
source: {
node_id: IMAGE_TO_LATENTS_HRF,
node_id: RESCALE_LATENTS,
field: 'latents',
},
destination: {
@@ -332,41 +207,35 @@ export const addHrfToGraph = (
node_id: DENOISE_LATENTS_HRF,
field: 'noise',
},
}
);
copyConnectionsToDenoiseLatentsHrf(graph);
graph.nodes[LATENTS_TO_IMAGE_HRF_HR] = {
type: 'l2i',
id: LATENTS_TO_IMAGE_HRF_HR,
fp32: originalLatentsToImageNode?.fp32,
is_intermediate: true,
};
graph.edges.push(
{
source: {
node_id: isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
field: 'vae',
},
destination: {
node_id: LATENTS_TO_IMAGE_HRF_HR,
field: 'vae',
},
},
// Set up new latents to image node.
{
source: {
node_id: DENOISE_LATENTS_HRF,
field: 'latents',
},
destination: {
node_id: LATENTS_TO_IMAGE_HRF_HR,
node_id: LATENTS_TO_IMAGE_HRF,
field: 'latents',
},
},
{
source: {
node_id: isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
field: 'vae',
},
destination: {
node_id: LATENTS_TO_IMAGE_HRF,
field: 'vae',
},
}
);
upsertMetadata(graph, {
hrf_height: hrfHeight,
hrf_width: hrfWidth,
hrf_strength: hrfStrength,
hrf_enabled: hrfEnabled,
hrf_method: hrfMethod,
});
copyConnectionsToDenoiseLatentsHrf(graph);
};
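For readers following the frontend side: the removed `calculateHrfRes` helper above derives the initial low-resolution generation size from the base model and the target size (keep the aspect ratio, target the model's native area, floor at half the native dimension, cap at the final size, round down to a multiple of 8). The same arithmetic transcribed into Python for illustration, following the TypeScript above:

```python
import math


def round_down_to_multiple(value: int, multiple: int) -> int:
    return (value // multiple) * multiple


def calculate_hrf_res(base_model: str, width: int, height: int) -> tuple[int, int]:
    """Initial (width, height) for the high-res-fix first pass."""
    aspect = width / height
    dimension = 1024 if base_model == "sdxl" else 512
    min_dimension = math.floor(dimension * 0.5)
    model_area = dimension * dimension  # assuming square images for model_area

    if aspect > 1.0:
        init_height = max(min_dimension, math.sqrt(model_area / aspect))
        init_width = init_height * aspect
    else:
        init_width = max(min_dimension, math.sqrt(model_area * aspect))
        init_height = init_width / aspect

    # Cap initial height and width to the final height and width.
    init_width = min(width, init_width)
    init_height = min(height, init_height)
    return (
        round_down_to_multiple(math.floor(init_width), 8),
        round_down_to_multiple(math.floor(init_height), 8),
    )


print(calculate_hrf_res("sd1", 1024, 768))  # -> (584, 440)
```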


@@ -5,7 +5,7 @@ import { SaveImageInvocation } from 'services/api/types';
import {
CANVAS_OUTPUT,
LATENTS_TO_IMAGE,
LATENTS_TO_IMAGE_HRF_HR,
LATENTS_TO_IMAGE_HRF,
NSFW_CHECKER,
SAVE_IMAGE,
WATERMARKER,
@@ -62,10 +62,10 @@ export const addSaveImageNode = (
},
destination,
});
} else if (LATENTS_TO_IMAGE_HRF_HR in graph.nodes) {
} else if (LATENTS_TO_IMAGE_HRF in graph.nodes) {
graph.edges.push({
source: {
node_id: LATENTS_TO_IMAGE_HRF_HR,
node_id: LATENTS_TO_IMAGE_HRF,
field: 'image',
},
destination,


@@ -4,11 +4,7 @@ export const NEGATIVE_CONDITIONING = 'negative_conditioning';
export const DENOISE_LATENTS = 'denoise_latents';
export const DENOISE_LATENTS_HRF = 'denoise_latents_hrf';
export const LATENTS_TO_IMAGE = 'latents_to_image';
export const LATENTS_TO_IMAGE_HRF_HR = 'latents_to_image_hrf_hr';
export const LATENTS_TO_IMAGE_HRF_LR = 'latents_to_image_hrf_lr';
export const IMAGE_TO_LATENTS_HRF = 'image_to_latents_hrf';
export const RESIZE_HRF = 'resize_hrf';
export const ESRGAN_HRF = 'esrgan_hrf';
export const LATENTS_TO_IMAGE_HRF = 'latents_to_image_hrf';
export const SAVE_IMAGE = 'save_image';
export const NSFW_CHECKER = 'nsfw_checker';
export const WATERMARKER = 'invisible_watermark';
@@ -25,6 +21,7 @@ export const CLIP_SKIP = 'clip_skip';
export const IMAGE_TO_LATENTS = 'image_to_latents';
export const LATENTS_TO_LATENTS = 'latents_to_latents';
export const RESIZE = 'resize_image';
export const RESCALE_LATENTS = 'rescale_latents';
export const IMG2IMG_RESIZE = 'img2img_resize';
export const CANVAS_OUTPUT = 'canvas_output';
export const INPAINT_IMAGE = 'inpaint_image';

View File

@@ -7,9 +7,10 @@ import IAICollapse from 'common/components/IAICollapse';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import ParamHrfHeight from './ParamHrfHeight';
import ParamHrfStrength from './ParamHrfStrength';
import ParamHrfToggle from './ParamHrfToggle';
import ParamHrfMethod from './ParamHrfMethod';
import ParamHrfWidth from './ParamHrfWidth';
const selector = createSelector(
stateSelector,
@@ -36,11 +37,28 @@ export default function ParamHrfCollapse() {
}
return (
<IAICollapse label={t('hrf.hrf')} activeLabel={activeLabel}>
<IAICollapse label="High Resolution Fix" activeLabel={activeLabel}>
<Flex sx={{ flexDir: 'column', gap: 2 }}>
<ParamHrfToggle />
<ParamHrfStrength />
<ParamHrfMethod />
{hrfEnabled && (
<Flex
sx={{
gap: 2,
p: 4,
borderRadius: 4,
flexDirection: 'column',
w: 'full',
bg: 'base.100',
_dark: {
bg: 'base.750',
},
}}
>
<ParamHrfWidth />
<ParamHrfHeight />
</Flex>
)}
{hrfEnabled && <ParamHrfStrength />}
</Flex>
</IAICollapse>
);

View File

@@ -0,0 +1,87 @@
import { createSelector } from '@reduxjs/toolkit';
import { stateSelector } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import IAISlider, { IAIFullSliderProps } from 'common/components/IAISlider';
import { roundToMultiple } from 'common/util/roundDownToMultiple';
import {
setHrfHeight,
setHrfWidth,
} from 'features/parameters/store/generationSlice';
import { memo, useCallback } from 'react';
function findPrevMultipleOfEight(n: number): number {
return Math.floor((n - 1) / 8) * 8;
}
const selector = createSelector(
[stateSelector],
({ generation, hotkeys, config }) => {
const { min, fineStep, coarseStep } = config.sd.height;
const { model, height, hrfHeight, aspectRatio, hrfEnabled } = generation;
const step = hotkeys.shift ? fineStep : coarseStep;
return {
model,
height,
hrfHeight,
min,
step,
aspectRatio,
hrfEnabled,
};
},
defaultSelectorOptions
);
type ParamHeightProps = Omit<
IAIFullSliderProps,
'label' | 'value' | 'onChange'
>;
const ParamHrfHeight = (props: ParamHeightProps) => {
const { height, hrfHeight, min, step, aspectRatio, hrfEnabled } =
useAppSelector(selector);
const dispatch = useAppDispatch();
const maxHrfHeight = Math.max(findPrevMultipleOfEight(height), min);
const handleChange = useCallback(
(v: number) => {
dispatch(setHrfHeight(v));
if (aspectRatio) {
const newWidth = roundToMultiple(v * aspectRatio, 8);
dispatch(setHrfWidth(newWidth));
}
},
[dispatch, aspectRatio]
);
const handleReset = useCallback(() => {
dispatch(setHrfHeight(maxHrfHeight));
if (aspectRatio) {
const newWidth = roundToMultiple(maxHrfHeight * aspectRatio, 8);
dispatch(setHrfWidth(newWidth));
}
}, [dispatch, maxHrfHeight, aspectRatio]);
return (
<IAISlider
label="Initial Height"
value={hrfHeight}
min={min}
step={step}
max={maxHrfHeight}
onChange={handleChange}
handleReset={handleReset}
withInput
withReset
withSliderMarks
sliderNumberInputProps={{ max: maxHrfHeight }}
isDisabled={!hrfEnabled}
{...props}
/>
);
};
export default memo(ParamHrfHeight);
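For a concrete sense of the slider bounds above: findPrevMultipleOfEight caps the initial-pass dimension just below the final one, and the aspect-ratio branch keeps width and height in step on the 8-pixel latent grid. A small standalone sketch (the roundToMultiple stand-in and the 64 px minimum are assumptions for this example):

```ts
// Previous multiple of eight strictly below n, as in the component above.
function findPrevMultipleOfEight(n: number): number {
  return Math.floor((n - 1) / 8) * 8;
}

// Stand-in for roundToMultiple from common/util/roundDownToMultiple,
// assumed here to snap to the nearest multiple of `multiple`.
function roundToMultiple(value: number, multiple: number): number {
  return Math.round(value / multiple) * multiple;
}

// Example: a 768x512 target (aspect ratio 1.5) with an assumed 64 px minimum.
const maxHrfHeight = Math.max(findPrevMultipleOfEight(512), 64); // 504
// Dragging the initial height to 336 keeps the width locked to the ratio.
const newWidth = roundToMultiple(336 * 1.5, 8); // 504
console.log({ maxHrfHeight, newWidth });
```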

View File

@@ -1,49 +0,0 @@
import { createSelector } from '@reduxjs/toolkit';
import { stateSelector } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import IAIMantineSelect from 'common/components/IAIMantineSelect';
import { setHrfMethod } from 'features/parameters/store/generationSlice';
import { HrfMethodParam } from 'features/parameters/types/parameterSchemas';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
const selector = createSelector(
stateSelector,
({ generation }) => {
const { hrfMethod, hrfEnabled } = generation;
return { hrfMethod, hrfEnabled };
},
defaultSelectorOptions
);
const DATA = ['ESRGAN', 'bilinear'];
// Dropdown selection for the type of high resolution fix method to use.
const ParamHrfMethodSelect = () => {
const dispatch = useAppDispatch();
const { t } = useTranslation();
const { hrfMethod, hrfEnabled } = useAppSelector(selector);
const handleChange = useCallback(
(v: HrfMethodParam | null) => {
if (!v) {
return;
}
dispatch(setHrfMethod(v));
},
[dispatch]
);
return (
<IAIMantineSelect
label={t('hrf.upscaleMethod')}
value={hrfMethod}
data={DATA}
onChange={handleChange}
disabled={!hrfEnabled}
/>
);
};
export default memo(ParamHrfMethodSelect);

View File

@@ -5,8 +5,6 @@ import { memo, useCallback } from 'react';
import { stateSelector } from 'app/store/store';
import { setHrfStrength } from 'features/parameters/store/generationSlice';
import IAISlider from 'common/components/IAISlider';
import { Tooltip } from '@chakra-ui/react';
import { useTranslation } from 'react-i18next';
const selector = createSelector(
[stateSelector],
@@ -33,7 +31,6 @@ const ParamHrfStrength = () => {
const { hrfStrength, initial, min, sliderMax, step, hrfEnabled } =
useAppSelector(selector);
const dispatch = useAppDispatch();
const { t } = useTranslation();
const handleHrfStrengthReset = useCallback(() => {
dispatch(setHrfStrength(initial));
@@ -47,21 +44,20 @@ const ParamHrfStrength = () => {
);
return (
<Tooltip label={t('hrf.strengthTooltip')} placement="right" hasArrow>
<IAISlider
label={t('parameters.denoisingStrength')}
min={min}
max={sliderMax}
step={step}
value={hrfStrength}
onChange={handleHrfStrengthChange}
withSliderMarks
withInput
withReset
handleReset={handleHrfStrengthReset}
isDisabled={!hrfEnabled}
/>
</Tooltip>
<IAISlider
label="Denoising Strength"
aria-label="High Resolution Denoising Strength"
min={min}
max={sliderMax}
step={step}
value={hrfStrength}
onChange={handleHrfStrengthChange}
withSliderMarks
withInput
withReset
handleReset={handleHrfStrengthReset}
isDisabled={!hrfEnabled}
/>
);
};

View File

@@ -3,11 +3,9 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAISwitch from 'common/components/IAISwitch';
import { setHrfEnabled } from 'features/parameters/store/generationSlice';
import { ChangeEvent, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
export default function ParamHrfToggle() {
const dispatch = useAppDispatch();
const { t } = useTranslation();
const hrfEnabled = useAppSelector(
(state: RootState) => state.generation.hrfEnabled
@@ -21,10 +19,9 @@ export default function ParamHrfToggle() {
return (
<IAISwitch
label={t('hrf.enableHrf')}
label="Enable High Resolution Fix"
isChecked={hrfEnabled}
onChange={handleHrfEnabled}
tooltip={t('hrf.enableHrfTooltip')}
/>
);
}

View File

@@ -0,0 +1,84 @@
import { createSelector } from '@reduxjs/toolkit';
import { stateSelector } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import IAISlider, { IAIFullSliderProps } from 'common/components/IAISlider';
import { roundToMultiple } from 'common/util/roundDownToMultiple';
import {
setHrfHeight,
setHrfWidth,
} from 'features/parameters/store/generationSlice';
import { memo, useCallback } from 'react';
function findPrevMultipleOfEight(n: number): number {
return Math.floor((n - 1) / 8) * 8;
}
const selector = createSelector(
[stateSelector],
({ generation, hotkeys, config }) => {
const { min, fineStep, coarseStep } = config.sd.width;
const { model, width, hrfWidth, aspectRatio, hrfEnabled } = generation;
const step = hotkeys.shift ? fineStep : coarseStep;
return {
model,
width,
hrfWidth,
min,
step,
aspectRatio,
hrfEnabled,
};
},
defaultSelectorOptions
);
type ParamWidthProps = Omit<IAIFullSliderProps, 'label' | 'value' | 'onChange'>;
const ParamHrfWidth = (props: ParamWidthProps) => {
const { width, hrfWidth, min, step, aspectRatio, hrfEnabled } =
useAppSelector(selector);
const dispatch = useAppDispatch();
const maxHrfWidth = Math.max(findPrevMultipleOfEight(width), min);
const handleChange = useCallback(
(v: number) => {
dispatch(setHrfWidth(v));
if (aspectRatio) {
const newHeight = roundToMultiple(v / aspectRatio, 8);
dispatch(setHrfHeight(newHeight));
}
},
[dispatch, aspectRatio]
);
const handleReset = useCallback(() => {
dispatch(setHrfWidth(maxHrfWidth));
if (aspectRatio) {
const newHeight = roundToMultiple(maxHrfWidth / aspectRatio, 8);
dispatch(setHrfHeight(newHeight));
}
}, [dispatch, maxHrfWidth, aspectRatio]);
return (
<IAISlider
label="Initial Width"
value={hrfWidth}
min={min}
step={step}
max={maxHrfWidth}
onChange={handleChange}
handleReset={handleReset}
withInput
withReset
withSliderMarks
sliderNumberInputProps={{ max: maxHrfWidth }}
isDisabled={!hrfEnabled}
{...props}
/>
);
};
export default memo(ParamHrfWidth);

View File

@@ -55,9 +55,6 @@ import { initialImageSelected, modelSelected } from '../store/actions';
import {
setCfgScale,
setHeight,
setHrfEnabled,
setHrfMethod,
setHrfStrength,
setImg2imgStrength,
setNegativePrompt,
setPositivePrompt,
@@ -70,7 +67,6 @@ import {
isValidCfgScale,
isValidControlNetModel,
isValidHeight,
isValidHrfMethod,
isValidIPAdapterModel,
isValidLoRAModel,
isValidMainModel,
@@ -87,7 +83,6 @@ import {
isValidSteps,
isValidStrength,
isValidWidth,
isValidBoolean,
} from '../types/parameterSchemas';
const selector = createSelector(
@@ -366,51 +361,6 @@ export const useRecallParameters = () => {
[dispatch, parameterSetToast, parameterNotSetToast]
);
/**
* Recall high resolution enabled with toast
*/
const recallHrfEnabled = useCallback(
(hrfEnabled: unknown) => {
if (!isValidBoolean(hrfEnabled)) {
parameterNotSetToast();
return;
}
dispatch(setHrfEnabled(hrfEnabled));
parameterSetToast();
},
[dispatch, parameterSetToast, parameterNotSetToast]
);
/**
* Recall high resolution strength with toast
*/
const recallHrfStrength = useCallback(
(hrfStrength: unknown) => {
if (!isValidStrength(hrfStrength)) {
parameterNotSetToast();
return;
}
dispatch(setHrfStrength(hrfStrength));
parameterSetToast();
},
[dispatch, parameterSetToast, parameterNotSetToast]
);
/**
* Recall high resolution method with toast
*/
const recallHrfMethod = useCallback(
(hrfMethod: unknown) => {
if (!isValidHrfMethod(hrfMethod)) {
parameterNotSetToast();
return;
}
dispatch(setHrfMethod(hrfMethod));
parameterSetToast();
},
[dispatch, parameterSetToast, parameterNotSetToast]
);
/**
* Recall LoRA with toast
*/
@@ -761,9 +711,6 @@ export const useRecallParameters = () => {
steps,
width,
strength,
hrf_enabled,
hrf_strength,
hrf_method,
positive_style_prompt,
negative_style_prompt,
refiner_model,
@@ -782,55 +729,34 @@ export const useRecallParameters = () => {
if (isValidCfgScale(cfg_scale)) {
dispatch(setCfgScale(cfg_scale));
}
if (isValidMainModel(model)) {
dispatch(modelSelected(model));
}
if (isValidPositivePrompt(positive_prompt)) {
dispatch(setPositivePrompt(positive_prompt));
}
if (isValidNegativePrompt(negative_prompt)) {
dispatch(setNegativePrompt(negative_prompt));
}
if (isValidScheduler(scheduler)) {
dispatch(setScheduler(scheduler));
}
if (isValidSeed(seed)) {
dispatch(setSeed(seed));
}
if (isValidSteps(steps)) {
dispatch(setSteps(steps));
}
if (isValidWidth(width)) {
dispatch(setWidth(width));
}
if (isValidHeight(height)) {
dispatch(setHeight(height));
}
if (isValidStrength(strength)) {
dispatch(setImg2imgStrength(strength));
}
if (isValidBoolean(hrf_enabled)) {
dispatch(setHrfEnabled(hrf_enabled));
}
if (isValidStrength(hrf_strength)) {
dispatch(setHrfStrength(hrf_strength));
}
if (isValidHrfMethod(hrf_method)) {
dispatch(setHrfMethod(hrf_method));
}
if (isValidSDXLPositiveStylePrompt(positive_style_prompt)) {
dispatch(setPositiveStylePromptSDXL(positive_style_prompt));
}
@@ -936,9 +862,6 @@ export const useRecallParameters = () => {
recallWidth,
recallHeight,
recallStrength,
recallHrfEnabled,
recallHrfStrength,
recallHrfMethod,
recallLoRA,
recallControlNet,
recallIPAdapter,

View File

@@ -11,7 +11,6 @@ import {
CanvasCoherenceModeParam,
CfgScaleParam,
HeightParam,
HrfMethodParam,
MainModelParam,
MaskBlurMethodParam,
NegativePromptParam,
@@ -28,9 +27,10 @@ import {
} from '../types/parameterSchemas';
export interface GenerationState {
hrfHeight: HeightParam;
hrfWidth: WidthParam;
hrfEnabled: boolean;
hrfStrength: StrengthParam;
hrfMethod: HrfMethodParam;
cfgScale: CfgScaleParam;
height: HeightParam;
img2imgStrength: StrengthParam;
@@ -73,9 +73,10 @@ export interface GenerationState {
}
export const initialGenerationState: GenerationState = {
hrfStrength: 0.45,
hrfHeight: 64,
hrfWidth: 64,
hrfStrength: 0.75,
hrfEnabled: false,
hrfMethod: 'ESRGAN',
cfgScale: 7.5,
height: 512,
img2imgStrength: 0.75,
@@ -278,15 +279,18 @@ export const generationSlice = createSlice({
setClipSkip: (state, action: PayloadAction<number>) => {
state.clipSkip = action.payload;
},
setHrfHeight: (state, action: PayloadAction<number>) => {
state.hrfHeight = action.payload;
},
setHrfWidth: (state, action: PayloadAction<number>) => {
state.hrfWidth = action.payload;
},
setHrfStrength: (state, action: PayloadAction<number>) => {
state.hrfStrength = action.payload;
},
setHrfEnabled: (state, action: PayloadAction<boolean>) => {
state.hrfEnabled = action.payload;
},
setHrfMethod: (state, action: PayloadAction<HrfMethodParam>) => {
state.hrfMethod = action.payload;
},
shouldUseCpuNoiseChanged: (state, action: PayloadAction<boolean>) => {
state.shouldUseCpuNoise = action.payload;
},
@@ -371,9 +375,10 @@ export const {
setSeamlessXAxis,
setSeamlessYAxis,
setClipSkip,
setHrfEnabled,
setHrfHeight,
setHrfWidth,
setHrfStrength,
setHrfMethod,
setHrfEnabled,
shouldUseCpuNoiseChanged,
setAspectRatio,
setShouldLockAspectRatio,

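Since the reducers restored above are plain setters, driving the rc3 HRF state comes down to dispatching them; a hypothetical usage sketch (the `dispatch` declaration stands in for the app's typed useAppDispatch hook):

```ts
// Hypothetical dispatch sequence using the setters exported above.
import {
  setHrfEnabled,
  setHrfHeight,
  setHrfWidth,
  setHrfStrength,
} from 'features/parameters/store/generationSlice';

declare const dispatch: (action: unknown) => void;

dispatch(setHrfEnabled(true));   // turn the high-resolution fix on
dispatch(setHrfWidth(512));      // first-pass width, multiple of 8
dispatch(setHrfHeight(512));     // first-pass height, multiple of 8
dispatch(setHrfStrength(0.75));  // denoising strength for the second pass
```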
View File

@@ -132,7 +132,6 @@ export const zScheduler = z.enum([
'lms_k',
'euler_a',
'kdpm_2_a',
'lcm',
]);
/**
* Type alias for scheduler parameter, inferred from its zod schema
@@ -167,7 +166,6 @@ export const SCHEDULER_LABEL_MAP: Record<SchedulerParam, string> = {
lms_k: 'LMS Karras',
euler_a: 'Euler Ancestral',
kdpm_2_a: 'KDPM 2 Ancestral',
lcm: 'LCM',
};
/**
@@ -400,20 +398,6 @@ export type PrecisionParam = z.infer<typeof zPrecision>;
export const isValidPrecision = (val: unknown): val is PrecisionParam =>
zPrecision.safeParse(val).success;
/**
* Zod schema for a high resolution fix method parameter.
*/
export const zHrfMethod = z.enum(['ESRGAN', 'bilinear']);
/**
* Type alias for high resolution fix method parameter, inferred from its zod schema
*/
export type HrfMethodParam = z.infer<typeof zHrfMethod>;
/**
* Validates/type-guards a value as a high resolution fix method parameter
*/
export const isValidHrfMethod = (val: unknown): val is HrfMethodParam =>
zHrfMethod.safeParse(val).success;
/**
* Zod schema for SDXL refiner positive aesthetic score parameter
*/
@@ -496,17 +480,6 @@ export const isValidCoherenceModeParam = (
): val is CanvasCoherenceModeParam =>
zCanvasCoherenceMode.safeParse(val).success;
/**
* Zod schema for a boolean.
*/
export const zBoolean = z.boolean();
/**
* Validates/type-guards a value as a boolean parameter
*/
export const isValidBoolean = (val: unknown): val is boolean =>
zBoolean.safeParse(val).success && val !== null && val !== undefined;
// /**
// * Zod schema for BaseModelType
// */
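The validators removed in this hunk follow the same pattern as every other parameter in the file: declare a zod schema, infer its type, and expose a safeParse-based type guard. Restated as a self-contained snippet for clarity (the sample values are illustrative):

```ts
import { z } from 'zod';

// Same shape as the removed zHrfMethod / isValidHrfMethod pair above.
const zHrfMethod = z.enum(['ESRGAN', 'bilinear']);
type HrfMethodParam = z.infer<typeof zHrfMethod>;
const isValidHrfMethod = (val: unknown): val is HrfMethodParam =>
  zHrfMethod.safeParse(val).success;

console.log(isValidHrfMethod('ESRGAN'));  // true
console.log(isValidHrfMethod('lanczos')); // false
console.log(isValidHrfMethod(undefined)); // false
```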

View File

@@ -70,7 +70,7 @@ export const initialConfigState: AppConfig = {
coarseStep: 0.05,
},
hrfStrength: {
initial: 0.45,
initial: 0.7,
min: 0,
sliderMax: 1,
inputMax: 1,

File diff suppressed because one or more lines are too long

View File

@@ -127,6 +127,7 @@ export type CompelInvocation = s['CompelInvocation'];
export type DynamicPromptInvocation = s['DynamicPromptInvocation'];
export type NoiseInvocation = s['NoiseInvocation'];
export type DenoiseLatentsInvocation = s['DenoiseLatentsInvocation'];
export type ResizeLatentsInvocation = s['ResizeLatentsInvocation'];
export type ONNXTextToLatentsInvocation = s['ONNXTextToLatentsInvocation'];
export type SDXLLoraLoaderInvocation = s['SDXLLoraLoaderInvocation'];
export type ImageToLatentsInvocation = s['ImageToLatentsInvocation'];

File diff suppressed because it is too large

View File

@@ -1 +1 @@
__version__ = "3.4.0rc4"
__version__ = "3.4.0rc3"

View File

@@ -40,7 +40,9 @@ dependencies = [
"controlnet-aux>=0.0.6",
"timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26
"datasets",
"diffusers[torch]~=0.23.0",
# When bumping diffusers beyond 0.21, make sure to address this:
# https://github.com/invoke-ai/InvokeAI/blob/fc09ab7e13cb7ca5389100d149b6422ace7b8ed3/invokeai/app/invocations/latent.py#L513
"diffusers[torch]~=0.22.0",
"dnspython~=2.4.0",
"dynamicprompts",
"easing-functions",
@@ -82,7 +84,7 @@ dependencies = [
"torchvision~=0.16",
"torchmetrics~=0.11.0",
"torchsde~=0.2.5",
"transformers~=4.35.0",
"transformers~=4.31.0",
"uvicorn[standard]~=0.21.1",
"windows-curses; sys_platform=='win32'",
]