Compare commits

1 commit

Author: Millun Atluri
SHA1: 913ad69c34
Message: fix typos
Date: 2023-12-20 17:02:54 +11:00

10 changed files with 15 additions and 61 deletions

View File

@@ -11,7 +11,7 @@ complex functionality.
InvokeAI Nodes can be found in the `invokeai/app/invocations` directory. These can be used as examples to create your own nodes.
New nodes should be added to a subfolder in `nodes` direction found at the root level of the InvokeAI installation location. Nodes added to this folder will be able to be used upon application startup.
New nodes should be added to a subfolder in the `nodes` directory found at the root level of the InvokeAI installation location. Nodes added to this folder will be imported upon application startup.
Example `nodes` subfolder structure:
```py
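# The original example tree is truncated at this hunk boundary; the layout
# below is a hypothetical illustration of such a subfolder, not the
# project's documented structure.
#
# nodes/
# ├── __init__.py
# └── my_custom_nodes/
#     ├── __init__.py        # imports the node modules so they load at startup
#     └── my_custom_node.py  # defines the invocation class(es)
```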

View File

@@ -271,7 +271,6 @@ class InvokeAIAppConfig(InvokeAISettings):
attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', json_schema_extra=Categories.Generation)
force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.Generation)
png_compress_level : int = Field(default=6, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", json_schema_extra=Categories.Generation)
stable_fast : bool = Field(default=True, description="Enable stable-fast performance optimizations, if the library is installed and functional", json_schema_extra=Categories.Generation)
# QUEUE
max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", json_schema_extra=Categories.Queue)
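For orientation, the settings above are plain pydantic `Field` declarations on the application config. A minimal, standalone sketch of the same declare-and-read pattern (an illustrative model, not InvokeAI's actual `InvokeAIAppConfig`):

```py
# Standalone sketch of the Field declaration pattern above (pydantic v2).
# This is an illustrative model, not InvokeAI's real settings class.
from pydantic import BaseModel, Field


class GenerationSettings(BaseModel):
    force_tiled_decode: bool = Field(
        default=False, description="Enable tiled VAE decode (lower memory, some speed penalty)"
    )
    png_compress_level: int = Field(
        default=6, description="PIL PNG compress_level: 0 = fastest/largest, 9 = slowest/smallest"
    )


settings = GenerationSettings()
print(settings.png_compress_level)  # -> 6
```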

View File

@@ -141,6 +141,7 @@ class IPAttnProcessor2_0(torch.nn.Module):
ip_hidden_states = ipa_embed
# Expected ip_hidden_state shape: (batch_size, num_ip_images, ip_seq_len, ip_image_embedding)
ip_key = ipa_weights.to_k_ip(ip_hidden_states)
ip_value = ipa_weights.to_v_ip(ip_hidden_states)
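For context, the lines above project the IP-Adapter image embeddings into attention keys and values with per-layer linear maps. A shape-level sketch of that projection (all dimensions are illustrative assumptions):

```py
# Shape-level sketch of the to_k_ip / to_v_ip projections above.
# All dimensions here are illustrative assumptions, not InvokeAI's values.
import torch

batch_size, num_ip_images, ip_seq_len = 2, 1, 4
ip_embed_dim, attn_inner_dim = 768, 320

to_k_ip = torch.nn.Linear(ip_embed_dim, attn_inner_dim, bias=False)
to_v_ip = torch.nn.Linear(ip_embed_dim, attn_inner_dim, bias=False)

# ip_hidden_states: (batch_size, num_ip_images, ip_seq_len, ip_embed_dim)
ip_hidden_states = torch.randn(batch_size, num_ip_images, ip_seq_len, ip_embed_dim)
ip_key = to_k_ip(ip_hidden_states)    # -> (..., attn_inner_dim)
ip_value = to_v_ip(ip_hidden_states)  # -> (..., attn_inner_dim)
```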

View File

@@ -12,8 +12,6 @@ class IPAttentionProcessorWeights(torch.nn.Module):
        super().__init__()
        self.to_k_ip = torch.nn.Linear(in_dim, out_dim, bias=False)
        self.to_v_ip = torch.nn.Linear(in_dim, out_dim, bias=False)
        for param in self.parameters():
            param.requires_grad = False
class IPAttentionWeights(torch.nn.Module):
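Setting `requires_grad = False` on a module's parameters, as in the loop above, freezes them so autograd never tracks or updates them. A minimal sketch of that pattern (class name and dimensions are illustrative):

```py
# Minimal sketch of freezing a module's parameters for inference-only use.
# Class name and dimensions are illustrative, not InvokeAI's.
import torch


class FrozenProjection(torch.nn.Module):
    def __init__(self, in_dim: int = 768, out_dim: int = 320):
        super().__init__()
        self.to_k_ip = torch.nn.Linear(in_dim, out_dim, bias=False)
        self.to_v_ip = torch.nn.Linear(in_dim, out_dim, bias=False)
        for param in self.parameters():
            param.requires_grad = False  # skipped by autograd from now on


print(all(not p.requires_grad for p in FrozenProjection().parameters()))  # -> True
```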

View File

@@ -24,14 +24,12 @@ import sys
import time
from contextlib import suppress
from dataclasses import dataclass, field
from importlib.util import find_spec
from pathlib import Path
from typing import Any, Dict, Optional, Type, Union, types
import torch
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.model_management.memory_snapshot import MemorySnapshot, get_pretty_snapshot_diff
from invokeai.backend.model_management.model_load_optimizations import skip_torch_weight_init
@@ -41,26 +39,6 @@ from .models import BaseModelType, ModelBase, ModelType, SubModelType
if choose_torch_device() == torch.device("mps"):
from torch import mps
SFAST_AVAILABLE = False
TRITON_AVAILABLE = False
XFORMERS_AVAILABLE = False
SFAST_CONFIG = None
TRITON_AVAILABLE = find_spec("triton") is not None
XFORMERS_AVAILABLE = find_spec("xformers") is not None
try:
from sfast.compilers.diffusion_pipeline_compiler import CompilationConfig, compile_unet, compile_vae
SFAST_CONFIG = CompilationConfig.Default()
SFAST_CONFIG.enable_cuda_graph = True
SFAST_CONFIG.enable_xformers = XFORMERS_AVAILABLE
SFAST_CONFIG.enable_triton = TRITON_AVAILABLE
SFAST_AVAILABLE = True
except ImportError:
pass
# Maximum size of the cache, in gigs
# Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
DEFAULT_MAX_CACHE_SIZE = 6.0
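The stable-fast detection block above probes for optional acceleration libraries: `find_spec` reports whether `triton` and `xformers` are installed without importing them, while the `try/except ImportError` guards the stable-fast import itself. A generic sketch of that detection pattern (the package name is a placeholder, not a project dependency):

```py
# Generic optional-dependency probe, mirroring the pattern above.
# "somelib" is a placeholder package name, not a project dependency.
from importlib.util import find_spec

SOMELIB_INSTALLED = find_spec("somelib") is not None  # present on disk, not yet imported

try:
    import somelib  # noqa: F401
    SOMELIB_AVAILABLE = True  # import succeeded, so it is actually usable
except ImportError:
    SOMELIB_AVAILABLE = False
```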
@@ -132,7 +110,6 @@ class _CacheRecord:
class ModelCache(object):
def __init__(
self,
app_config: InvokeAIAppConfig,
max_cache_size: float = DEFAULT_MAX_CACHE_SIZE,
max_vram_cache_size: float = DEFAULT_MAX_VRAM_CACHE_SIZE,
execution_device: torch.device = torch.device("cuda"),
@@ -145,7 +122,6 @@ class ModelCache(object):
log_memory_usage: bool = False,
):
"""
:param app_config: InvokeAIAppConfig for application
:param max_cache_size: Maximum size of the RAM cache [6.0 GB]
:param execution_device: Torch device to load active model into [torch.device('cuda')]
:param storage_device: Torch device to save inactive model in [torch.device('cpu')]
@@ -159,7 +135,6 @@ class ModelCache(object):
behaviour.
"""
self.model_infos: Dict[str, ModelBase] = {}
self.app_config = app_config
# allow lazy offloading only when vram cache enabled
self.lazy_offloading = lazy_offloading and max_vram_cache_size > 0
self.precision: torch.dtype = precision
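Putting the docstring above into practice, a hypothetical instantiation of the cache using only the parameters visible in these hunks (values and the import path are assumptions; the full constructor signature is not shown here):

```py
# Hypothetical usage; only parameters visible in the hunks above are passed,
# and the import path is assumed rather than taken from this diff.
import torch
from invokeai.backend.model_management.model_cache import ModelCache

cache = ModelCache(
    max_cache_size=6.0,                     # RAM cache size, in GB
    max_vram_cache_size=2.0,                # VRAM cache size, in GB (illustrative)
    execution_device=torch.device("cuda"),
    lazy_offloading=True,                   # only effective when the VRAM cache is enabled
    log_memory_usage=False,
)
```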
@@ -264,9 +239,6 @@ class ModelCache(object):
snapshot_before = self._capture_memory_snapshot()
with skip_torch_weight_init():
model = model_info.get_model(child_type=submodel, torch_dtype=self.precision)
if SFAST_AVAILABLE and self.app_config.stable_fast and submodel:
model = self._compile_model(model, submodel)
snapshot_after = self._capture_memory_snapshot()
end_load_time = time.time()
@@ -350,16 +322,6 @@ class ModelCache(object):
f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}"
)
def _compile_model(self, model: Any, model_type: SubModelType) -> Any:
if model_type == SubModelType("unet"):
self.logger.info("SFast-compiling unet model")
return compile_unet(model, SFAST_CONFIG)
elif model_type == SubModelType("vae"):
self.logger.info("SFast-compiling vae model")
return compile_vae(model, SFAST_CONFIG)
else:
return model
class ModelLocker(object):
def __init__(self, cache, key, model, gpu_load, size_needed):
"""

View File

@@ -344,7 +344,6 @@ class ModelManager(object):
self.app_config = InvokeAIAppConfig.get_config()
self.logger = logger
self.cache = ModelCache(
app_config=self.app_config,
max_cache_size=max_cache_size,
max_vram_cache_size=self.app_config.vram_cache_size,
lazy_offloading=self.app_config.lazy_offload,

View File

@@ -1119,10 +1119,7 @@
"deletedInvalidEdge": "已删除无效的边缘 {{source}} -> {{target}}",
"unknownInput": "未知输入:{{name}}",
"prototypeDesc": "此调用是一个原型 (prototype)。它可能会在本项目更新期间发生破坏性更改,并且随时可能被删除。",
"betaDesc": "此调用尚处于测试阶段。在稳定之前,它可能会在项目更新期间发生破坏性更改。本项目计划长期支持这种调用。",
"newWorkflow": "新建工作流",
"newWorkflowDesc": "是否创建一个新的工作流?",
"newWorkflowDesc2": "当前工作流有未保存的更改。"
"betaDesc": "此调用尚处于测试阶段。在稳定之前,它可能会在项目更新期间发生破坏性更改。本项目计划长期支持这种调用。"
},
"controlnet": {
"resize": "直接缩放",
@@ -1638,7 +1635,7 @@
"openWorkflow": "打开工作流",
"clearWorkflowSearchFilter": "清除工作流检索过滤器",
"workflowLibrary": "工作流库",
"downloadWorkflow": "保存到文件",
"downloadWorkflow": "下载工作流",
"noRecentWorkflows": "无最近工作流",
"workflowSaved": "已保存工作流",
"workflowIsOpen": "工作流已打开",
@@ -1651,9 +1648,8 @@
"deleteWorkflow": "删除工作流",
"workflows": "工作流",
"noDescription": "无描述",
"uploadWorkflow": "从文件中加载",
"userWorkflows": "我的工作流",
"newWorkflowCreated": "已创建新的工作流"
"uploadWorkflow": "上传工作流",
"userWorkflows": "我的工作流"
},
"app": {
"storeNotInitialized": "商店尚未初始化"

View File

@@ -34,7 +34,6 @@ import { actionSanitizer } from './middleware/devtools/actionSanitizer';
import { actionsDenylist } from './middleware/devtools/actionsDenylist';
import { stateSanitizer } from './middleware/devtools/stateSanitizer';
import { listenerMiddleware } from './middleware/listenerMiddleware';
import { authToastMiddleware } from 'services/api/authToastMiddleware';
const allReducers = {
canvas: canvasReducer,
@@ -97,7 +96,6 @@ export const createStore = (uniqueStoreKey?: string, persist = true) =>
})
.concat(api.middleware)
.concat(dynamicMiddlewares)
.concat(authToastMiddleware)
.prepend(listenerMiddleware.middleware),
enhancers: (getDefaultEnhancers) => {
const _enhancers = getDefaultEnhancers().concat(autoBatchEnhancer());

View File

@@ -5,10 +5,12 @@ import { t } from 'i18next';
import { z } from 'zod';
const zRejectedForbiddenAction = z.object({
  payload: z.object({
    status: z.literal(403),
    data: z.object({
      detail: z.string(),
  action: z.object({
    payload: z.object({
      status: z.literal(403),
      data: z.object({
        detail: z.string(),
      }),
    }),
  }),
});
@@ -20,8 +22,8 @@ export const authToastMiddleware: Middleware =
const parsed = zRejectedForbiddenAction.parse(action);
const { dispatch } = api;
const customMessage =
parsed.payload.data.detail !== 'Forbidden'
? parsed.payload.data.detail
parsed.action.payload.data.detail !== 'Forbidden'
? parsed.action.payload.data.detail
: undefined;
dispatch(
addToast({
@@ -30,7 +32,7 @@ export const authToastMiddleware: Middleware =
description: customMessage,
})
);
} catch (error) {
} catch {
// no-op
}
}

View File

@@ -113,7 +113,6 @@ dependencies = [
"onnx" = ["onnxruntime"]
"onnx-cuda" = ["onnxruntime-gpu"]
"onnx-directml" = ["onnxruntime-directml"]
"stable-fast" = ["stable-fast"]
[project.scripts]