Rename model_cache_default.py -> model_cache.py.

Author: Ryan Dick
Date:   2024-12-04 22:45:30 +00:00
Parent: e0bfa6157b
Commit: d30a9ced38

11 changed files with 13 additions and 15 deletions
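
The rename is mechanical: the module is now named after the ModelCache class it defines, and every import site in the hunks below changes the same way. The pattern, taken directly from the diff:

    # Before: module named as the "default" cache implementation
    from invokeai.backend.model_manager.load.model_cache.model_cache_default import ModelCache

    # After: module named after the class it defines
    from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache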


@@ -8,7 +8,7 @@ from pathlib import Path
 from invokeai.backend.model_manager.load.load_base import LoadedModel, LoadedModelWithoutConfig, ModelLoaderBase
 from invokeai.backend.model_manager.load.load_default import ModelLoader
-from invokeai.backend.model_manager.load.model_cache.model_cache_default import ModelCache
+from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
 from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry, ModelLoaderRegistryBase
 
 # This registers the subclasses that implement loaders of specific model types


@@ -18,7 +18,7 @@ from invokeai.backend.model_manager.config import (
     AnyModelConfig,
     SubModelType,
 )
-from invokeai.backend.model_manager.load.model_cache.model_cache_default import ModelCache
+from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
 from invokeai.backend.model_manager.load.model_cache.model_locker import ModelLocker


@@ -14,7 +14,7 @@ from invokeai.backend.model_manager import (
 )
 from invokeai.backend.model_manager.config import DiffusersConfigBase
 from invokeai.backend.model_manager.load.load_base import LoadedModel, ModelLoaderBase
-from invokeai.backend.model_manager.load.model_cache.model_cache_default import ModelCache
+from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
 from invokeai.backend.model_manager.load.model_cache.model_locker import ModelLocker
 from invokeai.backend.model_manager.load.model_util import calc_model_size_by_fs
 from invokeai.backend.model_manager.load.optimizations import skip_torch_weight_init


@@ -1,13 +1,11 @@
 from dataclasses import dataclass
-from typing import Dict, Generic, Optional, TypeVar
+from typing import Any, Dict, Optional
 
 import torch
 
-T = TypeVar("T")
-
 
 @dataclass
-class CacheRecord(Generic[T]):
+class CacheRecord:
     """
     Elements of the cache:
 
@@ -30,7 +28,7 @@ class CacheRecord(Generic[T]):
     """
 
     key: str
-    model: T
+    model: Any
     device: torch.device
     state_dict: Optional[Dict[str, torch.Tensor]]
     size: int
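
Alongside the rename, CacheRecord drops its Generic[T] parameterization in favor of a plain Any-typed model field. A sketch of the resulting dataclass, reconstructed from the hunks above (docstring body elided here):

    from dataclasses import dataclass
    from typing import Any, Dict, Optional

    import torch

    @dataclass
    class CacheRecord:
        key: str
        model: Any  # previously model: T, with T = TypeVar("T")
        device: torch.device
        state_dict: Optional[Dict[str, torch.Tensor]]
        size: int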


@@ -95,7 +95,7 @@ class ModelCache:
         self._log_memory_usage = log_memory_usage
         self._stats: Optional[CacheStats] = None
-        self._cached_models: Dict[str, CacheRecord[AnyModel]] = {}
+        self._cached_models: Dict[str, CacheRecord] = {}
         self._cache_stack: List[str] = []
 
     @property
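
This annotation change follows from the previous hunk: with CacheRecord no longer generic, CacheRecord[AnyModel] is no longer a valid type, and a cache lookup yields a record whose model field is typed as Any. A hypothetical caller (names assumed for illustration):

    from typing import Dict

    from invokeai.backend.model_manager.load.model_cache.cache_record import CacheRecord

    cached_models: Dict[str, CacheRecord] = {}

    def lookup(key: str) -> CacheRecord:
        record = cached_models[key]
        # record.model is now typed as Any rather than AnyModel
        return record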


@@ -8,7 +8,7 @@ import torch
 
 from invokeai.backend.model_manager import AnyModel
 from invokeai.backend.model_manager.load.model_cache.cache_record import CacheRecord
-from invokeai.backend.model_manager.load.model_cache.model_cache_default import ModelCache
+from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
 
 
 class ModelLocker:


@@ -18,7 +18,7 @@ from invokeai.backend.model_manager import (
     SubModelType,
 )
 from invokeai.backend.model_manager.load.load_default import ModelLoader
-from invokeai.backend.model_manager.load.model_cache.model_cache_default import ModelCache
+from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
 from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
 from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import (
     is_state_dict_likely_flux_control,