mirror of
https://github.com/invoke-ai/InvokeAI.git
synced 2026-04-23 03:00:31 -04:00
feat: add model size calculation for Qwen-Image models
Implemented get_size_fs() method in QwenImageLoader to properly calculate model sizes on disk. This enables the model manager to:

- Track memory usage accurately
- Prevent OOM errors through better memory management
- Load/unload models efficiently based on available resources

The size calculation handles both full models and individual submodels (transformer, VAE, etc.) with proper variant support.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -9,6 +9,7 @@ from diffusers import DiffusionPipeline
|
||||
from invokeai.backend.model_manager.config import AnyModelConfig, MainDiffusersConfig
|
||||
from invokeai.backend.model_manager.load.load_default import ModelLoader
|
||||
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
|
||||
from invokeai.backend.model_manager.load.model_util import calc_model_size_by_fs
|
||||
from invokeai.backend.model_manager.taxonomy import (
|
||||
AnyModel,
|
||||
BaseModelType,
|
||||
@@ -22,6 +23,20 @@ from invokeai.backend.model_manager.taxonomy import (
|
||||
class QwenImageLoader(ModelLoader):
|
||||
"""Class to load Qwen-Image models."""
|
||||
|
||||
def get_size_fs(
    self, config: AnyModelConfig, model_path: Path, submodel_type: Optional[SubModelType] = None
) -> int:
    """Return the on-disk size, in bytes, of a Qwen-Image model or one of its submodels.

    Args:
        config: The model's configuration record; must be a ``MainDiffusersConfig``.
        model_path: Filesystem location of the model.
        submodel_type: When given, size only that submodel's subfolder
            (e.g. transformer, VAE) instead of the whole model.

    Raises:
        ValueError: If ``config`` is not a ``MainDiffusersConfig``.
    """
    if not isinstance(config, MainDiffusersConfig):
        raise ValueError("Only MainDiffusersConfig models are currently supported here.")

    # Restrict the scan to the requested submodel's subfolder, when one was given.
    subfolder = submodel_type.value if submodel_type else None
    # Pass along the configured repo variant so variant-specific weight files are counted.
    variant = config.repo_variant.value if config.repo_variant else None
    return calc_model_size_by_fs(model_path=model_path, subfolder=subfolder, variant=variant)
|
||||
|
||||
def _load_model(
|
||||
self,
|
||||
config: AnyModelConfig,
|
||||
|
||||
Reference in New Issue
Block a user