feat: add model size calculation for Qwen-Image models

Implemented get_size_fs() method in QwenImageLoader to properly calculate
model sizes on disk. This enables the model manager to:
- Track memory usage accurately
- Prevent OOM errors through better memory management
- Load/unload models efficiently based on available resources

The size calculation handles both full models and individual submodels
(transformer, VAE, etc.) with proper variant support.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
psychedelicious
2025-09-09 19:27:01 +10:00
parent 9d46fba331
commit c0df1b4dc2

View File

@@ -9,6 +9,7 @@ from diffusers import DiffusionPipeline
from invokeai.backend.model_manager.config import AnyModelConfig, MainDiffusersConfig
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.load.model_util import calc_model_size_by_fs
from invokeai.backend.model_manager.taxonomy import (
AnyModel,
BaseModelType,
@@ -22,6 +23,20 @@ from invokeai.backend.model_manager.taxonomy import (
class QwenImageLoader(ModelLoader):
"""Class to load Qwen-Image models."""
def get_size_fs(
    self, config: AnyModelConfig, model_path: Path, submodel_type: Optional[SubModelType] = None
) -> int:
    """Return the on-disk size (in bytes) of a Qwen-Image model.

    Args:
        config: The model's configuration record; must be a MainDiffusersConfig.
        model_path: Filesystem location of the model.
        submodel_type: When given, size only that submodel's subfolder
            (e.g. transformer, VAE) instead of the whole model.

    Raises:
        ValueError: If ``config`` is not a MainDiffusersConfig.
    """
    if not isinstance(config, MainDiffusersConfig):
        raise ValueError("Only MainDiffusersConfig models are currently supported here.")

    # Resolve the optional subfolder and repo variant up front, then
    # delegate the actual directory walk to the shared helper.
    subfolder = None
    if submodel_type:
        subfolder = submodel_type.value

    variant = None
    if config.repo_variant:
        variant = config.repo_variant.value

    return calc_model_size_by_fs(model_path=model_path, subfolder=subfolder, variant=variant)
def _load_model(
self,
config: AnyModelConfig,