Update invokeai/app/api/routers/model_manager.py
Co-authored-by: Ryan Dick <ryanjdick3@gmail.com>
@@ -825,7 +825,12 @@ async def get_starter_models() -> list[StarterModel]:
 async def get_cache_size(cache_type: CacheType = Query(description="The cache type", default=CacheType.RAM)) -> float:
     """Return the current RAM or VRAM cache size setting (in GB)."""
     cache = ApiDependencies.invoker.services.model_manager.load.ram_cache
-    return cache.max_cache_size if cache_type == CacheType.RAM else cache.max_vram_cache_size
+    if cache_type == CacheType.RAM:
+        return cache.max_cache_size
+    elif cache_type == CacheType.VRAM:
+        return cache.max_vram_cache_size
+    else:
+        raise ValueError(f"Unexpected {cache_type=}.")


 @model_manager_router.put(
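For context, a minimal self-contained sketch of how the updated handler behaves, runnable outside InvokeAI. The route path, the APIRouter setup, and the CacheType enum definition are assumptions for illustration only; the diff shows just the handler body, and the real implementation reads the sizes from ApiDependencies.invoker.services.model_manager.load.ram_cache.

# Hedged sketch: the route path and the CacheType definition below are
# assumed, not taken from InvokeAI's source.
from enum import Enum

from fastapi import APIRouter, Query

model_manager_router = APIRouter()  # stand-in for InvokeAI's actual router


class CacheType(str, Enum):
    RAM = "RAM"
    VRAM = "VRAM"


@model_manager_router.get("/model_cache/size")  # hypothetical path
async def get_cache_size(
    cache_type: CacheType = Query(description="The cache type", default=CacheType.RAM),
) -> float:
    """Return the current RAM or VRAM cache size setting (in GB)."""
    # Stand-in constants; the real handler reads these from the RAM cache
    # service (cache.max_cache_size / cache.max_vram_cache_size).
    max_cache_size, max_vram_cache_size = 7.5, 4.0
    if cache_type == CacheType.RAM:
        return max_cache_size
    elif cache_type == CacheType.VRAM:
        return max_vram_cache_size
    else:
        raise ValueError(f"Unexpected {cache_type=}.")

Compared with the one-line conditional it replaces, the explicit if/elif/else fails loudly with a ValueError if a new CacheType member is ever added, instead of silently reporting the VRAM size for any non-RAM value. Since FastAPI already validates the enum at the HTTP boundary, the else branch only guards internal callers and future edits.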