Mirror of https://github.com/invoke-ai/InvokeAI.git

Commit: adjust the convert api - not right just yet
@@ -619,17 +619,12 @@ async def convert_model(
        logger.error(f"The model with key {key} is not a main checkpoint model.")
        raise HTTPException(400, f"The model with key {key} is not a main checkpoint model.")

    # loading the model will convert it into a cached diffusers file
    try:
        cc_size = loader.convert_cache.max_size
        if cc_size == 0:  # temporarily set the convert cache to a positive size so that the cached model is written
            loader._convert_cache.max_size = 1.0
        loader.load_model(model_config, submodel_type=SubModelType.Scheduler)
    finally:
        loader._convert_cache.max_size = cc_size

    # Get the path of the converted model from the loader
    cache_path = loader.convert_cache.cache_path(key)
    converted_model = loader.load_model(model_config, submodel_type=SubModelType.Scheduler)
    # write the converted file to the model cache directory
    raw_model = converted_model.model
    assert hasattr(raw_model, 'save_pretrained')
    raw_model.save_pretrained(cache_path)
    assert cache_path.exists()

    # temporarily rename the original safetensors file so that there is no naming conflict
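For context, save_pretrained is the standard diffusers serialization call: it writes a config.json plus the weight files into the target directory, which is why the route can assert on cache_path afterwards. A minimal standalone sketch of the same save-and-verify round trip (the model id and destination path are assumptions, not values from this commit):

    from pathlib import Path

    from diffusers import AutoencoderKL

    cache_path = Path("converted/my-vae")  # hypothetical destination directory
    model = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")  # any diffusers model works
    model.save_pretrained(cache_path)  # writes config.json plus the weight files
    assert (cache_path / "config.json").exists()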
@@ -28,22 +28,14 @@ class ControlNetLoader(GenericDiffusersLoader):
         submodel_type: Optional[SubModelType] = None,
     ) -> AnyModel:
         if isinstance(config, ControlNetCheckpointConfig):
-            return ControlNetModel.from_single_file(config.path, config=self._app_config.legacy_conf_path / config.config_path)
+            return ControlNetModel.from_single_file(config.path,
+                                                    config=self._app_config.legacy_conf_path / config.config_path,
+                                                    torch_dtype=self._torch_dtype,
+                                                    local_files_only=True,
+                                                    )
         else:
             return super()._load_model(config, submodel_type)

     # def _needs_conversion(self, config: AnyModelConfig, model_path: Path, dest_path: Path) -> bool:
     #     if not isinstance(config, CheckpointConfigBase):
     #         return False
     #     elif (
     #         dest_path.exists()
     #         and (dest_path / "config.json").stat().st_mtime >= (config.converted_at or 0.0)
     #         and (dest_path / "config.json").stat().st_mtime >= model_path.stat().st_mtime
     #     ):
     #         return False
     #     else:
     #         return True

     # def _convert_model(self, config: AnyModelConfig, model_path: Path, output_path: Optional[Path] = None) -> AnyModel:
     #     assert isinstance(config, CheckpointConfigBase)
     #     image_size = (
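For reference, from_single_file is the diffusers entry point for loading a monolithic .safetensors/.ckpt checkpoint directly, and local_files_only=True keeps it from falling back to a Hugging Face Hub download. A minimal standalone sketch of the call being introduced here (the checkpoint path is an assumption):

    import torch
    from diffusers import ControlNetModel

    # Hypothetical local checkpoint path; any SD 1.5 ControlNet .safetensors works.
    controlnet = ControlNetModel.from_single_file(
        "models/control_v11p_sd15_canny.safetensors",
        torch_dtype=torch.float16,  # load the weights directly in half precision
        local_files_only=True,      # fail fast instead of reaching for the Hub
    )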
@@ -107,9 +107,12 @@ class StableDiffusionDiffusersModel(GenericDiffusersLoader):
             load_class = load_classes[config.base][config.variant]
         except KeyError as e:
             raise Exception(f'No diffusers pipeline known for base={config.base}, variant={config.variant}') from e
         print(f'DEBUG: load_class={load_class}')
         original_config_file = self._app_config.legacy_conf_path / config.config_path  # should try without using this...
-        pipeline = load_class.from_single_file(config.path, config=original_config_file)
+        pipeline = load_class.from_single_file(config.path,
+                                               config=original_config_file,
+                                               torch_dtype=self._torch_dtype,
+                                               local_files_only=True,
+                                               )

         # Proactively load the various submodels into the RAM cache so that we don't have to re-convert
         # the entire pipeline every time a new submodel is needed.
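The load_classes lookup above is a nested mapping from (base, variant) to a concrete pipeline class, so unsupported combinations surface as a KeyError. A sketch of the shape such a dispatch table takes (the keys and the selected entry are illustrative, not the actual InvokeAI table):

    from diffusers import StableDiffusionInpaintPipeline, StableDiffusionPipeline, StableDiffusionXLPipeline

    # Hypothetical dispatch table: outer key is the base model family, inner key the variant.
    load_classes = {
        "sd-1": {"normal": StableDiffusionPipeline, "inpaint": StableDiffusionInpaintPipeline},
        "sdxl": {"normal": StableDiffusionXLPipeline},
    }

    try:
        load_class = load_classes["sd-1"]["inpaint"]
    except KeyError as e:
        raise Exception("No diffusers pipeline known for this base/variant") from e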
@@ -34,22 +34,14 @@ class VAELoader(GenericDiffusersLoader):
         submodel_type: Optional[SubModelType] = None,
     ) -> AnyModel:
         if isinstance(config, VAECheckpointConfig):
-            return AutoencoderKL.from_single_file(config.path, config=self._app_config.legacy_conf_path / config.config_path)
+            return AutoencoderKL.from_single_file(config.path,
+                                                  config=self._app_config.legacy_conf_path / config.config_path,
+                                                  torch_dtype=self._torch_dtype,
+                                                  local_files_only=True,
+                                                  )
         else:
             return super()._load_model(config, submodel_type)

     # def _needs_conversion(self, config: AnyModelConfig, model_path: Path, dest_path: Path) -> bool:
     #     if not isinstance(config, CheckpointConfigBase):
     #         return False
     #     elif (
     #         dest_path.exists()
     #         and (dest_path / "config.json").stat().st_mtime >= (config.converted_at or 0.0)
     #         and (dest_path / "config.json").stat().st_mtime >= model_path.stat().st_mtime
     #     ):
     #         return False
     #     else:
     #         return True

     # def _convert_model(self, config: AnyModelConfig, model_path: Path, output_path: Optional[Path] = None) -> AnyModel:
     #     # TODO(MM2): check whether sdxl VAE models convert.
     #     if config.base not in {BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2}:
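The commented-out _needs_conversion bodies in the ControlNet and VAE loaders implement the same idea: compare the mtime of the converted copy's config.json against both the source checkpoint and the recorded conversion timestamp. A standalone sketch of that freshness test (the function name is illustrative):

    from pathlib import Path

    def converted_copy_is_fresh(checkpoint: Path, converted_dir: Path, converted_at: float = 0.0) -> bool:
        """True if the converted diffusers copy is at least as new as its source
        checkpoint and the recorded conversion timestamp, so no re-conversion is needed."""
        marker = converted_dir / "config.json"  # save_pretrained always writes this file
        if not marker.exists():
            return False
        mtime = marker.stat().st_mtime
        return mtime >= converted_at and mtime >= checkpoint.stat().st_mtime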