diff --git a/invokeai/backend/args.py b/invokeai/backend/args.py
index b961658fde..952b799d70 100644
--- a/invokeai/backend/args.py
+++ b/invokeai/backend/args.py
@@ -490,7 +490,7 @@ class Args(object):
             "-z",
             type=int,
             default=6,
-            choices=range(0, 9),
+            choices=range(0, 10),
             dest="png_compression",
             help="level of PNG compression, from 0 (none) to 9 (maximum). Default is 6.",
         )
@@ -943,7 +943,6 @@ class Args(object):
             "--png_compression",
             "-z",
             type=int,
-            default=6,
             choices=range(0, 10),
             dest="png_compression",
             help="level of PNG compression, from 0 (none) to 9 (maximum). [6]",
diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
index ae5550880a..793ba024cf 100644
--- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
+++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
@@ -1075,9 +1075,10 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
         dlogging.set_verbosity_error()

         checkpoint = (
-            load_file(checkpoint_path)
-            if Path(checkpoint_path).suffix == ".safetensors"
-            else torch.load(checkpoint_path)
+            torch.load(checkpoint_path)
+            if Path(checkpoint_path).suffix == ".ckpt"
+            else load_file(checkpoint_path)
+        )

         cache_dir = global_cache_dir("hub")
         pipeline_class = (
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index 84e2ab378b..9464057f71 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -732,9 +732,9 @@ class ModelManager(object):

         # another round of heuristics to guess the correct config file.
         checkpoint = (
-            safetensors.torch.load_file(model_path)
-            if model_path.suffix == ".safetensors"
-            else torch.load(model_path)
+            torch.load(model_path)
+            if model_path.suffix == ".ckpt"
+            else safetensors.torch.load_file(model_path)
         )

         # additional probing needed if no config file provided