diff --git a/invokeai/backend/model_manager/load/model_cache/torch_module_autocast/custom_modules/custom_module_mixin.py b/invokeai/backend/model_manager/load/model_cache/torch_module_autocast/custom_modules/custom_module_mixin.py
index 0563f3cb36..8bb0db6a27 100644
--- a/invokeai/backend/model_manager/load/model_cache/torch_module_autocast/custom_modules/custom_module_mixin.py
+++ b/invokeai/backend/model_manager/load/model_cache/torch_module_autocast/custom_modules/custom_module_mixin.py
@@ -49,7 +49,10 @@ class CustomModuleMixin:
         # parameters. But, of course, any sub-layers that need to access the actual values of the parameters will fail.
         for param_name in orig_params.keys():
             param = orig_params[param_name]
-            if type(param) is torch.nn.Parameter and type(param.data) is torch.Tensor:
+            if type(param) is torch.Tensor:
+                # Plain tensor (e.g. after cast_to_device moved a Parameter to another device).
+                pass
+            elif type(param) is torch.nn.Parameter and type(param.data) is torch.Tensor:
                 pass
             elif type(param) is GGMLTensor:
                 # Move to device and dequantize here. Doing it in the patch layer can result in redundant casts /
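
Note on the new first branch: it exists because most operations on a torch.nn.Parameter, including device moves, return a plain torch.Tensor (nn.Parameter opts out of subclass preservation via a disabled __torch_function__), so a parameter that has been moved off-device no longer matches the `type(param) is torch.nn.Parameter` check. A minimal sketch of that behavior, assuming only stock PyTorch; `.to(..., copy=True)` stands in here for the repo's cast_to_device helper and is not the actual implementation:

    import torch

    # Start with a real Parameter, as it would appear among a module's parameters.
    param = torch.nn.Parameter(torch.zeros(4))
    assert type(param) is torch.nn.Parameter

    # Moving it to a device returns a new tensor, and nn.Parameter does not
    # preserve its subclass through ops: the result is a plain torch.Tensor.
    moved = param.to("cpu", copy=True)  # stand-in for cast_to_device
    assert type(moved) is torch.Tensor  # no longer an nn.Parameter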