Mirror of https://github.com/invoke-ai/InvokeAI.git, last synced 2026-04-23 03:00:31 -04:00.
Commit message: Add a comment explaining why scale tensors in FLUX models are converted to bfloat16.
This commit is contained in the branches/tags listed on the commit page:
@@ -143,6 +143,8 @@ def convert_bundle_to_flux_transformer_checkpoint(
|
||||
if not k.startswith("model.diffusion_model"):
|
||||
continue
|
||||
if k.endswith("scale"):
|
||||
# Scale math must be done at bfloat16 due to our current flux model
|
||||
# support limitations at inference time
|
||||
v = v.to(dtype=torch.bfloat16)
|
||||
original_state_dict[k.replace("model.diffusion_model.", "")] = v
|
||||
return original_state_dict
|
||||
|
||||
Reference in New Issue
Block a user