# State dict keys for an XLabs FLUX ControlNet model. Intended to be used for unit tests.
# These keys were extracted from:
# https://huggingface.co/XLabs-AI/flux-controlnet-collections/blob/86ab1e915a389d5857135c00e0d350e9e38a9048/flux-canny-controlnet_v2.safetensors
#
# NOTE: the keys are listed in lexicographic (string-sorted) order, as emitted when
# iterating the safetensors file. Keep them sorted if new keys are ever added, so
# diffs against a freshly-extracted key list stay minimal.
state_dict_keys = [
    "controlnet_blocks.0.bias",
    "controlnet_blocks.0.weight",
    "controlnet_blocks.1.bias",
    "controlnet_blocks.1.weight",
    "double_blocks.0.img_attn.norm.key_norm.scale",
    "double_blocks.0.img_attn.norm.query_norm.scale",
    "double_blocks.0.img_attn.proj.bias",
    "double_blocks.0.img_attn.proj.weight",
    "double_blocks.0.img_attn.qkv.bias",
    "double_blocks.0.img_attn.qkv.weight",
    "double_blocks.0.img_mlp.0.bias",
    "double_blocks.0.img_mlp.0.weight",
    "double_blocks.0.img_mlp.2.bias",
    "double_blocks.0.img_mlp.2.weight",
    "double_blocks.0.img_mod.lin.bias",
    "double_blocks.0.img_mod.lin.weight",
    "double_blocks.0.txt_attn.norm.key_norm.scale",
    "double_blocks.0.txt_attn.norm.query_norm.scale",
    "double_blocks.0.txt_attn.proj.bias",
    "double_blocks.0.txt_attn.proj.weight",
    "double_blocks.0.txt_attn.qkv.bias",
    "double_blocks.0.txt_attn.qkv.weight",
    "double_blocks.0.txt_mlp.0.bias",
    "double_blocks.0.txt_mlp.0.weight",
    "double_blocks.0.txt_mlp.2.bias",
    "double_blocks.0.txt_mlp.2.weight",
    "double_blocks.0.txt_mod.lin.bias",
    "double_blocks.0.txt_mod.lin.weight",
    "double_blocks.1.img_attn.norm.key_norm.scale",
    "double_blocks.1.img_attn.norm.query_norm.scale",
    "double_blocks.1.img_attn.proj.bias",
    "double_blocks.1.img_attn.proj.weight",
    "double_blocks.1.img_attn.qkv.bias",
    "double_blocks.1.img_attn.qkv.weight",
    "double_blocks.1.img_mlp.0.bias",
    "double_blocks.1.img_mlp.0.weight",
    "double_blocks.1.img_mlp.2.bias",
    "double_blocks.1.img_mlp.2.weight",
    "double_blocks.1.img_mod.lin.bias",
    "double_blocks.1.img_mod.lin.weight",
    "double_blocks.1.txt_attn.norm.key_norm.scale",
    "double_blocks.1.txt_attn.norm.query_norm.scale",
    "double_blocks.1.txt_attn.proj.bias",
    "double_blocks.1.txt_attn.proj.weight",
    "double_blocks.1.txt_attn.qkv.bias",
    "double_blocks.1.txt_attn.qkv.weight",
    "double_blocks.1.txt_mlp.0.bias",
    "double_blocks.1.txt_mlp.0.weight",
    "double_blocks.1.txt_mlp.2.bias",
    "double_blocks.1.txt_mlp.2.weight",
    "double_blocks.1.txt_mod.lin.bias",
    "double_blocks.1.txt_mod.lin.weight",
    "guidance_in.in_layer.bias",
    "guidance_in.in_layer.weight",
    "guidance_in.out_layer.bias",
    "guidance_in.out_layer.weight",
    "img_in.bias",
    "img_in.weight",
    "input_hint_block.0.bias",
    "input_hint_block.0.weight",
    "input_hint_block.10.bias",
    "input_hint_block.10.weight",
    "input_hint_block.12.bias",
    "input_hint_block.12.weight",
    "input_hint_block.14.bias",
    "input_hint_block.14.weight",
    "input_hint_block.2.bias",
    "input_hint_block.2.weight",
    "input_hint_block.4.bias",
    "input_hint_block.4.weight",
    "input_hint_block.6.bias",
    "input_hint_block.6.weight",
    "input_hint_block.8.bias",
    "input_hint_block.8.weight",
    "pos_embed_input.bias",
    "pos_embed_input.weight",
    "time_in.in_layer.bias",
    "time_in.in_layer.weight",
    "time_in.out_layer.bias",
    "time_in.out_layer.weight",
    "txt_in.bias",
    "txt_in.weight",
    "vector_in.in_layer.bias",
    "vector_in.in_layer.weight",
    "vector_in.out_layer.bias",
    "vector_in.out_layer.weight",
]