mirror of
https://github.com/invoke-ai/InvokeAI.git
synced 2026-04-23 03:00:31 -04:00
Add LoRA metadata import/export scripts with JSON support
Co-authored-by: kent <kent@invoke.ai>
This commit is contained in:
263
scripts/export_lora_metadata.py
Executable file
263
scripts/export_lora_metadata.py
Executable file
@@ -0,0 +1,263 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Export LoRA metadata from InvokeAI database to JSON files.
|
||||
|
||||
This script exports LoRA metadata to JSON files with the following format:
|
||||
{
|
||||
"description": "",
|
||||
"sd version": "Unknown",
|
||||
"activation text": "",
|
||||
"preferred weight": 0,
|
||||
"negative text": "",
|
||||
"notes": ""
|
||||
}
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.model_records.model_records_sql import ModelRecordServiceSQL
|
||||
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
|
||||
from invokeai.backend.model_manager.config import AnyModelConfig
|
||||
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
|
||||
def map_base_model_to_sd_version(base_model: BaseModelType) -> str:
    """Translate a BaseModelType enum member into its human-readable SD version label.

    Any base type without an explicit label falls through to "Unknown".
    """
    if base_model is BaseModelType.StableDiffusion1:
        return "SD 1.5"
    if base_model is BaseModelType.StableDiffusion2:
        return "SD 2.x"
    if base_model is BaseModelType.StableDiffusionXL:
        return "SDXL"
    if base_model is BaseModelType.Flux:
        return "FLUX"
    return "Unknown"
|
||||
|
||||
|
||||
def parse_description(description: Optional[str]) -> Dict[str, Any]:
    """Split a free-form model description into its structured components.

    Recognizes "Preferred weight:", "Negative prompt:" and "Notes:" marker
    lines. Text before the first marker becomes the plain description;
    lines after a "Notes:" marker are appended to the notes. Lines that
    follow the weight or negative-prompt markers (without a later marker)
    are discarded, matching the export format round-trip.

    Returns a dict with keys: description, preferred_weight, negative_text, notes.
    """
    parsed: Dict[str, Any] = {
        "description": "",
        "preferred_weight": 0,
        "negative_text": "",
        "notes": "",
    }

    if not description:
        return parsed

    section = "description"
    buffer = []

    for raw_line in description.split("\n"):
        text = raw_line.strip()

        if text.startswith("Preferred weight:"):
            # Flush any description text accumulated before this marker.
            if section == "description" and buffer:
                parsed["description"] = "\n".join(buffer).strip()

            match = re.search(r"Preferred weight:\s*([\d.]+)", text)
            if match:
                try:
                    parsed["preferred_weight"] = float(match.group(1))  # type: ignore
                except ValueError:
                    # e.g. the regex matched a lone "." — keep the default.
                    pass
            section, buffer = "after_weight", []

        elif text.startswith("Negative prompt:"):
            parsed["negative_text"] = text[len("Negative prompt:"):].strip()
            section, buffer = "after_negative", []

        elif text.startswith("Notes:"):
            note = text[len("Notes:"):].strip()
            parsed["notes"] = note
            section = "notes"
            buffer = [note] if note else []

        elif text and section in ("notes", "description"):
            # Continuation line for an open description/notes section.
            buffer.append(text)

    # Flush whichever section was still open at end of input.
    if section == "description" and buffer:
        parsed["description"] = "\n".join(buffer).strip()
    elif section == "notes" and buffer:
        parsed["notes"] = "\n".join(buffer).strip()

    return parsed
|
||||
|
||||
|
||||
def export_lora_metadata(lora_model: AnyModelConfig) -> Dict[str, Any]:
    """Build the export JSON structure for a single LoRA model record.

    The model's description is unpacked into structured fields via
    parse_description(); trigger phrases (when present) become a sorted,
    comma-separated "activation text" string.
    """
    parsed_fields = parse_description(lora_model.description)

    # Trigger phrases may be absent on some configs; default to empty text.
    phrases = getattr(lora_model, 'trigger_phrases', None)
    trigger_text = ", ".join(sorted(phrases)) if phrases else ""

    return {
        "description": parsed_fields["description"],
        "sd version": map_base_model_to_sd_version(lora_model.base),
        "activation text": trigger_text,
        "preferred weight": parsed_fields["preferred_weight"],
        "negative text": parsed_fields["negative_text"],
        "notes": parsed_fields["notes"]
    }
|
||||
|
||||
|
||||
def main():
    """CLI entry point: export LoRA metadata records to JSON files.

    Connects to the InvokeAI SQLite database, selects LoRA models (by key,
    by name, or all of them), and writes one JSON file per model into the
    chosen output directory. Exits with status 1 on lookup failures or when
    no models are found.
    """
    parser = argparse.ArgumentParser(
        description="Export LoRA metadata from InvokeAI database to JSON files"
    )
    parser.add_argument(
        "--output-dir",
        type=Path,
        default=Path("."),
        help="Directory to save JSON files (default: current directory)",
    )
    parser.add_argument(
        "--model-name",
        type=str,
        help="Export only the specified LoRA model by name",
    )
    parser.add_argument(
        "--model-key",
        type=str,
        help="Export only the specified LoRA model by key",
    )
    parser.add_argument(
        "--filename-pattern",
        type=str,
        default="{name}.json",
        help="Filename pattern for JSON files (default: {name}.json). "
        "Available placeholders: {name}, {key}, {base}",
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="Overwrite existing JSON files",
    )
    parser.add_argument(
        "--pretty",
        action="store_true",
        help="Pretty-print JSON output",
    )

    args = parser.parse_args()

    # Initialize configuration and services
    config = InvokeAIAppConfig.get_config()
    logger = InvokeAILogger.get_logger("export_lora_metadata")

    # Initialize database (direct SQL record service, no full app startup)
    db = SqliteDatabase(db_path=config.db_path, logger=logger)
    model_record_service = ModelRecordServiceSQL(db, logger)

    # Create output directory if needed
    args.output_dir.mkdir(parents=True, exist_ok=True)

    # Get LoRA models to export: --model-key wins over --model-name;
    # with neither, all LoRA models are exported.
    lora_models = []

    if args.model_key:
        try:
            model = model_record_service.get_model(args.model_key)
            if model.type != ModelType.LoRA:
                print(f"Error: Model {args.model_key} is not a LoRA model", file=sys.stderr)
                sys.exit(1)
            lora_models = [model]
        except Exception:
            # NOTE(review): assumes get_model raises on a missing key — confirm;
            # SystemExit from the branch above is not caught here.
            print(f"Error: Model with key {args.model_key} not found", file=sys.stderr)
            sys.exit(1)

    elif args.model_name:
        models = model_record_service.search_by_attr(
            model_name=args.model_name,
            model_type=ModelType.LoRA
        )
        if not models:
            print(f"Error: No LoRA model found with name '{args.model_name}'", file=sys.stderr)
            sys.exit(1)
        lora_models = models

    else:
        # Export all LoRA models
        lora_models = model_record_service.search_by_attr(model_type=ModelType.LoRA)

    if not lora_models:
        print("No LoRA models found in database", file=sys.stderr)
        sys.exit(1)

    print(f"Exporting {len(lora_models)} LoRA model(s)...")

    # Export each model
    exported_count = 0
    skipped_count = 0

    for lora_model in lora_models:
        # Generate filename from the user-supplied pattern
        filename = args.filename_pattern.format(
            name=lora_model.name,
            key=lora_model.key,
            base=lora_model.base.value
        )

        # Sanitize filename: replace characters invalid on common filesystems
        filename = re.sub(r'[<>:"/\\|?*]', '_', filename)

        output_path = args.output_dir / filename

        # Check if file exists; skip unless --overwrite was given
        if output_path.exists() and not args.overwrite:
            print(f"Skipping {lora_model.name}: {output_path} already exists")
            skipped_count += 1
            continue

        # Export metadata
        metadata = export_lora_metadata(lora_model)

        # Write JSON file; a failure for one model does not abort the run
        try:
            with open(output_path, "w", encoding="utf-8") as f:
                if args.pretty:
                    json.dump(metadata, f, indent=2, ensure_ascii=False)
                else:
                    json.dump(metadata, f, ensure_ascii=False)

            print(f"Exported {lora_model.name} → {output_path}")
            exported_count += 1

        except Exception as e:
            print(f"Error exporting {lora_model.name}: {e}", file=sys.stderr)

    # Summary
    print(f"\nExport complete:")
    print(f" Exported: {exported_count}")
    if skipped_count > 0:
        print(f" Skipped: {skipped_count} (use --overwrite to replace)")
|
||||
|
||||
|
||||
# Script entry point: run the exporter when executed directly.
if __name__ == "__main__":
    main()
|
||||
288
scripts/import_lora_metadata.py
Executable file
288
scripts/import_lora_metadata.py
Executable file
@@ -0,0 +1,288 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Import LoRA metadata from JSON files into InvokeAI database.
|
||||
|
||||
This script reads JSON files with the following format:
|
||||
{
|
||||
"description": "",
|
||||
"sd version": "Unknown",
|
||||
"activation text": "",
|
||||
"preferred weight": 0,
|
||||
"negative text": "",
|
||||
"notes": ""
|
||||
}
|
||||
|
||||
And imports the metadata into existing LoRA models in the InvokeAI database.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.model_records import ModelRecordChanges, ModelRecordServiceBase
|
||||
from invokeai.app.services.model_records.model_records_sql import ModelRecordServiceSQL
|
||||
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
|
||||
from invokeai.backend.model_manager.config import AnyModelConfig
|
||||
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
|
||||
def map_sd_version_to_base_model(sd_version: str) -> Optional[BaseModelType]:
    """Map an SD version string to the corresponding BaseModelType.

    Matching is case-insensitive and substring-based. "xl"/"sdxl" and
    "flux" are tested before the bare digit checks so that strings such as
    "FLUX.1" are not misclassified as SD 1.x/2.x (the previous ordering
    mapped any flux string containing a digit to a Stable Diffusion base).

    Returns None for unrecognized strings, in which case callers skip the
    base-model update.
    """
    version = sd_version.lower()

    if "xl" in version or "sdxl" in version:
        return BaseModelType.StableDiffusionXL
    if "flux" in version:
        return BaseModelType.Flux
    if "2" in version:
        return BaseModelType.StableDiffusion2
    if "1" in version:
        return BaseModelType.StableDiffusion1
    return None  # Will not update base model if unknown
|
||||
|
||||
|
||||
def build_description(json_data: Dict[str, Any]) -> str:
    """Assemble the database description field from the JSON metadata.

    The plain description, preferred weight, negative prompt, and notes
    become blank-line-separated paragraphs; empty or zero-valued fields
    are omitted. Returns "" when nothing is present.
    """
    paragraphs = []

    plain = json_data.get("description")
    if plain:
        paragraphs.append(plain)

    weight = json_data.get("preferred weight")
    if weight and weight != 0:
        paragraphs.append(f"Preferred weight: {weight}")

    negative = json_data.get("negative text")
    if negative:
        paragraphs.append(f"Negative prompt: {negative}")

    notes = json_data.get("notes")
    if notes:
        paragraphs.append(f"Notes: {notes}")

    return "\n\n".join(paragraphs) if paragraphs else ""
|
||||
|
||||
|
||||
def process_lora_metadata(
    model_record_service: ModelRecordServiceBase,
    lora_model: AnyModelConfig,
    json_data: Dict[str, Any],
    update_base_model: bool = False,
) -> bool:
    """Apply metadata from a parsed JSON dict to one LoRA model record.

    Maps "activation text" to trigger phrases, rebuilds the description from
    the description/weight/negative/notes fields, and optionally updates the
    base model from "sd version".

    Returns True when the record was updated; False when there was nothing
    to change or the database update failed.
    """
    changes = ModelRecordChanges()

    # "activation text" is a comma-separated list of trigger phrases.
    if json_data.get("activation text"):
        activation_texts = [text.strip() for text in json_data["activation text"].split(",")]
        changes.trigger_phrases = set(activation_texts)

    # Fold description/weight/negative/notes into a single description string.
    description = build_description(json_data)
    if description:
        changes.description = description

    # Only touch the base model when explicitly requested and recognizable.
    if update_base_model and json_data.get("sd version"):
        base_model = map_sd_version_to_base_model(json_data["sd version"])
        if base_model:
            changes.base = base_model

    # Skip the database round-trip entirely when nothing would change.
    if changes.model_dump(exclude_none=True):
        try:
            model_record_service.update_model(lora_model.key, changes)
            return True
        except Exception as e:
            # Fix: report on stderr like every other error in these scripts
            # (previously this went to stdout, polluting progress output).
            print(f"Error updating model {lora_model.name}: {e}", file=sys.stderr)
            return False

    return False
|
||||
|
||||
|
||||
def main():
    """CLI entry point: import LoRA metadata from JSON file(s).

    Parses command-line options, opens the InvokeAI SQLite database, then
    either processes a single JSON file or (with --batch) every *.json file
    in the given directory. Exits with status 1 on batch-mode argument
    errors; per-file errors are reported by process_single_file().
    """
    parser = argparse.ArgumentParser(
        description="Import LoRA metadata from JSON files into InvokeAI database"
    )
    parser.add_argument(
        "json_file",
        type=Path,
        help="Path to JSON file containing LoRA metadata",
    )
    parser.add_argument(
        "--model-name",
        type=str,
        help="Name of the LoRA model to update (if not specified, will try to match based on filename)",
    )
    parser.add_argument(
        "--model-key",
        type=str,
        help="Key of the LoRA model to update (takes precedence over --model-name)",
    )
    parser.add_argument(
        "--update-base-model",
        action="store_true",
        help="Update the base model type based on 'sd version' field",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Show what would be updated without making changes",
    )
    parser.add_argument(
        "--batch",
        action="store_true",
        help="Process multiple JSON files in batch mode (json_file should be a directory)",
    )

    args = parser.parse_args()

    # Initialize configuration and services
    config = InvokeAIAppConfig.get_config()
    logger = InvokeAILogger.get_logger("import_lora_metadata")

    # Initialize database (direct SQL record service, no full app startup)
    db = SqliteDatabase(db_path=config.db_path, logger=logger)
    model_record_service = ModelRecordServiceSQL(db, logger)

    # Process single file or batch
    if args.batch:
        if not args.json_file.is_dir():
            print(f"Error: {args.json_file} is not a directory", file=sys.stderr)
            sys.exit(1)

        json_files = list(args.json_file.glob("*.json"))
        if not json_files:
            print(f"No JSON files found in {args.json_file}", file=sys.stderr)
            sys.exit(1)

        print(f"Found {len(json_files)} JSON files to process")

        # Each file is handled independently; one failure does not stop the batch.
        for json_file in json_files:
            process_single_file(
                json_file, model_record_service, args, logger
            )
    else:
        process_single_file(
            args.json_file, model_record_service, args, logger
        )
|
||||
|
||||
|
||||
def process_single_file(
    json_file: Path,
    model_record_service: ModelRecordServiceBase,
    args: argparse.Namespace,
    logger: Any,
) -> None:
    """Process a single JSON metadata file.

    Loads the JSON, resolves the target LoRA model (by --model-key, then
    --model-name, then by matching the JSON filename against model names),
    and either previews (--dry-run) or applies the metadata update. All
    failures are reported to stderr and cause an early return rather than
    raising, so batch mode can continue with the next file.
    """
    if not json_file.exists():
        print(f"Error: {json_file} does not exist", file=sys.stderr)
        return

    try:
        with open(json_file, "r") as f:
            json_data = json.load(f)
    except json.JSONDecodeError as e:
        print(f"Error reading {json_file}: {e}", file=sys.stderr)
        return

    # Find the LoRA model to update
    lora_model = None

    if args.model_key:
        # Explicit key: direct lookup, must be a LoRA.
        try:
            lora_model = model_record_service.get_model(args.model_key)
            if lora_model.type != ModelType.LoRA:
                print(f"Error: Model {args.model_key} is not a LoRA model", file=sys.stderr)
                return
        except Exception:
            # NOTE(review): assumes get_model raises on a missing key — confirm.
            print(f"Error: Model with key {args.model_key} not found", file=sys.stderr)
            return
    elif args.model_name:
        # Search for LoRA by name; an ambiguous name is an error.
        models = model_record_service.search_by_attr(
            model_name=args.model_name,
            model_type=ModelType.LoRA
        )
        if not models:
            print(f"Error: No LoRA model found with name '{args.model_name}'", file=sys.stderr)
            return
        elif len(models) > 1:
            print(f"Error: Multiple LoRA models found with name '{args.model_name}':", file=sys.stderr)
            for model in models:
                print(f" - {model.key}: {model.name} ({model.base})")
            print("Please specify --model-key to select one", file=sys.stderr)
            return
        lora_model = models[0]
    else:
        # Try to match based on filename (stem = filename without extension)
        base_name = json_file.stem
        models = model_record_service.search_by_attr(
            model_name=base_name,
            model_type=ModelType.LoRA
        )
        if not models:
            # No exact name match — fall back to a case-insensitive
            # substring match over all LoRA names.
            all_loras = model_record_service.search_by_attr(model_type=ModelType.LoRA)
            matches = [m for m in all_loras if base_name.lower() in m.name.lower()]

            if not matches:
                print(f"Error: No LoRA model found matching filename '{base_name}'", file=sys.stderr)
                return
            elif len(matches) > 1:
                print(f"Error: Multiple LoRA models found matching '{base_name}':", file=sys.stderr)
                for model in matches:
                    print(f" - {model.key}: {model.name} ({model.base})")
                print("Please specify --model-name or --model-key", file=sys.stderr)
                return
            lora_model = matches[0]
        elif len(models) > 1:
            print(f"Error: Multiple LoRA models found with name '{base_name}':", file=sys.stderr)
            for model in models:
                print(f" - {model.key}: {model.name} ({model.base})")
            print("Please specify --model-key to select one", file=sys.stderr)
            return
        else:
            lora_model = models[0]

    # Display current state before previewing/applying changes
    print(f"\nProcessing: {json_file.name}")
    print(f"Target LoRA: {lora_model.name} (key: {lora_model.key})")
    print(f"Current base model: {lora_model.base}")

    if args.dry_run:
        # Preview only — mirrors the field mapping in process_lora_metadata()
        # without touching the database.
        print("\n--- DRY RUN MODE ---")
        print("Proposed changes:")

        if json_data.get("activation text"):
            print(f" Trigger phrases: {json_data['activation text']}")

        description = build_description(json_data)
        if description:
            # Truncate long descriptions to keep the preview readable.
            print(f" Description: {description[:100]}..." if len(description) > 100 else f" Description: {description}")

        if args.update_base_model and json_data.get("sd version"):
            base_model = map_sd_version_to_base_model(json_data["sd version"])
            if base_model:
                print(f" Base model: {lora_model.base} → {base_model}")

        print("--- END DRY RUN ---\n")
    else:
        # Apply the updates
        success = process_lora_metadata(
            model_record_service,
            lora_model,
            json_data,
            args.update_base_model
        )

        if success:
            print("✓ Successfully updated metadata")
        else:
            print("✗ No changes made")
|
||||
|
||||
|
||||
# Script entry point: run the importer when executed directly.
if __name__ == "__main__":
    main()
|
||||
136
scripts/lora_metadata_tools.md
Normal file
136
scripts/lora_metadata_tools.md
Normal file
@@ -0,0 +1,136 @@
|
||||
# LoRA Metadata Import/Export Tools
|
||||
|
||||
These scripts allow you to import and export LoRA metadata between JSON files and the InvokeAI database.
|
||||
|
||||
## JSON Format
|
||||
|
||||
The JSON format used by these tools is:
|
||||
|
||||
```json
|
||||
{
|
||||
"description": "Description of the LoRA",
|
||||
"sd version": "SDXL",
|
||||
"activation text": "trigger1, trigger2",
|
||||
"preferred weight": 0.8,
|
||||
"negative text": "negative prompts to avoid",
|
||||
"notes": "Additional notes about the LoRA"
|
||||
}
|
||||
```
|
||||
|
||||
## Import Script: `import_lora_metadata.py`
|
||||
|
||||
Imports metadata from JSON files into existing LoRA models in the InvokeAI database.
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
# Import metadata for a single LoRA (matches by filename)
|
||||
python scripts/import_lora_metadata.py my_lora.json
|
||||
|
||||
# Import metadata by specifying the model name
|
||||
python scripts/import_lora_metadata.py metadata.json --model-name "My LoRA Model"
|
||||
|
||||
# Import metadata by specifying the model key
|
||||
python scripts/import_lora_metadata.py metadata.json --model-key "abc123def456"
|
||||
|
||||
# Dry run to see what would be changed
|
||||
python scripts/import_lora_metadata.py my_lora.json --dry-run
|
||||
|
||||
# Update the base model type based on "sd version" field
|
||||
python scripts/import_lora_metadata.py my_lora.json --update-base-model
|
||||
|
||||
# Batch import multiple JSON files from a directory
|
||||
python scripts/import_lora_metadata.py /path/to/json/directory --batch
|
||||
```
|
||||
|
||||
### Field Mappings
|
||||
|
||||
- `activation text` → `trigger_phrases` (comma-separated list)
|
||||
- `description` → `description`
|
||||
- `preferred weight`, `negative text`, `notes` → Combined into `description` field
|
||||
- `sd version` → `base` (when `--update-base-model` is used)
|
||||
|
||||
### SD Version Mapping
|
||||
|
||||
When using `--update-base-model`, the script maps SD versions as follows:
|
||||
- Contains "xl" or "sdxl" → StableDiffusionXL
|
||||
- Contains "2" → StableDiffusion2
|
||||
- Contains "1" or "1.5" → StableDiffusion1
|
||||
- Contains "flux" → Flux
|
||||
- Other → No update
|
||||
|
||||
## Export Script: `export_lora_metadata.py`
|
||||
|
||||
Exports LoRA metadata from the InvokeAI database to JSON files.
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
# Export all LoRA models to current directory
|
||||
python scripts/export_lora_metadata.py
|
||||
|
||||
# Export to specific directory
|
||||
python scripts/export_lora_metadata.py --output-dir /path/to/output
|
||||
|
||||
# Export specific LoRA by name
|
||||
python scripts/export_lora_metadata.py --model-name "My LoRA Model"
|
||||
|
||||
# Export specific LoRA by key
|
||||
python scripts/export_lora_metadata.py --model-key "abc123def456"
|
||||
|
||||
# Custom filename pattern
|
||||
python scripts/export_lora_metadata.py --filename-pattern "{base}_{name}.json"
|
||||
|
||||
# Pretty-print JSON output
|
||||
python scripts/export_lora_metadata.py --pretty
|
||||
|
||||
# Overwrite existing files
|
||||
python scripts/export_lora_metadata.py --overwrite
|
||||
```
|
||||
|
||||
### Filename Patterns
|
||||
|
||||
Available placeholders for `--filename-pattern`:
|
||||
- `{name}` - LoRA model name
|
||||
- `{key}` - LoRA model key/ID
|
||||
- `{base}` - Base model type (e.g., "sdxl", "sd-1", etc.)
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1: Import metadata for a newly added LoRA
|
||||
|
||||
1. Add a LoRA model to InvokeAI (e.g., `anime_style_v2.safetensors`)
|
||||
2. Create a JSON file with metadata (`anime_style_v2.json`):
|
||||
```json
|
||||
{
|
||||
"description": "Anime style LoRA trained on modern anime artwork",
|
||||
"sd version": "SDXL",
|
||||
"activation text": "anime style, modern anime",
|
||||
"preferred weight": 0.7,
|
||||
"negative text": "realistic, photorealistic",
|
||||
"notes": "Works best with anime-focused base models"
|
||||
}
|
||||
```
|
||||
3. Import the metadata:
|
||||
```bash
|
||||
python scripts/import_lora_metadata.py anime_style_v2.json
|
||||
```
|
||||
|
||||
### Example 2: Batch export and import
|
||||
|
||||
1. Export all LoRA metadata:
|
||||
```bash
|
||||
python scripts/export_lora_metadata.py --output-dir ./lora_metadata --pretty
|
||||
```
|
||||
2. Edit the JSON files as needed
|
||||
3. Import all metadata back:
|
||||
```bash
|
||||
python scripts/import_lora_metadata.py ./lora_metadata --batch
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- The import script requires that LoRA models already exist in the InvokeAI database
|
||||
- When importing, the script will try to match JSON filenames to LoRA model names
|
||||
- Use `--dry-run` to preview changes before applying them
|
||||
- On import, the description field is rebuilt from the JSON data (description, preferred weight, negative text, and notes combined), replacing any existing description; trigger phrases are likewise replaced by the "activation text" list
|
||||
Reference in New Issue
Block a user