# This is an example file with default and example settings.
# You should not copy this whole file into your config.
# Only add the settings you need to change to your config file.

# Internal metadata - do not edit:
schema_version: 4.0.2

# Put user settings here - see https://invoke-ai.github.io/InvokeAI/configuration/:
host: 127.0.0.1
port: 9090
allow_origins: []
allow_credentials: true
allow_methods:
  - '*'
allow_headers:
  - '*'
log_tokenization: false
patchmatch: true
models_dir: models
convert_cache_dir: models/.convert_cache
download_cache_dir: models/.download_cache
legacy_conf_dir: configs
db_dir: databases
outputs_dir: outputs
custom_nodes_dir: nodes
style_presets_dir: style_presets
workflow_thumbnails_dir: workflow_thumbnails
log_handlers:
  - console
log_format: color
log_level: info
log_sql: false
log_level_network: warning
use_memory_db: false
dev_reload: false
profile_graphs: false
profiles_dir: profiles
log_memory_usage: false
device_working_mem_gb: 3.0
enable_partial_loading: false
keep_ram_copy_of_weights: true
lazy_offload: true
device: auto
precision: auto
sequential_guidance: false
attention_type: auto
attention_slice_size: auto
force_tiled_decode: false
pil_compress_level: 1
max_queue_size: 10000
clear_queue_on_startup: false
node_cache_size: 512
hashing_algorithm: blake3_single
remote_api_tokens:
  - url_regex: cool-models.com
    token: my_secret_token
  - url_regex: nifty-models.com
    token: some_other_token
scan_models_on_startup: false