{
    "config": {
        "error": {
            "download_failed": "The download failed to complete: {exception}",
            "failed_to_connect": "Failed to connect to the remote API: {exception}",
            "missing_model_api": "The selected model is not provided by this API.",
            "missing_model_file": "The provided file does not exist.",
            "other_existing_local": "Another model is already loaded locally. Please unload it or configure a remote model.",
            "unknown": "Unexpected error",
            "pip_wheel_error": "Pip returned an error while installing the wheel! Please check the Home Assistant logs for more details."
        },
        "progress": {
            "download": "Please wait while the model is being downloaded from HuggingFace. This can take a few minutes.",
            "install_local_wheels": "Please wait while Llama.cpp is installed..."
        },
        "step": {
            "local_model": {
                "data": {
                    "downloaded_model_file": "Local file name",
                    "downloaded_model_quantization": "Downloaded model quantization",
                    "huggingface_model": "HuggingFace Model"
                },
                "description": "Please select the model to use.",
                "title": "Select Model"
            },
            "remote_model": {
                "data": {
                    "host": "API Hostname",
                    "huggingface_model": "Model Name",
                    "port": "API Port",
                    "ssl": "Use HTTPS",
                    "openai_api_key": "API Key",
                    "text_generation_webui_admin_key": "Admin Key",
                    "text_generation_webui_preset": "Generation Preset/Character Name",
                    "remote_use_chat_endpoint": "Use chat completions endpoint",
                    "text_generation_webui_chat_mode": "Chat Mode"
                },
                "description": "Provide the connection details for the API that is hosting the model.",
                "title": "Configure connection to remote Model API"
            },
            "pick_backend": {
                "data": {
                    "download_model_from_hf": "Download model from HuggingFace",
                    "use_local_backend": "Use Llama.cpp"
                },
                "description": "Select the backend for running the model. The options are:\n1. Llama.cpp with a model from HuggingFace\n2. Llama.cpp with a model stored on disk\n3. [text-generation-webui API](https://github.com/oobabooga/text-generation-webui)\n4. Generic OpenAI Compatible API\n5. [llama-cpp-python Server](https://llama-cpp-python.readthedocs.io/en/latest/server/)\n6. [Ollama API](https://github.com/jmorganca/ollama/blob/main/docs/api.md)\n\nIf using Llama.cpp locally, make sure you copied the correct wheel file to the same directory as the integration.",
                "title": "Select Backend"
            },
            "model_parameters": {
                "data": {
                    "max_new_tokens": "Maximum tokens to return in response",
                    "prompt": "System Prompt",
                    "prompt_template": "Prompt Format",
                    "temperature": "Temperature",
                    "top_k": "Top K",
                    "top_p": "Top P",
                    "min_p": "Min P",
                    "typical_p": "Typical P",
                    "request_timeout": "Remote Request Timeout (seconds)",
                    "ollama_keep_alive": "Keep Alive/Inactivity Timeout (minutes)",
                    "ollama_json_mode": "JSON Output Mode",
                    "extra_attributes_to_expose": "Additional attributes to expose in the context",
                    "allowed_service_call_arguments": "Arguments allowed to be passed to service calls",
                    "gbnf_grammar": "Enable GBNF Grammar",
                    "gbnf_grammar_file": "GBNF Grammar Filename",
                    "openai_api_key": "API Key",
                    "text_generation_webui_admin_key": "Admin Key",
                    "service_call_regex": "Service Call Regex",
                    "refresh_prompt_per_tern": "Refresh System Prompt Every Turn",
                    "remember_conversation": "Remember conversation",
                    "remember_num_interactions": "Number of past interactions to remember",
                    "in_context_examples": "Enable in-context learning (ICL) examples",
                    "in_context_examples_file": "In-context learning examples CSV filename",
                    "num_in_context_examples": "Number of ICL examples to generate",
                    "text_generation_webui_preset": "Generation Preset/Character Name",
                    "remote_use_chat_endpoint": "Use chat completions endpoint",
                    "text_generation_webui_chat_mode": "Chat Mode",
                    "prompt_caching": "Enable Prompt Caching",
                    "prompt_caching_interval": "Prompt caching minimum refresh interval (seconds)",
                    "context_length": "Context Length",
                    "batch_size": "Batch Size",
                    "n_threads": "Thread Count",
                    "n_batch_threads": "Batch Thread Count"
                },
                "data_description": {
                    "prompt": "See [here](https://github.com/acon96/home-llm/blob/develop/docs/Model%20Prompting.md) for more information on model prompting.",
                    "in_context_examples": "Enable this option if you are using a model that is not specifically fine-tuned for use with this integration.",
                    "remote_use_chat_endpoint": "If enabled, the integration will use the chat completion HTTP endpoint instead of the text completion endpoint.",
                    "extra_attributes_to_expose": "This is the list of Home Assistant attributes that are exposed to the model. It limits how much information the model can see and answer questions about.",
                    "allowed_service_call_arguments": "This is the list of parameters that are allowed to be passed to Home Assistant service calls."
                },
                "description": "Please configure the model according to how it should be prompted. Defaults have been chosen for you based on the selected model.",
                "title": "Configure the selected model"
            }
        }
    },
    "options": {
        "step": {
            "init": {
                "data": {
                    "max_new_tokens": "Maximum tokens to return in response",
                    "prompt": "System Prompt",
                    "prompt_template": "Prompt Format",
                    "temperature": "Temperature",
                    "top_k": "Top K",
                    "top_p": "Top P",
                    "min_p": "Min P",
                    "typical_p": "Typical P",
                    "request_timeout": "Remote Request Timeout (seconds)",
                    "ollama_keep_alive": "Keep Alive/Inactivity Timeout (minutes)",
                    "ollama_json_mode": "JSON Output Mode",
                    "extra_attributes_to_expose": "Additional attributes to expose in the context",
                    "allowed_service_call_arguments": "Arguments allowed to be passed to service calls",
                    "gbnf_grammar": "Enable GBNF Grammar",
                    "gbnf_grammar_file": "GBNF Grammar Filename",
                    "openai_api_key": "API Key",
                    "text_generation_webui_admin_key": "Admin Key",
                    "service_call_regex": "Service Call Regex",
                    "refresh_prompt_per_tern": "Refresh System Prompt Every Turn",
                    "remember_conversation": "Remember conversation",
                    "remember_num_interactions": "Number of past interactions to remember",
                    "in_context_examples": "Enable in-context learning (ICL) examples",
                    "in_context_examples_file": "In-context learning examples CSV filename",
                    "num_in_context_examples": "Number of ICL examples to generate",
                    "text_generation_webui_preset": "Generation Preset/Character Name",
                    "remote_use_chat_endpoint": "Use chat completions endpoint",
                    "text_generation_webui_chat_mode": "Chat Mode",
                    "prompt_caching": "Enable Prompt Caching",
                    "prompt_caching_interval": "Prompt caching minimum refresh interval (seconds)",
                    "context_length": "Context Length",
                    "batch_size": "Batch Size",
                    "n_threads": "Thread Count",
                    "n_batch_threads": "Batch Thread Count"
                }
            }
        },
        "error": {
            "sys_refresh_caching_enabled": "System prompt refresh must be enabled for prompt caching to work!",
            "missing_gbnf_file": "The GBNF file was not found: '{filename}'",
            "missing_icl_file": "The in-context learning example CSV file was not found: '{filename}'"
        }
    },
    "selector": {
        "prompt_template": {
            "options": {
                "chatml": "ChatML",
                "vicuna": "Vicuna",
                "alpaca": "Alpaca",
                "mistral": "Mistral",
                "zephyr": "Zephyr",
                "no_prompt_template": "None"
            }
        },
        "model_backend": {
            "options": {
                "llama_cpp_hf": "Llama.cpp (HuggingFace)",
                "llama_cpp_existing": "Llama.cpp (existing model)",
                "text-generation-webui_api": "text-generation-webui API",
                "generic_openai": "Generic OpenAI Compatible API",
                "llama_cpp_python_server": "llama-cpp-python Server",
                "ollama": "Ollama API"
            }
        },
        "text_generation_webui_chat_mode": {
            "options": {
                "chat": "Chat",
                "instruct": "Instruct",
                "chat-instruct": "Chat-Instruct"
            }
        }
    }
}