add recommended models

Author: Alex O'Connell
Date: 2024-04-06 22:55:52 -04:00
parent d3817a4f8f
commit 5def7669f0
3 changed files with 11 additions and 1 deletion


@@ -112,6 +112,7 @@ from .const import (
     DOMAIN,
     DEFAULT_OPTIONS,
     OPTIONS_OVERRIDES,
+    RECOMMENDED_CHAT_MODELS,
 )
 _LOGGER = logging.getLogger(__name__)
@@ -150,7 +151,12 @@ def STEP_LOCAL_SETUP_EXISTING_DATA_SCHEMA(model_file=None):
 def STEP_LOCAL_SETUP_DOWNLOAD_DATA_SCHEMA(*, chat_model=None, downloaded_model_quantization=None):
     return vol.Schema(
         {
-            vol.Required(CONF_CHAT_MODEL, default=chat_model if chat_model else DEFAULT_CHAT_MODEL): str,
+            vol.Required(CONF_CHAT_MODEL, default=chat_model if chat_model else DEFAULT_CHAT_MODEL): SelectSelector(SelectSelectorConfig(
+                options=RECOMMENDED_CHAT_MODELS,
+                custom_value=True,
+                multiple=False,
+                mode=SelectSelectorMode.DROPDOWN,
+            )),
             vol.Required(CONF_DOWNLOADED_MODEL_QUANTIZATION, default=downloaded_model_quantization if downloaded_model_quantization else DEFAULT_DOWNLOADED_MODEL_QUANTIZATION): vol.In(CONF_DOWNLOADED_MODEL_QUANTIZATION_OPTIONS),
         }
     )


@@ -25,6 +25,7 @@ Devices:
 {{ devices }}"""
 CONF_CHAT_MODEL = "huggingface_model"
 DEFAULT_CHAT_MODEL = "acon96/Home-3B-v3-GGUF"
+RECOMMENDED_CHAT_MODELS = [ "acon96/Home-3B-v3-GGUF", "acon96/Home-1B-v2-GGUF", "TheBloke/Mistral-7B-Instruct-v0.2-GGUF" ]
 CONF_MAX_TOKENS = "max_new_tokens"
 DEFAULT_MAX_TOKENS = 128
 CONF_TOP_K = "top_k"
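
The default model is also the first entry of the new list. A hypothetical variant (not what this commit does) would derive the default from the list so the two values cannot drift apart:

RECOMMENDED_CHAT_MODELS = [
    "acon96/Home-3B-v3-GGUF",
    "acon96/Home-1B-v2-GGUF",
    "TheBloke/Mistral-7B-Instruct-v0.2-GGUF",
]
# Hypothetical: reuse the first recommended model as the default instead of
# repeating the string literal in DEFAULT_CHAT_MODEL.
DEFAULT_CHAT_MODEL = RECOMMENDED_CHAT_MODELS[0]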


@@ -68,6 +68,9 @@ def install_llama_cpp_python(config_dir: str):
     if is_installed("llama-cpp-python"):
+        _LOGGER.info("llama-cpp-python is already installed")
+        # not sure why this is still needed
+        time.sleep(0.1)
         return True
     platform_suffix = platform.machine()
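
For context, is_installed here is presumably the package-presence check the integration already imports (Home Assistant provides one in homeassistant.util.package). A minimal stand-in with the same intent, using only the standard library, might look like:

from importlib.metadata import PackageNotFoundError, version

def is_installed(package: str) -> bool:
    """Return True if the package is installed in the current environment."""
    try:
        version(package)
    except PackageNotFoundError:
        return False
    return True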