Merge pull request #138 from acon96/release/v0.2.16

Release v0.2.16
Alex O'Connell
2024-05-04 13:37:44 -04:00
committed by GitHub
6 changed files with 23 additions and 7 deletions

@@ -126,6 +126,7 @@ In order to facilitate running the project entirely on the system where Home Ass
## Version History
| Version | Description |
|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| v0.2.16 | Fix for missing huggingface_hub package preventing startup                                                                                                                                                |
| v0.2.15 | Fix startup error when using llama.cpp backend and add flash attention to llama.cpp backend |
| v0.2.14 | Fix llama.cpp wheels + AVX detection |
| v0.2.13 | Add support for Llama 3, build llama.cpp wheels that are compatible with non-AVX systems, fix an error with exposing script entities, fix multiple small Ollama backend issues, and add basic multi-language support |

@@ -270,7 +270,7 @@ class LLaMAAgent(AbstractConversationAgent):
intent_response = intent.IntentResponse(language=user_input.language)
intent_response.async_set_error(
intent.IntentResponseErrorCode.UNKNOWN,
f"Sorry, there was a problem talking to the backend: {err}",
f"Sorry, there was a problem talking to the backend: {repr(err)}",
)
return ConversationResult(
response=intent_response, conversation_id=conversation_id
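
A side note on the {repr(err)} change above: calling str() on an exception raised with no arguments produces an empty string, so the old message could end with nothing after the colon, while repr() always includes the exception class name. A minimal standalone sketch (not part of the commit):

# Why repr(err) is more informative than str(err) in the error message above.
try:
    raise ConnectionError()  # many exceptions stringify to an empty string
except Exception as err:
    print(f"Sorry, there was a problem talking to the backend: {err}")
    # -> "Sorry, there was a problem talking to the backend: "
    print(f"Sorry, there was a problem talking to the backend: {repr(err)}")
    # -> "Sorry, there was a problem talking to the backend: ConnectionError()"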

@@ -70,6 +70,7 @@ PROMPT_TEMPLATE_LLAMA3 = "llama3"
PROMPT_TEMPLATE_NONE = "no_prompt_template"
PROMPT_TEMPLATE_ZEPHYR = "zephyr"
PROMPT_TEMPLATE_ZEPHYR2 = "zephyr2"
+PROMPT_TEMPLATE_ZEPHYR3 = "zephyr3"
DEFAULT_PROMPT_TEMPLATE = PROMPT_TEMPLATE_CHATML
PROMPT_TEMPLATE_DESCRIPTIONS = {
PROMPT_TEMPLATE_CHATML: {
@@ -113,6 +114,12 @@ PROMPT_TEMPLATE_DESCRIPTIONS = {
"assistant": { "prefix": "<|assistant|>\n", "suffix": "</s>" },
"generation_prompt": "<|assistant|>\n"
},
+PROMPT_TEMPLATE_ZEPHYR3: {
+"system": { "prefix": "<|system|>\n", "suffix": "<|end|>" },
+"user": { "prefix": "<|user|>\n", "suffix": "<|end|>" },
+"assistant": { "prefix": "<|assistant|>\n", "suffix": "<|end|>" },
+"generation_prompt": "<|assistant|>\n"
+},
PROMPT_TEMPLATE_LLAMA3: {
"system": { "prefix": "<|start_header_id|>system<|end_header_id|>\n\n", "suffix": "<|eot_id|>"},
"user": { "prefix": "<|start_header_id|>user<|end_header_id|>\n\n", "suffix": "<|eot_id|>"},
@@ -271,8 +278,11 @@ OPTIONS_OVERRIDES = {
"zephyr": {
CONF_PROMPT: DEFAULT_PROMPT_BASE + ICL_EXTRAS,
CONF_PROMPT_TEMPLATE: PROMPT_TEMPLATE_ZEPHYR,
},
"phi-3": {
CONF_PROMPT_TEMPLATE: PROMPT_TEMPLATE_ZEPHYR3
}
}
INTEGRATION_VERSION = "0.2.15"
INTEGRATION_VERSION = "0.2.16"
EMBEDDED_LLAMA_CPP_PYTHON_VERSION = "0.2.69"
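
The new zephyr3 template above follows the Phi-3 chat format: <|system|>/<|user|>/<|assistant|> role headers terminated by <|end|>, with a generation prompt that asks the model to continue as the assistant; the "phi-3" entry in OPTIONS_OVERRIDES presumably selects it by default for Phi-3 models. A rough, self-contained sketch of how a prefix/suffix table like this can be rendered into a prompt string; the render_prompt helper and the message format are illustrative assumptions, not code from the integration:

# Hypothetical helper: turn a chat transcript into a single prompt string
# using a prefix/suffix template like the zephyr3 entry added above.
ZEPHYR3 = {
    "system": {"prefix": "<|system|>\n", "suffix": "<|end|>"},
    "user": {"prefix": "<|user|>\n", "suffix": "<|end|>"},
    "assistant": {"prefix": "<|assistant|>\n", "suffix": "<|end|>"},
    "generation_prompt": "<|assistant|>\n",
}

def render_prompt(messages, template):
    parts = []
    for message in messages:
        role = template[message["role"]]
        parts.append(role["prefix"] + message["content"] + role["suffix"])
    parts.append(template["generation_prompt"])  # model continues as the assistant
    return "".join(parts)

print(render_prompt(
    [{"role": "system", "content": "You control a smart home."},
     {"role": "user", "content": "Turn off the kitchen light."}],
    ZEPHYR3,
))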

@@ -1,7 +1,7 @@
{
"domain": "llama_conversation",
"name": "LLaMA Conversation",
"version": "0.2.15",
"version": "0.2.16",
"codeowners": ["@acon96"],
"config_flow": true,
"dependencies": ["conversation"],
@@ -9,8 +9,8 @@
"integration_type": "service",
"iot_class": "local_polling",
"requirements": [
"requests",
"huggingface-hub",
"webcolors"
"requests==2.31.0",
"huggingface-hub==0.23.0",
"webcolors==1.13"
]
}
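
The requirements are now pinned to exact versions (requests 2.31.0, huggingface-hub 0.23.0, webcolors 1.13), so Home Assistant installs known-good releases instead of whatever happens to be latest. A quick standard-library check of what is actually installed, assuming the distribution names match the pins above:

# Compare installed versions against the pins from manifest.json.
from importlib.metadata import PackageNotFoundError, version

for requirement in ("requests==2.31.0", "huggingface-hub==0.23.0", "webcolors==1.13"):
    name, _, pinned = requirement.partition("==")
    try:
        installed = version(name)
    except PackageNotFoundError:
        installed = "not installed"
    print(f"{name}: pinned {pinned}, installed {installed}")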

@@ -164,6 +164,7 @@
"mistral": "Mistral",
"zephyr": "Zephyr (<|endoftext|>)",
"zephyr2": "Zephyr ('</s>')",
"zephyr3": "Zephyr (<|end|>)",
"llama3": "Llama 3",
"no_prompt_template": "None"
}

@@ -7,7 +7,6 @@ import multiprocessing
import voluptuous as vol
import webcolors
from importlib.metadata import version
-from huggingface_hub import hf_hub_download, HfFileSystem
from homeassistant.requirements import pip_kwargs
from homeassistant.util.package import install_package, is_installed
@@ -48,6 +47,11 @@ def flatten_vol_schema(schema):
return flattened
def download_model_from_hf(model_name: str, quantization_type: str, storage_folder: str):
+try:
+from huggingface_hub import hf_hub_download, HfFileSystem
+except Exception as ex:
+raise Exception(f"Failed to import huggingface-hub library. Please re-install the integration.") from ex
fs = HfFileSystem()
potential_files = [ f for f in fs.glob(f"{model_name}/*.gguf") ]
wanted_file = [f for f in potential_files if (f".{quantization_type.lower()}." in f or f".{quantization_type.upper()}." in f)]
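
For readers following the download path: with the lazy import in place, HfFileSystem().glob(f"{model_name}/*.gguf") lists the GGUF files in the Hugging Face repo, and the comprehension above keeps those whose filename embeds the requested quantization level; the matched file is then presumably fetched with the imported hf_hub_download. A self-contained sketch of that filename filter with illustrative names, no network access required:

# Illustrative filenames; in the integration the list comes from HfFileSystem.glob().
potential_files = [
    "example-user/Example-3B-GGUF/example-3b.q4_k_m.gguf",
    "example-user/Example-3B-GGUF/example-3b.q8_0.gguf",
]
quantization_type = "Q4_K_M"

wanted_file = [
    f for f in potential_files
    if f".{quantization_type.lower()}." in f or f".{quantization_type.upper()}." in f
]
print(wanted_file)  # -> ['example-user/Example-3B-GGUF/example-3b.q4_k_m.gguf']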