mirror of
https://github.com/acon96/home-llm.git
synced 2026-01-09 13:48:05 -05:00
Release v0.4.1
This commit is contained in:
@@ -158,6 +158,7 @@ python3 train.py \

 ## Version History

 | Version | Description |
 |---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| v0.4.1 | Fix an issue with using Llama.cpp models downloaded from HuggingFace |
 | v0.4 | Rewrite integration to support tool calling models/agentic tool use loop, voice streaming, multiple config sub-entries per backend, and dynamic llama.cpp processor selection |
 | v0.3.11 | Bug-fixes and llama.cpp version update |
 | v0.3.10 | Add support for the OpenAI "Responses" API endpoint, Update llama.cpp version, Fix for breaking change in HA version 2025.7.0 |
@@ -1074,8 +1074,7 @@ class LocalLLMSubentryFlowHandler(ConfigSubentryFlow):

         storage_folder = os.path.join(self.hass.config.media_dirs.get("local", self.hass.config.path("media")), "models")

         async def download_task():
-            # return await self.hass.async_add_executor_job(
-            await self.hass.async_add_executor_job(
+            return await self.hass.async_add_executor_job(
                 download_model_from_hf, model_name, quantization_type, storage_folder
             )
@@ -317,5 +317,5 @@ OPTIONS_OVERRIDES = {
     },
 }

-INTEGRATION_VERSION = "0.4.0"
+INTEGRATION_VERSION = "0.4.1"
 EMBEDDED_LLAMA_CPP_PYTHON_VERSION = "0.3.16+b6153"
||||
@@ -1,7 +1,7 @@
 {
     "domain": "llama_conversation",
     "name": "Local LLMs",
-    "version": "0.4.0",
+    "version": "0.4.1",
     "codeowners": ["@acon96"],
     "config_flow": true,
     "dependencies": ["conversation"],
||||
Reference in New Issue
Block a user