Mirror of https://github.com/acon96/home-llm.git (synced 2026-01-08 05:14:02 -05:00)
Release v0.4.3

@@ -158,6 +158,7 @@ python3 train.py \
 ## Version History
 | Version | Description |
 |---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| v0.4.3 | Fix an issue with the integration not creating model configs properly during setup |
 | v0.4.2 | Fix the following issues: not correctly setting default model settings during initial setup, non-integers being allowed in numeric config fields, being too strict with finish_reason requirements, and not letting the user clear the active LLM API |
 | v0.4.1 | Fix an issue with using Llama.cpp models downloaded from HuggingFace |
 | v0.4 | Rewrite integration to support tool calling models/agentic tool use loop, voice streaming, multiple config sub-entries per backend, and dynamic llama.cpp processor selection |

@@ -337,5 +337,5 @@ def option_overrides(backend_type: str) -> dict[str, Any]:
     },
 }
 
-INTEGRATION_VERSION = "0.4.2"
+INTEGRATION_VERSION = "0.4.3"
 EMBEDDED_LLAMA_CPP_PYTHON_VERSION = "0.3.16+b6153"

@@ -1,7 +1,7 @@
 {
   "domain": "llama_conversation",
   "name": "Local LLMs",
-  "version": "0.4.2",
+  "version": "0.4.3",
   "codeowners": ["@acon96"],
   "config_flow": true,
   "dependencies": ["conversation"],
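
This release bumps the same version string in two places: `INTEGRATION_VERSION` in `const.py` and `"version"` in `manifest.json`. As a minimal sketch of how the two values could be kept from drifting apart, here is a hypothetical pytest check; the paths under `custom_components/llama_conversation/` are an assumption about the repo layout, not something confirmed by this diff:

```python
# Hypothetical version-sync check; file paths are assumptions,
# not confirmed by this commit.
import json
import re
from pathlib import Path

COMPONENT = Path("custom_components/llama_conversation")

def test_manifest_version_matches_const():
    # Read INTEGRATION_VERSION out of const.py with a regex instead of
    # importing it, so the test does not need Home Assistant installed.
    const_src = (COMPONENT / "const.py").read_text()
    match = re.search(r'^INTEGRATION_VERSION\s*=\s*"([^"]+)"', const_src, re.MULTILINE)
    assert match is not None, "INTEGRATION_VERSION not found in const.py"

    manifest = json.loads((COMPONENT / "manifest.json").read_text())
    assert manifest["version"] == match.group(1)
```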