Mirror of https://github.com/acon96/home-llm.git (synced 2026-01-08 21:28:05 -05:00)
Release v0.4.2
@@ -157,7 +157,8 @@ python3 train.py \
 
 ## Version History
 | Version | Description |
-|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| v0.4.2 | Fix the following issues: not correctly setting default model settings during initial setup, non-integers being allowed in numeric config fields, being too strict with finish_reason requirements, and not letting the user clear the active LLM API |
 | v0.4.1 | Fix an issue with using Llama.cpp models downloaded from HuggingFace |
 | v0.4 | Rewrite integration to support tool calling models/agentic tool use loop, voice streaming, multiple config sub-entries per backend, and dynamic llama.cpp processor selection |
 | v0.3.11 | Bug-fixes and llama.cpp version update |
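One of the v0.4.2 fixes above is that non-integers were allowed in numeric config fields. As a minimal sketch of that kind of validation, assuming a voluptuous schema (the validation library Home Assistant config flows use) — the field names and defaults here are hypothetical, not the integration's actual options:

```python
import voluptuous as vol

def strict_int(value):
    """Accept ints and integer-valued strings; reject floats, bools, and junk."""
    if isinstance(value, (bool, float)):
        raise vol.Invalid(f"expected an integer, got {value!r}")
    try:
        return int(value)
    except (TypeError, ValueError):
        raise vol.Invalid(f"expected an integer, got {value!r}")

# Hypothetical options schema; field names are illustrative only.
OPTIONS_SCHEMA = vol.Schema({
    vol.Optional("max_new_tokens", default=128): vol.All(strict_int, vol.Range(min=1)),
    vol.Optional("context_length", default=2048): vol.All(strict_int, vol.Range(min=1)),
})

print(OPTIONS_SCHEMA({"max_new_tokens": "256"}))  # string input coerced to int 256
try:
    OPTIONS_SCHEMA({"context_length": 20.5})      # rejected; a bare int() cast would silently truncate to 20
except vol.Invalid as err:
    print(f"rejected: {err}")
```

The point of the strict validator is to reject floats and non-numeric strings outright rather than silently truncating them to an integer.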
@@ -337,5 +337,5 @@ def option_overrides(backend_type: str) -> dict[str, Any]:
         },
     }
 
-INTEGRATION_VERSION = "0.4.1"
+INTEGRATION_VERSION = "0.4.2"
 EMBEDDED_LLAMA_CPP_PYTHON_VERSION = "0.3.16+b6153"
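The same version string lives both in the `INTEGRATION_VERSION` constant above and in the `manifest.json` below, so a release bump has to touch both files. A hypothetical consistency check one could run before tagging a release — the file paths and the regex are assumptions about the layout, not taken from the repository:

```python
import json
import re
import sys
from pathlib import Path

# Assumed locations: a Home Assistant custom component keeps its manifest at
# custom_components/<domain>/manifest.json, but these exact paths are a guess.
COMPONENT = Path("custom_components/llama_conversation")

manifest_version = json.loads((COMPONENT / "manifest.json").read_text())["version"]

match = re.search(r'^INTEGRATION_VERSION\s*=\s*"([^"]+)"',
                  (COMPONENT / "const.py").read_text(), re.MULTILINE)
const_version = match.group(1) if match else None

if const_version != manifest_version:
    sys.exit(f"version mismatch: manifest.json={manifest_version!r}, "
             f"INTEGRATION_VERSION={const_version!r}")
print(f"ok: both files agree on {manifest_version}")
```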
||||
@@ -1,7 +1,7 @@
 {
   "domain": "llama_conversation",
   "name": "Local LLMs",
-  "version": "0.4.1",
+  "version": "0.4.2",
   "codeowners": ["@acon96"],
   "config_flow": true,
   "dependencies": ["conversation"],