mirror of
https://github.com/acon96/home-llm.git
synced 2026-01-08 21:28:05 -05:00
Merge branch 'main' into develop
@@ -160,6 +160,7 @@ python3 train.py \
 ## Version History
 | Version | Description |
 |---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| v0.4.5 | Add support for AI Task entities, Replace custom Ollama API implementation with the official `ollama-python` package to avoid future compatibility issues, Support multiple LLM APIs at once, Fix issues in tool call handling for various backends |
 | v0.4.4 | Fix issue with OpenAI backends appending `/v1` to all URLs, and fix an issue with tools being serialized into the system prompt. |
 | v0.4.3 | Fix an issue with the integration not creating model configs properly during setup |
 | v0.4.2 | Fix the following issues: not correctly setting default model settings during initial setup, non-integers being allowed in numeric config fields, being too strict with finish_reason requirements, and not letting the user clear the active LLM API |

@@ -156,7 +156,7 @@ class LocalLLMTaskEntity(
     def _extract_data(
         self,
         raw_text: str,
-        tool_calls: list | None,
+        tool_calls: list[llm.ToolInput] | None,
         extraction_method: ResultExtractionMethod,
         chat_log: conversation.ChatLog,
         structure: vol.Schema | None,

@@ -178,8 +178,9 @@ class LocalLLMTaskEntity(
 
         if extraction_method == ResultExtractionMethod.TOOL:
             first_tool = next(iter(tool_calls or []), None)
-            if not first_tool or not getattr(first_tool, "tool_args", None):
+            if not first_tool:
                 return None, HomeAssistantError("Please produce at least one tool call with the structured response.")
 
+            structure(first_tool.tool_args)  # validate tool call against vol schema structure
             return ai_task.GenDataTaskResult(
                 conversation_id=chat_log.conversation_id,

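For context on the added `structure(first_tool.tool_args)` line: voluptuous `Schema` objects are callable and raise `vol.Invalid` when the data does not match, so a single call both validates the tool arguments and replaces the old `getattr` presence check. A minimal sketch, with a hypothetical schema standing in for the structure an AI Task actually requests:

    import voluptuous as vol

    # hypothetical structure; the real one comes from the AI Task request
    structure = vol.Schema({
        vol.Required("title"): str,
        vol.Required("rating"): vol.All(int, vol.Range(min=1, max=5)),
    })

    structure({"title": "Kitchen lights", "rating": 4})  # returns the validated dict

    try:
        structure({"title": "Kitchen lights", "rating": "five"})
    except vol.Invalid as err:
        print(f"tool call failed validation: {err}")  # raised for bad tool args
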
@@ -111,7 +111,7 @@ class GenericOpenAIAPIClient(LocalLLMClient):
             ) as response:
                 response.raise_for_status()
                 models_result = await response.json()
-        except:
+        except (asyncio.TimeoutError, aiohttp.ClientResponseError):
             _LOGGER.exception("Failed to get available models")
             return RECOMMENDED_CHAT_MODELS
 
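The bare `except:` removed above would also swallow programming errors and even `asyncio.CancelledError`, so narrowing it to the two expected failure modes is the safer pattern. A minimal self-contained sketch of the same idea, assuming an aiohttp session; `fetch_models`, the URL, and the fallback list are illustrative, not the client's real method:

    import asyncio
    import aiohttp

    FALLBACK_MODELS = ["model-a", "model-b"]  # stand-in for RECOMMENDED_CHAT_MODELS

    async def fetch_models(base_url: str) -> list[str]:
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(
                    f"{base_url}/v1/models",
                    timeout=aiohttp.ClientTimeout(total=10),
                ) as response:
                    response.raise_for_status()  # aiohttp.ClientResponseError on 4xx/5xx
                    models_result = await response.json()
                    return [model["id"] for model in models_result.get("data", [])]
        except (asyncio.TimeoutError, aiohttp.ClientResponseError):
            # only the expected failures: a slow server or an HTTP error status
            return FALLBACK_MODELS
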
@@ -1,7 +1,7 @@
 {
   "domain": "llama_conversation",
   "name": "Local LLMs",
-  "version": "0.4.4",
+  "version": "0.4.5",
   "codeowners": ["@acon96"],
   "config_flow": true,
   "dependencies": ["conversation", "ai_task"],

@@ -279,7 +279,8 @@ def get_oai_formatted_tools(llm_api: llm.APIInstance, domains: list[str]) -> Lis
     result: List[ChatCompletionTool] = []
 
     for tool in llm_api.tools:
-        if tool.name == SERVICE_TOOL_NAME:
+        # when combining with home assistant llm APIs, it adds a prefix to differentiate tools; compare against the suffix here
+        if tool.name.endswith(SERVICE_TOOL_NAME):
             result.extend([{
                 "type": "function",
                 "function": {

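For context on the suffix comparison: when more than one LLM API is active at once, Home Assistant prefixes each tool name with its API id so names stay unique, which breaks an exact-match check. A minimal sketch; the tool names and the prefix format shown here are illustrative, not the exact values the integration sees:

    SERVICE_TOOL_NAME = "HassCallService"  # hypothetical value for illustration

    exposed_tools = [
        "HassCallService",          # single API: bare name
        "assist__HassCallService",  # hypothetical prefixed form with multiple APIs
        "HassTurnOn",               # unrelated tool, should never match
    ]

    for name in exposed_tools:
        if name == SERVICE_TOOL_NAME:
            print(f"exact match (old check): {name}")   # misses the prefixed form
        if name.endswith(SERVICE_TOOL_NAME):
            print(f"suffix match (new check): {name}")  # matches bare and prefixed
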