Merge branch 'main' into develop

Alex O'Connell
2025-12-14 20:23:47 -05:00
5 changed files with 8 additions and 5 deletions

@@ -160,6 +160,7 @@ python3 train.py \
 ## Version History
 | Version | Description |
 |---------|-------------|
+| v0.4.5 | Add support for AI Task entities, replace the custom Ollama API implementation with the official `ollama-python` package to avoid future compatibility issues, support multiple LLM APIs at once, and fix issues in tool call handling for various backends |
 | v0.4.4 | Fix an issue with OpenAI backends appending `/v1` to all URLs, and fix an issue with tools being serialized into the system prompt. |
 | v0.4.3 | Fix an issue with the integration not creating model configs properly during setup |
 | v0.4.2 | Fix the following issues: not correctly setting default model settings during initial setup, non-integers being allowed in numeric config fields, being too strict with `finish_reason` requirements, and not letting the user clear the active LLM API |
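
The v0.4.5 entry above mentions moving to the official `ollama-python` package. For reference, a minimal sketch of that client follows; the host, model, and prompt are placeholders, not the integration's actual wiring:

# Minimal sketch of the official ollama-python AsyncClient; the host and
# model are placeholders rather than the integration's real configuration.
import asyncio
from ollama import AsyncClient

async def main() -> None:
    client = AsyncClient(host="http://localhost:11434")
    response = await client.chat(
        model="llama3.1",
        messages=[{"role": "user", "content": "Turn on the kitchen lights."}],
    )
    print(response["message"]["content"])

asyncio.run(main())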

@@ -156,7 +156,7 @@ class LocalLLMTaskEntity(
     def _extract_data(
         self,
         raw_text: str,
-        tool_calls: list | None,
+        tool_calls: list[llm.ToolInput] | None,
         extraction_method: ResultExtractionMethod,
         chat_log: conversation.ChatLog,
         structure: vol.Schema | None,
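
For context, the `llm.ToolInput` named in the new annotation is Home Assistant's container for a parsed tool call, roughly the following shape; this is a from-memory sketch, not the authoritative definition:

# Approximate shape of homeassistant.helpers.llm.ToolInput as consumed above;
# a from-memory sketch for orientation, not the authoritative class.
from dataclasses import dataclass
from typing import Any

@dataclass
class ToolInput:
    tool_name: str
    tool_args: dict[str, Any]
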
@@ -178,8 +178,9 @@ class LocalLLMTaskEntity(
         if extraction_method == ResultExtractionMethod.TOOL:
             first_tool = next(iter(tool_calls or []), None)
-            if not first_tool or not getattr(first_tool, "tool_args", None):
+            if not first_tool:
+                return None, HomeAssistantError("Please produce at least one tool call with the structured response.")
             structure(first_tool.tool_args)  # validate the tool call against the vol schema structure
             return ai_task.GenDataTaskResult(
                 conversation_id=chat_log.conversation_id,
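
In isolation, the new TOOL extraction path reduces to the sketch below. The function name and the plain ValueError are illustrative stand-ins; letting a called `vol.Schema` raise `vol.Invalid` on mismatched input is real voluptuous behavior:

# Sketch of the TOOL extraction path: take the first tool call and let the
# voluptuous schema raise if its arguments don't match the requested structure.
# extract_tool_data and ValueError stand in for the integration's own helpers.
import voluptuous as vol

def extract_tool_data(tool_calls: list | None, structure: vol.Schema):
    first_tool = next(iter(tool_calls or []), None)
    if first_tool is None:
        return None, ValueError("Please produce at least one tool call with the structured response.")
    validated = structure(first_tool.tool_args)  # raises vol.Invalid on mismatch
    return validated, None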

@@ -111,7 +111,7 @@ class GenericOpenAIAPIClient(LocalLLMClient):
             ) as response:
                 response.raise_for_status()
                 models_result = await response.json()
-        except:
+        except (asyncio.TimeoutError, aiohttp.ClientResponseError):
             _LOGGER.exception("Failed to get available models")
             return RECOMMENDED_CHAT_MODELS
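
The fix above narrows a bare `except:` (which would also swallow cancellation and keyboard interrupts) down to the two failures this request can realistically hit. A self-contained sketch under assumed names; the URL shape and fallback list are placeholders:

# Only timeout and HTTP-status errors fall back to the default model list;
# anything unexpected now propagates instead of being silently hidden.
import asyncio
import logging

import aiohttp

_LOGGER = logging.getLogger(__name__)
RECOMMENDED_CHAT_MODELS = ["llama3.1"]  # placeholder fallback

async def fetch_models(session: aiohttp.ClientSession, base_url: str) -> list[str]:
    try:
        async with session.get(
            f"{base_url}/v1/models", timeout=aiohttp.ClientTimeout(total=10)
        ) as response:
            response.raise_for_status()
            models_result = await response.json()
    except (asyncio.TimeoutError, aiohttp.ClientResponseError):
        _LOGGER.exception("Failed to get available models")
        return RECOMMENDED_CHAT_MODELS
    return [model["id"] for model in models_result.get("data", [])]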

@@ -1,7 +1,7 @@
 {
   "domain": "llama_conversation",
   "name": "Local LLMs",
-  "version": "0.4.4",
+  "version": "0.4.5",
   "codeowners": ["@acon96"],
   "config_flow": true,
   "dependencies": ["conversation", "ai_task"],

@@ -279,7 +279,8 @@ def get_oai_formatted_tools(llm_api: llm.APIInstance, domains: list[str]) -> Lis
     result: List[ChatCompletionTool] = []
     for tool in llm_api.tools:
-        if tool.name == SERVICE_TOOL_NAME:
+        # when combined with Home Assistant LLM APIs, a prefix is added to differentiate tools; compare against the suffix here
+        if tool.name.endswith(SERVICE_TOOL_NAME):
             result.extend([{
                 "type": "function",
                 "function": {