make ai tasks more usable
@@ -4,16 +4,17 @@ This project provides the required "glue" components to control your Home Assist
 ## Quick Start
 Please see the [Setup Guide](./docs/Setup.md) for more information on installation.
 
-## Local LLM Conversation Integration
+## Local LLM Integration
 **The latest version of this integration requires Home Assistant 2025.7.0 or newer**
 
-In order to integrate with Home Assistant, we provide a custom component that exposes the locally running LLM as a "conversation agent".
+In order to integrate with Home Assistant, we provide a custom component that exposes the locally running LLM as a "conversation agent" or as an "ai task handler".
 
 This component can be interacted with in a few ways:
 - using a chat interface so you can chat with it.
 - integrating with Speech-to-Text and Text-to-Speech addons so you can just speak to it.
+- using automations or scripts to trigger "ai tasks"; these process input data with a prompt, and return structured data that can be used in further automations.
 
-The integration can either run the model in 2 different ways:
+The integration can run the model in a few ways:
 1. Directly as part of the Home Assistant software using llama-cpp-python
 2. On a separate machine using one of the following backends:
 - [Ollama](https://ollama.com/) (easier)
@@ -36,6 +37,7 @@ The latest models can be found on HuggingFace:
 
 **Gemma3**:
 1B: TBD
 270M: TBD
 
 <details>

@@ -1,11 +1,10 @@
 """AI Task integration for Local LLMs."""
 
 from __future__ import annotations
 
 from json import JSONDecodeError
 import logging
 from enum import StrEnum
-from typing import Any, cast
+from typing import Any
 
 import voluptuous as vol
 from voluptuous_openapi import convert as convert_to_openapi
@@ -20,8 +19,8 @@ from homeassistant.util.json import json_loads
 
 from .entity import LocalLLMEntity, LocalLLMClient
 from .const import (
-    CONF_AI_TASK_PROMPT,
+    CONF_PROMPT,
+    CONF_RESPONSE_JSON_SCHEMA,
     DEFAULT_AI_TASK_PROMPT,
     CONF_AI_TASK_RETRIES,
     DEFAULT_AI_TASK_RETRIES,
@@ -159,36 +158,43 @@ class LocalLLMTaskEntity(
         structure: vol.Schema | None,
     ) -> ai_task.GenDataTaskResult:
         """Extract the final data from the LLM response based on the extraction method."""
-        if extraction_method == ResultExtractionMethod.NONE or structure is None:
-            return ai_task.GenDataTaskResult(
-                conversation_id=chat_log.conversation_id,
-                data=raw_text,
-            )
-
-        if extraction_method == ResultExtractionMethod.STRUCTURED_OUTPUT:
-            try:
-                data = json_loads(raw_text)
-            except JSONDecodeError as err:
-                raise HomeAssistantError(
-                    "Error with Local LLM structured response"
-                ) from err
-            return ai_task.GenDataTaskResult(
-                conversation_id=chat_log.conversation_id,
-                data=data,
-            )
-
-        if extraction_method == ResultExtractionMethod.TOOL:
-            first_tool = (tool_calls or [None])[0]
-            if not first_tool or not getattr(first_tool, "tool_args", None):
-                raise HomeAssistantError("Error with Local LLM tool response")
-            structure(first_tool.tool_args) # validate against structure
-            return ai_task.GenDataTaskResult(
-                conversation_id=chat_log.conversation_id,
-                data=first_tool.tool_args,
-            )
-
-        raise HomeAssistantError("Invalid extraction method for AI Task")
+        try:
+            if extraction_method == ResultExtractionMethod.NONE or structure is None:
+                return ai_task.GenDataTaskResult(
+                    conversation_id=chat_log.conversation_id,
+                    data=raw_text,
+                )
+
+            if extraction_method == ResultExtractionMethod.STRUCTURED_OUTPUT:
+                try:
+                    data = json_loads(raw_text)
+                except JSONDecodeError as err:
+                    raise HomeAssistantError("Error with Local LLM structured response") from err
+                return ai_task.GenDataTaskResult(
+                    conversation_id=chat_log.conversation_id,
+                    data=data,
+                )
+
+            if extraction_method == ResultExtractionMethod.TOOL:
+                first_tool = next(iter(tool_calls or []), None)
+                if not first_tool or not getattr(first_tool, "tool_args", None):
+                    raise HomeAssistantError("Error with Local LLM tool response")
+                structure(first_tool.tool_args) # validate tool call against vol schema structure
+                return ai_task.GenDataTaskResult(
+                    conversation_id=chat_log.conversation_id,
+                    data=first_tool.tool_args,
+                )
+        except vol.Invalid as err:
+            if isinstance(err, vol.MultipleInvalid):
+                # combine all error messages into one
+                error_message = "; ".join(f"Error at '{e.path}': {e.error_message}" for e in err.errors)
+            else:
+                error_message = f"Error at '{err.path}': {err.error_message}"
+            raise HomeAssistantError(f"Please address the following schema errors: {error_message}") from err
+        except JSONDecodeError as err:
+            raise HomeAssistantError(f"Please produce properly formatted JSON: {repr(err)}") from err
+
+        raise HomeAssistantError(f"Invalid extraction method for AI Task {extraction_method}")
 
     async def _async_generate_data(
         self,
@@ -196,69 +202,50 @@ class LocalLLMTaskEntity(
         chat_log: conversation.ChatLog,
     ) -> ai_task.GenDataTaskResult:
         """Handle a generate data task."""
-        try:
-            task_prompt = self.runtime_options.get(CONF_AI_TASK_PROMPT, DEFAULT_AI_TASK_PROMPT)
-            retries = max(0, self.runtime_options.get(CONF_AI_TASK_RETRIES, DEFAULT_AI_TASK_RETRIES))
-            extraction_method = self.runtime_options.get(CONF_AI_TASK_EXTRACTION_METHOD, DEFAULT_AI_TASK_EXTRACTION_METHOD)
-            max_attempts = retries + 1
-
-            entity_options = {**self.runtime_options}
-            if task.structure and extraction_method == ResultExtractionMethod.STRUCTURED_OUTPUT:
-                _LOGGER.debug("Using structure for AI Task '%s': %s", task.name, task.structure)
-                entity_options[CONF_RESPONSE_JSON_SCHEMA] = convert_to_openapi(task.structure, custom_serializer=llm.selector_serializer)
-
-            message_history = list(chat_log.content) if chat_log.content else []
-
-            system_message = conversation.SystemContent(content=task_prompt)
-            if message_history and isinstance(message_history[0], conversation.SystemContent):
-                message_history[0] = system_message
-            else:
-                message_history.insert(0, system_message)
-
-            if not any(isinstance(msg, conversation.UserContent) for msg in message_history):
-                message_history.append(
-                    conversation.UserContent(
-                        content=task.instructions, attachments=task.attachments
-                    )
-                )
-
-            if extraction_method == ResultExtractionMethod.TOOL and task.structure:
-                chat_log.llm_api = await SubmitResponseAPI(self.hass, [SubmitResponseTool(task.structure)]).async_get_api_instance(
-                    llm.LLMContext(DOMAIN, context=None, language=None, assistant=None, device_id=None)
-                )
-
-            last_error: Exception | None = None
-            for attempt in range(max_attempts):
-                _LOGGER.debug(
-                    "Generating response for %s (attempt %s/%s)...",
-                    task.name,
-                    attempt + 1,
-                    max_attempts,
-                )
-                text, tool_calls = await self._generate_once(message_history, chat_log, entity_options)
-                try:
-                    return self._extract_data(text, tool_calls, extraction_method, chat_log, task.structure)
-                except HomeAssistantError as err:
-                    last_error = err
-                    if attempt < max_attempts - 1:
-                        continue
-                    raise
-        except Exception as err:
-            last_error = err
-            _LOGGER.exception(
-                "Unhandled exception while running AI Task '%s'",
-                task.name,
-            )
-            raise HomeAssistantError(
-                f"Unhandled error while running AI Task '{task.name}'"
-            ) from err
-
-        if last_error:
-            raise last_error
-
-        raise HomeAssistantError("AI Task generation failed without an error")
+        raw_task_prompt = self.runtime_options.get(CONF_PROMPT, DEFAULT_AI_TASK_PROMPT)
+        retries = max(0, self.runtime_options.get(CONF_AI_TASK_RETRIES, DEFAULT_AI_TASK_RETRIES))
+        extraction_method = self.runtime_options.get(CONF_AI_TASK_EXTRACTION_METHOD, DEFAULT_AI_TASK_EXTRACTION_METHOD)
+        max_attempts = retries + 1
+
+        entity_options = {**self.runtime_options}
+        if task.structure: # set up extraction method specifics
+            if extraction_method == ResultExtractionMethod.STRUCTURED_OUTPUT:
+                _LOGGER.debug("Using structure for AI Task '%s': %s", task.name, task.structure)
+                entity_options[CONF_RESPONSE_JSON_SCHEMA] = convert_to_openapi(task.structure, custom_serializer=llm.selector_serializer)
+            elif extraction_method == ResultExtractionMethod.TOOL:
+                chat_log.llm_api = await SubmitResponseAPI(self.hass, [SubmitResponseTool(task.structure)]).async_get_api_instance(
+                    llm.LLMContext(DOMAIN, context=None, language=None, assistant=None, device_id=None)
+                )
+
+        message_history = list(chat_log.content) if chat_log.content else []
+        task_prompt = self.client._generate_system_prompt(raw_task_prompt, llm_api=chat_log.llm_api, entity_options=entity_options)
+        system_message = conversation.SystemContent(content=task_prompt)
+        if message_history and isinstance(message_history[0], conversation.SystemContent):
+            message_history[0] = system_message
+        else:
+            message_history.insert(0, system_message)
+
+        if not any(isinstance(msg, conversation.UserContent) for msg in message_history):
+            message_history.append(
+                conversation.UserContent(
+                    content=task.instructions, attachments=task.attachments
+                )
+            )
+
+        try:
+            last_error: Exception | None = None
+            for attempt in range(max_attempts):
+                _LOGGER.debug("Generating response for %s (attempt %s/%s)...", task.name, attempt + 1, max_attempts)
+                text, tool_calls = await self._generate_once(message_history, chat_log, entity_options)
+                try:
+                    return self._extract_data(text, tool_calls, extraction_method, chat_log, task.structure)
+                except HomeAssistantError as err:
+                    last_error = err
+                    if attempt < max_attempts - 1:
+                        message_history.append(conversation.AssistantContent(agent_id=self.entity_id, content=text, tool_calls=tool_calls))
+                        message_history.append(conversation.UserContent(content=f"Error: {str(err)}. Please try again."))
+                        continue
+                    raise
+        except Exception as err:
+            _LOGGER.exception("Unhandled exception while running AI Task '%s'", task.name)
+            raise HomeAssistantError(f"Unhandled error while running AI Task '{task.name}'") from err
+
+        if last_error:
+            raise last_error
+        raise HomeAssistantError("AI Task generation failed without an error")
@@ -47,7 +47,6 @@ from .const import (
     CONF_CHAT_MODEL,
     CONF_MAX_TOKENS,
     CONF_PROMPT,
-    CONF_AI_TASK_PROMPT,
    DEFAULT_AI_TASK_PROMPT,
     CONF_AI_TASK_RETRIES,
     DEFAULT_AI_TASK_RETRIES,
@@ -590,19 +589,47 @@ def local_llama_config_option_schema(
     backend_type: str,
     subentry_type: str,
 ) -> dict:
 
+    result: dict = {
+        vol.Optional(
+            CONF_TEMPERATURE,
+            description={"suggested_value": options.get(CONF_TEMPERATURE, DEFAULT_TEMPERATURE)},
+            default=options.get(CONF_TEMPERATURE, DEFAULT_TEMPERATURE),
+        ): NumberSelector(NumberSelectorConfig(min=0.0, max=2.0, step=0.05, mode=NumberSelectorMode.BOX)),
+        vol.Required(
+            CONF_THINKING_PREFIX,
+            description={"suggested_value": options.get(CONF_THINKING_PREFIX)},
+            default=DEFAULT_THINKING_PREFIX,
+        ): str,
+        vol.Required(
+            CONF_THINKING_SUFFIX,
+            description={"suggested_value": options.get(CONF_THINKING_SUFFIX)},
+            default=DEFAULT_THINKING_SUFFIX,
+        ): str,
+        vol.Required(
+            CONF_TOOL_CALL_PREFIX,
+            description={"suggested_value": options.get(CONF_TOOL_CALL_PREFIX)},
+            default=DEFAULT_TOOL_CALL_PREFIX,
+        ): str,
+        vol.Required(
+            CONF_TOOL_CALL_SUFFIX,
+            description={"suggested_value": options.get(CONF_TOOL_CALL_SUFFIX)},
+            default=DEFAULT_TOOL_CALL_SUFFIX,
+        ): str,
+        vol.Required(
+            CONF_ENABLE_LEGACY_TOOL_CALLING,
+            description={"suggested_value": options.get(CONF_ENABLE_LEGACY_TOOL_CALLING)},
+            default=DEFAULT_ENABLE_LEGACY_TOOL_CALLING
+        ): bool,
+    }
 
-    is_ai_task = subentry_type == ai_task.DOMAIN
-    default_prompt = DEFAULT_AI_TASK_PROMPT if is_ai_task else build_prompt_template(language, DEFAULT_PROMPT)
-    prompt_key = CONF_AI_TASK_PROMPT if is_ai_task else CONF_PROMPT
-    prompt_selector = TextSelector(TextSelectorConfig(type=TextSelectorType.TEXT, multiline=True)) if is_ai_task else TemplateSelector()
-
-    if is_ai_task:
-        result: dict = {
-            vol.Optional(
-                prompt_key,
-                description={"suggested_value": options.get(prompt_key, default_prompt)},
-                default=options.get(prompt_key, default_prompt),
-            ): prompt_selector,
+    if subentry_type == ai_task.DOMAIN:
+        result.update({
+            vol.Optional(
+                CONF_PROMPT,
+                description={"suggested_value": options.get(CONF_PROMPT, DEFAULT_AI_TASK_PROMPT)},
+                default=options.get(CONF_PROMPT, DEFAULT_AI_TASK_PROMPT),
+            ): TemplateSelector(),
             vol.Required(
                 CONF_AI_TASK_EXTRACTION_METHOD,
                 description={"suggested_value": options.get(CONF_AI_TASK_EXTRACTION_METHOD, DEFAULT_AI_TASK_EXTRACTION_METHOD)},
@@ -620,19 +647,22 @@ def local_llama_config_option_schema(
             description={"suggested_value": options.get(CONF_AI_TASK_RETRIES, DEFAULT_AI_TASK_RETRIES)},
             default=options.get(CONF_AI_TASK_RETRIES, DEFAULT_AI_TASK_RETRIES),
         ): NumberSelector(NumberSelectorConfig(min=0, max=5, step=1, mode=NumberSelectorMode.BOX)),
-    }
-    else:
-        result: dict = {
+        })
+    elif subentry_type == conversation.DOMAIN:
+        default_prompt = build_prompt_template(language, DEFAULT_PROMPT)
+        apis: list[SelectOptionDict] = [
+            SelectOptionDict(
+                label=api.name,
+                value=api.id,
+            )
+            for api in llm.async_get_apis(hass)
+        ]
+        result.update({
             vol.Optional(
-                prompt_key,
-                description={"suggested_value": options.get(prompt_key, default_prompt)},
-                default=options.get(prompt_key, default_prompt),
-            ): prompt_selector,
-            vol.Optional(
-                CONF_TEMPERATURE,
-                description={"suggested_value": options.get(CONF_TEMPERATURE, DEFAULT_TEMPERATURE)},
-                default=options.get(CONF_TEMPERATURE, DEFAULT_TEMPERATURE),
-            ): NumberSelector(NumberSelectorConfig(min=0.0, max=2.0, step=0.05, mode=NumberSelectorMode.BOX)),
+                CONF_PROMPT,
+                description={"suggested_value": options.get(CONF_PROMPT, default_prompt)},
+                default=options.get(CONF_PROMPT, default_prompt),
+            ): TemplateSelector(),
             vol.Required(
                 CONF_USE_IN_CONTEXT_LEARNING_EXAMPLES,
                 description={"suggested_value": options.get(CONF_USE_IN_CONTEXT_LEARNING_EXAMPLES)},
@@ -653,34 +683,52 @@ def local_llama_config_option_schema(
             description={"suggested_value": options.get(CONF_EXTRA_ATTRIBUTES_TO_EXPOSE)},
             default=DEFAULT_EXTRA_ATTRIBUTES_TO_EXPOSE,
         ): TextSelector(TextSelectorConfig(multiple=True)),
-        vol.Required(
-            CONF_THINKING_PREFIX,
-            description={"suggested_value": options.get(CONF_THINKING_PREFIX)},
-            default=DEFAULT_THINKING_PREFIX,
-        ): str,
-        vol.Required(
-            CONF_THINKING_SUFFIX,
-            description={"suggested_value": options.get(CONF_THINKING_SUFFIX)},
-            default=DEFAULT_THINKING_SUFFIX,
-        ): str,
-        vol.Required(
-            CONF_TOOL_CALL_PREFIX,
-            description={"suggested_value": options.get(CONF_TOOL_CALL_PREFIX)},
-            default=DEFAULT_TOOL_CALL_PREFIX,
-        ): str,
-        vol.Required(
-            CONF_TOOL_CALL_SUFFIX,
-            description={"suggested_value": options.get(CONF_TOOL_CALL_SUFFIX)},
-            default=DEFAULT_TOOL_CALL_SUFFIX,
-        ): str,
-        vol.Required(
-            CONF_ENABLE_LEGACY_TOOL_CALLING,
-            description={"suggested_value": options.get(CONF_ENABLE_LEGACY_TOOL_CALLING)},
-            default=DEFAULT_ENABLE_LEGACY_TOOL_CALLING
-        ): bool,
-    }
+        vol.Optional(
+            CONF_LLM_HASS_API,
+            description={"suggested_value": options.get(CONF_LLM_HASS_API)},
+            default=None,
+        ): SelectSelector(SelectSelectorConfig(options=apis, multiple=True)),
+        vol.Optional(
+            CONF_REFRESH_SYSTEM_PROMPT,
+            description={"suggested_value": options.get(CONF_REFRESH_SYSTEM_PROMPT, DEFAULT_REFRESH_SYSTEM_PROMPT)},
+            default=options.get(CONF_REFRESH_SYSTEM_PROMPT, DEFAULT_REFRESH_SYSTEM_PROMPT),
+        ): BooleanSelector(BooleanSelectorConfig()),
+        vol.Optional(
+            CONF_REMEMBER_CONVERSATION,
+            description={"suggested_value": options.get(CONF_REMEMBER_CONVERSATION, DEFAULT_REMEMBER_CONVERSATION)},
+            default=options.get(CONF_REMEMBER_CONVERSATION, DEFAULT_REMEMBER_CONVERSATION),
+        ): BooleanSelector(BooleanSelectorConfig()),
+        vol.Optional(
+            CONF_REMEMBER_NUM_INTERACTIONS,
+            description={"suggested_value": options.get(CONF_REMEMBER_NUM_INTERACTIONS, DEFAULT_REMEMBER_NUM_INTERACTIONS)},
+            default=options.get(CONF_REMEMBER_NUM_INTERACTIONS, DEFAULT_REMEMBER_NUM_INTERACTIONS),
+        ): NumberSelector(NumberSelectorConfig(min=0, max=100, mode=NumberSelectorMode.BOX)),
+        vol.Optional(
+            CONF_REMEMBER_CONVERSATION_TIME_MINUTES,
+            description={"suggested_value": options.get(CONF_REMEMBER_CONVERSATION_TIME_MINUTES, DEFAULT_REMEMBER_CONVERSATION)},
+            default=options.get(CONF_REMEMBER_CONVERSATION_TIME_MINUTES, DEFAULT_REMEMBER_CONVERSATION),
+        ): NumberSelector(NumberSelectorConfig(min=0, max=1440, mode=NumberSelectorMode.BOX)),
+        vol.Required(
+            CONF_MAX_TOOL_CALL_ITERATIONS,
+            description={"suggested_value": options.get(CONF_MAX_TOOL_CALL_ITERATIONS)},
+            default=DEFAULT_MAX_TOOL_CALL_ITERATIONS,
+        ): int,
+        })
 
     if backend_type == BACKEND_TYPE_LLAMA_CPP:
+        if subentry_type == conversation.DOMAIN:
+            result.update({
+                vol.Required(
+                    CONF_PROMPT_CACHING_ENABLED,
+                    description={"suggested_value": options.get(CONF_PROMPT_CACHING_ENABLED)},
+                    default=DEFAULT_PROMPT_CACHING_ENABLED,
+                ): BooleanSelector(BooleanSelectorConfig()),
+                vol.Required(
+                    CONF_PROMPT_CACHING_INTERVAL,
+                    description={"suggested_value": options.get(CONF_PROMPT_CACHING_INTERVAL)},
+                    default=DEFAULT_PROMPT_CACHING_INTERVAL,
+                ): NumberSelector(NumberSelectorConfig(min=1, max=60, step=1)),
+            })
         result.update({
             vol.Required(
                 CONF_MAX_TOKENS,
@@ -707,16 +755,6 @@ def local_llama_config_option_schema(
             description={"suggested_value": options.get(CONF_TYPICAL_P)},
             default=DEFAULT_TYPICAL_P,
         ): NumberSelector(NumberSelectorConfig(min=0, max=1, step=0.05)),
-        vol.Required(
-            CONF_PROMPT_CACHING_ENABLED,
-            description={"suggested_value": options.get(CONF_PROMPT_CACHING_ENABLED)},
-            default=DEFAULT_PROMPT_CACHING_ENABLED,
-        ): BooleanSelector(BooleanSelectorConfig()),
-        vol.Required(
-            CONF_PROMPT_CACHING_INTERVAL,
-            description={"suggested_value": options.get(CONF_PROMPT_CACHING_INTERVAL)},
-            default=DEFAULT_PROMPT_CACHING_INTERVAL,
-        ): NumberSelector(NumberSelectorConfig(min=1, max=60, step=1)),
         # TODO: add rope_scaling_type
         vol.Required(
             CONF_CONTEXT_LENGTH,
@@ -915,56 +953,11 @@ def local_llama_config_option_schema(
         ): NumberSelector(NumberSelectorConfig(min=-1, max=1440, step=1, unit_of_measurement=UnitOfTime.MINUTES, mode=NumberSelectorMode.BOX)),
         })
 
-    if subentry_type == conversation.DOMAIN:
-        apis: list[SelectOptionDict] = [
-            SelectOptionDict(
-                label=api.name,
-                value=api.id,
-            )
-            for api in llm.async_get_apis(hass)
-        ]
-        result.update({
-            vol.Optional(
-                CONF_LLM_HASS_API,
-                description={"suggested_value": options.get(CONF_LLM_HASS_API)},
-                default=None,
-            ): SelectSelector(SelectSelectorConfig(options=apis, multiple=True)),
-            vol.Optional(
-                CONF_REFRESH_SYSTEM_PROMPT,
-                description={"suggested_value": options.get(CONF_REFRESH_SYSTEM_PROMPT, DEFAULT_REFRESH_SYSTEM_PROMPT)},
-                default=options.get(CONF_REFRESH_SYSTEM_PROMPT, DEFAULT_REFRESH_SYSTEM_PROMPT),
-            ): BooleanSelector(BooleanSelectorConfig()),
-            vol.Optional(
-                CONF_REMEMBER_CONVERSATION,
-                description={"suggested_value": options.get(CONF_REMEMBER_CONVERSATION, DEFAULT_REMEMBER_CONVERSATION)},
-                default=options.get(CONF_REMEMBER_CONVERSATION, DEFAULT_REMEMBER_CONVERSATION),
-            ): BooleanSelector(BooleanSelectorConfig()),
-            vol.Optional(
-                CONF_REMEMBER_NUM_INTERACTIONS,
-                description={"suggested_value": options.get(CONF_REMEMBER_NUM_INTERACTIONS, DEFAULT_REMEMBER_NUM_INTERACTIONS)},
-                default=options.get(CONF_REMEMBER_NUM_INTERACTIONS, DEFAULT_REMEMBER_NUM_INTERACTIONS),
-            ): NumberSelector(NumberSelectorConfig(min=0, max=100, mode=NumberSelectorMode.BOX)),
-            vol.Optional(
-                CONF_REMEMBER_CONVERSATION_TIME_MINUTES,
-                description={"suggested_value": options.get(CONF_REMEMBER_CONVERSATION_TIME_MINUTES, DEFAULT_REMEMBER_CONVERSATION)},
-                default=options.get(CONF_REMEMBER_CONVERSATION_TIME_MINUTES, DEFAULT_REMEMBER_CONVERSATION),
-            ): NumberSelector(NumberSelectorConfig(min=0, max=1440, mode=NumberSelectorMode.BOX)),
-            vol.Required(
-                CONF_MAX_TOOL_CALL_ITERATIONS,
-                description={"suggested_value": options.get(CONF_MAX_TOOL_CALL_ITERATIONS)},
-                default=DEFAULT_MAX_TOOL_CALL_ITERATIONS,
-            ): int,
-        })
-    elif subentry_type == ai_task.DOMAIN:
-        # no extra conversation/tool options for ai_task
-        pass
 
     # sort the options
     global_order = [
         # general
         CONF_LLM_HASS_API,
         CONF_PROMPT,
-        CONF_AI_TASK_PROMPT,
         CONF_AI_TASK_EXTRACTION_METHOD,
         CONF_AI_TASK_RETRIES,
         CONF_CONTEXT_LENGTH,
@@ -1159,8 +1152,8 @@ class LocalLLMSubentryFlowHandler(ConfigSubentryFlow):
         is_ai_task = self._subentry_type == ai_task.DOMAIN
 
         if is_ai_task:
-            if CONF_AI_TASK_PROMPT not in self.model_config:
-                self.model_config[CONF_AI_TASK_PROMPT] = DEFAULT_AI_TASK_PROMPT
+            if CONF_PROMPT not in self.model_config:
+                self.model_config[CONF_PROMPT] = DEFAULT_AI_TASK_PROMPT
             if CONF_AI_TASK_RETRIES not in self.model_config:
                 self.model_config[CONF_AI_TASK_RETRIES] = DEFAULT_AI_TASK_RETRIES
             if CONF_AI_TASK_EXTRACTION_METHOD not in self.model_config:
@@ -8,10 +8,9 @@ SERVICE_TOOL_NAME = "HassCallService"
 SERVICE_TOOL_ALLOWED_SERVICES = ["turn_on", "turn_off", "toggle", "press", "increase_speed", "decrease_speed", "open_cover", "close_cover", "stop_cover", "lock", "unlock", "start", "stop", "return_to_base", "pause", "cancel", "add_item", "set_temperature", "set_humidity", "set_fan_mode", "set_hvac_mode", "set_preset_mode"]
 SERVICE_TOOL_ALLOWED_DOMAINS = ["light", "switch", "button", "fan", "cover", "lock", "media_player", "climate", "vacuum", "todo", "timer", "script"]
 CONF_PROMPT = "prompt"
-CONF_AI_TASK_PROMPT = "ai_task_prompt"
 DEFAULT_AI_TASK_PROMPT = "You are a task-specific assistant. Follow the task instructions and return the requested data."
 CONF_AI_TASK_RETRIES = "ai_task_retries"
-DEFAULT_AI_TASK_RETRIES = 0
+DEFAULT_AI_TASK_RETRIES = 1
 CONF_AI_TASK_EXTRACTION_METHOD = "ai_task_extraction_method"
 DEFAULT_AI_TASK_EXTRACTION_METHOD = "structure"
 PERSONA_PROMPTS = {

@@ -246,7 +246,9 @@
         "tool_call_prefix": "Tool Call Prefix",
         "tool_call_suffix": "Tool Call Suffix",
         "enable_legacy_tool_calling": "Enable Legacy Tool Calling",
-        "max_tool_call_iterations": "Maximum Tool Call Attempts"
+        "max_tool_call_iterations": "Maximum Tool Call Attempts",
+        "ai_task_extraction_method": "Structured Data Extraction Method",
+        "ai_task_retries": "Retry attempts for structured data extraction"
       },
       "data_description": {
         "prompt": "See [here](https://github.com/acon96/home-llm/blob/develop/docs/Model%20Prompting.md) for more information on model prompting.",
@@ -255,7 +257,8 @@
         "gbnf_grammar": "Forces the model to output properly formatted responses. Ensure the file specified below exists in the integration directory.",
         "prompt_caching": "Prompt caching attempts to pre-process the prompt (house state) and cache the processing that needs to be done to understand the prompt. Enabling this will cause the model to re-process the prompt any time an entity state changes in the house, restricted by the interval below.",
         "enable_legacy_tool_calling": "Prefer to process tool calls locally rather than relying on the backend to handle the tool calling format. Can be more reliable, however it requires properly setting the tool call prefix and suffix.",
-        "max_tool_call_iterations": "Set to 0 to generate the response and tool call in one attempt, without looping (use this for Home models v1-v3)."
+        "max_tool_call_iterations": "Set to 0 to generate the response and tool call in one attempt, without looping (use this for Home models v1-v3).",
+        "ai_task_extraction_method": "Select the method used to extract structured data from the model's response. 'Structured Output' tells the backend to force the model to produce output following the provided JSON Schema; 'Tool Calling' provides a tool to the model that should be called with the appropriate arguments that match the desired output structure."
       },
       "description": "Please configure the model according to how it should be prompted. There are many different options and selecting the correct ones for your model is essential to getting optimal performance. See [here](https://github.com/acon96/home-llm/blob/develop/docs/Backend%20Configuration.md) for more information about the options on this page.\n\n**Some defaults may have been chosen for you based on the name of the selected model name or filename.** If you renamed a file or are using a fine-tuning of a supported model, then the defaults may not have been detected.",
       "title": "Configure the selected model"
docs/AI Tasks.md (new file)
@@ -0,0 +1,52 @@
# Using AI Tasks
The AI Tasks feature allows you to define structured tasks that your local LLM can perform. These tasks can be integrated into Home Assistant automations and scripts, enabling you to generate dynamic content based on specific prompts and instructions.

## Setting up an AI Task Handler
Setting up a task handler is similar to setting up a conversation agent. You can choose to run the model directly within Home Assistant using `llama-cpp-python`, or you can use an external backend like Ollama. See the [Setup Guide](./Setup.md) for detailed instructions on configuring your AI Task handler.

The specific configuration options for AI Tasks are:
| Option Name                       | Description                                                                                                                           |
|-----------------------------------|---------------------------------------------------------------------------------------------------------------------------------------|
| Structured Data Extraction Method | Choose how the AI Task should extract structured data from the model's output. Options include `structured_output` and `tool`.       |
| Data Extraction Retry Count       | The number of times to retry data extraction if the initial attempt fails. Useful when models can produce incorrect tool responses.   |

If no structured data extraction method is specified, then the task entity will always return raw text.
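As a minimal sketch of that raw-text case (the `task_name`, `instructions`, and `entity_id` below are illustrative placeholders, not values shipped with this integration), the model's plain text response lands in the response variable's `data` field:

```yaml
# Minimal sketch of an unstructured AI Task call.
# task_name, instructions, and entity_id are placeholders; substitute your own.
sequence:
  - action: ai_task.generate_data
    data:
      task_name: Daily Summary
      instructions: Summarize which lights are currently on in one sentence.
      entity_id: ai_task.my_local_llm
    response_variable: summary_output
  - action: notify.persistent_notification
    data:
      # With no structure defined, data is the model's raw text
      message: "{{ summary_output.data }}"
```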
## Using an AI Task in a Script or Automation
To use an AI Task in a Home Assistant script or automation, use the `ai_task.generate_data` action. This action lets you specify the task name, instructions, and the structure of the expected output. Below is an example of a script that generates a joke about a smart device in your home.

**Device Joke Script:**
```yaml
sequence:
  - action: ai_task.generate_data
    data:
      task_name: Device Joke Generation
      instructions: |
        Write a funny joke about one of the smart devices in my home.
        Here are all of the smart devices I have:
        {% for device in states | rejectattr('domain', 'in', ['update', 'event']) -%}
        - {{ device.name }} ({{device.domain}})
        {% endfor %}
      # You MUST set this to your own LLM entity ID if you do not set a default one in HA Settings
      # entity_id: ai_task.unsloth_qwen3_0_6b_gguf_unsloth_qwen3_0_6b_gguf
      structure:
        joke_setup:
          description: The beginning of a joke about a smart device in the home
          required: true
          selector:
            text: null
        joke_punchline:
          description: The punchline of the same joke about the smart device
          required: true
          selector:
            text: null
    response_variable: joke_output
  - action: notify.persistent_notification
    data:
      message: |-
        {{ joke_output.data.joke_setup }}
        ...
        {{ joke_output.data.joke_punchline }}
alias: Device Joke
description: "Generates a funny joke about one of the smart devices in the home."
```
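For reference, a rough sketch of what the `joke_output` response variable from the script above holds after a successful run; the extracted fields appear under `data`, and the identifier and joke text here are illustrative:

```yaml
# Approximate shape of joke_output (values illustrative).
joke_output:
  conversation_id: 01HXXXXXXXXXXXXXXXXXXXXXXX
  data:
    joke_setup: Why did the smart vacuum apply for a job?
    joke_punchline: It wanted to clean up in its career!
```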
@@ -36,6 +36,9 @@ class DummyClient:
     def __init__(self, result: TextGenerationResult):
         self._result = result
 
+    def _generate_system_prompt(self, prompt_template, llm_api, entity_options):
+        return prompt_template
+
     def _supports_vision(self, _options): # pragma: no cover - not needed for tests
         return False