diff --git a/custom_components/llama_conversation/__init__.py b/custom_components/llama_conversation/__init__.py
index 1aa851f..4d2a618 100644
--- a/custom_components/llama_conversation/__init__.py
+++ b/custom_components/llama_conversation/__init__.py
@@ -184,6 +184,21 @@ async def async_migrate_entry(hass: HomeAssistant, config_entry: LocalLLMConfigE
         _LOGGER.debug("Migration to add downloaded model file complete")
 
+    if config_entry.version == 3 and config_entry.minor_version == 1:
+        # convert selected APIs from single value to list
+        api_to_convert = config_entry.options.get(CONF_LLM_HASS_API)
+        new_options = dict(config_entry.options)
+        if api_to_convert is not None:
+            new_options[CONF_LLM_HASS_API] = [api_to_convert]
+        else:
+            new_options[CONF_LLM_HASS_API] = []
+
+        hass.config_entries.async_update_entry(
+            config_entry, options=MappingProxyType(new_options)
+        )
+
+        hass.config_entries.async_update_entry(config_entry, minor_version=2)
+
     return True
 
 
 class HassServiceTool(llm.Tool):
@@ -255,14 +270,14 @@ class HomeLLMAPI(llm.API):
         super().__init__(
             hass=hass,
             id=HOME_LLM_API_ID,
-            name="Home-LLM (v1-v3)",
+            name="Home Assistant Services",
         )
 
     async def async_get_api_instance(self, llm_context: llm.LLMContext) -> llm.APIInstance:
         """Return the instance of the API."""
         return llm.APIInstance(
             api=self,
-            api_prompt="Call services in Home Assistant by passing the service name and the device to control.",
+            api_prompt="Call services in Home Assistant by passing the service name and the device to control. Designed for Home-LLM Models (v1-v3)",
             llm_context=llm_context,
             tools=[HassServiceTool()],
         )
diff --git a/custom_components/llama_conversation/config_flow.py b/custom_components/llama_conversation/config_flow.py
index ffeb147..f4d2ead 100644
--- a/custom_components/llama_conversation/config_flow.py
+++ b/custom_components/llama_conversation/config_flow.py
@@ -225,7 +225,7 @@ class ConfigFlow(BaseConfigFlow, domain=DOMAIN):
     """Handle a config flow for Local LLM Conversation."""
 
     VERSION = 3
-    MINOR_VERSION = 1
+    MINOR_VERSION = 2
 
     install_wheel_task = None
     install_wheel_error = None
@@ -881,24 +881,18 @@ def local_llama_config_option_schema(
 
     if subentry_type == "conversation":
         apis: list[SelectOptionDict] = [
-            SelectOptionDict(
-                label="No control",
-                value="none",
-            )
-        ]
-        apis.extend(
             SelectOptionDict(
                 label=api.name,
                 value=api.id,
             )
             for api in llm.async_get_apis(hass)
-        )
+        ]
         result.update({
             vol.Optional(
                 CONF_LLM_HASS_API,
                 description={"suggested_value": options.get(CONF_LLM_HASS_API)},
-                default="none",
-            ): SelectSelector(SelectSelectorConfig(options=apis)),
+                default=None,
+            ): SelectSelector(SelectSelectorConfig(options=apis, multiple=True)),
             vol.Optional(
                 CONF_REFRESH_SYSTEM_PROMPT,
                 description={"suggested_value": options.get(CONF_REFRESH_SYSTEM_PROMPT, DEFAULT_REFRESH_SYSTEM_PROMPT)},
@@ -1187,10 +1181,6 @@ class LocalLLMSubentryFlowHandler(ConfigSubentryFlow):
                     # validate input
                     schema(user_input)
                     self.model_config.update(user_input)
-
-                    # clear LLM API if 'none' selected
-                    if self.model_config.get(CONF_LLM_HASS_API) == "none":
-                        self.model_config.pop(CONF_LLM_HASS_API, None)
 
                     return await self.async_step_finish()
                 except Exception:
diff --git a/custom_components/llama_conversation/translations/en.json b/custom_components/llama_conversation/translations/en.json
index c47b8e4..2c3e005 100644
--- a/custom_components/llama_conversation/translations/en.json
+++ b/custom_components/llama_conversation/translations/en.json
@@ -70,7 +70,7 @@
         "model_parameters": {
             "data": {
                 "max_new_tokens": "Maximum tokens to return in response",
-                "llm_hass_api": "Selected LLM API",
+                "llm_hass_api": "Selected LLM API(s)",
                 "prompt": "System Prompt",
                 "temperature": "Temperature",
                 "top_k": "Top K",
@@ -109,7 +109,7 @@
                 "max_tool_call_iterations": "Maximum Tool Call Attempts"
             },
             "data_description": {
-                "llm_hass_api": "Select 'Assist' if you want the model to be able to control devices. If you are using the Home-LLM v1, v2, or v3 model then select 'Home-LLM (v1-3)'",
+                "llm_hass_api": "Select 'Assist' if you want the model to be able to control devices. If you are using the Home-LLM (v1-3) model then select 'Home Assistant Services'",
                 "prompt": "See [here](https://github.com/acon96/home-llm/blob/develop/docs/Model%20Prompting.md) for more information on model prompting.",
                 "in_context_examples": "If you are using a model that is not specifically fine-tuned for use with this integration: enable this",
                 "extra_attributes_to_expose": "This is the list of Home Assistant 'attributes' that are exposed to the model. This limits how much information the model is able to see and answer questions on.",
@@ -124,7 +124,7 @@
         "reconfigure": {
             "data": {
                 "max_new_tokens": "Maximum tokens to return in response",
-                "llm_hass_api": "Selected LLM API",
+                "llm_hass_api": "Selected LLM API(s)",
                 "prompt": "System Prompt",
                 "temperature": "Temperature",
                 "top_k": "Top K",
@@ -163,7 +163,7 @@
                 "max_tool_call_iterations": "Maximum Tool Call Attempts"
             },
             "data_description": {
-                "llm_hass_api": "Select 'Assist' if you want the model to be able to control devices. If you are using the Home-LLM v1, v2, or v3 model then select 'Home-LLM (v1-3)'",
+                "llm_hass_api": "Select 'Assist' if you want the model to be able to control devices. If you are using the Home-LLM (v1-3) model then select 'Home Assistant Services'",
                 "prompt": "See [here](https://github.com/acon96/home-llm/blob/develop/docs/Model%20Prompting.md) for more information on model prompting.",
                 "in_context_examples": "If you are using a model that is not specifically fine-tuned for use with this integration: enable this",
                 "extra_attributes_to_expose": "This is the list of Home Assistant 'attributes' that are exposed to the model. This limits how much information the model is able to see and answer questions on.",
@@ -263,7 +263,7 @@
         "reconfigure": {
             "data": {
                 "max_new_tokens": "Maximum tokens to return in response",
-                "llm_hass_api": "Selected LLM API",
+                "llm_hass_api": "Selected LLM API(s)",
                 "prompt": "System Prompt",
                 "temperature": "Temperature",
                 "top_k": "Top K",