diff --git a/custom_components/llama_conversation/backends/generic_openai.py b/custom_components/llama_conversation/backends/generic_openai.py
index d5d0192..8d9460c 100644
--- a/custom_components/llama_conversation/backends/generic_openai.py
+++ b/custom_components/llama_conversation/backends/generic_openai.py
@@ -191,7 +191,9 @@ class GenericOpenAIAPIClient(LocalLLMClient):
         return endpoint, request_params

     def _extract_response(self, response_json: dict, llm_api: llm.APIInstance | None, user_input: conversation.ConversationInput) -> Tuple[Optional[str], Optional[List[llm.ToolInput]]]:
-        if len(response_json["choices"]) == 0: # finished
+        if "choices" not in response_json or len(response_json["choices"]) == 0: # finished
+            _LOGGER.warning("Response missing or empty 'choices'. Keys present: %s. Full response: %s",
+                            list(response_json.keys()), response_json)
             return None, None

         choice = response_json["choices"][0]
diff --git a/custom_components/llama_conversation/config_flow.py b/custom_components/llama_conversation/config_flow.py
index a1b28fd..8498d29 100644
--- a/custom_components/llama_conversation/config_flow.py
+++ b/custom_components/llama_conversation/config_flow.py
@@ -1139,8 +1139,8 @@ class LocalLLMSubentryFlowHandler(ConfigSubentryFlow):
                 selected_default_options[CONF_PROMPT] = build_prompt_template(
                     selected_language, str(selected_default_options.get(CONF_PROMPT, DEFAULT_PROMPT))
                 )
-
-            self.model_config = selected_default_options
+
+            self.model_config = {**selected_default_options, **self.model_config}

             schema = vol.Schema(
                 local_llama_config_option_schema(
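
For context, here is a minimal standalone sketch of the behavior the generic_openai.py hunk introduces. `extract_first_choice` is a hypothetical helper that mirrors the patched guard, and the error-shaped payload is only an assumption about what an OpenAI-compatible server may return on failure; the real `_extract_response` additionally parses the choice into text and tool calls.

```python
# Hypothetical, simplified mirror of the patched guard in _extract_response().
# Previously, a payload without a "choices" key (e.g. an error object from an
# OpenAI-compatible server) raised KeyError; now it is logged and treated as
# "no response".
import logging

_LOGGER = logging.getLogger(__name__)

def extract_first_choice(response_json: dict) -> dict | None:
    if "choices" not in response_json or len(response_json["choices"]) == 0:
        _LOGGER.warning(
            "Response missing or empty 'choices'. Keys present: %s. Full response: %s",
            list(response_json.keys()), response_json,
        )
        return None
    return response_json["choices"][0]

# An error payload (shape assumed) no longer raises; it simply yields None:
assert extract_first_choice({"error": {"message": "model not loaded"}}) is None
# The pre-existing "empty choices means finished" case still yields None:
assert extract_first_choice({"choices": []}) is None
# A normal completion payload returns the first choice as before:
assert extract_first_choice({"choices": [{"message": {"content": "ok"}}]}) == {"message": {"content": "ok"}}
```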
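
Similarly, a small sketch of what the config_flow.py change does: instead of overwriting `self.model_config` with the language-specific defaults, it merges them so that values already present in `self.model_config` take precedence. The keys below are hypothetical stand-ins for the integration's CONF_* option constants.

```python
# In {**defaults, **existing}, keys from `existing` win on conflict, so
# previously-entered settings are preserved and defaults only fill the gaps.
selected_default_options = {"prompt": "<language-specific default>", "temperature": 0.1}
model_config = {"temperature": 0.7}  # value the user already configured

model_config = {**selected_default_options, **model_config}
assert model_config == {"prompt": "<language-specific default>", "temperature": 0.7}
```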