From 089aa8cff060bb3772b969582af547552ec7ef28 Mon Sep 17 00:00:00 2001
From: syn-nick
Date: Wed, 22 Oct 2025 11:19:30 +0200
Subject: [PATCH] handle empty finish_reason

---
 .../llama_conversation/backends/generic_openai.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/custom_components/llama_conversation/backends/generic_openai.py b/custom_components/llama_conversation/backends/generic_openai.py
index c4d2a8b..d5d0192 100644
--- a/custom_components/llama_conversation/backends/generic_openai.py
+++ b/custom_components/llama_conversation/backends/generic_openai.py
@@ -217,8 +217,9 @@ class GenericOpenAIAPIClient(LocalLLMClient):
             response_text = choice["text"]
             streamed = False
 
-        if not streamed or streamed and choice["finish_reason"]:
-            if choice["finish_reason"] == "length" or choice["finish_reason"] == "content_filter":
+        if not streamed or (streamed and choice.get("finish_reason")):
+            finish_reason = choice.get("finish_reason")
+            if finish_reason in ("length", "content_filter"):
                 _LOGGER.warning("Model response did not end on a stop token (unfinished sentence)")
 
         return response_text, tool_calls
@@ -405,4 +406,4 @@ class GenericOpenAIResponsesAPIClient(LocalLLMClient):
             _LOGGER.debug(f"Err was: {err}")
             _LOGGER.debug(f"Request was: {request_params}")
             _LOGGER.debug(f"Result was: {response}")
-            return TextGenerationResult(raise_error=True, error_msg=f"Failed to communicate with the API! {err}")
\ No newline at end of file
+            return TextGenerationResult(raise_error=True, error_msg=f"Failed to communicate with the API! {err}")
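
Note for reviewers, not part of the patch: a minimal sketch of why the .get() guard matters, assuming a Chat Completions-style streaming payload where intermediate chunks report "finish_reason": null and only the final chunk says how generation ended. The chunk contents below are illustrative, not taken from the backend.

    # Illustrative only: streamed chunks typically carry finish_reason = None
    # until the last chunk, so indexing choice["finish_reason"] can mislead or
    # fail, while choice.get("finish_reason") defers the check to the end.
    chunks = [
        {"delta": {"content": "Turning on the "}, "finish_reason": None},
        {"delta": {"content": "kitchen light"}, "finish_reason": "length"},
    ]

    for choice in chunks:
        streamed = True
        # Same guard shape as the patched code: only inspect finish_reason
        # once the stream has actually reported one.
        if not streamed or (streamed and choice.get("finish_reason")):
            finish_reason = choice.get("finish_reason")
            if finish_reason in ("length", "content_filter"):
                print("Model response did not end on a stop token (unfinished sentence)")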