Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-09 14:37:59 -05:00)
fix: exclude empty stop parameter from LLM completion params
This fixes issue #4149 where models like gpt-5.1 don't support the stop parameter at all. Previously, an empty list was always passed to the API, causing an error.

Changes:
- Only include stop in params when it has values (non-empty)
- Respect additional_drop_params to ensure retry logic works correctly
- Add tests to verify the fix

Co-Authored-By: João <joao@crewai.com>
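For context, a minimal sketch of how a caller benefits from the fix. The constructor arguments mirror the tests added in this commit (stop, additional_drop_params, is_litellm); treating "gpt-5.1" as a model string your provider accepts is an assumption.

from crewai import LLM

# With no stop words configured (the default is an empty list), the fix keeps
# "stop" out of the completion params entirely, so models that reject the
# parameter no longer error.
llm = LLM(model="gpt-5.1", is_litellm=True)

# If stop words are configured but the provider still rejects the parameter,
# listing "stop" in additional_drop_params also keeps it out of the call.
llm_retry_safe = LLM(
    model="gpt-5.1",
    stop=["Observation:"],
    additional_drop_params=["stop"],
    is_litellm=True,
)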
@@ -676,14 +676,13 @@ class LLM(BaseLLM):
         formatted_messages = self._format_messages_for_provider(messages)
 
         # --- 2) Prepare the parameters for the completion call
-        params = {
+        params: dict[str, Any] = {
             "model": self.model,
             "messages": formatted_messages,
             "timeout": self.timeout,
             "temperature": self.temperature,
             "top_p": self.top_p,
             "n": self.n,
-            "stop": self.stop,
             "max_tokens": self.max_tokens or self.max_completion_tokens,
             "presence_penalty": self.presence_penalty,
             "frequency_penalty": self.frequency_penalty,
@@ -702,6 +701,12 @@ class LLM(BaseLLM):
             **self.additional_params,
         }
 
+        # Only include stop if it has values and is not in additional_drop_params
+        # Some models (e.g., gpt-5.1) don't support the stop parameter at all
+        drop_params = self.additional_params.get("additional_drop_params", [])
+        if self.stop and "stop" not in drop_params:
+            params["stop"] = self.stop
+
         # Remove None values from params
         return {k: v for k, v in params.items() if v is not None}
 
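Taken out of the class, the pattern in the hunk above is: build the dict, conditionally attach stop, then strip None values. Below is a standalone sketch of that pattern with simplified inputs standing in for the LLM attributes; build_completion_params is a hypothetical helper, not a crewAI function.

from typing import Any


def build_completion_params(
    model: str,
    stop: list[str] | None,
    additional_params: dict[str, Any],
    temperature: float | None = None,
) -> dict[str, Any]:
    """Build the params dict, conditionally attach stop, then strip None values."""
    params: dict[str, Any] = {
        "model": model,
        "temperature": temperature,
        **additional_params,
    }

    # Only attach stop when it is non-empty and not explicitly dropped.
    drop_params = additional_params.get("additional_drop_params", [])
    if stop and "stop" not in drop_params:
        params["stop"] = stop

    # Strip None values so unset options never reach the API.
    return {k: v for k, v in params.items() if v is not None}


# An empty stop list and an unset temperature are both omitted:
print(build_completion_params("gpt-5.1", [], {}))
# -> {'model': 'gpt-5.1'}
print(build_completion_params("gpt-4o", ["Observation:"], {}, temperature=0.7))
# -> {'model': 'gpt-4o', 'temperature': 0.7, 'stop': ['Observation:']}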
@@ -877,3 +877,62 @@ def test_validate_model_in_constants():
         LLM._validate_model_in_constants("anthropic.claude-future-v1:0", "bedrock")
         is True
     )
+
+
+def test_prepare_completion_params_excludes_empty_stop():
+    """Test that _prepare_completion_params excludes stop when it's empty.
+
+    This is a regression test for issue #4149 where models like gpt-5.1
+    don't support the stop parameter at all, and passing an empty list
+    would cause an error.
+    """
+    llm = LLM(model="gpt-4o", is_litellm=True)
+    # By default, stop is initialized to an empty list
+    assert llm.stop == []
+
+    params = llm._prepare_completion_params("Hello")
+    # stop should not be in params when it's empty
+    assert "stop" not in params
+
+
+def test_prepare_completion_params_includes_stop_when_provided():
+    """Test that _prepare_completion_params includes stop when it has values."""
+    llm = LLM(model="gpt-4o", stop=["Observation:"], is_litellm=True)
+    assert llm.stop == ["Observation:"]
+
+    params = llm._prepare_completion_params("Hello")
+    # stop should be in params when it has values
+    assert "stop" in params
+    assert params["stop"] == ["Observation:"]
+
+
+def test_prepare_completion_params_excludes_stop_when_in_drop_params():
+    """Test that _prepare_completion_params excludes stop when it's in additional_drop_params.
+
+    This ensures the retry logic works correctly when a model doesn't support stop.
+    """
+    llm = LLM(
+        model="gpt-4o",
+        stop=["Observation:"],
+        additional_drop_params=["stop"],
+        is_litellm=True,
+    )
+    assert llm.stop == ["Observation:"]
+
+    params = llm._prepare_completion_params("Hello")
+    # stop should not be in params when it's in additional_drop_params
+    assert "stop" not in params
+
+
+def test_prepare_completion_params_excludes_stop_with_existing_drop_params():
+    """Test that stop is excluded when additional_drop_params already has other params."""
+    llm = LLM(
+        model="gpt-4o",
+        stop=["Observation:"],
+        additional_drop_params=["another_param", "stop"],
+        is_litellm=True,
+    )
+
+    params = llm._prepare_completion_params("Hello")
+    # stop should not be in params
+    assert "stop" not in params