Mirror of https://github.com/All-Hands-AI/OpenHands.git
Revert "Simplify max_output_tokens handling in LLM classes" (#9364)
@@ -163,7 +163,6 @@ class LLM(RetryMixin, DebugMixin):
             'temperature': self.config.temperature,
-            'max_completion_tokens': self.config.max_output_tokens,
         }
 
         if self.config.top_k is not None:
             # openai doesn't expose top_k
             # litellm will handle it a bit differently than the openai-compatible params
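
A minimal standalone sketch (assumed values, not the repository's code) of what this hunk changes: the reverted simplification forwarded the configured output cap on every call as 'max_completion_tokens', while 'top_k' stays conditional because the OpenAI API has no such parameter and litellm forwards it only to providers that support it.

temperature = 0.0
top_k = None  # e.g. 40 for providers that expose it

kwargs = {
    'temperature': temperature,
    # before this revert, 'max_completion_tokens' was always set here;
    # the revert instead resolves max_output_tokens as in the next hunk
}
if top_k is not None:
    kwargs['top_k'] = top_k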
@@ -493,6 +492,26 @@ class LLM(RetryMixin, DebugMixin):
             # Safe fallback for any potentially viable model
             self.config.max_input_tokens = 4096
 
+        if self.config.max_output_tokens is None:
+            # Safe default for any potentially viable model
+            self.config.max_output_tokens = 4096
+            if self.model_info is not None:
+                # max_output_tokens has precedence over max_tokens, if either exists.
+                # litellm has models with both, one or none of these 2 parameters!
+                if 'max_output_tokens' in self.model_info and isinstance(
+                    self.model_info['max_output_tokens'], int
+                ):
+                    self.config.max_output_tokens = self.model_info['max_output_tokens']
+                elif 'max_tokens' in self.model_info and isinstance(
+                    self.model_info['max_tokens'], int
+                ):
+                    self.config.max_output_tokens = self.model_info['max_tokens']
+            if any(
+                model in self.config.model
+                for model in ['claude-3-7-sonnet', 'claude-3.7-sonnet']
+            ):
+                self.config.max_output_tokens = 64000  # litellm set max to 128k, but that requires a header to be set
+
         # Initialize function calling capability
         # Check if model name is in our supported list
         model_name_supported = (
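
A self-contained sketch of the resolution order this hunk restores (the function name is hypothetical; the values and precedence come from the diff above): model_info['max_output_tokens'] wins over model_info['max_tokens'], 4096 is the fallback, and claude-3-7-sonnet models are pinned to 64000.

def resolve_max_output_tokens(model: str, model_info: dict | None) -> int:
    # Safe default for any potentially viable model.
    max_output_tokens = 4096
    if model_info is not None:
        # 'max_output_tokens' has precedence over 'max_tokens'; litellm's
        # model_info may carry both, one, or neither of these keys.
        if isinstance(model_info.get('max_output_tokens'), int):
            max_output_tokens = model_info['max_output_tokens']
        elif isinstance(model_info.get('max_tokens'), int):
            max_output_tokens = model_info['max_tokens']
    # claude-3-7-sonnet is capped at 64000: litellm advertises 128k,
    # but that requires an extra header to be set.
    if any(m in model for m in ['claude-3-7-sonnet', 'claude-3.7-sonnet']):
        max_output_tokens = 64000
    return max_output_tokens

assert resolve_max_output_tokens('gpt-4o', None) == 4096
assert resolve_max_output_tokens('gpt-4o', {'max_tokens': 8192}) == 8192
assert resolve_max_output_tokens('claude-3-7-sonnet-20250219', {'max_output_tokens': 8192}) == 64000

Note the ordering: the claude check runs after the model_info lookup, so it overrides whatever litellm's metadata reports for those models.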