Mirror of https://github.com/Significant-Gravitas/AutoGPT.git,
synced 2026-04-08 03:00:28 -04:00.
Add fallback token limit in llm.utils.create_chat_completion (#4839)
Co-authored-by: Reinier van der Leer <github@pwuts.nl>
This commit is contained in:
@@ -115,6 +115,8 @@ def create_chat_completion(
|
||||
model = prompt.model.name
|
||||
if temperature is None:
|
||||
temperature = config.temperature
|
||||
if max_tokens is None:
|
||||
max_tokens = OPEN_AI_CHAT_MODELS[model].max_tokens - prompt.token_length
|
||||
|
||||
logger.debug(
|
||||
f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
|
||||
|
||||
Reference in New Issue
Block a user