diff --git a/scripts/chat.py b/scripts/chat.py
index 4a7056d2b1..67cfe52a68 100644
--- a/scripts/chat.py
+++ b/scripts/chat.py
@@ -64,7 +64,7 @@ def chat_with_ai(
             model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
             # Reserve 1000 tokens for the response
 
-            if cfg.debug_mode:
+            if cfg.debug:
                 print(f"Token limit: {token_limit}")
 
             send_token_limit = token_limit - 1000
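
For reference, the `cfg.debug` check introduced above assumes the shared config object exposes a boolean `debug` attribute. Below is a minimal sketch of what that attribute might look like; the `Config` class, the `DEBUG` environment variable, and the module path are illustrative assumptions, since the config module is not part of this diff.

    # scripts/config.py -- hypothetical sketch, not taken from this diff
    import os

    class Config:
        """Illustrative config object exposing the `debug` flag checked in chat.py."""

        def __init__(self):
            # Assumed: debug flag sourced from an environment variable, off by default.
            self.debug = os.getenv("DEBUG", "False").lower() == "true"

    cfg = Config()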