From bc6f34d7dc70771273992e2377d890bfe91c6c71 Mon Sep 17 00:00:00 2001
From: vadi
Date: Wed, 12 Apr 2023 16:32:13 +1000
Subject: [PATCH] Fixes #803 - Brings back debug mode

- Replaces all references to cfg.debug with cfg.debug_mode, which was
  renamed in 5b2d6010dc59bab1026d13bfcd75b37618e573b9
- Removes an unnecessary config instance in main.py
---
 scripts/chat.py        | 6 +++---
 scripts/json_parser.py | 2 +-
 scripts/main.py        | 4 ++++
 3 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/scripts/chat.py b/scripts/chat.py
index 23e5b50149..30f7603c76 100644
--- a/scripts/chat.py
+++ b/scripts/chat.py
@@ -64,14 +64,14 @@ def chat_with_ai(
             model = cfg.fast_llm_model # TODO: Change model from hardcode to argument

             # Reserve 1000 tokens for the response
-            if cfg.debug:
+            if cfg.debug_mode:
                 print(f"Token limit: {token_limit}")

             send_token_limit = token_limit - 1000

             relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)

-            if cfg.debug:
+            if cfg.debug_mode:
                 print('Memory Stats: ', permanent_memory.get_stats())

             next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
@@ -110,7 +110,7 @@ def chat_with_ai(
             # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"

             # Debug print the current context
-            if cfg.debug:
+            if cfg.debug_mode:
                 print(f"Token limit: {token_limit}")
                 print(f"Send Token Count: {current_tokens_used}")
                 print(f"Tokens remaining for response: {tokens_remaining}")
diff --git a/scripts/json_parser.py b/scripts/json_parser.py
index 8c17dfa252..1d93b10926 100644
--- a/scripts/json_parser.py
+++ b/scripts/json_parser.py
@@ -91,7 +91,7 @@ def fix_json(json_str: str, schema: str) -> str:
     result_string = call_ai_function(
         function_string, args, description_string, model=cfg.fast_llm_model
     )
-    if cfg.debug:
+    if cfg.debug_mode:
         print("------------ JSON FIX ATTEMPT ---------------")
         print(f"Original JSON: {json_str}")
         print("-----------")
diff --git a/scripts/main.py b/scripts/main.py
index d84e150850..40cad2b839 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -266,6 +266,7 @@ def prompt_user():
 def parse_arguments():
     """Parses the arguments passed to the script"""
     global cfg
+    cfg.set_debug_mode(False)
     cfg.set_continuous_mode(False)
     cfg.set_speak_mode(False)

@@ -292,6 +293,9 @@ def parse_arguments():
         print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_smart_llm_model(cfg.fast_llm_model)

+    if args.debug:
+        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
+        cfg.set_debug_mode(True)

 # TODO: fill in llm values here
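
Note: the patch relies on two pieces that never appear in the hunks above: a
debug_mode attribute with a set_debug_mode() setter on the Config class in
scripts/config.py (the rename referenced in 5b2d6010), and a pre-existing
--debug argparse flag that surfaces as args.debug inside parse_arguments().
A minimal sketch of that assumed surface, for orientation only, not the
actual Auto-GPT source:

    import argparse

    class Config:
        """Assumed shape of the relevant Config surface."""
        def __init__(self) -> None:
            # Default off, matching cfg.set_debug_mode(False) above.
            self.debug_mode = False

        def set_debug_mode(self, value: bool) -> None:
            self.debug_mode = value

    cfg = Config()

    parser = argparse.ArgumentParser()
    # Hypothetical flag declaration; the real one lives elsewhere in main.py.
    parser.add_argument('--debug', action='store_true',
                        help='Enable debug output')
    args = parser.parse_args()

    if args.debug:
        cfg.set_debug_mode(True)

    # Guard pattern used throughout the patch.
    if cfg.debug_mode:
        print('Debug Mode: ENABLED')

Under those assumptions, running "python scripts/main.py --debug" flips
cfg.debug_mode to True before any of the cfg.debug_mode guards in chat.py
and json_parser.py are reached.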