From 5b2d6010dc59bab1026d13bfcd75b37618e573b9 Mon Sep 17 00:00:00 2001
From: kinance
Date: Mon, 10 Apr 2023 20:10:11 +0900
Subject: [PATCH 1/5] Resolve the conflict around debug mode flag after pull merge

---
 scripts/config.py     | 7 ++-----
 scripts/json_utils.py | 8 ++++----
 scripts/main.py       | 5 +----
 3 files changed, 7 insertions(+), 13 deletions(-)

diff --git a/scripts/config.py b/scripts/config.py
index 1eb74b2bc7..50432c425f 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -33,7 +33,7 @@ class Config(metaclass=Singleton):
 
     def __init__(self):
         """Initialize the Config class"""
-        self.debug = False
+        self.debug_mode = False
         self.continuous_mode = False
         self.speak_mode = False
@@ -86,9 +86,6 @@ class Config(metaclass=Singleton):
         """Set the speak mode value."""
         self.speak_mode = value
 
-    def set_debug_mode(self, value: bool):
-        self.debug_mode = value
-
     def set_fast_llm_model(self, value: str):
         """Set the fast LLM model value."""
         self.fast_llm_model = value
@@ -131,4 +128,4 @@ class Config(metaclass=Singleton):
 
     def set_debug_mode(self, value: bool):
         """Set the debug mode value."""
-        self.debug = value
+        self.debug_mode = value
diff --git a/scripts/json_utils.py b/scripts/json_utils.py
index b3ffe4b9ab..9f26970eaa 100644
--- a/scripts/json_utils.py
+++ b/scripts/json_utils.py
@@ -88,7 +88,7 @@ def fix_invalid_escape(json_str: str, error_message: str) -> str:
             json.loads(json_str)
             return json_str
         except json.JSONDecodeError as e:
-            if cfg.debug:
+            if cfg.debug_mode:
                 print('json loads error - fix invalid escape', e)
             error_message = str(e)
     return json_str
@@ -103,12 +103,12 @@ def correct_json(json_str: str) -> str:
     """
 
     try:
-        if cfg.debug:
+        if cfg.debug_mode:
             print("json", json_str)
         json.loads(json_str)
         return json_str
     except json.JSONDecodeError as e:
-        if cfg.debug:
+        if cfg.debug_mode:
             print('json loads error', e)
         error_message = str(e)
         if error_message.startswith('Invalid \\escape'):
@@ -119,7 +119,7 @@ def correct_json(json_str: str) -> str:
             json.loads(json_str)
             return json_str
         except json.JSONDecodeError as e:
-            if cfg.debug:
+            if cfg.debug_mode:
                 print('json loads error - add quotes', e)
             error_message = str(e)
         if balanced_str := balance_braces(json_str):
diff --git a/scripts/main.py b/scripts/main.py
index 844c2375d2..34750fa04d 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -285,9 +285,6 @@ def parse_arguments():
         print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_smart_llm_model(cfg.fast_llm_model)
 
-    if args.debug:
-        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
-        cfg.set_debug_mode(True)
 
 
 # TODO: fill in llm values here
@@ -318,7 +315,7 @@ while True:
             user_input,
             full_message_history,
             memory,
-            cfg.fast_token_limit, cfg.debug) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
+            cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
 
     # Print Assistant thoughts
     print_assistant_thoughts(assistant_reply)

From 8cf58d00cfb43ece4236a35e7b5ea1d2fe359685 Mon Sep 17 00:00:00 2001
From: Sma Das
Date: Mon, 10 Apr 2023 10:57:47 -0400
Subject: [PATCH 2/5] Removed unneeded imports

---
 scripts/main.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/scripts/main.py b/scripts/main.py
index f96afeb163..b433eb0ea5 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -8,8 +8,6 @@ from colorama import Fore, Style
 from spinner import Spinner
 import time
 import speak
-from enum import Enum, auto
-import sys
 from config import Config
 from json_parser import fix_and_parse_json
 from ai_config import AIConfig

From d12da33e55ec026be8cc5efdcaab4172e8d5631e Mon Sep 17 00:00:00 2001
From: Andy Melnikov
Date: Mon, 10 Apr 2023 18:46:40 +0200
Subject: [PATCH 3/5] Fix flake8 W293 and W391

---
 scripts/ai_config.py   | 1 -
 scripts/browse.py      | 2 +-
 scripts/chat.py        | 4 ++--
 scripts/json_parser.py | 6 +++---
 scripts/main.py        | 1 -
 5 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/scripts/ai_config.py b/scripts/ai_config.py
index 1d5832c182..2a4854cb97 100644
--- a/scripts/ai_config.py
+++ b/scripts/ai_config.py
@@ -92,4 +92,3 @@ class AIConfig:
 
         full_prompt += f"\n\n{data.load_prompt()}"
         return full_prompt
-
diff --git a/scripts/browse.py b/scripts/browse.py
index 09f376a70a..b0c745ef42 100644
--- a/scripts/browse.py
+++ b/scripts/browse.py
@@ -15,7 +15,7 @@ def scrape_text(url):
     # Most basic check if the URL is valid:
     if not url.startswith('http'):
         return "Error: Invalid URL"
-    
+
     # Restrict access to local files
     if check_local_file_access(url):
         return "Error: Access to local files is restricted"
diff --git a/scripts/chat.py b/scripts/chat.py
index c00e4d4a60..23e5b50149 100644
--- a/scripts/chat.py
+++ b/scripts/chat.py
@@ -63,10 +63,10 @@ def chat_with_ai(
             """
             model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
             # Reserve 1000 tokens for the response
-            
+
            if cfg.debug:
                 print(f"Token limit: {token_limit}")
-            
+
             send_token_limit = token_limit - 1000
 
             relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)
diff --git a/scripts/json_parser.py b/scripts/json_parser.py
index 1fd6824408..8c17dfa252 100644
--- a/scripts/json_parser.py
+++ b/scripts/json_parser.py
@@ -71,11 +71,11 @@ def fix_and_parse_json(
                 return json_str
         else:
             raise e
-    
-    
+
+
 def fix_json(json_str: str, schema: str) -> str:
     """Fix the given JSON string to make it parseable and fully complient with the provided schema."""
-    
+
     # Try to fix the JSON using gpt:
     function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
     args = [f"'''{json_str}'''", f"'''{schema}'''"]
diff --git a/scripts/main.py b/scripts/main.py
index 8661bfad14..217461188d 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -415,4 +415,3 @@ while True:
             chat.create_chat_message(
                 "system", "Unable to execute command"))
         print_to_console("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
-

From b90f355e7d31cffd0969f9bdd292f862d16e5a9b Mon Sep 17 00:00:00 2001
From: onekum <55006697+onekum@users.noreply.github.com>
Date: Tue, 11 Apr 2023 05:01:02 -0400
Subject: [PATCH 4/5] Change "an" to "a"

Grammar error: In English, only words starting with (a, e, i, o, u) get "an".
---
 scripts/main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/main.py b/scripts/main.py
index 8661bfad14..7faf1d4293 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -170,7 +170,7 @@ def load_variables(config_file="config.yaml"):
         documents = yaml.dump(config, file)
 
     prompt = data.load_prompt()
-    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
+    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as a LLM and pursue simple strategies with no legal complications."""
 
     # Construct full prompt
     full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"

From b06974904c5ce48da1681ef2d8a362cde59e90de Mon Sep 17 00:00:00 2001
From: kinance
Date: Tue, 11 Apr 2023 19:26:23 +0900
Subject: [PATCH 5/5] Remove duplicates of set debug mode func

---
 scripts/config.py | 3 ---
 scripts/main.py   | 4 ----
 2 files changed, 7 deletions(-)

diff --git a/scripts/config.py b/scripts/config.py
index 27cc946cf0..c9a285ac2c 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -89,9 +89,6 @@ class Config(metaclass=Singleton):
         """Set the speak mode value."""
         self.speak_mode = value
 
-    def set_debug_mode(self, value: bool):
-        self.debug_mode = value
-
     def set_fast_llm_model(self, value: str):
         """Set the fast LLM model value."""
         self.fast_llm_model = value
diff --git a/scripts/main.py b/scripts/main.py
index 8661bfad14..6afcdf55b6 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -288,10 +288,6 @@ def parse_arguments():
         print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_speak_mode(True)
 
-    if args.debug:
-        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
-        cfg.set_debug_mode(True)
-
     if args.gpt3only:
         print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_smart_llm_model(cfg.fast_llm_model)