From 947d27a9edcc1ab6116cf8eed1f5e50a0d416dbf Mon Sep 17 00:00:00 2001 From: Drikus Roor Date: Wed, 12 Apr 2023 22:42:15 +0200 Subject: [PATCH 1/7] docs: Update README.md with the flake8 command used in the CI --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 87b5d1be6f..b5d9ac4ec5 100644 --- a/README.md +++ b/README.md @@ -342,7 +342,9 @@ coverage run -m unittest discover tests ## Run linter -This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. To run the linter, run the following command: +This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We currently use the following rules: `E303,W293,W291,W292,E305`. See the [flake8 rules](https://www.flake8rules.com/) for more information. + +To run the linter, run the following command: ``` flake8 scripts/ tests/ From 8ff36bb8ba5663aa7ee12f365fc89fd101d9aee6 Mon Sep 17 00:00:00 2001 From: Drikus Roor Date: Wed, 12 Apr 2023 22:55:20 +0200 Subject: [PATCH 2/7] lint: Add rule E231 to the flake8 linting job --- .github/workflows/ci.yml | 2 +- README.md | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 070df794b9..de21400576 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,7 +32,7 @@ jobs: - name: Lint with flake8 continue-on-error: false - run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305 + run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231 - name: Run unittest tests with coverage run: | diff --git a/README.md b/README.md index b5d9ac4ec5..82d489b13e 100644 --- a/README.md +++ b/README.md @@ -342,7 +342,7 @@ coverage run -m unittest discover tests ## Run linter -This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We currently use the following rules: `E303,W293,W291,W292,E305`. See the [flake8 rules](https://www.flake8rules.com/) for more information. +This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We currently use the following rules: `E303,W293,W291,W292,E305,E231`. See the [flake8 rules](https://www.flake8rules.com/) for more information. To run the linter, run the following command: @@ -350,5 +350,5 @@ To run the linter, run the following command: flake8 scripts/ tests/ # Or, if you want to run flake8 with the same configuration as the CI: -flake8 scripts/ tests/ --select E303,W293,W291,W292,E305 -``` +flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231 +``` \ No newline at end of file From 4afd0a3714e3fdc22a0dc16920869f3df8fa1d5e Mon Sep 17 00:00:00 2001 From: Drikus Roor Date: Wed, 12 Apr 2023 22:55:42 +0200 Subject: [PATCH 3/7] lint: Fix E231 flake8 linting errors --- scripts/config.py | 2 +- tests/test_json_parser.py | 4 ++-- tests/unit/json_tests.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/config.py b/scripts/config.py index ebf1b08b22..01505b22dd 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -73,7 +73,7 @@ class Config(metaclass=Singleton): # User agent headers to use when browsing web # Some websites might just completely deny request with an error code if no user agent was found. 
- self.user_agent_header = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"} + self.user_agent_header = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"} self.redis_host = os.getenv("REDIS_HOST", "localhost") self.redis_port = os.getenv("REDIS_PORT", "6379") self.redis_password = os.getenv("REDIS_PASSWORD", "") diff --git a/tests/test_json_parser.py b/tests/test_json_parser.py index b8cb2680d4..0f2c6488c6 100644 --- a/tests/test_json_parser.py +++ b/tests/test_json_parser.py @@ -50,7 +50,7 @@ class TestParseJson(unittest.TestCase): good_obj = { "command": { "name": "browse_website", - "args":{ + "args": { "url": "https://github.com/Torantulino/Auto-GPT" } }, @@ -89,7 +89,7 @@ class TestParseJson(unittest.TestCase): good_obj = { "command": { "name": "browse_website", - "args":{ + "args": { "url": "https://github.com/Torantulino/Auto-GPT" } }, diff --git a/tests/unit/json_tests.py b/tests/unit/json_tests.py index 1edbaeaf36..3320ad5e9a 100644 --- a/tests/unit/json_tests.py +++ b/tests/unit/json_tests.py @@ -52,7 +52,7 @@ class TestParseJson(unittest.TestCase): good_obj = { "command": { "name": "browse_website", - "args":{ + "args": { "url": "https://github.com/Torantulino/Auto-GPT" } }, @@ -91,7 +91,7 @@ class TestParseJson(unittest.TestCase): good_obj = { "command": { "name": "browse_website", - "args":{ + "args": { "url": "https://github.com/Torantulino/Auto-GPT" } }, From 04dc0f7149d29ea284e55ff19b0178db2b706a40 Mon Sep 17 00:00:00 2001 From: Drikus Roor Date: Wed, 12 Apr 2023 23:04:59 +0200 Subject: [PATCH 4/7] lint: Add flake8 rule E302 to the flake8 workflow job --- .github/workflows/ci.yml | 2 +- README.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index de21400576..0b90b55d34 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,7 +32,7 @@ jobs: - name: Lint with flake8 continue-on-error: false - run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231 + run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302 - name: Run unittest tests with coverage run: | diff --git a/README.md b/README.md index 82d489b13e..51649bd66a 100644 --- a/README.md +++ b/README.md @@ -342,7 +342,7 @@ coverage run -m unittest discover tests ## Run linter -This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We currently use the following rules: `E303,W293,W291,W292,E305,E231`. See the [flake8 rules](https://www.flake8rules.com/) for more information. +This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`. See the [flake8 rules](https://www.flake8rules.com/) for more information. 
To run the linter, run the following command: @@ -350,5 +350,5 @@ To run the linter, run the following command: flake8 scripts/ tests/ # Or, if you want to run flake8 with the same configuration as the CI: -flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231 +flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302 ``` \ No newline at end of file From d1ea6cf002fb9b7747221666ba5593108cd48984 Mon Sep 17 00:00:00 2001 From: Drikus Roor Date: Wed, 12 Apr 2023 23:05:14 +0200 Subject: [PATCH 5/7] lint: Fix all E302 linting errors --- scripts/agent_manager.py | 1 + scripts/ai_config.py | 1 + scripts/browse.py | 16 ++++++++++++++++ scripts/call_ai_function.py | 2 ++ scripts/chat.py | 1 + scripts/commands.py | 3 +++ scripts/data.py | 1 + scripts/file_operations.py | 1 + scripts/image_gen.py | 1 + scripts/llm_utils.py | 1 + scripts/logger.py | 1 + scripts/main.py | 4 ++++ scripts/memory/__init__.py | 2 ++ scripts/speak.py | 4 ++++ scripts/token_counter.py | 2 ++ tests/integration/memory_tests.py | 1 + tests/local_cache_test.py | 2 ++ tests/test_config.py | 1 + tests/test_json_parser.py | 1 + tests/unit/json_tests.py | 1 + 20 files changed, 47 insertions(+) diff --git a/scripts/agent_manager.py b/scripts/agent_manager.py index a0e5f16482..191ab838a3 100644 --- a/scripts/agent_manager.py +++ b/scripts/agent_manager.py @@ -6,6 +6,7 @@ agents = {} # key, (task, full_message_history, model) # Create new GPT agent # TODO: Centralise use of create_chat_completion() to globally enforce token limit + def create_agent(task, prompt, model): """Create a new agent and return its key""" global next_key diff --git a/scripts/ai_config.py b/scripts/ai_config.py index bd373944fc..ee4b1fda23 100644 --- a/scripts/ai_config.py +++ b/scripts/ai_config.py @@ -2,6 +2,7 @@ import yaml import data import os + class AIConfig: """ A class object that contains the configuration information for the AI diff --git a/scripts/browse.py b/scripts/browse.py index 9e93c55a33..a5b167c928 100644 --- a/scripts/browse.py +++ b/scripts/browse.py @@ -21,12 +21,28 @@ def sanitize_url(url): return urljoin(url, urlparse(url).path) +# Function to make a request with a specified timeout and handle exceptions +def make_request(url, timeout=10): + try: + response = requests.get(url, headers=cfg.user_agent_header, timeout=timeout) + response.raise_for_status() + return response + except requests.exceptions.RequestException as e: + return "Error: " + str(e) + + # Define and check for local file address prefixes def check_local_file_access(url): local_prefixes = ['file:///', 'file://localhost', 'http://localhost', 'https://localhost'] return any(url.startswith(prefix) for prefix in local_prefixes) +def scrape_text(url): + """Scrape text from a webpage""" + # Basic check if the URL is valid + if not url.startswith('http'): + return "Error: Invalid URL" + def get_response(url, headers=cfg.user_agent_header, timeout=10): try: # Restrict access to local files diff --git a/scripts/call_ai_function.py b/scripts/call_ai_function.py index f823865869..6f1d6ceee8 100644 --- a/scripts/call_ai_function.py +++ b/scripts/call_ai_function.py @@ -3,6 +3,8 @@ from config import Config cfg = Config() from llm_utils import create_chat_completion + + # This is a magic function that can do anything with no-code. See # https://github.com/Torantulino/AI-Functions for more info. 
def call_ai_function(function, args, description, model=None): diff --git a/scripts/chat.py b/scripts/chat.py index e16cee3837..2b7c34b5c4 100644 --- a/scripts/chat.py +++ b/scripts/chat.py @@ -9,6 +9,7 @@ import logging cfg = Config() + def create_chat_message(role, content): """ Create a chat message with the given role and content. diff --git a/scripts/commands.py b/scripts/commands.py index 3966e86ab8..fe6f6c30e9 100644 --- a/scripts/commands.py +++ b/scripts/commands.py @@ -24,6 +24,7 @@ def is_valid_int(value): except ValueError: return False + def get_command(response): """Parse the response and return the command name and arguments""" try: @@ -135,6 +136,7 @@ def google_search(query, num_results=8): return json.dumps(search_results, ensure_ascii=False, indent=4) + def google_official_search(query, num_results=8): """Return the results of a google search using the official Google API""" from googleapiclient.discovery import build @@ -171,6 +173,7 @@ def google_official_search(query, num_results=8): # Return the list of search result URLs return search_results_links + def browse_website(url, question): """Browse a website and return the summary and links""" summary = get_text_summary(url, question) diff --git a/scripts/data.py b/scripts/data.py index f80c2875d8..088fd51ce1 100644 --- a/scripts/data.py +++ b/scripts/data.py @@ -1,6 +1,7 @@ import os from pathlib import Path + def load_prompt(): """Load the prompt from data/prompt.txt""" try: diff --git a/scripts/file_operations.py b/scripts/file_operations.py index 7b48c13487..2999bc24f1 100644 --- a/scripts/file_operations.py +++ b/scripts/file_operations.py @@ -65,6 +65,7 @@ def delete_file(filename): except Exception as e: return "Error: " + str(e) + def search_files(directory): found_files = [] diff --git a/scripts/image_gen.py b/scripts/image_gen.py index 4481696ffa..6c27df3f35 100644 --- a/scripts/image_gen.py +++ b/scripts/image_gen.py @@ -11,6 +11,7 @@ cfg = Config() working_directory = "auto_gpt_workspace" + def generate_image(prompt): filename = str(uuid.uuid4()) + ".jpg" diff --git a/scripts/llm_utils.py b/scripts/llm_utils.py index 35cc5ce040..16739dddf0 100644 --- a/scripts/llm_utils.py +++ b/scripts/llm_utils.py @@ -4,6 +4,7 @@ cfg = Config() openai.api_key = cfg.openai_api_key + # Overly simple abstraction until we create something better def create_chat_completion(messages, model=None, temperature=cfg.temperature, max_tokens=None)->str: """Create a chat completion using the OpenAI API""" diff --git a/scripts/logger.py b/scripts/logger.py index 85dde81331..42a4387851 100644 --- a/scripts/logger.py +++ b/scripts/logger.py @@ -151,6 +151,7 @@ class TypingConsoleHandler(logging.StreamHandler): except Exception: self.handleError(record) + class ConsoleHandler(logging.StreamHandler): def emit(self, record): msg = self.format(record) diff --git a/scripts/main.py b/scripts/main.py index 81f560b216..4a89e8e127 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -20,6 +20,7 @@ import logging cfg = Config() + def check_openai_api_key(): """Check if the OpenAI API key is set in config.py or as an environment variable.""" if not cfg.openai_api_key: @@ -30,6 +31,7 @@ def check_openai_api_key(): print("You can get your key from https://beta.openai.com/account/api-keys") exit(1) + def attempt_to_fix_json_by_finding_outermost_brackets(json_string): if cfg.speak_mode and cfg.debug_mode: speak.say_text("I have received an invalid JSON response from the OpenAI API. 
Trying to fix it now.") @@ -58,6 +60,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string): return json_string + def print_assistant_thoughts(assistant_reply): """Prints the assistant's thoughts to the console""" global ai_name @@ -262,6 +265,7 @@ def prompt_user(): config = AIConfig(ai_name, ai_role, ai_goals) return config + def parse_arguments(): """Parses the arguments passed to the script""" global cfg diff --git a/scripts/memory/__init__.py b/scripts/memory/__init__.py index a07f9fd88d..7eee1b3da1 100644 --- a/scripts/memory/__init__.py +++ b/scripts/memory/__init__.py @@ -18,6 +18,7 @@ except ImportError: print("Pinecone not installed. Skipping import.") PineconeMemory = None + def get_memory(cfg, init=False): memory = None if cfg.memory_backend == "pinecone": @@ -41,6 +42,7 @@ def get_memory(cfg, init=False): memory.clear() return memory + def get_supported_memory_backends(): return supported_memory diff --git a/scripts/speak.py b/scripts/speak.py index 64054e3c58..7a17873c5e 100644 --- a/scripts/speak.py +++ b/scripts/speak.py @@ -31,6 +31,7 @@ tts_headers = { mutex_lock = Lock() # Ensure only one sound is played at a time queue_semaphore = Semaphore(1) # The amount of sounds to queue before blocking the main thread + def eleven_labs_speech(text, voice_index=0): """Speak text using elevenlabs.io's API""" tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format( @@ -51,6 +52,7 @@ def eleven_labs_speech(text, voice_index=0): print("Response content:", response.content) return False + def gtts_speech(text): tts = gtts.gTTS(text) with mutex_lock: @@ -58,6 +60,7 @@ def gtts_speech(text): playsound("speech.mp3", True) os.remove("speech.mp3") + def macos_tts_speech(text, voice_index=0): if voice_index == 0: os.system(f'say "{text}"') @@ -67,6 +70,7 @@ def macos_tts_speech(text, voice_index=0): else: os.system(f'say -v Samantha "{text}"') + def say_text(text, voice_index=0): def speak(): diff --git a/scripts/token_counter.py b/scripts/token_counter.py index 635d328638..8aecf1681b 100644 --- a/scripts/token_counter.py +++ b/scripts/token_counter.py @@ -1,6 +1,7 @@ import tiktoken from typing import List, Dict + def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5-turbo-0301") -> int: """ Returns the number of tokens used by a list of messages. @@ -41,6 +42,7 @@ def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5 num_tokens += 3 # every reply is primed with <|start|>assistant<|message|> return num_tokens + def count_string_tokens(string: str, model_name: str) -> int: """ Returns the number of tokens in a text string. 
diff --git a/tests/integration/memory_tests.py b/tests/integration/memory_tests.py index 5f1611be96..d0c3096280 100644 --- a/tests/integration/memory_tests.py +++ b/tests/integration/memory_tests.py @@ -8,6 +8,7 @@ sys.path.append(str(Path(__file__).resolve().parent.parent.parent / 'scripts')) from config import Config from memory.local import LocalCache + class TestLocalCache(unittest.TestCase): def random_string(self, length): diff --git a/tests/local_cache_test.py b/tests/local_cache_test.py index d1f1ef0843..0352624ea2 100644 --- a/tests/local_cache_test.py +++ b/tests/local_cache_test.py @@ -4,6 +4,7 @@ import sys sys.path.append(os.path.abspath('../scripts')) from memory.local import LocalCache + def MockConfig(): return type('MockConfig', (object,), { 'debug_mode': False, @@ -12,6 +13,7 @@ def MockConfig(): 'memory_index': 'auto-gpt', }) + class TestLocalCache(unittest.TestCase): def setUp(self): diff --git a/tests/test_config.py b/tests/test_config.py index c1310b7098..ba8381e1e7 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,6 +1,7 @@ import unittest from scripts.config import Config + class TestConfig(unittest.TestCase): def test_singleton(self): diff --git a/tests/test_json_parser.py b/tests/test_json_parser.py index 0f2c6488c6..438e047b44 100644 --- a/tests/test_json_parser.py +++ b/tests/test_json_parser.py @@ -3,6 +3,7 @@ import tests.context from scripts.json_parser import fix_and_parse_json + class TestParseJson(unittest.TestCase): def test_valid_json(self): # Test that a valid JSON string is parsed correctly diff --git a/tests/unit/json_tests.py b/tests/unit/json_tests.py index 3320ad5e9a..4f3267217a 100644 --- a/tests/unit/json_tests.py +++ b/tests/unit/json_tests.py @@ -5,6 +5,7 @@ import sys sys.path.append(os.path.abspath('../scripts')) from json_parser import fix_and_parse_json + class TestParseJson(unittest.TestCase): def test_valid_json(self): # Test that a valid JSON string is parsed correctly From 62edc148a8b43dfe4b30c5ca9de3c462cd366a46 Mon Sep 17 00:00:00 2001 From: Drikus Roor Date: Thu, 13 Apr 2023 10:56:02 +0200 Subject: [PATCH 6/7] chore: Remove functions that had been removed on the master branch recently --- scripts/browse.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/scripts/browse.py b/scripts/browse.py index a5b167c928..9e93c55a33 100644 --- a/scripts/browse.py +++ b/scripts/browse.py @@ -21,28 +21,12 @@ def sanitize_url(url): return urljoin(url, urlparse(url).path) -# Function to make a request with a specified timeout and handle exceptions -def make_request(url, timeout=10): - try: - response = requests.get(url, headers=cfg.user_agent_header, timeout=timeout) - response.raise_for_status() - return response - except requests.exceptions.RequestException as e: - return "Error: " + str(e) - - # Define and check for local file address prefixes def check_local_file_access(url): local_prefixes = ['file:///', 'file://localhost', 'http://localhost', 'https://localhost'] return any(url.startswith(prefix) for prefix in local_prefixes) -def scrape_text(url): - """Scrape text from a webpage""" - # Basic check if the URL is valid - if not url.startswith('http'): - return "Error: Invalid URL" - def get_response(url, headers=cfg.user_agent_header, timeout=10): try: # Restrict access to local files From abe01ab81e0428b6b9cc1844df8e2130c9ffe3f6 Mon Sep 17 00:00:00 2001 From: Drikus Roor Date: Thu, 13 Apr 2023 11:05:36 +0200 Subject: [PATCH 7/7] fix: Fix flake8 linting errors --- scripts/execute_code.py | 1 + scripts/logger.py 
| 8 ++++---- scripts/memory/base.py | 1 - 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/execute_code.py b/scripts/execute_code.py index 2c92903cf4..dbd62c2265 100644 --- a/scripts/execute_code.py +++ b/scripts/execute_code.py @@ -67,6 +67,7 @@ def execute_python_file(file): except Exception as e: return f"Error: {str(e)}" + def execute_shell(command_line): current_dir = os.getcwd() diff --git a/scripts/logger.py b/scripts/logger.py index 42a4387851..f5e94687b0 100644 --- a/scripts/logger.py +++ b/scripts/logger.py @@ -161,11 +161,11 @@ class ConsoleHandler(logging.StreamHandler): self.handleError(record) -''' -Allows to handle custom placeholders 'title_color' and 'message_no_color'. -To use this formatter, make sure to pass 'color', 'title' as log extras. -''' class AutoGptFormatter(logging.Formatter): + """ + Allows to handle custom placeholders 'title_color' and 'message_no_color'. + To use this formatter, make sure to pass 'color', 'title' as log extras. + """ def format(self, record: LogRecord) -> str: if (hasattr(record, 'color')): record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL diff --git a/scripts/memory/base.py b/scripts/memory/base.py index 1be7b3ddcb..96cf3df135 100644 --- a/scripts/memory/base.py +++ b/scripts/memory/base.py @@ -4,7 +4,6 @@ from config import AbstractSingleton, Config import openai cfg = Config() -cfg = Config() def get_ada_embedding(text): text = text.replace("\n", " ")
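
A possible follow-up, not included in the patches above: the `--select` rule list is now duplicated between `.github/workflows/ci.yml` and the example command in the README. flake8 can also read its rule selection from a project-level configuration file (`.flake8`, `setup.cfg`, or `tox.ini`), so a hypothetical `.flake8` at the repository root along these lines would give CI and local runs a single source of truth:

```
[flake8]
# Mirror of the --select list used in the CI workflow and the README
select = E303,W293,W291,W292,E305,E231,E302
```

With such a file in place, a plain `flake8 scripts/ tests/` applies the same rules locally and in CI, and the flag no longer has to be kept in sync by hand.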