From 739b0ed96bb5e72d47730359f9a277a2402673b4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Kiss=20P=C3=A9ter?=
Date: Tue, 11 Apr 2023 14:04:37 +0200
Subject: [PATCH 01/14] Improve Dockerfile:

- Use smaller base image
- Make it smaller by not saving cache (1.15GB -> 356MB)
---
 Dockerfile | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 146a374717..4d264c88c8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,9 +1,7 @@
-FROM python:3.11
-
+FROM python:3.11-slim
+ENV PIP_NO_CACHE_DIR=yes
 WORKDIR /app
-COPY scripts/ /app
-COPY requirements.txt /app
-
+COPY requirements.txt .
 RUN pip install -r requirements.txt
-
-CMD ["python", "main.py"]
+COPY scripts/ .
+ENTRYPOINT ["python", "main.py"]

From dbb78b636f8c91a651bfbea5ebdd06e4df6cae55 Mon Sep 17 00:00:00 2001
From: PierreBastiani
Date: Tue, 11 Apr 2023 14:57:29 +0100
Subject: [PATCH 02/14] check for authorise 'y' without trailing space

---
 scripts/main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/main.py b/scripts/main.py
index d84e150850..87a8f4faba 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -348,7 +348,7 @@ while True:
             flush=True)
         while True:
             console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
-            if console_input.lower() == "y":
+            if console_input.lower().rstrip() == "y":
                 user_input = "GENERATE NEXT COMMAND JSON"
                 break
             elif console_input.lower().startswith("y -"):

From 7736762b5aa70ee6e0e1ddf06c45b867b125f85b Mon Sep 17 00:00:00 2001
From: vadi
Date: Wed, 12 Apr 2023 16:38:51 +1000
Subject: [PATCH 03/14] Fix #840

- Add defensive coding for local memory to load the JSON file that was
  gitignored at 54101c79973ca5ca8ccd7e1ac59856cb282c57d8
- Added unit test placeholder for local cache test
- Removed unused imports from local cache unit test placeholder
---
 scripts/memory/local.py   | 16 +++++++++---
 tests/local_cache_test.py | 52 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 65 insertions(+), 3 deletions(-)
 create mode 100644 tests/local_cache_test.py

diff --git a/scripts/memory/local.py b/scripts/memory/local.py
index 8dc90021ff..bdc4c9110c 100644
--- a/scripts/memory/local.py
+++ b/scripts/memory/local.py
@@ -28,10 +28,20 @@ class LocalCache(MemoryProviderSingleton):
     def __init__(self, cfg) -> None:
         self.filename = f"{cfg.memory_index}.json"
         if os.path.exists(self.filename):
-            with open(self.filename, 'rb') as f:
-                loaded = orjson.loads(f.read())
-                self.data = CacheContent(**loaded)
+            try:
+                with open(self.filename, 'r+b') as f:
+                    file_content = f.read()
+                    if not file_content.strip():
+                        file_content = b'{}'
+                        f.write(file_content)
+
+                    loaded = orjson.loads(file_content)
+                    self.data = CacheContent(**loaded)
+            except orjson.JSONDecodeError:
+                print(f"Error: The file '{self.filename}' is not in JSON format.")
+                self.data = CacheContent()
         else:
+            print(f"Warning: The file '{self.filename}' does not exist. Local memory would not be saved to a file.")
             self.data = CacheContent()
 
     def add(self, text: str):

diff --git a/tests/local_cache_test.py b/tests/local_cache_test.py
new file mode 100644
index 0000000000..ac045a5549
--- /dev/null
+++ b/tests/local_cache_test.py
@@ -0,0 +1,52 @@
+import os
+import sys
+import unittest
+# Probably a better way:
+sys.path.append(os.path.abspath('../scripts'))
+from memory.local import LocalCache
+
+def MockConfig():
+    return type('MockConfig', (object,), {
+        'debug_mode': False,
+        'continuous_mode': False,
+        'speak_mode': False,
+        'memory_index': 'auto-gpt',
+    })
+
+class TestLocalCache(unittest.TestCase):
+
+    def setUp(self):
+        self.cfg = MockConfig()
+        self.cache = LocalCache(self.cfg)
+
+    def test_add(self):
+        text = "Sample text"
+        self.cache.add(text)
+        self.assertIn(text, self.cache.data.texts)
+
+    def test_clear(self):
+        self.cache.clear()
+        self.assertEqual(self.cache.data, [""])
+
+    def test_get(self):
+        text = "Sample text"
+        self.cache.add(text)
+        result = self.cache.get(text)
+        self.assertEqual(result, [text])
+
+    def test_get_relevant(self):
+        text1 = "Sample text 1"
+        text2 = "Sample text 2"
+        self.cache.add(text1)
+        self.cache.add(text2)
+        result = self.cache.get_relevant(text1, 1)
+        self.assertEqual(result, [text1])
+
+    def test_get_stats(self):
+        text = "Sample text"
+        self.cache.add(text)
+        stats = self.cache.get_stats()
+        self.assertEqual(stats, (1, self.cache.data.embeddings.shape))
+
+if __name__ == '__main__':
+    unittest.main()

From fa5b71c022487896d3b07a1574d1faf6ebbeca19 Mon Sep 17 00:00:00 2001
From: Drikus Roor
Date: Tue, 11 Apr 2023 20:23:41 +0200
Subject: [PATCH 04/14] docs: Update README.md about running tests and coverage

---
 README.md | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index dc3386f580..8862ade7c2 100644
--- a/README.md
+++ b/README.md
@@ -96,9 +96,10 @@ pip install -r requirements.txt
 ```
 
 4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
-   - Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
-   - Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
-   - If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_AZURE_API_BASE`, `OPENAI_AZURE_API_VERSION` and `OPENAI_AZURE_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section. Additionally you need separate deployments for both embeddings and chat. Add their ID values to `OPENAI_AZURE_CHAT_DEPLOYMENT_ID` and `OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID` respectively
+
+- Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
+- Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
+- If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_AZURE_API_BASE`, `OPENAI_AZURE_API_VERSION` and `OPENAI_AZURE_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section. Additionally you need separate deployments for both embeddings and chat. Add their ID values to `OPENAI_AZURE_CHAT_DEPLOYMENT_ID` and `OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID` respectively
 
 ## 🔧 Usage
 
 1. Run the `main.py` Python script in your terminal: _(Type this into your CMD window)_
 
 ```
 python scripts/main.py
 ```
 
 2. After each of AUTO-GPT's actions, type "NEXT COMMAND" to authorise them to continue.
 
 3. To exit the program, type "exit" and press Enter.
 
 ### Logs
-You will find activity and error logs in the folder ```./logs```
+
+You will find activity and error logs in the folder `./logs`
 
 To output debug logs:
+
 ```
 python scripts/main.py --debug
 ```

From dc0a94bba36549411b108763cd11ba82e94fbf7e Mon Sep 17 00:00:00 2001
From: Drikus Roor
Date: Wed, 12 Apr 2023 09:02:05 +0200
Subject: [PATCH 05/14] ci: Add a flake8 linting job

---
 .github/workflows/unit_tests.yml | 3 +++
 requirements.txt                 | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml
index 5973dd029a..dda45e6c17 100644
--- a/.github/workflows/unit_tests.yml
+++ b/.github/workflows/unit_tests.yml
@@ -30,6 +30,9 @@ jobs:
         python -m pip install --upgrade pip
         pip install -r requirements.txt
 
+    - name: Lint with flake8
+      run: flake8 scripts/ tests/
+
     - name: Run unittest tests with coverage
       run: |
         coverage run --source=scripts -m unittest discover tests

diff --git a/requirements.txt b/requirements.txt
index b196c3d788..b864c1d3e8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -15,4 +15,5 @@ pinecone-client==2.2.1
 redis
 orjson
 Pillow
-coverage
\ No newline at end of file
+coverage
+flake8
\ No newline at end of file

From 87d465a8f1e2cd85772dfdbefe2b0aabf47d7f0d Mon Sep 17 00:00:00 2001
From: Drikus Roor
Date: Wed, 12 Apr 2023 09:02:28 +0200
Subject: [PATCH 06/14] chore: Rename unit test workflow file to ci.yml

---
 .github/workflows/{unit_tests.yml => ci.yml} | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
 rename .github/workflows/{unit_tests.yml => ci.yml} (98%)

diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/ci.yml
similarity index 98%
rename from .github/workflows/unit_tests.yml
rename to .github/workflows/ci.yml
index dda45e6c17..a06e5ff9c8 100644
--- a/.github/workflows/unit_tests.yml
+++ b/.github/workflows/ci.yml
@@ -1,4 +1,4 @@
-name: Unit Tests
+name: Python CI
 
 on:
   push:

From 76cc0d2d743c99a5b954ca10dd30a6381877b758 Mon Sep 17 00:00:00 2001
From: Drikus Roor
Date: Wed, 12 Apr 2023 09:03:57 +0200
Subject: [PATCH 07/14] docs: Document flake8 linter

---
 README.md | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/README.md b/README.md
index 8862ade7c2..fe49b247bf 100644
--- a/README.md
+++ b/README.md
@@ -323,3 +323,11 @@ To run tests and see coverage, run the following command:
 ```
 coverage run -m unittest discover tests
 ```
+
+## Run linter
+
+This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. To run the linter, run the following command:
+
+```
+flake8 scripts/ tests/
+```
\ No newline at end of file

From 29d6ecd4d30510c6887bb21f0a21f355de47707f Mon Sep 17 00:00:00 2001
From: Drikus Roor
Date: Wed, 12 Apr 2023 09:07:03 +0200
Subject: [PATCH 08/14] ci: Allow flake8 failure since there are a lot of issues

---
 .github/workflows/ci.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index a06e5ff9c8..3d3628bc8d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -31,6 +31,7 @@ jobs:
         pip install -r requirements.txt
 
     - name: Lint with flake8
+      continue-on-error: true
       run: flake8 scripts/ tests/
 
     - name: Run unittest tests with coverage

From d780988554918d4ad0b4b4e8187bf34df1af5868 Mon Sep 17 00:00:00 2001
From: Drikus Roor
Date: Wed, 12 Apr 2023 16:02:18 +0200
Subject: [PATCH 09/14] chore: Add new lines to end of files

---
 .gitignore       | 2 +-
 main.py          | 2 +-
 tests/context.py | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.gitignore b/.gitignore
index aa0dceaa9e..cfa3b08b5d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,4 +18,4 @@ log.txt
 # Coverage reports
 .coverage
 coverage.xml
-htmlcov/
\ No newline at end of file
+htmlcov/

diff --git a/main.py b/main.py
index 5f044237e1..656c34ecb6 100644
--- a/main.py
+++ b/main.py
@@ -1 +1 @@
-from scripts.main import main
\ No newline at end of file
+from scripts.main import main

diff --git a/tests/context.py b/tests/context.py
index 2adb9dd6e4..b668c8dc20 100644
--- a/tests/context.py
+++ b/tests/context.py
@@ -2,4 +2,4 @@ import sys
 import os
 
 sys.path.insert(0, os.path.abspath(
-    os.path.join(os.path.dirname(__file__), '../scripts')))
\ No newline at end of file
+    os.path.join(os.path.dirname(__file__), '../scripts')))

From b5c71a1b8ef2a18da17f1e3cc03ee76c58cc0fc9 Mon Sep 17 00:00:00 2001
From: Pi
Date: Wed, 12 Apr 2023 20:21:06 +0100
Subject: [PATCH 10/14] Update 1.bug.yml

---
 .github/ISSUE_TEMPLATE/1.bug.yml | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml
index cf49ab5f83..e2404c763d 100644
--- a/.github/ISSUE_TEMPLATE/1.bug.yml
+++ b/.github/ISSUE_TEMPLATE/1.bug.yml
@@ -7,7 +7,19 @@ body:
       value: |
         Please provide a searchable summary of the issue in the title above ⬆️.
 
-        Thanks for contributing by creating an issue! ❤️
+        ⚠️ SUPER-busy repo, please help the volunteer maintainers.
+        The less time we spend here, the more time we spend building AutoGPT.
+
+        Please help us help you:
+        - Does it work on `stable` branch (https://github.com/Torantulino/Auto-GPT/tree/stable)?
+        - Does it work on current `master` (https://github.com/Torantulino/Auto-GPT/tree/master)?
+        - Search for existing issues, "add comment" is tidier than "new issue"
+        - Ask on our Discord (https://discord.gg/autogpt)
+        - Provide relevant info:
+          - Provide commit-hash (`git rev-parse HEAD` gets it)
+          - If it's a pip/packages issue, provide pip version, python version
+          - If it's a crash, provide traceback.
+
   - type: checkboxes
     attributes:
       label: Duplicates
@@ -32,8 +44,8 @@ body:
     attributes:
       label: Your prompt 📝
      description: |
-        Please provide the prompt you are using. You can find your last-used prompt in last_run_ai_settings.yaml.
+        If applicable, please provide the prompt you are using. You can find your last-used prompt in last_run_ai_settings.yaml.
       value: |
         ```yaml
         # Paste your prompt here
-        ```
\ No newline at end of file
+        ```

From 8d0d4135ea5c1400f3b44f8c94522b35f1245ffa Mon Sep 17 00:00:00 2001
From: Drikus Roor
Date: Wed, 12 Apr 2023 22:07:04 +0200
Subject: [PATCH 11/14] ci: Update flake8 command to ignore some issues.

We can later gradually make it stricter until we have no errors anymore.
---
 .github/workflows/ci.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 3d3628bc8d..06d6ed6477 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -32,7 +32,7 @@ jobs:
 
     - name: Lint with flake8
       continue-on-error: true
-      run: flake8 scripts/ tests/
+      run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305
 
     - name: Run unittest tests with coverage
       run: |

From 1f837968ff8c15559b1bb73c21e0cac8a30b5809 Mon Sep 17 00:00:00 2001
From: Drikus Roor
Date: Wed, 12 Apr 2023 22:12:25 +0200
Subject: [PATCH 12/14] fix: Fix flake8 errors based on the flake8 command with a narrower definition of errors

---
 scripts/ai_functions.py           | 2 --
 scripts/browse.py                 | 2 +-
 scripts/config.py                 | 2 +-
 scripts/file_operations.py        | 2 +-
 scripts/logger.py                 | 1 +
 scripts/memory/__init__.py        | 1 +
 scripts/speak.py                  | 4 ++--
 tests/integration/memory_tests.py | 1 +
 tests/local_cache_test.py         | 1 +
 tests/test_browse_scrape_text.py  | 2 --
 tests/test_json_parser.py         | 3 ---
 tests/unit/json_tests.py          | 3 ---
 12 files changed, 9 insertions(+), 15 deletions(-)

diff --git a/scripts/ai_functions.py b/scripts/ai_functions.py
index 8ad77441dc..782bb55871 100644
--- a/scripts/ai_functions.py
+++ b/scripts/ai_functions.py
@@ -45,8 +45,6 @@ def improve_code(suggestions: List[str], code: str) -> str:
     result_string = call_ai_function(function_string, args, description_string)
     return result_string
 
-
-
 def write_tests(code: str, focus: List[str]) -> str:
     """
     A function that takes in code and focus topics and returns a response from create chat completion api call.

diff --git a/scripts/browse.py b/scripts/browse.py
index c3fc066274..b936c5b197 100644
--- a/scripts/browse.py
+++ b/scripts/browse.py
@@ -41,7 +41,7 @@ def scrape_text(url):
     # Restrict access to local files
     if check_local_file_access(url):
         return "Error: Access to local files is restricted"
-    
+
     # Validate the input URL
     if not is_valid_url(url):
         # Sanitize the input URL

diff --git a/scripts/config.py b/scripts/config.py
index 6e44895436..255587d76f 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -61,7 +61,7 @@ class Config(metaclass=Singleton):
 
         self.use_mac_os_tts = False
         self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")
-        
+
         self.google_api_key = os.getenv("GOOGLE_API_KEY")
         self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")
 

diff --git a/scripts/file_operations.py b/scripts/file_operations.py
index c6066ef930..7b48c13487 100644
--- a/scripts/file_operations.py
+++ b/scripts/file_operations.py
@@ -80,4 +80,4 @@ def search_files(directory):
             relative_path = os.path.relpath(os.path.join(root, file), working_directory)
             found_files.append(relative_path)
 
-    return found_files
\ No newline at end of file
+    return found_files

diff --git a/scripts/logger.py b/scripts/logger.py
index a609e60275..5c7d68bb3b 100644
--- a/scripts/logger.py
+++ b/scripts/logger.py
@@ -159,6 +159,7 @@ class ConsoleHandler(logging.StreamHandler):
         except Exception:
             self.handleError(record)
 
+
 '''
 Allows to handle custom placeholders 'title_color' and 'message_no_color'.
 To use this formatter, make sure to pass 'color', 'title' as log extras.
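The docstring in the logger.py hunk above describes how the project's console formatter is meant to be used: callers pass `color` and `title` as log extras, and the formatter exposes `title_color` and `message_no_color` placeholders. As a rough, self-contained sketch of that pattern (the formatter class below is a simplified stand-in, not the actual class from scripts/logger.py), the call site might look like this:

```
import logging

from colorama import Fore, Style


class DemoTitleFormatter(logging.Formatter):
    """Simplified stand-in that mimics the placeholders described above."""

    def format(self, record: logging.LogRecord) -> str:
        # Build the custom placeholders from the 'color' and 'title' extras.
        color = getattr(record, "color", "")
        title = getattr(record, "title", "")
        record.title_color = f"{color}{title}{Style.RESET_ALL}"
        record.message_no_color = record.getMessage()
        return super().format(record)


logger = logging.getLogger("demo")
handler = logging.StreamHandler()
handler.setFormatter(DemoTitleFormatter("%(title_color)s %(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.INFO)

# 'color' and 'title' are passed as extras, as the docstring above advises.
logger.info("Browsing the repository...", extra={"title": "PLAN", "color": Fore.GREEN})
```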
diff --git a/scripts/memory/__init__.py b/scripts/memory/__init__.py
index 2900353ed9..a07f9fd88d 100644
--- a/scripts/memory/__init__.py
+++ b/scripts/memory/__init__.py
@@ -44,6 +44,7 @@ def get_memory(cfg, init=False):
 def get_supported_memory_backends():
     return supported_memory
 
+
 __all__ = [
     "get_memory",
     "LocalCache",

diff --git a/scripts/speak.py b/scripts/speak.py
index 2cc6a55897..64054e3c58 100644
--- a/scripts/speak.py
+++ b/scripts/speak.py
@@ -61,7 +61,7 @@ def gtts_speech(text):
 def macos_tts_speech(text, voice_index=0):
     if voice_index == 0:
         os.system(f'say "{text}"')
-    else: 
+    else:
         if voice_index == 1:
             os.system(f'say -v "Ava (Premium)" "{text}"')
         else:
@@ -79,7 +79,7 @@ def say_text(text, voice_index=0):
             success = eleven_labs_speech(text, voice_index)
             if not success:
                 gtts_speech(text)
-    
+
     queue_semaphore.release()
     queue_semaphore.acquire(True)
 

diff --git a/tests/integration/memory_tests.py b/tests/integration/memory_tests.py
index ed444d9196..5f1611be96 100644
--- a/tests/integration/memory_tests.py
+++ b/tests/integration/memory_tests.py
@@ -45,5 +45,6 @@ class TestLocalCache(unittest.TestCase):
         self.assertEqual(len(relevant_texts), k)
         self.assertIn(self.example_texts[1], relevant_texts)
 
+
 if __name__ == '__main__':
     unittest.main()

diff --git a/tests/local_cache_test.py b/tests/local_cache_test.py
index ac045a5549..d1f1ef0843 100644
--- a/tests/local_cache_test.py
+++ b/tests/local_cache_test.py
@@ -47,5 +47,6 @@ class TestLocalCache(unittest.TestCase):
         stats = self.cache.get_stats()
         self.assertEqual(stats, (1, self.cache.data.embeddings.shape))
 
+
 if __name__ == '__main__':
     unittest.main()

diff --git a/tests/test_browse_scrape_text.py b/tests/test_browse_scrape_text.py
index 5ecd7407c1..775eefcd25 100644
--- a/tests/test_browse_scrape_text.py
+++ b/tests/test_browse_scrape_text.py
@@ -33,8 +33,6 @@ Additional aspects:
 - The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text.
 """
 
-
-
 class TestScrapeText:
 
     # Tests that scrape_text() returns the expected text when given a valid URL.

diff --git a/tests/test_json_parser.py b/tests/test_json_parser.py
index 4561659e59..b8cb2680d4 100644
--- a/tests/test_json_parser.py
+++ b/tests/test_json_parser.py
@@ -66,8 +66,6 @@ class TestParseJson(unittest.TestCase):
         # Assert that this raises an exception:
         self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
 
-
-
     def test_invalid_json_leading_sentence_with_gpt(self):
         # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
         json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this.
@@ -108,6 +106,5 @@ class TestParseJson(unittest.TestCase):
 
         self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
 
-
 if __name__ == '__main__':
     unittest.main()

diff --git a/tests/unit/json_tests.py b/tests/unit/json_tests.py
index fdac9c2f77..1edbaeaf36 100644
--- a/tests/unit/json_tests.py
+++ b/tests/unit/json_tests.py
@@ -68,8 +68,6 @@ class TestParseJson(unittest.TestCase):
         # Assert that this raises an exception:
         self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
 
-
-
     def test_invalid_json_leading_sentence_with_gpt(self):
         # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
         json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this.
@@ -110,6 +108,5 @@ class TestParseJson(unittest.TestCase):
 
         self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
 
-
 if __name__ == '__main__':
     unittest.main()

From f4a481513ddce95775c07cce6813066a4b2cdd48 Mon Sep 17 00:00:00 2001
From: Drikus Roor
Date: Wed, 12 Apr 2023 22:13:02 +0200
Subject: [PATCH 13/14] ci: Set continue-on-error to false for flake8 lint job

---
 .github/workflows/ci.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 06d6ed6477..070df794b9 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -31,7 +31,7 @@ jobs:
         pip install -r requirements.txt
 
     - name: Lint with flake8
-      continue-on-error: true
+      continue-on-error: false
       run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305
 
     - name: Run unittest tests with coverage

From 639df44865dac0402c06f574c4b57068a47db261 Mon Sep 17 00:00:00 2001
From: Drikus Roor
Date: Wed, 12 Apr 2023 22:42:15 +0200
Subject: [PATCH 14/14] docs: Update README.md with the flake8 command used in the CI

---
 README.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/README.md b/README.md
index a2e14189e8..c9ef9d5c59 100644
--- a/README.md
+++ b/README.md
@@ -341,4 +341,7 @@ This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. To
 
 ```
 flake8 scripts/ tests/
+
+# Or, if you want to run flake8 with the same configuration as the CI:
+flake8 scripts/ tests/ --select E303,W293,W291,W292,E305
 ```
\ No newline at end of file