Date: Mon, 17 Apr 2023 03:21:46 +0200
Subject: [PATCH 38/51] Add Get Help header in README
---
README.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/README.md b/README.md
index 07a088e3cb..e2b3b4d285 100644
--- a/README.md
+++ b/README.md
@@ -4,6 +4,7 @@
[](https://github.com/Significant-Gravitas/Auto-GPT/stargazers)
[](https://twitter.com/SigGravitas)
+## 💡 Get help - [Q&A](https://github.com/Significant-Gravitas/Auto-GPT/discussions/categories/q-a) or [Discord 💬](https://discord.gg/autogpt)
From 9ffa587f6f1c96c76f82a55ae3d18aead5ff0b9a Mon Sep 17 00:00:00 2001
From: Void&Null <70048414+Void-n-Null@users.noreply.github.com>
Date: Sun, 16 Apr 2023 17:39:03 -0700
Subject: [PATCH 39/51] Implement new demo video into README
---
README.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index e2b3b4d285..7154801ecb 100644
--- a/README.md
+++ b/README.md
@@ -18,9 +18,9 @@ The `master` branch may often be in a **broken** state.
Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
-### Demo (30/03/2023):
+ Demo April 16th 2023
-https://user-images.githubusercontent.com/22963551/228855501-2f5777cf-755b-4407-a643-c7299e5b6419.mp4
+https://user-images.githubusercontent.com/70048414/232352935-55c6bf7c-3958-406e-8610-0913475a0b05.mp4
💖 Help Fund Auto-GPT's Development 💖
From 0409079983542a7babc50b1b0fd28a7a506b7019 Mon Sep 17 00:00:00 2001
From: Void&Null <70048414+Void-n-Null@users.noreply.github.com>
Date: Sun, 16 Apr 2023 19:27:39 -0700
Subject: [PATCH 40/51] Added Credit to README.md Demo
---
README.md | 2 ++
1 file changed, 2 insertions(+)
diff --git a/README.md b/README.md
index 7154801ecb..71957748f0 100644
--- a/README.md
+++ b/README.md
@@ -22,6 +22,8 @@ Auto-GPT is an experimental open-source application showcasing the capabilities
https://user-images.githubusercontent.com/70048414/232352935-55c6bf7c-3958-406e-8610-0913475a0b05.mp4
+Demo made by Blake Werlinger
+
💖 Help Fund Auto-GPT's Development 💖
If you can spare a coffee, you can help to cover the costs of developing Auto-GPT and help push the boundaries of fully autonomous AI!
From 9589334a305198c837bfb8720ed6f06176b2f216 Mon Sep 17 00:00:00 2001
From: EH
Date: Mon, 17 Apr 2023 03:34:02 +0100
Subject: [PATCH 41/51] Add File Downloading Capabilities (#1680)
* Added 'download_file' command
* Added util and fixed spinner
* Fixed comma and added autogpt/auto_gpt_workspace to .gitignore
* Fix linter issues
* Fix more linter issues
* Fix Lint Issues
* Added 'download_file' command
* Added util and fixed spinner
* Fixed comma and added autogpt/auto_gpt_workspace to .gitignore
* Fix linter issues
* Fix more linter issues
* Conditionally add the 'download_file' prompt
* Update args.py
* Removed Duplicate Prompt
* Switched to using path_in_workspace function
---
.gitignore | 1 +
autogpt/app.py | 5 +++
autogpt/args.py | 16 +++++++++-
autogpt/commands/file_operations.py | 49 ++++++++++++++++++++++++++++-
autogpt/config/config.py | 1 +
autogpt/prompt.py | 10 ++++++
autogpt/spinner.py | 15 ++++++++-
autogpt/utils.py | 13 ++++++++
8 files changed, 107 insertions(+), 3 deletions(-)
diff --git a/.gitignore b/.gitignore
index eda7f32734..2220ef6e3a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@ autogpt/keys.py
autogpt/*json
autogpt/node_modules/
autogpt/__pycache__/keys.cpython-310.pyc
+autogpt/auto_gpt_workspace
package-lock.json
*.pyc
auto_gpt_workspace/*
diff --git a/autogpt/app.py b/autogpt/app.py
index 78b5bd2fde..19c075f0b0 100644
--- a/autogpt/app.py
+++ b/autogpt/app.py
@@ -17,6 +17,7 @@ from autogpt.commands.file_operations import (
read_file,
search_files,
write_to_file,
+ download_file
)
from autogpt.json_fixes.parsing import fix_and_parse_json
from autogpt.memory import get_memory
@@ -164,6 +165,10 @@ def execute_command(command_name: str, arguments):
return delete_file(arguments["file"])
elif command_name == "search_files":
return search_files(arguments["directory"])
+ elif command_name == "download_file":
+ if not CFG.allow_downloads:
+ return "Error: You do not have user authorization to download files locally."
+ return download_file(arguments["url"], arguments["file"])
elif command_name == "browse_website":
return browse_website(arguments["url"], arguments["question"])
# TODO: Change these to take in a file rather than pasted code, if
diff --git a/autogpt/args.py b/autogpt/args.py
index eca3233472..f0e9c07a36 100644
--- a/autogpt/args.py
+++ b/autogpt/args.py
@@ -1,7 +1,7 @@
"""This module contains the argument parsing logic for the script."""
import argparse
-from colorama import Fore
+from colorama import Fore, Back, Style
from autogpt import utils
from autogpt.config import Config
from autogpt.logs import logger
@@ -63,6 +63,12 @@ def parse_arguments() -> None:
help="Specifies which ai_settings.yaml file to use, will also automatically"
" skip the re-prompt.",
)
+ parser.add_argument(
+ '--allow-downloads',
+ action='store_true',
+ dest='allow_downloads',
+ help='Dangerous: Allows Auto-GPT to download files natively.'
+ )
args = parser.parse_args()
if args.debug:
@@ -133,5 +139,13 @@ def parse_arguments() -> None:
CFG.ai_settings_file = file
CFG.skip_reprompt = True
+ if args.allow_downloads:
+ logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
+ logger.typewriter_log("WARNING: ", Fore.YELLOW,
+ f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} " +
+ "It is recommended that you monitor any files it downloads carefully.")
+ logger.typewriter_log("WARNING: ", Fore.YELLOW, f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}")
+ CFG.allow_downloads = True
+
if args.browser_name:
CFG.selenium_web_browser = args.browser_name
diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py
index 8abc2e2329..d273c1a34d 100644
--- a/autogpt/commands/file_operations.py
+++ b/autogpt/commands/file_operations.py
@@ -4,9 +4,16 @@ from __future__ import annotations
import os
import os.path
from pathlib import Path
-from typing import Generator
+from typing import Generator, List
+import requests
+from requests.adapters import HTTPAdapter
+from requests.adapters import Retry
+from colorama import Fore, Back
+from autogpt.spinner import Spinner
+from autogpt.utils import readable_file_size
from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
+
LOG_FILE = "file_logger.txt"
LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE
@@ -214,3 +221,43 @@ def search_files(directory: str) -> list[str]:
found_files.append(relative_path)
return found_files
+
+
+def download_file(url, filename):
+ """Downloads a file
+ Args:
+ url (str): URL of the file to download
+ filename (str): Filename to save the file as
+ """
+ safe_filename = path_in_workspace(filename)
+ try:
+ message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
+ with Spinner(message) as spinner:
+ session = requests.Session()
+ retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
+ adapter = HTTPAdapter(max_retries=retry)
+ session.mount('http://', adapter)
+ session.mount('https://', adapter)
+
+ total_size = 0
+ downloaded_size = 0
+
+ with session.get(url, allow_redirects=True, stream=True) as r:
+ r.raise_for_status()
+ total_size = int(r.headers.get('Content-Length', 0))
+ downloaded_size = 0
+
+ with open(safe_filename, 'wb') as f:
+ for chunk in r.iter_content(chunk_size=8192):
+ f.write(chunk)
+ downloaded_size += len(chunk)
+
+ # Update the progress message
+ progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
+ spinner.update_message(f"{message} {progress}")
+
+ return f'Successfully downloaded and locally stored file: "{safe_filename}"! (Size: {readable_file_size(total_size)})'
+ except requests.HTTPError as e:
+ return f"Got an HTTP Error whilst trying to download file: {e}"
+ except Exception as e:
+ return "Error: " + str(e)
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index 22da52b047..fe6f4f3258 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -24,6 +24,7 @@ class Config(metaclass=Singleton):
self.continuous_limit = 0
self.speak_mode = False
self.skip_reprompt = False
+ self.allow_downloads = False
self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
diff --git a/autogpt/prompt.py b/autogpt/prompt.py
index 18a5736c19..a2b20b1fef 100644
--- a/autogpt/prompt.py
+++ b/autogpt/prompt.py
@@ -105,6 +105,16 @@ def get_prompt() -> str:
),
)
+ # Only add the download file command if the AI is allowed to execute it
+ if cfg.allow_downloads:
+ commands.append(
+ (
+ "Downloads a file from the internet, and stores it locally",
+ "download_file",
+ {"url": "", "file": ""}
+ ),
+ )
+
# Add these command last.
commands.append(
("Do Nothing", "do_nothing", {}),
diff --git a/autogpt/spinner.py b/autogpt/spinner.py
index 56b4f20a68..febcea8eb1 100644
--- a/autogpt/spinner.py
+++ b/autogpt/spinner.py
@@ -29,12 +29,14 @@ class Spinner:
time.sleep(self.delay)
sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
- def __enter__(self) -> None:
+ def __enter__(self):
"""Start the spinner"""
self.running = True
self.spinner_thread = threading.Thread(target=self.spin)
self.spinner_thread.start()
+ return self
+
def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
"""Stop the spinner
@@ -48,3 +50,14 @@ class Spinner:
self.spinner_thread.join()
sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
sys.stdout.flush()
+
+ def update_message(self, new_message, delay=0.1):
+ """Update the spinner message
+ Args:
+ new_message (str): New message to display
+ delay: Delay in seconds before updating the message
+ """
+ time.sleep(delay)
+ sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") # Clear the current message
+ sys.stdout.flush()
+ self.message = new_message
diff --git a/autogpt/utils.py b/autogpt/utils.py
index 59709d02be..11d98d1b74 100644
--- a/autogpt/utils.py
+++ b/autogpt/utils.py
@@ -24,3 +24,16 @@ def validate_yaml_file(file: str):
)
return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!")
+
+
+def readable_file_size(size, decimal_places=2):
+ """Converts the given size in bytes to a readable format.
+ Args:
+ size: Size in bytes
+ decimal_places (int): Number of decimal places to display
+ """
+ for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
+ if size < 1024.0:
+ break
+ size /= 1024.0
+ return f"{size:.{decimal_places}f} {unit}"
From c110f3489dba6ab738967bf322f1ff6567b4caac Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Sun, 16 Apr 2023 21:51:36 -0500
Subject: [PATCH 42/51] Finish integrating command registry
---
autogpt/__main__.py | 28 ++++---
autogpt/agent/agent.py | 2 +-
autogpt/agent/agent_manager.py | 2 +-
autogpt/app.py | 90 ++++------------------
autogpt/args.py | 1 +
autogpt/commands/audio_text.py | 42 ++++++++--
autogpt/commands/command.py | 6 +-
autogpt/commands/evaluate_code.py | 2 +-
autogpt/commands/execute_code.py | 5 +-
autogpt/commands/file_operations.py | 3 +-
autogpt/commands/git_operations.py | 13 +++-
autogpt/commands/google_search.py | 34 +++++++-
autogpt/commands/image_gen.py | 2 +-
autogpt/commands/improve_code.py | 2 +-
autogpt/commands/twitter.py | 27 ++++++-
autogpt/commands/web_playwright.py | 1 +
autogpt/commands/web_requests.py | 4 +-
autogpt/commands/web_selenium.py | 29 +++----
autogpt/commands/write_tests.py | 3 +-
autogpt/config/__init__.py | 2 +-
autogpt/config/ai_config.py | 7 +-
autogpt/config/config.py | 7 +-
autogpt/data_ingestion.py | 2 +-
autogpt/json_fixes/auto_fix.py | 2 +-
autogpt/json_fixes/bracket_termination.py | 3 +-
autogpt/llm_utils.py | 4 +-
autogpt/logs.py | 4 +-
autogpt/memory/local.py | 2 +-
autogpt/memory/milvus.py | 8 +-
autogpt/memory/pinecone.py | 2 +-
autogpt/memory/redismem.py | 2 +-
autogpt/memory/weaviate.py | 6 +-
autogpt/plugins.py | 4 +-
autogpt/processing/html.py | 2 +-
autogpt/processing/text.py | 6 +-
autogpt/prompts/generator.py | 13 +++-
autogpt/prompts/prompt.py | 57 +-------------
autogpt/setup.py | 1 +
autogpt/speech/brian.py | 1 +
autogpt/speech/eleven_labs.py | 2 +-
autogpt/speech/gtts.py | 3 +-
autogpt/speech/say.py | 11 ++-
scripts/check_requirements.py | 3 +-
tests.py | 1 +
tests/browse_tests.py | 2 +-
tests/integration/weaviate_memory_tests.py | 8 +-
tests/test_commands.py | 1 +
tests/test_token_counter.py | 1 +
tests/unit/test_chat.py | 2 +-
tests/unit/test_commands.py | 7 +-
50 files changed, 238 insertions(+), 234 deletions(-)
diff --git a/autogpt/__main__.py b/autogpt/__main__.py
index 5fc9a1ead5..cd597506eb 100644
--- a/autogpt/__main__.py
+++ b/autogpt/__main__.py
@@ -2,17 +2,17 @@
import logging
import os
from pathlib import Path
+
from colorama import Fore
+
from autogpt.agent.agent import Agent
from autogpt.args import parse_arguments
from autogpt.commands.command import CommandRegistry
from autogpt.config import Config, check_openai_api_key
from autogpt.logs import logger
from autogpt.memory import get_memory
-
-from autogpt.prompts.prompt import construct_main_ai_config
from autogpt.plugins import load_plugins
-
+from autogpt.prompts.prompt import construct_main_ai_config
# Load environment variables from .env file
@@ -47,13 +47,20 @@ def main() -> None:
cfg.set_plugins(loaded_plugins)
# Create a CommandRegistry instance and scan default folder
command_registry = CommandRegistry()
- command_registry.import_commands("scripts.ai_functions")
- command_registry.import_commands("scripts.commands")
- command_registry.import_commands("scripts.execute_code")
- command_registry.import_commands("scripts.agent_manager")
- command_registry.import_commands("scripts.file_operations")
+ command_registry.import_commands("autogpt.commands.audio_text")
+ command_registry.import_commands("autogpt.commands.evaluate_code")
+ command_registry.import_commands("autogpt.commands.execute_code")
+ command_registry.import_commands("autogpt.commands.file_operations")
+ command_registry.import_commands("autogpt.commands.git_operations")
+ command_registry.import_commands("autogpt.commands.google_search")
+ command_registry.import_commands("autogpt.commands.image_gen")
+ command_registry.import_commands("autogpt.commands.twitter")
+ command_registry.import_commands("autogpt.commands.web_selenium")
+ command_registry.import_commands("autogpt.commands.write_tests")
+ command_registry.import_commands("autogpt.app")
ai_name = ""
ai_config = construct_main_ai_config()
+ ai_config.command_registry = command_registry
# print(prompt)
# Initialize variables
full_message_history = []
@@ -70,6 +77,9 @@ def main() -> None:
f"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
)
logger.typewriter_log(f"Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
+ prompt = ai_config.construct_full_prompt()
+ if cfg.debug_mode:
+ logger.typewriter_log("Prompt:", Fore.GREEN, prompt)
agent = Agent(
ai_name=ai_name,
memory=memory,
@@ -77,7 +87,7 @@ def main() -> None:
next_action_count=next_action_count,
command_registry=command_registry,
config=ai_config,
- prompt=ai_config.construct_full_prompt(),
+ prompt=prompt,
user_input=user_input,
)
agent.start_interaction_loop()
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index 513478b9d8..8117818ea4 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -1,6 +1,6 @@
from colorama import Fore, Style
-from autogpt.app import execute_command, get_command
+from autogpt.app import execute_command, get_command
from autogpt.chat import chat_with_ai, create_chat_message
from autogpt.config import Config
from autogpt.json_fixes.bracket_termination import (
diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py
index e848bfe77c..e1353e03a5 100644
--- a/autogpt/agent/agent_manager.py
+++ b/autogpt/agent/agent_manager.py
@@ -1,8 +1,8 @@
"""Agent manager for managing GPT agents"""
from __future__ import annotations
+from autogpt.config.config import Config, Singleton
from autogpt.llm_utils import create_chat_completion
-from autogpt.config.config import Singleton, Config
class AgentManager(metaclass=Singleton):
diff --git a/autogpt/app.py b/autogpt/app.py
index 1e78262662..3a8bbc2a9a 100644
--- a/autogpt/app.py
+++ b/autogpt/app.py
@@ -1,16 +1,11 @@
""" Command and Control """
import json
from typing import List, NoReturn, Union
+
from autogpt.agent.agent_manager import AgentManager
-from autogpt.commands.command import command, CommandRegistry
-from autogpt.commands.evaluate_code import evaluate_code
-from autogpt.commands.google_search import google_official_search, google_search
-from autogpt.commands.improve_code import improve_code
-from autogpt.commands.write_tests import write_tests
-from autogpt.config import Config
-from autogpt.commands.image_gen import generate_image
from autogpt.commands.audio_text import read_audio_from_file
-from autogpt.commands.web_requests import scrape_links, scrape_text
+from autogpt.commands.command import CommandRegistry, command
+from autogpt.commands.evaluate_code import evaluate_code
from autogpt.commands.execute_code import execute_python_file, execute_shell
from autogpt.commands.file_operations import (
append_to_file,
@@ -19,15 +14,20 @@ from autogpt.commands.file_operations import (
search_files,
write_to_file,
)
+from autogpt.commands.git_operations import clone_repository
+from autogpt.commands.google_search import google_official_search, google_search
+from autogpt.commands.image_gen import generate_image
+from autogpt.commands.improve_code import improve_code
+from autogpt.commands.twitter import send_tweet
+from autogpt.commands.web_requests import scrape_links, scrape_text
+from autogpt.commands.web_selenium import browse_website
+from autogpt.commands.write_tests import write_tests
+from autogpt.config import Config
from autogpt.json_fixes.parsing import fix_and_parse_json
from autogpt.memory import get_memory
from autogpt.processing.text import summarize_text
from autogpt.prompts.generator import PromptGenerator
from autogpt.speech import say_text
-from autogpt.commands.web_selenium import browse_website
-from autogpt.commands.git_operations import clone_repository
-from autogpt.commands.twitter import send_tweet
-
CFG = Config()
AGENT_MANAGER = AgentManager()
@@ -132,76 +132,16 @@ def execute_command(
# TODO: Remove commands below after they are moved to the command registry.
command_name = map_command_synonyms(command_name)
- if command_name == "google":
- # Check if the Google API key is set and use the official search method
- # If the API key is not set or has only whitespaces, use the unofficial
- # search method
- key = CFG.google_api_key
- if key and key.strip() and key != "your-google-api-key":
- google_result = google_official_search(arguments["input"])
- return google_result
- else:
- google_result = google_search(arguments["input"])
- # google_result can be a list or a string depending on the search results
- if isinstance(google_result, list):
- safe_message = [
- google_result_single.encode("utf-8", "ignore")
- for google_result_single in google_result
- ]
- else:
- safe_message = google_result.encode("utf-8", "ignore")
-
- return str(safe_message)
- elif command_name == "memory_add":
+ if command_name == "memory_add":
return memory.add(arguments["string"])
- elif command_name == "start_agent":
- return start_agent(
- arguments["name"], arguments["task"], arguments["prompt"]
- )
- elif command_name == "message_agent":
- return message_agent(arguments["key"], arguments["message"])
- elif command_name == "list_agents":
- return list_agents()
- elif command_name == "delete_agent":
- return delete_agent(arguments["key"])
elif command_name == "get_text_summary":
return get_text_summary(arguments["url"], arguments["question"])
elif command_name == "get_hyperlinks":
return get_hyperlinks(arguments["url"])
- elif command_name == "clone_repository":
- return clone_repository(
- arguments["repository_url"], arguments["clone_path"]
- )
- elif command_name == "read_file":
- return read_file(arguments["file"])
- elif command_name == "write_to_file":
- return write_to_file(arguments["file"], arguments["text"])
- elif command_name == "append_to_file":
- return append_to_file(arguments["file"], arguments["text"])
- elif command_name == "delete_file":
- return delete_file(arguments["file"])
- elif command_name == "search_files":
- return search_files(arguments["directory"])
- elif command_name == "browse_website":
- return browse_website(arguments["url"], arguments["question"])
# TODO: Change these to take in a file rather than pasted code, if
# non-file is given, return instructions "Input should be a python
- # filepath, write your code to file and try again"
- elif command_name == "evaluate_code":
- return evaluate_code(arguments["code"])
- elif command_name == "improve_code":
- return improve_code(arguments["suggestions"], arguments["code"])
- elif command_name == "write_tests":
- return write_tests(arguments["code"], arguments.get("focus"))
- elif command_name == "execute_python_file": # Add this command
- return execute_python_file(arguments["file"])
- elif command_name == "read_audio_from_file":
- return read_audio_from_file(arguments["file"])
- elif command_name == "generate_image":
- return generate_image(arguments["prompt"])
- elif command_name == "send_tweet":
- return send_tweet(arguments["text"])
+ # filepath, write your code to file and try again
elif command_name == "do_nothing":
return "No action performed."
elif command_name == "task_complete":
@@ -305,7 +245,7 @@ def message_agent(key: str, message: str) -> str:
@command("list_agents", "List GPT Agents", "")
-def list_agents():
+def list_agents() -> str:
"""List all agents
Returns:
diff --git a/autogpt/args.py b/autogpt/args.py
index eca3233472..20d25a4c4c 100644
--- a/autogpt/args.py
+++ b/autogpt/args.py
@@ -2,6 +2,7 @@
import argparse
from colorama import Fore
+
from autogpt import utils
from autogpt.config import Config
from autogpt.logs import logger
diff --git a/autogpt/commands/audio_text.py b/autogpt/commands/audio_text.py
index 84819d5ed7..421a1f18ee 100644
--- a/autogpt/commands/audio_text.py
+++ b/autogpt/commands/audio_text.py
@@ -1,23 +1,51 @@
-import requests
+"""Commands for converting audio to text."""
import json
+import requests
+
+from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.workspace import path_in_workspace
-cfg = Config()
+CFG = Config()
-def read_audio_from_file(audio_path):
+@command(
+ "read_audio_from_file",
+ "Convert Audio to text",
+ '"file": ""',
+ CFG.huggingface_audio_to_text_model,
+ "Configure huggingface_audio_to_text_model.",
+)
+def read_audio_from_file(audio_path: str) -> str:
+ """
+ Convert audio to text.
+
+ Args:
+ audio_path (str): The path to the audio file
+
+ Returns:
+ str: The text from the audio
+ """
audio_path = path_in_workspace(audio_path)
with open(audio_path, "rb") as audio_file:
audio = audio_file.read()
return read_audio(audio)
-def read_audio(audio):
- model = cfg.huggingface_audio_to_text_model
+def read_audio(audio: bytes) -> str:
+ """
+ Convert audio to text.
+
+ Args:
+ audio (bytes): The audio to convert
+
+ Returns:
+ str: The text from the audio
+ """
+ model = CFG.huggingface_audio_to_text_model
api_url = f"https://api-inference.huggingface.co/models/{model}"
- api_token = cfg.huggingface_api_token
+ api_token = CFG.huggingface_api_token
headers = {"Authorization": f"Bearer {api_token}"}
if api_token is None:
@@ -32,4 +60,4 @@ def read_audio(audio):
)
text = json.loads(response.content.decode("utf-8"))["text"]
- return "The audio says: " + text
+ return f"The audio says: {text}"
diff --git a/autogpt/commands/command.py b/autogpt/commands/command.py
index d1dfc8fde6..3b3ccf51c2 100644
--- a/autogpt/commands/command.py
+++ b/autogpt/commands/command.py
@@ -1,8 +1,8 @@
-import os
-import sys
import importlib
import inspect
-from typing import Callable, Any, List, Optional
+import os
+import sys
+from typing import Any, Callable, List, Optional
# Unique identifier for auto-gpt commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"
diff --git a/autogpt/commands/evaluate_code.py b/autogpt/commands/evaluate_code.py
index 1c9b117dda..064e4512be 100644
--- a/autogpt/commands/evaluate_code.py
+++ b/autogpt/commands/evaluate_code.py
@@ -5,7 +5,7 @@ from autogpt.commands.command import command
from autogpt.llm_utils import call_ai_function
-@command("evaluate_code", "Evaluate Code", '"code": ""')
+@command("evaluate_code", "Evaluate Code", '"code": ""')
def evaluate_code(code: str) -> list[str]:
"""
A function that takes in a string and returns a response from create chat
diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py
index aa8a354552..b67707279a 100644
--- a/autogpt/commands/execute_code.py
+++ b/autogpt/commands/execute_code.py
@@ -4,9 +4,10 @@ import subprocess
import docker
from docker.errors import ImageNotFound
-from autogpt.config import Config
+
from autogpt.commands.command import command
-from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
+from autogpt.config import Config
+from autogpt.workspace import WORKSPACE_PATH, path_in_workspace
CFG = Config()
diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py
index fea62fad18..4d8d76b311 100644
--- a/autogpt/commands/file_operations.py
+++ b/autogpt/commands/file_operations.py
@@ -5,8 +5,9 @@ import os
import os.path
from pathlib import Path
from typing import Generator
+
from autogpt.commands.command import command
-from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
+from autogpt.workspace import WORKSPACE_PATH, path_in_workspace
LOG_FILE = "file_logger.txt"
LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE
diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py
index 3ff35cf31a..f5954032d4 100644
--- a/autogpt/commands/git_operations.py
+++ b/autogpt/commands/git_operations.py
@@ -1,10 +1,19 @@
"""Git operations for autogpt"""
-import git
+from git.repo import Repo
+
+from autogpt.commands.command import command
from autogpt.config import Config
CFG = Config()
+@command(
+ "clone_repository",
+ "Clone Repository",
+ '"repository_url": "", "clone_path": ""',
+ CFG.github_username and CFG.github_api_key,
+ "Configure github_username and github_api_key.",
+)
def clone_repository(repo_url: str, clone_path: str) -> str:
"""Clone a github repository locally
@@ -17,7 +26,7 @@ def clone_repository(repo_url: str, clone_path: str) -> str:
split_url = repo_url.split("//")
auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
try:
- git.Repo.clone_from(auth_repo_url, clone_path)
+ Repo.clone_from(auth_repo_url, clone_path)
return f"""Cloned {repo_url} to {clone_path}"""
except Exception as e:
return f"Error: {str(e)}"
diff --git a/autogpt/commands/google_search.py b/autogpt/commands/google_search.py
index 148ba1d0e1..0f635bcadc 100644
--- a/autogpt/commands/google_search.py
+++ b/autogpt/commands/google_search.py
@@ -5,11 +5,13 @@ import json
from duckduckgo_search import ddg
+from autogpt.commands.command import command
from autogpt.config import Config
CFG = Config()
+@command("google", "Google Search", '"input": ""', not CFG.google_api_key)
def google_search(query: str, num_results: int = 8) -> str:
"""Return the results of a google search
@@ -31,9 +33,17 @@ def google_search(query: str, num_results: int = 8) -> str:
for j in results:
search_results.append(j)
- return json.dumps(search_results, ensure_ascii=False, indent=4)
+ results = json.dumps(search_results, ensure_ascii=False, indent=4)
+ return safe_google_results(results)
+@command(
+ "google",
+ "Google Search",
+ '"input": ""',
+ bool(CFG.google_api_key),
+ "Configure google_api_key.",
+)
def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
"""Return the results of a google search using the official Google API
@@ -82,6 +92,26 @@ def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
return "Error: The provided Google API key is invalid or missing."
else:
return f"Error: {e}"
+ # google_result can be a list or a string depending on the search results
# Return the list of search result URLs
- return search_results_links
+ return safe_google_results(search_results_links)
+
+
+def safe_google_results(results: str | list) -> str:
+ """
+ Return the results of a google search in a safe format.
+
+ Args:
+ results (str | list): The search results.
+
+ Returns:
+ str: The results of the search.
+ """
+ if isinstance(results, list):
+ safe_message = json.dumps(
+ [result.encode("utf-8", "ignore") for result in results]
+ )
+ else:
+ safe_message = results.encode("utf-8", "ignore").decode("utf-8")
+ return safe_message
diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py
index 2b62aa354b..9dbb2fa586 100644
--- a/autogpt/commands/image_gen.py
+++ b/autogpt/commands/image_gen.py
@@ -1,12 +1,12 @@
""" Image Generation Module for AutoGPT."""
import io
-import os.path
import uuid
from base64 import b64decode
import openai
import requests
from PIL import Image
+
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.workspace import path_in_workspace
diff --git a/autogpt/commands/improve_code.py b/autogpt/commands/improve_code.py
index 0bfe725305..41a369b4db 100644
--- a/autogpt/commands/improve_code.py
+++ b/autogpt/commands/improve_code.py
@@ -2,7 +2,7 @@ from __future__ import annotations
import json
-from autogpt.commands import command
+from autogpt.commands.command import command
from autogpt.llm_utils import call_ai_function
diff --git a/autogpt/commands/twitter.py b/autogpt/commands/twitter.py
index dc4d450c1f..8e64b21357 100644
--- a/autogpt/commands/twitter.py
+++ b/autogpt/commands/twitter.py
@@ -1,11 +1,30 @@
-import tweepy
+"""A module that contains a command to send a tweet."""
import os
+
+import tweepy
from dotenv import load_dotenv
+from autogpt.commands.command import command
+
load_dotenv()
-def send_tweet(tweet_text):
+@command(
+ "send_tweet",
+ "Send Tweet",
+ '"text": ""',
+)
+def send_tweet(tweet_text: str) -> str:
+ """
+ A function that takes in a string and returns a response from create chat
+ completion api call.
+
+ Args:
+ tweet_text (str): Text to be tweeted.
+
+ Returns:
+ A result from sending the tweet.
+ """
consumer_key = os.environ.get("TW_CONSUMER_KEY")
consumer_secret = os.environ.get("TW_CONSUMER_SECRET")
access_token = os.environ.get("TW_ACCESS_TOKEN")
@@ -20,6 +39,6 @@ def send_tweet(tweet_text):
# Send tweet
try:
api.update_status(tweet_text)
- print("Tweet sent successfully!")
+ return "Tweet sent successfully!"
except tweepy.TweepyException as e:
- print("Error sending tweet: {}".format(e.reason))
+ return f"Error sending tweet: {e.reason}"
diff --git a/autogpt/commands/web_playwright.py b/autogpt/commands/web_playwright.py
index a1abb6cb73..4e388ded20 100644
--- a/autogpt/commands/web_playwright.py
+++ b/autogpt/commands/web_playwright.py
@@ -8,6 +8,7 @@ except ImportError:
"Playwright not installed. Please install it with 'pip install playwright' to use."
)
from bs4 import BeautifulSoup
+
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
diff --git a/autogpt/commands/web_requests.py b/autogpt/commands/web_requests.py
index 50d8d383cb..7613e5bd1d 100644
--- a/autogpt/commands/web_requests.py
+++ b/autogpt/commands/web_requests.py
@@ -4,9 +4,9 @@ from __future__ import annotations
from urllib.parse import urljoin, urlparse
import requests
-from requests.compat import urljoin
-from requests import Response
from bs4 import BeautifulSoup
+from requests import Response
+from requests.compat import urljoin
from autogpt.config import Config
from autogpt.memory import get_memory
diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py
index 591d3162a3..ed79d56ce2 100644
--- a/autogpt/commands/web_selenium.py
+++ b/autogpt/commands/web_selenium.py
@@ -1,22 +1,25 @@
"""Selenium web scraping module."""
from __future__ import annotations
-from selenium import webdriver
-from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
-import autogpt.processing.text as summary
-from bs4 import BeautifulSoup
-from selenium.webdriver.remote.webdriver import WebDriver
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support.wait import WebDriverWait
-from selenium.webdriver.support import expected_conditions as EC
-from webdriver_manager.chrome import ChromeDriverManager
-from webdriver_manager.firefox import GeckoDriverManager
-from selenium.webdriver.chrome.options import Options as ChromeOptions
-from selenium.webdriver.firefox.options import Options as FirefoxOptions
-from selenium.webdriver.safari.options import Options as SafariOptions
import logging
from pathlib import Path
+
+from bs4 import BeautifulSoup
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options as ChromeOptions
+from selenium.webdriver.common.by import By
+from selenium.webdriver.firefox.options import Options as FirefoxOptions
+from selenium.webdriver.remote.webdriver import WebDriver
+from selenium.webdriver.safari.options import Options as SafariOptions
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.wait import WebDriverWait
+from webdriver_manager.chrome import ChromeDriverManager
+from webdriver_manager.firefox import GeckoDriverManager
+
+from autogpt.commands.command import command
+import autogpt.processing.text as summary
from autogpt.config import Config
+from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
FILE_DIR = Path(__file__).parent.parent
CFG = Config()
diff --git a/autogpt/commands/write_tests.py b/autogpt/commands/write_tests.py
index 23d4c13000..91cd930429 100644
--- a/autogpt/commands/write_tests.py
+++ b/autogpt/commands/write_tests.py
@@ -2,7 +2,8 @@
from __future__ import annotations
import json
-from autogpt.commands import command
+
+from autogpt.commands.command import command
from autogpt.llm_utils import call_ai_function
diff --git a/autogpt/config/__init__.py b/autogpt/config/__init__.py
index ceb5566ce7..726b6dcf3d 100644
--- a/autogpt/config/__init__.py
+++ b/autogpt/config/__init__.py
@@ -2,7 +2,7 @@
This module contains the configuration classes for AutoGPT.
"""
from autogpt.config.ai_config import AIConfig
-from autogpt.config.config import check_openai_api_key, Config
+from autogpt.config.config import Config, check_openai_api_key
from autogpt.config.singleton import AbstractSingleton, Singleton
__all__ = [
diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py
index d18c75ba7d..c9022773fb 100644
--- a/autogpt/config/ai_config.py
+++ b/autogpt/config/ai_config.py
@@ -6,7 +6,8 @@ from __future__ import annotations
import os
from pathlib import Path
-from typing import Type
+from typing import Optional, Type
+
import yaml
from autogpt.prompts.generator import PromptGenerator
@@ -41,6 +42,7 @@ class AIConfig:
self.ai_role = ai_role
self.ai_goals = ai_goals
self.prompt_generator = None
+ self.command_registry = None
# Soon this will go in a folder where it remembers more stuff about the run(s)
SAVE_FILE = Path(os.getcwd()) / "ai_settings.yaml"
@@ -113,8 +115,8 @@ class AIConfig:
""
)
- from autogpt.prompts.prompt import build_default_prompt_generator
from autogpt.config import Config
+ from autogpt.prompts.prompt import build_default_prompt_generator
cfg = Config()
if prompt_generator is None:
@@ -122,6 +124,7 @@ class AIConfig:
prompt_generator.goals = self.ai_goals
prompt_generator.name = self.ai_name
prompt_generator.role = self.ai_role
+ prompt_generator.command_registry = self.command_registry
for plugin in cfg.plugins:
prompt_generator = plugin.post_prompt(prompt_generator)
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index 46ab95d8d4..a5cd07101f 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -1,14 +1,13 @@
"""Configuration class to store the state of bools for different scripts access."""
import os
-from colorama import Fore
-
-from autogpt.config.singleton import Singleton
import openai
import yaml
-
+from colorama import Fore
from dotenv import load_dotenv
+from autogpt.config.singleton import Singleton
+
load_dotenv(verbose=True)
diff --git a/autogpt/data_ingestion.py b/autogpt/data_ingestion.py
index 01bafc2ad0..b89a33dafd 100644
--- a/autogpt/data_ingestion.py
+++ b/autogpt/data_ingestion.py
@@ -1,8 +1,8 @@
import argparse
import logging
-from autogpt.config import Config
from autogpt.commands.file_operations import ingest_file, search_files
+from autogpt.config import Config
from autogpt.memory import get_memory
cfg = Config()
diff --git a/autogpt/json_fixes/auto_fix.py b/autogpt/json_fixes/auto_fix.py
index 9fcf909a49..8ad458ab97 100644
--- a/autogpt/json_fixes/auto_fix.py
+++ b/autogpt/json_fixes/auto_fix.py
@@ -1,9 +1,9 @@
"""This module contains the function to fix JSON strings using GPT-3."""
import json
+from autogpt.config import Config
from autogpt.llm_utils import call_ai_function
from autogpt.logs import logger
-from autogpt.config import Config
CFG = Config()
diff --git a/autogpt/json_fixes/bracket_termination.py b/autogpt/json_fixes/bracket_termination.py
index 822eed4a54..260301dc25 100644
--- a/autogpt/json_fixes/bracket_termination.py
+++ b/autogpt/json_fixes/bracket_termination.py
@@ -3,11 +3,12 @@ from __future__ import annotations
import contextlib
import json
+
import regex
from colorama import Fore
-from autogpt.logs import logger
from autogpt.config import Config
+from autogpt.logs import logger
from autogpt.speech import say_text
CFG = Config()
diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py
index 559820ed8d..701d622b54 100644
--- a/autogpt/llm_utils.py
+++ b/autogpt/llm_utils.py
@@ -1,11 +1,11 @@
from __future__ import annotations
-from ast import List
import time
+from ast import List
import openai
-from openai.error import APIError, RateLimitError
from colorama import Fore
+from openai.error import APIError, RateLimitError
from autogpt.config import Config
diff --git a/autogpt/logs.py b/autogpt/logs.py
index 22ce23f4aa..f5c6fa8177 100644
--- a/autogpt/logs.py
+++ b/autogpt/logs.py
@@ -5,13 +5,13 @@ import os
import random
import re
import time
-from logging import LogRecord
import traceback
+from logging import LogRecord
from colorama import Fore, Style
-from autogpt.speech import say_text
from autogpt.config import Config, Singleton
+from autogpt.speech import say_text
CFG = Config()
diff --git a/autogpt/memory/local.py b/autogpt/memory/local.py
index 6c7ee1b36a..998b5f1d94 100644
--- a/autogpt/memory/local.py
+++ b/autogpt/memory/local.py
@@ -7,8 +7,8 @@ from typing import Any
import numpy as np
import orjson
-from autogpt.memory.base import MemoryProviderSingleton
from autogpt.llm_utils import create_embedding_with_ada
+from autogpt.memory.base import MemoryProviderSingleton
EMBED_DIM = 1536
SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
diff --git a/autogpt/memory/milvus.py b/autogpt/memory/milvus.py
index c6e7d5a372..93aa8b15d6 100644
--- a/autogpt/memory/milvus.py
+++ b/autogpt/memory/milvus.py
@@ -1,11 +1,5 @@
""" Milvus memory storage provider."""
-from pymilvus import (
- connections,
- FieldSchema,
- CollectionSchema,
- DataType,
- Collection,
-)
+from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
diff --git a/autogpt/memory/pinecone.py b/autogpt/memory/pinecone.py
index d781073e3f..27fcd62482 100644
--- a/autogpt/memory/pinecone.py
+++ b/autogpt/memory/pinecone.py
@@ -1,9 +1,9 @@
import pinecone
from colorama import Fore, Style
+from autogpt.llm_utils import create_embedding_with_ada
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton
-from autogpt.llm_utils import create_embedding_with_ada
class PineconeMemory(MemoryProviderSingleton):
diff --git a/autogpt/memory/redismem.py b/autogpt/memory/redismem.py
index 0e8dd71d91..082a812c53 100644
--- a/autogpt/memory/redismem.py
+++ b/autogpt/memory/redismem.py
@@ -10,9 +10,9 @@ from redis.commands.search.field import TextField, VectorField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.query import Query
+from autogpt.llm_utils import create_embedding_with_ada
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton
-from autogpt.llm_utils import create_embedding_with_ada
SCHEMA = [
TextField("data"),
diff --git a/autogpt/memory/weaviate.py b/autogpt/memory/weaviate.py
index 19035381f3..ef0e34760e 100644
--- a/autogpt/memory/weaviate.py
+++ b/autogpt/memory/weaviate.py
@@ -1,11 +1,13 @@
-from autogpt.config import Config
-from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
import uuid
+
import weaviate
from weaviate import Client
from weaviate.embedded import EmbeddedOptions
from weaviate.util import generate_uuid5
+from autogpt.config import Config
+from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
+
def default_schema(weaviate_index):
return {
diff --git a/autogpt/plugins.py b/autogpt/plugins.py
index 7b843a6ac1..a00b989eae 100644
--- a/autogpt/plugins.py
+++ b/autogpt/plugins.py
@@ -1,10 +1,10 @@
"""Handles loading of plugins."""
-from ast import Module
import zipfile
+from ast import Module
from pathlib import Path
-from zipimport import zipimporter
from typing import List, Optional, Tuple
+from zipimport import zipimporter
def inspect_zip_for_module(zip_path: str, debug: bool = False) -> Optional[str]:
diff --git a/autogpt/processing/html.py b/autogpt/processing/html.py
index e1912b6ad4..81387b12ad 100644
--- a/autogpt/processing/html.py
+++ b/autogpt/processing/html.py
@@ -1,8 +1,8 @@
"""HTML processing functions"""
from __future__ import annotations
-from requests.compat import urljoin
from bs4 import BeautifulSoup
+from requests.compat import urljoin
def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:
diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py
index d30036d878..84e6a1dea6 100644
--- a/autogpt/processing/text.py
+++ b/autogpt/processing/text.py
@@ -1,9 +1,11 @@
"""Text processing functions"""
-from typing import Generator, Optional, Dict
+from typing import Dict, Generator, Optional
+
from selenium.webdriver.remote.webdriver import WebDriver
-from autogpt.memory import get_memory
+
from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion
+from autogpt.memory import get_memory
CFG = Config()
MEMORY = get_memory(CFG)
diff --git a/autogpt/prompts/generator.py b/autogpt/prompts/generator.py
index f8a37b85e7..24768203d5 100644
--- a/autogpt/prompts/generator.py
+++ b/autogpt/prompts/generator.py
@@ -19,6 +19,7 @@ class PromptGenerator:
self.resources = []
self.performance_evaluation = []
self.goals = []
+ self.command_registry = None
self.name = "Bob"
self.role = "AI"
self.response_format = {
@@ -119,10 +120,14 @@ class PromptGenerator:
str: The formatted numbered list.
"""
if item_type == "command":
- return "\n".join(
- f"{i+1}. {self._generate_command_string(item)}"
- for i, item in enumerate(items)
- )
+ command_strings = []
+ if self.command_registry:
+ command_strings += [
+ str(item) for item in self.command_registry.commands.values()
+ ]
+ # These are the commands that are added manually: do_nothing and task_complete
+ command_strings += [self._generate_command_string(item) for item in items]
+ return "\n".join(f"{i+1}. {item}" for i, item in enumerate(command_strings))
else:
return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))
diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py
index d82cdb16ae..ba04263e05 100644
--- a/autogpt/prompts/prompt.py
+++ b/autogpt/prompts/prompt.py
@@ -1,4 +1,5 @@
from colorama import Fore
+
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config
from autogpt.logs import logger
@@ -37,63 +38,9 @@ def build_default_prompt_generator() -> PromptGenerator:
# Define the command list
commands = [
- ("Google Search", "google", {"input": ""}),
- (
- "Browse Website",
- "browse_website",
- {"url": "", "question": ""},
- ),
- (
- "Start GPT Agent",
- "start_agent",
- {"name": "", "task": "", "prompt": ""},
- ),
- (
- "Message GPT Agent",
- "message_agent",
- {"key": "", "message": ""},
- ),
- ("List GPT Agents", "list_agents", {}),
- ("Delete GPT Agent", "delete_agent", {"key": ""}),
- (
- "Clone Repository",
- "clone_repository",
- {"repository_url": "", "clone_path": ""},
- ),
- ("Write to file", "write_to_file", {"file": "", "text": ""}),
- ("Read file", "read_file", {"file": ""}),
- ("Append to file", "append_to_file", {"file": "", "text": ""}),
- ("Delete file", "delete_file", {"file": ""}),
- ("Search Files", "search_files", {"directory": ""}),
- ("Evaluate Code", "evaluate_code", {"code": ""}),
- (
- "Get Improved Code",
- "improve_code",
- {"suggestions": "", "code": ""},
- ),
- (
- "Write Tests",
- "write_tests",
- {"code": "", "focus": ""},
- ),
- ("Execute Python File", "execute_python_file", {"file": ""}),
- ("Generate Image", "generate_image", {"prompt": ""}),
- ("Send Tweet", "send_tweet", {"text": ""}),
- ]
-
- # Only add the audio to text command if the model is specified
- if cfg.huggingface_audio_to_text_model:
- commands.append(
- ("Convert Audio to text", "read_audio_from_file", {"file": ""}),
- )
-
- # Add these command last.
- commands.append(
("Do Nothing", "do_nothing", {}),
- )
- commands.append(
("Task Complete (Shutdown)", "task_complete", {"reason": ""}),
- )
+ ]
# Add commands to the PromptGenerator object
for command_label, command_name, args in commands:
diff --git a/autogpt/setup.py b/autogpt/setup.py
index 5315c01db0..d719688dc6 100644
--- a/autogpt/setup.py
+++ b/autogpt/setup.py
@@ -1,5 +1,6 @@
"""Setup the AI and its goals"""
from colorama import Fore, Style
+
from autogpt import utils
from autogpt.config.ai_config import AIConfig
from autogpt.logs import logger
diff --git a/autogpt/speech/brian.py b/autogpt/speech/brian.py
index e581bbcc8d..3cc593c216 100644
--- a/autogpt/speech/brian.py
+++ b/autogpt/speech/brian.py
@@ -1,5 +1,6 @@
""" Brian speech module for autogpt """
import os
+
import requests
from playsound import playsound
diff --git a/autogpt/speech/eleven_labs.py b/autogpt/speech/eleven_labs.py
index 0af48cae15..a9b30dbbc3 100644
--- a/autogpt/speech/eleven_labs.py
+++ b/autogpt/speech/eleven_labs.py
@@ -1,8 +1,8 @@
"""ElevenLabs speech module"""
import os
-from playsound import playsound
import requests
+from playsound import playsound
from autogpt.config import Config
from autogpt.speech.base import VoiceBase
diff --git a/autogpt/speech/gtts.py b/autogpt/speech/gtts.py
index 37497075e7..1c3e9cae05 100644
--- a/autogpt/speech/gtts.py
+++ b/autogpt/speech/gtts.py
@@ -1,7 +1,8 @@
""" GTTS Voice. """
import os
-from playsound import playsound
+
import gtts
+from playsound import playsound
from autogpt.speech.base import VoiceBase
diff --git a/autogpt/speech/say.py b/autogpt/speech/say.py
index 78b75b21fc..727983d12b 100644
--- a/autogpt/speech/say.py
+++ b/autogpt/speech/say.py
@@ -1,13 +1,12 @@
""" Text to speech module """
-from autogpt.config import Config
-
import threading
from threading import Semaphore
-from autogpt.speech.brian import BrianSpeech
-from autogpt.speech.macos_tts import MacOSTTS
-from autogpt.speech.gtts import GTTSVoice
-from autogpt.speech.eleven_labs import ElevenLabsSpeech
+from autogpt.config import Config
+from autogpt.speech.brian import BrianSpeech
+from autogpt.speech.eleven_labs import ElevenLabsSpeech
+from autogpt.speech.gtts import GTTSVoice
+from autogpt.speech.macos_tts import MacOSTTS
CFG = Config()
DEFAULT_VOICE_ENGINE = GTTSVoice()
diff --git a/scripts/check_requirements.py b/scripts/check_requirements.py
index d1f2350413..e4eab024a6 100644
--- a/scripts/check_requirements.py
+++ b/scripts/check_requirements.py
@@ -1,6 +1,7 @@
-import pkg_resources
import sys
+import pkg_resources
+
def main():
requirements_file = sys.argv[1]
diff --git a/tests.py b/tests.py
index 67ba1c8eb1..62f76da8ac 100644
--- a/tests.py
+++ b/tests.py
@@ -1,4 +1,5 @@
import unittest
+
import coverage
if __name__ == "__main__":
diff --git a/tests/browse_tests.py b/tests/browse_tests.py
index 1ac523ecdc..f896e7dd75 100644
--- a/tests/browse_tests.py
+++ b/tests/browse_tests.py
@@ -1,6 +1,6 @@
-import unittest
import os
import sys
+import unittest
from bs4 import BeautifulSoup
diff --git a/tests/integration/weaviate_memory_tests.py b/tests/integration/weaviate_memory_tests.py
index 6f3edd99f6..ce4c63da3d 100644
--- a/tests/integration/weaviate_memory_tests.py
+++ b/tests/integration/weaviate_memory_tests.py
@@ -1,15 +1,15 @@
+import os
+import sys
import unittest
from unittest import mock
-import sys
-import os
+from uuid import uuid4
from weaviate import Client
from weaviate.util import get_valid_uuid
-from uuid import uuid4
from autogpt.config import Config
-from autogpt.memory.weaviate import WeaviateMemory
from autogpt.memory.base import get_ada_embedding
+from autogpt.memory.weaviate import WeaviateMemory
@mock.patch.dict(
diff --git a/tests/test_commands.py b/tests/test_commands.py
index a21bbb4dc2..49c09f11db 100644
--- a/tests/test_commands.py
+++ b/tests/test_commands.py
@@ -3,6 +3,7 @@ import sys
from pathlib import Path
import pytest
+
from autogpt.commands.command import Command, CommandRegistry
diff --git a/tests/test_token_counter.py b/tests/test_token_counter.py
index 81e68277fd..6d7ae016b2 100644
--- a/tests/test_token_counter.py
+++ b/tests/test_token_counter.py
@@ -1,4 +1,5 @@
import unittest
+
import tests.context
from autogpt.token_counter import count_message_tokens, count_string_tokens
diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py
index 55a44492a0..774f410376 100644
--- a/tests/unit/test_chat.py
+++ b/tests/unit/test_chat.py
@@ -1,6 +1,6 @@
# Generated by CodiumAI
-import unittest
import time
+import unittest
from unittest.mock import patch
from autogpt.chat import create_chat_message, generate_context
diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py
index e15709aa37..7e5426f0fc 100644
--- a/tests/unit/test_commands.py
+++ b/tests/unit/test_commands.py
@@ -1,7 +1,8 @@
-import autogpt.agent.agent_manager as agent_manager
-from autogpt.app import start_agent, list_agents, execute_command
import unittest
-from unittest.mock import patch, MagicMock
+from unittest.mock import MagicMock, patch
+
+import autogpt.agent.agent_manager as agent_manager
+from autogpt.app import execute_command, list_agents, start_agent
class TestCommands(unittest.TestCase):
From c0aa423d7b6533d017d22af6d342a2d40a4a929e Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Sun, 16 Apr 2023 22:46:38 -0500
Subject: [PATCH 43/51] Fix agent remembering do nothing command, use correct
google function, disabled image_gen if not configured.
---
autogpt/agent/agent.py | 40 +++++++++++++++----------------
autogpt/commands/google_search.py | 4 ++--
autogpt/commands/image_gen.py | 3 ++-
3 files changed, 24 insertions(+), 23 deletions(-)
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index b771b1de48..65ca3c96ff 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -187,24 +187,24 @@ class Agent:
result = plugin.post_command(command_name, result)
if self.next_action_count > 0:
self.next_action_count -= 1
-
- memory_to_add = (
- f"Assistant Reply: {assistant_reply} "
- f"\nResult: {result} "
- f"\nHuman Feedback: {user_input} "
- )
-
- self.memory.add(memory_to_add)
-
- # Check if there's a result from the command append it to the message
- # history
- if result is not None:
- self.full_message_history.append(create_chat_message("system", result))
- logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
- else:
- self.full_message_history.append(
- create_chat_message("system", "Unable to execute command")
- )
- logger.typewriter_log(
- "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
+ if command_name != "do_nothing":
+ memory_to_add = (
+ f"Assistant Reply: {assistant_reply} "
+ f"\nResult: {result} "
+ f"\nHuman Feedback: {user_input} "
)
+
+ self.memory.add(memory_to_add)
+
+ # Check if there's a result from the command append it to the message
+ # history
+ if result is not None:
+ self.full_message_history.append(create_chat_message("system", result))
+ logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
+ else:
+ self.full_message_history.append(
+ create_chat_message("system", "Unable to execute command")
+ )
+ logger.typewriter_log(
+ "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
+ )
diff --git a/autogpt/commands/google_search.py b/autogpt/commands/google_search.py
index 0f635bcadc..f549ae8f6e 100644
--- a/autogpt/commands/google_search.py
+++ b/autogpt/commands/google_search.py
@@ -11,7 +11,7 @@ from autogpt.config import Config
CFG = Config()
-@command("google", "Google Search", '"input": ""', not CFG.google_api_key)
+@command("google", "Google Search", '"query": ""', not CFG.google_api_key)
def google_search(query: str, num_results: int = 8) -> str:
"""Return the results of a google search
@@ -40,7 +40,7 @@ def google_search(query: str, num_results: int = 8) -> str:
@command(
"google",
"Google Search",
- '"input": ""',
+ '"query": ""',
bool(CFG.google_api_key),
"Configure google_api_key.",
)
diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py
index 9dbb2fa586..ada285e721 100644
--- a/autogpt/commands/image_gen.py
+++ b/autogpt/commands/image_gen.py
@@ -14,7 +14,8 @@ from autogpt.workspace import path_in_workspace
CFG = Config()
-@command("generate_image", "Generate Image", '"prompt": ""')
+@command("generate_image", "Generate Image", '"prompt": ""',
+ CFG.image_provider)
def generate_image(prompt: str) -> str:
"""Generate an image from a prompt.
From 81c65af5600041cff773dc216d53494e732e1b98 Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Sun, 16 Apr 2023 22:51:39 -0500
Subject: [PATCH 44/51] blacked
---
autogpt/agent/agent.py | 7 ++--
autogpt/app.py | 4 +-
autogpt/args.py | 23 ++++++----
autogpt/commands/command.py | 4 +-
autogpt/commands/file_operations.py | 10 ++---
autogpt/commands/image_gen.py | 3 +-
autogpt/json_fixes/master_json_fix_method.py | 10 ++++-
autogpt/json_validation/validate_json.py | 4 +-
autogpt/logs.py | 42 +++++++++----------
autogpt/prompts/generator.py | 4 +-
autogpt/spinner.py | 4 +-
autogpt/utils.py | 2 +-
...ark_entrepeneur_gpt_with_difficult_user.py | 33 +++++++++------
13 files changed, 86 insertions(+), 64 deletions(-)
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index 65ca3c96ff..6683aae5f7 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -89,10 +89,9 @@ class Agent:
for plugin in cfg.plugins:
assistant_reply_json = plugin.post_planning(self, assistant_reply_json)
-
# Print Assistant thoughts
if assistant_reply_json != {}:
- validate_json(assistant_reply_json, 'llm_response_format_1')
+ validate_json(assistant_reply_json, "llm_response_format_1")
# Get command name and arguments
try:
print_assistant_thoughts(self.ai_name, assistant_reply_json)
@@ -199,7 +198,9 @@ class Agent:
# Check if there's a result from the command append it to the message
# history
if result is not None:
- self.full_message_history.append(create_chat_message("system", result))
+ self.full_message_history.append(
+ create_chat_message("system", result)
+ )
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
else:
self.full_message_history.append(
diff --git a/autogpt/app.py b/autogpt/app.py
index 97daaf05ba..38c49e4261 100644
--- a/autogpt/app.py
+++ b/autogpt/app.py
@@ -12,7 +12,7 @@ from autogpt.commands.file_operations import (
read_file,
search_files,
write_to_file,
- download_file
+ download_file,
)
from autogpt.commands.git_operations import clone_repository
from autogpt.commands.google_search import google_official_search, google_search
@@ -141,7 +141,7 @@ def execute_command(
if not CFG.allow_downloads:
return "Error: You do not have user authorization to download files locally."
return download_file(arguments["url"], arguments["file"])
-
+
# TODO: Change these to take in a file rather than pasted code, if
# non-file is given, return instructions "Input should be a python
# filepath, write your code to file and try again
diff --git a/autogpt/args.py b/autogpt/args.py
index f0e9c07a36..0e6eddfdb2 100644
--- a/autogpt/args.py
+++ b/autogpt/args.py
@@ -64,10 +64,10 @@ def parse_arguments() -> None:
" skip the re-prompt.",
)
parser.add_argument(
- '--allow-downloads',
- action='store_true',
- dest='allow_downloads',
- help='Dangerous: Allows Auto-GPT to download files natively.'
+ "--allow-downloads",
+ action="store_true",
+ dest="allow_downloads",
+ help="Dangerous: Allows Auto-GPT to download files natively.",
)
args = parser.parse_args()
@@ -141,10 +141,17 @@ def parse_arguments() -> None:
if args.allow_downloads:
logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
- logger.typewriter_log("WARNING: ", Fore.YELLOW,
- f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} " +
- "It is recommended that you monitor any files it downloads carefully.")
- logger.typewriter_log("WARNING: ", Fore.YELLOW, f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}")
+ logger.typewriter_log(
+ "WARNING: ",
+ Fore.YELLOW,
+ f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} "
+ + "It is recommended that you monitor any files it downloads carefully.",
+ )
+ logger.typewriter_log(
+ "WARNING: ",
+ Fore.YELLOW,
+ f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
+ )
CFG.allow_downloads = True
if args.browser_name:
diff --git a/autogpt/commands/command.py b/autogpt/commands/command.py
index 3b3ccf51c2..f21b1b526c 100644
--- a/autogpt/commands/command.py
+++ b/autogpt/commands/command.py
@@ -1,8 +1,6 @@
import importlib
import inspect
-import os
-import sys
-from typing import Any, Callable, List, Optional
+from typing import Any, Callable, Optional
# Unique identifier for auto-gpt commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"
diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py
index 5faf6d40e4..9011dc3b2e 100644
--- a/autogpt/commands/file_operations.py
+++ b/autogpt/commands/file_operations.py
@@ -243,23 +243,23 @@ def download_file(url, filename):
session = requests.Session()
retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
adapter = HTTPAdapter(max_retries=retry)
- session.mount('http://', adapter)
- session.mount('https://', adapter)
+ session.mount("http://", adapter)
+ session.mount("https://", adapter)
total_size = 0
downloaded_size = 0
with session.get(url, allow_redirects=True, stream=True) as r:
r.raise_for_status()
- total_size = int(r.headers.get('Content-Length', 0))
+ total_size = int(r.headers.get("Content-Length", 0))
downloaded_size = 0
- with open(safe_filename, 'wb') as f:
+ with open(safe_filename, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
downloaded_size += len(chunk)
- # Update the progress message
+ # Update the progress message
progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
spinner.update_message(f"{message} {progress}")
diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py
index ada285e721..f82e97aba6 100644
--- a/autogpt/commands/image_gen.py
+++ b/autogpt/commands/image_gen.py
@@ -14,8 +14,7 @@ from autogpt.workspace import path_in_workspace
CFG = Config()
-@command("generate_image", "Generate Image", '"prompt": ""',
- CFG.image_provider)
+@command("generate_image", "Generate Image", '"prompt": ""', CFG.image_provider)
def generate_image(prompt: str) -> str:
"""Generate an image from a prompt.
diff --git a/autogpt/json_fixes/master_json_fix_method.py b/autogpt/json_fixes/master_json_fix_method.py
index 7a2cf3cc81..135d754071 100644
--- a/autogpt/json_fixes/master_json_fix_method.py
+++ b/autogpt/json_fixes/master_json_fix_method.py
@@ -3,11 +3,14 @@ from typing import Any, Dict
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.speech import say_text
+
CFG = Config()
def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
- from autogpt.json_fixes.parsing import attempt_to_fix_json_by_finding_outermost_brackets
+ from autogpt.json_fixes.parsing import (
+ attempt_to_fix_json_by_finding_outermost_brackets,
+ )
from autogpt.json_fixes.parsing import fix_and_parse_json
@@ -21,7 +24,10 @@ def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
if assistant_reply_json != {}:
return assistant_reply_json
- logger.error("Error: The following AI output couldn't be converted to a JSON:\n", assistant_reply)
+ logger.error(
+ "Error: The following AI output couldn't be converted to a JSON:\n",
+ assistant_reply,
+ )
if CFG.speak_mode:
say_text("I have received an invalid JSON response from the OpenAI API.")
diff --git a/autogpt/json_validation/validate_json.py b/autogpt/json_validation/validate_json.py
index 440c3b0b91..f6e55180f4 100644
--- a/autogpt/json_validation/validate_json.py
+++ b/autogpt/json_validation/validate_json.py
@@ -19,7 +19,9 @@ def validate_json(json_object: object, schema_name: object) -> object:
if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path):
logger.error("The JSON object is invalid.")
if CFG.debug_mode:
- logger.error(json.dumps(json_object, indent=4)) # Replace 'json_object' with the variable containing the JSON data
+ logger.error(
+ json.dumps(json_object, indent=4)
+ ) # Replace 'json_object' with the variable containing the JSON data
logger.error("The following issues were found:")
for error in errors:
diff --git a/autogpt/logs.py b/autogpt/logs.py
index a585dffa53..df3487f2c1 100644
--- a/autogpt/logs.py
+++ b/autogpt/logs.py
@@ -47,7 +47,7 @@ class Logger(metaclass=Singleton):
# Info handler in activity.log
self.file_handler = logging.FileHandler(
- os.path.join(log_dir, log_file), 'a', 'utf-8'
+ os.path.join(log_dir, log_file), "a", "utf-8"
)
self.file_handler.setLevel(logging.DEBUG)
info_formatter = AutoGptFormatter(
@@ -57,7 +57,7 @@ class Logger(metaclass=Singleton):
# Error handler error.log
error_handler = logging.FileHandler(
- os.path.join(log_dir, error_file), 'a', 'utf-8'
+ os.path.join(log_dir, error_file), "a", "utf-8"
)
error_handler.setLevel(logging.ERROR)
error_formatter = AutoGptFormatter(
@@ -79,7 +79,7 @@ class Logger(metaclass=Singleton):
self.logger.setLevel(logging.DEBUG)
def typewriter_log(
- self, title="", title_color="", content="", speak_text=False, level=logging.INFO
+ self, title="", title_color="", content="", speak_text=False, level=logging.INFO
):
if speak_text and CFG.speak_mode:
say_text(f"{title}. {content}")
@@ -95,18 +95,18 @@ class Logger(metaclass=Singleton):
)
def debug(
- self,
- message,
- title="",
- title_color="",
+ self,
+ message,
+ title="",
+ title_color="",
):
self._log(title, title_color, message, logging.DEBUG)
def warn(
- self,
- message,
- title="",
- title_color="",
+ self,
+ message,
+ title="",
+ title_color="",
):
self._log(title, title_color, message, logging.WARN)
@@ -180,10 +180,10 @@ class AutoGptFormatter(logging.Formatter):
def format(self, record: LogRecord) -> str:
if hasattr(record, "color"):
record.title_color = (
- getattr(record, "color")
- + getattr(record, "title")
- + " "
- + Style.RESET_ALL
+ getattr(record, "color")
+ + getattr(record, "title")
+ + " "
+ + Style.RESET_ALL
)
else:
record.title_color = getattr(record, "title")
@@ -294,7 +294,9 @@ def print_assistant_thoughts(ai_name, assistant_reply):
logger.error("Error: \n", call_stack)
-def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object) -> None:
+def print_assistant_thoughts(
+ ai_name: object, assistant_reply_json_valid: object
+) -> None:
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
assistant_thoughts_speak = None
@@ -310,9 +312,7 @@ def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object
logger.typewriter_log(
f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
)
- logger.typewriter_log(
- "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
- )
+ logger.typewriter_log("REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}")
if assistant_thoughts_plan:
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
# If it's a list, join it into a string
@@ -326,9 +326,7 @@ def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object
for line in lines:
line = line.lstrip("- ")
logger.typewriter_log("- ", Fore.GREEN, line.strip())
- logger.typewriter_log(
- "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
- )
+ logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
# Speak the assistant's thoughts
if CFG.speak_mode and assistant_thoughts_speak:
say_text(assistant_thoughts_speak)
diff --git a/autogpt/prompts/generator.py b/autogpt/prompts/generator.py
index b422b6d6ce..c9a441d882 100644
--- a/autogpt/prompts/generator.py
+++ b/autogpt/prompts/generator.py
@@ -123,7 +123,9 @@ class PromptGenerator:
command_strings = []
if self.command_registry:
command_strings += [
- str(item) for item in self.command_registry.commands.values() if item.enabled
+ str(item)
+ for item in self.command_registry.commands.values()
+ if item.enabled
]
# These are the commands that are added manually, do_nothing and terminate
command_strings += [self._generate_command_string(item) for item in items]
diff --git a/autogpt/spinner.py b/autogpt/spinner.py
index febcea8eb1..4e33d74213 100644
--- a/autogpt/spinner.py
+++ b/autogpt/spinner.py
@@ -58,6 +58,8 @@ class Spinner:
delay: Delay in seconds before updating the message
"""
time.sleep(delay)
- sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") # Clear the current message
+ sys.stdout.write(
+ f"\r{' ' * (len(self.message) + 2)}\r"
+ ) # Clear the current message
sys.stdout.flush()
self.message = new_message
diff --git a/autogpt/utils.py b/autogpt/utils.py
index 11d98d1b74..db7d332136 100644
--- a/autogpt/utils.py
+++ b/autogpt/utils.py
@@ -32,7 +32,7 @@ def readable_file_size(size, decimal_places=2):
size: Size in bytes
decimal_places (int): Number of decimal places to display
"""
- for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
+ for unit in ["B", "KB", "MB", "GB", "TB"]:
if size < 1024.0:
break
size /= 1024.0
diff --git a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
index f7f1dac9dd..9a5025d37a 100644
--- a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
+++ b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
@@ -9,12 +9,12 @@ def benchmark_entrepeneur_gpt_with_difficult_user():
# Read the current ai_settings.yaml file and store its content.
ai_settings = None
- if os.path.exists('ai_settings.yaml'):
- with open('ai_settings.yaml', 'r') as f:
+ if os.path.exists("ai_settings.yaml"):
+ with open("ai_settings.yaml", "r") as f:
ai_settings = f.read()
- os.remove('ai_settings.yaml')
+ os.remove("ai_settings.yaml")
- input_data = '''Entrepreneur-GPT
+ input_data = """Entrepreneur-GPT
an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.
Increase net worth.
Develop and manage multiple businesses autonomously.
@@ -72,27 +72,34 @@ Refocus, please.
Disappointing suggestion.
Not helpful.
Needs improvement.
-Not what I need.'''
+Not what I need."""
# TODO: add questions above, to distract it even more.
- command = f'{sys.executable} -m autogpt'
+ command = f"{sys.executable} -m autogpt"
- process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- shell=True)
+ process = subprocess.Popen(
+ command,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True,
+ )
stdout_output, stderr_output = process.communicate(input_data.encode())
# Decode the output and print it
- stdout_output = stdout_output.decode('utf-8')
- stderr_output = stderr_output.decode('utf-8')
+ stdout_output = stdout_output.decode("utf-8")
+ stderr_output = stderr_output.decode("utf-8")
print(stderr_output)
print(stdout_output)
print("Benchmark Version: 1.0.0")
print("JSON ERROR COUNT:")
- count_errors = stdout_output.count("Error: The following AI output couldn't be converted to a JSON:")
- print(f'{count_errors}/50 Human feedbacks')
+ count_errors = stdout_output.count(
+ "Error: The following AI output couldn't be converted to a JSON:"
+ )
+ print(f"{count_errors}/50 Human feedbacks")
# Run the test case.
-if __name__ == '__main__':
+if __name__ == "__main__":
benchmark_entrepeneur_gpt_with_difficult_user()
From 708374d95b6fb54e15c6a85c71a81235cacbe7c1 Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Sun, 16 Apr 2023 22:56:34 -0500
Subject: [PATCH 45/51] Fix linting
---
autogpt/__main__.py | 4 +--
autogpt/agent/agent.py | 33 +++++++++++++--------
tests/unit/test_browse_scrape_text.py | 41 ++++++++++++++++++---------
3 files changed, 51 insertions(+), 27 deletions(-)
diff --git a/autogpt/__main__.py b/autogpt/__main__.py
index c721088ace..c2e2e5c168 100644
--- a/autogpt/__main__.py
+++ b/autogpt/__main__.py
@@ -74,9 +74,9 @@ def main() -> None:
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(cfg, init=True)
logger.typewriter_log(
- f"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
+ "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
)
- logger.typewriter_log(f"Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
+ logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
system_prompt = ai_config.construct_full_prompt()
if cfg.debug_mode:
logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt)
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index 6683aae5f7..e65c7e6177 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -19,18 +19,25 @@ class Agent:
memory: The memory object to use.
full_message_history: The full message history.
next_action_count: The number of actions to execute.
- system_prompt: The system prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully.
- Currently, the dynamic and customizable information in the system prompt are ai_name, description and goals.
+ system_prompt: The system prompt is the initial prompt that defines everything
+ the AI needs to know to achieve its task successfully.
+ Currently, the dynamic and customizable information in the system prompt are
+ ai_name, description and goals.
- triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is:
- Determine which next command to use, and respond using the format specified above:
- The triggering prompt is not part of the system prompt because between the system prompt and the triggering
- prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve.
+ triggering_prompt: The last sentence the AI will see before answering.
+ For Auto-GPT, this prompt is:
+ Determine which next command to use, and respond using the format specified
+ above:
+ The triggering prompt is not part of the system prompt because between the
+ system prompt and the triggering
+ prompt we have contextual information that can distract the AI and make it
+ forget that its goal is to find the next task to achieve.
SYSTEM PROMPT
CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
TRIGGERING PROMPT
- The triggering prompt reminds the AI about its short term meta task (defining the next task)
+ The triggering prompt reminds the AI about its short term meta task
+ (defining the next task)
"""
def __init__(
@@ -96,14 +103,13 @@ class Agent:
try:
print_assistant_thoughts(self.ai_name, assistant_reply_json)
command_name, arguments = get_command(assistant_reply_json)
- # command_name, arguments = assistant_reply_json_valid["command"]["name"], assistant_reply_json_valid["command"]["args"]
if cfg.speak_mode:
say_text(f"I want to execute {command_name}")
except Exception as e:
logger.error("Error: \n", str(e))
if not cfg.continuous_mode and self.next_action_count == 0:
- ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
+ # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
logger.typewriter_log(
@@ -177,10 +183,13 @@ class Agent:
command_name, arguments = plugin.pre_command(
command_name, arguments
)
- result = (
- f"Command {command_name} returned: "
- f"{execute_command(self.command_registry, command_name, arguments, self.config.prompt_generator)}"
+ command_result = execute_command(
+ self.command_registry,
+ command_name,
+ arguments,
+ self.config.prompt_generator,
)
+ result = f"Command {command_name} returned: " f"{command_result}"
for plugin in cfg.plugins:
result = plugin.post_command(command_name, result)
diff --git a/tests/unit/test_browse_scrape_text.py b/tests/unit/test_browse_scrape_text.py
index fea5ebfc05..1a36e19b5d 100644
--- a/tests/unit/test_browse_scrape_text.py
+++ b/tests/unit/test_browse_scrape_text.py
@@ -9,16 +9,20 @@ Code Analysis
Objective:
The objective of the "scrape_text" function is to scrape the text content from
-a given URL and return it as a string, after removing any unwanted HTML tags and scripts.
+a given URL and return it as a string, after removing any unwanted HTML tags and
+ scripts.
Inputs:
- url: a string representing the URL of the webpage to be scraped.
Flow:
-1. Send a GET request to the given URL using the requests library and the user agent header from the config file.
+1. Send a GET request to the given URL using the requests library and the user agent
+ header from the config file.
2. Check if the response contains an HTTP error. If it does, return an error message.
-3. Use BeautifulSoup to parse the HTML content of the response and extract all script and style tags.
-4. Get the text content of the remaining HTML using the get_text() method of BeautifulSoup.
+3. Use BeautifulSoup to parse the HTML content of the response and extract all script
+ and style tags.
+4. Get the text content of the remaining HTML using the get_text() method of
+ BeautifulSoup.
5. Split the text into lines and then into chunks, removing any extra whitespace.
6. Join the chunks into a single string with newline characters between them.
7. Return the cleaned text.
@@ -27,9 +31,12 @@ Outputs:
- A string representing the cleaned text content of the webpage.
Additional aspects:
-- The function uses the requests library and BeautifulSoup to handle the HTTP request and HTML parsing, respectively.
-- The function removes script and style tags from the HTML to avoid including unwanted content in the text output.
-- The function uses a generator expression to split the text into lines and chunks, which can improve performance for large amounts of text.
+- The function uses the requests library and BeautifulSoup to handle the HTTP request
+ and HTML parsing, respectively.
+- The function removes script and style tags from the HTML to avoid including unwanted
+ content in the text output.
+- The function uses a generator expression to split the text into lines and chunks,
+ which can improve performance for large amounts of text.
"""
@@ -40,26 +47,33 @@ class TestScrapeText:
expected_text = "This is some sample text"
mock_response = mocker.Mock()
mock_response.status_code = 200
- mock_response.text = f""
+ mock_response.text = (
+ ""
+ )
mocker.patch("requests.Session.get", return_value=mock_response)
- # Call the function with a valid URL and assert that it returns the expected text
+ # Call the function with a valid URL and assert that it returns the
+ # expected text
url = "http://www.example.com"
assert scrape_text(url) == expected_text
- # Tests that the function returns an error message when an invalid or unreachable url is provided.
+ # Tests that the function returns an error message when an invalid or unreachable
+ # url is provided.
def test_invalid_url(self, mocker):
# Mock the requests.get() method to raise an exception
mocker.patch(
"requests.Session.get", side_effect=requests.exceptions.RequestException
)
- # Call the function with an invalid URL and assert that it returns an error message
+ # Call the function with an invalid URL and assert that it returns an error
+ # message
url = "http://www.invalidurl.com"
error_message = scrape_text(url)
assert "Error:" in error_message
- # Tests that the function returns an empty string when the html page contains no text to be scraped.
+ # Tests that the function returns an empty string when the html page contains no
+ # text to be scraped.
def test_no_text(self, mocker):
# Mock the requests.get() method to return a response with no text
mock_response = mocker.Mock()
@@ -71,7 +85,8 @@ class TestScrapeText:
url = "http://www.example.com"
assert scrape_text(url) == ""
- # Tests that the function returns an error message when the response status code is an http error (>=400).
+ # Tests that the function returns an error message when the response status code is
+ # an http error (>=400).
def test_http_error(self, mocker):
# Mock the requests.get() method to return a response with a 404 status code
mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404))
From 23d3dafc5152b5c7437a484d38e29f282fbc75ad Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Sun, 16 Apr 2023 23:18:29 -0500
Subject: [PATCH 46/51] Maybe fix tests, fix safe_path_join function.
---
autogpt/workspace.py | 2 +-
tests/test_commands.py | 4 ++--
tests/test_prompt_generator.py | 1 +
3 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/autogpt/workspace.py b/autogpt/workspace.py
index 964a94d14d..e1e990824d 100644
--- a/autogpt/workspace.py
+++ b/autogpt/workspace.py
@@ -35,7 +35,7 @@ def safe_path_join(base: Path, *paths: str | Path) -> Path:
"""
joined_path = base.joinpath(*paths).resolve()
- if not joined_path.is_relative_to(base):
+ if not str(joined_path.absolute()).startswith(str(base.absolute())):
raise ValueError(
f"Attempted to access path '{joined_path}' outside of working directory '{base}'."
)
diff --git a/tests/test_commands.py b/tests/test_commands.py
index 49c09f11db..a1fe0cb518 100644
--- a/tests/test_commands.py
+++ b/tests/test_commands.py
@@ -139,7 +139,7 @@ class TestCommandRegistry:
def test_import_mock_commands_module(self):
"""Test that the registry can import a module with mock command plugins."""
registry = CommandRegistry()
- mock_commands_module = "auto_gpt.tests.mocks.mock_commands"
+ mock_commands_module = "tests.mocks.mock_commands"
registry.import_commands(mock_commands_module)
@@ -155,7 +155,7 @@ class TestCommandRegistry:
registry = CommandRegistry()
# Create a temp command file
- src = Path("/app/auto_gpt/tests/mocks/mock_commands.py")
+ src = Path("mocks/mock_commands.py")
temp_commands_file = tmp_path / "mock_commands.py"
shutil.copyfile(src, temp_commands_file)
diff --git a/tests/test_prompt_generator.py b/tests/test_prompt_generator.py
index 59ca7f95da..1fa1754d74 100644
--- a/tests/test_prompt_generator.py
+++ b/tests/test_prompt_generator.py
@@ -38,6 +38,7 @@ class TestPromptGenerator(TestCase):
"label": command_label,
"name": command_name,
"args": args,
+ "function": None,
}
self.assertIn(command, self.generator.commands)
From d394b032d79f1329ae6a4df3cfa56c22757a2db7 Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Sun, 16 Apr 2023 23:23:31 -0500
Subject: [PATCH 47/51] Fix test
---
tests/test_commands.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/tests/test_commands.py b/tests/test_commands.py
index a1fe0cb518..4be41a9080 100644
--- a/tests/test_commands.py
+++ b/tests/test_commands.py
@@ -1,4 +1,5 @@
import shutil
+import os
import sys
from pathlib import Path
@@ -155,7 +156,7 @@ class TestCommandRegistry:
registry = CommandRegistry()
# Create a temp command file
- src = Path("mocks/mock_commands.py")
+ src = Path(os.getcwd()) / "tests/mocks/mock_commands.py"
temp_commands_file = tmp_path / "mock_commands.py"
shutil.copyfile(src, temp_commands_file)
From 3715ebc7eb33ba0831deefcf4947a8b5bb295307 Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Sun, 16 Apr 2023 23:30:42 -0500
Subject: [PATCH 48/51] Add hooks for chat completion
---
autogpt/llm_utils.py | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py
index 701d622b54..7aac703c02 100644
--- a/autogpt/llm_utils.py
+++ b/autogpt/llm_utils.py
@@ -1,7 +1,6 @@
from __future__ import annotations
import time
-from ast import List
import openai
from colorama import Fore
@@ -76,6 +75,20 @@ def create_chat_completion(
+ f"Creating chat completion with model {model}, temperature {temperature},"
f" max_tokens {max_tokens}" + Fore.RESET
)
+ for plugin in CFG.plugins:
+ if plugin.can_handle_chat_completion(
+ messages=messages,
+ model=model,
+ temperature=temperature,
+ max_tokens=max_tokens,
+ ):
+ response = plugin.handle_chat_completion(
+ messages=messages,
+ model=model,
+ temperature=temperature,
+ max_tokens=max_tokens,
+ )
+ return response
for attempt in range(num_retries):
backoff = 2 ** (attempt + 2)
try:
@@ -99,7 +112,7 @@ def create_chat_completion(
if CFG.debug_mode:
print(
Fore.RED + "Error: ",
- f"Reached rate limit, passing..." + Fore.RESET,
+ "Reached rate limit, passing..." + Fore.RESET,
)
except APIError as e:
if e.http_status == 502:
From fbd4e06df5d185d05d1daed1a0ee2d9db2c9b947 Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Sun, 16 Apr 2023 23:39:33 -0500
Subject: [PATCH 49/51] Add early abort functions.
---
autogpt/agent/agent.py | 4 ++++
autogpt/agent/agent_manager.py | 12 ++++++++++++
autogpt/chat.py | 2 ++
autogpt/llm_utils.py | 2 ++
4 files changed, 20 insertions(+)
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index e65c7e6177..7b1b5e1520 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -180,6 +180,8 @@ class Agent:
result = f"Human feedback: {user_input}"
else:
for plugin in cfg.plugins:
+ if not plugin.can_handle_pre_command():
+ continue
command_name, arguments = plugin.pre_command(
command_name, arguments
)
@@ -192,6 +194,8 @@ class Agent:
result = f"Command {command_name} returned: " f"{command_result}"
for plugin in cfg.plugins:
+ if not plugin.can_handle_post_command():
+ continue
result = plugin.post_command(command_name, result)
if self.next_action_count > 0:
self.next_action_count -= 1
diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py
index e1353e03a5..d264815084 100644
--- a/autogpt/agent/agent_manager.py
+++ b/autogpt/agent/agent_manager.py
@@ -31,6 +31,8 @@ class AgentManager(metaclass=Singleton):
{"role": "user", "content": prompt},
]
for plugin in self.cfg.plugins:
+ if not plugin.can_handle_pre_instruction():
+ continue
plugin_messages = plugin.pre_instruction(messages)
if plugin_messages:
for plugin_message in plugin_messages:
@@ -46,6 +48,8 @@ class AgentManager(metaclass=Singleton):
plugins_reply = ""
for i, plugin in enumerate(self.cfg.plugins):
+ if not plugin.can_handle_on_instruction():
+ continue
plugin_result = plugin.on_instruction(messages)
if plugin_result:
sep = "" if not i else "\n"
@@ -61,6 +65,8 @@ class AgentManager(metaclass=Singleton):
self.agents[key] = (task, messages, model)
for plugin in self.cfg.plugins:
+ if not plugin.can_handle_post_instruction():
+ continue
agent_reply = plugin.post_instruction(agent_reply)
return key, agent_reply
@@ -81,6 +87,8 @@ class AgentManager(metaclass=Singleton):
messages.append({"role": "user", "content": message})
for plugin in self.cfg.plugins:
+ if not plugin.can_handle_pre_instruction():
+ continue
plugin_messages = plugin.pre_instruction(messages)
if plugin_messages:
for plugin_message in plugin_messages:
@@ -96,6 +104,8 @@ class AgentManager(metaclass=Singleton):
plugins_reply = agent_reply
for i, plugin in enumerate(self.cfg.plugins):
+ if not plugin.can_handle_on_instruction():
+ continue
plugin_result = plugin.on_instruction(messages)
if plugin_result:
sep = "" if not i else "\n"
@@ -105,6 +115,8 @@ class AgentManager(metaclass=Singleton):
messages.append({"role": "assistant", "content": plugins_reply})
for plugin in self.cfg.plugins:
+ if not plugin.can_handle_post_instruction():
+ continue
agent_reply = plugin.post_instruction(agent_reply)
return agent_reply
diff --git a/autogpt/chat.py b/autogpt/chat.py
index 16693040a9..22fe636cb7 100644
--- a/autogpt/chat.py
+++ b/autogpt/chat.py
@@ -137,6 +137,8 @@ def chat_with_ai(
plugin_count = len(cfg.plugins)
for i, plugin in enumerate(cfg.plugins):
+ if not plugin.can_handle_on_planning():
+ continue
plugin_response = plugin.on_planning(
agent.prompt_generator, current_context
)
diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py
index 7aac703c02..4fb0e1f537 100644
--- a/autogpt/llm_utils.py
+++ b/autogpt/llm_utils.py
@@ -131,6 +131,8 @@ def create_chat_completion(
raise RuntimeError(f"Failed to get response after {num_retries} retries")
resp = response.choices[0].message["content"]
for plugin in CFG.plugins:
+ if not plugin.can_handle_on_response():
+ continue
resp = plugin.on_response(resp)
return resp
From 83861883565d64394ebbf88a57a533b6adf60a31 Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Mon, 17 Apr 2023 00:49:51 -0500
Subject: [PATCH 50/51] Fix early abort
---
autogpt/config/ai_config.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py
index c9022773fb..af387f0bdc 100644
--- a/autogpt/config/ai_config.py
+++ b/autogpt/config/ai_config.py
@@ -126,6 +126,8 @@ class AIConfig:
prompt_generator.role = self.ai_role
prompt_generator.command_registry = self.command_registry
for plugin in cfg.plugins:
+ if not plugin.can_handle_post_prompt():
+ continue
prompt_generator = plugin.post_prompt(prompt_generator)
# Construct full prompt
From fe85f079b08f919efecba1374073c9495ef6d5de Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Mon, 17 Apr 2023 01:09:17 -0500
Subject: [PATCH 51/51] Fix early abort
---
autogpt/agent/agent.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index 7b1b5e1520..3a79760cc7 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -94,6 +94,8 @@ class Agent:
assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
for plugin in cfg.plugins:
+ if not plugin.can_handle_post_planning():
+ continue
assistant_reply_json = plugin.post_planning(self, assistant_reply_json)
# Print Assistant thoughts