Merge with master

This commit is contained in:
Joseph C. Miller, II
2023-04-13 12:51:36 -06:00
39 changed files with 587 additions and 229 deletions

View File

@@ -6,6 +6,7 @@ agents = {} # key, (task, full_message_history, model)
# Create new GPT agent
# TODO: Centralise use of create_chat_completion() to globally enforce token limit
def create_agent(task, prompt, model):
"""Create a new agent and return its key"""
global next_key
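
For context, the agents registry noted in the hunk header maps an integer key to a (task, full_message_history, model) tuple. A minimal sketch of that bookkeeping, with a hypothetical register_agent helper that is not part of this commit:

agents = {}    # key -> (task, full_message_history, model)
next_key = 0
def register_agent(task, messages, model):
    """Hypothetical helper: store an agent under the next sequential key"""
    global next_key
    key = next_key
    next_key += 1
    agents[key] = (task, messages, model)
    return key
key = register_agent("summarize", [], "gpt-3.5-turbo")
task, history, model = agents[key]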

View File

@@ -2,6 +2,7 @@ import yaml
import data
import os
class AIConfig:
"""
A class object that contains the configuration information for the AI

View File

@@ -45,6 +45,7 @@ def improve_code(suggestions: List[str], code: str) -> str:
result_string = call_ai_function(function_string, args, description_string)
return result_string
def write_tests(code: str, focus: List[str]) -> str:
"""
A function that takes in code and focus topics and returns a response from the create_chat_completion API call.
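
A hypothetical call, assuming write_tests is imported from this module; the sample code and focus areas are invented:

sample_code = "def add(a, b):\n    return a + b"
tests = write_tests(sample_code, ["edge cases", "type errors"])
print(tests)  # the model's suggested test code, returned as a string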

View File

@@ -6,6 +6,7 @@ from urllib.parse import urlparse, urljoin
cfg = Config()
# Function to check if the URL is valid
def is_valid_url(url):
try:
@@ -14,49 +15,51 @@ def is_valid_url(url):
except ValueError:
return False
# Function to sanitize the URL
def sanitize_url(url):
return urljoin(url, urlparse(url).path)
# Function to make a request with a specified timeout and handle exceptions
def make_request(url, timeout=10):
try:
response = requests.get(url, headers=cfg.user_agent_header, timeout=timeout)
response.raise_for_status()
return response
except requests.exceptions.RequestException as e:
return "Error: " + str(e)
# Define and check for local file address prefixes
def check_local_file_access(url):
local_prefixes = ['file:///', 'file://localhost', 'http://localhost', 'https://localhost']
return any(url.startswith(prefix) for prefix in local_prefixes)
def get_response(url, headers=cfg.user_agent_header, timeout=10):
try:
# Restrict access to local files
if check_local_file_access(url):
raise ValueError('Access to local files is restricted')
# Most basic check if the URL is valid:
if not url.startswith('http://') and not url.startswith('https://'):
raise ValueError('Invalid URL format')
sanitized_url = sanitize_url(url)
response = requests.get(sanitized_url, headers=headers, timeout=timeout)
# Check if the response contains an HTTP error
if response.status_code >= 400:
return None, "Error: HTTP " + str(response.status_code) + " error"
return response, None
except ValueError as ve:
# Handle invalid URL format
return None, "Error: " + str(ve)
except requests.exceptions.RequestException as re:
# Handle exceptions related to the HTTP request (e.g., connection errors, timeouts, etc.)
return None, "Error: " + str(re)
def scrape_text(url):
"""Scrape text from a webpage"""
# Basic check if the URL is valid
if not url.startswith('http'):
return "Error: Invalid URL"
# Restrict access to local files
if check_local_file_access(url):
return "Error: Access to local files is restricted"
# Validate the input URL
if not is_valid_url(url):
# Sanitize the input URL
sanitized_url = sanitize_url(url)
# Make the request with a timeout and handle exceptions
response = make_request(sanitized_url)
if isinstance(response, str):
return response
else:
# Sanitize the input URL
sanitized_url = sanitize_url(url)
response = requests.get(sanitized_url, headers=cfg.user_agent_header)
response, error_message = get_response(url)
if error_message:
return error_message
soup = BeautifulSoup(response.text, "html.parser")
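
The new get_response() helper returns a (response, error_message) pair where exactly one element is set, so callers branch on the error string instead of catching exceptions. A minimal sketch of a call site, assuming the module's Config is initialized:

response, error_message = get_response("https://example.com")
if error_message:
    print(error_message)           # e.g. "Error: Invalid URL format"
else:
    print(response.status_code)    # a normal requests.Response object
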
@@ -89,11 +92,9 @@ def format_hyperlinks(hyperlinks):
def scrape_links(url):
"""Scrape links from a webpage"""
response = requests.get(url, headers=cfg.user_agent_header)
# Check if the response contains an HTTP error
if response.status_code >= 400:
return "error"
response, error_message = get_response(url)
if error_message:
return error_message
soup = BeautifulSoup(response.text, "html.parser")
@@ -131,6 +132,7 @@ def create_message(chunk, question):
"content": f"\"\"\"{chunk}\"\"\" Using the above text, please answer the following question: \"{question}\" -- if the question cannot be answered using the text, please summarize the text."
}
def summarize_text(text, question):
"""Summarize text using the LLM model"""
if not text:

View File

@@ -3,6 +3,8 @@ from config import Config
cfg = Config()
from llm_utils import create_chat_completion
# This is a magic function that can do anything with no code. See
# https://github.com/Torantulino/AI-Functions for more info.
def call_ai_function(function, args, description, model=None):

View File

@@ -9,6 +9,7 @@ import logging
cfg = Config()
def create_chat_message(role, content):
"""
Create a chat message with the given role and content.

View File

@@ -7,7 +7,7 @@ import speak
from config import Config
import ai_functions as ai
from file_operations import read_file, write_to_file, append_to_file, delete_file, search_files
from execute_code import execute_python_file
from execute_code import execute_python_file, execute_shell
from json_parser import fix_and_parse_json
from image_gen import generate_image
from duckduckgo_search import ddg
@@ -24,6 +24,7 @@ def is_valid_int(value):
except ValueError:
return False
def get_command(response):
"""Parse the response and return the command name and arguments"""
try:
@@ -103,6 +104,11 @@ def execute_command(command_name, arguments):
return ai.write_tests(arguments["code"], arguments.get("focus"))
elif command_name == "execute_python_file": # Add this command
return execute_python_file(arguments["file"])
elif command_name == "execute_shell":
if cfg.execute_local_commands:
return execute_shell(arguments["command_line"])
else:
return "You are not allowed to run local shell commands. To execute shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' in your config. Do not attempt to bypass the restriction."
elif command_name == "generate_image":
return generate_image(arguments["prompt"])
elif command_name == "do_nothing":
@@ -130,6 +136,7 @@ def google_search(query, num_results=8):
return json.dumps(search_results, ensure_ascii=False, indent=4)
def google_official_search(query, num_results=8):
"""Return the results of a google search using the official Google API"""
from googleapiclient.discovery import build
@@ -166,6 +173,7 @@ def google_official_search(query, num_results=8):
# Return the list of search result URLs
return search_results_links
def browse_website(url, question):
"""Browse a website and return the summary and links"""
summary = get_text_summary(url, question)
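
execute_shell is gated on cfg.execute_local_commands, which the Config class reads from the EXECUTE_LOCAL_COMMANDS environment variable (see the config.py changes below). A hypothetical .env entry to opt in:

# .env (illustrative) -- the value is compared against the literal string 'True'
EXECUTE_LOCAL_COMMANDS=True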

View File

@@ -1,6 +1,7 @@
import abc
import os
import openai
import yaml
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
@@ -44,15 +45,13 @@ class Config(metaclass=Singleton):
self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.use_azure = False
self.temperature = float(os.getenv("TEMPERATURE", "1"))
self.use_azure = os.getenv("USE_AZURE") == 'True'
self.execute_local_commands = os.getenv('EXECUTE_LOCAL_COMMANDS', 'False') == 'True'
if self.use_azure:
self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE")
self.openai_api_version = os.getenv("OPENAI_AZURE_API_VERSION")
self.openai_deployment_id = os.getenv("OPENAI_AZURE_DEPLOYMENT_ID")
self.azure_chat_deployment_id = os.getenv("OPENAI_AZURE_CHAT_DEPLOYMENT_ID")
self.azure_embeddigs_deployment_id = os.getenv("OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID")
openai.api_type = "azure"
self.load_azure_config()
openai.api_type = self.openai_api_type
openai.api_base = self.openai_api_base
openai.api_version = self.openai_api_version
@@ -74,7 +73,7 @@ class Config(metaclass=Singleton):
# User agent headers to use when browsing web
# Some websites might just completely deny the request with an error code if no user agent is provided.
self.user_agent_header = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
self.user_agent_header = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
self.redis_host = os.getenv("REDIS_HOST", "localhost")
self.redis_port = os.getenv("REDIS_PORT", "6379")
self.redis_password = os.getenv("REDIS_PASSWORD", "")
@@ -86,6 +85,47 @@ class Config(metaclass=Singleton):
# Initialize the OpenAI API client
openai.api_key = self.openai_api_key
def get_azure_deployment_id_for_model(self, model: str) -> str:
"""
Returns the relevant deployment id for the model specified.
Parameters:
model(str): The model to map to the deployment id.
Returns:
The matching deployment id if found, otherwise an empty string.
"""
if model == self.fast_llm_model:
return self.azure_model_to_deployment_id_map["fast_llm_model_deployment_id"]
elif model == self.smart_llm_model:
return self.azure_model_to_deployment_id_map["smart_llm_model_deployment_id"]
elif model == "text-embedding-ada-002":
return self.azure_model_to_deployment_id_map["embedding_model_deployment_id"]
else:
return ""
AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), '..', 'azure.yaml')
def load_azure_config(self, config_file: str=AZURE_CONFIG_FILE) -> None:
"""
Loads the Azure hosting configuration parameters from the specified YAML file.
Parameters:
config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"
Returns:
None
"""
try:
with open(config_file) as file:
config_params = yaml.load(file, Loader=yaml.FullLoader)
except FileNotFoundError:
config_params = {}
self.openai_api_type = os.getenv("OPENAI_API_TYPE", config_params.get("azure_api_type", "azure"))
self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE", config_params.get("azure_api_base", ""))
self.openai_api_version = os.getenv("OPENAI_AZURE_API_VERSION", config_params.get("azure_api_version", ""))
self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", {})  # an empty dict, not a list: the map is looked up by string keys
def set_continuous_mode(self, value: bool):
"""Set the continuous mode value."""
self.continuous_mode = value
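
From the keys read in load_azure_config() and get_azure_deployment_id_for_model(), a plausible azure.yaml has the shape below; the deployment names and endpoint are invented placeholders:

# azure.yaml (illustrative)
azure_api_type: azure
azure_api_base: https://your-resource-name.openai.azure.com
azure_api_version: 2023-03-15-preview
azure_model_map:
    fast_llm_model_deployment_id: gpt-35-turbo-deployment
    smart_llm_model_deployment_id: gpt-4-deployment
    embedding_model_deployment_id: ada-002-deployment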

View File

@@ -1,6 +1,7 @@
import os
from pathlib import Path
def load_prompt():
"""Load the prompt from data/prompt.txt"""
try:

View File

@@ -22,9 +22,10 @@ COMMANDS:
16. Get Improved Code: "improve_code", args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"
17. Write Tests: "write_tests", args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"
18. Execute Python File: "execute_python_file", args: "file": "<file>"
19. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
20. Generate Image: "generate_image", args: "prompt": "<prompt>"
21. Do Nothing: "do_nothing", args: ""
19. Execute Shell Command, non-interactive commands only: "execute_shell", args: "command_line": "<command_line>".
20. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
21. Generate Image: "generate_image", args: "prompt": "<prompt>"
22. Do Nothing: "do_nothing", args: ""
RESOURCES:
@@ -44,8 +45,7 @@ You should only respond in JSON format as described below
RESPONSE FORMAT:
{
"thoughts":
{
"thoughts": {
"text": "thought",
"reasoning": "reasoning",
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
@@ -54,7 +54,7 @@ RESPONSE FORMAT:
},
"command": {
"name": "command name",
"args":{
"args": {
"arg name": "value"
}
}

View File

@@ -1,17 +1,20 @@
import docker
import os
import subprocess
WORKSPACE_FOLDER = "auto_gpt_workspace"
def execute_python_file(file):
"""Execute a Python file in a Docker container and return the output"""
workspace_folder = "auto_gpt_workspace"
print (f"Executing file '{file}' in workspace '{workspace_folder}'")
print (f"Executing file '{file}' in workspace '{WORKSPACE_FOLDER}'")
if not file.endswith(".py"):
return "Error: Invalid file type. Only .py files are allowed."
file_path = os.path.join(workspace_folder, file)
file_path = os.path.join(WORKSPACE_FOLDER, file)
if not os.path.isfile(file_path):
return f"Error: File '{file}' does not exist."
@@ -19,14 +22,31 @@ def execute_python_file(file):
try:
client = docker.from_env()
image_name = 'python:3.10'
try:
client.images.get(image_name)
print(f"Image '{image_name}' found locally")
except docker.errors.ImageNotFound:
print(f"Image '{image_name}' not found locally, pulling from Docker Hub")
# Use the low-level API to stream the pull response
low_level_client = docker.APIClient()
for line in low_level_client.pull(image_name, stream=True, decode=True):
# Print the status and progress, if available
status = line.get('status')
progress = line.get('progress')
if status and progress:
print(f"{status}: {progress}")
elif status:
print(status)
# You can replace 'python:3.10' with the desired Python image/version
# You can find available Python images on Docker Hub:
# https://hub.docker.com/_/python
container = client.containers.run(
'python:3.10',
image_name,
f'python {file}',
volumes={
os.path.abspath(workspace_folder): {
os.path.abspath(WORKSPACE_FOLDER): {
'bind': '/workspace',
'mode': 'ro'}},
working_dir='/workspace',
@@ -46,3 +66,23 @@ def execute_python_file(file):
except Exception as e:
return f"Error: {str(e)}"
def execute_shell(command_line):
    """Execute a shell command in the workspace folder and return its captured output"""
    current_dir = os.getcwd()
    if WORKSPACE_FOLDER not in current_dir:  # Change dir into workspace if necessary
        work_dir = os.path.join(os.getcwd(), WORKSPACE_FOLDER)
        os.chdir(work_dir)
    print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
    result = subprocess.run(command_line, capture_output=True, shell=True)
    output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
    # Change back to whatever the prior working dir was
    os.chdir(current_dir)
    return output
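
As written, result.stdout and result.stderr are bytes objects (subprocess.run is not given text=True), so the formatted string embeds b'...' literals. A variant that yields plain text, offered as a sketch rather than part of this commit:

# Sketch: text=True decodes the captured streams to str before formatting
result = subprocess.run(command_line, capture_output=True, shell=True, text=True)
output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"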

View File

@@ -38,7 +38,7 @@ def write_to_file(filename, text):
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
with open(filepath, "w") as f:
with open(filepath, "w", encoding='utf-8') as f:
f.write(text)
return "File written to successfully."
except Exception as e:
@@ -65,6 +65,7 @@ def delete_file(filename):
except Exception as e:
return "Error: " + str(e)
def search_files(directory):
found_files = []
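
The encoding='utf-8' added to write_to_file() makes writes independent of the platform's default codec. A standalone illustration of the failure mode it avoids (hypothetical snippet, not from this commit):

# Without an explicit encoding, a platform default such as cp1252
# raises UnicodeEncodeError on non-Latin text; utf-8 always succeeds
with open("out.txt", "w", encoding="utf-8") as f:
    f.write("καλημέρα")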

View File

@@ -11,6 +11,7 @@ cfg = Config()
working_directory = "auto_gpt_workspace"
def generate_image(prompt):
filename = str(uuid.uuid4()) + ".jpg"

View File

@@ -4,12 +4,13 @@ cfg = Config()
openai.api_key = cfg.openai_api_key
# Overly simple abstraction until we create something better
def create_chat_completion(messages, model=None, temperature=None, max_tokens=None)->str:
def create_chat_completion(messages, model=None, temperature=cfg.temperature, max_tokens=None)->str:
"""Create a chat completion using the OpenAI API"""
if cfg.use_azure:
response = openai.ChatCompletion.create(
deployment_id=cfg.azure_chat_deployment_id,
deployment_id=cfg.get_azure_deployment_id_for_model(model),
model=model,
messages=messages,
temperature=temperature,
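
For reference, a hypothetical call site for create_chat_completion(); when use_azure is set, the model name is mapped to its Azure deployment id automatically:

# Illustrative usage; assumes OPENAI_API_KEY (and any Azure settings) are configured
from config import Config
from llm_utils import create_chat_completion
cfg = Config()
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hello."},
]
reply = create_chat_completion(messages, model=cfg.smart_llm_model)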

View File

@@ -124,6 +124,12 @@ class Logger(metaclass=Singleton):
self.logger.setLevel(level)
self.typing_logger.setLevel(level)
def double_check(self, additionalText=None):
if not additionalText:
additionalText = "Please ensure you've setup and configured everything correctly. Read https://github.com/Torantulino/Auto-GPT#readme to double check. You can also create a github issue or join the discord and ask there!"
self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText)
'''
Output stream to console using simulated typing
@@ -151,6 +157,7 @@ class TypingConsoleHandler(logging.StreamHandler):
except Exception:
self.handleError(record)
class ConsoleHandler(logging.StreamHandler):
def emit(self, record):
msg = self.format(record)
@@ -160,13 +167,11 @@ class ConsoleHandler(logging.StreamHandler):
self.handleError(record)
'''
Handles the custom placeholders 'title_color' and 'message_no_color'.
To use this formatter, make sure to pass 'color', 'title' as log extras.
'''
class AutoGptFormatter(logging.Formatter):
"""
Handles the custom placeholders 'title_color' and 'message_no_color'.
To use this formatter, make sure to pass 'color', 'title' as log extras.
"""
def format(self, record: LogRecord) -> str:
if hasattr(record, 'color'):
record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL

View File

@@ -20,16 +20,18 @@ import logging
cfg = Config()
def check_openai_api_key():
"""Check if the OpenAI API key is set in config.py or as an environment variable."""
if not cfg.openai_api_key:
print(
Fore.RED +
"Please set your OpenAI API key in config.py or as an environment variable."
"Please set your OpenAI API key in .env or as an environment variable."
)
print("You can get your key from https://beta.openai.com/account/api-keys")
exit(1)
def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
if cfg.speak_mode and cfg.debug_mode:
speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.")
@@ -58,6 +60,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
return json_string
def print_assistant_thoughts(assistant_reply):
"""Prints the assistant's thoughts to the console"""
global ai_name
@@ -262,6 +265,7 @@ def prompt_user():
config = AIConfig(ai_name, ai_role, ai_goals)
return config
def parse_arguments():
"""Parses the arguments passed to the script"""
global cfg
@@ -322,132 +326,136 @@ def parse_arguments():
supported_memory = get_supported_memory_backends()
chosen = args.memory_type
if chosen not in supported_memory:
print_to_console("ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ", Fore.RED, f'{supported_memory}')
print_to_console(f"Defaulting to: ", Fore.YELLOW, cfg.memory_backend)
logger.typewriter_log("ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ", Fore.RED, f'{supported_memory}')
logger.typewriter_log(f"Defaulting to: ", Fore.YELLOW, cfg.memory_backend)
else:
cfg.memory_backend = chosen
# TODO: fill in llm values here
check_openai_api_key()
cfg = Config()
parse_arguments()
logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
ai_name = ""
prompt = construct_prompt()
# print(prompt)
# Initialize variables
full_message_history = []
result = None
next_action_count = 0
# Make a constant:
user_input = "Determine which next command to use, and respond using the format specified above:"
# Initialize memory and make sure it is empty.
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(cfg, init=True)
print('Using memory of type: ' + memory.__class__.__name__)
# Interaction Loop
loop_count = 0
while True:
# Discontinue if continuous limit is reached
loop_count += 1
if cfg.continuous_mode and cfg.continuous_limit > 0 and loop_count > cfg.continuous_limit:
logger.typewriter_log("Continuous Limit Reached: ", Fore.RED, f"{cfg.continuous_limit}")
break
# Send message to AI, get response
with Spinner("Thinking... "):
assistant_reply = chat.chat_with_ai(
prompt,
user_input,
full_message_history,
memory,
cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
# Print Assistant thoughts
print_assistant_thoughts(assistant_reply)
# Get command name and arguments
try:
command_name, arguments = cmd.get_command(attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply))
if cfg.speak_mode:
speak.say_text(f"I want to execute {command_name}")
except Exception as e:
logger.error("Error: \n", str(e))
if not cfg.continuous_mode and next_action_count == 0:
### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
user_input = ""
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
print(
f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {ai_name}...",
flush=True)
while True:
console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
if console_input.lower().rstrip() == "y":
user_input = "GENERATE NEXT COMMAND JSON"
break
elif console_input.lower().startswith("y -"):
try:
next_action_count = abs(int(console_input.split(" ")[1]))
user_input = "GENERATE NEXT COMMAND JSON"
except ValueError:
print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.")
continue
break
elif console_input.lower() == "n":
user_input = "EXIT"
break
else:
user_input = console_input
command_name = "human_feedback"
break
if user_input == "GENERATE NEXT COMMAND JSON":
logger.typewriter_log(
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
Fore.MAGENTA,
"")
elif user_input == "EXIT":
print("Exiting...", flush=True)
def main():
global ai_name, memory
# TODO: fill in llm values here
check_openai_api_key()
parse_arguments()
logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
ai_name = ""
prompt = construct_prompt()
# print(prompt)
# Initialize variables
full_message_history = []
result = None
next_action_count = 0
# Make a constant:
user_input = "Determine which next command to use, and respond using the format specified above:"
# Initialize memory and make sure it is empty.
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(cfg, init=True)
print('Using memory of type: ' + memory.__class__.__name__)
# Interaction Loop
loop_count = 0
while True:
# Discontinue if continuous limit is reached
loop_count += 1
if cfg.continuous_mode and cfg.continuous_limit > 0 and loop_count > cfg.continuous_limit:
logger.typewriter_log("Continuous Limit Reached: ", Fore.RED, f"{cfg.continuous_limit}")
break
else:
# Print command
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
# Execute command
if command_name is not None and command_name.lower().startswith( "error" ):
result = f"Command {command_name} threw the following error: " + arguments
elif command_name == "human_feedback":
result = f"Human feedback: {user_input}"
else:
result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
if next_action_count > 0:
next_action_count -= 1
# Send message to AI, get response
with Spinner("Thinking... "):
assistant_reply = chat.chat_with_ai(
prompt,
user_input,
full_message_history,
memory,
cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
memory_to_add = f"Assistant Reply: {assistant_reply} " \
f"\nResult: {result} " \
f"\nHuman Feedback: {user_input} "
# Print Assistant thoughts
print_assistant_thoughts(assistant_reply)
memory.add(memory_to_add)
# Get command name and arguments
try:
command_name, arguments = cmd.get_command(
attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply))
if cfg.speak_mode:
speak.say_text(f"I want to execute {command_name}")
except Exception as e:
logger.error("Error: \n", str(e))
# Check if there's a result from the command; if so, append it to the message
# history
if result is not None:
full_message_history.append(chat.create_chat_message("system", result))
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
else:
full_message_history.append(
chat.create_chat_message(
"system", "Unable to execute command"))
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
if not cfg.continuous_mode and next_action_count == 0:
### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
user_input = ""
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
print(
f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {ai_name}...",
flush=True)
while True:
console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
if console_input.lower().rstrip() == "y":
user_input = "GENERATE NEXT COMMAND JSON"
break
elif console_input.lower().startswith("y -"):
try:
next_action_count = abs(int(console_input.split(" ")[1]))
user_input = "GENERATE NEXT COMMAND JSON"
except ValueError:
print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.")
continue
break
elif console_input.lower() == "n":
user_input = "EXIT"
break
else:
user_input = console_input
command_name = "human_feedback"
break
if user_input == "GENERATE NEXT COMMAND JSON":
logger.typewriter_log(
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
Fore.MAGENTA,
"")
elif user_input == "EXIT":
print("Exiting...", flush=True)
break
else:
# Print command
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
# Execute command
if command_name is not None and command_name.lower().startswith("error"):
result = f"Command {command_name} threw the following error: " + arguments
elif command_name == "human_feedback":
result = f"Human feedback: {user_input}"
else:
result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
if next_action_count > 0:
next_action_count -= 1
memory_to_add = f"Assistant Reply: {assistant_reply} " \
f"\nResult: {result} " \
f"\nHuman Feedback: {user_input} "
memory.add(memory_to_add)
# Check if there's a result from the command; if so, append it to the message
# history
if result is not None:
full_message_history.append(chat.create_chat_message("system", result))
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
else:
full_message_history.append(
chat.create_chat_message(
"system", "Unable to execute command"))
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
if __name__ == "__main__":
main()
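
For reference, the 'y -N' authorization shortcut is parsed by splitting on a space and taking the absolute value of the second token, so 'y -3' authorizes three consecutive commands:

# Illustrative parsing of the "y -N" shortcut
console_input = "y -3"
next_action_count = abs(int(console_input.split(" ")[1]))   # -> 3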

View File

@@ -1,4 +1,5 @@
from memory.local import LocalCache
from memory.no_memory import NoMemory
# List of supported memory backends
# Add a backend to this list if the import attempt is successful
@@ -18,6 +19,7 @@ except ImportError:
print("Pinecone not installed. Skipping import.")
PineconeMemory = None
def get_memory(cfg, init=False):
memory = None
if cfg.memory_backend == "pinecone":
@@ -34,6 +36,8 @@ def get_memory(cfg, init=False):
" use Redis as a memory backend.")
else:
memory = RedisMemory(cfg)
elif cfg.memory_backend == "no_memory":
memory = NoMemory(cfg)
if memory is None:
memory = LocalCache(cfg)
@@ -41,6 +45,7 @@ def get_memory(cfg, init=False):
memory.clear()
return memory
def get_supported_memory_backends():
return supported_memory
@@ -50,4 +55,5 @@ __all__ = [
"LocalCache",
"RedisMemory",
"PineconeMemory",
"NoMemory"
]
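
With NoMemory registered, opting out of long-term memory is just a matter of the memory_backend setting; a minimal sketch:

from config import Config
from memory import get_memory
cfg = Config()
cfg.memory_backend = "no_memory"       # select the new backend
memory = get_memory(cfg, init=True)    # init=True clears it (a no-op for NoMemory)
print(memory.__class__.__name__)       # -> NoMemory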

View File

@@ -2,13 +2,14 @@
import abc
from config import AbstractSingleton, Config
import openai
cfg = Config()
def get_ada_embedding(text):
text = text.replace("\n", " ")
if cfg.use_azure:
return openai.Embedding.create(input=[text], engine=cfg.azure_embeddigs_deployment_id, model="text-embedding-ada-002")["data"][0]["embedding"]
return openai.Embedding.create(input=[text], engine=cfg.get_azure_deployment_id_for_model("text-embedding-ada-002"))["data"][0]["embedding"]
else:
return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]

View File

@@ -0,0 +1,66 @@
from typing import Optional, List, Any
from memory.base import MemoryProviderSingleton
class NoMemory(MemoryProviderSingleton):
def __init__(self, cfg):
"""
Initializes the NoMemory provider.
Args:
cfg: The config object.
Returns: None
"""
pass
def add(self, data: str) -> str:
"""
Adds a data point to the memory. No action is taken in NoMemory.
Args:
data: The data to add.
Returns: An empty string.
"""
return ""
def get(self, data: str) -> Optional[List[Any]]:
"""
Gets the data from the memory that is most relevant to the given data.
NoMemory always returns None.
Args:
data: The data to compare to.
Returns: None
"""
return None
def clear(self) -> str:
"""
Clears the memory. No action is taken in NoMemory.
Returns: An empty string.
"""
return ""
def get_relevant(self, data: str, num_relevant: int = 5) -> Optional[List[Any]]:
"""
Returns all the data in the memory that is relevant to the given data.
NoMemory always returns None.
Args:
data: The data to compare to.
num_relevant: The number of relevant data to return.
Returns: None
"""
return None
def get_stats(self):
"""
Returns: An empty dictionary as there are no stats in NoMemory.
"""
return {}

View File

@@ -2,6 +2,8 @@
import pinecone
from memory.base import MemoryProviderSingleton, get_ada_embedding
from logger import logger
from colorama import Fore, Style
class PineconeMemory(MemoryProviderSingleton):
@@ -17,6 +19,15 @@ class PineconeMemory(MemoryProviderSingleton):
# for now this works.
# we'll need a more complicated and robust system if we want to start with memory.
self.vec_num = 0
try:
pinecone.whoami()
except Exception as e:
logger.typewriter_log("FAILED TO CONNECT TO PINECONE", Fore.RED, Style.BRIGHT + str(e) + Style.RESET_ALL)
logger.double_check("Please ensure you have setup and configured Pinecone properly for use. " +
f"You can check out {Fore.CYAN + Style.BRIGHT}https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup{Style.RESET_ALL} to ensure you've set up everything correctly.")
exit(1)
if table_name not in pinecone.list_indexes():
pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type)
self.index = pinecone.Index(table_name)

View File

@@ -7,6 +7,8 @@ from redis.commands.search.indexDefinition import IndexDefinition, IndexType
import numpy as np
from memory.base import MemoryProviderSingleton, get_ada_embedding
from logger import logger
from colorama import Fore, Style
SCHEMA = [
@@ -44,6 +46,16 @@ class RedisMemory(MemoryProviderSingleton):
db=0 # Cannot be changed
)
self.cfg = cfg
# Check redis connection
try:
self.redis.ping()
except redis.ConnectionError as e:
logger.typewriter_log("FAILED TO CONNECT TO REDIS", Fore.RED, Style.BRIGHT + str(e) + Style.RESET_ALL)
logger.double_check("Please ensure you have setup and configured Redis properly for use. " +
f"You can check out {Fore.CYAN + Style.BRIGHT}https://github.com/Torantulino/Auto-GPT#redis-setup{Style.RESET_ALL} to ensure you've set up everything correctly.")
exit(1)
if cfg.wipe_redis_on_start:
self.redis.flushall()
try:

View File

@@ -31,6 +31,7 @@ tts_headers = {
mutex_lock = Lock() # Ensure only one sound is played at a time
queue_semaphore = Semaphore(1) # The number of sounds to queue before blocking the main thread
def eleven_labs_speech(text, voice_index=0):
"""Speak text using elevenlabs.io's API"""
tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
@@ -51,6 +52,7 @@ def eleven_labs_speech(text, voice_index=0):
print("Response content:", response.content)
return False
def gtts_speech(text):
tts = gtts.gTTS(text)
with mutex_lock:
@@ -58,6 +60,7 @@ def gtts_speech(text):
playsound("speech.mp3", True)
os.remove("speech.mp3")
def macos_tts_speech(text, voice_index=0):
if voice_index == 0:
os.system(f'say "{text}"')
@@ -67,6 +70,7 @@ def macos_tts_speech(text, voice_index=0):
else:
os.system(f'say -v Samantha "{text}"')
def say_text(text, voice_index=0):
def speak():

View File

@@ -1,6 +1,7 @@
import tiktoken
from typing import List, Dict
def count_message_tokens(messages: List[Dict[str, str]], model: str = "gpt-3.5-turbo-0301") -> int:
"""
Returns the number of tokens used by a list of messages.
@@ -41,6 +42,7 @@ def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
def count_string_tokens(string: str, model_name: str) -> int:
"""
Returns the number of tokens in a text string.
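
A hypothetical usage of the two counters, assuming this module is importable as token_counter:

from token_counter import count_message_tokens, count_string_tokens
messages = [{"role": "user", "content": "Hello there!"}]
print(count_message_tokens(messages))                         # includes per-message overhead
print(count_string_tokens("Hello there!", "gpt-3.5-turbo"))   # raw token count for the string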