Merge branch 'master' of https://github.com/Significant-Gravitas/Auto-GPT into plugin-support
@@ -1,72 +0,0 @@
from llm_utils import create_chat_completion

next_key = 0
agents = {}  # key, (task, full_message_history, model)

# Create new GPT agent
# TODO: Centralise use of create_chat_completion() to globally enforce token limit


def create_agent(task, prompt, model):
    """Create a new agent and return its key"""
    global next_key
    global agents

    messages = [{"role": "user", "content": prompt}, ]

    # Start GPT instance
    agent_reply = create_chat_completion(
        model=model,
        messages=messages,
    )

    # Update full message history
    messages.append({"role": "assistant", "content": agent_reply})

    key = next_key
    # This is done instead of len(agents) to make keys unique even if agents
    # are deleted
    next_key += 1

    agents[key] = (task, messages, model)

    return key, agent_reply


def message_agent(key, message):
    """Send a message to an agent and return its response"""
    global agents

    task, messages, model = agents[int(key)]

    # Add user message to message history before sending to agent
    messages.append({"role": "user", "content": message})

    # Start GPT instance
    agent_reply = create_chat_completion(
        model=model,
        messages=messages,
    )

    # Update full message history
    messages.append({"role": "assistant", "content": agent_reply})

    return agent_reply


def list_agents():
    """Return a list of all agents"""
    global agents

    # Return a list of agent keys and their tasks
    return [(key, task) for key, (task, _, _) in agents.items()]


def delete_agent(key):
    """Delete an agent and return True if successful, False otherwise"""
    global agents

    try:
        del agents[int(key)]
        return True
    except KeyError:
        return False
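A minimal usage sketch for the agent manager above. The task, prompt, and model values are illustrative, and it assumes a configured OpenAI API key:

# Hypothetical usage sketch; task/prompt/model values are illustrative.
import agent_manager

# Spawn a sub-agent and capture its key plus first reply.
key, reply = agent_manager.create_agent(
    task="summarise a document",
    prompt="You are a summariser agent.",
    model="gpt-3.5-turbo",
)
print(f"Agent {key} said: {reply}")

# Continue the conversation; history is kept in the module-level agents dict.
follow_up = agent_manager.message_agent(key, "Summarise: 'AutoGPT is an agent.'")
print(follow_up)

# Clean up; returns False if the key was already gone.
assert agent_manager.delete_agent(key) is True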
@@ -1,95 +0,0 @@
import yaml
import data
import os


class AIConfig:
    """
    A class object that contains the configuration information for the AI

    Attributes:
        ai_name (str): The name of the AI.
        ai_role (str): The description of the AI's role.
        ai_goals (list): The list of objectives the AI is supposed to complete.
    """

    def __init__(self, ai_name: str = "", ai_role: str = "", ai_goals: list = None) -> None:
        """
        Initialize a class instance

        Parameters:
            ai_name (str): The name of the AI.
            ai_role (str): The description of the AI's role.
            ai_goals (list): The list of objectives the AI is supposed to complete.
        Returns:
            None
        """
        # Avoid a shared mutable default for ai_goals
        if ai_goals is None:
            ai_goals = []
        self.ai_name = ai_name
        self.ai_role = ai_role
        self.ai_goals = ai_goals

    # Soon this will go in a folder where it remembers more stuff about the run(s)
    SAVE_FILE = os.path.join(os.path.dirname(__file__), '..', 'ai_settings.yaml')

    @classmethod
    def load(cls: object, config_file: str = SAVE_FILE) -> object:
        """
        Returns a class object with parameters (ai_name, ai_role, ai_goals) loaded from the yaml file if it exists,
        else returns a class object with no parameters.

        Parameters:
            cls (class object): An AIConfig Class object.
            config_file (str): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"

        Returns:
            cls (object): An instance of the given cls object
        """

        try:
            with open(config_file) as file:
                config_params = yaml.load(file, Loader=yaml.FullLoader)
        except FileNotFoundError:
            config_params = {}

        ai_name = config_params.get("ai_name", "")
        ai_role = config_params.get("ai_role", "")
        ai_goals = config_params.get("ai_goals", [])

        return cls(ai_name, ai_role, ai_goals)

    def save(self, config_file: str = SAVE_FILE) -> None:
        """
        Saves the class parameters to the specified yaml file path.

        Parameters:
            config_file (str): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"

        Returns:
            None
        """

        config = {"ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals}
        with open(config_file, "w") as file:
            yaml.dump(config, file)

    def construct_full_prompt(self) -> str:
        """
        Returns a prompt to the user with the class information in an organized fashion.

        Parameters:
            None

        Returns:
            full_prompt (str): A string containing the initial prompt for the user including the ai_name, ai_role and ai_goals.
        """

        prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""

        # Construct full prompt
        full_prompt = f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
        for i, goal in enumerate(self.ai_goals):
            full_prompt += f"{i+1}. {goal}\n"

        full_prompt += f"\n\n{data.load_prompt()}"
        return full_prompt
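A brief save/load round-trip sketch for AIConfig. The name, role, and goals are illustrative, and it assumes the default SAVE_FILE path is writable:

from ai_config import AIConfig

# Illustrative values; save() writes ai_settings.yaml one level above the scripts dir.
config = AIConfig(
    ai_name="Entrepreneur-GPT",
    ai_role="an AI designed to run a business",
    ai_goals=["Increase revenue", "Ship product"],
)
config.save()

loaded = AIConfig.load()   # returns an empty config if the file is missing
print(loaded.construct_full_prompt())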
@@ -1,68 +0,0 @@
from typing import List, Optional
import json
from config import Config
from call_ai_function import call_ai_function

cfg = Config()


def evaluate_code(code: str) -> List[str]:
    """
    A function that takes in a string and returns a response from the create chat completion api call.

    Parameters:
        code (str): Code to be evaluated.
    Returns:
        A result string from create chat completion. A list of suggestions to improve the code.
    """

    function_string = "def analyze_code(code: str) -> List[str]:"
    args = [code]
    description_string = """Analyzes the given code and returns a list of suggestions for improvements."""

    result_string = call_ai_function(function_string, args, description_string)

    return result_string


def improve_code(suggestions: List[str], code: str) -> str:
    """
    A function that takes in code and suggestions and returns a response from the create chat completion api call.

    Parameters:
        suggestions (List): A list of suggestions around what needs to be improved.
        code (str): Code to be improved.
    Returns:
        A result string from create chat completion. Improved code in response.
    """

    function_string = (
        "def generate_improved_code(suggestions: List[str], code: str) -> str:"
    )
    args = [json.dumps(suggestions), code]
    description_string = """Improves the provided code based on the suggestions provided, making no other changes."""

    result_string = call_ai_function(function_string, args, description_string)
    return result_string


def write_tests(code: str, focus: List[str]) -> str:
    """
    A function that takes in code and focus topics and returns a response from the create chat completion api call.

    Parameters:
        focus (List): A list of focus areas the tests should concentrate on.
        code (str): Code for test cases to be generated against.
    Returns:
        A result string from create chat completion. Test cases for the submitted code in response.
    """

    function_string = (
        "def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
    )
    args = [code, json.dumps(focus)]
    description_string = """Generates test cases for the existing code, focusing on specific areas if required."""

    result_string = call_ai_function(function_string, args, description_string)
    return result_string
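A sketch of how these helpers chain together, assuming a configured API key. Note the results are LLM-generated strings, not structured data, so they are printed rather than parsed:

from ai_functions import evaluate_code, improve_code

code = "def add(a, b):\n    return a+b"
suggestions = evaluate_code(code)          # LLM-suggested improvements (string)
better = improve_code(suggestions, code)   # LLM-rewritten code (string)
print(better)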
@@ -1,138 +0,0 @@
import requests
from bs4 import BeautifulSoup
from config import Config
from llm_utils import create_chat_completion

cfg = Config()


# Define and check for local file address prefixes
def check_local_file_access(url):
    local_prefixes = ['file:///', 'file://localhost', 'http://localhost', 'https://localhost']
    return any(url.startswith(prefix) for prefix in local_prefixes)


def scrape_text(url):
    """Scrape text from a webpage"""
    # Most basic check if the URL is valid:
    if not url.startswith('http'):
        return "Error: Invalid URL"

    # Restrict access to local files
    if check_local_file_access(url):
        return "Error: Access to local files is restricted"

    try:
        response = requests.get(url, headers=cfg.user_agent_header)
    except requests.exceptions.RequestException as e:
        return "Error: " + str(e)

    # Check if the response contains an HTTP error
    if response.status_code >= 400:
        return "Error: HTTP " + str(response.status_code) + " error"

    soup = BeautifulSoup(response.text, "html.parser")

    for script in soup(["script", "style"]):
        script.extract()

    text = soup.get_text()
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    text = '\n'.join(chunk for chunk in chunks if chunk)

    return text


def extract_hyperlinks(soup):
    """Extract hyperlinks from a BeautifulSoup object"""
    hyperlinks = []
    for link in soup.find_all('a', href=True):
        hyperlinks.append((link.text, link['href']))
    return hyperlinks


def format_hyperlinks(hyperlinks):
    """Format hyperlinks into a list of strings"""
    formatted_links = []
    for link_text, link_url in hyperlinks:
        formatted_links.append(f"{link_text} ({link_url})")
    return formatted_links


def scrape_links(url):
    """Scrape links from a webpage"""
    response = requests.get(url, headers=cfg.user_agent_header)

    # Check if the response contains an HTTP error
    if response.status_code >= 400:
        return "error"

    soup = BeautifulSoup(response.text, "html.parser")

    for script in soup(["script", "style"]):
        script.extract()

    hyperlinks = extract_hyperlinks(soup)

    return format_hyperlinks(hyperlinks)


def split_text(text, max_length=8192):
    """Split text into chunks of a maximum length"""
    paragraphs = text.split("\n")
    current_length = 0
    current_chunk = []

    for paragraph in paragraphs:
        if current_length + len(paragraph) + 1 <= max_length:
            current_chunk.append(paragraph)
            current_length += len(paragraph) + 1
        else:
            yield "\n".join(current_chunk)
            current_chunk = [paragraph]
            current_length = len(paragraph) + 1

    if current_chunk:
        yield "\n".join(current_chunk)


def create_message(chunk, question):
    """Create a user message asking the model to answer a question about a chunk of text"""
    return {
        "role": "user",
        "content": f"\"\"\"{chunk}\"\"\" Using the above text, please answer the following question: \"{question}\" -- if the question cannot be answered using the text, please summarize the text."
    }


def summarize_text(text, question):
    """Summarize text using the LLM model"""
    if not text:
        return "Error: No text to summarize"

    text_length = len(text)
    print(f"Text length: {text_length} characters")

    summaries = []
    chunks = list(split_text(text))

    for i, chunk in enumerate(chunks):
        print(f"Summarizing chunk {i + 1} / {len(chunks)}")
        messages = [create_message(chunk, question)]

        summary = create_chat_completion(
            model=cfg.fast_llm_model,
            messages=messages,
            max_tokens=300,
        )
        summaries.append(summary)

    print(f"Summarized {len(chunks)} chunks.")

    combined_summary = "\n".join(summaries)
    messages = [create_message(combined_summary, question)]

    final_summary = create_chat_completion(
        model=cfg.fast_llm_model,
        messages=messages,
        max_tokens=300,
    )

    return final_summary
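The chunking logic in split_text is what makes summarize_text fit long pages into the model's context; it can be checked self-contained, with no network or API key needed:

from browse import split_text

# 300 short paragraphs, each well under the chunk limit.
text = "\n".join(f"paragraph {i}: " + "x" * 50 for i in range(300))
chunks = list(split_text(text, max_length=1000))

# Each chunk stays within the limit (as long as no single paragraph exceeds it).
assert all(len(chunk) <= 1000 for chunk in chunks)
print(f"{len(chunks)} chunks")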
@@ -1,28 +0,0 @@
from config import Config
from llm_utils import create_chat_completion

cfg = Config()


# This is a magic function that can do anything with no-code. See
# https://github.com/Torantulino/AI-Functions for more info.
def call_ai_function(function, args, description, model=None):
    """Call an AI function"""
    if model is None:
        model = cfg.smart_llm_model
    # For each arg, if any are None, convert to "None":
    args = [str(arg) if arg is not None else "None" for arg in args]
    # Parse args to a comma-separated string
    args = ", ".join(args)
    messages = [
        {
            "role": "system",
            "content": f"You are now the following python function: ```# {description}\n{function}```\n\nOnly respond with your `return` value.",
        },
        {"role": "user", "content": args},
    ]

    response = create_chat_completion(
        model=model, messages=messages, temperature=0
    )

    return response
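A sketch of the "AI function" pattern in use, assuming a configured API key. The signature and description are illustrative; the system prompt makes the model role-play the given function and return only a value:

from call_ai_function import call_ai_function

# Hypothetical function signature and description.
function_string = "def classify_sentiment(text: str) -> str:"
description_string = "Classifies text as 'positive', 'negative' or 'neutral'."

result = call_ai_function(function_string, ["I love this!"], description_string)
print(result)  # expected to be something like: positive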
scripts/chat.py
@@ -1,146 +0,0 @@
import time
import openai
from dotenv import load_dotenv
from config import Config
import token_counter
from llm_utils import create_chat_completion

cfg = Config()


def create_chat_message(role, content):
    """
    Create a chat message with the given role and content.

    Args:
        role (str): The role of the message sender, e.g., "system", "user", or "assistant".
        content (str): The content of the message.

    Returns:
        dict: A dictionary containing the role and content of the message.
    """
    return {"role": role, "content": content}


def generate_context(prompt, relevant_memory, full_message_history, model):
    current_context = [
        create_chat_message(
            "system", prompt),
        create_chat_message(
            "system", f"The current time and date is {time.strftime('%c')}"),
        create_chat_message(
            "system", f"This reminds you of these events from your past:\n{relevant_memory}\n\n")]

    # Add messages from the full message history until we reach the token limit
    next_message_to_add_index = len(full_message_history) - 1
    insertion_index = len(current_context)
    # Count the currently used tokens
    current_tokens_used = token_counter.count_message_tokens(current_context, model)
    return next_message_to_add_index, current_tokens_used, insertion_index, current_context


# TODO: Change debug from hardcode to argument
def chat_with_ai(
        prompt,
        user_input,
        full_message_history,
        permanent_memory,
        token_limit):
    """
    Interact with the OpenAI API, sending the prompt, user input, message history, and permanent memory.

    Args:
        prompt (str): The prompt explaining the rules to the AI.
        user_input (str): The input from the user.
        full_message_history (list): The list of all messages sent between the user and the AI.
        permanent_memory (Obj): The memory object containing the permanent memory.
        token_limit (int): The maximum number of tokens allowed in the API call.

    Returns:
        str: The AI's response.
    """
    while True:
        try:
            model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument

            if cfg.debug:
                print(f"Token limit: {token_limit}")

            # Reserve 1000 tokens for the response
            send_token_limit = token_limit - 1000

            relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)

            if cfg.debug:
                print('Memory Stats: ', permanent_memory.get_stats())

            next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
                prompt, relevant_memory, full_message_history, model)

            while current_tokens_used > 2500:
                # Remove memories until we are under 2500 tokens
                relevant_memory = relevant_memory[1:]
                next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
                    prompt, relevant_memory, full_message_history, model)

            # Account for user input (appended later)
            current_tokens_used += token_counter.count_message_tokens([create_chat_message("user", user_input)], model)

            while next_message_to_add_index >= 0:
                # print(f"CURRENT TOKENS USED: {current_tokens_used}")
                message_to_add = full_message_history[next_message_to_add_index]

                tokens_to_add = token_counter.count_message_tokens([message_to_add], model)
                if current_tokens_used + tokens_to_add > send_token_limit:
                    break

                # Add the most recent message to the start of the current context, after the two system prompts.
                current_context.insert(insertion_index, full_message_history[next_message_to_add_index])

                # Count the currently used tokens
                current_tokens_used += tokens_to_add

                # Move to the next most recent message in the full message history
                next_message_to_add_index -= 1

            # Append user input; the length of this is accounted for above
            current_context.extend([create_chat_message("user", user_input)])

            # Calculate remaining tokens
            tokens_remaining = token_limit - current_tokens_used
            # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"

            # Debug print the current context
            if cfg.debug:
                print(f"Token limit: {token_limit}")
                print(f"Send Token Count: {current_tokens_used}")
                print(f"Tokens remaining for response: {tokens_remaining}")
                print("------------ CONTEXT SENT TO AI ---------------")
                for message in current_context:
                    # Skip printing the prompt
                    if message["role"] == "system" and message["content"] == prompt:
                        continue
                    print(
                        f"{message['role'].capitalize()}: {message['content']}")
                    print()
                print("----------- END OF CONTEXT ----------------")

            # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
            assistant_reply = create_chat_completion(
                model=model,
                messages=current_context,
                max_tokens=tokens_remaining,
            )

            # Update full message history
            full_message_history.append(
                create_chat_message(
                    "user", user_input))
            full_message_history.append(
                create_chat_message(
                    "assistant", assistant_reply))

            return assistant_reply
        except openai.error.RateLimitError:
            # TODO: When we switch to langchain, this is built in
            print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
            time.sleep(10)
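generate_context can be exercised without any OpenAI call, assuming token_counter is importable; it returns the history scan index, the token count so far, the insertion point, and the three seeded system messages:

from chat import create_chat_message, generate_context

history = [create_chat_message("user", "hello"),
           create_chat_message("assistant", "hi there")]
next_idx, tokens_used, insert_at, context = generate_context(
    "You are a test agent.",   # system prompt
    "nothing relevant yet",    # relevant_memory
    history,
    "gpt-3.5-turbo",           # illustrative model for token counting
)
assert insert_at == 3                   # history goes after the three system messages
assert next_idx == len(history) - 1     # scanning starts from the newest message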
scripts/check_requirements.py (new file)
@@ -0,0 +1,31 @@
import pkg_resources
import sys


def main():
    requirements_file = sys.argv[1]
    with open(requirements_file, "r") as f:
        required_packages = [
            line.strip().split("#")[0].strip() for line in f.readlines()
        ]

    installed_packages = [package.key for package in pkg_resources.working_set]

    missing_packages = []
    for package in required_packages:
        if not package:  # Skip empty lines
            continue
        package_name = package.strip().split("==")[0]
        if package_name.lower() not in installed_packages:
            missing_packages.append(package_name)

    if missing_packages:
        print("Missing packages:")
        print(", ".join(missing_packages))
        sys.exit(1)
    else:
        print("All packages are installed.")


if __name__ == "__main__":
    main()
@@ -1,301 +0,0 @@
import browse
import json
from memory import get_memory
import datetime
import agent_manager as agents
import speak
from config import Config
import ai_functions as ai
from file_operations import read_file, write_to_file, append_to_file, delete_file, search_files
from execute_code import execute_python_file
from json_parser import fix_and_parse_json
from image_gen import generate_image
from duckduckgo_search import ddg
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError

cfg = Config()


def is_valid_int(value):
    try:
        int(value)
        return True
    except ValueError:
        return False


def get_command(response):
    """Parse the response and return the command name and arguments"""
    try:
        response_json = fix_and_parse_json(response)

        if "command" not in response_json:
            return "Error:", "Missing 'command' object in JSON"

        command = response_json["command"]

        if "name" not in command:
            return "Error:", "Missing 'name' field in 'command' object"

        command_name = command["name"]

        # Use an empty dictionary if 'args' field is not present in 'command' object
        arguments = command.get("args", {})

        return command_name, arguments
    except json.decoder.JSONDecodeError:
        return "Error:", "Invalid JSON"
    # All other errors, return "Error: + error message"
    except Exception as e:
        return "Error:", str(e)


def execute_command(command_name, arguments):
    """Execute the command and return the result"""
    memory = get_memory(cfg)

    try:
        if command_name == "google":
            # Check if the Google API key is set and use the official search method
            # If the API key is not set or contains only whitespace, use the unofficial search method
            if cfg.google_api_key and cfg.google_api_key.strip():
                return google_official_search(arguments["input"])
            else:
                return google_search(arguments["input"])
        elif command_name == "memory_add":
            return memory.add(arguments["string"])
        elif command_name == "start_agent":
            return start_agent(
                arguments["name"],
                arguments["task"],
                arguments["prompt"])
        elif command_name == "message_agent":
            return message_agent(arguments["key"], arguments["message"])
        elif command_name == "list_agents":
            return list_agents()
        elif command_name == "delete_agent":
            return delete_agent(arguments["key"])
        elif command_name == "get_text_summary":
            return get_text_summary(arguments["url"], arguments["question"])
        elif command_name == "get_hyperlinks":
            return get_hyperlinks(arguments["url"])
        elif command_name == "read_file":
            return read_file(arguments["file"])
        elif command_name == "write_to_file":
            return write_to_file(arguments["file"], arguments["text"])
        elif command_name == "append_to_file":
            return append_to_file(arguments["file"], arguments["text"])
        elif command_name == "delete_file":
            return delete_file(arguments["file"])
        elif command_name == "search_files":
            return search_files(arguments["directory"])
        elif command_name == "browse_website":
            return browse_website(arguments["url"], arguments["question"])
        # TODO: Change these to take in a file rather than pasted code, if
        # non-file is given, return instructions "Input should be a python
        # filepath, write your code to file and try again"
        elif command_name == "evaluate_code":
            return ai.evaluate_code(arguments["code"])
        elif command_name == "improve_code":
            return ai.improve_code(arguments["suggestions"], arguments["code"])
        elif command_name == "write_tests":
            return ai.write_tests(arguments["code"], arguments.get("focus"))
        elif command_name == "execute_python_file":  # Add this command
            return execute_python_file(arguments["file"])
        elif command_name == "generate_image":
            return generate_image(arguments["prompt"])
        elif command_name == "do_nothing":
            return "No action performed."
        elif command_name == "task_complete":
            shutdown()
        else:
            return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for available commands and only respond in the specified JSON format."
    # All errors, return "Error: + error message"
    except Exception as e:
        return "Error: " + str(e)


def get_datetime():
    """Return the current date and time"""
    return "Current date and time: " + \
        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def google_search(query, num_results=8):
    """Return the results of a google search"""
    search_results = []
    for j in ddg(query, max_results=num_results):
        search_results.append(j)

    return json.dumps(search_results, ensure_ascii=False, indent=4)


def google_official_search(query, num_results=8):
    """Return the results of a google search using the official Google API"""
    try:
        # Get the Google API key and Custom Search Engine ID from the config file
        api_key = cfg.google_api_key
        custom_search_engine_id = cfg.custom_search_engine_id

        # Initialize the Custom Search API service
        service = build("customsearch", "v1", developerKey=api_key)

        # Send the search query and retrieve the results
        result = service.cse().list(q=query, cx=custom_search_engine_id, num=num_results).execute()

        # Extract the search result items from the response
        search_results = result.get("items", [])

        # Create a list of only the URLs from the search results
        search_results_links = [item["link"] for item in search_results]

    except HttpError as e:
        # Handle errors in the API call
        error_details = json.loads(e.content.decode())

        # Check if the error is related to an invalid or missing API key
        if error_details.get("error", {}).get("code") == 403 and "invalid API key" in error_details.get("error", {}).get("message", ""):
            return "Error: The provided Google API key is invalid or missing."
        else:
            return f"Error: {e}"

    # Return the list of search result URLs
    return search_results_links


def browse_website(url, question):
    """Browse a website and return the summary and links"""
    summary = get_text_summary(url, question)
    links = get_hyperlinks(url)

    # Limit links to 5
    if len(links) > 5:
        links = links[:5]

    result = f"""Website Content Summary: {summary}\n\nLinks: {links}"""

    return result


def get_text_summary(url, question):
    """Scrape a webpage and return a summary of its text"""
    text = browse.scrape_text(url)
    summary = browse.summarize_text(text, question)
    return """ "Result" : """ + summary


def get_hyperlinks(url):
    """Return the hyperlinks found on a webpage"""
    link_list = browse.scrape_links(url)
    return link_list


# NOTE: commit_memory, delete_memory and overwrite_memory below reference a
# module-level `mem` object that is not imported in this file.
def commit_memory(string):
    """Commit a string to memory"""
    _text = f"""Committing memory with string "{string}" """
    mem.permanent_memory.append(string)
    return _text


def delete_memory(key):
    """Delete a memory with a given key"""
    if key >= 0 and key < len(mem.permanent_memory):
        _text = "Deleting memory with key " + str(key)
        del mem.permanent_memory[key]
        print(_text)
        return _text
    else:
        print("Invalid key, cannot delete memory.")
        return None


def overwrite_memory(key, string):
    """Overwrite a memory with a given key and string"""
    # Check if the key is a valid integer
    if is_valid_int(key):
        key_int = int(key)
        # Check if the integer key is within the range of the permanent_memory list
        if 0 <= key_int < len(mem.permanent_memory):
            _text = "Overwriting memory with key " + str(key) + " and string " + string
            # Overwrite the memory slot with the given integer key and string
            mem.permanent_memory[key_int] = string
            print(_text)
            return _text
        else:
            print(f"Invalid key '{key}', out of range.")
            return None
    # Check if the key is a valid string
    elif isinstance(key, str):
        _text = "Overwriting memory with key " + key + " and string " + string
        # Overwrite the memory slot with the given string key and string
        mem.permanent_memory[key] = string
        print(_text)
        return _text
    else:
        print(f"Invalid key '{key}', must be an integer or a string.")
        return None


def shutdown():
    """Shut down the program"""
    print("Shutting down...")
    quit()


def start_agent(name, task, prompt, model=cfg.fast_llm_model):
    """Start an agent with a given name, task, and prompt"""
    global cfg

    # Remove underscores from name
    voice_name = name.replace("_", " ")

    first_message = f"""You are {name}. Respond with: "Acknowledged"."""
    agent_intro = f"{voice_name} here, Reporting for duty!"

    # Create agent
    if cfg.speak_mode:
        speak.say_text(agent_intro, 1)
    key, ack = agents.create_agent(task, first_message, model)

    if cfg.speak_mode:
        speak.say_text(f"Hello {voice_name}. Your task is as follows. {task}.")

    # Assign task (prompt), get response
    agent_response = message_agent(key, prompt)

    return f"Agent {name} created with key {key}. First response: {agent_response}"


def message_agent(key, message):
    """Message an agent with a given key and message"""
    global cfg

    # Check if the key is a valid integer
    if is_valid_int(key):
        agent_response = agents.message_agent(int(key), message)
    # Check if the key is a valid string
    elif isinstance(key, str):
        agent_response = agents.message_agent(key, message)
    else:
        return "Invalid key, must be an integer or a string."

    # Speak response
    if cfg.speak_mode:
        speak.say_text(agent_response, 1)
    return agent_response


def list_agents():
    """List all agents"""
    return agents.list_agents()


def delete_agent(key):
    """Delete an agent with a given key"""
    result = agents.delete_agent(key)
    if not result:
        return f"Agent {key} does not exist."
    return f"Agent {key} deleted."
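get_command is the bridge between the model's JSON reply and execute_command. A quick sketch with a well-formed reply, assuming the module's dependencies are installed (no API call is made for parsing alone):

from commands import get_command

reply = '{"thoughts": {"text": "t"}, "command": {"name": "read_file", "args": {"file": "notes.txt"}}}'
name, args = get_command(reply)
assert name == "read_file" and args == {"file": "notes.txt"}

# A missing 'command' object degrades to an error tuple rather than raising.
name, args = get_command('{"thoughts": {}}')
assert name == "Error:"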
@@ -1,145 +0,0 @@
import abc
import os
import openai
from dotenv import load_dotenv

# Load environment variables from .env file
load_dotenv()


class Singleton(abc.ABCMeta, type):
    """
    Singleton metaclass for ensuring only one instance of a class.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        """Call method for the singleton metaclass."""
        if cls not in cls._instances:
            cls._instances[cls] = super(
                Singleton, cls).__call__(
                *args, **kwargs)
        return cls._instances[cls]


class AbstractSingleton(abc.ABC, metaclass=Singleton):
    pass


class Config(metaclass=Singleton):
    """
    Configuration class to store the state of bools for different scripts access.
    """

    def __init__(self):
        """Initialize the Config class"""
        self.debug = False
        self.continuous_mode = False
        self.speak_mode = False

        self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
        self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
        self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
        self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))

        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.use_azure = os.getenv("USE_AZURE") == 'True'
        if self.use_azure:
            self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE")
            self.openai_api_version = os.getenv("OPENAI_AZURE_API_VERSION")
            self.openai_deployment_id = os.getenv("OPENAI_AZURE_DEPLOYMENT_ID")
            openai.api_type = "azure"
            openai.api_base = self.openai_api_base
            openai.api_version = self.openai_api_version

        self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")

        self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")

        self.google_api_key = os.getenv("GOOGLE_API_KEY")
        self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")

        self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
        self.pinecone_region = os.getenv("PINECONE_ENV")

        self.image_provider = os.getenv("IMAGE_PROVIDER")
        self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")

        # User agent headers to use when browsing the web
        # Some websites might just completely deny requests with an error code if no user agent was found.
        self.user_agent_header = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
        self.redis_host = os.getenv("REDIS_HOST", "localhost")
        self.redis_port = os.getenv("REDIS_PORT", "6379")
        self.redis_password = os.getenv("REDIS_PASSWORD", "")
        self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == 'True'
        self.memory_index = os.getenv("MEMORY_INDEX", 'auto-gpt')
        # Note that indexes must be created on db 0 in redis, this is not configurable.

        self.memory_backend = os.getenv("MEMORY_BACKEND", 'local')
        # Initialize the OpenAI API client
        openai.api_key = self.openai_api_key

        self.plugins = []
        self.plugins_whitelist = []
        self.plugins_blacklist = []

    def set_continuous_mode(self, value: bool):
        """Set the continuous mode value."""
        self.continuous_mode = value

    def set_speak_mode(self, value: bool):
        """Set the speak mode value."""
        self.speak_mode = value

    def set_fast_llm_model(self, value: str):
        """Set the fast LLM model value."""
        self.fast_llm_model = value

    def set_smart_llm_model(self, value: str):
        """Set the smart LLM model value."""
        self.smart_llm_model = value

    def set_fast_token_limit(self, value: int):
        """Set the fast token limit value."""
        self.fast_token_limit = value

    def set_smart_token_limit(self, value: int):
        """Set the smart token limit value."""
        self.smart_token_limit = value

    def set_openai_api_key(self, value: str):
        """Set the OpenAI API key value."""
        self.openai_api_key = value

    def set_elevenlabs_api_key(self, value: str):
        """Set the ElevenLabs API key value."""
        self.elevenlabs_api_key = value

    def set_google_api_key(self, value: str):
        """Set the Google API key value."""
        self.google_api_key = value

    def set_custom_search_engine_id(self, value: str):
        """Set the custom search engine id value."""
        self.custom_search_engine_id = value

    def set_pinecone_api_key(self, value: str):
        """Set the Pinecone API key value."""
        self.pinecone_api_key = value

    def set_pinecone_region(self, value: str):
        """Set the Pinecone region value."""
        self.pinecone_region = value

    def set_debug_mode(self, value: bool):
        """Set the debug mode value."""
        self.debug = value

    def set_plugins(self, value: list):
        """Set the plugins value."""
        self.plugins = value
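The Singleton metaclass is why every module-level `cfg = Config()` in this diff refers to the same object; a minimal demonstration:

from config import Config

a = Config()
b = Config()
assert a is b                  # same instance everywhere

a.set_speak_mode(True)
assert b.speak_mode is True    # a flag set in one module is visible in all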
@@ -1,18 +0,0 @@
from pathlib import Path


def load_prompt():
    """Load the prompt from data/prompt.txt"""
    try:
        # Get the directory of this file:
        file_dir = Path(__file__).parent
        prompt_file_path = file_dir / "data" / "prompt.txt"

        # Load the prompt from data/prompt.txt
        with open(prompt_file_path, "r") as prompt_file:
            prompt = prompt_file.read()

        return prompt
    except FileNotFoundError:
        print("Error: Prompt file not found", flush=True)
        return ""
@@ -1,63 +0,0 @@
CONSTRAINTS:

1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.
2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.
3. No user assistance
4. Exclusively use the commands listed in double quotes e.g. "command name"

COMMANDS:

1. Google Search: "google", args: "input": "<search>"
5. Browse Website: "browse_website", args: "url": "<url>", "question": "<what_you_want_to_find_on_website>"
6. Start GPT Agent: "start_agent", args: "name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"
7. Message GPT Agent: "message_agent", args: "key": "<key>", "message": "<message>"
8. List GPT Agents: "list_agents", args: ""
9. Delete GPT Agent: "delete_agent", args: "key": "<key>"
10. Write to file: "write_to_file", args: "file": "<file>", "text": "<text>"
11. Read file: "read_file", args: "file": "<file>"
12. Append to file: "append_to_file", args: "file": "<file>", "text": "<text>"
13. Delete file: "delete_file", args: "file": "<file>"
14. Search Files: "search_files", args: "directory": "<directory>"
15. Evaluate Code: "evaluate_code", args: "code": "<full_code_string>"
16. Get Improved Code: "improve_code", args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"
17. Write Tests: "write_tests", args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"
18. Execute Python File: "execute_python_file", args: "file": "<file>"
19. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
20. Generate Image: "generate_image", args: "prompt": "<prompt>"
21. Do Nothing: "do_nothing", args: ""

RESOURCES:

1. Internet access for searches and information gathering.
2. Long Term memory management.
3. GPT-3.5 powered Agents for delegation of simple tasks.
4. File output.

PERFORMANCE EVALUATION:

1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.
2. Constructively self-criticize your big-picture behavior constantly.
3. Reflect on past decisions and strategies to refine your approach.
4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.

You should only respond in JSON format as described below

RESPONSE FORMAT:
{
    "thoughts":
    {
        "text": "thought",
        "reasoning": "reasoning",
        "plan": "- short bulleted\n- list that conveys\n- long-term plan",
        "criticism": "constructive self-criticism",
        "speak": "thoughts summary to say to user"
    },
    "command": {
        "name": "command name",
        "args":{
            "arg name": "value"
        }
    }
}

Ensure the response can be parsed by Python json.loads
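The final line of the prompt requires machine-parseable replies; a sketch of the consumer side, with an illustrative well-formed reply:

import json

# Illustrative reply that follows the RESPONSE FORMAT above.
reply = '''{
    "thoughts": {
        "text": "I should list my files first.",
        "reasoning": "I need to know what exists before writing.",
        "plan": "- search files\\n- read relevant ones",
        "criticism": "Avoid redundant searches.",
        "speak": "Listing files now."
    },
    "command": {"name": "search_files", "args": {"directory": ""}}
}'''
parsed = json.loads(reply)   # exactly the check the prompt demands must succeed
print(parsed["command"]["name"])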
@@ -1,48 +0,0 @@
import docker
import os


def execute_python_file(file):
    """Execute a Python file in a Docker container and return the output"""
    workspace_folder = "auto_gpt_workspace"

    print(f"Executing file '{file}' in workspace '{workspace_folder}'")

    if not file.endswith(".py"):
        return "Error: Invalid file type. Only .py files are allowed."

    file_path = os.path.join(workspace_folder, file)

    if not os.path.isfile(file_path):
        return f"Error: File '{file}' does not exist."

    try:
        client = docker.from_env()

        # You can replace 'python:3.10' with the desired Python image/version
        # You can find available Python images on Docker Hub:
        # https://hub.docker.com/_/python
        container = client.containers.run(
            'python:3.10',
            f'python {file}',
            volumes={
                os.path.abspath(workspace_folder): {
                    'bind': '/workspace',
                    'mode': 'ro'}},
            working_dir='/workspace',
            stderr=True,
            stdout=True,
            detach=True,
        )

        output = container.wait()
        logs = container.logs().decode('utf-8')
        container.remove()

        # print(f"Execution complete. Output: {output}")
        # print(f"Logs: {logs}")

        return logs

    except Exception as e:
        return f"Error: {str(e)}"
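A usage sketch, assuming a running Docker daemon and a hello.py already written into auto_gpt_workspace (for example via write_to_file). The file runs inside a read-only /workspace mount:

# Hypothetical usage; hello.py is an assumed pre-existing workspace file.
from execute_code import execute_python_file

print(execute_python_file("hello.py"))   # returns the container's combined logs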
@@ -1,83 +0,0 @@
import os
import os.path

# Set a dedicated folder for file I/O
working_directory = "auto_gpt_workspace"

# Create the directory if it doesn't exist
if not os.path.exists(working_directory):
    os.makedirs(working_directory)


def safe_join(base, *paths):
    """Join one or more path components intelligently, refusing paths that escape the base directory."""
    new_path = os.path.join(base, *paths)
    norm_new_path = os.path.normpath(new_path)

    if os.path.commonprefix([base, norm_new_path]) != base:
        raise ValueError("Attempted to access outside of working directory.")

    return norm_new_path


def read_file(filename):
    """Read a file and return the contents"""
    try:
        filepath = safe_join(working_directory, filename)
        with open(filepath, "r") as f:
            content = f.read()
        return content
    except Exception as e:
        return "Error: " + str(e)


def write_to_file(filename, text):
    """Write text to a file"""
    try:
        filepath = safe_join(working_directory, filename)
        directory = os.path.dirname(filepath)
        if not os.path.exists(directory):
            os.makedirs(directory)
        with open(filepath, "w") as f:
            f.write(text)
        return "File written to successfully."
    except Exception as e:
        return "Error: " + str(e)


def append_to_file(filename, text):
    """Append text to a file"""
    try:
        filepath = safe_join(working_directory, filename)
        with open(filepath, "a") as f:
            f.write(text)
        return "Text appended successfully."
    except Exception as e:
        return "Error: " + str(e)


def delete_file(filename):
    """Delete a file"""
    try:
        filepath = safe_join(working_directory, filename)
        os.remove(filepath)
        return "File deleted successfully."
    except Exception as e:
        return "Error: " + str(e)


def search_files(directory):
    """Recursively list non-hidden files under the given directory, relative to the workspace"""
    found_files = []

    if directory == "" or directory == "/":
        search_directory = working_directory
    else:
        search_directory = safe_join(working_directory, directory)

    for root, _, files in os.walk(search_directory):
        for file in files:
            if file.startswith('.'):
                continue
            relative_path = os.path.relpath(os.path.join(root, file), working_directory)
            found_files.append(relative_path)

    return found_files
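safe_join is the sandboxing primitive behind all the file commands; a quick check of its behavior (note that importing the module creates the auto_gpt_workspace directory):

from file_operations import safe_join

print(safe_join("auto_gpt_workspace", "notes/todo.txt"))  # stays inside: ok

try:
    safe_join("auto_gpt_workspace", "../secrets.txt")     # normalizes outside the base
except ValueError as e:
    print(e)  # Attempted to access outside of working directory.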
@@ -1,57 +0,0 @@
import requests
import io
import os.path
from PIL import Image
from config import Config
import uuid
import openai
from base64 import b64decode

cfg = Config()

working_directory = "auto_gpt_workspace"


def generate_image(prompt):
    """Generate an image from a prompt with the configured provider and save it to the workspace"""
    filename = str(uuid.uuid4()) + ".jpg"

    # DALL-E
    if cfg.image_provider == 'dalle':
        openai.api_key = cfg.openai_api_key

        response = openai.Image.create(
            prompt=prompt,
            n=1,
            size="256x256",
            response_format="b64_json",
        )

        print("Image Generated for prompt:" + prompt)

        image_data = b64decode(response["data"][0]["b64_json"])

        with open(working_directory + "/" + filename, mode="wb") as png:
            png.write(image_data)

        return "Saved to disk:" + filename

    # STABLE DIFFUSION
    elif cfg.image_provider == 'sd':
        API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
        headers = {"Authorization": "Bearer " + cfg.huggingface_api_token}

        response = requests.post(API_URL, headers=headers, json={
            "inputs": prompt,
        })

        image = Image.open(io.BytesIO(response.content))
        print("Image Generated for prompt:" + prompt)

        image.save(os.path.join(working_directory, filename))

        return "Saved to disk:" + filename

    else:
        return "No Image Provider Set"
@@ -1,109 +0,0 @@
import json
from typing import Any, Dict, Union
from call_ai_function import call_ai_function
from config import Config
from json_utils import correct_json

cfg = Config()

JSON_SCHEMA = """
{
    "command": {
        "name": "command name",
        "args":{
            "arg name": "value"
        }
    },
    "thoughts":
    {
        "text": "thought",
        "reasoning": "reasoning",
        "plan": "- short bulleted\n- list that conveys\n- long-term plan",
        "criticism": "constructive self-criticism",
        "speak": "thoughts summary to say to user"
    }
}
"""


def fix_and_parse_json(
        json_str: str,
        try_to_fix_with_gpt: bool = True
) -> Union[str, Dict[Any, Any]]:
    """Fix and parse JSON string"""
    try:
        json_str = json_str.replace('\t', '')
        return json.loads(json_str)
    except json.JSONDecodeError as _:  # noqa: F841
        json_str = correct_json(json_str)
        try:
            return json.loads(json_str)
        except json.JSONDecodeError as _:  # noqa: F841
            pass
    # Let's do something manually:
    # sometimes GPT responds with something BEFORE the braces:
    # "I'm sorry, I don't understand. Please try again."
    # {"text": "I'm sorry, I don't understand. Please try again.",
    #  "confidence": 0.0}
    # So let's try to find the first brace and then parse the rest
    # of the string
    try:
        brace_index = json_str.index("{")
        json_str = json_str[brace_index:]
        last_brace_index = json_str.rindex("}")
        json_str = json_str[:last_brace_index + 1]
        return json.loads(json_str)
    except json.JSONDecodeError as e:  # noqa: F841
        if try_to_fix_with_gpt:
            print("Warning: Failed to parse AI output, attempting to fix."
                  "\n If you see this warning frequently, it's likely that"
                  " your prompt is confusing the AI. Try changing it up"
                  " slightly.")
            # Now try to fix this up using the ai_functions
            ai_fixed_json = fix_json(json_str, JSON_SCHEMA)

            if ai_fixed_json != "failed":
                return json.loads(ai_fixed_json)
            else:
                # This allows the AI to react to the error message,
                # which usually results in it correcting its ways.
                print("Failed to fix ai output, telling the AI.")
                return json_str
        else:
            raise e


def fix_json(json_str: str, schema: str) -> str:
    """Fix the given JSON string to make it parseable and fully compliant with the provided schema."""

    # Try to fix the JSON using gpt:
    function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
    args = [f"'''{json_str}'''", f"'''{schema}'''"]
    description_string = "Fixes the provided JSON string to make it parseable"\
        " and fully compliant with the provided schema.\n If an object or"\
        " field specified in the schema isn't contained within the correct"\
        " JSON, it is omitted.\n This function is brilliant at guessing"\
        " when the format is incorrect."

    # If it doesn't already start with a "`", add one:
    if not json_str.startswith("`"):
        json_str = "```json\n" + json_str + "\n```"
    result_string = call_ai_function(
        function_string, args, description_string, model=cfg.fast_llm_model
    )
    if cfg.debug:
        print("------------ JSON FIX ATTEMPT ---------------")
        print(f"Original JSON: {json_str}")
        print("-----------")
        print(f"Fixed JSON: {result_string}")
        print("----------- END OF FIX ATTEMPT ----------------")

    try:
        json.loads(result_string)  # just check the validity
        return result_string
    except:  # noqa: E722
        # Get the call stack:
        # import traceback
        # call_stack = traceback.format_exc()
        # print(f"Failed to fix JSON: '{json_str}' "+call_stack)
        return "failed"
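The cheap, non-LLM repairs can be exercised offline by disabling the GPT fallback. Unquoted property names are a common model mistake that the regex pass in json_utils fixes:

from json_parser import fix_and_parse_json

messy = '{command: {name: "do_nothing", args: {}}, thoughts: {text: "idle"}}'
parsed = fix_and_parse_json(messy, try_to_fix_with_gpt=False)
assert parsed["command"]["name"] == "do_nothing"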
@@ -1,127 +0,0 @@
import re
import json
from config import Config

cfg = Config()


def extract_char_position(error_message: str) -> int:
    """Extract the character position from the JSONDecodeError message.

    Args:
        error_message (str): The error message from the JSONDecodeError
        exception.

    Returns:
        int: The character position.
    """
    char_pattern = re.compile(r'\(char (\d+)\)')
    if match := char_pattern.search(error_message):
        return int(match[1])
    else:
        raise ValueError("Character position not found in the error message.")


def add_quotes_to_property_names(json_string: str) -> str:
    """
    Add quotes to property names in a JSON string.

    Args:
        json_string (str): The JSON string.

    Returns:
        str: The JSON string with quotes added to property names.
    """

    def replace_func(match):
        return f'"{match.group(1)}":'

    property_name_pattern = re.compile(r'(\w+):')
    corrected_json_string = property_name_pattern.sub(
        replace_func,
        json_string)

    try:
        json.loads(corrected_json_string)
        return corrected_json_string
    except json.JSONDecodeError as e:
        raise e


def balance_braces(json_string: str) -> str:
    """
    Balance the braces in a JSON string.

    Args:
        json_string (str): The JSON string.

    Returns:
        str: The JSON string with braces balanced.
    """

    open_braces_count = json_string.count('{')
    close_braces_count = json_string.count('}')

    while open_braces_count > close_braces_count:
        json_string += '}'
        close_braces_count += 1

    while close_braces_count > open_braces_count:
        json_string = json_string.rstrip('}')
        close_braces_count -= 1

    try:
        json.loads(json_string)
        return json_string
    except json.JSONDecodeError as e:
        raise e


def fix_invalid_escape(json_str: str, error_message: str) -> str:
    """Drop invalid escape characters one at a time until the string parses or a different error appears."""
    while error_message.startswith('Invalid \\escape'):
        bad_escape_location = extract_char_position(error_message)
        json_str = json_str[:bad_escape_location] + \
            json_str[bad_escape_location + 1:]
        try:
            json.loads(json_str)
            return json_str
        except json.JSONDecodeError as e:
            if cfg.debug:
                print('json loads error - fix invalid escape', e)
            error_message = str(e)
    return json_str


def correct_json(json_str: str) -> str:
    """
    Correct common JSON errors.

    Args:
        json_str (str): The JSON string.
    """

    try:
        if cfg.debug:
            print("json", json_str)
        json.loads(json_str)
        return json_str
    except json.JSONDecodeError as e:
        if cfg.debug:
            print('json loads error', e)
        error_message = str(e)
        if error_message.startswith('Invalid \\escape'):
            json_str = fix_invalid_escape(json_str, error_message)
        if error_message.startswith('Expecting property name enclosed in double quotes'):
            json_str = add_quotes_to_property_names(json_str)
            try:
                json.loads(json_str)
                return json_str
            except json.JSONDecodeError as e:
                if cfg.debug:
                    print('json loads error - add quotes', e)
                error_message = str(e)
        if balanced_str := balance_braces(json_str):
            return balanced_str
    return json_str
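balance_braces handles the common failure of a reply truncated before its closing braces:

from json_utils import balance_braces

truncated = '{"command": {"name": "do_nothing", "args": {}'
fixed = balance_braces(truncated)
print(fixed)   # {"command": {"name": "do_nothing", "args": {}}}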
@@ -1,28 +0,0 @@
import openai
from config import Config

cfg = Config()

openai.api_key = cfg.openai_api_key


# Overly simple abstraction until we create something better
def create_chat_completion(messages, model=None, temperature=None, max_tokens=None) -> str:
    """Create a chat completion using the OpenAI API"""
    if cfg.use_azure:
        response = openai.ChatCompletion.create(
            deployment_id=cfg.openai_deployment_id,
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens
        )
    else:
        response = openai.ChatCompletion.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens
        )

    resp = response.choices[0].message["content"]
    for plugin in cfg.plugins:
        resp = plugin.on_response(resp)
    return resp
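A minimal call sketch, assuming OPENAI_API_KEY is set in the environment (the .env file is loaded by config). The model name is illustrative; callers normally pass cfg.fast_llm_model or cfg.smart_llm_model:

from llm_utils import create_chat_completion

reply = create_chat_completion(
    messages=[{"role": "user", "content": "Say hello in five words."}],
    model="gpt-3.5-turbo",   # illustrative
    temperature=0,
    max_tokens=20,
)
print(reply)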
scripts/main.py
@@ -1,447 +0,0 @@
import json
import random
import commands as cmd
import utils
from memory import get_memory
import data
import chat
from colorama import Fore, Style
from spinner import Spinner
import time
import speak
from enum import Enum, auto
import sys
from config import Config
from json_parser import fix_and_parse_json
from plugins import load_plugins
from ai_config import AIConfig
import os
from pathlib import Path
import traceback
import yaml
import argparse
import logging

cfg = Config()


def configure_logging():
    logging.basicConfig(filename='log.txt',
                        filemode='a',
                        format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                        datefmt='%H:%M:%S',
                        level=logging.DEBUG)
    return logging.getLogger('AutoGPT')


def check_openai_api_key():
    """Check if the OpenAI API key is set in config.py or as an environment variable."""
    if not cfg.openai_api_key:
        print(
            Fore.RED +
            "Please set your OpenAI API key in config.py or as an environment variable."
        )
        print("You can get your key from https://beta.openai.com/account/api-keys")
        exit(1)


def print_to_console(
        title,
        title_color,
        content,
        speak_text=False,
        min_typing_speed=0.05,
        max_typing_speed=0.01):
    """Prints text to the console with a typing effect"""
    global cfg
    global logger
    if speak_text and cfg.speak_mode:
        speak.say_text(f"{title}. {content}")
    print(title_color + title + " " + Style.RESET_ALL, end="")
    if content:
        logger.info(title + ': ' + content)
        if isinstance(content, list):
            content = " ".join(content)
        words = content.split()
        for i, word in enumerate(words):
            print(word, end="", flush=True)
            if i < len(words) - 1:
                print(" ", end="", flush=True)
            typing_speed = random.uniform(min_typing_speed, max_typing_speed)
            time.sleep(typing_speed)
            # type faster after each word
            min_typing_speed = min_typing_speed * 0.95
            max_typing_speed = max_typing_speed * 0.95
    print()


def print_assistant_thoughts(assistant_reply):
    """Prints the assistant's thoughts to the console"""
    global ai_name
    global cfg
    try:
        # Parse and print Assistant response
        assistant_reply_json = fix_and_parse_json(assistant_reply)

        # Check if assistant_reply_json is a string and attempt to parse it into a JSON object
        if isinstance(assistant_reply_json, str):
            try:
                assistant_reply_json = json.loads(assistant_reply_json)
            except json.JSONDecodeError as e:
                print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
                assistant_reply_json = {}

        assistant_thoughts_reasoning = None
        assistant_thoughts_plan = None
        assistant_thoughts_speak = None
        assistant_thoughts_criticism = None
        assistant_thoughts = assistant_reply_json.get("thoughts", {})
        assistant_thoughts_text = assistant_thoughts.get("text")

        if assistant_thoughts:
            assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
            assistant_thoughts_plan = assistant_thoughts.get("plan")
            assistant_thoughts_criticism = assistant_thoughts.get("criticism")
            assistant_thoughts_speak = assistant_thoughts.get("speak")

        print_to_console(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text)
        print_to_console("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning)

        if assistant_thoughts_plan:
            print_to_console("PLAN:", Fore.YELLOW, "")
            # If it's a list, join it into a string
            if isinstance(assistant_thoughts_plan, list):
                assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
            elif isinstance(assistant_thoughts_plan, dict):
                assistant_thoughts_plan = str(assistant_thoughts_plan)

            # Split the input_string using the newline character and dashes
            lines = assistant_thoughts_plan.split('\n')
            for line in lines:
                line = line.lstrip("- ")
                print_to_console("- ", Fore.GREEN, line.strip())

        print_to_console("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism)
        # Speak the assistant's thoughts
        if cfg.speak_mode and assistant_thoughts_speak:
            speak.say_text(assistant_thoughts_speak)

    except json.decoder.JSONDecodeError:
        print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)

    # All other errors, return "Error: + error message"
    except Exception as e:
        call_stack = traceback.format_exc()
        print_to_console("Error: \n", Fore.RED, call_stack)


def load_variables(config_file="config.yaml"):
    """Load variables from yaml file if it exists, otherwise prompt the user for input"""
    try:
        with open(config_file) as file:
            config = yaml.load(file, Loader=yaml.FullLoader)
        ai_name = config.get("ai_name")
        ai_role = config.get("ai_role")
        ai_goals = config.get("ai_goals")
    except FileNotFoundError:
        ai_name = ""
        ai_role = ""
        ai_goals = []

    # Prompt the user for input if config file is missing or empty values
    if not ai_name:
        ai_name = utils.clean_input("Name your AI: ")
        if ai_name == "":
            ai_name = "Entrepreneur-GPT"

    if not ai_role:
        ai_role = utils.clean_input(f"{ai_name} is: ")
        if ai_role == "":
            ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."

    if not ai_goals:
        print("Enter up to 5 goals for your AI: ")
        print("For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
        print("Enter nothing to load defaults, enter nothing when finished.")
        ai_goals = []
        for i in range(5):
            ai_goal = utils.clean_input(f"Goal {i+1}: ")
            if ai_goal == "":
                break
            ai_goals.append(ai_goal)
        if len(ai_goals) == 0:
            ai_goals = ["Increase net worth", "Grow Twitter Account", "Develop and manage multiple businesses autonomously"]

    # Save variables to yaml file
    config = {"ai_name": ai_name, "ai_role": ai_role, "ai_goals": ai_goals}
    with open(config_file, "w") as file:
        documents = yaml.dump(config, file)

    prompt = data.load_prompt()
    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""

    # Construct full prompt
    full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
    for i, goal in enumerate(ai_goals):
        full_prompt += f"{i+1}. {goal}\n"

    full_prompt += f"\n\n{prompt}"
    return full_prompt


def construct_prompt():
    """Construct the prompt for the AI to respond to"""
    config = AIConfig.load()
    if config.ai_name:
        print_to_console(
            "Welcome back! ",
            Fore.GREEN,
            f"Would you like me to return to being {config.ai_name}?",
            speak_text=True)
        should_continue = utils.clean_input(f"""Continue with the last settings?
Name: {config.ai_name}
Role: {config.ai_role}
Goals: {config.ai_goals}
Continue (y/n): """)
        if should_continue.lower() == "n":
            config = AIConfig()

    if not config.ai_name:
        config = prompt_user()
        config.save()

    # Get rid of this global:
    global ai_name
    ai_name = config.ai_name

    full_prompt = config.construct_full_prompt()
    return full_prompt


def prompt_user():
    """Prompt the user for input"""
    ai_name = ""
    # Construct the prompt
    print_to_console(
        "Welcome to Auto-GPT! ",
        Fore.GREEN,
        "Enter the name of your AI and its role below. Entering nothing will load defaults.",
        speak_text=True)

    # Get AI Name from User
    print_to_console(
        "Name your AI: ",
        Fore.GREEN,
        "For example, 'Entrepreneur-GPT'")
    ai_name = utils.clean_input("AI Name: ")
    if ai_name == "":
        ai_name = "Entrepreneur-GPT"

    print_to_console(
        f"{ai_name} here!",
        Fore.LIGHTBLUE_EX,
        "I am at your service.",
        speak_text=True)

    # Get AI Role from User
    print_to_console(
        "Describe your AI's role: ",
        Fore.GREEN,
        "For example, 'an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.'")
    ai_role = utils.clean_input(f"{ai_name} is: ")
    if ai_role == "":
        ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."

    # Enter up to 5 goals for the AI
    print_to_console(
        "Enter up to 5 goals for your AI: ",
        Fore.GREEN,
        "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
    print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
    ai_goals = []
    for i in range(5):
        ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
        if ai_goal == "":
            break
        ai_goals.append(ai_goal)
    if len(ai_goals) == 0:
        ai_goals = ["Increase net worth", "Grow Twitter Account",
                    "Develop and manage multiple businesses autonomously"]

    config = AIConfig(ai_name, ai_role, ai_goals)
    return config


def parse_arguments():
    """Parses the arguments passed to the script"""
    global cfg
    cfg.set_continuous_mode(False)
    cfg.set_speak_mode(False)

    parser = argparse.ArgumentParser(description='Process arguments.')
    parser.add_argument('--continuous', action='store_true', help='Enable Continuous Mode')
    parser.add_argument('--speak', action='store_true', help='Enable Speak Mode')
    parser.add_argument('--debug', action='store_true', help='Enable Debug Mode')
    parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode')
    args = parser.parse_args()

    if args.continuous:
        print_to_console("Continuous Mode: ", Fore.RED, "ENABLED")
        print_to_console(
            "WARNING: ",
            Fore.RED,
            "Continuous mode is not recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise. Use at your own risk.")
        cfg.set_continuous_mode(True)

    if args.speak:
        print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
        cfg.set_speak_mode(True)

    if args.debug:
        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
        cfg.set_debug_mode(True)

    if args.gpt3only:
        print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
        cfg.set_smart_llm_model(cfg.fast_llm_model)


# TODO: fill in llm values here
check_openai_api_key()
cfg = Config()
logger = configure_logging()
parse_arguments()
ai_name = ""
prompt = construct_prompt()
# print(prompt)
# Initialize variables
full_message_history = []
result = None
next_action_count = 0
# Make a constant:
user_input = "Determine which next command to use, and respond using the format specified above:"

# Initialize memory and make sure it is empty.
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(cfg, init=True)
print('Using memory of type: ' + memory.__class__.__name__)


plugins_found = load_plugins(Path(os.getcwd()) / "plugins")
loaded_plugins = []
for plugin in plugins_found:
    if plugin.__name__ in cfg.plugins_blacklist:
        continue
    if plugin.__name__ in cfg.plugins_whitelist:
        loaded_plugins.append(plugin())
    else:
        ack = input(
            f"WARNING: Plugin {plugin.__name__} found. But not in the"
            " whitelist... Load? (y/n): "
        )
        if ack.lower() == "y":
            loaded_plugins.append(plugin())

if loaded_plugins:
    print(f"\nPlugins found: {len(loaded_plugins)}\n"
          "--------------------")
for plugin in loaded_plugins:
    print(f"{plugin._name}: {plugin._version} - {plugin._description}")

cfg.set_plugins(loaded_plugins)

# Interaction Loop
while True:
    # Send message to AI, get response
    with Spinner("Thinking... "):
        assistant_reply = chat.chat_with_ai(
            prompt,
            user_input,
            full_message_history,
            memory,
            cfg.fast_token_limit)  # TODO: This hardcodes the model to use GPT3.5. Make this an argument

    # Print Assistant thoughts
    print_assistant_thoughts(assistant_reply)

    # Get command name and arguments
    try:
        command_name, arguments = cmd.get_command(assistant_reply)
    except Exception as e:
        print_to_console("Error: \n", Fore.RED, str(e))

    if not cfg.continuous_mode and next_action_count == 0:
        ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
        # Get key press: Prompt the user to press enter to continue or escape
        # to exit
        user_input = ""
        print_to_console(
            "NEXT ACTION: ",
            Fore.CYAN,
            f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
        print(
            f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {ai_name}...",
            flush=True)
        while True:
            console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
            if console_input.lower() == "y":
                user_input = "GENERATE NEXT COMMAND JSON"
                break
            elif console_input.lower().startswith("y -"):
                try:
                    next_action_count = abs(int(console_input.split(" ")[1]))
                    user_input = "GENERATE NEXT COMMAND JSON"
                except ValueError:
                    print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.")
                    continue
                break
            elif console_input.lower() == "n":
                user_input = "EXIT"
                break
            else:
                user_input = console_input
                command_name = "human_feedback"
                break

        if user_input == "GENERATE NEXT COMMAND JSON":
            print_to_console(
                "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                Fore.MAGENTA,
                "")
        elif user_input == "EXIT":
            print("Exiting...", flush=True)
            break
    else:
        # Print command
        print_to_console(
            "NEXT ACTION: ",
            Fore.CYAN,
            f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")

    # Execute command
    if command_name.lower().startswith("error"):
        result = f"Command {command_name} threw the following error: " + arguments
    elif command_name == "human_feedback":
        result = f"Human feedback: {user_input}"
    else:
        result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
        if next_action_count > 0:
            next_action_count -= 1

    memory_to_add = f"Assistant Reply: {assistant_reply} " \
                    f"\nResult: {result} " \
                    f"\nHuman Feedback: {user_input} "

    memory.add(memory_to_add)

    # Check if there's a result from the command append it to the message
    # history
    if result is not None:
        full_message_history.append(chat.create_chat_message("system", result))
        print_to_console("SYSTEM: ", Fore.YELLOW, result)
    else:
        full_message_history.append(
            chat.create_chat_message(
                "system", "Unable to execute command"))
        print_to_console("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
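The loader above implies a plugin contract: load_plugins() yields classes whose instances expose _name, _version, _description and (per llm_utils) an on_response() hook. A minimal sketch of such a plugin, assuming that contract; the class and file name are hypothetical:

# Hypothetical plugins/example_plugin.py, sketched from the loader's expectations.
class ExamplePlugin:
    _name = "example-plugin"
    _version = "0.1.0"
    _description = "Logs every model response before Auto-GPT parses it."

    def on_response(self, response: str) -> str:
        # Inspect or transform the raw model reply, then pass it through.
        print(f"[{self._name}] saw {len(response)} characters")
        return response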
@@ -1,44 +0,0 @@
from memory.local import LocalCache
try:
    from memory.redismem import RedisMemory
except ImportError:
    print("Redis not installed. Skipping import.")
    RedisMemory = None

try:
    from memory.pinecone import PineconeMemory
except ImportError:
    print("Pinecone not installed. Skipping import.")
    PineconeMemory = None


def get_memory(cfg, init=False):
    memory = None
    if cfg.memory_backend == "pinecone":
        if not PineconeMemory:
            print("Error: Pinecone is not installed. Please install pinecone"
                  " to use Pinecone as a memory backend.")
        else:
            memory = PineconeMemory(cfg)
            if init:
                memory.clear()
    elif cfg.memory_backend == "redis":
        if not RedisMemory:
            print("Error: Redis is not installed. Please install redis-py to"
                  " use Redis as a memory backend.")
        else:
            memory = RedisMemory(cfg)

    if memory is None:
        memory = LocalCache(cfg)
        if init:
            memory.clear()
    return memory


__all__ = [
    "get_memory",
    "LocalCache",
    "RedisMemory",
    "PineconeMemory",
]
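A short usage sketch for get_memory, assuming a configured Config instance; illustrative only:

# Illustrative only; Config() is the same object used throughout the repo.
from config import Config
from memory import get_memory

cfg = Config()
memory = get_memory(cfg, init=True)  # falls back to LocalCache if no backend is available
memory.add("The capital of France is Paris.")
print(memory.get_relevant("What is France's capital?", 1))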
@@ -1,31 +0,0 @@
"""Base class for memory providers."""
import abc
from config import AbstractSingleton
import openai


def get_ada_embedding(text):
    text = text.replace("\n", " ")
    return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]


class MemoryProviderSingleton(AbstractSingleton):
    @abc.abstractmethod
    def add(self, data):
        pass

    @abc.abstractmethod
    def get(self, data):
        pass

    @abc.abstractmethod
    def clear(self):
        pass

    @abc.abstractmethod
    def get_relevant(self, data, num_relevant=5):
        pass

    @abc.abstractmethod
    def get_stats(self):
        pass
@@ -1,114 +0,0 @@
import dataclasses
import orjson
from typing import Any, List, Optional
import numpy as np
import os
from memory.base import MemoryProviderSingleton, get_ada_embedding


EMBED_DIM = 1536
SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS


def create_default_embeddings():
    return np.zeros((0, EMBED_DIM)).astype(np.float32)


@dataclasses.dataclass
class CacheContent:
    texts: List[str] = dataclasses.field(default_factory=list)
    embeddings: np.ndarray = dataclasses.field(
        default_factory=create_default_embeddings
    )


class LocalCache(MemoryProviderSingleton):

    # on load, load our database
    def __init__(self, cfg) -> None:
        self.filename = f"{cfg.memory_index}.json"
        if os.path.exists(self.filename):
            with open(self.filename, 'rb') as f:
                loaded = orjson.loads(f.read())
                self.data = CacheContent(**loaded)
        else:
            self.data = CacheContent()

    def add(self, text: str):
        """
        Add text to our list of texts, add embedding as row to our
        embeddings-matrix

        Args:
            text: str

        Returns: None
        """
        if 'Command Error:' in text:
            return ""
        self.data.texts.append(text)

        embedding = get_ada_embedding(text)

        vector = np.array(embedding).astype(np.float32)
        vector = vector[np.newaxis, :]
        self.data.embeddings = np.concatenate(
            [
                vector,
                self.data.embeddings,
            ],
            axis=0,
        )

        with open(self.filename, 'wb') as f:
            out = orjson.dumps(
                self.data,
                option=SAVE_OPTIONS
            )
            f.write(out)
        return text

    def clear(self) -> str:
        """
        Clears the local cache.

        Returns: A message indicating that the memory has been cleared.
        """
        self.data = CacheContent()
        return "Obliviated"

    def get(self, data: str) -> Optional[List[Any]]:
        """
        Gets the data from the memory that is most relevant to the given data.

        Args:
            data: The data to compare to.

        Returns: The most relevant data.
        """
        return self.get_relevant(data, 1)

    def get_relevant(self, text: str, k: int) -> List[Any]:
        """
        matrix-vector mult to find score-for-each-row-of-matrix
        get indices for top-k winning scores
        return texts for those indices

        Args:
            text: str
            k: int

        Returns: List[str]
        """
        embedding = get_ada_embedding(text)

        scores = np.dot(self.data.embeddings, embedding)

        top_k_indices = np.argsort(scores)[-k:][::-1]

        return [self.data.texts[i] for i in top_k_indices]

    def get_stats(self):
        """
        Returns: The stats of the local cache.
        """
        return len(self.data.texts), self.data.embeddings.shape
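The relevance scoring in get_relevant is a plain dot product; because ada-002 embeddings are approximately unit-length, this behaves like cosine similarity. A self-contained sketch of the same top-k logic on toy vectors (no API calls):

# Toy reproduction of LocalCache.get_relevant's scoring.
import numpy as np

embeddings = np.array([[1.0, 0.0], [0.0, 1.0], [0.7, 0.7]], dtype=np.float32)
query = np.array([0.9, 0.1], dtype=np.float32)

scores = np.dot(embeddings, query)     # one score per stored row
top_k = np.argsort(scores)[-2:][::-1]  # indices of the 2 highest scores
print(top_k)                           # -> [0 2]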
@@ -1,51 +0,0 @@

import pinecone

from memory.base import MemoryProviderSingleton, get_ada_embedding


class PineconeMemory(MemoryProviderSingleton):
    def __init__(self, cfg):
        pinecone_api_key = cfg.pinecone_api_key
        pinecone_region = cfg.pinecone_region
        pinecone.init(api_key=pinecone_api_key, environment=pinecone_region)
        dimension = 1536
        metric = "cosine"
        pod_type = "p1"
        table_name = "auto-gpt"
        # this assumes we don't start with memory.
        # for now this works.
        # we'll need a more complicated and robust system if we want to start with memory.
        self.vec_num = 0
        if table_name not in pinecone.list_indexes():
            pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type)
        self.index = pinecone.Index(table_name)

    def add(self, data):
        vector = get_ada_embedding(data)
        # no metadata here. We may wish to change that long term.
        resp = self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})])
        _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}"
        self.vec_num += 1
        return _text

    def get(self, data):
        return self.get_relevant(data, 1)

    def clear(self):
        self.index.delete(deleteAll=True)
        return "Obliviated"

    def get_relevant(self, data, num_relevant=5):
        """
        Returns all the data in the memory that is relevant to the given data.
        :param data: The data to compare to.
        :param num_relevant: The number of relevant data to return. Defaults to 5
        """
        query_embedding = get_ada_embedding(data)
        results = self.index.query(query_embedding, top_k=num_relevant, include_metadata=True)
        sorted_results = sorted(results.matches, key=lambda x: x.score)
        return [str(item['metadata']["raw_text"]) for item in sorted_results]

    def get_stats(self):
        return self.index.describe_index_stats()
@@ -1,143 +0,0 @@
"""Redis memory provider."""
from typing import Any, List, Optional
import redis
from redis.commands.search.field import VectorField, TextField
from redis.commands.search.query import Query
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
import numpy as np

from memory.base import MemoryProviderSingleton, get_ada_embedding


SCHEMA = [
    TextField("data"),
    VectorField(
        "embedding",
        "HNSW",
        {
            "TYPE": "FLOAT32",
            "DIM": 1536,
            "DISTANCE_METRIC": "COSINE"
        }
    ),
]


class RedisMemory(MemoryProviderSingleton):
    def __init__(self, cfg):
        """
        Initializes the Redis memory provider.

        Args:
            cfg: The config object.

        Returns: None
        """
        redis_host = cfg.redis_host
        redis_port = cfg.redis_port
        redis_password = cfg.redis_password
        self.dimension = 1536
        self.redis = redis.Redis(
            host=redis_host,
            port=redis_port,
            password=redis_password,
            db=0  # Cannot be changed
        )
        self.cfg = cfg
        if cfg.wipe_redis_on_start:
            self.redis.flushall()
        try:
            self.redis.ft(f"{cfg.memory_index}").create_index(
                fields=SCHEMA,
                definition=IndexDefinition(
                    prefix=[f"{cfg.memory_index}:"],
                    index_type=IndexType.HASH
                )
            )
        except Exception as e:
            print("Error creating Redis search index: ", e)
        existing_vec_num = self.redis.get(f'{cfg.memory_index}-vec_num')
        self.vec_num = int(existing_vec_num.decode('utf-8')) if \
            existing_vec_num else 0

    def add(self, data: str) -> str:
        """
        Adds a data point to the memory.

        Args:
            data: The data to add.

        Returns: Message indicating that the data has been added.
        """
        if 'Command Error:' in data:
            return ""
        vector = get_ada_embedding(data)
        vector = np.array(vector).astype(np.float32).tobytes()
        data_dict = {
            b"data": data,
            "embedding": vector
        }
        pipe = self.redis.pipeline()
        pipe.hset(f"{self.cfg.memory_index}:{self.vec_num}", mapping=data_dict)
        _text = f"Inserting data into memory at index: {self.vec_num}:\n" \
                f"data: {data}"
        self.vec_num += 1
        pipe.set(f'{self.cfg.memory_index}-vec_num', self.vec_num)
        pipe.execute()
        return _text

    def get(self, data: str) -> Optional[List[Any]]:
        """
        Gets the data from the memory that is most relevant to the given data.

        Args:
            data: The data to compare to.

        Returns: The most relevant data.
        """
        return self.get_relevant(data, 1)

    def clear(self) -> str:
        """
        Clears the redis server.

        Returns: A message indicating that the memory has been cleared.
        """
        self.redis.flushall()
        return "Obliviated"

    def get_relevant(
        self,
        data: str,
        num_relevant: int = 5
    ) -> Optional[List[Any]]:
        """
        Returns all the data in the memory that is relevant to the given data.
        Args:
            data: The data to compare to.
            num_relevant: The number of relevant data to return.

        Returns: A list of the most relevant data.
        """
        query_embedding = get_ada_embedding(data)
        base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]"
        query = Query(base_query).return_fields(
            "data",
            "vector_score"
        ).sort_by("vector_score").dialect(2)
        query_vector = np.array(query_embedding).astype(np.float32).tobytes()

        try:
            results = self.redis.ft(f"{self.cfg.memory_index}").search(
                query, query_params={"vector": query_vector}
            )
        except Exception as e:
            print("Error calling Redis search: ", e)
            return None
        return [result.data for result in results.docs]

    def get_stats(self):
        """
        Returns: The stats of the memory index.
        """
        return self.redis.ft(f"{self.cfg.memory_index}").info()
@@ -1,55 +0,0 @@
import os
from playsound import playsound
import requests
import gtts
from config import Config
cfg = Config()


# TODO: Nicer names for these ids
voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]

tts_headers = {
    "Content-Type": "application/json",
    "xi-api-key": cfg.elevenlabs_api_key
}

def eleven_labs_speech(text, voice_index=0):
    """Speak text using elevenlabs.io's API"""
    tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
        voice_id=voices[voice_index])
    formatted_message = {"text": text}
    response = requests.post(
        tts_url, headers=tts_headers, json=formatted_message)

    if response.status_code == 200:
        with open("speech.mpeg", "wb") as f:
            f.write(response.content)
        playsound("speech.mpeg")
        os.remove("speech.mpeg")
        return True
    else:
        print("Request failed with status code:", response.status_code)
        print("Response content:", response.content)
        return False

def gtts_speech(text):
    tts = gtts.gTTS(text)
    tts.save("speech.mp3")
    playsound("speech.mp3")
    os.remove("speech.mp3")

def macos_tts_speech(text):
    os.system(f'say "{text}"')

def say_text(text, voice_index=0):
    if not cfg.elevenlabs_api_key:
        if cfg.use_mac_os_tts == 'True':
            macos_tts_speech(text)
        else:
            gtts_speech(text)
    else:
        success = eleven_labs_speech(text, voice_index)
        if not success:
            gtts_speech(text)
@@ -1,36 +0,0 @@
import sys
import threading
import itertools
import time


class Spinner:
    """A simple spinner class"""
    def __init__(self, message="Loading...", delay=0.1):
        """Initialize the spinner class"""
        self.spinner = itertools.cycle(['-', '/', '|', '\\'])
        self.delay = delay
        self.message = message
        self.running = False
        self.spinner_thread = None

    def spin(self):
        """Spin the spinner"""
        while self.running:
            sys.stdout.write(next(self.spinner) + " " + self.message + "\r")
            sys.stdout.flush()
            time.sleep(self.delay)
            sys.stdout.write('\b' * (len(self.message) + 2))

    def __enter__(self):
        """Start the spinner"""
        self.running = True
        self.spinner_thread = threading.Thread(target=self.spin)
        self.spinner_thread.start()

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Stop the spinner"""
        self.running = False
        self.spinner_thread.join()
        sys.stdout.write('\r' + ' ' * (len(self.message) + 2) + '\r')
        sys.stdout.flush()
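Spinner is used as a context manager in scripts/main.py above; a standalone demonstration, assuming this module is run directly:

# Standalone demonstration; the sleep simulates a slow operation.
if __name__ == "__main__":
    with Spinner("Working... "):
        time.sleep(2)
    print("Done.")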
@@ -1,57 +0,0 @@
import tiktoken
from typing import List, Dict


def count_message_tokens(messages: List[Dict[str, str]], model: str = "gpt-3.5-turbo-0301") -> int:
    """
    Returns the number of tokens used by a list of messages.

    Args:
        messages (list): A list of messages, each of which is a dictionary containing the role and content of the message.
        model (str): The name of the model to use for tokenization. Defaults to "gpt-3.5-turbo-0301".

    Returns:
        int: The number of tokens used by the list of messages.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        print("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    if model == "gpt-3.5-turbo":
        # !Note: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301.
        return count_message_tokens(messages, model="gpt-3.5-turbo-0301")
    elif model == "gpt-4":
        # !Note: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.
        return count_message_tokens(messages, model="gpt-4-0314")
    elif model == "gpt-3.5-turbo-0301":
        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
        tokens_per_name = -1  # if there's a name, the role is omitted
    elif model == "gpt-4-0314":
        tokens_per_message = 3
        tokens_per_name = 1
    else:
        raise NotImplementedError(f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens


def count_string_tokens(string: str, model_name: str) -> int:
    """
    Returns the number of tokens in a text string.

    Args:
        string (str): The text string.
        model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo")

    Returns:
        int: The number of tokens in the text string.
    """
    encoding = tiktoken.encoding_for_model(model_name)
    num_tokens = len(encoding.encode(string))
    return num_tokens
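A quick sanity check for the two counters; exact numbers depend on the tokenizer version, so the expected values are indicative, not guaranteed:

# Illustrative check; exact counts depend on the tiktoken version.
if __name__ == "__main__":
    msgs = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ]
    print(count_message_tokens(msgs))                           # message overhead + content tokens
    print(count_string_tokens("Hello world", "gpt-3.5-turbo"))  # raw string tokens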
@@ -1,8 +0,0 @@
def clean_input(prompt: str = ''):
    try:
        return input(prompt)
    except KeyboardInterrupt:
        print("You interrupted Auto-GPT")
        print("Quitting...")
        exit(0)