Merge branch 'master' into dev
scripts/agent_manager.py
@@ -1,10 +1,10 @@
-import openai
+from llm_utils import create_chat_completion

 next_key = 0
 agents = {} # key, (task, full_message_history, model)

 # Create new GPT agent

+# TODO: Centralise use of create_chat_completion() to globally enforce token limit

 def create_agent(task, prompt, model):
     """Create a new agent and return its key"""
@@ -14,13 +14,11 @@ def create_agent(task, prompt, model):
     messages = [{"role": "user", "content": prompt}, ]

-    # Start GTP3 instance
-    response = openai.ChatCompletion.create(
+    agent_reply = create_chat_completion(
         model=model,
         messages=messages,
     )

-    agent_reply = response.choices[0].message["content"]
-
     # Update full message history
     messages.append({"role": "assistant", "content": agent_reply})
@@ -44,14 +42,11 @@ def message_agent(key, message):
     messages.append({"role": "user", "content": message})

-    # Start GTP3 instance
-    response = openai.ChatCompletion.create(
+    agent_reply = create_chat_completion(
         model=model,
         messages=messages,
     )

-    # Get agent response
-    agent_reply = response.choices[0].message["content"]
-
     # Update full message history
     messages.append({"role": "assistant", "content": agent_reply})
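For orientation, a hedged sketch (not part of the commit) of how these two helpers are meant to be driven; the task and prompt strings are illustrative, and create_agent returns the key used for later messaging:

    from agent_manager import create_agent, message_agent

    # Spawn a sub-agent, then continue its conversation by key.
    key = create_agent("research", "You are a research sub-agent.", "gpt-3.5-turbo")
    reply = message_agent(key, "Summarize what you have found so far.")
    print(key, reply)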
scripts/ai_config.py (new file, 43 lines)
@@ -0,0 +1,43 @@
+import yaml
+import data
+
+
+class AIConfig:
+    def __init__(self, ai_name="", ai_role="", ai_goals=[]):
+        self.ai_name = ai_name
+        self.ai_role = ai_role
+        self.ai_goals = ai_goals
+
+    # Soon this will go in a folder where it remembers more stuff about the run(s)
+    SAVE_FILE = "last_run_ai_settings.yaml"
+
+    @classmethod
+    def load(cls, config_file=SAVE_FILE):
+        # Load variables from yaml file if it exists
+        try:
+            with open(config_file) as file:
+                config_params = yaml.load(file, Loader=yaml.FullLoader)
+        except FileNotFoundError:
+            config_params = {}
+
+        ai_name = config_params.get("ai_name", "")
+        ai_role = config_params.get("ai_role", "")
+        ai_goals = config_params.get("ai_goals", [])
+
+        return cls(ai_name, ai_role, ai_goals)
+
+    def save(self, config_file=SAVE_FILE):
+        config = {"ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals}
+        with open(config_file, "w") as file:
+            yaml.dump(config, file)
+
+    def construct_full_prompt(self):
+        prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
+
+        # Construct full prompt
+        full_prompt = f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
+        for i, goal in enumerate(self.ai_goals):
+            full_prompt += f"{i+1}. {goal}\n"
+
+        full_prompt += f"\n\n{data.load_prompt()}"
+        return full_prompt
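A minimal sketch (not part of the commit) of the round trip this new class supports; the fallback name is illustrative, and the save file name comes from the class itself:

    from ai_config import AIConfig

    # Load last run's settings (empty defaults if the YAML file is missing),
    # fill a gap, persist, then build the system prompt for the agent.
    config = AIConfig.load()
    if not config.ai_name:
        config.ai_name = "Entrepreneur-GPT"
    config.save()
    print(config.construct_full_prompt())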
scripts/ai_functions.py
@@ -1,31 +1,12 @@
 from typing import List, Optional
 import json
-import openai
-
-
-# This is a magic function that can do anything with no-code. See
-# https://github.com/Torantulino/AI-Functions for more info.
-def call_ai_function(function, args, description, model="gpt-4"):
-    """Calls an AI function and returns the result."""
-    args = ", ".join(args)
-    messages = [
-        {
-            "role": "system",
-            "content": f"You are now the following python function: ```# {description}\n{function}```\n\nOnly respond with your `return` value.",
-        },
-        {"role": "user", "content": args},
-    ]
-
-    response = openai.ChatCompletion.create(
-        model=model, messages=messages, temperature=0
-    )
-
-    return response.choices[0].message["content"]
-
+from config import Config
+from call_ai_function import call_ai_function
+from json_parser import fix_and_parse_json
+cfg = Config()

 # Evaluating code


 def evaluate_code(code: str) -> List[str]:
     """Evaluates the given code and returns a list of suggestions for improvements."""
     function_string = "def analyze_code(code: str) -> List[str]:"
@@ -33,12 +14,12 @@ def evaluate_code(code: str) -> List[str]:
     description_string = """Analyzes the given code and returns a list of suggestions for improvements."""

     result_string = call_ai_function(function_string, args, description_string)
-    return json.loads(result_string)
-
+    return result_string


 # Improving code


 def improve_code(suggestions: List[str], code: str) -> str:
     """Improves the provided code based on the suggestions provided, making no other changes."""
     function_string = (
@@ -64,3 +45,5 @@ def write_tests(code: str, focus: List[str]) -> str:

     result_string = call_ai_function(function_string, args, description_string)
     return result_string
+
+
scripts/browse.py
@@ -1,9 +1,9 @@
 from googlesearch import search
 import requests
 from bs4 import BeautifulSoup
 from readability import Document
-import openai
 from config import Config
+from llm_utils import create_chat_completion

 cfg = Config()

 def scrape_text(url):
     """Scrape text from a webpage"""
@@ -94,7 +94,7 @@ def summarize_text(text, is_website=True):
             messages = [
                 {
                     "role": "user",
-                    "content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specifc information this subpage contains.: " +
+                    "content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specific information this subpage contains.: " +
                     chunk},
             ]
         else:
@@ -105,13 +105,11 @@ def summarize_text(text, is_website=True):
                     chunk},
             ]

-        response = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo",
+        summary = create_chat_completion(
+            model=cfg.fast_llm_model,
             messages=messages,
             max_tokens=300,
         )

-        summary = response.choices[0].message.content
         summaries.append(summary)
         print("Summarized " + str(len(chunks)) + " chunks.")
@@ -122,7 +120,7 @@ def summarize_text(text, is_website=True):
         messages = [
             {
                 "role": "user",
-                "content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specifc information this subpage contains.: " +
+                "content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specific information this subpage contains.: " +
                 combined_summary},
         ]
     else:
@@ -133,11 +131,10 @@ def summarize_text(text, is_website=True):
                 combined_summary},
         ]

-    response = openai.ChatCompletion.create(
-        model="gpt-3.5-turbo",
+    final_summary = create_chat_completion(
+        model=cfg.fast_llm_model,
         messages=messages,
         max_tokens=300,
     )

-    final_summary = response.choices[0].message.content
     return final_summary
scripts/call_ai_function.py (new file, 25 lines)
@@ -0,0 +1,25 @@
+from config import Config
+cfg = Config()
+
+from llm_utils import create_chat_completion
+
+# This is a magic function that can do anything with no-code. See
+# https://github.com/Torantulino/AI-Functions for more info.
+def call_ai_function(function, args, description, model=cfg.smart_llm_model):
+    # For each arg, if any are None, convert to "None":
+    args = [str(arg) if arg is not None else "None" for arg in args]
+    # parse args to comma separated string
+    args = ", ".join(args)
+    messages = [
+        {
+            "role": "system",
+            "content": f"You are now the following python function: ```# {description}\n{function}```\n\nOnly respond with your `return` value.",
+        },
+        {"role": "user", "content": args},
+    ]
+
+    response = create_chat_completion(
+        model=model, messages=messages, temperature=0
+    )
+
+    return response
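To make the "magic function" pattern concrete, a hedged usage sketch; reverse_words and its arguments are made up for illustration, and the function body never executes, since the model only sees the signature and description and role-plays the return value:

    from call_ai_function import call_ai_function

    # Ask the model to behave as this never-executed function and reply
    # with what its `return` value would be for the given arguments.
    function_string = "def reverse_words(sentence: str) -> str:"
    args = ["the quick brown fox"]
    description_string = "Reverses the order of words in the sentence."

    print(call_ai_function(function_string, args, description_string))
    # e.g. 'fox brown quick the'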
scripts/chat.py
@@ -1,9 +1,12 @@
 import time
 import openai
-import keys
+from dotenv import load_dotenv
+from config import Config
+import token_counter

-# Initialize the OpenAI API client
-openai.api_key = keys.OPENAI_API_KEY
+cfg = Config()
+
+from llm_utils import create_chat_completion


 def create_chat_message(role, content):
@@ -20,6 +23,8 @@ def create_chat_message(role, content):
     return {"role": role, "content": content}


+
+# TODO: Change debug from hardcode to argument
 def chat_with_ai(
         prompt,
         user_input,
@@ -43,16 +48,55 @@ def chat_with_ai(
         Returns:
         str: The AI's response.
         """
+        model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
+        # Reserve 1000 tokens for the response
         if debug:
             print(f"Token limit: {token_limit}")
+        send_token_limit = token_limit - 1000

         current_context = [
             create_chat_message(
-                "system", prompt), create_chat_message(
-                "system", f"Permanent memory: {permanent_memory}")]
-        current_context.extend(
-            full_message_history[-(token_limit - len(prompt) - len(permanent_memory) - 10):])
+                "system", prompt), create_chat_message(
+                "system", f"Permanent memory: {permanent_memory}")]
+
+        # Add messages from the full message history until we reach the token limit
+        next_message_to_add_index = len(full_message_history) - 1
+        current_tokens_used = 0
+        insertion_index = len(current_context)
+
+        # Count the currently used tokens
+        current_tokens_used = token_counter.count_message_tokens(current_context, model)
+        current_tokens_used += token_counter.count_message_tokens([create_chat_message("user", user_input)], model)  # Account for user input (appended later)
+
+        while next_message_to_add_index >= 0:
+            # print (f"CURRENT TOKENS USED: {current_tokens_used}")
+            message_to_add = full_message_history[next_message_to_add_index]
+
+            tokens_to_add = token_counter.count_message_tokens([message_to_add], model)
+            if current_tokens_used + tokens_to_add > send_token_limit:
+                break
+
+            # Add the most recent message to the start of the current context, after the two system prompts.
+            current_context.insert(insertion_index, full_message_history[next_message_to_add_index])
+
+            # Count the currently used tokens
+            current_tokens_used += tokens_to_add
+
+            # Move to the next most recent message in the full message history
+            next_message_to_add_index -= 1
+
+        # Append user input, the length of this is accounted for above
+        current_context.extend([create_chat_message("user", user_input)])
+
+        # Calculate remaining tokens
+        tokens_remaining = token_limit - current_tokens_used
+        # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"
+
+        # Debug print the current context
         if debug:
+            print(f"Token limit: {token_limit}")
+            print(f"Send Token Count: {current_tokens_used}")
+            print(f"Tokens remaining for response: {tokens_remaining}")
             print("------------ CONTEXT SENT TO AI ---------------")
             for message in current_context:
                 # Skip printing the prompt
@@ -60,15 +104,16 @@ def chat_with_ai(
                     continue
                 print(
                     f"{message['role'].capitalize()}: {message['content']}")
                 print()
             print("----------- END OF CONTEXT ----------------")

-        response = openai.ChatCompletion.create(
-            model="gpt-4",
+        # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
+        assistant_reply = create_chat_completion(
+            model=model,
             messages=current_context,
+            max_tokens=tokens_remaining,
         )

-        assistant_reply = response.choices[0].message["content"]
-
         # Update full message history
         full_message_history.append(
             create_chat_message(
@@ -79,5 +124,6 @@ def chat_with_ai(

         return assistant_reply
     except openai.error.RateLimitError:
+        # TODO: When we switch to langchain, this is built in
         print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
         time.sleep(10)
scripts/commands.py
@@ -8,16 +8,28 @@ from config import Config
 import ai_functions as ai
 from file_operations import read_file, write_to_file, append_to_file, delete_file
 from execute_code import execute_python_file
+from json_parser import fix_and_parse_json
+from googlesearch import search
 cfg = Config()


 def get_command(response):
     """Parse the response and return the command name and arguments"""
     try:
-        response_json = json.loads(response)
+        response_json = fix_and_parse_json(response)
+
+        if "command" not in response_json:
+            return "Error:", "Missing 'command' object in JSON"

         command = response_json["command"]

+        if "name" not in command:
+            return "Error:", "Missing 'name' field in 'command' object"
+
         command_name = command["name"]
-        arguments = command["args"]
+
+        # Use an empty dictionary if 'args' field is not present in 'command' object
+        arguments = command.get("args", {})
+
+        if not arguments:
+            arguments = {}
@@ -35,8 +47,6 @@ def execute_command(command_name, arguments):
     try:
         if command_name == "google":
             return google_search(arguments["input"])
-        elif command_name == "check_notifications":
-            return check_notifications(arguments["website"])
         elif command_name == "memory_add":
             return commit_memory(arguments["string"])
         elif command_name == "memory_del":
@@ -54,12 +64,6 @@ def execute_command(command_name, arguments):
             return list_agents()
         elif command_name == "delete_agent":
             return delete_agent(arguments["key"])
-        elif command_name == "navigate_website":
-            return navigate_website(arguments["action"], arguments["username"])
-        elif command_name == "register_account":
-            return register_account(
-                arguments["username"],
-                arguments["website"])
         elif command_name == "get_text_summary":
             return get_text_summary(arguments["url"])
         elif command_name == "get_hyperlinks":
@@ -103,7 +107,7 @@ def get_datetime():
 def google_search(query, num_results=8):
     """Return the results of a google search"""
     search_results = []
-    for j in browse.search(query, num_results=num_results):
+    for j in search(query, num_results=num_results):
         search_results.append(j)

     return json.dumps(search_results, ensure_ascii=False, indent=4)
@@ -156,8 +160,8 @@ def delete_memory(key):


 def overwrite_memory(key, string):
-    """Overwrite a memory with a given key"""
-    if key >= 0 and key < len(mem.permanent_memory):
+    """Overwrite a memory with a given key and string"""
+    if int(key) >= 0 and key < len(mem.permanent_memory):
         _text = "Overwriting memory with key " + \
             str(key) + " and string " + string
         mem.permanent_memory[key] = string
@@ -174,7 +178,7 @@ def shutdown():
     quit()


-def start_agent(name, task, prompt, model="gpt-3.5-turbo"):
+def start_agent(name, task, prompt, model=cfg.fast_llm_model):
     """Start an agent with a given name, task, and prompt"""
     global cfg

@@ -220,23 +224,4 @@ def delete_agent(key):
     result = agents.delete_agent(key)
     if not result:
         return f"Agent {key} does not exist."
-    return f"Agent {key} deleted."
-
-
-def navigate_website(action, username):
-    _text = "Navigating website with action " + action + " and username " + username
-    print(_text)
-    return "Command not implemented yet."
-
-
-def register_account(username, website):
-    _text = "Registering account with username " + \
-        username + " and website " + website
-    print(_text)
-    return "Command not implemented yet."
-
-
-def check_notifications(website):
-    _text = "Checking notifications from " + website
-    print(_text)
-    return "Command not implemented yet."
+    return f"Agent {key} deleted."
scripts/config.py
@@ -1,3 +1,9 @@
 import os
+import openai
+from dotenv import load_dotenv
+# Load environment variables from .env file
+load_dotenv()
+

 class Singleton(type):
     """
     Singleton metaclass for ensuring only one instance of a class.
@@ -23,6 +29,18 @@ class Config(metaclass=Singleton):
         """Initialize the configuration class."""
         self.continuous_mode = False
         self.speak_mode = False
+        # TODO - make these models be self-contained, using langchain, so we can configure them once and call it good
+        self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
+        self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
+        self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
+        self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
+
+        self.openai_api_key = os.getenv("OPENAI_API_KEY")
+        self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
+
+        # Initialize the OpenAI API client
+        openai.api_key = self.openai_api_key
+

     def set_continuous_mode(self, value: bool):
         """Set the continuous mode value."""
@@ -31,3 +49,24 @@ class Config(metaclass=Singleton):
     def set_speak_mode(self, value: bool):
         """Set the speak mode value."""
         self.speak_mode = value
+
+    def set_fast_llm_model(self, value: str):
+        self.fast_llm_model = value
+
+    def set_smart_llm_model(self, value: str):
+        self.smart_llm_model = value
+
+    def set_fast_token_limit(self, value: int):
+        self.fast_token_limit = value
+
+    def set_smart_token_limit(self, value: int):
+        self.smart_token_limit = value
+
+    def set_openai_api_key(self, value: str):
+        self.openai_api_key = value
+
+    def set_elevenlabs_api_key(self, value: str):
+        self.elevenlabs_api_key = value
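A quick illustration (not from the commit) of what the Singleton metaclass buys: every module-level Config() call yields the same instance, so a setting changed anywhere is visible everywhere:

    from config import Config

    a = Config()
    b = Config()
    assert a is b  # Singleton: both names bind the same object

    a.set_fast_llm_model("gpt-3.5-turbo-0301")
    print(b.fast_llm_model)  # "gpt-3.5-turbo-0301", shared state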
scripts/data.py
@@ -1,8 +1,16 @@
 import os
+from pathlib import Path
+

 def load_prompt():
     """Load the prompt from data/prompt.txt"""
     try:
+        # get directory of this file:
+        file_dir = Path(os.path.dirname(os.path.realpath(__file__)))
+        data_dir = file_dir / "data"
+        prompt_file = data_dir / "prompt.txt"
+
         # Load the prompt from data/prompt.txt
-        with open("data/prompt.txt", "r") as prompt_file:
+        with open(prompt_file, "r") as prompt_file:
             prompt = prompt_file.read()

         return prompt
scripts/data/prompt.txt
@@ -1,6 +1,6 @@
 CONSTRAINTS:

-1. 6000-word count limit for memory
+1. ~4000 word limit for memory. Your memory is short, so immediately save important information to long term memory and code to files.
 2. No user assistance

 COMMANDS:
@@ -18,9 +18,9 @@ COMMANDS:
 11. Read file: "read_file", args: "file": "<file>"
 12. Append to file: "append_to_file", args: "file": "<file>", "text": "<text>"
 13. Delete file: "delete_file", args: "file": "<file>"
-14. Evaluate Code: "evaluate_code", args: "code": "<code>"
-15. Get Improved Code: "improve_code", args: "suggestions": "<list_of_suggestions>", "code": "<string>"
-16. Write Tests: "write_tests", args: "code": "<string>", "focus": "<list_of_focus_areas>"
+14. Evaluate Code: "evaluate_code", args: "code": "<full_code_string>"
+15. Get Improved Code: "improve_code", args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"
+16. Write Tests: "write_tests", args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"
 17. Execute Python File: "execute_python_file", args: "file": "<file>"
 18. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
@@ -38,22 +38,24 @@ PERFORMANCE EVALUATION:
 3. Reflect on past decisions and strategies to refine your approach.
 4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.

 You should only respond in JSON format as described below

 RESPONSE FORMAT:
 {
-    "command":
-    {
-        "name": "command name",
-        "args":
-        {
-            "arg name": "value"
-        }
-    },
-    "thoughts":
-    {
-        "text": "thought",
-        "reasoning": "reasoning",
-        "plan": "short bulleted long-term plan",
-        "criticism": "constructive self-criticism"
-        "speak": "thoughts summary to say to user"
-    }
-}
+    "command": {
+        "name": "command name",
+        "args":{
+            "arg name": "value"
+        }
+    },
+    "thoughts":
+    {
+        "text": "thought",
+        "reasoning": "reasoning",
+        "plan": "- short bulleted\n- list that conveys\n- long-term plan",
+        "criticism": "constructive self-criticism",
+        "speak": "thoughts summary to say to user"
+    }
+}

 Ensure the response can be parsed by Python json.loads
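Since the prompt insists replies stay machine-readable, a quick illustrative check (not part of the commit) that a reply in the new RESPONSE FORMAT parses with Python's json.loads:

    import json

    reply = '''{
        "command": {"name": "google", "args": {"arg name": "value"}},
        "thoughts": {
            "text": "thought",
            "reasoning": "reasoning",
            "plan": "- short bulleted\\n- list that conveys\\n- long-term plan",
            "criticism": "constructive self-criticism",
            "speak": "thoughts summary to say to user"
        }
    }'''

    parsed = json.loads(reply)
    print(parsed["command"]["name"])  # google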
scripts/execute_code.py
@@ -6,6 +6,8 @@ def execute_python_file(file):
     """Execute a Python file in a Docker container and return the output"""
     workspace_folder = "auto_gpt_workspace"

+    print (f"Executing file '{file}' in workspace '{workspace_folder}'")
+
     if not file.endswith(".py"):
         return "Error: Invalid file type. Only .py files are allowed."

@@ -21,7 +23,7 @@ def execute_python_file(file):
         # You can find available Python images on Docker Hub:
         # https://hub.docker.com/_/python
         container = client.containers.run(
-            'python:3.8',
+            'python:3.10',
             f'python {file}',
             volumes={
                 os.path.abspath(workspace_folder): {
@@ -37,6 +39,9 @@ def execute_python_file(file):
         logs = container.logs().decode('utf-8')
         container.remove()

+        # print(f"Execution complete. Output: {output}")
+        # print(f"Logs: {logs}")
+
         return logs

     except Exception as e:
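For orientation, a minimal docker-py sketch of the same pattern this diff configures; hello.py, the bind path, and the read-only mount are illustrative assumptions, not the commit's exact options:

    import os
    import docker

    client = docker.from_env()
    # Run a workspace script inside python:3.10, as the updated code does;
    # with detach left off, run() returns the container's log output.
    logs = client.containers.run(
        'python:3.10',
        'python hello.py',
        volumes={os.path.abspath('auto_gpt_workspace'): {'bind': '/workspace', 'mode': 'ro'}},
        working_dir='/workspace',
        remove=True,
    )
    print(logs.decode('utf-8'))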
scripts/json_parser.py (new file, 77 lines)
@@ -0,0 +1,77 @@
+import dirtyjson
+from call_ai_function import call_ai_function
+from config import Config
+cfg = Config()
+
+def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
+    json_schema = """
+    {
+        "command": {
+            "name": "command name",
+            "args":{
+                "arg name": "value"
+            }
+        },
+        "thoughts":
+        {
+            "text": "thought",
+            "reasoning": "reasoning",
+            "plan": "- short bulleted\n- list that conveys\n- long-term plan",
+            "criticism": "constructive self-criticism",
+            "speak": "thoughts summary to say to user"
+        }
+    }
+    """
+
+    try:
+        return dirtyjson.loads(json_str)
+    except Exception as e:
+        # Let's do something manually - sometimes GPT responds with something BEFORE the braces:
+        # "I'm sorry, I don't understand. Please try again."{"text": "I'm sorry, I don't understand. Please try again.", "confidence": 0.0}
+        # So let's try to find the first brace and then parse the rest of the string
+        try:
+            brace_index = json_str.index("{")
+            json_str = json_str[brace_index:]
+            last_brace_index = json_str.rindex("}")
+            json_str = json_str[:last_brace_index+1]
+            return dirtyjson.loads(json_str)
+        except Exception as e:
+            if try_to_fix_with_gpt:
+                print(f"Warning: Failed to parse AI output, attempting to fix.\n If you see this warning frequently, it's likely that your prompt is confusing the AI. Try changing it up slightly.")
+                # Now try to fix this up using the ai_functions
+                ai_fixed_json = fix_json(json_str, json_schema, False)
+                if ai_fixed_json != "failed":
+                    return dirtyjson.loads(ai_fixed_json)
+                else:
+                    print(f"Failed to fix ai output, telling the AI.")  # This allows the AI to react to the error message, which usually results in it correcting its ways.
+                    return json_str
+            else:
+                raise e
+
+# TODO: Make debug a global config var
+def fix_json(json_str: str, schema: str, debug=False) -> str:
+    # Try to fix the JSON using gpt:
+    function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
+    args = [json_str, schema]
+    description_string = """Fixes the provided JSON string to make it parseable and fully compliant with the provided schema.\n If an object or field specified in the schema isn't contained within the correct JSON, it is omitted.\n This function is brilliant at guessing when the format is incorrect."""
+
+    # If it doesn't already start with a "`", add one:
+    if not json_str.startswith("`"):
+        json_str = "```json\n" + json_str + "\n```"
+    result_string = call_ai_function(
+        function_string, args, description_string, model=cfg.fast_llm_model
+    )
+    if debug:
+        print("------------ JSON FIX ATTEMPT ---------------")
+        print(f"Original JSON: {json_str}")
+        print("-----------")
+        print(f"Fixed JSON: {result_string}")
+        print("----------- END OF FIX ATTEMPT ----------------")
+    try:
+        return dirtyjson.loads(result_string)
+    except:
+        # Get the call stack:
+        # import traceback
+        # call_stack = traceback.format_exc()
+        # print(f"Failed to fix JSON: '{json_str}' "+call_stack)
+        return "failed"
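A hedged sketch (illustrative input, not from the commit) of the recovery path this parser gives the agent when GPT wraps its JSON in chatter:

    from json_parser import fix_and_parse_json

    # The parser first tries a straight parse, then trims to the outermost
    # braces, and only then asks the model itself to repair the JSON.
    raw_reply = 'Sure! Here you go: {"command": {"name": "google", "args": {"input": "news"}}}'
    parsed = fix_and_parse_json(raw_reply)
    print(parsed["command"]["name"])  # google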
scripts/keys.py (deleted)
@@ -1,6 +0,0 @@
-# This file contains the API keys for the various APIs used in the project.
-# Get yours from: https://beta.openai.com/account/api-keys
-OPENAI_API_KEY = "YOUR-OPENAI-KEY"
-# To access your ElevenLabs API key, head to https://elevenlabs.io, you
-# can view your xi-api-key using the 'Profile' tab on the website.
-ELEVENLABS_API_KEY = "YOUR-ELEVENLABS-KEY"
scripts/llm_utils.py (new file, 16 lines)
@@ -0,0 +1,16 @@
+import openai
+from config import Config
+cfg = Config()
+
+openai.api_key = cfg.openai_api_key
+
+# Overly simple abstraction until we create something better
+def create_chat_completion(messages, model=None, temperature=None, max_tokens=None)->str:
+    response = openai.ChatCompletion.create(
+        model=model,
+        messages=messages,
+        temperature=temperature,
+        max_tokens=max_tokens
+    )
+
+    return response.choices[0].message["content"]
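For context, a minimal sketch of the new helper in use (illustrative only; elsewhere in the commit the model argument comes from cfg.fast_llm_model or cfg.smart_llm_model):

    from llm_utils import create_chat_completion

    # The wrapper hides the OpenAI response object and returns plain text.
    reply = create_chat_completion(
        messages=[{"role": "user", "content": "Say hello in five words."}],
        model="gpt-3.5-turbo",
    )
    print(reply)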
scripts/main.py (170 changed lines)
@@ -11,14 +11,16 @@ import speak
 from enum import Enum, auto
 import sys
 from config import Config

+from json_parser import fix_and_parse_json
+from ai_config import AIConfig
+import traceback
+import yaml

 class Argument(Enum):
     """This class is used to define the different arguments that can be passed"""
     CONTINUOUS_MODE = "continuous-mode"
     SPEAK_MODE = "speak-mode"


 def print_to_console(
         title,
         title_color,
@@ -32,6 +34,8 @@ def print_to_console(
         speak.say_text(f"{title}. {content}")
     print(title_color + title + " " + Style.RESET_ALL, end="")
     if content:
+        if isinstance(content, list):
+            content = " ".join(content)
         words = content.split()
         for i, word in enumerate(words):
             print(word, end="", flush=True)
@@ -51,21 +55,24 @@ def print_assistant_thoughts(assistant_reply):
     global cfg
     try:
         # Parse and print Assistant response
-        assistant_reply_json = json.loads(assistant_reply)
+        assistant_reply_json = fix_and_parse_json(assistant_reply)

-        assistant_thoughts = assistant_reply_json.get("thoughts")
-        if assistant_thoughts:
-            assistant_thoughts_text = assistant_thoughts.get("text")
-            assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
-            assistant_thoughts_plan = assistant_thoughts.get("plan")
-            assistant_thoughts_criticism = assistant_thoughts.get("criticism")
-            assistant_thoughts_speak = assistant_thoughts.get("speak")
-        else:
-            assistant_thoughts_text = None
-            assistant_thoughts_reasoning = None
-            assistant_thoughts_plan = None
-            assistant_thoughts_criticism = None
-            assistant_thoughts_speak = None
+        try:
+            assistant_thoughts = assistant_reply_json.get("thoughts")
+            if assistant_thoughts:
+                assistant_thoughts_text = assistant_thoughts.get("text")
+                assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
+                assistant_thoughts_plan = assistant_thoughts.get("plan")
+                assistant_thoughts_criticism = assistant_thoughts.get("criticism")
+                assistant_thoughts_speak = assistant_thoughts.get("speak")
+            else:
+                assistant_thoughts_text = None
+                assistant_thoughts_reasoning = None
+                assistant_thoughts_plan = None
+                assistant_thoughts_criticism = None
+                assistant_thoughts_speak = None
+        except Exception as e:
+            assistant_thoughts_text = "The AI's response was unreadable."

         print_to_console(
             f"{ai_name.upper()} THOUGHTS:",
@@ -78,8 +85,13 @@ def print_assistant_thoughts(assistant_reply):
         if assistant_thoughts_plan:
             print_to_console("PLAN:", Fore.YELLOW, "")
-            if assistant_thoughts_plan:
-
-                # Split the input_string using the newline character and dash
+            # If it's a list, join it into a string
+            if isinstance(assistant_thoughts_plan, list):
+                assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
+            elif isinstance(assistant_thoughts_plan, dict):
+                assistant_thoughts_plan = str(assistant_thoughts_plan)
+            # Split the input_string using the newline character and dash
+
             lines = assistant_thoughts_plan.split('\n')

             # Iterate through the lines and print each one with a bullet
@@ -101,12 +113,94 @@ def print_assistant_thoughts(assistant_reply):
         print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
     # All other errors, return "Error: + error message"
     except Exception as e:
-        print_to_console("Error: \n", Fore.RED, str(e))
+        call_stack = traceback.format_exc()
+        print_to_console("Error: \n", Fore.RED, call_stack)
+
+
+def load_variables(config_file="config.yaml"):
+    # Load variables from yaml file if it exists
+    try:
+        with open(config_file) as file:
+            config = yaml.load(file, Loader=yaml.FullLoader)
+        ai_name = config.get("ai_name")
+        ai_role = config.get("ai_role")
+        ai_goals = config.get("ai_goals")
+    except FileNotFoundError:
+        ai_name = ""
+        ai_role = ""
+        ai_goals = []
+
+    # Prompt the user for input if config file is missing or empty values
+    if not ai_name:
+        ai_name = input("Name your AI: ")
+        if ai_name == "":
+            ai_name = "Entrepreneur-GPT"
+
+    if not ai_role:
+        ai_role = input(f"{ai_name} is: ")
+        if ai_role == "":
+            ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
+
+    if not ai_goals:
+        print("Enter up to 5 goals for your AI: ")
+        print("For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
+        print("Enter nothing to load defaults, enter nothing when finished.")
+        ai_goals = []
+        for i in range(5):
+            ai_goal = input(f"Goal {i+1}: ")
+            if ai_goal == "":
+                break
+            ai_goals.append(ai_goal)
+        if len(ai_goals) == 0:
+            ai_goals = ["Increase net worth", "Grow Twitter Account", "Develop and manage multiple businesses autonomously"]
+
+    # Save variables to yaml file
+    config = {"ai_name": ai_name, "ai_role": ai_role, "ai_goals": ai_goals}
+    with open(config_file, "w") as file:
+        documents = yaml.dump(config, file)
+
+    prompt = data.load_prompt()
+    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
+
+    # Construct full prompt
+    full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
+    for i, goal in enumerate(ai_goals):
+        full_prompt += f"{i+1}. {goal}\n"
+
+    full_prompt += f"\n\n{prompt}"
+    return full_prompt
+
+
 def construct_prompt():
-    """Constructs the prompt for the AI"""
+    """Construct the prompt for the AI to respond to"""
+    config = AIConfig.load()
+    if config.ai_name:
+        print_to_console(
+            f"Welcome back! ",
+            Fore.GREEN,
+            f"Would you like me to return to being {config.ai_name}?",
+            speak_text=True)
+        should_continue = input(f"""Continue with the last settings?
+Name: {config.ai_name}
+Role: {config.ai_role}
+Goals: {config.ai_goals}
+Continue (y/n): """)
+        if should_continue.lower() == "n":
+            config = AIConfig()
+
+    if not config.ai_name:
+        config = prompt_user()
+        config.save()
+
+    # Get rid of this global:
+    global ai_name
+    ai_name = config.ai_name
+
+    full_prompt = config.construct_full_prompt()
+    return full_prompt
+
+
+def prompt_user():
     ai_name = ""
     # Construct the prompt
     print_to_console(
         "Welcome to Auto-GPT! ",
@@ -142,7 +236,7 @@ def construct_prompt():
     print_to_console(
         "Enter up to 5 goals for your AI: ",
         Fore.GREEN,
-        "For example: \nIncrease net worth \nGrow Twitter Account \nDevelop and manage multiple businesses autonomously'")
+        "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
     print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
     ai_goals = []
     for i in range(5):
@@ -154,19 +248,8 @@ def construct_prompt():
         ai_goals = ["Increase net worth", "Grow Twitter Account",
                     "Develop and manage multiple businesses autonomously"]

-    prompt = data.load_prompt()
-    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
-
-    # Construct full prompt
-    full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
-    for i, goal in enumerate(ai_goals):
-        full_prompt += f"{i+1}. {goal}\n"
-
-    full_prompt += f"\n\n{prompt}"
-    return full_prompt
-
-# Check if the python script was executed with arguments, get those arguments
+    config = AIConfig(ai_name, ai_role, ai_goals)
+    return config

 def parse_arguments():
     """Parses the arguments passed to the script"""
@@ -186,16 +269,20 @@ def parse_arguments():
         cfg.set_speak_mode(True)


-cfg = Config()
-
 # TODO: Better argument parsing:
+# TODO: fill in llm values here
+
+cfg = Config()
 parse_arguments()
 ai_name = ""
 prompt = construct_prompt()
 # print(prompt)
 # Initialize variables
 full_message_history = []
-token_limit = 6000  # The maximum number of tokens allowed in the API call
 result = None
-user_input = "NEXT COMMAND"
+# Make a constant:
+user_input = "Determine which next command to use, and respond using the format specified above:"

 # Interaction Loop
 while True:
@@ -206,8 +293,9 @@ while True:
         user_input,
         full_message_history,
         mem.permanent_memory,
-        token_limit)
+        cfg.fast_token_limit)  # TODO: This hardcodes the model to use GPT3.5. Make this an argument

+    # print("assistant reply: "+assistant_reply)
     # Print Assistant thoughts
     print_assistant_thoughts(assistant_reply)
@@ -232,7 +320,7 @@ while True:
     while True:
         console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
         if console_input.lower() == "y":
-            user_input = "NEXT COMMAND"
+            user_input = "GENERATE NEXT COMMAND JSON"
             break
         elif console_input.lower() == "n":
             user_input = "EXIT"
@@ -240,7 +328,7 @@ while True:
     else:
         continue

-    if user_input != "NEXT COMMAND":
+    if user_input != "GENERATE NEXT COMMAND JSON":
         print("Exiting...", flush=True)
         break
@@ -255,7 +343,7 @@ while True:
         Fore.CYAN,
         f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")

-    # Exectute command
+    # Execute command
     if command_name.lower() != "error":
         result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
     else:
requirements.txt
@@ -1,8 +0,0 @@
-beautifulsoup4==4.9.3
-colorama==0.4.6
-googlesearch_python==1.1.0
-openai==0.27.0
-playsound==1.2.2
-readability_lxml==0.8.1
-requests==2.25.1
-docker==6.0.1
scripts/speak.py
@@ -1,16 +1,17 @@
 import os
 from playsound import playsound
 import requests
-import keys
+from config import Config
+cfg = Config()

 # TODO: Nicer names for these ids
 voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]

 tts_headers = {
     "Content-Type": "application/json",
-    "xi-api-key": keys.ELEVENLABS_API_KEY
+    "xi-api-key": cfg.elevenlabs_api_key
 }


 def say_text(text, voice_index=0):
     """Say text using ElevenLabs API"""
     tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
scripts/token_counter.py (new file, 57 lines)
@@ -0,0 +1,57 @@
+import tiktoken
+from typing import List, Dict
+
+def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5-turbo-0301") -> int:
+    """
+    Returns the number of tokens used by a list of messages.
+
+    Args:
+    messages (list): A list of messages, each of which is a dictionary containing the role and content of the message.
+    model (str): The name of the model to use for tokenization. Defaults to "gpt-3.5-turbo-0301".
+
+    Returns:
+    int: The number of tokens used by the list of messages.
+    """
+    try:
+        encoding = tiktoken.encoding_for_model(model)
+    except KeyError:
+        print("Warning: model not found. Using cl100k_base encoding.")
+        encoding = tiktoken.get_encoding("cl100k_base")
+    if model == "gpt-3.5-turbo":
+        # !Note: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301.")
+        return count_message_tokens(messages, model="gpt-3.5-turbo-0301")
+    elif model == "gpt-4":
+        # !Note: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.")
+        return count_message_tokens(messages, model="gpt-4-0314")
+    elif model == "gpt-3.5-turbo-0301":
+        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
+        tokens_per_name = -1  # if there's a name, the role is omitted
+    elif model == "gpt-4-0314":
+        tokens_per_message = 3
+        tokens_per_name = 1
+    else:
+        raise NotImplementedError(f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
+    num_tokens = 0
+    for message in messages:
+        num_tokens += tokens_per_message
+        for key, value in message.items():
+            num_tokens += len(encoding.encode(value))
+            if key == "name":
+                num_tokens += tokens_per_name
+    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
+    return num_tokens
+
+
+def count_string_tokens(string: str, model_name: str) -> int:
+    """
+    Returns the number of tokens in a text string.
+
+    Args:
+    string (str): The text string.
+    model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo")
+
+    Returns:
+    int: The number of tokens in the text string.
+    """
+    encoding = tiktoken.encoding_for_model(model_name)
+    num_tokens = len(encoding.encode(string))
+    return num_tokens
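A small sketch (not in the commit) of how chat.py's budget loop consumes these counters; the 4000-token limit mirrors the FAST_TOKEN_LIMIT default in config.py:

    import token_counter

    messages = [
        {"role": "system", "content": "You are Entrepreneur-GPT."},
        {"role": "user", "content": "Determine which next command to use."},
    ]

    used = token_counter.count_message_tokens(messages, model="gpt-3.5-turbo")
    # chat.py reserves 1000 tokens for the reply, so the send budget is
    # token_limit - 1000; older history beyond that budget is dropped.
    print(f"{used} tokens used; {4000 - 1000 - used} left for history under a 4000-token limit.")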