Mirror of https://github.com/Significant-Gravitas/AutoGPT.git

Merge branch 'master' into security-and-robustness-improvements
@@ -1,6 +1,6 @@
import yaml
import data
import os

class AIConfig:
    def __init__(self, ai_name="", ai_role="", ai_goals=[]):

@@ -9,7 +9,7 @@ class AIConfig:
        self.ai_goals = ai_goals

    # Soon this will go in a folder where it remembers more stuff about the run(s)
    SAVE_FILE = "last_run_ai_settings.yaml"
    SAVE_FILE = os.path.join(os.path.dirname(__file__), '..', 'ai_settings.yaml')

    @classmethod
    def load(cls, config_file=SAVE_FILE):
@@ -21,7 +21,7 @@ def sanitize_url(url):
# Function to make a request with a specified timeout and handle exceptions
def make_request(url, timeout=10):
    try:
        response = requests.get(url, timeout=timeout)
        response = requests.get(url, headers=cfg.user_agent_header, timeout=timeout)
        response.raise_for_status()
        return response
    except requests.exceptions.RequestException as e:

@@ -69,7 +69,7 @@ def format_hyperlinks(hyperlinks):
def scrape_links(url):
    response = requests.get(url)
    response = requests.get(url, headers=cfg.user_agent_header)

    # Check if the response contains an HTTP error
    if response.status_code >= 400:
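For reference, a minimal, self-contained sketch of the hardened request pattern introduced above: a browser-like User-Agent header plus a timeout, with HTTP errors surfaced as exceptions. The header value and helper name here are illustrative, not taken from the repository.

import requests

USER_AGENT_HEADER = {"User-Agent": "Mozilla/5.0"}  # illustrative placeholder value

def fetch(url, timeout=10):
    # Send a User-Agent so sites that reject anonymous clients still respond,
    # and bound the request time so a hung server cannot stall the agent.
    try:
        response = requests.get(url, headers=USER_AGENT_HEADER, timeout=timeout)
        response.raise_for_status()  # turn 4xx/5xx status codes into exceptions
        return response
    except requests.exceptions.RequestException as error:
        return "Error: " + str(error)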
@@ -23,6 +23,22 @@ def create_chat_message(role, content):
    return {"role": role, "content": content}


def generate_context(prompt, relevant_memory, full_message_history, model):
    current_context = [
        create_chat_message(
            "system", prompt),
        create_chat_message(
            "system", f"The current time and date is {time.strftime('%c')}"),
        create_chat_message(
            "system", f"This reminds you of these events from your past:\n{relevant_memory}\n\n")]

    # Add messages from the full message history until we reach the token limit
    next_message_to_add_index = len(full_message_history) - 1
    insertion_index = len(current_context)
    # Count the currently used tokens
    current_tokens_used = token_counter.count_message_tokens(current_context, model)
    return next_message_to_add_index, current_tokens_used, insertion_index, current_context


# TODO: Change debug from hardcode to argument
def chat_with_ai(

@@ -41,7 +57,7 @@ def chat_with_ai(
        prompt (str): The prompt explaining the rules to the AI.
        user_input (str): The input from the user.
        full_message_history (list): The list of all messages sent between the user and the AI.
        permanent_memory (list): The list of items in the AI's permanent memory.
        permanent_memory (Obj): The memory object containing the permanent memory.
        token_limit (int): The maximum number of tokens allowed in the API call.

    Returns:

@@ -53,18 +69,20 @@ def chat_with_ai(
        print(f"Token limit: {token_limit}")
        send_token_limit = token_limit - 1000

        current_context = [
            create_chat_message(
                "system", prompt), create_chat_message(
                "system", f"Permanent memory: {permanent_memory}")]
        relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)

        # Add messages from the full message history until we reach the token limit
        next_message_to_add_index = len(full_message_history) - 1
        current_tokens_used = 0
        insertion_index = len(current_context)
        if debug:
            print('Memory Stats: ', permanent_memory.get_stats())

        next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
            prompt, relevant_memory, full_message_history, model)

        while current_tokens_used > 2500:
            # remove memories until we are under 2500 tokens
            relevant_memory = relevant_memory[1:]
            next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
                prompt, relevant_memory, full_message_history, model)

        # Count the currently used tokens
        current_tokens_used = token_counter.count_message_tokens(current_context, model)
        current_tokens_used += token_counter.count_message_tokens([create_chat_message("user", user_input)], model)  # Account for user input (appended later)

        while next_message_to_add_index >= 0:

@@ -80,7 +98,7 @@ def chat_with_ai(
            # Count the currently used tokens
            current_tokens_used += tokens_to_add

            # Move to the next most recent message in the full message history
            next_message_to_add_index -= 1
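A self-contained sketch of the memory-trimming idea in the `while current_tokens_used > 2500` loop above: rebuild the context, measure it, and drop the oldest relevant memory until it fits the budget. The word-count stand-in below is only for illustration; the repository uses token_counter.count_message_tokens for real token counts.

def count_tokens(messages):
    # Crude stand-in for token_counter.count_message_tokens: real code should
    # count model tokens (e.g. with tiktoken), not whitespace-separated words.
    return sum(len(m["content"].split()) for m in messages)

def build_context(prompt, relevant_memory):
    return [
        {"role": "system", "content": prompt},
        {"role": "system", "content": "This reminds you of these events from your past:\n" + "\n".join(relevant_memory)},
    ]

def trim_memory_to_budget(prompt, relevant_memory, budget=2500):
    # Drop the oldest relevant memory until the assembled context fits the budget.
    context = build_context(prompt, relevant_memory)
    while count_tokens(context) > budget and relevant_memory:
        relevant_memory = relevant_memory[1:]
        context = build_context(prompt, relevant_memory)
    return context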
@@ -1,14 +1,15 @@
import browse
import json
import memory as mem
from memory import get_memory
import datetime
import agent_manager as agents
import speak
from config import Config
import ai_functions as ai
from file_operations import read_file, write_to_file, append_to_file, delete_file
from file_operations import read_file, write_to_file, append_to_file, delete_file, search_files
from execute_code import execute_python_file
from json_parser import fix_and_parse_json
from image_gen import generate_image
from duckduckgo_search import ddg
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError

@@ -52,9 +53,11 @@ def get_command(response):
def execute_command(command_name, arguments):
    memory = get_memory(cfg)

    try:
        if command_name == "google":

            # Check if the Google API key is set and use the official search method
            # If the API key is not set or has only whitespaces, use the unofficial search method
            if cfg.google_api_key and (cfg.google_api_key.strip() if cfg.google_api_key else None):

@@ -62,11 +65,7 @@ def execute_command(command_name, arguments):
            else:
                return google_search(arguments["input"])
        elif command_name == "memory_add":
            return commit_memory(arguments["string"])
        elif command_name == "memory_del":
            return delete_memory(arguments["key"])
        elif command_name == "memory_ovr":
            return overwrite_memory(arguments["key"], arguments["string"])
            return memory.add(arguments["string"])
        elif command_name == "start_agent":
            return start_agent(
                arguments["name"],

@@ -90,6 +89,8 @@ def execute_command(command_name, arguments):
            return append_to_file(arguments["file"], arguments["text"])
        elif command_name == "delete_file":
            return delete_file(arguments["file"])
        elif command_name == "search_files":
            return search_files(arguments["directory"])
        elif command_name == "browse_website":
            return browse_website(arguments["url"], arguments["question"])
        # TODO: Change these to take in a file rather than pasted code, if

@@ -103,10 +104,12 @@ def execute_command(command_name, arguments):
            return ai.write_tests(arguments["code"], arguments.get("focus"))
        elif command_name == "execute_python_file":  # Add this command
            return execute_python_file(arguments["file"])
        elif command_name == "generate_image":
            return generate_image(arguments["prompt"])
        elif command_name == "task_complete":
            shutdown()
        else:
            return f"Unknown command {command_name}"
            return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for availabe commands and only respond in the specified JSON format."
    # All errors, return "Error: + error message"
    except Exception as e:
        return "Error: " + str(e)

@@ -218,7 +221,7 @@ def overwrite_memory(key, string):
    elif isinstance(key, str):
        _text = "Overwriting memory with key " + key + " and string " + string
        # Overwrite the memory slot with the given string key and string
        mem.string_key_memory[key] = string
        mem.permanent_memory[key] = string
        print(_text)
        return _text
    else:

@@ -256,18 +259,20 @@ def start_agent(name, task, prompt, model=cfg.fast_llm_model):
def message_agent(key, message):
    global cfg

    # Check if the key is a valid integer
    if not is_valid_int(key):
        return "Invalid key, cannot message agent."

    agent_response = agents.message_agent(int(key), message)
    if is_valid_int(key):
        agent_response = agents.message_agent(int(key), message)
    # Check if the key is a valid string
    elif isinstance(key, str):
        agent_response = agents.message_agent(key, message)
    else:
        return "Invalid key, must be an integer or a string."

    # Speak response
    if cfg.speak_mode:
        speak.say_text(agent_response, 1)

    return f"Agent {key} responded: {agent_response}"
    return agent_response


def list_agents():
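A small, self-contained sketch of the key handling message_agent gains above, where a key may arrive as an integer, a numeric string, or a plain name; the helper names are illustrative, not from the repository.

def is_valid_int(value):
    # Mirrors the is_valid_int check used above: accept anything int() can parse.
    try:
        int(value)
        return True
    except (ValueError, TypeError):
        return False

def resolve_agent_key(key):
    # Normalise a key that may arrive as 3, "3", or a plain string name.
    if is_valid_int(key):
        return int(key)
    if isinstance(key, str):
        return key
    return None  # caller reports "Invalid key, must be an integer or a string."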
@@ -1,10 +1,12 @@
import abc
import os
import openai
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()


class Singleton(type):
class Singleton(abc.ABCMeta, type):
    """
    Singleton metaclass for ensuring only one instance of a class.
    """

@@ -19,12 +21,17 @@ class Singleton(type):
        return cls._instances[cls]


class AbstractSingleton(abc.ABC, metaclass=Singleton):
    pass


class Config(metaclass=Singleton):
    """
    Configuration class to store the state of bools for different scripts access.
    """

    def __init__(self):
        self.debug = False
        self.continuous_mode = False
        self.speak_mode = False
        # TODO - make these models be self-contained, using langchain, so we can configure them once and call it good

@@ -34,11 +41,38 @@ class Config(metaclass=Singleton):
        self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))

        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.use_azure = False
        self.use_azure = os.getenv("USE_AZURE") == 'True'
        if self.use_azure:
            self.openai_api_base = os.getenv("OPENAI_API_BASE")
            self.openai_api_version = os.getenv("OPENAI_API_VERSION")
            self.openai_deployment_id = os.getenv("OPENAI_DEPLOYMENT_ID")
            openai.api_type = "azure"
            openai.api_base = self.openai_api_base
            openai.api_version = self.openai_api_version

        self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")

        self.google_api_key = os.getenv("GOOGLE_API_KEY")
        self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")

        self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
        self.pinecone_region = os.getenv("PINECONE_ENV")

        self.image_provider = os.getenv("IMAGE_PROVIDER")
        self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")

        # User agent headers to use when browsing web
        # Some websites might just completely deny request with an error code if no user agent was found.
        self.user_agent_header = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
        self.redis_host = os.getenv("REDIS_HOST", "localhost")
        self.redis_port = os.getenv("REDIS_PORT", "6379")
        self.redis_password = os.getenv("REDIS_PASSWORD", "")
        self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == 'True'
        self.memory_index = os.getenv("MEMORY_INDEX", 'auto-gpt')
        # Note that indexes must be created on db 0 in redis, this is not configureable.

        self.memory_backend = os.getenv("MEMORY_BACKEND", 'local')
        # Initialize the OpenAI API client
        openai.api_key = self.openai_api_key

@@ -70,4 +104,13 @@ class Config(metaclass=Singleton):
        self.google_api_key = value

    def set_custom_search_engine_id(self, value: str):
        self.custom_search_engine_id = value

    def set_pinecone_api_key(self, value: str):
        self.pinecone_api_key = value

    def set_pinecone_region(self, value: str):
        self.pinecone_region = value

    def set_debug_mode(self, value: bool):
        self.debug = value
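A self-contained sketch of the environment-driven configuration pattern the Config class follows above: strings, integers, and booleans read via os.getenv, with the Azure-specific fields consulted only when USE_AZURE is set. The environment variable names match the diff; the dataclass wrapper is only illustrative.

import os
from dataclasses import dataclass

@dataclass
class AzureSettings:
    api_base: str = ""
    api_version: str = ""
    deployment_id: str = ""

def load_settings():
    # Booleans arrive as the literal string 'True'; integers need an explicit int().
    use_azure = os.getenv("USE_AZURE") == "True"
    smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
    azure = None
    if use_azure:
        azure = AzureSettings(
            api_base=os.getenv("OPENAI_API_BASE", ""),
            api_version=os.getenv("OPENAI_API_VERSION", ""),
            deployment_id=os.getenv("OPENAI_DEPLOYMENT_ID", ""),
        )
    return use_azure, smart_token_limit, azure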
@@ -1,15 +1,14 @@
import os
from pathlib import Path
SRC_DIR = Path(__file__).parent

def load_prompt():
    try:
        # get directory of this file:
        file_dir = Path(os.path.dirname(os.path.realpath(__file__)))
        data_dir = file_dir / "data"
        prompt_file = data_dir / "prompt.txt"
        # Load the promt from data/prompt.txt
        with open(SRC_DIR / "data/prompt.txt", "r") as prompt_file:
        file_dir = Path(__file__).parent
        prompt_file_path = file_dir / "data" / "prompt.txt"

        # Load the prompt from data/prompt.txt
        with open(prompt_file_path, "r") as prompt_file:
            prompt = prompt_file.read()

        return prompt
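A brief standalone illustration of why the rewritten load_prompt anchors on the module's own directory rather than the process working directory; the relative path matches the one used above, the function name is illustrative.

from pathlib import Path

def load_text_relative_to_module(relative_parts=("data", "prompt.txt")):
    # Path(__file__).parent is the directory containing this module, so the
    # lookup succeeds no matter where the interpreter was launched from.
    file_dir = Path(__file__).parent
    return file_dir.joinpath(*relative_parts).read_text()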
@@ -1,17 +1,15 @@
CONSTRAINTS:

1. ~4000 word limit for memory. Your memory is short, so immediately save important information to long term memory and code to files.
2. No user assistance
3. Exclusively use the commands listed in double quotes e.g. "command name"
1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.
2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.
3. No user assistance
4. Exclusively use the commands listed in double quotes e.g. "command name"

COMMANDS:

1. Google Search: "google", args: "input": "<search>"
2. Memory Add: "memory_add", args: "string": "<string>"
3. Memory Delete: "memory_del", args: "key": "<key>"
4. Memory Overwrite: "memory_ovr", args: "key": "<key>", "string": "<string>"
5. Browse Website: "browse_website", args: "url": "<url>", "question": "<what_you_want_to_find_on_website>"
6. Start GPT Agent: "start_agent", args: "name": <name>, "task": "<short_task_desc>", "prompt": "<prompt>"
6. Start GPT Agent: "start_agent", args: "name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"
7. Message GPT Agent: "message_agent", args: "key": "<key>", "message": "<message>"
8. List GPT Agents: "list_agents", args: ""
9. Delete GPT Agent: "delete_agent", args: "key": "<key>"

@@ -19,11 +17,13 @@ COMMANDS:
11. Read file: "read_file", args: "file": "<file>"
12. Append to file: "append_to_file", args: "file": "<file>", "text": "<text>"
13. Delete file: "delete_file", args: "file": "<file>"
14. Evaluate Code: "evaluate_code", args: "code": "<full _code_string>"
15. Get Improved Code: "improve_code", args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"
16. Write Tests: "write_tests", args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"
17. Execute Python File: "execute_python_file", args: "file": "<file>"
18. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
14. Search Files: "search_files", args: "directory": "<directory>"
15. Evaluate Code: "evaluate_code", args: "code": "<full_code_string>"
16. Get Improved Code: "improve_code", args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"
17. Write Tests: "write_tests", args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"
18. Execute Python File: "execute_python_file", args: "file": "<file>"
19. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
20. Generate Image: "generate_image", args: "prompt": "<prompt>"

RESOURCES:

@@ -43,12 +43,6 @@ You should only respond in JSON format as described below

RESPONSE FORMAT:
{
    "command": {
        "name": "command name",
        "args":{
            "arg name": "value"
        }
    },
    "thoughts":
    {
        "text": "thought",

@@ -56,6 +50,12 @@ RESPONSE FORMAT:
        "plan": "- short bulleted\n- list that conveys\n- long-term plan",
        "criticism": "constructive self-criticism",
        "speak": "thoughts summary to say to user"
    },
    "command": {
        "name": "command name",
        "args":{
            "arg name": "value"
        }
    }
}
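To make the reordered schema concrete, here is a hypothetical response following the format above; only keys visible in this diff are shown, and all values are invented for illustration.

{
    "thoughts":
    {
        "text": "I should research the topic before writing anything to disk.",
        "plan": "- google the topic\n- summarise the findings\n- write the summary to a file",
        "criticism": "Keep the search query specific to save tokens.",
        "speak": "I will research the topic and then save a short summary."
    },
    "command": {
        "name": "google",
        "args": {
            "input": "current state of the topic"
        }
    }
}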
@@ -58,3 +58,20 @@ def delete_file(filename):
        return "File deleted successfully."
    except Exception as e:
        return "Error: " + str(e)

def search_files(directory):
    found_files = []

    if directory == "" or directory == "/":
        search_directory = working_directory
    else:
        search_directory = safe_join(working_directory, directory)

    for root, _, files in os.walk(search_directory):
        for file in files:
            if file.startswith('.'):
                continue
            relative_path = os.path.relpath(os.path.join(root, file), working_directory)
            found_files.append(relative_path)

    return found_files
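search_files relies on a safe_join helper that is not shown in this diff. A plausible implementation of that kind of path-containment check is sketched below; this is an assumption about its behaviour, not the repository's actual code.

import os

def safe_join(base, *paths):
    # Join the requested path onto the workspace base, then verify the result is
    # still inside the base so "../" sequences cannot escape the workspace.
    base = os.path.abspath(base)
    joined = os.path.abspath(os.path.join(base, *paths))
    if joined != base and not joined.startswith(base + os.sep):
        raise ValueError("Attempted to access a path outside of the working directory.")
    return joined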
scripts/image_gen.py (Normal file, 57 lines)
@@ -0,0 +1,57 @@
import requests
import io
import os.path
from PIL import Image
from config import Config
import uuid
import openai
from base64 import b64decode

cfg = Config()

working_directory = "auto_gpt_workspace"

def generate_image(prompt):

    filename = str(uuid.uuid4()) + ".jpg"

    # DALL-E
    if cfg.image_provider == 'dalle':

        openai.api_key = cfg.openai_api_key

        response = openai.Image.create(
            prompt=prompt,
            n=1,
            size="256x256",
            response_format="b64_json",
        )

        print("Image Generated for prompt:" + prompt)

        image_data = b64decode(response["data"][0]["b64_json"])

        with open(working_directory + "/" + filename, mode="wb") as png:
            png.write(image_data)

        return "Saved to disk:" + filename

    # STABLE DIFFUSION
    elif cfg.image_provider == 'sd':

        API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
        headers = {"Authorization": "Bearer " + cfg.huggingface_api_token}

        response = requests.post(API_URL, headers=headers, json={
            "inputs": prompt,
        })

        image = Image.open(io.BytesIO(response.content))
        print("Image Generated for prompt:" + prompt)

        image.save(os.path.join(working_directory, filename))

        return "Saved to disk:" + filename

    else:
        return "No Image Provider Set"
@@ -24,6 +24,7 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
    """

    try:
        json_str = json_str.replace('\t', '')
        return json.loads(json_str)
    except Exception as e:
        # Let's do something manually - sometimes GPT responds with something BEFORE the braces:

@@ -39,7 +40,7 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
        if try_to_fix_with_gpt:
            print(f"Warning: Failed to parse AI output, attempting to fix.\n If you see this warning frequently, it's likely that your prompt is confusing the AI. Try changing it up slightly.")
            # Now try to fix this up using the ai_functions
            ai_fixed_json = fix_json(json_str, json_schema, False)
            ai_fixed_json = fix_json(json_str, json_schema, cfg.debug)
            if ai_fixed_json != "failed":
                return json.loads(ai_fixed_json)
            else:

@@ -51,7 +52,7 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
def fix_json(json_str: str, schema: str, debug=False) -> str:
    # Try to fix the JSON using gpt:
    function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
    args = [json_str, schema]
    args = [f"'''{json_str}'''", f"'''{schema}'''"]
    description_string = """Fixes the provided JSON string to make it parseable and fully complient with the provided schema.\n If an object or field specifed in the schema isn't contained within the correct JSON, it is ommited.\n This function is brilliant at guessing when the format is incorrect."""

    # If it doesn't already start with a "`", add one:

@@ -67,7 +68,8 @@ def fix_json(json_str: str, schema: str, debug=False) -> str:
        print(f"Fixed JSON: {result_string}")
        print("----------- END OF FIX ATTEMPT ----------------")
    try:
        return json.loads(result_string)
        json.loads(result_string)  # just check the validity
        return result_string
    except:
        # Get the call stack:
        # import traceback
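The comment above notes that the model sometimes emits text before the braces. A small, self-contained sketch of that manual salvage step follows; it illustrates the idea rather than the repository's exact implementation.

import json

def parse_json_ignoring_surrounding_text(raw: str):
    # Keep only the outermost {...} span, then parse it; returns None if no
    # balanced-looking object is present or the slice still is not valid JSON.
    start = raw.find("{")
    end = raw.rfind("}")
    if start == -1 or end == -1 or end < start:
        return None
    try:
        return json.loads(raw[start:end + 1])
    except json.JSONDecodeError:
        return None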
@@ -6,11 +6,20 @@ openai.api_key = cfg.openai_api_key

# Overly simple abstraction until we create something better
def create_chat_completion(messages, model=None, temperature=None, max_tokens=None) -> str:
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens
    )
    if cfg.use_azure:
        response = openai.ChatCompletion.create(
            deployment_id=cfg.openai_deployment_id,
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens
        )
    else:
        response = openai.ChatCompletion.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens
        )

    return response.choices[0].message["content"]
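A short usage sketch for the wrapper above, assuming the repository's Config is already loaded; the model name and prompt text are placeholders.

from llm_utils import create_chat_completion

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarise the last run in one sentence."},
]
# When cfg.use_azure is set, the wrapper passes deployment_id=cfg.openai_deployment_id for you.
reply = create_chat_completion(messages, model="gpt-3.5-turbo", temperature=0, max_tokens=100)
print(reply)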
@@ -1,7 +1,7 @@
import json
import random
import commands as cmd
import memory as mem
from memory import get_memory
import data
import chat
from colorama import Fore, Style

@@ -266,6 +266,10 @@ def parse_arguments():
        print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
        cfg.set_smart_llm_model(cfg.fast_llm_model)

    if args.debug:
        print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
        cfg.set_debug_mode(True)


# TODO: fill in llm values here

@@ -277,9 +281,15 @@ prompt = construct_prompt()
# Initialize variables
full_message_history = []
result = None
next_action_count = 0
# Make a constant:
user_input = "Determine which next command to use, and respond using the format specified above:"

# Initialize memory and make sure it is empty.
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(cfg, init=True)
print('Using memory of type: ' + memory.__class__.__name__)

# Interaction Loop
while True:
    # Send message to AI, get response

@@ -288,10 +298,9 @@ while True:
        prompt,
        user_input,
        full_message_history,
        mem.permanent_memory,
        cfg.fast_token_limit)  # TODO: This hardcodes the model to use GPT3.5. Make this an argument
        memory,
        cfg.fast_token_limit, cfg.debug)  # TODO: This hardcodes the model to use GPT3.5. Make this an argument

    # print("assistant reply: "+assistant_reply)
    # Print Assistant thoughts
    print_assistant_thoughts(assistant_reply)

@@ -301,7 +310,7 @@ while True:
    except Exception as e:
        print_to_console("Error: \n", Fore.RED, str(e))

    if not cfg.continuous_mode:
    if not cfg.continuous_mode and next_action_count == 0:
        ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
        # Get key press: Prompt the user to press enter to continue or escape
        # to exit

@@ -311,13 +320,21 @@ while True:
            Fore.CYAN,
            f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
        print(
            f"Enter 'y' to authorise command or 'n' to exit program, or enter feedback for {ai_name}...",
            f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {ai_name}...",
            flush=True)
        while True:
            console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
            if console_input.lower() == "y":
                user_input = "GENERATE NEXT COMMAND JSON"
                break
            elif console_input.lower().startswith("y -"):
                try:
                    next_action_count = abs(int(console_input.split(" ")[1]))
                    user_input = "GENERATE NEXT COMMAND JSON"
                except ValueError:
                    print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.")
                    continue
                break
            elif console_input.lower() == "n":
                user_input = "EXIT"
                break

@@ -342,12 +359,20 @@ while True:
        f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")

    # Execute command
    if command_name.lower() == "error":
    if command_name.lower().startswith( "error" ):
        result = f"Command {command_name} threw the following error: " + arguments
    elif command_name == "human_feedback":
        result = f"Human feedback: {user_input}"
    else:
        result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
        if next_action_count > 0:
            next_action_count -= 1

    memory_to_add = f"Assistant Reply: {assistant_reply} " \
                    f"\nResult: {result} " \
                    f"\nHuman Feedback: {user_input} "

    memory.add(memory_to_add)

    # Check if there's a result from the command append it to the message
    # history
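A self-contained sketch of the new console grammar above ('y', 'y -N', 'n', or free-form feedback), factored into a small parser for clarity; the function name is illustrative and invalid 'y -N' input raises so a caller can re-prompt.

def parse_console_input(console_input):
    # Returns (user_input, next_action_count) following the rules in the loop above.
    text = console_input.strip()
    if text.lower() == "y":
        return "GENERATE NEXT COMMAND JSON", 0
    if text.lower().startswith("y -"):
        try:
            count = abs(int(text.split(" ")[1]))
        except (IndexError, ValueError):
            raise ValueError("Enter 'y -n' where n is the number of continuous tasks.")
        return "GENERATE NEXT COMMAND JSON", count
    if text.lower() == "n":
        return "EXIT", 0
    return text, 0  # anything else is treated as human feedback

assert parse_console_input("y -5") == ("GENERATE NEXT COMMAND JSON", 5)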
@@ -1 +0,0 @@
permanent_memory = []
scripts/memory/__init__.py (Normal file, 44 lines)
@@ -0,0 +1,44 @@
from memory.local import LocalCache
try:
    from memory.redismem import RedisMemory
except ImportError:
    print("Redis not installed. Skipping import.")
    RedisMemory = None

try:
    from memory.pinecone import PineconeMemory
except ImportError:
    print("Pinecone not installed. Skipping import.")
    PineconeMemory = None


def get_memory(cfg, init=False):
    memory = None
    if cfg.memory_backend == "pinecone":
        if not PineconeMemory:
            print("Error: Pinecone is not installed. Please install pinecone"
                  " to use Pinecone as a memory backend.")
        else:
            memory = PineconeMemory(cfg)
            if init:
                memory.clear()
    elif cfg.memory_backend == "redis":
        if not RedisMemory:
            print("Error: Redis is not installed. Please install redis-py to"
                  " use Redis as a memory backend.")
        else:
            memory = RedisMemory(cfg)

    if memory is None:
        memory = LocalCache(cfg)
        if init:
            memory.clear()
    return memory


__all__ = [
    "get_memory",
    "LocalCache",
    "RedisMemory",
    "PineconeMemory",
]
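For orientation, this is how the rest of the codebase consumes the factory, pieced together from the calls visible elsewhere in this diff (main.py and commands.py); it assumes the scripts package is on the path and the chosen backend's library is installed.

# MEMORY_BACKEND=local|redis|pinecone selects the provider; get_memory falls
# back to LocalCache when the requested backend is unavailable.
from config import Config
from memory import get_memory

cfg = Config()
memory = get_memory(cfg, init=True)      # init=True wipes the index on startup
memory.add("Assistant Reply: ... Result: ... Human Feedback: ...")
relevant = memory.get_relevant("what did the user ask for?", 10)
print(memory.get_stats())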
scripts/memory/base.py (Normal file, 31 lines)
@@ -0,0 +1,31 @@
"""Base class for memory providers."""
import abc
from config import AbstractSingleton
import openai


def get_ada_embedding(text):
    text = text.replace("\n", " ")
    return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]


class MemoryProviderSingleton(AbstractSingleton):
    @abc.abstractmethod
    def add(self, data):
        pass

    @abc.abstractmethod
    def get(self, data):
        pass

    @abc.abstractmethod
    def clear(self):
        pass

    @abc.abstractmethod
    def get_relevant(self, data, num_relevant=5):
        pass

    @abc.abstractmethod
    def get_stats(self):
        pass
scripts/memory/local.py (Normal file, 114 lines)
@@ -0,0 +1,114 @@
import dataclasses
import orjson
from typing import Any, List, Optional
import numpy as np
import os
from memory.base import MemoryProviderSingleton, get_ada_embedding


EMBED_DIM = 1536
SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS


def create_default_embeddings():
    return np.zeros((0, EMBED_DIM)).astype(np.float32)


@dataclasses.dataclass
class CacheContent:
    texts: List[str] = dataclasses.field(default_factory=list)
    embeddings: np.ndarray = dataclasses.field(
        default_factory=create_default_embeddings
    )


class LocalCache(MemoryProviderSingleton):

    # on load, load our database
    def __init__(self, cfg) -> None:
        self.filename = f"{cfg.memory_index}.json"
        if os.path.exists(self.filename):
            with open(self.filename, 'rb') as f:
                loaded = orjson.loads(f.read())
                self.data = CacheContent(**loaded)
        else:
            self.data = CacheContent()

    def add(self, text: str):
        """
        Add text to our list of texts, add embedding as row to our
        embeddings-matrix

        Args:
            text: str

        Returns: None
        """
        if 'Command Error:' in text:
            return ""
        self.data.texts.append(text)

        embedding = get_ada_embedding(text)

        vector = np.array(embedding).astype(np.float32)
        vector = vector[np.newaxis, :]
        self.data.embeddings = np.concatenate(
            [
                vector,
                self.data.embeddings,
            ],
            axis=0,
        )

        with open(self.filename, 'wb') as f:
            out = orjson.dumps(
                self.data,
                option=SAVE_OPTIONS
            )
            f.write(out)
        return text

    def clear(self) -> str:
        """
        Clears the redis server.

        Returns: A message indicating that the memory has been cleared.
        """
        self.data = CacheContent()
        return "Obliviated"

    def get(self, data: str) -> Optional[List[Any]]:
        """
        Gets the data from the memory that is most relevant to the given data.

        Args:
            data: The data to compare to.

        Returns: The most relevant data.
        """
        return self.get_relevant(data, 1)

    def get_relevant(self, text: str, k: int) -> List[Any]:
        """
        matrix-vector mult to find score-for-each-row-of-matrix
        get indices for top-k winning scores
        return texts for those indices
        Args:
            text: str
            k: int

        Returns: List[str]
        """
        embedding = get_ada_embedding(text)

        scores = np.dot(self.data.embeddings, embedding)

        top_k_indices = np.argsort(scores)[-k:][::-1]

        return [self.data.texts[i] for i in top_k_indices]

    def get_stats(self):
        """
        Returns: The stats of the local cache.
        """
        return len(self.data.texts), self.data.embeddings.shape
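A tiny, self-contained demonstration of the retrieval math used by get_relevant above: a dot product of the (n, dim) embedding matrix with a (dim,) query vector yields one score per stored text, and argsort picks the top-k. Random vectors stand in for the ada embeddings.

import numpy as np

def top_k_texts(embeddings, texts, query_vector, k=2):
    scores = np.dot(embeddings, query_vector)          # shape (n,)
    top_k_indices = np.argsort(scores)[-k:][::-1]      # best scores last, so reverse
    return [texts[i] for i in top_k_indices]

rng = np.random.default_rng(0)
embeddings = rng.standard_normal((4, 8)).astype(np.float32)  # 4 stored texts, dim 8
texts = ["alpha", "beta", "gamma", "delta"]
query = embeddings[2] + 0.01 * rng.standard_normal(8).astype(np.float32)
print(top_k_texts(embeddings, texts, query))  # "gamma" should rank first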
scripts/memory/pinecone.py (Normal file, 51 lines)
@@ -0,0 +1,51 @@
import pinecone

from memory.base import MemoryProviderSingleton, get_ada_embedding


class PineconeMemory(MemoryProviderSingleton):
    def __init__(self, cfg):
        pinecone_api_key = cfg.pinecone_api_key
        pinecone_region = cfg.pinecone_region
        pinecone.init(api_key=pinecone_api_key, environment=pinecone_region)
        dimension = 1536
        metric = "cosine"
        pod_type = "p1"
        table_name = "auto-gpt"
        # this assumes we don't start with memory.
        # for now this works.
        # we'll need a more complicated and robust system if we want to start with memory.
        self.vec_num = 0
        if table_name not in pinecone.list_indexes():
            pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type)
        self.index = pinecone.Index(table_name)

    def add(self, data):
        vector = get_ada_embedding(data)
        # no metadata here. We may wish to change that long term.
        resp = self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})])
        _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}"
        self.vec_num += 1
        return _text

    def get(self, data):
        return self.get_relevant(data, 1)

    def clear(self):
        self.index.delete(deleteAll=True)
        return "Obliviated"

    def get_relevant(self, data, num_relevant=5):
        """
        Returns all the data in the memory that is relevant to the given data.
        :param data: The data to compare to.
        :param num_relevant: The number of relevant data to return. Defaults to 5
        """
        query_embedding = get_ada_embedding(data)
        results = self.index.query(query_embedding, top_k=num_relevant, include_metadata=True)
        sorted_results = sorted(results.matches, key=lambda x: x.score)
        return [str(item['metadata']["raw_text"]) for item in sorted_results]

    def get_stats(self):
        return self.index.describe_index_stats()
scripts/memory/redismem.py (Normal file, 143 lines)
@@ -0,0 +1,143 @@
"""Redis memory provider."""
from typing import Any, List, Optional
import redis
from redis.commands.search.field import VectorField, TextField
from redis.commands.search.query import Query
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
import numpy as np

from memory.base import MemoryProviderSingleton, get_ada_embedding


SCHEMA = [
    TextField("data"),
    VectorField(
        "embedding",
        "HNSW",
        {
            "TYPE": "FLOAT32",
            "DIM": 1536,
            "DISTANCE_METRIC": "COSINE"
        }
    ),
]


class RedisMemory(MemoryProviderSingleton):
    def __init__(self, cfg):
        """
        Initializes the Redis memory provider.

        Args:
            cfg: The config object.

        Returns: None
        """
        redis_host = cfg.redis_host
        redis_port = cfg.redis_port
        redis_password = cfg.redis_password
        self.dimension = 1536
        self.redis = redis.Redis(
            host=redis_host,
            port=redis_port,
            password=redis_password,
            db=0  # Cannot be changed
        )
        self.cfg = cfg
        if cfg.wipe_redis_on_start:
            self.redis.flushall()
        try:
            self.redis.ft(f"{cfg.memory_index}").create_index(
                fields=SCHEMA,
                definition=IndexDefinition(
                    prefix=[f"{cfg.memory_index}:"],
                    index_type=IndexType.HASH
                )
            )
        except Exception as e:
            print("Error creating Redis search index: ", e)
        existing_vec_num = self.redis.get(f'{cfg.memory_index}-vec_num')
        self.vec_num = int(existing_vec_num.decode('utf-8')) if\
            existing_vec_num else 0

    def add(self, data: str) -> str:
        """
        Adds a data point to the memory.

        Args:
            data: The data to add.

        Returns: Message indicating that the data has been added.
        """
        if 'Command Error:' in data:
            return ""
        vector = get_ada_embedding(data)
        vector = np.array(vector).astype(np.float32).tobytes()
        data_dict = {
            b"data": data,
            "embedding": vector
        }
        pipe = self.redis.pipeline()
        pipe.hset(f"{self.cfg.memory_index}:{self.vec_num}", mapping=data_dict)
        _text = f"Inserting data into memory at index: {self.vec_num}:\n"\
            f"data: {data}"
        self.vec_num += 1
        pipe.set(f'{self.cfg.memory_index}-vec_num', self.vec_num)
        pipe.execute()
        return _text

    def get(self, data: str) -> Optional[List[Any]]:
        """
        Gets the data from the memory that is most relevant to the given data.

        Args:
            data: The data to compare to.

        Returns: The most relevant data.
        """
        return self.get_relevant(data, 1)

    def clear(self) -> str:
        """
        Clears the redis server.

        Returns: A message indicating that the memory has been cleared.
        """
        self.redis.flushall()
        return "Obliviated"

    def get_relevant(
        self,
        data: str,
        num_relevant: int = 5
    ) -> Optional[List[Any]]:
        """
        Returns all the data in the memory that is relevant to the given data.
        Args:
            data: The data to compare to.
            num_relevant: The number of relevant data to return.

        Returns: A list of the most relevant data.
        """
        query_embedding = get_ada_embedding(data)
        base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]"
        query = Query(base_query).return_fields(
            "data",
            "vector_score"
        ).sort_by("vector_score").dialect(2)
        query_vector = np.array(query_embedding).astype(np.float32).tobytes()

        try:
            results = self.redis.ft(f"{self.cfg.memory_index}").search(
                query, query_params={"vector": query_vector}
            )
        except Exception as e:
            print("Error calling Redis search: ", e)
            return None
        return [result.data for result in results.docs]

    def get_stats(self):
        """
        Returns: The stats of the memory index.
        """
        return self.redis.ft(f"{self.cfg.memory_index}").info()
@@ -42,7 +42,7 @@ def say_text(text, voice_index=0):
    if not cfg.elevenlabs_api_key:
        gtts_speech(text)
    else:
        success = eleven_labs_speech(text)
        success = eleven_labs_speech(text, voice_index)
        if not success:
            gtts_speech(text)