diff --git a/.env.template b/.env.template
index e9ccda5edb..525cd61c5f 100644
--- a/.env.template
+++ b/.env.template
@@ -9,4 +9,6 @@ CUSTOM_SEARCH_ENGINE_ID=
USE_AZURE=False
OPENAI_API_BASE=your-base-url-for-azure
OPENAI_API_VERSION=api-version-for-azure
-OPENAI_DEPLOYMENT_ID=deployment-id-for-azure
\ No newline at end of file
+OPENAI_DEPLOYMENT_ID=deployment-id-for-azure
+IMAGE_PROVIDER=dalle
+HUGGINGFACE_API_TOKEN=
\ No newline at end of file
diff --git a/README.md b/README.md
index 4fc0c349e5..ba80818d0e 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@ Auto-GPT is an experimental open-source application showcasing the capabilities
https://user-images.githubusercontent.com/22963551/228855501-2f5777cf-755b-4407-a643-c7299e5b6419.mp4
-## 💖 Help Fund Auto-GPT's Development
+
💖 Help Fund Auto-GPT's Development 💖
If you can spare a coffee, you can help to cover the API costs of developing Auto-GPT and help push the boundaries of fully autonomous AI!
A full day of development can easily cost as much as $20 in API costs, which for a free project is quite limiting.
@@ -17,14 +17,13 @@ Your support is greatly appreciated
- Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here. 💖
-
-
-
-
+ Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here.
+Individual Sponsors
-
+
+
+
@@ -43,6 +42,7 @@ Your support is greatly appreciated
- [Setting up environment variables](#setting-up-environment-variables)
- [💀 Continuous Mode ⚠️](#-continuous-mode-️)
- [GPT3.5 ONLY Mode](#gpt35-only-mode)
+  - [🖼 Image Generation](#-image-generation)
- [⚠️ Limitations](#️-limitations)
- [🛡 Disclaimer](#-disclaimer)
- [🐦 Connect with Us on Twitter](#-connect-with-us-on-twitter)
@@ -57,7 +57,7 @@ Your support is greatly appreciated
- 🗃️ File storage and summarization with GPT-3.5
## 📋 Requirements
-- [Python 3.7 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
+- [Python 3.8 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
- OpenAI API key
- PINECONE API key
@@ -141,6 +141,40 @@ export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID"
```
+## Redis Setup
+
+Install Docker Desktop.
+
+Run:
+```
+docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
+```
+See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration.
+
+Set the following environment variables:
+```
+MEMORY_BACKEND=redis
+REDIS_HOST=localhost
+REDIS_PORT=6379
+REDIS_PASSWORD=
+```
+
+Note that this setup is not intended to be internet-facing and is not secure; do not expose Redis to the internet without a password, and preferably not at all.
+
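+For example, one way to start the container with a password (a rough sketch; check the Redis Stack documentation linked above for the exact options supported by the image):
+
+```
+# example only – see the Redis Stack docs for the supported options
+docker run -d --name redis-stack-server -p 6379:6379 -e REDIS_ARGS="--requirepass yourpassword" redis/redis-stack-server:latest
+```
+
+Then set `REDIS_PASSWORD=yourpassword` in the environment variables above.
+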
+You can optionally set
+
+```
+WIPE_REDIS_ON_START=False
+```
+
+to persist memory stored in Redis.
+
+You can specify the memory index for redis using the following:
+
+```
+MEMORY_INDEX=whatever
+```
+
## 🌲 Pinecone API Key Setup
Pinecone enables a vector based memory so a vast memory can be stored and only relevant memories
@@ -170,6 +204,7 @@ Or you can set them in the `.env` file.
1. View memory usage by using the `--debug` flag :)
+
## 💀 Continuous Mode ⚠️
Run the AI **without** user authorisation, 100% automated.
Continuous mode is not recommended.
@@ -188,6 +223,15 @@ If you don't have access to the GPT4 api, this mode will allow you to use Auto-G
python scripts/main.py --gpt3only
```
+## 🖼 Image Generation
+By default, Auto-GPT uses DALL-E for image generation. To use Stable Diffusion, a [HuggingFace API Token](https://huggingface.co/settings/tokens) is required.
+
+Once you have a token, set these variables in your `.env`:
+```
+IMAGE_PROVIDER=sd
+HUGGINGFACE_API_TOKEN="YOUR_HUGGINGFACE_API_TOKEN"
+```
+
## ⚠️ Limitations
This experiment aims to showcase the potential of GPT-4 but comes with some limitations:
diff --git a/requirements.txt b/requirements.txt
index ce24709858..6a9ba64330 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,3 +12,6 @@ docker
duckduckgo-search
google-api-python-client #(https://developers.google.com/custom-search/v1/overview)
pinecone-client==2.2.1
+redis
+orjson
+Pillow
diff --git a/scripts/ai_config.py b/scripts/ai_config.py
index 2f43274863..8cfa183a9b 100644
--- a/scripts/ai_config.py
+++ b/scripts/ai_config.py
@@ -1,6 +1,6 @@
import yaml
import data
-
+import os
class AIConfig:
def __init__(self, ai_name="", ai_role="", ai_goals=[]):
@@ -9,7 +9,7 @@ class AIConfig:
self.ai_goals = ai_goals
# Soon this will go in a folder where it remembers more stuff about the run(s)
- SAVE_FILE = "../ai_settings.yaml"
+ SAVE_FILE = os.path.join(os.path.dirname(__file__), '..', 'ai_settings.yaml')
@classmethod
def load(cls, config_file=SAVE_FILE):
diff --git a/scripts/chat.py b/scripts/chat.py
index 8da074c6bf..a27fbfd7ef 100644
--- a/scripts/chat.py
+++ b/scripts/chat.py
@@ -26,8 +26,11 @@ def create_chat_message(role, content):
def generate_context(prompt, relevant_memory, full_message_history, model):
current_context = [
create_chat_message(
- "system", prompt), create_chat_message(
- "system", f"Permanent memory: {relevant_memory}")]
+ "system", prompt),
+ create_chat_message(
+ "system", f"The current time and date is {time.strftime('%c')}"),
+ create_chat_message(
+ "system", f"This reminds you of these events from your past:\n{relevant_memory}\n\n")]
# Add messages from the full message history until we reach the token limit
next_message_to_add_index = len(full_message_history) - 1
@@ -95,7 +98,7 @@ def chat_with_ai(
# Count the currently used tokens
current_tokens_used += tokens_to_add
-
+
# Move to the next most recent message in the full message history
next_message_to_add_index -= 1
diff --git a/scripts/commands.py b/scripts/commands.py
index 187ad6c4b3..a6f956f884 100644
--- a/scripts/commands.py
+++ b/scripts/commands.py
@@ -1,6 +1,6 @@
import browse
import json
-from memory import PineconeMemory
+from memory import get_memory
import datetime
import agent_manager as agents
import speak
@@ -9,6 +9,7 @@ import ai_functions as ai
from file_operations import read_file, write_to_file, append_to_file, delete_file, search_files
from execute_code import execute_python_file, exec_shell
from json_parser import fix_and_parse_json
+from image_gen import generate_image
from duckduckgo_search import ddg
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
@@ -52,10 +53,11 @@ def get_command(response):
def execute_command(command_name, arguments):
- memory = PineconeMemory()
+ memory = get_memory(cfg)
+
try:
if command_name == "google":
-
+
# Check if the Google API key is set and use the official search method
# If the API key is not set or has only whitespaces, use the unofficial search method
if cfg.google_api_key and (cfg.google_api_key.strip() if cfg.google_api_key else None):
@@ -104,10 +106,12 @@ def execute_command(command_name, arguments):
return execute_python_file(arguments["file"])
elif command_name == "exec_shell": # Add this command
return exec_shell(arguments["command_line"])
+ elif command_name == "generate_image":
+ return generate_image(arguments["prompt"])
elif command_name == "task_complete":
shutdown()
else:
- return f"Unknown command {command_name}"
+ return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for availabe commands and only respond in the specified JSON format."
# All errors, return "Error: + error message"
except Exception as e:
return "Error: " + str(e)
diff --git a/scripts/config.py b/scripts/config.py
index fe48d29800..4d7adec1c0 100644
--- a/scripts/config.py
+++ b/scripts/config.py
@@ -1,3 +1,4 @@
+import abc
import os
import openai
from dotenv import load_dotenv
@@ -5,7 +6,7 @@ from dotenv import load_dotenv
load_dotenv()
-class Singleton(type):
+class Singleton(abc.ABCMeta, type):
"""
Singleton metaclass for ensuring only one instance of a class.
"""
@@ -20,12 +21,17 @@ class Singleton(type):
return cls._instances[cls]
+class AbstractSingleton(abc.ABC, metaclass=Singleton):
+ pass
+
+
class Config(metaclass=Singleton):
"""
Configuration class to store the state of bools for different scripts access.
"""
def __init__(self):
+ self.debug = False
self.continuous_mode = False
self.speak_mode = False
# TODO - make these models be self-contained, using langchain, so we can configure them once and call it good
@@ -53,10 +59,20 @@ class Config(metaclass=Singleton):
self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
self.pinecone_region = os.getenv("PINECONE_ENV")
+ self.image_provider = os.getenv("IMAGE_PROVIDER")
+ self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
+
# User agent headers to use when browsing web
# Some websites might just completely deny request with an error code if no user agent was found.
self.user_agent_header = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
+ self.redis_host = os.getenv("REDIS_HOST", "localhost")
+ self.redis_port = os.getenv("REDIS_PORT", "6379")
+ self.redis_password = os.getenv("REDIS_PASSWORD", "")
+ self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == 'True'
+ self.memory_index = os.getenv("MEMORY_INDEX", 'auto-gpt')
+        # Note that indexes must be created on db 0 in Redis; this is not configurable.
+ self.memory_backend = os.getenv("MEMORY_BACKEND", 'local')
# Initialize the OpenAI API client
openai.api_key = self.openai_api_key
@@ -95,3 +111,6 @@ class Config(metaclass=Singleton):
def set_pinecone_region(self, value: str):
self.pinecone_region = value
+
+ def set_debug_mode(self, value: bool):
+ self.debug = value
diff --git a/scripts/data/prompt.txt b/scripts/data/prompt.txt
index 5b139586ed..d6289b6c0b 100644
--- a/scripts/data/prompt.txt
+++ b/scripts/data/prompt.txt
@@ -18,12 +18,13 @@ COMMANDS:
12. Append to file: "append_to_file", args: "file": "", "text": ""
13. Delete file: "delete_file", args: "file": ""
14. Search Files: "search_files", args: "directory": ""
-15. Evaluate Code: "evaluate_code", args: "code": ""
+15. Evaluate Code: "evaluate_code", args: "code": ""
16. Get Improved Code: "improve_code", args: "suggestions": "", "code": ""
17. Write Tests: "write_tests", args: "code": "", "focus": ""
18. Execute Python File: "execute_python_file", args: "file": ""
19. Execute Shell Command: "exec_shell", args: "command_line": "". Remember only to use commands that terminate, interactive tools like vim are not supported!
20. Task Complete (Shutdown): "task_complete", args: "reason": ""
+21. Generate Image: "generate_image", args: "prompt": ""
RESOURCES:
diff --git a/scripts/image_gen.py b/scripts/image_gen.py
new file mode 100644
index 0000000000..185ed4278b
--- /dev/null
+++ b/scripts/image_gen.py
@@ -0,0 +1,57 @@
+import requests
+import io
+import os.path
+from PIL import Image
+from config import Config
+import uuid
+import openai
+from base64 import b64decode
+
+cfg = Config()
+
+working_directory = "auto_gpt_workspace"
+
+def generate_image(prompt):
+
+ filename = str(uuid.uuid4()) + ".jpg"
+
+ # DALL-E
+ if cfg.image_provider == 'dalle':
+
+ openai.api_key = cfg.openai_api_key
+
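+        # Request base64-encoded image data so it can be decoded and written straight to disk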
+ response = openai.Image.create(
+ prompt=prompt,
+ n=1,
+ size="256x256",
+ response_format="b64_json",
+ )
+
+        print("Image Generated for prompt: " + prompt)
+
+        image_data = b64decode(response["data"][0]["b64_json"])
+
+        with open(os.path.join(working_directory, filename), mode="wb") as image_file:
+            image_file.write(image_data)
+
+        return "Saved to disk: " + filename
+
+ # STABLE DIFFUSION
+ elif cfg.image_provider == 'sd':
+
+ API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
+ headers = {"Authorization": "Bearer " + cfg.huggingface_api_token}
+
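+        # The Hugging Face Inference API responds with the raw image bytes for this model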
+ response = requests.post(API_URL, headers=headers, json={
+ "inputs": prompt,
+ })
+
+ image = Image.open(io.BytesIO(response.content))
+        print("Image Generated for prompt: " + prompt)
+
+ image.save(os.path.join(working_directory, filename))
+
+        return "Saved to disk: " + filename
+
+ else:
+ return "No Image Provider Set"
\ No newline at end of file
diff --git a/scripts/json_parser.py b/scripts/json_parser.py
index 8ec9238b4d..c863ccdbb0 100644
--- a/scripts/json_parser.py
+++ b/scripts/json_parser.py
@@ -40,7 +40,7 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
if try_to_fix_with_gpt:
print(f"Warning: Failed to parse AI output, attempting to fix.\n If you see this warning frequently, it's likely that your prompt is confusing the AI. Try changing it up slightly.")
# Now try to fix this up using the ai_functions
- ai_fixed_json = fix_json(json_str, json_schema, False)
+ ai_fixed_json = fix_json(json_str, json_schema, cfg.debug)
if ai_fixed_json != "failed":
return json.loads(ai_fixed_json)
else:
diff --git a/scripts/main.py b/scripts/main.py
index 17385bf339..f96afeb163 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -1,7 +1,7 @@
import json
import random
import commands as cmd
-from memory import PineconeMemory
+from memory import get_memory
import data
import chat
from colorama import Fore, Style
@@ -266,6 +266,10 @@ def parse_arguments():
print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
cfg.set_smart_llm_model(cfg.fast_llm_model)
+ if args.debug:
+ print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
+ cfg.set_debug_mode(True)
+
# TODO: fill in llm values here
@@ -281,12 +285,9 @@ next_action_count = 0
# Make a constant:
user_input = "Determine which next command to use, and respond using the format specified above:"
-# raise an exception if pinecone_api_key or region is not provided
-if not cfg.pinecone_api_key or not cfg.pinecone_region: raise Exception("Please provide pinecone_api_key and pinecone_region")
# Initialize memory and make sure it is empty.
# this is particularly important for indexing and referencing pinecone memory
-memory = PineconeMemory()
-memory.clear()
+memory = get_memory(cfg, init=True)
print('Using memory of type: ' + memory.__class__.__name__)
# Interaction Loop
@@ -298,7 +299,7 @@ while True:
user_input,
full_message_history,
memory,
- cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
+ cfg.fast_token_limit, cfg.debug) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
# Print Assistant thoughts
print_assistant_thoughts(assistant_reply)
@@ -358,7 +359,7 @@ while True:
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
# Execute command
- if command_name.lower() == "error":
+    if command_name.lower().startswith("error"):
result = f"Command {command_name} threw the following error: " + arguments
elif command_name == "human_feedback":
result = f"Human feedback: {user_input}"
diff --git a/scripts/memory/__init__.py b/scripts/memory/__init__.py
new file mode 100644
index 0000000000..a441a46aa9
--- /dev/null
+++ b/scripts/memory/__init__.py
@@ -0,0 +1,44 @@
+from memory.local import LocalCache
+try:
+ from memory.redismem import RedisMemory
+except ImportError:
+ print("Redis not installed. Skipping import.")
+ RedisMemory = None
+
+try:
+ from memory.pinecone import PineconeMemory
+except ImportError:
+ print("Pinecone not installed. Skipping import.")
+ PineconeMemory = None
+
+
+def get_memory(cfg, init=False):
+ memory = None
+ if cfg.memory_backend == "pinecone":
+ if not PineconeMemory:
+ print("Error: Pinecone is not installed. Please install pinecone"
+ " to use Pinecone as a memory backend.")
+ else:
+ memory = PineconeMemory(cfg)
+ if init:
+ memory.clear()
+ elif cfg.memory_backend == "redis":
+ if not RedisMemory:
+ print("Error: Redis is not installed. Please install redis-py to"
+ " use Redis as a memory backend.")
+ else:
+ memory = RedisMemory(cfg)
+
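+    # Fall back to the local cache when no other backend is configured or available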
+ if memory is None:
+ memory = LocalCache(cfg)
+ if init:
+ memory.clear()
+ return memory
+
+
+__all__ = [
+ "get_memory",
+ "LocalCache",
+ "RedisMemory",
+ "PineconeMemory",
+]
diff --git a/scripts/memory/base.py b/scripts/memory/base.py
new file mode 100644
index 0000000000..d7ab7fcf1f
--- /dev/null
+++ b/scripts/memory/base.py
@@ -0,0 +1,31 @@
+"""Base class for memory providers."""
+import abc
+from config import AbstractSingleton
+import openai
+
+
+def get_ada_embedding(text):
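+    # Newlines can degrade embedding quality, so replace them with spaces before embedding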
+ text = text.replace("\n", " ")
+ return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
+
+
+class MemoryProviderSingleton(AbstractSingleton):
+ @abc.abstractmethod
+ def add(self, data):
+ pass
+
+ @abc.abstractmethod
+ def get(self, data):
+ pass
+
+ @abc.abstractmethod
+ def clear(self):
+ pass
+
+ @abc.abstractmethod
+ def get_relevant(self, data, num_relevant=5):
+ pass
+
+ @abc.abstractmethod
+ def get_stats(self):
+ pass
diff --git a/scripts/memory/local.py b/scripts/memory/local.py
new file mode 100644
index 0000000000..8dc90021ff
--- /dev/null
+++ b/scripts/memory/local.py
@@ -0,0 +1,114 @@
+import dataclasses
+import orjson
+from typing import Any, List, Optional
+import numpy as np
+import os
+from memory.base import MemoryProviderSingleton, get_ada_embedding
+
+
+EMBED_DIM = 1536
+SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
+
+
+def create_default_embeddings():
+ return np.zeros((0, EMBED_DIM)).astype(np.float32)
+
+
+@dataclasses.dataclass
+class CacheContent:
+ texts: List[str] = dataclasses.field(default_factory=list)
+ embeddings: np.ndarray = dataclasses.field(
+ default_factory=create_default_embeddings
+ )
+
+
+class LocalCache(MemoryProviderSingleton):
+
+ # on load, load our database
+ def __init__(self, cfg) -> None:
+ self.filename = f"{cfg.memory_index}.json"
+ if os.path.exists(self.filename):
+ with open(self.filename, 'rb') as f:
+ loaded = orjson.loads(f.read())
+ self.data = CacheContent(**loaded)
+ else:
+ self.data = CacheContent()
+
+ def add(self, text: str):
+ """
+ Add text to our list of texts, add embedding as row to our
+ embeddings-matrix
+
+ Args:
+ text: str
+
+ Returns: None
+ """
+ if 'Command Error:' in text:
+ return ""
+ self.data.texts.append(text)
+
+ embedding = get_ada_embedding(text)
+
+ vector = np.array(embedding).astype(np.float32)
+ vector = vector[np.newaxis, :]
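+        # Prepend the new embedding as a row of the embeddings matrix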
+ self.data.embeddings = np.concatenate(
+ [
+ vector,
+ self.data.embeddings,
+ ],
+ axis=0,
+ )
+
+ with open(self.filename, 'wb') as f:
+ out = orjson.dumps(
+ self.data,
+ option=SAVE_OPTIONS
+ )
+ f.write(out)
+ return text
+
+ def clear(self) -> str:
+ """
+        Clears the local memory cache.
+
+ Returns: A message indicating that the memory has been cleared.
+ """
+ self.data = CacheContent()
+ return "Obliviated"
+
+ def get(self, data: str) -> Optional[List[Any]]:
+ """
+ Gets the data from the memory that is most relevant to the given data.
+
+ Args:
+ data: The data to compare to.
+
+ Returns: The most relevant data.
+ """
+ return self.get_relevant(data, 1)
+
+ def get_relevant(self, text: str, k: int) -> List[Any]:
+ """"
+ matrix-vector mult to find score-for-each-row-of-matrix
+ get indices for top-k winning scores
+ return texts for those indices
+ Args:
+ text: str
+ k: int
+
+ Returns: List[str]
+ """
+ embedding = get_ada_embedding(text)
+
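+        # ada embeddings are (approximately) unit-length, so a dot product acts like cosine similarity here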
+ scores = np.dot(self.data.embeddings, embedding)
+
+ top_k_indices = np.argsort(scores)[-k:][::-1]
+
+ return [self.data.texts[i] for i in top_k_indices]
+
+ def get_stats(self):
+ """
+ Returns: The stats of the local cache.
+ """
+ return len(self.data.texts), self.data.embeddings.shape
diff --git a/scripts/memory.py b/scripts/memory/pinecone.py
similarity index 80%
rename from scripts/memory.py
rename to scripts/memory/pinecone.py
index 0d265a31d8..8e1eaa570f 100644
--- a/scripts/memory.py
+++ b/scripts/memory/pinecone.py
@@ -1,21 +1,11 @@
-from config import Config, Singleton
+
import pinecone
-import openai
-cfg = Config()
+from memory.base import MemoryProviderSingleton, get_ada_embedding
-def get_ada_embedding(text):
- text = text.replace("\n", " ")
- return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
-
-
-def get_text_from_embedding(embedding):
- return openai.Embedding.retrieve(embedding, model="text-embedding-ada-002")["data"][0]["text"]
-
-
-class PineconeMemory(metaclass=Singleton):
- def __init__(self):
+class PineconeMemory(MemoryProviderSingleton):
+ def __init__(self, cfg):
pinecone_api_key = cfg.pinecone_api_key
pinecone_region = cfg.pinecone_region
pinecone.init(api_key=pinecone_api_key, environment=pinecone_region)
diff --git a/scripts/memory/redismem.py b/scripts/memory/redismem.py
new file mode 100644
index 0000000000..2082fe5887
--- /dev/null
+++ b/scripts/memory/redismem.py
@@ -0,0 +1,143 @@
+"""Redis memory provider."""
+from typing import Any, List, Optional
+import redis
+from redis.commands.search.field import VectorField, TextField
+from redis.commands.search.query import Query
+from redis.commands.search.indexDefinition import IndexDefinition, IndexType
+import numpy as np
+
+from memory.base import MemoryProviderSingleton, get_ada_embedding
+
+
+SCHEMA = [
+ TextField("data"),
+ VectorField(
+ "embedding",
+ "HNSW",
+ {
+ "TYPE": "FLOAT32",
+ "DIM": 1536,
+ "DISTANCE_METRIC": "COSINE"
+ }
+ ),
+]
+
+
+class RedisMemory(MemoryProviderSingleton):
+ def __init__(self, cfg):
+ """
+ Initializes the Redis memory provider.
+
+ Args:
+ cfg: The config object.
+
+ Returns: None
+ """
+ redis_host = cfg.redis_host
+ redis_port = cfg.redis_port
+ redis_password = cfg.redis_password
+ self.dimension = 1536
+ self.redis = redis.Redis(
+ host=redis_host,
+ port=redis_port,
+ password=redis_password,
+ db=0 # Cannot be changed
+ )
+ self.cfg = cfg
+ if cfg.wipe_redis_on_start:
+ self.redis.flushall()
+ try:
+ self.redis.ft(f"{cfg.memory_index}").create_index(
+ fields=SCHEMA,
+ definition=IndexDefinition(
+ prefix=[f"{cfg.memory_index}:"],
+ index_type=IndexType.HASH
+ )
+ )
+ except Exception as e:
+ print("Error creating Redis search index: ", e)
+ existing_vec_num = self.redis.get(f'{cfg.memory_index}-vec_num')
+ self.vec_num = int(existing_vec_num.decode('utf-8')) if\
+ existing_vec_num else 0
+
+ def add(self, data: str) -> str:
+ """
+ Adds a data point to the memory.
+
+ Args:
+ data: The data to add.
+
+ Returns: Message indicating that the data has been added.
+ """
+ if 'Command Error:' in data:
+ return ""
+ vector = get_ada_embedding(data)
+ vector = np.array(vector).astype(np.float32).tobytes()
+ data_dict = {
+ b"data": data,
+ "embedding": vector
+ }
+ pipe = self.redis.pipeline()
+ pipe.hset(f"{self.cfg.memory_index}:{self.vec_num}", mapping=data_dict)
+ _text = f"Inserting data into memory at index: {self.vec_num}:\n"\
+ f"data: {data}"
+ self.vec_num += 1
+ pipe.set(f'{self.cfg.memory_index}-vec_num', self.vec_num)
+ pipe.execute()
+ return _text
+
+ def get(self, data: str) -> Optional[List[Any]]:
+ """
+ Gets the data from the memory that is most relevant to the given data.
+
+ Args:
+ data: The data to compare to.
+
+ Returns: The most relevant data.
+ """
+ return self.get_relevant(data, 1)
+
+ def clear(self) -> str:
+ """
+ Clears the redis server.
+
+ Returns: A message indicating that the memory has been cleared.
+ """
+ self.redis.flushall()
+ return "Obliviated"
+
+ def get_relevant(
+ self,
+ data: str,
+ num_relevant: int = 5
+ ) -> Optional[List[Any]]:
+ """
+ Returns all the data in the memory that is relevant to the given data.
+ Args:
+ data: The data to compare to.
+ num_relevant: The number of relevant data to return.
+
+ Returns: A list of the most relevant data.
+ """
+ query_embedding = get_ada_embedding(data)
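+        # RediSearch KNN query: return the num_relevant nearest embeddings, with the distance exposed as vector_score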
+ base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]"
+ query = Query(base_query).return_fields(
+ "data",
+ "vector_score"
+ ).sort_by("vector_score").dialect(2)
+ query_vector = np.array(query_embedding).astype(np.float32).tobytes()
+
+ try:
+ results = self.redis.ft(f"{self.cfg.memory_index}").search(
+ query, query_params={"vector": query_vector}
+ )
+ except Exception as e:
+ print("Error calling Redis search: ", e)
+ return None
+ return [result.data for result in results.docs]
+
+ def get_stats(self):
+ """
+ Returns: The stats of the memory index.
+ """
+ return self.redis.ft(f"{self.cfg.memory_index}").info()