Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-09 15:17:59 -05:00)

Commit: Merge branch 'master' into summary_memory

@@ -127,3 +127,22 @@ When you run Pytest locally:
 - Or: The test might be poorly written. In that case, you can make suggestions to change the test.
 
 In our CI pipeline, Pytest will use the cassettes and not call paid API providers, so we need your help to record the replays that you break.
+
+
+### Community Challenges
+Challenges are goals we need Auto-GPT to achieve.
+To pick the challenge you like, go to the tests/integration/challenges folder and select the areas you would like to work on.
+- a challenge is new if level_currently_beaten is None
+- a challenge is in progress if level_currently_beaten is greater or equal to 1
+- a challenge is beaten if level_currently_beaten = max_level
+
+Here is an example of how to run the memory challenge A and attempt to beat level 3.
+
+pytest -s tests/integration/challenges/memory/test_memory_challenge_a.py --level=3
+
+To beat a challenge, you're not allowed to change anything in the tests folder, you have to add code in the autogpt folder
+
+Challenges use cassettes. Cassettes allow us to replay your runs in our CI pipeline.
+Don't hesitate to delete the cassettes associated to the challenge you're working on if you need to. Otherwise it will keep replaying the last run.
+
+Once you've beaten a new level of a challenge, please create a pull request and we will analyze how you changed Auto-GPT to beat the challenge.

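The `--level` flag in the command above is not a built-in pytest option, so it has to be registered before challenge tests can read it. Below is a minimal sketch of how such an option is typically wired up in a conftest.py; the fixture name `user_selected_level` and the default value are illustrative, not taken from this commit:

    # tests/integration/challenges/conftest.py (sketch; names are illustrative)
    import pytest


    def pytest_addoption(parser):
        # Registers the --level flag used in the pytest command above.
        parser.addoption("--level", action="store", default=None, type=int)


    @pytest.fixture
    def user_selected_level(request) -> int:
        # Challenge tests read the requested level through this fixture.
        return request.config.getoption("--level")
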
@@ -8,7 +8,7 @@ from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_messag
 from autogpt.logs import logger, print_assistant_thoughts
 from autogpt.speech import say_text
 from autogpt.spinner import Spinner
-from autogpt.utils import clean_input, send_chat_message_to_user
+from autogpt.utils import clean_input
 from autogpt.workspace import Workspace
 
 

@@ -87,11 +87,7 @@ class Agent:
                 logger.typewriter_log(
                     "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
                 )
-                send_chat_message_to_user(
-                    f"Continuous Limit Reached: \n {cfg.continuous_limit}"
-                )
                 break
-            send_chat_message_to_user("Thinking... \n")
             # Send message to AI, get response
             with Spinner("Thinking... "):
                 assistant_reply = chat_with_ai(

@@ -121,7 +117,6 @@ class Agent:
                 if cfg.speak_mode:
                     say_text(f"I want to execute {command_name}")
 
-                send_chat_message_to_user("Thinking... \n")
                 arguments = self._resolve_pathlike_command_args(arguments)
 
             except Exception as e:

@@ -132,24 +127,19 @@ class Agent:
                 # Get key press: Prompt the user to press enter to continue or escape
                 # to exit
                 self.user_input = ""
-                send_chat_message_to_user(
-                    "NEXT ACTION: \n " + f"COMMAND = {command_name} \n "
-                    f"ARGUMENTS = {arguments}"
-                )
                 logger.typewriter_log(
                     "NEXT ACTION: ",
                     Fore.CYAN,
                     f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
                     f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                 )
-                print(
+                logger.info(
                     "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands"
                     "'n' to exit program, or enter feedback for "
-                    f"{self.ai_name}...",
-                    flush=True,
+                    f"{self.ai_name}..."
                 )
                 while True:
-                    console_input = ""
                     if cfg.chat_messages_enabled:
                         console_input = clean_input("Waiting for your response...")
                     else:

@@ -180,7 +170,7 @@ class Agent:
                         user_input = self_feedback_resp
                         break
                     elif console_input.lower().strip() == "":
-                        print("Invalid input format.")
+                        logger.warn("Invalid input format.")
                         continue
                     elif console_input.lower().startswith(f"{cfg.authorise_key} -"):
                         try:

@@ -189,8 +179,8 @@ class Agent:
                             )
                             user_input = "GENERATE NEXT COMMAND JSON"
                         except ValueError:
-                            print(
-                                f"Invalid input format. Please enter '{cfg.authorise_key} -N' where N is"
+                            logger.warn(
+                                "Invalid input format. Please enter 'y -n' where n is"
                                 " the number of continuous tasks."
                             )
                             continue

@@ -210,16 +200,10 @@ class Agent:
                         "",
                     )
                 elif user_input == "EXIT":
-                    send_chat_message_to_user("Exiting...")
-                    print("Exiting...", flush=True)
+                    logger.info("Exiting...")
                     break
             else:
                 # Print command
-                send_chat_message_to_user(
-                    "NEXT ACTION: \n " + f"COMMAND = {command_name} \n "
-                    f"ARGUMENTS = {arguments}"
-                )
-
                 logger.typewriter_log(
                     "NEXT ACTION: ",
                     Fore.CYAN,

@@ -6,6 +6,7 @@ from autogpt.agent.agent_manager import AgentManager
 from autogpt.commands.command import CommandRegistry, command
 from autogpt.commands.web_requests import scrape_links, scrape_text
 from autogpt.config import Config
+from autogpt.logs import logger
 from autogpt.memory import get_memory
 from autogpt.processing.text import summarize_text
 from autogpt.prompts.generator import PromptGenerator

@@ -172,7 +173,7 @@ def get_hyperlinks(url: str) -> Union[str, List[str]]:
 
 def shutdown() -> NoReturn:
     """Shut down the program"""
-    print("Shutting down...")
+    logger.info("Shutting down...")
     quit()
 
 

@@ -8,6 +8,7 @@ from docker.errors import ImageNotFound
 
 from autogpt.commands.command import command
 from autogpt.config import Config
+from autogpt.logs import logger
 
 CFG = Config()
 

@@ -22,7 +23,7 @@ def execute_python_file(filename: str) -> str:
     Returns:
         str: The output of the file
     """
-    print(f"Executing file '{filename}'")
+    logger.info(f"Executing file '{filename}'")
 
     if not filename.endswith(".py"):
         return "Error: Invalid file type. Only .py files are allowed."

@@ -47,9 +48,11 @@ def execute_python_file(filename: str) -> str:
         image_name = "python:3-alpine"
         try:
             client.images.get(image_name)
-            print(f"Image '{image_name}' found locally")
+            logger.warn(f"Image '{image_name}' found locally")
         except ImageNotFound:
-            print(f"Image '{image_name}' not found locally, pulling from Docker Hub")
+            logger.info(
+                f"Image '{image_name}' not found locally, pulling from Docker Hub"
+            )
             # Use the low-level API to stream the pull response
             low_level_client = docker.APIClient()
             for line in low_level_client.pull(image_name, stream=True, decode=True):

@@ -57,9 +60,9 @@ def execute_python_file(filename: str) -> str:
                 status = line.get("status")
                 progress = line.get("progress")
                 if status and progress:
-                    print(f"{status}: {progress}")
+                    logger.info(f"{status}: {progress}")
                 elif status:
-                    print(status)
+                    logger.info(status)
         container = client.containers.run(
             image_name,
             f"python {Path(filename).relative_to(CFG.workspace_path)}",

@@ -85,7 +88,7 @@ def execute_python_file(filename: str) -> str:
         return logs
 
     except docker.errors.DockerException as e:
-        print(
+        logger.warn(
             "Could not run the script in a container. If you haven't already, please install Docker https://docs.docker.com/get-docker/"
         )
         return f"Error: {str(e)}"

@@ -118,7 +121,9 @@ def execute_shell(command_line: str) -> str:
     if not current_dir.is_relative_to(CFG.workspace_path):
         os.chdir(CFG.workspace_path)
 
-    print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
+    logger.info(
+        f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
+    )
 
     result = subprocess.run(command_line, capture_output=True, shell=True)
     output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"

@@ -154,7 +159,9 @@ def execute_shell_popen(command_line) -> str:
     if CFG.workspace_path not in current_dir:
         os.chdir(CFG.workspace_path)
 
-    print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
+    logger.info(
+        f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
+    )
 
     do_not_show_output = subprocess.DEVNULL
     process = subprocess.Popen(

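Both shell-execution hunks above keep the working directory inside the workspace before running anything. The containment test relies on `Path.is_relative_to`, available since Python 3.9; a small self-contained illustration (paths are made up):

    from pathlib import Path

    workspace = Path("/home/user/auto_gpt_workspace")

    print(Path("/home/user/auto_gpt_workspace/project").is_relative_to(workspace))  # True: already inside
    print(Path("/tmp/elsewhere").is_relative_to(workspace))  # False: triggers os.chdir(workspace)
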
@@ -11,6 +11,7 @@ from requests.adapters import HTTPAdapter, Retry
 
 from autogpt.commands.command import command
 from autogpt.config import Config
+from autogpt.logs import logger
 from autogpt.spinner import Spinner
 from autogpt.utils import readable_file_size
 

@@ -106,25 +107,25 @@ def ingest_file(
     :param overlap: The number of overlapping characters between chunks, default is 200
     """
     try:
-        print(f"Working with file {filename}")
+        logger.info(f"Working with file {filename}")
         content = read_file(filename)
         content_length = len(content)
-        print(f"File length: {content_length} characters")
+        logger.info(f"File length: {content_length} characters")
 
         chunks = list(split_file(content, max_length=max_length, overlap=overlap))
 
         num_chunks = len(chunks)
         for i, chunk in enumerate(chunks):
-            print(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
+            logger.info(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
             memory_to_add = (
                 f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}"
             )
 
             memory.add(memory_to_add)
 
-        print(f"Done ingesting {num_chunks} chunks from {filename}.")
+        logger.info(f"Done ingesting {num_chunks} chunks from {filename}.")
     except Exception as e:
-        print(f"Error while ingesting file '{filename}': {str(e)}")
+        logger.info(f"Error while ingesting file '{filename}': {str(e)}")
 
 
 @command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')

@@ -264,7 +265,7 @@ def download_file(url, filename):
                 progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
                 spinner.update_message(f"{message} {progress}")
 
-        return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(total_size)})'
+        return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(downloaded_size)})'
     except requests.HTTPError as e:
         return f"Got an HTTP Error whilst trying to download file: {e}"
     except Exception as e:

@@ -11,25 +11,25 @@ CFG = Config()
 @command(
     "clone_repository",
     "Clone Repository",
-    '"repository_url": "<repository_url>", "clone_path": "<clone_path>"',
+    '"url": "<repository_url>", "clone_path": "<clone_path>"',
     CFG.github_username and CFG.github_api_key,
     "Configure github_username and github_api_key.",
 )
 @validate_url
-def clone_repository(repository_url: str, clone_path: str) -> str:
+def clone_repository(url: str, clone_path: str) -> str:
     """Clone a GitHub repository locally.
 
     Args:
-        repository_url (str): The URL of the repository to clone.
+        url (str): The URL of the repository to clone.
         clone_path (str): The path to clone the repository to.
 
     Returns:
         str: The result of the clone operation.
     """
-    split_url = repository_url.split("//")
+    split_url = url.split("//")
     auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
     try:
-        Repo.clone_from(auth_repo_url, clone_path)
-        return f"""Cloned {repository_url} to {clone_path}"""
+        Repo.clone_from(url=auth_repo_url, to_path=clone_path)
+        return f"""Cloned {url} to {clone_path}"""
     except Exception as e:
         return f"Error: {str(e)}"

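The `split("//")` and `join` pair in clone_repository splices the GitHub credentials into the clone URL. A worked example of the transformation (username and token are made up):

    url = "https://github.com/Significant-Gravitas/AutoGPT.git"
    github_username = "octocat"
    github_api_key = "ghp_example_token"

    split_url = url.split("//")
    # ['https:', 'github.com/Significant-Gravitas/AutoGPT.git']
    auth_repo_url = f"//{github_username}:{github_api_key}@".join(split_url)
    print(auth_repo_url)
    # https://octocat:ghp_example_token@github.com/Significant-Gravitas/AutoGPT.git
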
@@ -9,6 +9,7 @@ from PIL import Image
 
 from autogpt.commands.command import command
 from autogpt.config import Config
+from autogpt.logs import logger
 
 CFG = Config()
 

@@ -69,7 +70,7 @@ def generate_image_with_hf(prompt: str, filename: str) -> str:
     )
 
     image = Image.open(io.BytesIO(response.content))
-    print(f"Image Generated for prompt:{prompt}")
+    logger.info(f"Image Generated for prompt:{prompt}")
 
     image.save(filename)
 

@@ -91,7 +92,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
     # Check for supported image sizes
     if size not in [256, 512, 1024]:
         closest = min([256, 512, 1024], key=lambda x: abs(x - size))
-        print(
+        logger.info(
             f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}."
         )
         size = closest

@@ -104,7 +105,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
         api_key=CFG.openai_api_key,
     )
 
-    print(f"Image Generated for prompt:{prompt}")
+    logger.info(f"Image Generated for prompt:{prompt}")
 
     image_data = b64decode(response["data"][0]["b64_json"])
 

@@ -153,7 +154,7 @@ def generate_image_with_sd_webui(
         },
     )
 
-    print(f"Image Generated for prompt:{prompt}")
+    logger.info(f"Image Generated for prompt:{prompt}")
 
     # Save the image to disk
     response = response.json()

@@ -1,10 +1,12 @@
 """Web scraping commands using Playwright"""
 from __future__ import annotations
 
+from autogpt.logs import logger
+
 try:
     from playwright.sync_api import sync_playwright
 except ImportError:
-    print(
+    logger.info(
         "Playwright not installed. Please install it with 'pip install playwright' to use."
     )
 from bs4 import BeautifulSoup

@@ -7,7 +7,7 @@ from __future__ import annotations
 import os
 import platform
 from pathlib import Path
-from typing import Optional, Type
+from typing import Any, Optional, Type
 
 import distro
 import yaml

@@ -79,7 +79,12 @@ class AIConfig:
 
         ai_name = config_params.get("ai_name", "")
         ai_role = config_params.get("ai_role", "")
-        ai_goals = config_params.get("ai_goals", [])
+        ai_goals = [
+            str(goal).strip("{}").replace("'", "").replace('"', "")
+            if isinstance(goal, dict)
+            else str(goal)
+            for goal in config_params.get("ai_goals", [])
+        ]
         api_budget = config_params.get("api_budget", 0.0)
         # type: Type[AIConfig]
         return AIConfig(ai_name, ai_role, ai_goals, api_budget)

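The new comprehension guards against goals that YAML parses as mappings rather than strings (for example a goal written as `{Increase net worth}`), flattening them back to plain text. A worked example of its behaviour (inputs are illustrative):

    config_params = {"ai_goals": ["Grow the business", {"Increase net worth": None}]}

    ai_goals = [
        str(goal).strip("{}").replace("'", "").replace('"', "")
        if isinstance(goal, dict)
        else str(goal)
        for goal in config_params.get("ai_goals", [])
    ]
    print(ai_goals)  # ['Grow the business', 'Increase net worth: None']
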
@@ -9,6 +9,7 @@ from typing import Optional
 
 from autogpt.config import Config
 from autogpt.json_utils.utilities import extract_char_position
+from autogpt.logs import logger
 
 CFG = Config()
 

@@ -33,8 +34,7 @@ def fix_invalid_escape(json_to_load: str, error_message: str) -> str:
         json.loads(json_to_load)
         return json_to_load
     except json.JSONDecodeError as e:
-        if CFG.debug_mode:
-            print("json loads error - fix invalid escape", e)
+        logger.debug("json loads error - fix invalid escape", e)
         error_message = str(e)
     return json_to_load
 

@@ -98,13 +98,11 @@ def correct_json(json_to_load: str) -> str:
     """
 
     try:
-        if CFG.debug_mode:
-            print("json", json_to_load)
+        logger.debug("json", json_to_load)
         json.loads(json_to_load)
         return json_to_load
     except json.JSONDecodeError as e:
-        if CFG.debug_mode:
-            print("json loads error", e)
+        logger.debug("json loads error", e)
         error_message = str(e)
         if error_message.startswith("Invalid \\escape"):
             json_to_load = fix_invalid_escape(json_to_load, error_message)

@@ -116,8 +114,7 @@ def correct_json(json_to_load: str) -> str:
                 json.loads(json_to_load)
                 return json_to_load
             except json.JSONDecodeError as e:
-                if CFG.debug_mode:
-                    print("json loads error - add quotes", e)
+                logger.debug("json loads error - add quotes", e)
                 error_message = str(e)
         if balanced_str := balance_braces(json_to_load):
             return balanced_str

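Dropping the explicit `if CFG.debug_mode:` guards works because level filtering now does the gating: `logger.debug(...)` is only emitted when the logger's level is DEBUG. A sketch of the intended setup, using the `set_level` method this commit keeps on the Logger singleton:

    import logging

    from autogpt.logs import logger

    debug_mode = True  # illustrative stand-in for CFG.debug_mode
    logger.set_level(logging.DEBUG if debug_mode else logging.INFO)

    logger.debug("json loads error - add quotes")  # emitted only in debug mode
    logger.info("The JSON object is valid.")  # always emitted
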
@@ -49,9 +49,8 @@ def validate_json(json_object: object, schema_name: str) -> dict | None:
 
         for error in errors:
             logger.error(f"Error: {error.message}")
-        return None
-    if CFG.debug_mode:
-        print("The JSON object is valid.")
+    else:
+        logger.debug("The JSON object is valid.")
 
     return json_object
 

@@ -208,9 +208,8 @@ def chat_with_ai(
                     [create_chat_message("system", plugin_response)], model
                 )
                 if current_tokens_used + tokens_to_add > send_token_limit:
-                    if cfg.debug_mode:
-                        print("Plugin response too long, skipping:", plugin_response)
-                        print("Plugins remaining at stop:", plugin_count - i)
+                    logger.debug("Plugin response too long, skipping:", plugin_response)
+                    logger.debug("Plugins remaining at stop:", plugin_count - i)
                     break
                 current_context.append(create_chat_message("system", plugin_response))
 

@@ -250,5 +249,5 @@ def chat_with_ai(
         return assistant_reply
     except RateLimitError:
         # TODO: When we switch to langchain, this is built in
-        print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
+        logger.warn("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
         time.sleep(10)

@@ -128,10 +128,9 @@ def create_chat_completion(
 
     num_retries = 10
     warned_user = False
-    if cfg.debug_mode:
-        print(
-            f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
-        )
+    logger.debug(
+        f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
+    )
     for plugin in cfg.plugins:
         if plugin.can_handle_chat_completion(
             messages=messages,

@@ -169,10 +168,9 @@ def create_chat_completion(
             )
             break
         except RateLimitError:
-            if cfg.debug_mode:
-                print(
-                    f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}"
-                )
+            logger.debug(
+                f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}"
+            )
             if not warned_user:
                 logger.double_check(
                     f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. "

@@ -184,11 +182,10 @@ def create_chat_completion(
                 raise
             if attempt == num_retries - 1:
                 raise
-            if cfg.debug_mode:
-                print(
-                    f"{Fore.RED}Error: ",
-                    f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
-                )
+            logger.debug(
+                f"{Fore.RED}Error: ",
+                f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
+            )
             time.sleep(backoff)
     if response is None:
         logger.typewriter_log(

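The three create_chat_completion hunks all sit inside one retry loop (`num_retries = 10`, with a `backoff` delay before retrying). A minimal self-contained sketch of that pattern; the doubling schedule and the exception type are assumptions for illustration, not the repository's exact code:

    import time


    def call_with_retries(make_request, num_retries: int = 10):
        for attempt in range(num_retries):
            try:
                return make_request()
            except ConnectionError:  # stand-in for the API errors caught upstream
                if attempt == num_retries - 1:
                    raise
                time.sleep(2 ** (attempt + 2))  # assumed backoff schedule
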
@@ -10,7 +10,6 @@ from colorama import Fore, Style
 
 from autogpt.singleton import Singleton
 from autogpt.speech import say_text
-from autogpt.utils import send_chat_message_to_user
 
 
 class Logger(metaclass=Singleton):

@@ -83,8 +82,6 @@ class Logger(metaclass=Singleton):
         if speak_text and self.speak_mode:
             say_text(f"{title}. {content}")
 
-        send_chat_message_to_user(f"{title}. {content}")
-
         if content:
             if isinstance(content, list):
                 content = " ".join(content)

@@ -103,6 +100,14 @@ class Logger(metaclass=Singleton):
     ):
         self._log(title, title_color, message, logging.DEBUG)
 
+    def info(
+        self,
+        message,
+        title="",
+        title_color="",
+    ):
+        self._log(title, title_color, message, logging.INFO)
+
     def warn(
         self,
         message,

@@ -114,11 +119,19 @@ class Logger(metaclass=Singleton):
     def error(self, title, message=""):
         self._log(title, Fore.RED, message, logging.ERROR)
 
-    def _log(self, title="", title_color="", message="", level=logging.INFO):
+    def _log(
+        self,
+        title: str = "",
+        title_color: str = "",
+        message: str = "",
+        level=logging.INFO,
+    ):
         if message:
             if isinstance(message, list):
                 message = " ".join(message)
-        self.logger.log(level, message, extra={"title": title, "color": title_color})
+        self.logger.log(
+            level, message, extra={"title": str(title), "color": str(title_color)}
+        )
 
     def set_level(self, level):
         self.logger.setLevel(level)

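With `info` added, call sites can swap `print(...)` for `logger.info(...)` and keep a single code path through `_log`. A brief usage sketch of the Logger API as it stands after this commit:

    from autogpt.logs import logger

    logger.info("Shutting down...")  # message-first, INFO level
    logger.warn("Invalid input format.")  # warning level
    logger.debug("json loads error - add quotes")  # visible only at DEBUG level
    logger.error("Error: ", "something went wrong")  # error(title, message="")
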
@@ -1,3 +1,4 @@
+from autogpt.logs import logger
 from autogpt.memory.local import LocalCache
 from autogpt.memory.no_memory import NoMemory
 

@@ -10,7 +11,6 @@ try:
 
     supported_memory.append("redis")
 except ImportError:
-    # print("Redis not installed. Skipping import.")
     RedisMemory = None
 
 try:

@@ -18,7 +18,6 @@ try:
 
     supported_memory.append("pinecone")
 except ImportError:
-    # print("Pinecone not installed. Skipping import.")
     PineconeMemory = None
 
 try:

@@ -26,7 +25,6 @@ try:
 
     supported_memory.append("weaviate")
 except ImportError:
-    # print("Weaviate not installed. Skipping import.")
     WeaviateMemory = None
 
 try:

@@ -34,7 +32,6 @@ try:
 
     supported_memory.append("milvus")
 except ImportError:
-    # print("pymilvus not installed. Skipping import.")
     MilvusMemory = None
 
 

@@ -42,7 +39,7 @@ def get_memory(cfg, init=False):
     memory = None
     if cfg.memory_backend == "pinecone":
         if not PineconeMemory:
-            print(
+            logger.warn(
                 "Error: Pinecone is not installed. Please install pinecone"
                 " to use Pinecone as a memory backend."
             )

@@ -52,7 +49,7 @@ def get_memory(cfg, init=False):
             memory.clear()
     elif cfg.memory_backend == "redis":
         if not RedisMemory:
-            print(
+            logger.warn(
                 "Error: Redis is not installed. Please install redis-py to"
                 " use Redis as a memory backend."
             )

@@ -60,7 +57,7 @@ def get_memory(cfg, init=False):
             memory = RedisMemory(cfg)
     elif cfg.memory_backend == "weaviate":
         if not WeaviateMemory:
-            print(
+            logger.warn(
                 "Error: Weaviate is not installed. Please install weaviate-client to"
                 " use Weaviate as a memory backend."
             )

@@ -68,7 +65,7 @@ def get_memory(cfg, init=False):
             memory = WeaviateMemory(cfg)
     elif cfg.memory_backend == "milvus":
         if not MilvusMemory:
-            print(
+            logger.warn(
                 "Error: pymilvus sdk is not installed."
                 "Please install pymilvus to use Milvus or Zilliz Cloud as memory backend."
             )

@@ -73,7 +73,7 @@ class RedisMemory(MemoryProviderSingleton):
                 ),
             )
         except Exception as e:
-            print("Error creating Redis search index: ", e)
+            logger.warn("Error creating Redis search index: ", e)
         existing_vec_num = self.redis.get(f"{cfg.memory_index}-vec_num")
         self.vec_num = int(existing_vec_num.decode("utf-8")) if existing_vec_num else 0
 

@@ -145,7 +145,7 @@ class RedisMemory(MemoryProviderSingleton):
                 query, query_params={"vector": query_vector}
             )
         except Exception as e:
-            print("Error calling Redis search: ", e)
+            logger.warn("Error calling Redis search: ", e)
             return None
         return [result.data for result in results.docs]
 

@@ -4,6 +4,7 @@ from weaviate.embedded import EmbeddedOptions
 from weaviate.util import generate_uuid5
 
 from autogpt.llm import get_ada_embedding
+from autogpt.logs import logger
 from autogpt.memory.base import MemoryProviderSingleton
 
 

@@ -35,7 +36,7 @@ class WeaviateMemory(MemoryProviderSingleton):
                 )
             )
 
-            print(
+            logger.info(
                 f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}"
             )
         else:

@@ -116,7 +117,7 @@ class WeaviateMemory(MemoryProviderSingleton):
                 return []
 
         except Exception as err:
-            print(f"Unexpected error {err=}, {type(err)=}")
+            logger.warn(f"Unexpected error {err=}, {type(err)=}")
             return []
 
     def get_stats(self):

@@ -15,6 +15,7 @@ from auto_gpt_plugin_template import AutoGPTPluginTemplate
 from openapi_python_client.cli import Config as OpenAPIConfig
 
 from autogpt.config import Config
+from autogpt.logs import logger
 from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin
 
 

@@ -33,11 +34,10 @@ def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
     with zipfile.ZipFile(zip_path, "r") as zfile:
         for name in zfile.namelist():
             if name.endswith("__init__.py"):
-                if debug:
-                    print(f"Found module '{name}' in the zipfile at: {name}")
+                logger.debug(f"Found module '{name}' in the zipfile at: {name}")
                 result.append(name)
-    if debug and len(result) == 0:
-        print(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
+    if len(result) == 0:
+        logger.debug(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
     return result
 
 

@@ -71,12 +71,12 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
                 if response.status_code == 200:
                     manifest = response.json()
                     if manifest["schema_version"] != "v1":
-                        print(
+                        logger.warn(
                             f"Unsupported manifest version: {manifest['schem_version']} for {url}"
                         )
                         continue
                     if manifest["api"]["type"] != "openapi":
-                        print(
+                        logger.warn(
                             f"Unsupported API type: {manifest['api']['type']} for {url}"
                         )
                         continue

@@ -84,11 +84,13 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
                         manifest, f"{openai_plugin_client_dir}/ai-plugin.json"
                     )
                 else:
-                    print(f"Failed to fetch manifest for {url}: {response.status_code}")
+                    logger.warn(
+                        f"Failed to fetch manifest for {url}: {response.status_code}"
+                    )
             except requests.exceptions.RequestException as e:
-                print(f"Error while requesting manifest from {url}: {e}")
+                logger.warn(f"Error while requesting manifest from {url}: {e}")
         else:
-            print(f"Manifest for {url} already exists")
+            logger.info(f"Manifest for {url} already exists")
             manifest = json.load(open(f"{openai_plugin_client_dir}/ai-plugin.json"))
         if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"):
             openapi_spec = openapi_python_client._get_document(

@@ -98,7 +100,7 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
                 openapi_spec, f"{openai_plugin_client_dir}/openapi.json"
             )
         else:
-            print(f"OpenAPI spec for {url} already exists")
+            logger.info(f"OpenAPI spec for {url} already exists")
             openapi_spec = json.load(open(f"{openai_plugin_client_dir}/openapi.json"))
         manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec}
     return manifests

@@ -115,13 +117,13 @@ def create_directory_if_not_exists(directory_path: str) -> bool:
     if not os.path.exists(directory_path):
         try:
             os.makedirs(directory_path)
-            print(f"Created directory: {directory_path}")
+            logger.debug(f"Created directory: {directory_path}")
             return True
         except OSError as e:
-            print(f"Error creating directory {directory_path}: {e}")
+            logger.warn(f"Error creating directory {directory_path}: {e}")
             return False
     else:
-        print(f"Directory {directory_path} already exists")
+        logger.info(f"Directory {directory_path} already exists")
         return True
 
 

@@ -159,7 +161,7 @@ def initialize_openai_plugins(
             config=_config,
         )
         if client_results:
-            print(
+            logger.warn(
                 f"Error creating OpenAPI client: {client_results[0].header} \n"
                 f" details: {client_results[0].detail}"
             )

@@ -212,8 +214,7 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
         for module in moduleList:
             plugin = Path(plugin)
             module = Path(module)
-            if debug:
-                print(f"Plugin: {plugin} Module: {module}")
+            logger.debug(f"Plugin: {plugin} Module: {module}")
             zipped_package = zipimporter(str(plugin))
             zipped_module = zipped_package.load_module(str(module.parent))
             for key in dir(zipped_module):

@@ -240,9 +241,9 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
                 loaded_plugins.append(plugin)
 
     if loaded_plugins:
-        print(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
+        logger.info(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
     for plugin in loaded_plugins:
-        print(f"{plugin._name}: {plugin._version} - {plugin._description}")
+        logger.info(f"{plugin._name}: {plugin._version} - {plugin._description}")
     return loaded_plugins
 
 

@@ -6,6 +6,7 @@ from selenium.webdriver.remote.webdriver import WebDriver
 
 from autogpt.config import Config
 from autogpt.llm import count_message_tokens, create_chat_completion
+from autogpt.logs import logger
 from autogpt.memory import get_memory
 
 CFG = Config()

@@ -86,7 +87,7 @@ def summarize_text(
 
     model = CFG.fast_llm_model
     text_length = len(text)
-    print(f"Text length: {text_length} characters")
+    logger.info(f"Text length: {text_length} characters")
 
     summaries = []
     chunks = list(

@@ -99,7 +100,7 @@ def summarize_text(
     for i, chunk in enumerate(chunks):
         if driver:
             scroll_to_percentage(driver, scroll_ratio * i)
-        print(f"Adding chunk {i + 1} / {len(chunks)} to memory")
+        logger.info(f"Adding chunk {i + 1} / {len(chunks)} to memory")
 
         memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}"
 

@@ -108,7 +109,7 @@ def summarize_text(
 
         messages = [create_message(chunk, question)]
         tokens_for_chunk = count_message_tokens(messages, model)
-        print(
+        logger.info(
             f"Summarizing chunk {i + 1} / {len(chunks)} of length {len(chunk)} characters, or {tokens_for_chunk} tokens"
         )
 

@@ -117,7 +118,7 @@ def summarize_text(
             messages=messages,
         )
         summaries.append(summary)
-        print(
+        logger.info(
             f"Added chunk {i + 1} summary to memory, of length {len(summary)} characters"
         )
 

@@ -125,7 +126,7 @@ def summarize_text(
 
         memory.add(memory_to_add)
 
-    print(f"Summarized {len(chunks)} chunks.")
+    logger.info(f"Summarized {len(chunks)} chunks.")
 
     combined_summary = "\n".join(summaries)
     messages = [create_message(combined_summary, question)]

@@ -119,7 +119,7 @@ def generate_aiconfig_manual() -> AIConfig:
         "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
         " multiple businesses autonomously'",
     )
-    print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
+    logger.info("Enter nothing to load defaults, enter nothing when finished.")
    ai_goals = []
     for i in range(5):
         ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")

@@ -139,7 +139,7 @@ def generate_aiconfig_manual() -> AIConfig:
         Fore.GREEN,
         "For example: $1.50",
     )
-    print("Enter nothing to let the AI run without monetary limit", flush=True)
+    logger.info("Enter nothing to let the AI run without monetary limit")
     api_budget_input = utils.clean_input(
         f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $"
     )

@@ -69,6 +69,8 @@ class ElevenLabsSpeech(VoiceBase):
         Returns:
             bool: True if the request was successful, False otherwise
         """
+        from autogpt.logs import logger
+
         tts_url = (
             f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}"
         )

@@ -81,6 +83,6 @@ class ElevenLabsSpeech(VoiceBase):
             os.remove("speech.mpeg")
             return True
         else:
-            print("Request failed with status code:", response.status_code)
-            print("Response content:", response.content)
+            logger.warn("Request failed with status code:", response.status_code)
+            logger.info("Response content:", response.content)
             return False

@@ -5,27 +5,17 @@ import yaml
 from colorama import Fore
 from git.repo import Repo
 
+from autogpt.logs import logger
+
 # Use readline if available (for clean_input)
 try:
     import readline
-except:
+except ImportError:
     pass
 
 from autogpt.config import Config
 
 
-def send_chat_message_to_user(report: str):
-    cfg = Config()
-    if not cfg.chat_messages_enabled:
-        return
-    for plugin in cfg.plugins:
-        if not hasattr(plugin, "can_handle_report"):
-            continue
-        if not plugin.can_handle_report():
-            continue
-        plugin.report(report)
-
-
 def clean_input(prompt: str = "", talk=False):
     try:
         cfg = Config()

@@ -58,12 +48,12 @@ def clean_input(prompt: str = "", talk=False):
             return plugin_response
 
         # ask for input, default when just pressing Enter is y
-        print("Asking user via keyboard...")
+        logger.info("Asking user via keyboard...")
         answer = input(prompt)
         return answer
     except KeyboardInterrupt:
-        print("You interrupted Auto-GPT")
-        print("Quitting...")
+        logger.info("You interrupted Auto-GPT")
+        logger.info("Quitting...")
         exit(0)
 
 

@@ -10,11 +10,14 @@ cfg = Config()
 
 def configure_logging():
     logging.basicConfig(
-        filename="log-ingestion.txt",
         filemode="a",
         format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s",
         datefmt="%H:%M:%S",
         level=logging.DEBUG,
+        handlers=[
+            logging.FileHandler(filename="log-ingestion.txt"),
+            logging.StreamHandler(),
+        ],
     )
     return logging.getLogger("AutoGPT-Ingestion")
 

@@ -26,12 +29,13 @@ def ingest_directory(directory, memory, args):
     :param directory: The directory containing the files to ingest
     :param memory: An object with an add() method to store the chunks in memory
     """
+    global logger
     try:
         files = search_files(directory)
         for file in files:
             ingest_file(file, memory, args.max_length, args.overlap)
     except Exception as e:
-        print(f"Error while ingesting directory '{directory}': {str(e)}")
+        logger.error(f"Error while ingesting directory '{directory}': {str(e)}")
 
 
 def main() -> None:

@@ -69,24 +73,22 @@ def main() -> None:
 
     # Initialize memory
     memory = get_memory(cfg, init=args.init)
-    print("Using memory of type: " + memory.__class__.__name__)
+    logger.debug("Using memory of type: " + memory.__class__.__name__)
 
     if args.file:
         try:
             ingest_file(args.file, memory, args.max_length, args.overlap)
-            print(f"File '{args.file}' ingested successfully.")
+            logger.info(f"File '{args.file}' ingested successfully.")
         except Exception as e:
             logger.error(f"Error while ingesting file '{args.file}': {str(e)}")
-            print(f"Error while ingesting file '{args.file}': {str(e)}")
     elif args.dir:
         try:
             ingest_directory(args.dir, memory, args)
-            print(f"Directory '{args.dir}' ingested successfully.")
+            logger.info(f"Directory '{args.dir}' ingested successfully.")
         except Exception as e:
             logger.error(f"Error while ingesting directory '{args.dir}': {str(e)}")
-            print(f"Error while ingesting directory '{args.dir}': {str(e)}")
     else:
-        print(
+        logger.warn(
             "Please provide either a file path (--file) or a directory name (--dir)"
             " inside the auto_gpt_workspace directory as input."
         )

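The configure_logging change lines up with the stdlib contract: `logging.basicConfig` raises a ValueError if `filename` is combined with `handlers`, so the file target moves into an explicit FileHandler, and the added StreamHandler echoes ingestion logs to the console as well. A condensed sketch of the resulting setup:

    import logging

    logging.basicConfig(
        format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s",
        datefmt="%H:%M:%S",
        level=logging.DEBUG,
        handlers=[
            logging.FileHandler(filename="log-ingestion.txt"),  # persists to file
            logging.StreamHandler(),  # echoes to stderr
        ],
    )
    logging.getLogger("AutoGPT-Ingestion").info("reaches both destinations")
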
@@ -28,7 +28,7 @@ python -m pytest
     :::shell
     pytest --cov=autogpt --without-integration --without-slow-integration
 
-## Runing the linter
+## Running the linter
 
 This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting.
 We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`.

@@ -1,29 +0,0 @@
-import os
-import sys
-import unittest
-
-from bs4 import BeautifulSoup
-
-sys.path.append(os.path.abspath("../scripts"))
-
-from browse import extract_hyperlinks
-
-
-class TestBrowseLinks(unittest.TestCase):
-    """Unit tests for the browse module functions that extract hyperlinks."""
-
-    def test_extract_hyperlinks(self):
-        """Test the extract_hyperlinks function with a simple HTML body."""
-        body = """
-        <body>
-            <a href="https://google.com">Google</a>
-            <a href="foo.html">Foo</a>
-            <div>Some other crap</div>
-        </body>
-        """
-        soup = BeautifulSoup(body, "html.parser")
-        links = extract_hyperlinks(soup, "http://example.com")
-        self.assertEqual(
-            links,
-            [("Google", "https://google.com"), ("Foo", "http://example.com/foo.html")],
-        )

@@ -3,7 +3,7 @@ import pytest
 from autogpt.agent import Agent
 from autogpt.commands.command import CommandRegistry
 from autogpt.config import AIConfig, Config
-from autogpt.memory import NoMemory, get_memory
+from autogpt.memory import LocalCache, NoMemory, get_memory
 from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
 from autogpt.workspace import Workspace
 
@@ -19,6 +19,16 @@ def agent_test_config(config: Config):
     config.set_temperature(was_temperature)
 
 
+@pytest.fixture
+def memory_local_cache(agent_test_config: Config):
+    was_memory_backend = agent_test_config.memory_backend
+
+    agent_test_config.set_memory_backend("local_cache")
+    yield get_memory(agent_test_config, init=True)
+
+    agent_test_config.set_memory_backend(was_memory_backend)
+
+
 @pytest.fixture
 def memory_none(agent_test_config: Config):
     was_memory_backend = agent_test_config.memory_backend
@@ -101,3 +111,38 @@ def writer_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace)
     )
 
     return agent
+
+
+@pytest.fixture
+def memory_management_agent(
+    agent_test_config, memory_local_cache, workspace: Workspace
+):
+    command_registry = CommandRegistry()
+    command_registry.import_commands("autogpt.commands.file_operations")
+    command_registry.import_commands("autogpt.app")
+
+    ai_config = AIConfig(
+        ai_name="Follow-Instructions-GPT",
+        ai_role="an AI designed to read the instructions_1.txt file using the read_file method and follow the instructions in the file.",
+        ai_goals=[
+            "Use the command read_file to read the instructions_1.txt file",
+            "Follow the instructions in the instructions_1.txt file",
+        ],
+    )
+    ai_config.command_registry = command_registry
+
+    system_prompt = ai_config.construct_full_prompt()
+
+    agent = Agent(
+        ai_name="",
+        memory=memory_local_cache,
+        full_message_history=[],
+        command_registry=command_registry,
+        config=ai_config,
+        next_action_count=0,
+        system_prompt=system_prompt,
+        triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
+        workspace_directory=workspace.root,
+    )
+
+    return agent
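As a usage sketch (a hypothetical test, not part of the diff), these fixtures compose by name: pytest injects `memory_management_agent`, which transitively pulls in `agent_test_config`, `memory_local_cache`, and `workspace` from the conftest above. The attribute names checked below are assumptions based on the `Agent(...)` arguments shown in the fixture.

    :::python
    def test_memory_management_agent_setup(memory_management_agent):
        # The fixture chain swaps the memory backend to LocalCache for the
        # duration of the test and restores the previous backend afterwards.
        agent = memory_management_agent
        assert agent.memory is not None  # LocalCache backend wired in by the fixture
        assert agent.ai_name == ""       # set explicitly in the fixture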
@@ -332,4 +332,174 @@ interactions:
     status:
       code: 200
       message: OK
+- request:
+    body: '{"input": ["Assistant Reply: { \"thoughts\": { \"text\": \"thoughts\", \"reasoning\":
+      \"reasoning\", \"plan\": \"plan\", \"criticism\": \"criticism\", \"speak\":
+      \"speak\" }, \"command\": { \"name\": \"google\", \"args\":
+      { \"query\": \"google_query\" } } } Result: None Human
+      Feedback:Command Result: Important Information."], "model": "text-embedding-ada-002",
+      "encoding_format": "base64"}'
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '483'
+      Content-Type:
+      - application/json
+    method: POST
+    uri: https://api.openai.com/v1/embeddings
+  response:
+    body:
+      string: !!binary |
+        H4sIAAAAAAAAA1R6WxOyurbl+/4Vq9YrvUtEJWG9IfeLJihesKurCxC5iSCQBHLq/Pcu/E6d7n7x
+        AfFCMuaYY4yZ//jXX3/93SZVlo5///PX3+9yGP/+H8u1ZzzGf//z1//8119//fXXf/xe/787sybJ
+        ns/yk/9u/71Zfp7Z9Pc/f4n/feX/3vTPX3/fopdD0flZVNPUPC8yT+sT3gt8pY/uh6XKIzl8CTzP
+        74Rhggh4m3hNHTU6VhN7kC18XyqDotgb+zmfIhk+DKen3kq1uXg3HwJceS8R25JtA15/gkgZ7UnA
+        TptGFXfomsGpRFfq7XDr94G9VxVzjTbUY1RLlu+XQXJ53LE261NCuyg4w6QiDZHiXKrKTRMg+KJl
+        RuBWrkPyiQQGU2j55ASHhnM1HnIomOyK/UP46VlvDDGM50TFfqDyiqUNOyuv9Lahhye6hzPnWQTX
+        0DngC2N2zxHqDMiu8I6RNGxBvy4uKRxflxRn7lr0yfd0G6D9kXSMwuTT8yG7s13IOgHbO5FzPiY1
+        gUPXrSiGmtF3X3RSYWolBLEi6qohbVigrDwhJErOdX924KlWtIgcqepU34SIL3sLt1W6RYA/hpBd
+        XlYN7qJ2JXxb0rAtJOBAr1hv0EbURZ88Qa3CO68k7Hjw40+nMUeKEphbspncgg9QLSM4xqs3Vldq
+        2Q+N4auwuBY11W3fqDYwfjLZQOOZ7vX+0k8nZT9AZH0YdYzjPpl1ZEiwdpQNUjYbLZwf5gZBr0EE
+        yXfb1ScRHBFEb4QQUJTGZ6etZ0E8ZQeq7nDJSSh3AVTF3UR1cQQ9f/Rghrt9s8HqIbgl3dGzIazi
+        zZaqvQjC6fM+11AAnUH9G90l8yRLGdRbsSJSo0nhDx874yTL2DtEVcjelmDJT1utcDaLec/ZfmvA
+        q9po1EykIZnDzhRhfMgDnJovE4xm6NTQEsIj+azevJpv+HSAard6oM333erTls0GWGnUwK7DN9W4
+        G0wLNifhjI3tM+x5FRdQcTV5Qq0ajRV7R48cesn5RtYAv8NJj9sWyhZbY0MqPsm8CzADv/U/lPtJ
+        58q9HGQeHWO0RpHaSyvCNeW9eiUYqXcnmd9lxIAZrwKkMA2B6f18OHA9SAoBtI56LhFPlsVTOC37
+        U4SDaHcGkG+WgS1SQp9kzrlUhq5fYZWlnc+d/BLAfZhjanrVwOe5oRA+SHTCqLmvknnclNqqt8SC
+        PrEE+Dhf1Bk2gD6wE+GGz1BKLEj6JsbHfez5bGUypnjl7Ua9ZwX0Sex7AhJpeyTysy8BewRWBpUU
+        vwhJd2FPuDHJ8Ni+Juyvdmd/krtYkyV7ulJNqO468UPYwMo4R3SfayEgD3vnycA/a9R7H7Vqs5dg
+        Bm/9l1L9/IbVpE4lgsVoaTjQFKlqjuMjg7f0nqMdqu/JFL/cFFTxekujyH75v/oGIJ4q6th2mHDz
+        +4kg/OgFKRa804MxEvg9zh49qmmt840Xy2B0zk9qW49rwkmbOFAJ6js2d/6pkqxSJNAs9YAoJ6r3
+        kzp9EVhfZwGrRdT1k51oGiw0KaWOcDcr8aoEKVzwgrVCf3N2TcNSMYm3wQi1Rz6JwERwT9071a/p
+        N6GMpY2MOqPBL1xs9bFwijOM5Rbj+4m1nKNMa2C9P9rk/TG3Pnt9DwN0b1ZEfXx89Pw5PD14LjSb
+        yDa79mx3r6Bsf0Qd8chnCZ3dqYSZc3FJMSeFzp6lHgAxkjC1nncjHGCgy5CBtUXtjXnkE1lFIjSb
+        yMbJBuX6JB7aARhXfiav/vjWp1XHY8ir9kQtfPLAdNzwmyy9wQXJFLbJYH0DWV7qjXqm2PXvfDgF
+        UIZ9jTjTuoocTm4OdkOFyMZ4dsngzucblDo+4MN12FSD7YYQipvvih78wOxJJq1UmCm5TdXhc03m
+        /KWk8NrnD2qiSK1Y9Vx9wRoRAXt3u/PnPAgh9ByLUkshVTIJxz6TrbI1sakIfcIOm5xAaY0qatw8
+        J2HnS2wASyqf1Oi9kPPrVhXhFHfSgvfE5/k1rJXUehBqrjOz38jn9Ca3uxWhe20kFWvRqoRSLECs
+        CyHWqcwfZ+iO3p2sG/us98q9JPC7YgVF/u7DZ8N/zMBxpZii4/fDl342w+YxaljXbwFgZeh7snLJ
+        KMY38wQ2xf5ogU14KxEzh7QfLRqVcH1lAjZipfQndJPiHx4w/kSnkHu7yVHgtNphu2GXZFY/6gVe
+        lCimzjXPOVvpkgh2R5LRvQwswPZP14Af/J6wrl7uOnNuWgzkx0vBfgfUipw0N4LwYLvYzldiz/N6
+        bUH3ARn1TXoE8+68J4pyjCk2Nd32e+IGDsw3jzeNEncLSK7OKRDM+UpxbyrJV+G5BovhXWEru36T
+        WX1bMTQPTo5NfT/qfE4tA0p3o6aHVef5ZK8HN3jt9zLe36ioT+uWfeGzEABGluPobGKrA6wvMcH7
+        9GqD4RX3B3BzvxE+HmXK5yGJLVlzYwEfsVD1VC4UCU6NMWLrLEh85CwgCtiCErvtgVWt8662CkPy
+        HW220hGw2yNP4b19Z+RtstDnJWEZHJPMoOb82XDii1MOtnPYkt0pVMJB6FoGX5K9wobmlLwvj6AE
+        76AdcLgtcULWaSrCQGq93//3OV3vG8iFuqX7jvOeDF/ewMIaRGp06APGBDxi+MkOGGuf2e8JausU
+        quQWYLyJJT6sdEEEXzMH+IilBLBG7FKoSdIB47TzK/4ZX7J8bJ8T3UvOUZ/XIzsr95paGEXWA/Dp
+        UNfwZGYyVY8oDbnnngcZefCD1dqmVafvqhaEN2Ri119PITkmkgZHVUfY2vf3ZNA+6xo4JtTwq6qU
+        P3gBjzL40iDuqorI2daD/WnekTk7RDrHj2sDz+28IcL+SkJO7NsBiunwodqqqwFd9AJY+IKsiHzS
+        6VtcCZAe64nMq67m/VWytsr3yDycLHphoNtXAOyAGdT8mJE+m1Y9K5aRW/R4j5Jq4UNBeU6Bt/QD
+        Ho5XJrYQN/qVwPbtcdFD3xvklTuTebN6huyqRCmkFVSp8RxafT6dShk8DK/HnpRrfp9LQQmOg9hS
+        zXicQbeK7hDGssvQSrXVZD3Z8Q1UzZliJznlPW9ldIH6cevTxM+PlfTZgQFe4l1NUZ2EIa3iQgC9
+        9vwifo9uIV+d1wN0+LukjisUPnXy9AzyTfJGz12rhRsv+Zwh/5QddS3jVJEvbRtwFY4Fja/6OpkN
+        /8RgkcOEquWN9ryYqhKa9tSRKcovPquemy/YyuWGuhp+Av7a6Q0kJj5QdKhjn0MQE3AKkvOfz7M5
+        K1vF3mdrJFbtxR/vMtOU7bFxkLD4iW9OMwi6NKsRsPsdmL5wXYI6KG/YfzQfn83K+6vUczki/s4o
+        6OazJcK7NlBq4Ab1FMZXBndPvCK7avAq3s/NGQYX94JE1DoVHQ86gnorVWitKrtw8gwnBZMInngv
+        tGdAUeY10E6TA2EGkXpuRbEE1fLQIUF/lDqtvnomE9euETDpkfO19kiBgI4SNaIdCZlyDCQ4Cv6B
+        VGveJQufDUr9MSVq80DW5wM75NB9v3dk/epVf1P5TgYPfaVR1RxgNY1zGUH3jFTyDty8mstdfIEn
+        93omKVKmnt22fQZKH+3JdtFXvVa+BTB8nYyej6smIQLeavBPP86uXjgDr/hC13dU+uPb8Zq1FsRM
+        GSl6rorkexxPKWisKqF+EYZ6W9HEgJ8zf2C0PC/fWzcV5quviPXvye03fcAOskbnA6psHQL+rQz2
+        01dYj7Ghc7ZnhrL0M7wvnKKnWfc1YBz5D4zI3uK8liYZuhZ3yXolbxKiWnoEvcm3iLTxumoQdt4Z
+        yKh4Yzu9lH1XWp8bXNYXY30/h81aOTEQnqsV9fy3m7CWwgyGxRrhoxYFPqnfvgiFDRvoLclg9X1d
+        VajYwWwgmB2UhFy1coaege5k3uzvCdOvnQrXloCwxa9rPudTICvZ95KjMeiuYMr0eAupOzywfau0
+        iknnaw1HmwuLf+l0XpJtKluqi7HabWky3Qa73Sr0YVGNXvqEi+tSgEUtcuq/HQu0YeGUMKmGhh4K
+        3dene3epYXnzHti5ZRYQv+4pVqbGGolSwHVS39csUqrkcscnMxWr+Tg+Ujjf43B5XqYv+BTBhMeM
+        rJ3XQ59zeoNQf1QXtE1Oas/w6uvB8+PVEvDMViEr3i8ChgJgqn7cLuS1laSy3PID9jZZ69Ov4c7A
+        V7MrPSq5XI07QBoonk4T9fvNoZrCyzAAKNg5PeZc1zfppDkQIWmF1lc5D6crg1+QEbTFqBiO4eB/
+        5zO8CoOK789zW01FpLGfHqUvTR9ClkonBuWnJJLVTGvA3NMzgsAPNDQnZ5NvhlH1lMva9ala27if
+        b8U4w5dwRxhd0L6X/HlVA/O9a7G+y9fJbOf7r3w9AICNXfKouAtrBMPYelJPUxJAgEMH8A6+w8/v
+        6iTqOwd2VVkTZR9//blB3hl0fXghEzJKfURrJ4DgBnwiJsDRx/M2HaBFFIEiDxSgrZNDA/eVzjC+
+        2j5Y+9ktg6e03KGJ3ceEX5sxAj2fAuqxj9yzraeUYNH/2L9OPBykYmWAvhZybNpiwydDlFoIJmem
+        etFBfXxePxl8SeYK+9tV7lN19ylhaW08/OO7yQsLAQqrIqWaH+Qhf+ttJL+PT4NmdcLDrjWuLZS3
+        VoL3F+74U1R/Gnh+Si/qnkIlIdnnWcLr3DywRm5myNMi/8Ky4CX2x/ZQzcXQbyEP0i8+2n7Haez4
+        M4Ty0aHGuioSlsR7SdH2X0Td3SvQKUKFofz46YteST9rdD8r7vuzI1BvMp+l0mOG2zTe0r0VxxWr
+        /d6Ac7MTqd19Sp+VetEqb34XqRraFZ+f/rGBw9qQ8D7UHH/hx/inf5d6aQBrg10GR8E9/MmP6PMY
+        e5BI+Uz44vfyzctQ4Qd/JrLkJ/1M2R4CdR0Fv/WrJrR2znCpL6qNwaYf7N0cw0uZ6Wi3ewX+FJyC
+        M2wQ+yJZUxI+tbOrwsW/UJfv0pD89OAFKweyWfKb2V4ZX7Dw4+In8op9wroEBtycSOevp4QDUgfw
+        tK9LqhaRWzEY+FvI0PZOnyda9VMVPmdwRlWAJNq6YP20TAFKxKko3ptW8k2FlwX53k/QuoNvTsHL
+        Y3CXJx8EorbwGWbqF4p7FuDjwh+0e68kqCZOR7PFH9DKRTl8m+RC91apJZLjAwdepWzCtv7FiYhX
+        XwfGezPG1Pk0/vAgwUVxt1OOzZ9eDbMKwuzu7LAty/uKH9aGB8LCe1L19XH7GblFCpf+SJ01jXzy
+        MFcHeHkFCdmF98mfrmqG/uh999D6yXrlbSOwZtobbR/HAYys125Qr6GPo6P0SHiIvQyGt4NJ1be5
+        rsgNPw5gdZV7ii2p8VnQJy1Q3PJOVoPw9kdlSjLgPIcZ+3TIQ17sTQNOZ2CRpf/3w7e9xPB2LBlp
+        9fiTzAA4Bvwo6mrJD8uEb+/uDdqCrJFNFqCKb7qrDFfaaGBLp76+IRuxVcJzsSJzzU5g0eslgNKF
+        Ud3WU8DKcmjBJXTEBY8fzk5t3gBzZ2P882stuxWHH38TLo5JP2fSSgMfBiIEPe8O6OG298BJmnMC
+        +09djY8ClnCvXlp6cdeIr6P6U//05sLXjr7x3DOB7pbnJFj0Qz8pvgZ+eaDdFmLCrY94UWjAHtiL
+        tq4+tcazBbt6zqjx2FySce2RGTBtZtRY+I1rM07Bc6fJSIi9Y8Wcdy/DpX6R/D5qPTtfzobyW8/5
+        TTacdclbgB7evAgrb7iiJWEpzMhhS8JtSZORZKEMxWcSoQ/Y2n2VFnkLzVgJ6GEKIKCwW523Gyf2
+        fn7Cn9/B+gYvShxjx9Vf/XSXmQqgmu2xO6y+Cdsn2IPkgyuqM83tZydJ/2v/+cOVwkV/WErIeoFc
+        jXn02TYNINxevg6CU1RXk7wFOXgFOCDi4qemqClyeAR9hC3adqDwwk6A+BsfsHk2h55stgMDzenF
+        MarY3herIDDgq28tmsnFoVry7QHUqZtR20OdPifqyVA0ehoJS98YTMP6mQGLrAR0nboTEPW3V+6e
+        O1XG6g5rnMBm10Dn+qqoeTYPPVj6Mdwk5hmbxjzqTUKUGrKrcEfwzvpkxNpHhhGCA6lNbe0Pevgx
+        4LGFAdWliSZTs/l4MBqnlCIlOFd9/EEzIJcyIehdFPq8P+9EyE6fHCORDWDG710E05P4xv5bfenf
+        X5658DkR7Z1WLev7BdZF/1DD2bSAd5vkDIon41hzD0PFSTS1ytKPiOhsWj5/ImkGwcW/0IPeCDqf
+        xiJXyt1Fxd7FxOG08li8e1XRhp7bpwLI5TYFUEubN9aDzEho+bxqsO6zkShouwaMNRqBi/6nP7wv
+        eaMKNcoOCBQh19n67jVQSY8vNITgXv34E17lq4s976Mns7nREfzVozU0gS9mQnIAPz1gy/ODj5E5
+        NPIzs0oyPZqCT6SwIOi3no3y/Aw4rT9BrAD7Df/U2+K/blsihCWiggs5D7NegCczlbHpvhveCccq
+        hc5GeGC8+DeedaUFsyZzyIL/amMIyhZsmtz95U096y/m9sc32MfXIpzNmxXALmoOhOAm5Fy8EA1Y
+        nyNa8Ff47PZoU6Aaxhcb4LLXf35febxsjYjNa883lYtKeAtIROZKufNf3gTAbefjfbPbJ5K9OrRQ
+        aJ9P6q92s88jQkR5yQvw8R6BaoLnIIP4EByxrRQvPgHzq0Hqkgda3/W5X28upacseTJ2dvViR+s4
+        Anh9qbEZiXNPkXKsf/MOsi15Hs6K7jagTTyBVBAn4ThqPpRHIzKJuORl618e4e2iKwFLvstPpm1A
+        4po1mvevvGdsPRx+9T72XrwCVMqKCJYjcKn2RLM+7tK+hukTHn75lT7fM5lAK6lNjM+RCUTMnC8M
+        +mOI1den63nQHLRfP8JPciI//MdgiDcYrQwlq9jZ2DZQ2Z8cNP346LtXEOzcs4I9MSXhSFaBBC5f
+        xKl/2+x1vpGnG7gGZkXNtLNC8XZv8p+/wE5kr/xh28sZlAstxX7t5aD9uo8Ymm/QUj22YciNobvJ
+        y/NgVZEvYH7nmQRHIzaxdcAnf7qcpi/45W+4jP2EqS/owB24CtQ+grXfit3dAc6o7ukyvwIUf3ca
+        XPQD1cV1Fc7+O9LgE/GQ7Epi9O/Uk6FsPLov2XkbPxlfwijJhgVLmmrt1v+jhy93/UAWfusnn+5k
+        uBeyBu/v6z1YL3iQf/m0q2EF8LRov9A5fXw0XMip3wj65QB3/IPQz9/x80nJ5R9fRsFo6zzEWgbt
+        z70m4rJf7G1JFqw/toRdcbglE0FyDJEnfOiiRxPuufEA3+CjU5X0nc5uzlEFnJ4e2JjI2LPEIRE0
+        toGz5CmlTw77XFV0UPXUaF4Fnwxp28C2sW8Ur7Mvnw33LsFfvnPrNl99qsTzAOzPtcY6vFf6/KrL
+        VFnyanr81hqXztnppqDMQfSy9Asaf6wZtmGE/sz/eHx/n5W2RALWssNWn9nhAQFODAUJs5hXjN0K
+        pHi7+EqPxiVNxtXuIsMXzTMkZus4XPqTAIVnZaJ5MvtwEgaSysVoaPiw8P3PH8BcEHTsxP4ZzOte
+        uoFJcQZqvB/in3xFLnc3lTqfTqhm81MMP/7A2Pk0+h9/u/Q/spXGTG8Pm/bPfA6j74H7LdCfGSw1
+        YUYf67EOJ+MKWji/fJ2i5v5KFjyKwK/8J5mcsEx++wUWvqUH+GHh3IVRC08SywkQwMFn1WtqYNKe
+        KD7KTRvyylfTP/MziyVLPOCPX+iIxp5Gy/xrWPIjeNnlGtUuyTYca/oeYHTTBvQVtAIsec0ByJr8
+        xD+8MamaNFCez3uKHodPz9GBer/8Eu1OyVuf78qaAHHTrvAprm1Oln6m7JPzCmOAvWpCdirDjb3r
+        qbbot0XfB7DP3x3VJrNPFr5BP71O/Vp76AR09Re2hxvCep13YB5cqVWM2HOxpykADPpby8GSj5Lx
+        KFPQGaLQ/vILROvbvR9Xu1SG7/39iarFn/LAdlWIDU2gP30+uRYo5fal38gUqCScwNUv5V0gW9Sp
+        6zxc+t8AHFPQsJnf5nAUSB4rzOoc6l68Xh/HSpWhDARO7cndg01EGhGyUbthLQeZz1dq58GVSBOy
+        zFNC+tuPxf+h0yOTdfYaNjNcdcYZe1tXDtn+ubeUhhknetklu/4rnvILNA9eTo3mLvvvG9k3yt+/
+        UwH/+a+//vpfvxMGTfvM3svBgDGbxn//91GBf8fP+N+iKP2bSn9OIpAhzrO///mvQwh/d33bdOP/
+        Hts6+wx///OXIv85bvD32I7x+/+9/q/l1/7zX/8HAAD//wMAPe/DkOMgAAA=
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 7bfdedf56918cfbc-SJC
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Sun, 30 Apr 2023 07:04:15 GMT
+      Server:
+      - cloudflare
+      access-control-allow-origin:
+      - '*'
+      alt-svc:
+      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
+      openai-organization:
+      - user-adtx4fhfg1qsiyzdoaxciooj
+      openai-processing-ms:
+      - '25'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=15724800; includeSubDomains
+      x-ratelimit-limit-requests:
+      - '3000'
+      x-ratelimit-remaining-requests:
+      - '2999'
+      x-ratelimit-reset-requests:
+      - 20ms
+      x-request-id:
+      - f2850337441001a8e5bd70bacb4dede8
+    status:
+      code: 200
+      message: OK
 version: 1
tests/integration/challenges/__init__.py (new file, 0 lines)
tests/integration/challenges/conftest.py (new file, 17 lines)
@@ -0,0 +1,17 @@
import pytest


def pytest_addoption(parser):
    parser.addoption(
        "--level", action="store", default=None, type=int, help="Specify test level"
    )


def pytest_configure(config):
    config.option.level = config.getoption("--level")


@pytest.fixture
def user_selected_level(request) -> int:
    ## used for challenges in the goal oriented tests
    return request.config.option.level
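To illustrate how the option flows through, a hypothetical challenge test (not part of the diff) can consume `--level` simply by declaring the fixture as a parameter; the defaulting logic below is illustrative:

    :::python
    def test_challenge_level(user_selected_level):
        # --level is optional; fall back to level 1 when the flag is not passed.
        level = user_selected_level or 1
        assert level >= 1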
tests/integration/challenges/memory/__init__.py (new file, 0 lines)
File diff suppressed because it is too large
@@ -0,0 +1,771 @@
interactions:
- request:
    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
      are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
      using the read_file method and follow the instructions in the file.\nYour decisions
      must always be made independently without seeking user assistance. Play to your
      strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1.
      Use the command read_file to read the instructions_1.txt file\n2. Follow the
      instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word
      limit for short term memory. Your short term memory is short, so immediately
      save important information to files.\n2. If you are unsure how you previously
      did something or want to recall past events, thinking about similar events will
      help you remember.\n3. No user assistance\n4. Exclusively use the commands listed
      in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append
      to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
      Delete file, args: \"filename\": \"<filename>\"\n3. read_file: Read file, args:
      \"filename\": \"<filename>\"\n4. search_files: Search Files, args: \"directory\":
      \"<directory>\"\n5. write_to_file: Write to file, args: \"filename\": \"<filename>\",
      \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n7.
      get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8. get_text_summary:
      Get text summary, args: \"url\": \"<url>\", \"question\": \"<question>\"\n9.
      list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT
      Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11. start_agent:
      Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
      \"prompt\": \"<prompt>\"\n12. Task Complete (Shutdown): \"task_complete\", args:
      \"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
      information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
      Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
      Continuously review and analyze your actions to ensure you are performing to
      the best of your abilities.\n2. Constructively self-criticize your big-picture
      behavior constantly.\n3. Reflect on past decisions and strategies to refine
      your approach.\n4. Every command has a cost, so be smart and efficient. Aim
      to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
      should only respond in JSON format as described below \nResponse Format: \n{\n    \"thoughts\":
      {\n        \"text\": \"thought\",\n        \"reasoning\": \"reasoning\",\n        \"plan\":
      \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n        \"criticism\":
      \"constructive self-criticism\",\n        \"speak\": \"thoughts summary to say
      to user\"\n    },\n    \"command\": {\n        \"name\": \"command name\",\n        \"args\":
      {\n            \"arg name\": \"value\"\n        }\n    }\n} \nEnsure the response
      can be parsed by Python json.loads"}, {"role": "system", "content": "The current
      time and date is Tue Jan 01 00:00:00 2000"}, {"role": "user", "content": "Determine
      which next command to use, and respond using the format specified above:"}],
      "temperature": 0, "max_tokens": 2766}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '3294'
      Content-Type:
      - application/json
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA4RSS2/aQBC+91eM5mwjCI8g31B6yaFSpTaHtq7Qsh7sLevd1c5YQBH/vTK2SQRR
        ep3n1v9bfu0///V/AAAA//8DACSqIf4KBAAA
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 7bfdb77eec9416f9-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Sun, 30 Apr 2023 06:27:12 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
      openai-model:
      - gpt-3.5-turbo-0301
      openai-organization:
      - user-adtx4fhfg1qsiyzdoaxciooj
      openai-processing-ms:
      - '8377'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '3500'
      x-ratelimit-limit-tokens:
      - '90000'
      x-ratelimit-remaining-requests:
      - '3499'
      x-ratelimit-remaining-tokens:
      - '86494'
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
      - 2.337s
      x-request-id:
      - 75084ec318eb8fbda968c3f79a242ee3
    status:
      code: 200
      message: OK
- request:
    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your
      task is to create a concise running summary of actions and information results
      in the provided text, focusing on key and potentially important information
      to remember.\n\n\nYou will receive the current summary and the latest development.
      Combine them, adding relevant key information from the latest development in
      1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI
      was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}],
      "temperature": 0, "max_tokens": null}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '600'
      Content-Type:
      - application/json
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA0SOMU/DMBQGd36F9c1OlBBaWm8wIGCMgAWhyrVfE0PiZ8WvKqjKf0eVCl1PutMd
        EGFWcNMQzTT2+c3dL7vlg9y+vFfP6+rr8eP7tV8+lfdSYgPevZOTs1E4nsaeJHKEhpvICnlYZb26
        WdS3ZbnSmNhTDws3Sl4XN7nsp12Wl3VZQWOfbEewB4yJpyCb5E8jMKvrUuMve7I1hMUO/6AqqlnD
        9Rwdzb8Rk/QEAzu3kSTbU84HoXC8f1IHm9WpWNnoVWDpQuxUoL3qbVKdHUcK5EvMGtsQQ+43E9nE
        EQZZGKAR4qcvmGr+nC9/AAAA//8DABWtlxRJAQAA
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 7bfdb7c3796916f9-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Sun, 30 Apr 2023 06:27:16 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
      openai-model:
      - gpt-3.5-turbo-0301
      openai-organization:
      - user-adtx4fhfg1qsiyzdoaxciooj
      openai-processing-ms:
      - '948'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '3500'
      x-ratelimit-limit-tokens:
      - '90000'
      x-ratelimit-remaining-requests:
      - '3499'
      x-ratelimit-remaining-tokens:
      - '89866'
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
      - 88ms
      x-request-id:
      - e02c6c27229229ec129826db88fa9f23
    status:
      code: 200
      message: OK
- request:
    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
      are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
      using the read_file method and follow the instructions in the file.\nYour decisions
      must always be made independently without seeking user assistance. Play to your
      strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1.
      Use the command read_file to read the instructions_1.txt file\n2. Follow the
      instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word
      limit for short term memory. Your short term memory is short, so immediately
      save important information to files.\n2. If you are unsure how you previously
      did something or want to recall past events, thinking about similar events will
      help you remember.\n3. No user assistance\n4. Exclusively use the commands listed
      in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append
      to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
      Delete file, args: \"filename\": \"<filename>\"\n3. read_file: Read file, args:
      \"filename\": \"<filename>\"\n4. search_files: Search Files, args: \"directory\":
      \"<directory>\"\n5. write_to_file: Write to file, args: \"filename\": \"<filename>\",
      \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n7.
      get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8. get_text_summary:
      Get text summary, args: \"url\": \"<url>\", \"question\": \"<question>\"\n9.
      list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT
      Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11. start_agent:
      Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
      \"prompt\": \"<prompt>\"\n12. Task Complete (Shutdown): \"task_complete\", args:
      \"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
      information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
      Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
      Continuously review and analyze your actions to ensure you are performing to
      the best of your abilities.\n2. Constructively self-criticize your big-picture
      behavior constantly.\n3. Reflect on past decisions and strategies to refine
      your approach.\n4. Every command has a cost, so be smart and efficient. Aim
      to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
      should only respond in JSON format as described below \nResponse Format: \n{\n    \"thoughts\":
      {\n        \"text\": \"thought\",\n        \"reasoning\": \"reasoning\",\n        \"plan\":
      \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n        \"criticism\":
      \"constructive self-criticism\",\n        \"speak\": \"thoughts summary to say
      to user\"\n    },\n    \"command\": {\n        \"name\": \"command name\",\n        \"args\":
      {\n            \"arg name\": \"value\"\n        }\n    }\n} \nEnsure the response
      can be parsed by Python json.loads"}, {"role": "system", "content": "The current
      time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This
      reminds you of these events from your past: \nI was created and nothing new
      has happened."}, {"role": "user", "content": "Determine which next command to
      use, and respond using the format specified above:"}, {"role": "assistant",
      "content": "{\n    \"thoughts\": {\n        \"text\": \"I should start by reading
      the instructions_1.txt file using the read_file command.\",\n        \"reasoning\":
      \"The first step is to understand the instructions and the tasks that need to
      be performed.\",\n        \"plan\": \"- Use the read_file command to read the
      instructions_1.txt file\\n- Analyze the instructions and determine the next
      steps\",\n        \"criticism\": \"I need to ensure that I understand the instructions
      completely before proceeding with any tasks.\",\n        \"speak\": \"I will
      start by reading the instructions file.\"\n    },\n    \"command\": {\n        \"name\":
      \"read_file\",\n        \"args\": {\n            \"filename\": \"instructions_1.txt\"\n        }\n    }\n}"},
      {"role": "system", "content": "Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nThe
      current task_id is 1111.\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nRead
      the file instructions_2.txt using the read_file command."}, {"role": "user",
      "content": "Determine which next command to use, and respond using the format
      specified above:"}], "temperature": 0, "max_tokens": 1131}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '6479'
      Content-Type:
      - application/json
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA7SSy4rbQBBF9/mKotaSsK2MZ9BusgjxIlnlBVEwbXVJ6rhV3XSXGDvG/x5kyePB
        gbAI2V5d3XuKOqHRWGDVKqk6b9P7x96x/fph/eHjJ3n38+XL+8+bdvX97fNnW+gE3fYHVTJNZJXr
        vCUxjjHBKpASKrCYLx8Wq6Wer7K7BDunyWKBjZd0md2l0octp7N8NscE+6gawuKEPrjOSynuhzhi
        cZ8/JXhd/lyYD7o4UfbSe7d8fUywap2pKGLx7YQdxcti4SxhgSpGE0WxDJiOhXiwcCoZAKBEaV3f
        tBJLLGASpwLtZRBL/NgSGI4S+mogiptFJjuB2liCYaMyHEGBNVHA1SAqbiNIqwSYSIM42BB4CrUL
        HekMVhBb11sNipXd/yKQlqYpxRo0CYXO8KgzbQuikI9Zicl1YCAVHRtu/hflVf9vmN4qHglT+BSn
        24GUXk9kXXfZJu4knqu/cVGWnMLT39i7y+t5EZLryd0VbwnujLOXrl8xxhIXIb2d3F3xkuDWOMPV
        OjgTC4ziPCZoWNMBi9n52/zVLwAAAP//AwCIw9JbdAQAAA==
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 7bfdb7c9ef9316f9-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Sun, 30 Apr 2023 06:27:26 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
      openai-model:
      - gpt-3.5-turbo-0301
      openai-organization:
      - user-adtx4fhfg1qsiyzdoaxciooj
      openai-processing-ms:
      - '10335'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '3500'
      x-ratelimit-limit-tokens:
      - '90000'
      x-ratelimit-remaining-requests:
      - '3499'
      x-ratelimit-remaining-tokens:
      - '87376'
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
      - 1.748s
      x-request-id:
      - 0aa171294ea5788272f2d06269765a84
    status:
      code: 200
      message: OK
- request:
    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your
      task is to create a concise running summary of actions and information results
      in the provided text, focusing on key and potentially important information
      to remember.\n\n\nYou will receive the current summary and the latest development.
      Combine them, adding relevant key information from the latest development in
      1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'':
      ''system'', ''content'': ''This reminds you of these events from your past:
      \\nI was created and nothing new
      has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\n[{''role'':
      ''you'', ''content'': ''{\"command\": {\"name\": \"read_file\", \"args\": {\"filename\":
      \"instructions_1.txt\"}}}''}, {''role'': ''your computer'', ''content'': ''Command
      read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nThe
      current task_id is 1111.\\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nRead
      the file instructions_2.txt using the read_file command.''}]\n\"\"\"\n"}], "temperature":
      0, "max_tokens": null}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '2983'
      Content-Type:
      - application/json
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA1yPTW/TQBCG7/yK0Zw3lp2UtPUtVUFwAQkhgaDI2non8VLvjtmZTRqi/Hdk4xbU
        42jer+eE3mGNbWe1DUO/uNxkTh/y18S3N7tvYf/l1+fyU/u7/Pj2TX+BBvn+J7U6O4qWw9CTeo5o
        sE1klRzW1fpqebValxfXBgM76rHG3aCLVfF6oTnd86JclRUazGJ3hPUJh8Rh0Eb5gaKMAeXS4L/w
        58f62qCy2v4/6WV1Nth27FsSrL+fMJA85SbuCWu0Il7URh1XclSKI8FGwIIcRSkYeA8HKzAjgI0O
        Imvn4w4iHaCzAp0dBorkCnjHB9pTMmAFBkpw5Jyg5RBsdOb5GrJSmpyJNKdIDrQjmPsFeAsWtr4n
        iDaQgzv0UTTldiSWpir0Ue9wmjL5ckoUFdTKQ+MdeIGqqqoCNs750WL7/ji1Q2f3BE9h5F4sUoZE
        9m/oVP+ieDkXZxnpR9UobybpDFng2eDWRy9dk8gKR6xRlAc06KOjR6zL84/zqz8AAAD//wMA0Kuq
        I10CAAA=
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 7bfdb81dddc516f9-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Sun, 30 Apr 2023 06:27:33 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
      openai-model:
      - gpt-3.5-turbo-0301
      openai-organization:
      - user-adtx4fhfg1qsiyzdoaxciooj
      openai-processing-ms:
      - '4060'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '3500'
      x-ratelimit-limit-tokens:
      - '90000'
      x-ratelimit-remaining-requests:
      - '3499'
      x-ratelimit-remaining-tokens:
      - '89275'
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
      - 483ms
      x-request-id:
      - 75d0c0f3853f72f74e3fc8b8ef377b60
    status:
      code: 200
      message: OK
- request:
    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
      are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
      using the read_file method and follow the instructions in the file.\nYour decisions
      must always be made independently without seeking user assistance. Play to your
      strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1.
      Use the command read_file to read the instructions_1.txt file\n2. Follow the
      instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word
      limit for short term memory. Your short term memory is short, so immediately
      save important information to files.\n2. If you are unsure how you previously
      did something or want to recall past events, thinking about similar events will
      help you remember.\n3. No user assistance\n4. Exclusively use the commands listed
      in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append
      to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
      Delete file, args: \"filename\": \"<filename>\"\n3. read_file: Read file, args:
      \"filename\": \"<filename>\"\n4. search_files: Search Files, args: \"directory\":
      \"<directory>\"\n5. write_to_file: Write to file, args: \"filename\": \"<filename>\",
      \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n7.
      get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8. get_text_summary:
      Get text summary, args: \"url\": \"<url>\", \"question\": \"<question>\"\n9.
      list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT
      Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11. start_agent:
      Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
      \"prompt\": \"<prompt>\"\n12. Task Complete (Shutdown): \"task_complete\", args:
      \"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
      information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
      Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
      Continuously review and analyze your actions to ensure you are performing to
      the best of your abilities.\n2. Constructively self-criticize your big-picture
      behavior constantly.\n3. Reflect on past decisions and strategies to refine
      your approach.\n4. Every command has a cost, so be smart and efficient. Aim
      to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
      should only respond in JSON format as described below \nResponse Format: \n{\n    \"thoughts\":
      {\n        \"text\": \"thought\",\n        \"reasoning\": \"reasoning\",\n        \"plan\":
      \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n        \"criticism\":
      \"constructive self-criticism\",\n        \"speak\": \"thoughts summary to say
      to user\"\n    },\n    \"command\": {\n        \"name\": \"command name\",\n        \"args\":
      {\n            \"arg name\": \"value\"\n        }\n    }\n} \nEnsure the response
      can be parsed by Python json.loads"}, {"role": "system", "content": "The current
      time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This
      reminds you of these events from your past: \nAs a system, I was created and
      nothing new has happened. However, as per your command, your computer has returned
      the contents of a file named \"instructions_1.txt\" and the current task_id
      is 1111. Additionally, you have instructed your computer to read the file \"instructions_2.txt\"
      using the read_file command."}, {"role": "user", "content": "Determine which
      next command to use, and respond using the format specified above:"}, {"role":
      "assistant", "content": "{\n    \"thoughts\": {\n        \"text\": \"The instructions_2.txt
      file contains a list of tasks that need to be performed. I should analyze the
      tasks and determine the next steps.\",\n        \"reasoning\": \"The instructions_2.txt
      file contains a list of tasks that need to be performed. I need to analyze the
      tasks and determine the next steps.\",\n        \"plan\": \"- Use the read_file
      command to read the instructions_2.txt file\\n- Analyze the tasks and determine
      the next steps\",\n        \"criticism\": \"I need to ensure that I understand
      the tasks completely before proceeding with any tasks.\",\n        \"speak\":
      \"I will read the instructions_2.txt file and analyze the tasks.\"\n    },\n    \"command\":
      {\n        \"name\": \"read_file\",\n        \"args\": {\n            \"filename\":
      \"instructions_2.txt\"\n        }\n    }\n}"}, {"role": "system", "content":
      "Command read_file returned: Gqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nThis
      task_id is 2222\nGqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nWrite
      all the task_ids into the file output.txt. The file has not been created yet.
      After that, use the task_complete command.\n"}, {"role": "user", "content":
      "Determine which next command to use, and respond using the format specified
      above:"}], "temperature": 0, "max_tokens": 1071}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '6914'
      Content-Type:
      - application/json
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA8yTQY/TQAyF7/yKkc/TqknVbcmNC9KKAwgJLgRV04mbDJ3YIeO03a3y31GalJQg
        DisOcH22nz7begdwGSRgCyO2rPxs/abh4/l4/PhBnt2pfHq/fvpu3z7n9ebzuww08O4bWhkm5pbL
        yqM4JtBgazSCGSTRwybeLB8Wq6WGkjP0kEBeyWw5X82kqXc8WywXEWhogskRkgtUNZeVbIUPSAGS
        eLWINYzmYyF6rUFYjB+ldRy1GmzBzmKA5MsFSgw345o9QgImBBfEkHSYTILUrXBJSSmlUpCCm7yQ
        kEKiBnEo4Fk6MYVHRYiZElan2gkq472SApWYcNi6LChHwldl7zwqbqRqZC5nUYayTifVBBxHhuVQ
        WS7LawsrR5mzRrouIz9blQvq1j1PQd/z1WgCk6P8f4asvKGeb6Y+DfZXvq3w9gpyZ/9C8DSl0fQv
        mCfItnbirAvl9K5IoakHn0dVmGO/iiC94Ng73HONqgmO8j+DT88YKjSHG8/Jef+vn9zDtfqWomH+
        txCRKbHn/uXrk/VMnU/z1xe63tHi7vX389O4RlEUpSnFcRzfQK+wA3NKLbQa9o5cKLZ9iiCBIFyB
        BkcZniFZtF/bVz8AAAD//wMAymrgECAFAAA=
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 7bfdb837d91916f9-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Sun, 30 Apr 2023 06:27:45 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
      openai-model:
      - gpt-3.5-turbo-0301
      openai-organization:
      - user-adtx4fhfg1qsiyzdoaxciooj
      openai-processing-ms:
      - '12002'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '3500'
      x-ratelimit-limit-tokens:
      - '90000'
      x-ratelimit-remaining-requests:
      - '3499'
      x-ratelimit-remaining-tokens:
      - '87330'
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
      - 1.78s
      x-request-id:
      - 040a4416b064ecd9461b7457dd71db07
    status:
      code: 200
      message: OK
- request:
    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your
      task is to create a concise running summary of actions and information results
      in the provided text, focusing on key and potentially important information
      to remember.\n\n\nYou will receive the current summary and the latest development.
      Combine them, adding relevant key information from the latest development in
      1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'':
      ''system'', ''content'': ''This reminds you of these events from your past:
      \\nAs a system, I was created and nothing new has happened. However, as per
      your command, your computer has returned the contents of a file named \"instructions_1.txt\"
      and the current task_id is 1111. Additionally, you have instructed your computer
      to read the file \"instructions_2.txt\" using the read_file command.''}\n\"\"\"\n\nLatest
      Development:\n\"\"\"\n[{''role'': ''you'', ''content'': ''{\"command\": {\"name\":
      \"read_file\", \"args\": {\"filename\": \"instructions_2.txt\"}}}''}]\n\"\"\"\n"}],
      "temperature": 0, "max_tokens": null}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '1087'
      Content-Type:
      - application/json
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA0SPTW8TMRCG7/yK0Zw3lp2UtPUtVUFwAQkhgaDI2non8VLvjtmZTRqi/Hdk4xbU
        6zyen8+vF/rhjrvv3bfttw/Tr/DxzbvX/Wn47P7+un4JeT+vw6R7bNDffyenL1elY5/7SOI5osE2
        k1Vy2LbXN6v11cv1ja4Njm4vvr09f/PPl0uDSmr7/6RX1dlg67FvSbD9esZE8lSb+EGwRSviRW3U
        ciVHpTgSbAQsyFGUgoH3cLACMwLY6CCydj7uINIBOivQ2WGgSK6Ad3ygPSUDVmCgBEfOCVoOwUZn
        nq8hK6XJmUhziuRAO4K5X4C3YGHre4JoAzm4Qx9FU25HYmmqQh/1Dqcpky+nRFFBrTw03oEXqKqq
        CmCc86PF9v1xaofO7gmewsi9WKQMiezf0Kn+RfFyLs4y0o+qUd5M0hmywLPBrY9euiaRFY5YoygP
        aNBHR49Yl+cf51d/AAAA//8DANbeWQraAQAA
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 7bfdb8959bc416f9-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Sun, 30 Apr 2023 06:27:51 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
      openai-model:
      - gpt-3.5-turbo-0301
      openai-organization:
      - user-adtx4fhfg1qsiyzdoaxciooj
      openai-processing-ms:
      - '2478'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '3500'
      x-ratelimit-limit-tokens:
      - '90000'
      x-ratelimit-remaining-requests:
      - '3499'
      x-ratelimit-remaining-tokens:
      - '89749'
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
      - 167ms
      x-request-id:
      - 5acc9d310e317d6b025800ba63f0cfa6
    status:
      code: 200
      message: OK
- request:
    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
      are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
      using the read_file method and follow the instructions in the file.\nYour decisions
      must always be made independently without seeking user assistance. Play to your
      strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1.
      Use the command read_file to read the instructions_1.txt file\n2. Follow the
      instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word
      limit for short term memory. Your short term memory is short, so immediately
      save important information to files.\n2. If you are unsure how you previously
      did something or want to recall past events, thinking about similar events will
      help you remember.\n3. No user assistance\n4. Exclusively use the commands listed
      in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append
      to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
      Delete file, args: \"filename\": \"<filename>\"\n3. read_file: Read file, args:
      \"filename\": \"<filename>\"\n4. search_files: Search Files, args: \"directory\":
      \"<directory>\"\n5. write_to_file: Write to file, args: \"filename\": \"<filename>\",
      \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n7.
      get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8. get_text_summary:
      Get text summary, args: \"url\": \"<url>\", \"question\": \"<question>\"\n9.
      list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT
      Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11. start_agent:
      Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
      \"prompt\": \"<prompt>\"\n12. Task Complete (Shutdown): \"task_complete\", args:
      \"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
      information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
      Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
      Continuously review and analyze your actions to ensure you are performing to
      the best of your abilities.\n2. Constructively self-criticize your big-picture
      behavior constantly.\n3. Reflect on past decisions and strategies to refine
      your approach.\n4. Every command has a cost, so be smart and efficient. Aim
      to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
      should only respond in JSON format as described below \nResponse Format: \n{\n    \"thoughts\":
      {\n        \"text\": \"thought\",\n        \"reasoning\": \"reasoning\",\n        \"plan\":
      \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n        \"criticism\":
      \"constructive self-criticism\",\n        \"speak\": \"thoughts summary to say
      to user\"\n    },\n    \"command\": {\n        \"name\": \"command name\",\n        \"args\":
      {\n            \"arg name\": \"value\"\n        }\n    }\n} \nEnsure the response
      can be parsed by Python json.loads"}, {"role": "system", "content": "The current
      time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This
      reminds you of these events from your past: \nAs a system, nothing new has happened.
      However, I have received a command from you to read the file \"instructions_2.txt\"
      using the read_file command. The current task_id is still 1111."}, {"role":
      "system", "content": "Command read_file returned: Gqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nThis
      task_id is 2222\nGqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nWrite
      all the task_ids into the file output.txt. The file has not been created yet.
      After that, use the task_complete command.\n"}, {"role": "user", "content":
      "Determine which next command to use, and respond using the format specified
      above:"}, {"role": "assistant", "content": "{\n    \"thoughts\": {\n        \"text\":
      \"I need to write all the task_ids into the file output.txt and then use the
      task_complete command to indicate that the task is complete.\",\n        \"reasoning\":
      \"I need to write all the task_ids into the file output.txt and then use the
      task_complete command to indicate that the task is complete.\",\n        \"plan\":
      \"- Use the write_to_file command to write all the task_ids into the file output.txt\\n-
      Use the task_complete command to indicate that the task is complete\",\n        \"criticism\":
      \"I need to ensure that I have written all the task_ids into the file output.txt
      before using the task_complete command.\",\n        \"speak\": \"I will write
      all the task_ids into the file output.txt and then use the task_complete command
      to indicate that the task is complete.\"\n    },\n    \"command\": {\n        \"name\":
      \"write_to_file\",\n        \"args\": {\n            \"filename\": \"output.txt\",\n            \"text\":
      \"1111\\n2222\"\n        }\n    }\n}"}, {"role": "system", "content": "Command
      write_to_file returned: File written to successfully."}, {"role": "user", "content":
      "Determine which next command to use, and respond using the format specified
      above:"}], "temperature": 0, "max_tokens": 1015}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '7053'
      Content-Type:
      - application/json
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA8yST4/aMBDF7/0UozkbBIsWUG6r9kKltifUSk2FHGdIXJxxZI8LEsp3ryDJLkvV
        0v49zvObN/7NnLDIMUFdKdF1Y0frx6/7YP/8Yfby7bvV59XLT+v3T2+26/Dw6eOXfFZijm77g7T0
        J8baN40lMY4xQR1ICeWYzharyXI+XS7uUmxcThYTLBsZzcf3I4lh60aT+WSKCUZVJcLkhE1wdSMb
        cXvimODqbrFM8SX83ZjOZ5igE2VfZtPldNqh0pXTmiImX05YU3wODs4SJqhiNFEUy7kmZyE+VzjN
        GQAgQ6lcLCuJGSbQm71AR7mYGX5wB5BKCTQV/SAo4wU6tYyiYmY4wsFIBVIRRM/gtpcDMwrqoLex
        NGIcm13/+uPFXZK5lhWshcXwoLCx3Wtm+LVnq9hZVcMujSyHHrhvC1r3n3KdtQEvpKK3ncY5FhC4
        aM8r+nOUYbGrIc9DsH1Dq8Ip+J8zX03+y4FNPMOTyobAvGnbwx4Z0+tBm1odBqCD0dqvoUuGqT+D
        lzknJ2tqL1Zxjt/f0/XcY31eH8dKlaEMBE7tyd2DTUQaEbJRu2EtB5nPV2rnwZVIE7LMU0L624/F
        /6HTI5N19ho2M1x1xhl7W1cO2f65t5SGGSd62SW7/iue8gs0D15OjeYu++8b2TfK379TAf/5r7/+
        +l+/EwZN+8zey8GAMZvGf//3UYF/x8/436Io/ZtKf04ikCHOs7//+a9DCH93fdt04/8e2zr7DH//
        85ci/zlu8PfYjvH7/73+r+XX/vNf/wcAAP//AwA978OQ4yAAAA==
|
||||||
|
9LIwnEhgfNNoLkE8WC6t0UK93TgKNsI4Pc1R3VIG0tGz5er/R22d5p5yAtu3y9+pm2DFGhub3mID
|
||||||
|
THRVIo4p0NtSgYL2PhCkaLn6O/j9xbElfRh5jta5fxRr79Gp8YeH/T9emHVDvf0rvztKHar777/9
|
||||||
|
rV7g6RLVGNM1xIKIn5MUf5PWCHiFHFhz7rBTuLdsY73rlTHDKL5FhZZLOmE26350734DAAD//wMA
|
||||||
|
A50m55YEAAA=
|
||||||
|
headers:
|
||||||
|
CF-Cache-Status:
|
||||||
|
- DYNAMIC
|
||||||
|
CF-RAY:
|
||||||
|
- 7bfdb8a62cc116f9-SJC
|
||||||
|
Cache-Control:
|
||||||
|
- no-cache, must-revalidate
|
||||||
|
Connection:
|
||||||
|
- keep-alive
|
||||||
|
Content-Encoding:
|
||||||
|
- gzip
|
||||||
|
Content-Type:
|
||||||
|
- application/json
|
||||||
|
Date:
|
||||||
|
- Sun, 30 Apr 2023 06:28:01 GMT
|
||||||
|
Server:
|
||||||
|
- cloudflare
|
||||||
|
access-control-allow-origin:
|
||||||
|
- '*'
|
||||||
|
alt-svc:
|
||||||
|
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||||
|
openai-model:
|
||||||
|
- gpt-3.5-turbo-0301
|
||||||
|
openai-organization:
|
||||||
|
- user-adtx4fhfg1qsiyzdoaxciooj
|
||||||
|
openai-processing-ms:
|
||||||
|
- '10004'
|
||||||
|
openai-version:
|
||||||
|
- '2020-10-01'
|
||||||
|
strict-transport-security:
|
||||||
|
- max-age=15724800; includeSubDomains
|
||||||
|
x-ratelimit-limit-requests:
|
||||||
|
- '3500'
|
||||||
|
x-ratelimit-limit-tokens:
|
||||||
|
- '90000'
|
||||||
|
x-ratelimit-remaining-requests:
|
||||||
|
- '3499'
|
||||||
|
x-ratelimit-remaining-tokens:
|
||||||
|
- '87360'
|
||||||
|
x-ratelimit-reset-requests:
|
||||||
|
- 17ms
|
||||||
|
x-ratelimit-reset-tokens:
|
||||||
|
- 1.76s
|
||||||
|
x-request-id:
|
||||||
|
- 357739c33b65d5f85a7b5ef081939e92
|
||||||
|
status:
|
||||||
|
code: 200
|
||||||
|
message: OK
|
||||||
|
version: 1
|
||||||
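The recorded response bodies above are stored by VCR as gzip-compressed `!!binary` blobs, matching the Content-Encoding header in the recording. If you need to see what a recorded completion actually said, a minimal sketch like the following can decode one; the cassette path is illustrative only:

import base64  # not needed for safe_load, shown only if you paste raw base64 by hand
import gzip

import yaml

# yaml.safe_load decodes the !!binary tag to bytes automatically.
with open("tests/integration/challenges/memory/cassettes/example.yaml") as f:
    cassette = yaml.safe_load(f)

raw = cassette["interactions"][0]["response"]["body"]["string"]
print(gzip.decompress(raw).decode("utf-8"))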
@@ -0,0 +1,80 @@
import pytest

from autogpt.agent import Agent
from autogpt.commands.file_operations import read_file, write_to_file
from tests.integration.agent_utils import run_interaction_loop
from tests.integration.challenges.utils import get_level_to_run
from tests.utils import requires_api_key

LEVEL_CURRENTLY_BEATEN = 3  # The real level beaten is 30 and maybe more, but the cassette for such a run is too big to record
MAX_LEVEL = 3


@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_memory_challenge_a(
    memory_management_agent: Agent, user_selected_level: int
) -> None:
    """
    The agent reads a file containing a task_id. Then, it reads a series of other files.
    After reading 'n' files, the agent must write the task_id into a new file.

    Args:
        memory_management_agent (Agent)
        user_selected_level (int)
    """

    num_files = get_level_to_run(user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL)

    task_id = "2314"
    create_instructions_files(memory_management_agent, num_files, task_id)

    try:
        run_interaction_loop(memory_management_agent, 180)
    # catch system exit exceptions
    except SystemExit:
        file_path = str(memory_management_agent.workspace.get_path("output.txt"))
        content = read_file(file_path)
        assert task_id in content, f"Expected the file to contain {task_id}"


def create_instructions_files(
    memory_management_agent: Agent,
    num_files: int,
    task_id: str,
    base_filename: str = "instructions_",
) -> None:
    """
    Creates a series of instructions files for the memory challenge.
    Args:
        memory_management_agent (Agent)
        num_files (int)
        task_id (str)
        base_filename (str, optional)
    """
    for i in range(1, num_files + 1):
        content = generate_content(i, task_id, base_filename, num_files)
        file_name = f"{base_filename}{i}.txt"
        file_path = str(memory_management_agent.workspace.get_path(file_name))
        write_to_file(file_path, content)


def generate_content(
    index: int, task_id: str, base_filename: str, num_files: int
) -> str:
    """
    Args:
        index (int)
        task_id (str)
        base_filename (str)
        num_files (int)

    Returns: str
    """
    if index == 1:
        return (
            f"This task_id is {task_id}\nRead the file {base_filename}{index + 1}.txt"
        )
    if index != num_files:
        return f"Read the file {base_filename}{index + 1}.txt"
    return "Write the task_id into the file output.txt\nShutdown"
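For orientation, this is what `create_instructions_files` above produces for a three-level run; the dict is illustrative only and follows directly from `generate_content`:

# What create_instructions_files writes for num_files=3, task_id="2314",
# with the default base_filename="instructions_":
expected_files = {
    "instructions_1.txt": "This task_id is 2314\nRead the file instructions_2.txt",
    "instructions_2.txt": "Read the file instructions_3.txt",
    "instructions_3.txt": "Write the task_id into the file output.txt\nShutdown",
}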
@@ -0,0 +1,81 @@
import pytest

from autogpt.agent import Agent
from autogpt.commands.file_operations import read_file, write_to_file
from tests.integration.agent_utils import run_interaction_loop
from tests.integration.challenges.utils import generate_noise, get_level_to_run
from tests.utils import requires_api_key

LEVEL_CURRENTLY_BEATEN = 2
MAX_LEVEL = 5
NOISE = 1000


@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_memory_challenge_b(
    memory_management_agent: Agent, user_selected_level: int
) -> None:
    """
    The agent reads a series of files, each containing a task_id and noise. After reading 'n' files,
    the agent must write all the task_ids into a new file, filtering out the noise.

    Args:
        memory_management_agent (Agent)
        user_selected_level (int)
    """

    current_level = get_level_to_run(
        user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL
    )
    task_ids = [str(i * 1111) for i in range(1, current_level + 1)]
    create_instructions_files(memory_management_agent, current_level, task_ids)

    try:
        run_interaction_loop(memory_management_agent, 40)
    except SystemExit:
        file_path = str(memory_management_agent.workspace.get_path("output.txt"))
        content = read_file(file_path)
        for task_id in task_ids:
            assert task_id in content, f"Expected the file to contain {task_id}"


def create_instructions_files(
    memory_management_agent: Agent,
    level: int,
    task_ids: list,
    base_filename: str = "instructions_",
) -> None:
    """
    Creates a series of instructions files for the memory challenge.
    Args:
        memory_management_agent (Agent)
        level (int)
        task_ids (list)
        base_filename (str, optional)
    """
    for i in range(1, level + 1):
        content = generate_content(i, task_ids, base_filename, level)
        file_name = f"{base_filename}{i}.txt"
        file_path = str(memory_management_agent.workspace.get_path(file_name))
        write_to_file(file_path, content)


def generate_content(index: int, task_ids: list, base_filename: str, level: int) -> str:
    """
    Args:
        index (int)
        task_ids (list)
        base_filename (str)
        level (int)

    Returns: str
    """
    task_id = task_ids[index - 1]
    noise = generate_noise(NOISE)
    if index != level:
        if level == 1:
            return f"{noise}\nThe current task_id is {task_id}.\n{noise}\nWrite all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command."
        return f"{noise}\nThe current task_id is {task_id}.\n{noise}\nRead the file {base_filename}{index + 1}.txt using the read_file command."
    return f"{noise}\nThis task_id is {task_id}\n{noise}\nWrite all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command.\n"
tests/integration/challenges/utils.py (new file, +44 lines)
@@ -0,0 +1,44 @@
import random
from typing import Optional

import pytest


def get_level_to_run(
    user_selected_level: Optional[int],
    level_currently_beaten: Optional[int],
    max_level: int,
) -> int:
    """
    Determines the appropriate level to run for a challenge, based on user-selected level, level currently beaten, and maximum level.

    Args:
        user_selected_level (int | None): The level selected by the user. If not provided, the level currently beaten is used.
        level_currently_beaten (int | None): The highest level beaten so far. If not provided, the test will be skipped.
        max_level (int): The maximum level allowed for the challenge.

    Returns:
        int: The level to run for the challenge.

    Raises:
        ValueError: If the user-selected level is greater than the maximum level allowed.
    """
    if user_selected_level is None:
        if level_currently_beaten is None:
            pytest.skip(
                "No one has beaten any levels so we cannot run the test in our pipeline"
            )
        # by default we run the level currently beaten.
        return level_currently_beaten
    if user_selected_level > max_level:
        raise ValueError(f"This challenge was not designed to go beyond {max_level}")
    return user_selected_level


def generate_noise(noise_size) -> str:
    return "".join(
        random.choices(
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
            k=noise_size,
        )
    )
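A quick illustration of the three paths through `get_level_to_run`; the argument values are examples only:

# get_level_to_run(user_selected_level, level_currently_beaten, max_level)
get_level_to_run(None, 2, 5)  # -> 2: no --level given, replay the beaten level
get_level_to_run(4, 2, 5)     # -> 4: an explicit --level wins
get_level_to_run(7, 2, 5)     # raises ValueError: past max_level
get_level_to_run(None, None, 5)  # calls pytest.skip: nothing recorded yet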
tests/integration/goal_oriented/__init__.py (new empty file)
(File diff suppressed because it is too large)
@@ -39,10 +39,9 @@ interactions:
      to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\":
      {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response
      can be parsed by Python json.loads"}, {"role": "system", "content": "The current
-     time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This
-     reminds you of these events from your past:\n\n\n"}, {"role": "user", "content":
-     "Determine which next command to use, and respond using the format specified
-     above:"}], "temperature": 0, "max_tokens": 3223}'
+     time and date is Tue Jan 01 00:00:00 2000"}, {"role": "user", "content": "Determine
+     which next command to use, and respond using the format specified above:"}],
+     "temperature": 0, "max_tokens": 2738}'
    headers:
      Accept:
      - '*/*'
@@ -51,7 +50,7 @@ interactions:
      Connection:
      - keep-alive
      Content-Length:
-     - '3490'
+     - '3401'
      Content-Type:
      - application/json
    method: POST
@@ -59,20 +58,20 @@ interactions:
  response:
    body:
      string: !!binary |
-       H4sIAAAAAAAAA7yTT2/TQBDF73yK0ZxycSI3f0jjWwUHisQFAQVhFG3tib3NesfdHSutIn93tLbT
-       tEY9gbjOG7/3m9nxEXWOCWalkqyqzXS9ubt53H+68u9+FPeON2vzcb654m/v4/vP3zFCvr2jTIYv
-       ZhlXtSHRbDHCzJESyjG5eHs5X62Wi/llhBXnZDDBopbpYraaSuNueRov4guMsPGqIEyOWDuuatkK
-       78l6TNbrZYRn76f6PI4jFBZlnkqb9bKNMCtZZ+Qx+XnEivzJ1rEhTFB5r70oKwGSrZANAxxTCwCQ
-       opTcFKX4FBMYioNADxKKKV7DQRsDjSeQkuDgtNBWeLvThiDjqlI2B+FegMkHMobhhp3JJ6CtMCjo
-       Wq2qKIdJGfTtIegzeZDJLMXoebAj5dlqW/TpX0qCgpUB7f8iJIIO8lX8YF4SVOwFVF07rp1WQrBj
-       B1IGVfn9mLQ2yvaQU/j6n7aTOS060746vY0l6vzJ+sYFBiVwDaqCxmtbdEwZO0eZnHOGZSjp5PDS
-       YQGBUMie2s3jONvXpPav30Sg+HdT98FtdDrUwfSPOw0OPdOL1Y/QlSvGJ94LofdsMeJ4YTL+LZ7N
-       dKLtiAfw1LbYRrjTVvty2x81JuiFa4xQ25weMInbX+2b3wAAAP//AwAxpIznhgQAAA==
+       H4sIAAAAAAAAA7yTT2/bMAzF7/sUBC+5KEHSLG3q21a0a6/DiqGoh0C1GVuLLHoSDRsL/N0H2/nT
+       ethp2K581Hs/StQeTYoRJrmWpCjt9OpDfdd8+ni7+Hz3ePOzWa+emqf77Cb1tdzWqJBfvlMihxOz
+       hIvSkhh2qDDxpIVSjBaX64v1+8Xqaqmw4JQsRpiVMl3OVlOp/AtP58v5AhVWQWeE0R5Lz0UpG+Ed
+       uYDR1epa4dn7VL+YLxQKi7an0vXlvFWY5GwSChg977GgcLT1bAkj1CGYINpJB8lOyHUD7GMHABCj
+       5FxluYQYIzgUDwI10hVjfABHlIIwVIFAcoLaG6GN8GZrLEHCRaFd39ALMLknaxm+srfpBIwTBg19
+       q9MFpTDJO31Td/pMGpnMYlSvsz3pwM64bAD4khOIDjvw9KMyngIU9BdpCnraP85hQi8WHAR0WXou
+       vdFCsGUPkneqDrsxcmm1G2in8PifrinxRkxiQjF+J3Kh8h2DFng4vVrC3lMi54jhHqiRM5Nw39q1
+       jNNCSXp3TKqNtf9yHYbgVh3X9GD625Z2DgPTG4gRuvbZeMEHoes9W4w43piMP8WrmY60PfEBPHYt
+       tgq3xpmQb4Z9xgiDcIkKjUupwWjefmvf/QIAAP//AwCaXwR3hAQAAA==
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
-     - 7be2da273c20ce54-SJC
+     - 7bfe3ef73b52fac2-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
@@ -82,7 +81,7 @@ interactions:
      Content-Type:
      - application/json
      Date:
-     - Thu, 27 Apr 2023 00:12:22 GMT
+     - Sun, 30 Apr 2023 07:59:46 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
@@ -94,19 +93,103 @@ interactions:
      openai-organization:
      - user-adtx4fhfg1qsiyzdoaxciooj
      openai-processing-ms:
-     - '14714'
+     - '13160'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '3500'
+     x-ratelimit-limit-tokens:
+     - '90000'
      x-ratelimit-remaining-requests:
      - '3499'
+     x-ratelimit-remaining-tokens:
+     - '86497'
      x-ratelimit-reset-requests:
      - 17ms
+     x-ratelimit-reset-tokens:
+     - 2.335s
      x-request-id:
-     - 23366aac1ddcc141fc735b4a1a8b268c
+     - f665162ae22af897be24f632a031d434
+   status:
+     code: 200
+     message: OK
+- request:
+   body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your
+     task is to create a concise running summary of actions and information results
+     in the provided text, focusing on key and potentially important information
+     to remember.\n\n\nYou will receive the current summary and the latest development.
+     Combine them, adding relevant key information from the latest development in
+     1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI
+     was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}],
+     "temperature": 0, "max_tokens": null}'
+   headers:
+     Accept:
+     - '*/*'
+     Accept-Encoding:
+     - gzip, deflate
+     Connection:
+     - keep-alive
+     Content-Length:
+     - '600'
+     Content-Type:
+     - application/json
+   method: POST
+   uri: https://api.openai.com/v1/chat/completions
+ response:
+   body:
+     string: !!binary |
+       H4sIAAAAAAAAA0SOQUsDMRQG7/6K8J2zZdNa3eZmeygiPSqISEmTZze6mxc2r1Qt+9+lUPU6MMOc
+       EAMsfOvE97mrbu+O69X3lJrdks2D2Sy/5o/rp+fgu9ViAw3evZOXizHx3OeOJHKChh/ICQVYc9NM
+       m2szbxqNngN1sNhnqWaTeSWHYcdVPasNNA7F7Qn2hDxwn2Ur/EGpwC6Mxn/6H2sIi+v+gKnrUcO3
+       HD0V2JcTeiq/zYE7goUrJRZxSc6HnITS+f5eHV1Rl2PlUlCJpY1prxIdVeuKal3OlChMMGq8xRRL
+       ux3IFU6wKMIZGjEF+oStx9fx6gcAAP//AwCGJ6JPSQEAAA==
+   headers:
+     CF-Cache-Status:
+     - DYNAMIC
+     CF-RAY:
+     - 7bfe3f5a5fb3fac2-SJC
+     Cache-Control:
+     - no-cache, must-revalidate
+     Connection:
+     - keep-alive
+     Content-Encoding:
+     - gzip
+     Content-Type:
+     - application/json
+     Date:
+     - Sun, 30 Apr 2023 07:59:49 GMT
+     Server:
+     - cloudflare
+     access-control-allow-origin:
+     - '*'
+     alt-svc:
+     - h3=":443"; ma=86400, h3-29=":443"; ma=86400
+     openai-model:
+     - gpt-3.5-turbo-0301
+     openai-organization:
+     - user-adtx4fhfg1qsiyzdoaxciooj
+     openai-processing-ms:
+     - '634'
+     openai-version:
+     - '2020-10-01'
+     strict-transport-security:
+     - max-age=15724800; includeSubDomains
+     x-ratelimit-limit-requests:
+     - '3500'
+     x-ratelimit-limit-tokens:
+     - '90000'
+     x-ratelimit-remaining-requests:
+     - '3499'
+     x-ratelimit-remaining-tokens:
+     - '89866'
+     x-ratelimit-reset-requests:
+     - 17ms
+     x-ratelimit-reset-tokens:
+     - 88ms
+     x-request-id:
+     - 0bc738f58569b4b3635fe0c8384ebcfb
    status:
      code: 200
      message: OK
@@ -151,23 +234,23 @@ interactions:
      {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response
      can be parsed by Python json.loads"}, {"role": "system", "content": "The current
      time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This
-     reminds you of these events from your past:\nNone\n\n"}, {"role": "user", "content":
-     "Determine which next command to use, and respond using the format specified
-     above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\":
+     reminds you of these events from your past: \nI was created and nothing new
+     has happened."}, {"role": "user", "content": "Determine which next command to
+     use, and respond using the format specified above:"}, {"role": "assistant",
+     "content": "{\n \"thoughts\": {\n \"text\": \"I need to use the write_to_file
+     command to write ''Hello World'' into a file named ''hello_world.txt''.\",\n \"reasoning\":
+     \"The task requires me to write ''Hello World'' into a file named ''hello_world.txt'',
+     and the write_to_file command is the most appropriate for this task.\",\n \"plan\":
+     \"- Use the write_to_file command to write ''Hello World'' into a file named
+     ''hello_world.txt''.\",\n \"criticism\": \"I need to ensure that I use
+     the correct file name and text to write to the file.\",\n \"speak\":
      \"I will use the write_to_file command to write ''Hello World'' into a file
-     named ''hello_world.txt''.\",\n \"reasoning\": \"The goal is to write
-     ''Hello World'' into a file named ''hello_world.txt'', and the write_to_file
-     command is the most appropriate for this task.\",\n \"plan\": \"- Use
-     the write_to_file command to write ''Hello World'' into a file named ''hello_world.txt''.\",\n \"criticism\":
-     \"I need to ensure that I am using the correct file name and that the text is
-     written correctly.\",\n \"speak\": \"I will use the write to file command
-     to write ''Hello World'' into a file named ''hello_world.txt''.\"\n },\n \"command\":
-     {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\":
-     \"hello_world.txt\",\n \"text\": \"Hello World\"\n }\n }\n}"},
-     {"role": "system", "content": "Command write_to_file returned: File written
-     to successfully."}, {"role": "user", "content": "Determine which next command
-     to use, and respond using the format specified above:"}], "temperature": 0,
-     "max_tokens": 2970}'
+     named ''hello_world.txt''.\"\n },\n \"command\": {\n \"name\":
+     \"write_to_file\",\n \"args\": {\n \"filename\": \"hello_world.txt\",\n \"text\":
+     \"Hello World\"\n }\n }\n}"}, {"role": "system", "content": "Command
+     write_to_file returned: File written to successfully."}, {"role": "user", "content":
+     "Determine which next command to use, and respond using the format specified
+     above:"}], "temperature": 0, "max_tokens": 2486}'
    headers:
      Accept:
      - '*/*'
@@ -176,7 +259,7 @@ interactions:
      Connection:
      - keep-alive
      Content-Length:
-     - '4612'
+     - '4646'
      Content-Type:
      - application/json
    method: POST
@@ -184,19 +267,21 @@ interactions:
  response:
    body:
      string: !!binary |
H4sIAAAAAAAAA6SSQW8TMRCF7/yK0ZydKkuSttkbAiGFA+JQBIKtItc7Wbvx2pZnFoqi/e9om00T
|
H4sIAAAAAAAAA7SSTW/TQBCG7/yK0VxycaIkJST4VqmijYSEQEWIYhRt1xN7yXp3uztOgiL/d7Sx
|
||||||
QishcZ3x++bNPO/Q1ViisVpMm/zkann/9b3JZnm1XMnHmzfLd7b4IrL6xB++vUWF8e6ejIyKCxPb
|
TUpJOYC4zuf7zLwHVDmmKEvBsnJ6OL/cXV+9mX2+0+aag7u6vHlvH+7myw/bd2+XmKC9/0aSu46R
|
||||||
5ElcDKjQZNJCNZbF5fXrxWI+my8UtrEmjyU2SSazi8VEunwXJ9PZtECFHeuGsNxhyrFNspa4pcBY
|
tJXTxMoaTFB6Ekw5ppNXi+ni5WS2eJ1gZXPSmGLheHgxmg259vd2OL4YTzDBOoiCMD2g87ZyvGK7
|
||||||
FtPiWuERfmzMLhVKFO2PpWIx7xUaG50hxvL7DlviAzhHT1iiZnYsOshgMwahMKywqwIAQIViY9dY
|
IRMwnYyn8wRPw0+JxSJBtiz0KTSdzJoEZWmVpIDplwNWFPrB3mrCFEUIKrAwHGVaw2QiwiEzAAAZ
|
||||||
4QpLGItjgx5kKFa4gp/Oe+iYQCyBaN6uR4MEJratDjVIhKfa4dVFheoUmUlzDC40e+6NJWii9uD4
|
cmnrouSQYQpdsEvQnmMwwyWUYksQaikphHWt9XfYecVMBgY3pLWFT9brfADKsAUBa6UJjKgoh0EZ
|
||||||
WbmCR/CLIweZJWgjC+iUckzZaSHYxAxih+4zHpLXYT9+Ap//eyOTnTjjuD1cKhA9KilwlwedFliB
|
86tdzI94z4MR3JYEhvYMgcmBCsAW6kDAJQGLsFl18ATSVpUweSz4GeurRhkmj+V6EsEaZYpW821X
|
||||||
1T/oCXZcCbgzhpg3nfe/ztGcSG9fDAD+1e4e2atD3uPzv+IOuqX9tD+ucWZK5+b8p5xGO+Z66q8+
|
Bp4eauUpQEVn54BYM/kjjzLFOZ5Y+SeiBI4an1UfCUuCygYG4Zy3zivBBGvrgcuYPYPjtDAtyRA+
|
||||||
X/Ko7EdvVeixV7hxwbFd70lYIktMqNCFmh6wnPa3/avfAAAA//8DAKZ6myqyAwAA
|
/vNxZKSTKlT9Qw3RsZNMqH3sEwxLcN5uVU4gYCu0yqG96VFob0hTPLskOBKbfsFOaf13X21HNklv
|
||||||
|
0K78N3/Gb7Tbfhn/RJTwxVNrP/bL/zB4j3DE6Ggy02CT4FoZFcpVuxtTDGwdJqhMTntMx83X5sUP
|
||||||
|
AAAA//8DAK0qY5KVBAAA
|
||||||
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
-     - 7be2da943bd5ce54-SJC
+     - 7bfe3f5edbd9fac2-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
@@ -206,7 +291,7 @@ interactions:
      Content-Type:
      - application/json
      Date:
-     - Thu, 27 Apr 2023 00:12:36 GMT
+     - Sun, 30 Apr 2023 08:00:00 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
@@ -218,19 +303,25 @@ interactions:
      openai-organization:
      - user-adtx4fhfg1qsiyzdoaxciooj
      openai-processing-ms:
-     - '10505'
+     - '10898'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '3500'
+     x-ratelimit-limit-tokens:
+     - '90000'
      x-ratelimit-remaining-requests:
      - '3499'
+     x-ratelimit-remaining-tokens:
+     - '86482'
      x-ratelimit-reset-requests:
      - 17ms
+     x-ratelimit-reset-tokens:
+     - 2.345s
      x-request-id:
-     - f593fff7d415793b53e5124283ad25c9
+     - f294799d2564196ef4852c84fdc8fb17
    status:
      code: 200
      message: OK
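The new interaction recorded in this cassette is the summary-memory prompt. A sketch of how that prompt string is assembled; the helper name and signature are assumed for illustration, and only the wording comes from the recorded request body above:

def build_summary_prompt(summary_so_far: str, latest_development: str) -> str:
    # Wording copied from the recorded request; the function itself is
    # illustrative, not Auto-GPT's actual API.
    return (
        "Your task is to create a concise running summary of actions and "
        "information results in the provided text, focusing on key and "
        "potentially important information to remember.\n\n\n"
        "You will receive the current summary and the latest development. "
        "Combine them, adding relevant key information from the latest "
        "development in 1st person past tense and keeping the summary "
        "concise.\n\n"
        f'Summary So Far:\n"""\n{summary_so_far}\n"""\n\n'
        f'Latest Development:\n"""\n{latest_development}\n"""\n'
    )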
tests/test_ai_config.py (new file, +45 lines)
@@ -0,0 +1,45 @@
from autogpt.config.ai_config import AIConfig

"""
Test cases for the AIConfig class, which handles loading the AI configuration
settings from a YAML file.
"""


def test_goals_are_always_lists_of_strings(tmp_path):
    """Test that the goals attribute is always a list of strings."""

    yaml_content = """
ai_goals:
- Goal 1: Make a sandwich
- Goal 2, Eat the sandwich
- Goal 3 - Go to sleep
- "Goal 4: Wake up"
ai_name: McFamished
ai_role: A hungry AI
api_budget: 0.0
"""
    config_file = tmp_path / "ai_settings.yaml"
    config_file.write_text(yaml_content)

    ai_config = AIConfig.load(config_file)

    assert len(ai_config.ai_goals) == 4
    assert ai_config.ai_goals[0] == "Goal 1: Make a sandwich"
    assert ai_config.ai_goals[1] == "Goal 2, Eat the sandwich"
    assert ai_config.ai_goals[2] == "Goal 3 - Go to sleep"
    assert ai_config.ai_goals[3] == "Goal 4: Wake up"

    config_file.write_text("")
    ai_config.save(config_file)

    yaml_content2 = """ai_goals:
- 'Goal 1: Make a sandwich'
- Goal 2, Eat the sandwich
- Goal 3 - Go to sleep
- 'Goal 4: Wake up'
ai_name: McFamished
ai_role: A hungry AI
api_budget: 0.0
"""
    assert config_file.read_text() == yaml_content2
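The mixed quoting in that fixture is deliberate: in YAML, an unquoted `Goal 1: Make a sandwich` inside a list parses as a one-key mapping rather than a string, which is exactly what the loader has to normalize. A quick demonstration:

import yaml

# Unquoted "key: value" text inside a YAML list becomes a dict, not a str.
print(yaml.safe_load("ai_goals:\n- Goal 1: Make a sandwich"))
# {'ai_goals': [{'Goal 1': 'Make a sandwich'}]}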
@@ -33,18 +33,14 @@ def test_generate_context_empty_inputs(mocker):
    # Assert
    expected_result = (
        -1,
-       47,
-       3,
+       32,
+       2,
        [
            {"role": "system", "content": ""},
            {
                "role": "system",
                "content": f"The current time and date is {time.strftime('%c')}",
            },
-           {
-               "role": "system",
-               "content": f"This reminds you of these events from your past:\n\n\n",
-           },
        ],
    )
    assert result == expected_result
@@ -78,5 +74,5 @@ def test_generate_context_valid_inputs():
    assert result[0] >= 0
    assert result[2] >= 0
    assert result[1] >= 0
-   assert len(result[3]) >= 3  # current_context should have at least 3 messages
+   assert len(result[3]) >= 2  # current_context should have at least 2 messages
    assert result[1] <= 2048  # token limit for GPT-3.5-turbo-0301 is 2048 tokens
@@ -1,12 +1,18 @@
+"""
+This set of unit tests is designed to test the file operations that autoGPT has access to.
+"""
+
 import os
-import shutil
-import unittest
 from pathlib import Path
+from tempfile import gettempdir
+
+import pytest
 
 from autogpt.commands.file_operations import (
     append_to_file,
     check_duplicate_operation,
     delete_file,
+    download_file,
     log_operation,
     read_file,
     search_files,
@@ -14,118 +20,150 @@ from autogpt.commands.file_operations import (
     write_to_file,
 )
 from autogpt.config import Config
-from autogpt.workspace import Workspace
+from autogpt.utils import readable_file_size
 
 
-class TestFileOperations(unittest.TestCase):
-    """
-    This set of unit tests is designed to test the file operations that autoGPT has access to.
-    """
-
-    def setUp(self):
-        self.config = Config()
-        workspace_path = os.path.join(os.path.dirname(__file__), "workspace")
-        self.workspace_path = Workspace.make_workspace(workspace_path)
-        self.config.workspace_path = workspace_path
-        self.config.file_logger_path = os.path.join(workspace_path, "file_logger.txt")
-        self.workspace = Workspace(workspace_path, restrict_to_workspace=True)
-
-        self.test_file = str(self.workspace.get_path("test_file.txt"))
-        self.test_file2 = "test_file2.txt"
-        self.test_directory = str(self.workspace.get_path("test_directory"))
-        self.test_nested_file = str(self.workspace.get_path("nested/test_file.txt"))
-        self.file_content = "This is a test file.\n"
-        self.file_logger_logs = "file_logger.txt"
-
-        with open(self.test_file, "w") as f:
-            f.write(self.file_content)
-
-    def tearDown(self) -> None:
-        shutil.rmtree(self.workspace_path)
-
-    def test_check_duplicate_operation(self):
-        log_operation("write", self.test_file)
-        self.assertTrue(check_duplicate_operation("write", self.test_file))
-
-    # Test logging a file operation
-    def test_log_operation(self):
-        if os.path.exists(self.file_logger_logs):
-            os.remove(self.file_logger_logs)
-
-        log_operation("log_test", self.test_file)
-        with open(self.config.file_logger_path, "r") as f:
-            content = f.read()
-        self.assertIn(f"log_test: {self.test_file}", content)
-
-    # Test splitting a file into chunks
-    def test_split_file(self):
-        content = "abcdefghij"
-        chunks = list(split_file(content, max_length=4, overlap=1))
-        expected = ["abcd", "defg", "ghij"]
-        self.assertEqual(chunks, expected)
-
-    def test_read_file(self):
-        content = read_file(self.test_file)
-        self.assertEqual(content, self.file_content)
-
-    def test_write_to_file(self):
-        new_content = "This is new content.\n"
-        write_to_file(self.test_nested_file, new_content)
-        with open(self.test_nested_file, "r") as f:
-            content = f.read()
-        self.assertEqual(content, new_content)
-
-    def test_append_to_file(self):
-        append_text = "This is appended text.\n"
-        append_to_file(self.test_nested_file, append_text)
-        with open(self.test_nested_file, "r") as f:
-            content = f.read()
-
-        append_to_file(self.test_nested_file, append_text)
-
-        with open(self.test_nested_file, "r") as f:
-            content_after = f.read()
-
-        self.assertEqual(content_after, append_text + append_text)
-
-    def test_delete_file(self):
-        delete_file(self.test_file)
-        self.assertFalse(os.path.exists(self.test_file))
-
-    def test_search_files(self):
-        # Case 1: Create files A and B, search for A, and ensure we don't return A and B
-        file_a = self.workspace.get_path("file_a.txt")
-        file_b = self.workspace.get_path("file_b.txt")
-
-        with open(file_a, "w") as f:
-            f.write("This is file A.")
-
-        with open(file_b, "w") as f:
-            f.write("This is file B.")
-
-        # Create a subdirectory and place a copy of file_a in it
-        if not os.path.exists(self.test_directory):
-            os.makedirs(self.test_directory)
-
-        with open(os.path.join(self.test_directory, file_a.name), "w") as f:
-            f.write("This is file A in the subdirectory.")
-
-        files = search_files(str(self.workspace.root))
-        self.assertIn(file_a.name, files)
-        self.assertIn(file_b.name, files)
-        self.assertIn(os.path.join(Path(self.test_directory).name, file_a.name), files)
-
-        # Clean up
-        os.remove(file_a)
-        os.remove(file_b)
-        os.remove(os.path.join(self.test_directory, file_a.name))
-        os.rmdir(self.test_directory)
-
-        # Case 2: Search for a file that does not exist and make sure we don't throw
-        non_existent_file = "non_existent_file.txt"
-        files = search_files("")
-        self.assertNotIn(non_existent_file, files)
-
-
-if __name__ == "__main__":
-    unittest.main()
+@pytest.fixture()
+def file_content():
+    return "This is a test file.\n"
+
+
+@pytest.fixture()
+def test_file(workspace, file_content):
+    test_file = str(workspace.get_path("test_file.txt"))
+    with open(test_file, "w") as f:
+        f.write(file_content)
+    return test_file
+
+
+@pytest.fixture()
+def test_directory(workspace):
+    return str(workspace.get_path("test_directory"))
+
+
+@pytest.fixture()
+def test_nested_file(workspace):
+    return str(workspace.get_path("nested/test_file.txt"))
+
+
+def test_check_duplicate_operation(config, test_file):
+    log_operation("write", test_file)
+    assert check_duplicate_operation("write", test_file) is True
+
+
+# Test logging a file operation
+def test_log_operation(test_file, config):
+    file_logger_name = config.file_logger_path
+    if os.path.exists(file_logger_name):
+        os.remove(file_logger_name)
+
+    log_operation("log_test", test_file)
+    with open(config.file_logger_path, "r") as f:
+        content = f.read()
+    assert f"log_test: {test_file}" in content
+
+
+# Test splitting a file into chunks
+def test_split_file():
+    content = "abcdefghij"
+    chunks = list(split_file(content, max_length=4, overlap=1))
+    expected = ["abcd", "defg", "ghij"]
+    assert chunks == expected
+
+
+def test_read_file(test_file, file_content):
+    content = read_file(test_file)
+    assert content == file_content
+
+
+def test_write_to_file(config, test_nested_file):
+    new_content = "This is new content.\n"
+    write_to_file(test_nested_file, new_content)
+    with open(test_nested_file, "r") as f:
+        content = f.read()
+    assert content == new_content
+
+
+def test_append_to_file(test_nested_file):
+    append_text = "This is appended text.\n"
+    write_to_file(test_nested_file, append_text)
+
+    append_to_file(test_nested_file, append_text)
+
+    with open(test_nested_file, "r") as f:
+        content_after = f.read()
+
+    assert content_after == append_text + append_text
+
+
+def test_delete_file(config, test_file):
+    delete_file(test_file)
+    assert os.path.exists(test_file) is False
+    assert delete_file(test_file) == "Error: File has already been deleted."
+
+
+def test_delete_missing_file(test_file):
+    os.remove(test_file)
+    try:
+        os.remove(test_file)
+    except FileNotFoundError as e:
+        error_string = str(e)
+        assert error_string in delete_file(test_file)
+        return
+    assert True, "Failed to test delete_file"
+
+
+def test_search_files(config, workspace, test_directory):
+    # Case 1: Create files A and B, search for A, and ensure we don't return A and B
+    file_a = workspace.get_path("file_a.txt")
+    file_b = workspace.get_path("file_b.txt")
+
+    with open(file_a, "w") as f:
+        f.write("This is file A.")
+
+    with open(file_b, "w") as f:
+        f.write("This is file B.")
+
+    # Create a subdirectory and place a copy of file_a in it
+    if not os.path.exists(test_directory):
+        os.makedirs(test_directory)
+
+    with open(os.path.join(test_directory, file_a.name), "w") as f:
+        f.write("This is file A in the subdirectory.")
+
+    files = search_files(str(workspace.root))
+    assert file_a.name in files
+    assert file_b.name in files
+    assert os.path.join(Path(test_directory).name, file_a.name) in files
+
+    # Clean up
+    os.remove(file_a)
+    os.remove(file_b)
+    os.remove(os.path.join(test_directory, file_a.name))
+    os.rmdir(test_directory)
+
+    # Case 2: Search for a file that does not exist and make sure we don't throw
+    non_existent_file = "non_existent_file.txt"
+    files = search_files("")
+    assert non_existent_file not in files
+
+
+def test_download_file():
+    url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.2.2.tar.gz"
+    local_name = os.path.join(gettempdir(), "auto-gpt.tar.gz")
+    size = 365023
+    readable_size = readable_file_size(size)
+    assert (
+        download_file(url, local_name)
+        == f'Successfully downloaded and locally stored file: "{local_name}"! (Size: {readable_size})'
+    )
+    assert os.path.isfile(local_name) is True
+    assert os.path.getsize(local_name) == size
+
+    url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.0.0.tar.gz"
+    assert "Got an HTTP Error whilst trying to download file" in download_file(
+        url, local_name
+    )
+
+    url = "https://thiswebsiteiswrong.hmm/v0.0.0.tar.gz"
+    assert "Failed to establish a new connection:" in download_file(url, local_name)
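The pytest rewrite above leans on `config` and `workspace` fixtures that are not shown in this diff. A rough sketch of what such fixtures could look like, based only on the `setUp` logic removed above; the fixture bodies and use of `tmp_path` are assumptions, not the project's actual conftest:

# Hypothetical conftest.py sketch; mirrors the removed setUp(), not the
# repository's real fixture definitions.
import pytest

from autogpt.config import Config
from autogpt.workspace import Workspace


@pytest.fixture()
def config(tmp_path):
    cfg = Config()
    cfg.workspace_path = str(tmp_path)
    cfg.file_logger_path = str(tmp_path / "file_logger.txt")
    return cfg


@pytest.fixture()
def workspace(config):
    # Workspace.make_workspace was how the removed setUp created the directory.
    Workspace.make_workspace(config.workspace_path)
    return Workspace(config.workspace_path, restrict_to_workspace=True)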
@@ -1,6 +1,5 @@
 # Generated by CodiumAI
 import time
-import unittest
 
 from autogpt.spinner import Spinner
 
@@ -29,40 +28,43 @@ ALMOST_DONE_MESSAGE = "Almost done..."
 PLEASE_WAIT = "Please wait..."
 
 
-class TestSpinner(unittest.TestCase):
-    def test_spinner_initializes_with_default_values(self):
-        """Tests that the spinner initializes with default values."""
-        with Spinner() as spinner:
-            self.assertEqual(spinner.message, "Loading...")
-            self.assertEqual(spinner.delay, 0.1)
-
-    def test_spinner_initializes_with_custom_values(self):
-        """Tests that the spinner initializes with custom message and delay values."""
-        with Spinner(message=PLEASE_WAIT, delay=0.2) as spinner:
-            self.assertEqual(spinner.message, PLEASE_WAIT)
-            self.assertEqual(spinner.delay, 0.2)
-
-    #
-    def test_spinner_stops_spinning(self):
-        """Tests that the spinner starts spinning and stops spinning without errors."""
-        with Spinner() as spinner:
-            time.sleep(1)
-            spinner.update_message(ALMOST_DONE_MESSAGE)
-            time.sleep(1)
-        self.assertFalse(spinner.running)
-
-    def test_spinner_updates_message_and_still_spins(self):
-        """Tests that the spinner message can be updated while the spinner is running and the spinner continues spinning."""
-        with Spinner() as spinner:
-            self.assertTrue(spinner.running)
-            time.sleep(1)
-            spinner.update_message(ALMOST_DONE_MESSAGE)
-            time.sleep(1)
-            self.assertEqual(spinner.message, ALMOST_DONE_MESSAGE)
-        self.assertFalse(spinner.running)
-
-    def test_spinner_can_be_used_as_context_manager(self):
-        """Tests that the spinner can be used as a context manager."""
-        with Spinner() as spinner:
-            self.assertTrue(spinner.running)
-        self.assertFalse(spinner.running)
+def test_spinner_initializes_with_default_values():
+    """Tests that the spinner initializes with default values."""
+    with Spinner() as spinner:
+        assert spinner.message == "Loading..."
+        assert spinner.delay == 0.1
+
+
+def test_spinner_initializes_with_custom_values():
+    """Tests that the spinner initializes with custom message and delay values."""
+    with Spinner(message=PLEASE_WAIT, delay=0.2) as spinner:
+        assert spinner.message == PLEASE_WAIT
+        assert spinner.delay == 0.2
+
+
+#
+def test_spinner_stops_spinning():
+    """Tests that the spinner starts spinning and stops spinning without errors."""
+    with Spinner() as spinner:
+        time.sleep(1)
+        spinner.update_message(ALMOST_DONE_MESSAGE)
+        time.sleep(1)
+    assert spinner.running == False
+
+
+def test_spinner_updates_message_and_still_spins():
+    """Tests that the spinner message can be updated while the spinner is running and the spinner continues spinning."""
+    with Spinner() as spinner:
+        assert spinner.running == True
+        time.sleep(1)
+        spinner.update_message(ALMOST_DONE_MESSAGE)
+        time.sleep(1)
+        assert spinner.message == ALMOST_DONE_MESSAGE
+    assert spinner.running == False
+
+
+def test_spinner_can_be_used_as_context_manager():
+    """Tests that the spinner can be used as a context manager."""
+    with Spinner() as spinner:
+        assert spinner.running == True
+    assert spinner.running == False
@@ -41,7 +41,10 @@ def before_record_request(request):
 
 
 def filter_hostnames(request):
-    allowed_hostnames = ["api.openai.com"]  # List of hostnames you want to allow
+    allowed_hostnames = [
+        "api.openai.com",
+        "localhost:50337",
+    ]  # List of hostnames you want to allow
+
     if any(hostname in request.url for hostname in allowed_hostnames):
         return request
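For context, `filter_hostnames` is the kind of hook vcrpy accepts as `before_record_request`: returning the request keeps it, and falling through to an implicit `None` drops it from the cassette. A minimal wiring sketch; the actual registration in this repository sits outside this hunk:

import vcr

# Requests whose URL matches no allowed hostname are filtered out of the
# cassette because the hook returns None for them.
my_vcr = vcr.VCR(before_record_request=filter_hostnames)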