Pass config everywhere in order to get rid of singleton (#4666)
Signed-off-by: Merwane Hamadi <merwanehamadi@gmail.com>
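The diff below replaces AutoGPT's implicit Config() singleton lookups with an explicit config argument that callers pass down the stack. As a rough orientation before the patch, here is a minimal, self-contained sketch of that dependency-injection pattern; the AppConfig, call_llm, and summarize names are hypothetical illustrations of the before/after shape, not code from this commit.

from dataclasses import dataclass


@dataclass
class AppConfig:
    fast_llm_model: str = "gpt-3.5-turbo"
    debug_mode: bool = False


def call_llm(prompt: str, model: str) -> str:
    # Stand-in for a real completion call.
    return f"[{model}] {prompt[:20]}..."


# Before (singleton style): the dependency is hidden inside the function
# and hard to override in tests.
_GLOBAL_CONFIG = AppConfig()


def summarize_singleton(text: str) -> str:
    cfg = _GLOBAL_CONFIG  # implicit global state
    return call_llm(text, model=cfg.fast_llm_model)


# After (explicit style, as applied throughout this diff): the caller
# injects the config it wants the function to use.
def summarize(text: str, config: AppConfig) -> str:
    return call_llm(text, model=config.fast_llm_model)


if __name__ == "__main__":
    config = AppConfig(fast_llm_model="gpt-4", debug_mode=True)
    print(summarize("Pass config everywhere to remove the singleton.", config))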
@@ -143,7 +143,7 @@ class Agent:
try:
assistant_reply_json = extract_json_from_response(assistant_reply)
-validate_json(assistant_reply_json)
+validate_json(assistant_reply_json, self.config)
except json.JSONDecodeError as e:
logger.error(f"Exception while validating assistant reply JSON: {e}")
assistant_reply_json = {}

@@ -158,7 +158,7 @@ class Agent:
# Get command name and arguments
try:
print_assistant_thoughts(
-self.ai_name, assistant_reply_json, self.config.speak_mode
+self.ai_name, assistant_reply_json, self.config
)
command_name, arguments = get_command(assistant_reply_json)
if self.config.speak_mode:

@@ -197,10 +197,12 @@ class Agent:
)
while True:
if self.config.chat_messages_enabled:
-console_input = clean_input("Waiting for your response...")
+console_input = clean_input(
+self.config, "Waiting for your response..."
+)
else:
console_input = clean_input(
-Fore.MAGENTA + "Input:" + Style.RESET_ALL
+self.config, Fore.MAGENTA + "Input:" + Style.RESET_ALL
)
if console_input.lower().strip() == self.config.authorise_key:
user_input = "GENERATE NEXT COMMAND JSON"
@@ -10,12 +10,12 @@ from autogpt.singleton import Singleton
class AgentManager(metaclass=Singleton):
"""Agent manager for managing GPT agents"""

-def __init__(self):
+def __init__(self, config: Config):
self.next_key = 0
self.agents: dict[
int, tuple[str, list[Message], str]
] = {} # key, (task, full_message_history, model)
-self.cfg = Config()
+self.config = config

# Create new GPT agent
# TODO: Centralise use of create_chat_completion() to globally enforce token limit

@@ -35,18 +35,18 @@ class AgentManager(metaclass=Singleton):
"""
messages = ChatSequence.for_model(model, [Message("user", creation_prompt)])

-for plugin in self.cfg.plugins:
+for plugin in self.config.plugins:
if not plugin.can_handle_pre_instruction():
continue
if plugin_messages := plugin.pre_instruction(messages.raw()):
messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])
# Start GPT instance
-agent_reply = create_chat_completion(prompt=messages)
+agent_reply = create_chat_completion(prompt=messages, config=self.config)

messages.add("assistant", agent_reply)

plugins_reply = ""
-for i, plugin in enumerate(self.cfg.plugins):
+for i, plugin in enumerate(self.config.plugins):
if not plugin.can_handle_on_instruction():
continue
if plugin_result := plugin.on_instruction([m.raw() for m in messages]):

@@ -62,7 +62,7 @@ class AgentManager(metaclass=Singleton):
self.agents[key] = (task, list(messages), model)

-for plugin in self.cfg.plugins:
+for plugin in self.config.plugins:
if not plugin.can_handle_post_instruction():
continue
agent_reply = plugin.post_instruction(agent_reply)

@@ -85,19 +85,19 @@ class AgentManager(metaclass=Singleton):
messages = ChatSequence.for_model(model, messages)
messages.add("user", message)

-for plugin in self.cfg.plugins:
+for plugin in self.config.plugins:
if not plugin.can_handle_pre_instruction():
continue
if plugin_messages := plugin.pre_instruction([m.raw() for m in messages]):
messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])

# Start GPT instance
-agent_reply = create_chat_completion(prompt=messages)
+agent_reply = create_chat_completion(prompt=messages, config=self.config)

messages.add("assistant", agent_reply)

plugins_reply = agent_reply
-for i, plugin in enumerate(self.cfg.plugins):
+for i, plugin in enumerate(self.config.plugins):
if not plugin.can_handle_on_instruction():
continue
if plugin_result := plugin.on_instruction([m.raw() for m in messages]):

@@ -107,7 +107,7 @@ class AgentManager(metaclass=Singleton):
if plugins_reply and plugins_reply != "":
messages.add("assistant", plugins_reply)

-for plugin in self.cfg.plugins:
+for plugin in self.config.plugins:
if not plugin.can_handle_post_instruction():
continue
agent_reply = plugin.post_instruction(agent_reply)
@@ -19,10 +19,10 @@ def command(
"""The command decorator is used to create Command objects from ordinary functions."""

# TODO: Remove this in favor of better command management
-CFG = Config()
+config = Config()

if callable(enabled):
-enabled = enabled(CFG)
+enabled = enabled(config)
if not enabled:
if disabled_reason is not None:
logger.debug(f"Command '{name}' is disabled: {disabled_reason}")
@@ -10,7 +10,6 @@ from autogpt.agent.agent import Agent
from autogpt.command_decorator import command
from autogpt.config import Config
from autogpt.logs import logger
-from autogpt.setup import CFG
from autogpt.workspace.workspace import Workspace

ALLOWLIST_CONTROL = "allowlist"

@@ -83,7 +82,7 @@ def execute_python_file(filename: str, agent: Agent) -> str:
str: The output of the file
"""
logger.info(
-f"Executing python file '{filename}' in working directory '{CFG.workspace_path}'"
+f"Executing python file '{filename}' in working directory '{agent.config.workspace_path}'"
)

if not filename.endswith(".py"):

@@ -105,7 +104,7 @@ def execute_python_file(filename: str, agent: Agent) -> str:
["python", str(path)],
capture_output=True,
encoding="utf8",
-cwd=CFG.workspace_path,
+cwd=agent.config.workspace_path,
)
if result.returncode == 0:
return result.stdout

@@ -174,6 +173,7 @@ def validate_command(command: str, config: Config) -> bool:

Args:
command (str): The command to validate
+config (Config): The config to use to validate the command

Returns:
bool: True if the command is allowed, False otherwise

@@ -199,7 +199,7 @@ def validate_command(command: str, config: Config) -> bool:
"required": True,
}
},
-enabled=lambda cfg: cfg.execute_local_commands,
+enabled=lambda config: config.execute_local_commands,
disabled_reason="You are not allowed to run local shell commands. To execute"
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
"in your config file: .env - do not attempt to bypass the restriction.",
@@ -81,6 +81,7 @@ def is_duplicate_operation(
Args:
operation: The operation to check for
filename: The name of the file to check for
+config: The agent config
checksum: The checksum of the contents to be written

Returns:

@@ -137,7 +138,7 @@ def read_file(filename: str, agent: Agent) -> str:
content = read_textual_file(filename, logger)

# TODO: invalidate/update memory when file is edited
-file_memory = MemoryItem.from_text_file(content, filename)
+file_memory = MemoryItem.from_text_file(content, filename, agent.config)
if len(file_memory.chunks) > 1:
return file_memory.summary
@@ -181,7 +181,7 @@ def generate_image_with_sd_webui(
"negative_prompt": negative_prompt,
"sampler_index": "DDIM",
"steps": 20,
-"cfg_scale": 7.0,
+"config_scale": 7.0,
"width": size,
"height": size,
"n_iter": 1,
@@ -232,6 +232,6 @@ def summarize_memorize_webpage(

memory = get_memory(agent.config)

-new_memory = MemoryItem.from_webpage(text, url, question=question)
+new_memory = MemoryItem.from_webpage(text, url, agent.config, question=question)
memory.add(new_memory)
return new_memory.summary
@@ -59,14 +59,14 @@ class AIConfig:
self.command_registry: CommandRegistry | None = None

@staticmethod
-def load(config_file: str = SAVE_FILE) -> "AIConfig":
+def load(ai_settings_file: str = SAVE_FILE) -> "AIConfig":
"""
Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget) loaded from
yaml file if yaml file exists,
else returns class with no parameters.

Parameters:
-config_file (int): The path to the config yaml file.
+ai_settings_file (int): The path to the config yaml file.
DEFAULT: "../ai_settings.yaml"

Returns:

@@ -74,7 +74,7 @@ class AIConfig:
"""

try:
-with open(config_file, encoding="utf-8") as file:
+with open(ai_settings_file, encoding="utf-8") as file:
config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
except FileNotFoundError:
config_params = {}

@@ -91,12 +91,12 @@ class AIConfig:
# type: Type[AIConfig]
return AIConfig(ai_name, ai_role, ai_goals, api_budget)

-def save(self, config_file: str = SAVE_FILE) -> None:
+def save(self, ai_settings_file: str = SAVE_FILE) -> None:
"""
Saves the class parameters to the specified file yaml file path as a yaml file.

Parameters:
-config_file(str): The path to the config yaml file.
+ai_settings_file(str): The path to the config yaml file.
DEFAULT: "../ai_settings.yaml"

Returns:

@@ -109,11 +109,11 @@ class AIConfig:
"ai_goals": self.ai_goals,
"api_budget": self.api_budget,
}
-with open(config_file, "w", encoding="utf-8") as file:
+with open(ai_settings_file, "w", encoding="utf-8") as file:
yaml.dump(config, file, allow_unicode=True)

def construct_full_prompt(
-self, prompt_generator: Optional[PromptGenerator] = None
+self, config, prompt_generator: Optional[PromptGenerator] = None
) -> str:
"""
Returns a prompt to the user with the class information in an organized fashion.

@@ -133,22 +133,20 @@ class AIConfig:
""
)

-from autogpt.config import Config
from autogpt.prompts.prompt import build_default_prompt_generator

-cfg = Config()
if prompt_generator is None:
-prompt_generator = build_default_prompt_generator()
+prompt_generator = build_default_prompt_generator(config)
prompt_generator.goals = self.ai_goals
prompt_generator.name = self.ai_name
prompt_generator.role = self.ai_role
prompt_generator.command_registry = self.command_registry
-for plugin in cfg.plugins:
+for plugin in config.plugins:
if not plugin.can_handle_post_prompt():
continue
prompt_generator = plugin.post_prompt(prompt_generator)

-if cfg.execute_local_commands:
+if config.execute_local_commands:
# add OS info to prompt
os_name = platform.system()
os_info = (
@@ -300,10 +300,9 @@ class Config(metaclass=Singleton):
self.memory_backend = name


-def check_openai_api_key() -> None:
+def check_openai_api_key(config: Config) -> None:
"""Check if the OpenAI API key is set in config.py or as an environment variable."""
-cfg = Config()
-if not cfg.openai_api_key:
+if not config.openai_api_key:
print(
Fore.RED
+ "Please set your OpenAI API key in .env or as an environment variable."
@@ -6,11 +6,8 @@ import yaml
from colorama import Fore

from autogpt import utils
-from autogpt.config.config import Config
from autogpt.logs import logger

-CFG = Config()

class PromptConfig:
"""

@@ -22,10 +19,7 @@ class PromptConfig:
performance_evaluations (list): Performance evaluation list for the prompt generator.
"""

-def __init__(
-self,
-config_file: str = CFG.prompt_settings_file,
-) -> None:
+def __init__(self, prompt_settings_file: str) -> None:
"""
Initialize a class instance with parameters (constraints, resources, performance_evaluations) loaded from
yaml file if yaml file exists,

@@ -39,13 +33,13 @@ class PromptConfig:
None
"""
# Validate file
-(validated, message) = utils.validate_yaml_file(config_file)
+(validated, message) = utils.validate_yaml_file(prompt_settings_file)
if not validated:
logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
logger.double_check()
exit(1)

-with open(config_file, encoding="utf-8") as file:
+with open(prompt_settings_file, encoding="utf-8") as file:
config_params = yaml.load(file, Loader=yaml.FullLoader)

self.constraints = config_params.get("constraints", [])
@@ -9,7 +9,6 @@ from jsonschema import Draft7Validator
from autogpt.config import Config
from autogpt.logs import logger

-CFG = Config()
LLM_DEFAULT_RESPONSE_FORMAT = "llm_response_format_1"


@@ -37,7 +36,7 @@ def llm_response_schema(


def validate_json(
-json_object: object, schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT
+json_object: object, config: Config, schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT
) -> bool:
"""
:type schema_name: object

@@ -54,7 +53,7 @@ def validate_json(
for error in errors:
logger.error(f"JSON Validation Error: {error}")

-if CFG.debug_mode:
+if config.debug_mode:
logger.error(
json.dumps(json_object, indent=4)
) # Replace 'json_object' with the variable containing the JSON data
@@ -96,7 +96,7 @@ def chat_with_ai(
current_tokens_used += 500 # Reserve space for new_summary_message

# Add Messages until the token limit is reached or there are no more messages to add.
-for cycle in reversed(list(agent.history.per_cycle())):
+for cycle in reversed(list(agent.history.per_cycle(agent.config))):
messages_to_add = [msg for msg in cycle if msg is not None]
tokens_to_add = count_message_tokens(messages_to_add, model)
if current_tokens_used + tokens_to_add > send_token_limit:

@@ -110,14 +110,14 @@ def chat_with_ai(
# Update & add summary of trimmed messages
if len(agent.history) > 0:
new_summary_message, trimmed_messages = agent.history.trim_messages(
-current_message_chain=list(message_sequence),
+current_message_chain=list(message_sequence), config=agent.config
)
tokens_to_add = count_message_tokens([new_summary_message], model)
message_sequence.insert(insertion_index, new_summary_message)
current_tokens_used += tokens_to_add - 500

# FIXME: uncomment when memory is back in use
-# memory_store = get_memory(cfg)
+# memory_store = get_memory(config)
# for _, ai_msg, result_msg in agent.history.per_cycle(trimmed_messages):
# memory_to_add = MemoryItem.from_ai_action(ai_msg, result_msg)
# logger.debug(f"Storing the following memory:\n{memory_to_add.dump()}")

@@ -192,6 +192,7 @@ def chat_with_ai(
# temperature and other settings we care about
assistant_reply = create_chat_completion(
prompt=message_sequence,
+config=agent.config,
max_tokens=tokens_remaining,
)
@@ -57,18 +57,18 @@ def call_ai_function(

def create_text_completion(
prompt: str,
+config: Config,
model: Optional[str],
temperature: Optional[float],
max_output_tokens: Optional[int],
) -> str:
-cfg = Config()
if model is None:
-model = cfg.fast_llm_model
+model = config.fast_llm_model
if temperature is None:
-temperature = cfg.temperature
+temperature = config.temperature

-if cfg.use_azure:
-kwargs = {"deployment_id": cfg.get_azure_deployment_id_for_model(model)}
+if config.use_azure:
+kwargs = {"deployment_id": config.get_azure_deployment_id_for_model(model)}
else:
kwargs = {"model": model}

@@ -77,7 +77,7 @@ def create_text_completion(
**kwargs,
temperature=temperature,
max_tokens=max_output_tokens,
-api_key=cfg.openai_api_key,
+api_key=config.openai_api_key,
)
logger.debug(f"Response: {response}")

@@ -87,6 +87,7 @@ def create_text_completion(
# Overly simple abstraction until we create something better
def create_chat_completion(
prompt: ChatSequence,
+config: Config,
model: Optional[str] = None,
temperature: Optional[float] = None,
max_tokens: Optional[int] = None,

@@ -102,11 +103,10 @@ def create_chat_completion(
Returns:
str: The response from the chat completion
"""
-cfg = Config()
if model is None:
model = prompt.model.name
if temperature is None:
-temperature = cfg.temperature
+temperature = config.temperature

logger.debug(
f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"

@@ -117,7 +117,7 @@ def create_chat_completion(
"max_tokens": max_tokens,
}

-for plugin in cfg.plugins:
+for plugin in config.plugins:
if plugin.can_handle_chat_completion(
messages=prompt.raw(),
**chat_completion_kwargs,

@@ -129,11 +129,11 @@ def create_chat_completion(
if message is not None:
return message

-chat_completion_kwargs["api_key"] = cfg.openai_api_key
-if cfg.use_azure:
-chat_completion_kwargs["deployment_id"] = cfg.get_azure_deployment_id_for_model(
-model
-)
+chat_completion_kwargs["api_key"] = config.openai_api_key
+if config.use_azure:
+chat_completion_kwargs[
+"deployment_id"
+] = config.get_azure_deployment_id_for_model(model)

response = iopenai.create_chat_completion(
messages=prompt.raw(),

@@ -148,7 +148,7 @@ def create_chat_completion(
logger.error(response.error)
raise RuntimeError(response.error)

-for plugin in cfg.plugins:
+for plugin in config.plugins:
if not plugin.can_handle_on_response():
continue
resp = plugin.on_response(resp)
@@ -9,6 +9,7 @@ from typing import Any

from colorama import Fore, Style

+from autogpt.config import Config
from autogpt.log_cycle.json_handler import JsonFileHandler, JsonFormatter
from autogpt.singleton import Singleton
from autogpt.speech import say_text

@@ -254,7 +255,7 @@ logger = Logger()
def print_assistant_thoughts(
ai_name: object,
assistant_reply_json_valid: object,
-speak_mode: bool = False,
+config: Config,
) -> None:
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None

@@ -288,7 +289,7 @@ def print_assistant_thoughts(
logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
# Speak the assistant's thoughts
if assistant_thoughts_speak:
-if speak_mode:
-say_text(assistant_thoughts_speak)
+if config.speak_mode:
+say_text(assistant_thoughts_speak, config)
else:
logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")
@@ -53,12 +53,12 @@ def run_auto_gpt(
logger.set_level(logging.DEBUG if debug else logging.INFO)
logger.speak_mode = speak

-cfg = Config()
+config = Config()
# TODO: fill in llm values here
-check_openai_api_key()
+check_openai_api_key(config)

create_config(
-cfg,
+config,
continuous,
continuous_limit,
ai_settings,

@@ -74,17 +74,17 @@ def run_auto_gpt(
skip_news,
)

-if cfg.continuous_mode:
+if config.continuous_mode:
for line in get_legal_warning().split("\n"):
logger.warn(markdown_to_ansi_style(line), "LEGAL:", Fore.RED)

-if not cfg.skip_news:
+if not config.skip_news:
motd, is_new_motd = get_latest_bulletin()
if motd:
motd = markdown_to_ansi_style(motd)
for motd_line in motd.split("\n"):
logger.info(motd_line, "NEWS:", Fore.GREEN)
-if is_new_motd and not cfg.chat_messages_enabled:
+if is_new_motd and not config.chat_messages_enabled:
input(
Fore.MAGENTA
+ Style.BRIGHT

@@ -123,7 +123,7 @@ def run_auto_gpt(
# TODO: pass in the ai_settings file and the env file and have them cloned into
# the workspace directory so we can bind them to the agent.
workspace_directory = Workspace.make_workspace(workspace_directory)
-cfg.workspace_path = str(workspace_directory)
+config.workspace_path = str(workspace_directory)

# HACK: doing this here to collect some globals that depend on the workspace.
file_logger_path = workspace_directory / "file_logger.txt"

@@ -131,17 +131,17 @@ def run_auto_gpt(
with file_logger_path.open(mode="w", encoding="utf-8") as f:
f.write("File Operation Logger ")

-cfg.file_logger_path = str(file_logger_path)
+config.file_logger_path = str(file_logger_path)

-cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode))
+config.set_plugins(scan_plugins(config, config.debug_mode))
# Create a CommandRegistry instance and scan default folder
command_registry = CommandRegistry()

logger.debug(
-f"The following command categories are disabled: {cfg.disabled_command_categories}"
+f"The following command categories are disabled: {config.disabled_command_categories}"
)
enabled_command_categories = [
-x for x in COMMAND_CATEGORIES if x not in cfg.disabled_command_categories
+x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories
]

logger.debug(

@@ -152,7 +152,7 @@ def run_auto_gpt(
command_registry.import_commands(command_category)

ai_name = ""
-ai_config = construct_main_ai_config()
+ai_config = construct_main_ai_config(config)
ai_config.command_registry = command_registry
if ai_config.ai_name:
ai_name = ai_config.ai_name

@@ -161,22 +161,22 @@ def run_auto_gpt(
next_action_count = 0

# add chat plugins capable of report to logger
-if cfg.chat_messages_enabled:
-for plugin in cfg.plugins:
+if config.chat_messages_enabled:
+for plugin in config.plugins:
if hasattr(plugin, "can_handle_report") and plugin.can_handle_report():
logger.info(f"Loaded plugin into logger: {plugin.__class__.__name__}")
logger.chat_plugins.append(plugin)

# Initialize memory and make sure it is empty.
# this is particularly important for indexing and referencing pinecone memory
-memory = get_memory(cfg)
+memory = get_memory(config)
memory.clear()
logger.typewriter_log(
"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
)
-logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
-system_prompt = ai_config.construct_full_prompt()
-if cfg.debug_mode:
+logger.typewriter_log("Using Browser:", Fore.GREEN, config.selenium_web_browser)
+system_prompt = ai_config.construct_full_prompt(config)
+if config.debug_mode:
logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt)

agent = Agent(

@@ -188,6 +188,6 @@ def run_auto_gpt(
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
workspace_directory=workspace_directory,
ai_config=ai_config,
-config=cfg,
+config=config,
)
agent.start_interaction_loop()
@@ -47,8 +47,7 @@ class MessageHistory:
return self.messages.append(message)

def trim_messages(
-self,
-current_message_chain: list[Message],
+self, current_message_chain: list[Message], config: Config
) -> tuple[Message, list[Message]]:
"""
Returns a list of trimmed messages: messages which are in the message history

@@ -56,6 +55,7 @@ class MessageHistory:

Args:
current_message_chain (list[Message]): The messages currently in the context.
+config (Config): The config to use.

Returns:
Message: A message with the new running summary after adding the trimmed messages.

@@ -75,7 +75,7 @@ class MessageHistory:
return self.summary_message(), []

new_summary_message = self.update_running_summary(
-new_events=new_messages_not_in_chain
+new_events=new_messages_not_in_chain, config=config
)

# Find the index of the last message processed

@@ -84,7 +84,7 @@ class MessageHistory:

return new_summary_message, new_messages_not_in_chain

-def per_cycle(self, messages: list[Message] | None = None):
+def per_cycle(self, config: Config, messages: list[Message] | None = None):
"""
Yields:
Message: a message containing user input

@@ -118,7 +118,9 @@ class MessageHistory:
f"This reminds you of these events from your past: \n{self.summary}",
)

-def update_running_summary(self, new_events: list[Message]) -> Message:
+def update_running_summary(
+self, new_events: list[Message], config: Config
+) -> Message:
"""
This function takes a list of dictionaries representing new events and combines them with the current summary,
focusing on key and potentially important information to remember. The updated summary is returned in a message

@@ -135,8 +137,6 @@ class MessageHistory:
update_running_summary(new_events)
# Returns: "This reminds you of these events from your past: \nI entered the kitchen and found a scrawled note saying 7."
"""
-cfg = Config()

if not new_events:
return self.summary_message()

@@ -156,7 +156,7 @@ class MessageHistory:
event.content = json.dumps(content_dict)
except json.JSONDecodeError as e:
logger.error(f"Error: Invalid JSON: {e}")
-if cfg.debug_mode:
+if config.debug_mode:
logger.error(f"{event.content}")

elif event.role.lower() == "system":

@@ -171,23 +171,23 @@ class MessageHistory:
# Assume an upper bound length for the summary prompt template, i.e. Your task is to create a concise running summary...., in summarize_batch func
# TODO make this default dynamic
prompt_template_length = 100
-max_tokens = OPEN_AI_CHAT_MODELS.get(cfg.fast_llm_model).max_tokens
-summary_tlength = count_string_tokens(str(self.summary), cfg.fast_llm_model)
+max_tokens = OPEN_AI_CHAT_MODELS.get(config.fast_llm_model).max_tokens
+summary_tlength = count_string_tokens(str(self.summary), config.fast_llm_model)
batch = []
batch_tlength = 0

# TODO Can put a cap on length of total new events and drop some previous events to save API cost, but need to think thru more how to do it without losing the context
for event in new_events:
-event_tlength = count_string_tokens(str(event), cfg.fast_llm_model)
+event_tlength = count_string_tokens(str(event), config.fast_llm_model)

if (
batch_tlength + event_tlength
> max_tokens - prompt_template_length - summary_tlength
):
# The batch is full. Summarize it and start a new one.
-self.summarize_batch(batch, cfg)
+self.summarize_batch(batch, config)
summary_tlength = count_string_tokens(
-str(self.summary), cfg.fast_llm_model
+str(self.summary), config.fast_llm_model
)
batch = [event]
batch_tlength = event_tlength

@@ -197,11 +197,11 @@ class MessageHistory:

if batch:
# There's an unprocessed batch. Summarize it.
-self.summarize_batch(batch, cfg)
+self.summarize_batch(batch, config)

return self.summary_message()

-def summarize_batch(self, new_events_batch, cfg):
+def summarize_batch(self, new_events_batch, config):
prompt = f'''Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information to remember.

You will receive the current summary and your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.

@@ -217,7 +217,9 @@ Latest Development:
"""
'''

-prompt = ChatSequence.for_model(cfg.fast_llm_model, [Message("user", prompt)])
+prompt = ChatSequence.for_model(
+config.fast_llm_model, [Message("user", prompt)]
+)
self.agent.log_cycle_handler.log_cycle(
self.agent.ai_name,
self.agent.created_at,

@@ -226,7 +228,7 @@ Latest Development:
PROMPT_SUMMARY_FILE_NAME,
)

-self.summary = create_chat_completion(prompt)
+self.summary = create_chat_completion(prompt, config)

self.agent.log_cycle_handler.log_cycle(
self.agent.ai_name,
@@ -39,12 +39,12 @@ supported_memory = ["json_file", "no_memory"]
# MilvusMemory = None


-def get_memory(cfg: Config) -> VectorMemory:
+def get_memory(config: Config) -> VectorMemory:
memory = None

-match cfg.memory_backend:
+match config.memory_backend:
case "json_file":
-memory = JSONFileMemory(cfg)
+memory = JSONFileMemory(config)

case "pinecone":
raise NotImplementedError(

@@ -59,7 +59,7 @@ def get_memory(cfg: Config) -> VectorMemory:
# " to use Pinecone as a memory backend."
# )
# else:
-# memory = PineconeMemory(cfg)
+# memory = PineconeMemory(config)
# if clear:
# memory.clear()

@@ -74,7 +74,7 @@ def get_memory(cfg: Config) -> VectorMemory:
# " use Redis as a memory backend."
# )
# else:
-# memory = RedisMemory(cfg)
+# memory = RedisMemory(config)

case "weaviate":
raise NotImplementedError(

@@ -89,7 +89,7 @@ def get_memory(cfg: Config) -> VectorMemory:
# " use Weaviate as a memory backend."
# )
# else:
-# memory = WeaviateMemory(cfg)
+# memory = WeaviateMemory(config)

case "milvus":
raise NotImplementedError(

@@ -104,18 +104,18 @@ def get_memory(cfg: Config) -> VectorMemory:
# "Please install pymilvus to use Milvus or Zilliz Cloud as memory backend."
# )
# else:
-# memory = MilvusMemory(cfg)
+# memory = MilvusMemory(config)

case "no_memory":
memory = NoMemory()

case _:
raise ValueError(
-f"Unknown memory backend '{cfg.memory_backend}'. Please check your config."
+f"Unknown memory backend '{config.memory_backend}'. Please check your config."
)

if memory is None:
-memory = JSONFileMemory(cfg)
+memory = JSONFileMemory(config)

return memory
@@ -36,19 +36,19 @@ class MemoryItem:
def from_text(
text: str,
source_type: MemoryDocType,
+config: Config,
metadata: dict = {},
how_to_summarize: str | None = None,
question_for_summary: str | None = None,
):
-cfg = Config()
logger.debug(f"Memorizing text:\n{'-'*32}\n{text}\n{'-'*32}\n")

chunks = [
chunk
for chunk, _ in (
-split_text(text, cfg.embedding_model)
+split_text(text, config.embedding_model, config)
if source_type != "code_file"
-else chunk_content(text, cfg.embedding_model)
+else chunk_content(text, config.embedding_model)
)
]
logger.debug("Chunks: " + str(chunks))

@@ -58,6 +58,7 @@ class MemoryItem:
for summary, _ in [
summarize_text(
text_chunk,
+config,
instruction=how_to_summarize,
question=question_for_summary,
)

@@ -66,7 +67,7 @@ class MemoryItem:
]
logger.debug("Chunk summaries: " + str(chunk_summaries))

-e_chunks = get_embedding(chunks)
+e_chunks = get_embedding(chunks, config)

summary = (
chunk_summaries[0]

@@ -81,7 +82,7 @@ class MemoryItem:

# TODO: investigate search performance of weighted average vs summary
# e_average = np.average(e_chunks, axis=0, weights=[len(c) for c in chunks])
-e_summary = get_embedding(summary)
+e_summary = get_embedding(summary, config)

metadata["source_type"] = source_type

@@ -96,8 +97,8 @@ class MemoryItem:
)

@staticmethod
-def from_text_file(content: str, path: str):
-return MemoryItem.from_text(content, "text_file", {"location": path})
+def from_text_file(content: str, path: str, config: Config):
+return MemoryItem.from_text(content, "text_file", config, {"location": path})

@staticmethod
def from_code_file(content: str, path: str):

@@ -137,10 +138,13 @@ class MemoryItem:
)

@staticmethod
-def from_webpage(content: str, url: str, question: str | None = None):
+def from_webpage(
+content: str, url: str, config: Config, question: str | None = None
+):
return MemoryItem.from_text(
text=content,
source_type="webpage",
+config=config,
metadata={"location": url},
question_for_summary=question,
)
@@ -17,25 +17,29 @@ class VectorMemoryProvider(MutableSet[MemoryItem], AbstractSingleton):
def __init__(self, config: Config):
pass

-def get(self, query: str) -> MemoryItemRelevance | None:
+def get(self, query: str, config: Config) -> MemoryItemRelevance | None:
"""
Gets the data from the memory that is most relevant to the given query.

Args:
data: The data to compare to.
query: The query used to retrieve information.
+config: The config Object.

Returns: The most relevant Memory
"""
-result = self.get_relevant(query, 1)
+result = self.get_relevant(query, 1, config)
return result[0] if result else None

-def get_relevant(self, query: str, k: int) -> Sequence[MemoryItemRelevance]:
+def get_relevant(
+self, query: str, k: int, config: Config
+) -> Sequence[MemoryItemRelevance]:
"""
Returns the top-k most relevant memories for the given query

Args:
query: the query to compare stored memories to
k: the number of relevant memories to fetch
+config: The config Object.

Returns:
list[MemoryItemRelevance] containing the top [k] relevant memories

@@ -48,7 +52,7 @@ class VectorMemoryProvider(MutableSet[MemoryItem], AbstractSingleton):
f"{len(self)} memories in index"
)

-relevances = self.score_memories_for_relevance(query)
+relevances = self.score_memories_for_relevance(query, config)
logger.debug(f"Memory relevance scores: {[str(r) for r in relevances]}")

# take last k items and reverse

@@ -57,13 +61,13 @@ class VectorMemoryProvider(MutableSet[MemoryItem], AbstractSingleton):
return [relevances[i] for i in top_k_indices]

def score_memories_for_relevance(
-self, for_query: str
+self, for_query: str, config: Config
) -> Sequence[MemoryItemRelevance]:
"""
Returns MemoryItemRelevance for every memory in the index.
Implementations may override this function for performance purposes.
"""
-e_query: Embedding = get_embedding(for_query)
+e_query: Embedding = get_embedding(for_query, config)
return [m.relevance_for(for_query, e_query) for m in self]

def get_stats(self) -> tuple[int, int]:
@@ -20,17 +20,17 @@ class JSONFileMemory(VectorMemoryProvider):
file_path: Path
memories: list[MemoryItem]

-def __init__(self, cfg: Config) -> None:
+def __init__(self, config: Config) -> None:
"""Initialize a class instance

Args:
-cfg: Config object
+config: Config object

Returns:
None
"""
-workspace_path = Path(cfg.workspace_path)
-self.file_path = workspace_path / f"{cfg.memory_index}.json"
+workspace_path = Path(config.workspace_path)
+self.file_path = workspace_path / f"{config.memory_index}.json"
self.file_path.touch()
logger.debug(
f"Initialized {__class__.__name__} with index path {self.file_path}"
@@ -22,7 +22,7 @@ def get_embedding(input: list[str] | list[TText]) -> list[Embedding]:


def get_embedding(
-input: str | TText | list[str] | list[TText],
+input: str | TText | list[str] | list[TText], config: Config
) -> Embedding | list[Embedding]:
"""Get an embedding from the ada model.

@@ -33,7 +33,6 @@ def get_embedding(
Returns:
List[float]: The embedding.
"""
-cfg = Config()
multiple = isinstance(input, list) and all(not isinstance(i, int) for i in input)

if isinstance(input, str):

@@ -41,22 +40,22 @@ def get_embedding(
elif multiple and isinstance(input[0], str):
input = [text.replace("\n", " ") for text in input]

-model = cfg.embedding_model
-if cfg.use_azure:
-kwargs = {"engine": cfg.get_azure_deployment_id_for_model(model)}
+model = config.embedding_model
+if config.use_azure:
+kwargs = {"engine": config.get_azure_deployment_id_for_model(model)}
else:
kwargs = {"model": model}

logger.debug(
f"Getting embedding{f's for {len(input)} inputs' if multiple else ''}"
f" with model '{model}'"
-+ (f" via Azure deployment '{kwargs['engine']}'" if cfg.use_azure else "")
++ (f" via Azure deployment '{kwargs['engine']}'" if config.use_azure else "")
)

embeddings = iopenai.create_embedding(
input,
**kwargs,
-api_key=cfg.openai_api_key,
+api_key=config.openai_api_key,
).data

if not multiple:
@@ -58,7 +58,7 @@ def write_dict_to_json_file(data: dict, file_path: str) -> None:
json.dump(data, file, indent=4)


-def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
+def fetch_openai_plugins_manifest_and_spec(config: Config) -> dict:
"""
Fetch the manifest for a list of OpenAI plugins.
Args:

@@ -68,8 +68,8 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
"""
# TODO add directory scan
manifests = {}
-for url in cfg.plugins_openai:
-openai_plugin_client_dir = f"{cfg.plugins_dir}/openai/{urlparse(url).netloc}"
+for url in config.plugins_openai:
+openai_plugin_client_dir = f"{config.plugins_dir}/openai/{urlparse(url).netloc}"
create_directory_if_not_exists(openai_plugin_client_dir)
if not os.path.exists(f"{openai_plugin_client_dir}/ai-plugin.json"):
try:

@@ -134,18 +134,18 @@ def create_directory_if_not_exists(directory_path: str) -> bool:


def initialize_openai_plugins(
-manifests_specs: dict, cfg: Config, debug: bool = False
+manifests_specs: dict, config: Config, debug: bool = False
) -> dict:
"""
Initialize OpenAI plugins.
Args:
manifests_specs (dict): per url dictionary of manifest and spec.
-cfg (Config): Config instance including plugins config
+config (Config): Config instance including plugins config
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
dict: per url dictionary of manifest, spec and client.
"""
-openai_plugins_dir = f"{cfg.plugins_dir}/openai"
+openai_plugins_dir = f"{config.plugins_dir}/openai"
if create_directory_if_not_exists(openai_plugins_dir):
for url, manifest_spec in manifests_specs.items():
openai_plugin_client_dir = f"{openai_plugins_dir}/{urlparse(url).hostname}"

@@ -188,13 +188,13 @@ def initialize_openai_plugins(


def instantiate_openai_plugin_clients(
-manifests_specs_clients: dict, cfg: Config, debug: bool = False
+manifests_specs_clients: dict, config: Config, debug: bool = False
) -> dict:
"""
Instantiates BaseOpenAIPlugin instances for each OpenAI plugin.
Args:
manifests_specs_clients (dict): per url dictionary of manifest, spec and client.
-cfg (Config): Config instance including plugins config
+config (Config): Config instance including plugins config
debug (bool, optional): Enable debug logging. Defaults to False.
Returns:
plugins (dict): per url dictionary of BaseOpenAIPlugin instances.

@@ -206,11 +206,11 @@ def instantiate_openai_plugin_clients(
return plugins


-def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate]:
+def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTemplate]:
"""Scan the plugins directory for plugins and loads them.

Args:
-cfg (Config): Config instance including plugins config
+config (Config): Config instance including plugins config
debug (bool, optional): Enable debug logging. Defaults to False.

Returns:

@@ -218,11 +218,11 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
"""
loaded_plugins = []
# Generic plugins
-plugins_path_path = Path(cfg.plugins_dir)
-plugins_config = cfg.plugins_config
+plugins_path_path = Path(config.plugins_dir)
+plugins_config = config.plugins_config

# Directory-based plugins
-for plugin_path in [f.path for f in os.scandir(cfg.plugins_dir) if f.is_dir()]:
+for plugin_path in [f.path for f in os.scandir(config.plugins_dir) if f.is_dir()]:
# Avoid going into __pycache__ or other hidden directories
if plugin_path.startswith("__"):
continue

@@ -286,11 +286,11 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
)

# OpenAI plugins
-if cfg.plugins_openai:
-manifests_specs = fetch_openai_plugins_manifest_and_spec(cfg)
+if config.plugins_openai:
+manifests_specs = fetch_openai_plugins_manifest_and_spec(config)
if manifests_specs.keys():
manifests_specs_clients = initialize_openai_plugins(
-manifests_specs, cfg, debug
+manifests_specs, config, debug
)
for url, openai_plugin_meta in manifests_specs_clients.items():
if not plugins_config.is_enabled(url):
@@ -12,8 +12,6 @@ from autogpt.llm.utils import count_string_tokens, create_chat_completion
from autogpt.logs import logger
from autogpt.utils import batch

-CFG = Config()

def _max_chunk_length(model: str, max: Optional[int] = None) -> int:
model_max_input_tokens = OPEN_AI_MODELS[model].max_tokens - 1

@@ -60,13 +58,18 @@ def chunk_content(


def summarize_text(
-text: str, instruction: Optional[str] = None, question: Optional[str] = None
+text: str,
+config: Config,
+instruction: Optional[str] = None,
+question: Optional[str] = None,
) -> tuple[str, None | list[tuple[str, str]]]:
"""Summarize text using the OpenAI API

Args:
text (str): The text to summarize
+config (Config): The config object
instruction (str): Additional instruction for summarization, e.g. "focus on information related to polar bears", "omit personal information contained in the text"
question (str): Question to answer in the summary

Returns:
str: The summary of the text

@@ -79,7 +82,7 @@ def summarize_text(
if instruction and question:
raise ValueError("Parameters 'question' and 'instructions' cannot both be set")

-model = CFG.fast_llm_model
+model = config.fast_llm_model

if question:
instruction = (

@@ -111,14 +114,18 @@ def summarize_text(

logger.debug(f"Summarizing with {model}:\n{summarization_prompt.dump()}\n")
summary = create_chat_completion(
-summarization_prompt, temperature=0, max_tokens=500
+summarization_prompt, config, temperature=0, max_tokens=500
)

logger.debug(f"\n{'-'*16} SUMMARY {'-'*17}\n{summary}\n{'-'*42}\n")
return summary.strip(), None

summaries: list[str] = []
-chunks = list(split_text(text, for_model=model, max_chunk_length=max_chunk_length))
+chunks = list(
+split_text(
+text, for_model=model, config=config, max_chunk_length=max_chunk_length
+)
+)

for i, (chunk, chunk_length) in enumerate(chunks):
logger.info(

@@ -138,7 +145,8 @@ def summarize_text(

def split_text(
text: str,
-for_model: str = CFG.fast_llm_model,
+for_model: str,
+config: Config,
with_overlap=True,
max_chunk_length: Optional[int] = None,
):

@@ -147,7 +155,9 @@ def split_text(
Args:
text (str): The text to split
for_model (str): The model to chunk for; determines tokenizer and constraints
-max_length (int, optional): The maximum length of each chunk
+config (Config): The config object
+with_overlap (bool, optional): Whether to allow overlap between chunks
+max_chunk_length (int, optional): The maximum length of a chunk

Yields:
str: The next chunk of text

@@ -155,6 +165,7 @@ def split_text(
Raises:
ValueError: when a sentence is longer than the maximum length
"""

max_length = _max_chunk_length(for_model, max_chunk_length)

# flatten paragraphs to improve performance

@@ -168,7 +179,7 @@ def split_text(
n_chunks = ceil(text_length / max_length)
target_chunk_length = ceil(text_length / n_chunks)

-nlp: spacy.language.Language = spacy.load(CFG.browse_spacy_language_model)
+nlp: spacy.language.Language = spacy.load(config.browse_spacy_language_model)
nlp.add_pipe("sentencizer")
doc = nlp(text)
sentences = [sentence.text.strip() for sentence in doc.sents]
@@ -9,12 +9,10 @@ from autogpt.prompts.generator import PromptGenerator
from autogpt.setup import prompt_user
from autogpt.utils import clean_input

-CFG = Config()
-
DEFAULT_TRIGGERING_PROMPT = "Determine exactly one command to use, and respond using the JSON schema specified previously:"


-def build_default_prompt_generator() -> PromptGenerator:
+def build_default_prompt_generator(config: Config) -> PromptGenerator:
"""
This function generates a prompt string that includes various constraints,
commands, resources, and performance evaluations.

@@ -27,7 +25,7 @@ def build_default_prompt_generator() -> PromptGenerator:
prompt_generator = PromptGenerator()

# Initialize the PromptConfig object and load the file set in the main config (default: prompts_settings.yaml)
-prompt_config = PromptConfig(CFG.prompt_settings_file)
+prompt_config = PromptConfig(config.prompt_settings_file)

# Add constraints to the PromptGenerator object
for constraint in prompt_config.constraints:

@@ -44,70 +42,71 @@ def build_default_prompt_generator() -> PromptGenerator:
return prompt_generator


-def construct_main_ai_config() -> AIConfig:
+def construct_main_ai_config(config: Config) -> AIConfig:
"""Construct the prompt for the AI to respond to

Returns:
str: The prompt string
"""
-config = AIConfig.load(CFG.ai_settings_file)
-if CFG.skip_reprompt and config.ai_name:
-logger.typewriter_log("Name :", Fore.GREEN, config.ai_name)
-logger.typewriter_log("Role :", Fore.GREEN, config.ai_role)
-logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}")
+ai_config = AIConfig.load(config.ai_settings_file)
+if config.skip_reprompt and ai_config.ai_name:
+logger.typewriter_log("Name :", Fore.GREEN, ai_config.ai_name)
+logger.typewriter_log("Role :", Fore.GREEN, ai_config.ai_role)
+logger.typewriter_log("Goals:", Fore.GREEN, f"{ai_config.ai_goals}")
logger.typewriter_log(
"API Budget:",
Fore.GREEN,
-"infinite" if config.api_budget <= 0 else f"${config.api_budget}",
+"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}",
)
-elif config.ai_name:
+elif ai_config.ai_name:
logger.typewriter_log(
"Welcome back! ",
Fore.GREEN,
-f"Would you like me to return to being {config.ai_name}?",
+f"Would you like me to return to being {ai_config.ai_name}?",
speak_text=True,
)
should_continue = clean_input(
+config,
f"""Continue with the last settings?
-Name: {config.ai_name}
-Role: {config.ai_role}
-Goals: {config.ai_goals}
-API Budget: {"infinite" if config.api_budget <= 0 else f"${config.api_budget}"}
-Continue ({CFG.authorise_key}/{CFG.exit_key}): """
+Name: {ai_config.ai_name}
+Role: {ai_config.ai_role}
+Goals: {ai_config.ai_goals}
+API Budget: {"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}"}
+Continue ({config.authorise_key}/{config.exit_key}): """,
)
-if should_continue.lower() == CFG.exit_key:
-config = AIConfig()
+if should_continue.lower() == config.exit_key:
+ai_config = AIConfig()

-if not config.ai_name:
-config = prompt_user()
-config.save(CFG.ai_settings_file)
+if not ai_config.ai_name:
+ai_config = prompt_user(config)
+ai_config.save(config.ai_settings_file)

-if CFG.restrict_to_workspace:
+if config.restrict_to_workspace:
logger.typewriter_log(
"NOTE:All files/directories created by this agent can be found inside its workspace at:",
Fore.YELLOW,
-f"{CFG.workspace_path}",
+f"{config.workspace_path}",
)
# set the total api budget
api_manager = ApiManager()
-api_manager.set_total_budget(config.api_budget)
+api_manager.set_total_budget(ai_config.api_budget)

# Agent Created, print message
logger.typewriter_log(
-config.ai_name,
+ai_config.ai_name,
Fore.LIGHTBLUE_EX,
"has been created with the following details:",
speak_text=True,
)

-# Print the ai config details
+# Print the ai_config details
# Name
-logger.typewriter_log("Name:", Fore.GREEN, config.ai_name, speak_text=False)
+logger.typewriter_log("Name:", Fore.GREEN, ai_config.ai_name, speak_text=False)
# Role
-logger.typewriter_log("Role:", Fore.GREEN, config.ai_role, speak_text=False)
+logger.typewriter_log("Role:", Fore.GREEN, ai_config.ai_role, speak_text=False)
# Goals
logger.typewriter_log("Goals:", Fore.GREEN, "", speak_text=False)
-for goal in config.ai_goals:
+for goal in ai_config.ai_goals:
logger.typewriter_log("-", Fore.GREEN, goal, speak_text=False)

-return config
+return ai_config
@@ -16,10 +16,8 @@ from autogpt.prompts.default_prompts import (
|
||||
DEFAULT_USER_DESIRE_PROMPT,
|
||||
)
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
def prompt_user() -> AIConfig:
def prompt_user(config: Config) -> AIConfig:
"""Prompt the user for input

Returns:
@@ -45,7 +43,7 @@ def prompt_user() -> AIConfig:
)

user_desire = utils.clean_input(
f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: "
config, f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: "
)

if user_desire == "":
@@ -58,11 +56,11 @@ def prompt_user() -> AIConfig:
Fore.GREEN,
speak_text=True,
)
return generate_aiconfig_manual()
return generate_aiconfig_manual(config)

else:
try:
return generate_aiconfig_automatic(user_desire)
return generate_aiconfig_automatic(user_desire, config)
except Exception as e:
logger.typewriter_log(
"Unable to automatically generate AI Config based on user desire.",
@@ -71,10 +69,10 @@ def prompt_user() -> AIConfig:
speak_text=True,
)

return generate_aiconfig_manual()
return generate_aiconfig_manual(config)


def generate_aiconfig_manual() -> AIConfig:
def generate_aiconfig_manual(config: Config) -> AIConfig:
"""
Interactively create an AI configuration by prompting the user to provide the name, role, and goals of the AI.

@@ -99,7 +97,7 @@ def generate_aiconfig_manual() -> AIConfig:
logger.typewriter_log(
"Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
)
ai_name = utils.clean_input("AI Name: ")
ai_name = utils.clean_input(config, "AI Name: ")
if ai_name == "":
ai_name = "Entrepreneur-GPT"

@@ -114,7 +112,7 @@ def generate_aiconfig_manual() -> AIConfig:
"For example, 'an AI designed to autonomously develop and run businesses with"
" the sole goal of increasing your net worth.'",
)
ai_role = utils.clean_input(f"{ai_name} is: ")
ai_role = utils.clean_input(config, f"{ai_name} is: ")
if ai_role == "":
ai_role = "an AI designed to autonomously develop and run businesses with the"
" sole goal of increasing your net worth."
@@ -129,7 +127,9 @@ def generate_aiconfig_manual() -> AIConfig:
logger.info("Enter nothing to load defaults, enter nothing when finished.")
ai_goals = []
for i in range(5):
ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
ai_goal = utils.clean_input(
config, f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: "
)
if ai_goal == "":
break
ai_goals.append(ai_goal)
@@ -148,7 +148,7 @@ def generate_aiconfig_manual() -> AIConfig:
)
logger.info("Enter nothing to let the AI run without monetary limit")
api_budget_input = utils.clean_input(
f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $"
config, f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $"
)
if api_budget_input == "":
api_budget = 0.0
@@ -164,7 +164,7 @@ def generate_aiconfig_manual() -> AIConfig:
return AIConfig(ai_name, ai_role, ai_goals, api_budget)


def generate_aiconfig_automatic(user_prompt) -> AIConfig:
def generate_aiconfig_automatic(user_prompt: str, config: Config) -> AIConfig:
"""Generates an AIConfig object from the given string.

Returns:
@@ -178,12 +178,13 @@ def generate_aiconfig_automatic(user_prompt) -> AIConfig:
# Call LLM with the string as user input
output = create_chat_completion(
ChatSequence.for_model(
CFG.fast_llm_model,
config.fast_llm_model,
[
Message("system", system_prompt),
Message("user", prompt_ai_config_automatic),
],
)
),
config,
)

# Debug LLM Output
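
Aside: the hunks above all follow one pattern — the prompt helpers stop reading a module-level Config (CFG / Config()) and instead take a config argument that is threaded down into every call (prompt_user(config), generate_aiconfig_manual(config), utils.clean_input(config, ...)). Below is a minimal, self-contained sketch of that dependency-injection style using invented names (AppConfig, clean_input, prompt_user are toy stand-ins, not the project's real API):

from dataclasses import dataclass


@dataclass
class AppConfig:
    # Toy settings object standing in for the real Config class.
    fast_llm_model: str = "gpt-3.5-turbo"
    speak_mode: bool = False


def clean_input(config: AppConfig, prompt: str = "") -> str:
    # The config travels in as an argument, so callers and tests can pass
    # a stub instead of mutating a process-wide singleton.
    if config.speak_mode:
        print("(speaking prompt aloud)")
    return input(prompt)


def prompt_user(config: AppConfig) -> str:
    # Every helper that needs settings receives the same object explicitly.
    return clean_input(config, "I want the agent to: ")


if __name__ == "__main__":
    prompt_user(AppConfig(speak_mode=False))

The benefit shows up at the call sites: whoever owns the config decides what the helpers see, and nothing below them can silently construct its own copy.
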
@@ -2,6 +2,7 @@
import abc
from threading import Lock

from autogpt.config import Config
from autogpt.singleton import AbstractSingleton


@@ -10,7 +11,7 @@ class VoiceBase(AbstractSingleton):
Base class for all voice classes.
"""

def __init__(self):
def __init__(self, config: Config):
"""
Initialize the voice class.
"""
@@ -19,7 +20,7 @@ class VoiceBase(AbstractSingleton):
self._api_key = None
self._voices = []
self._mutex = Lock()
self._setup()
self._setup(config)

def say(self, text: str, voice_index: int = 0) -> bool:
"""

@@ -13,14 +13,13 @@ PLACEHOLDERS = {"your-voice-id"}
class ElevenLabsSpeech(VoiceBase):
"""ElevenLabs speech class"""

def _setup(self) -> None:
def _setup(self, config: Config) -> None:
"""Set up the voices, API key, etc.

Returns:
None: None
"""

cfg = Config()
default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
voice_options = {
"Rachel": "21m00Tcm4TlvDq8ikWAM",
@@ -35,15 +34,15 @@ class ElevenLabsSpeech(VoiceBase):
}
self._headers = {
"Content-Type": "application/json",
"xi-api-key": cfg.elevenlabs_api_key,
"xi-api-key": config.elevenlabs_api_key,
}
self._voices = default_voices.copy()
if cfg.elevenlabs_voice_id in voice_options:
cfg.elevenlabs_voice_id = voice_options[cfg.elevenlabs_voice_id]
if cfg.elevenlabs_voice_2_id in voice_options:
cfg.elevenlabs_voice_2_id = voice_options[cfg.elevenlabs_voice_2_id]
self._use_custom_voice(cfg.elevenlabs_voice_id, 0)
self._use_custom_voice(cfg.elevenlabs_voice_2_id, 1)
if config.elevenlabs_voice_id in voice_options:
config.elevenlabs_voice_id = voice_options[config.elevenlabs_voice_id]
if config.elevenlabs_voice_2_id in voice_options:
config.elevenlabs_voice_2_id = voice_options[config.elevenlabs_voice_2_id]
self._use_custom_voice(config.elevenlabs_voice_id, 0)
self._use_custom_voice(config.elevenlabs_voice_2_id, 1)

def _use_custom_voice(self, voice, voice_index) -> None:
"""Use a custom voice if provided and not a placeholder

@@ -14,10 +14,9 @@ _QUEUE_SEMAPHORE = Semaphore(
) # The amount of sounds to queue before blocking the main thread


def say_text(text: str, voice_index: int = 0) -> None:
def say_text(text: str, config: Config, voice_index: int = 0) -> None:
"""Speak the given text using the given voice index"""
cfg = Config()
default_voice_engine, voice_engine = _get_voice_engine(cfg)
default_voice_engine, voice_engine = _get_voice_engine(config)

def speak() -> None:
success = voice_engine.say(text, voice_index)
@@ -35,7 +34,7 @@ def _get_voice_engine(config: Config) -> tuple[VoiceBase, VoiceBase]:
"""Get the voice engine to use for the given configuration"""
tts_provider = config.text_to_speech_provider
if tts_provider == "elevenlabs":
voice_engine = ElevenLabsSpeech()
voice_engine = ElevenLabsSpeech(config)
elif tts_provider == "macos":
voice_engine = MacOSTTS()
elif tts_provider == "streamelements":
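
Aside: the speech hunks above switch to constructor injection — VoiceBase.__init__ and ElevenLabsSpeech._setup now receive config, and _get_voice_engine builds ElevenLabsSpeech(config) rather than letting the class fetch a fresh Config() internally. A minimal, self-contained sketch of that shape, with invented names (AppConfig, StreamElementsSpeech here are toys, not the real autogpt.speech module):

from dataclasses import dataclass


@dataclass
class AppConfig:
    text_to_speech_provider: str = "streamelements"
    elevenlabs_api_key: str = ""


class VoiceBase:
    def __init__(self, config: AppConfig):
        # The constructor forwards config to _setup(), so subclasses never
        # have to reach for a global settings object.
        self._setup(config)

    def _setup(self, config: AppConfig) -> None:
        raise NotImplementedError

    def say(self, text: str) -> bool:
        print(f"[{type(self).__name__}] {text}")
        return True


class ElevenLabsSpeech(VoiceBase):
    def _setup(self, config: AppConfig) -> None:
        self._headers = {"xi-api-key": config.elevenlabs_api_key}


class StreamElementsSpeech(VoiceBase):
    def _setup(self, config: AppConfig) -> None:
        self._headers = {}


def get_voice_engine(config: AppConfig) -> VoiceBase:
    # Dispatch on the provider named in the injected config.
    if config.text_to_speech_provider == "elevenlabs":
        return ElevenLabsSpeech(config)
    return StreamElementsSpeech(config)


if __name__ == "__main__":
    get_voice_engine(AppConfig()).say("hello")
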
@@ -23,11 +23,10 @@ def batch(iterable, max_batch_length: int, overlap: int = 0):
yield iterable[i : i + max_batch_length]


def clean_input(prompt: str = "", talk=False):
def clean_input(config: Config, prompt: str = "", talk=False):
try:
cfg = Config()
if cfg.chat_messages_enabled:
for plugin in cfg.plugins:
if config.chat_messages_enabled:
for plugin in config.plugins:
if not hasattr(plugin, "can_handle_user_input"):
continue
if not plugin.can_handle_user_input(user_input=prompt):
@@ -44,14 +43,14 @@ def clean_input(prompt: str = "", talk=False):
"sure",
"alright",
]:
return cfg.authorise_key
return config.authorise_key
elif plugin_response.lower() in [
"no",
"nope",
"n",
"negative",
]:
return cfg.exit_key
return config.exit_key
return plugin_response

# ask for input, default when just pressing Enter is y

@@ -5,7 +5,7 @@ from autogpt.commands.file_operations import ingest_file, list_files
from autogpt.config import Config
from autogpt.memory.vector import VectorMemory, get_memory

cfg = Config()
config = Config()


def configure_logging():
@@ -70,7 +70,7 @@ def main() -> None:
args = parser.parse_args()

# Initialize memory
memory = get_memory(cfg)
memory = get_memory(config)
if args.init:
memory.clear()
logger.debug("Using memory of type: " + memory.__class__.__name__)

@@ -52,7 +52,7 @@ def kubernetes_agent(
ai_config.command_registry = command_registry

system_prompt = ai_config.construct_full_prompt()
Config().set_continuous_mode(False)
agent_test_config.set_continuous_mode(False)
agent = Agent(
# We also give the AI a name
ai_name="Kubernetes-Demo",

Submodule tests/Auto-GPT-test-cassettes updated: 43f536a193...058606a100
@@ -97,7 +97,7 @@ def agent(config: Config, workspace: Workspace) -> Agent:
memory_json_file = get_memory(config)
memory_json_file.clear()

system_prompt = ai_config.construct_full_prompt()
system_prompt = ai_config.construct_full_prompt(config)

return Agent(
ai_name=ai_config.ai_name,

@@ -55,7 +55,7 @@ def browser_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace
)
ai_config.command_registry = command_registry

system_prompt = ai_config.construct_full_prompt()
system_prompt = ai_config.construct_full_prompt(agent_test_config)

agent = Agent(
ai_name="",
@@ -91,8 +91,8 @@ def file_system_agents(
ai_goals=[ai_goal],
)
ai_config.command_registry = command_registry
system_prompt = ai_config.construct_full_prompt()
Config().set_continuous_mode(False)
system_prompt = ai_config.construct_full_prompt(agent_test_config)
agent_test_config.set_continuous_mode(False)
agents.append(
Agent(
ai_name="File System Agent",
@@ -123,7 +123,7 @@ def memory_management_agent(agent_test_config, memory_json_file, workspace: Work
)
ai_config.command_registry = command_registry

system_prompt = ai_config.construct_full_prompt()
system_prompt = ai_config.construct_full_prompt(agent_test_config)

agent = Agent(
ai_name="Follow-Instructions-GPT",
@@ -159,8 +159,8 @@ def information_retrieval_agents(
ai_goals=[ai_goal],
)
ai_config.command_registry = command_registry
system_prompt = ai_config.construct_full_prompt()
Config().set_continuous_mode(False)
system_prompt = ai_config.construct_full_prompt(agent_test_config)
agent_test_config.set_continuous_mode(False)
agents.append(
Agent(
ai_name="Information Retrieval Agent",
@@ -195,8 +195,8 @@ def kubernetes_agent(
)
ai_config.command_registry = command_registry

system_prompt = ai_config.construct_full_prompt()
Config().set_continuous_mode(False)
system_prompt = ai_config.construct_full_prompt(agent_test_config)
agent_test_config.set_continuous_mode(False)
agent = Agent(
ai_name="Kubernetes-Demo",
memory=memory_json_file,
@@ -228,8 +228,8 @@ def get_nobel_prize_agent(agent_test_config, memory_json_file, workspace: Worksp
)
ai_config.command_registry = command_registry

system_prompt = ai_config.construct_full_prompt()
Config().set_continuous_mode(False)
system_prompt = ai_config.construct_full_prompt(agent_test_config)
agent_test_config.set_continuous_mode(False)

agent = Agent(
ai_name="Get-PhysicsNobelPrize",
@@ -254,7 +254,7 @@ def debug_code_agents(agent_test_config, memory_json_file, workspace: Workspace)
"1- Run test.py using the execute_python_file command.",
"2- Read code.py using the read_file command.",
"3- Modify code.py using the write_to_file command."
"Repeat step 1, 2 and 3 until test.py runs without errors.",
"Repeat step 1, 2 and 3 until test.py runs without errors. Do not modify the test.py file.",
],
[
"1- Run test.py.",
@@ -273,8 +273,8 @@ def debug_code_agents(agent_test_config, memory_json_file, workspace: Workspace)
)
command_registry = get_command_registry(agent_test_config)
ai_config.command_registry = command_registry
system_prompt = ai_config.construct_full_prompt()
Config().set_continuous_mode(False)
system_prompt = ai_config.construct_full_prompt(agent_test_config)
agent_test_config.set_continuous_mode(False)
agents.append(
Agent(
ai_name="Debug Code Agent",

@@ -71,11 +71,11 @@ def test_json_memory_clear(config: Config, memory_item: MemoryItem):
def test_json_memory_get(config: Config, memory_item: MemoryItem, mock_get_embedding):
index = JSONFileMemory(config)
assert (
index.get("test") == None
index.get("test", config) == None
), "Cannot test get() because initial index is not empty"

index.add(memory_item)
retrieved = index.get("test")
retrieved = index.get("test", config)
assert retrieved is not None
assert retrieved.memory_item == memory_item

@@ -102,20 +102,27 @@ def test_json_memory_load_index(config: Config, memory_item: MemoryItem):
@requires_api_key("OPENAI_API_KEY")
def test_json_memory_get_relevant(config: Config, patched_api_requestor: None) -> None:
index = JSONFileMemory(config)
mem1 = MemoryItem.from_text_file("Sample text", "sample.txt")
mem2 = MemoryItem.from_text_file("Grocery list:\n- Pancake mix", "groceries.txt")
mem3 = MemoryItem.from_text_file("What is your favorite color?", "color.txt")
mem1 = MemoryItem.from_text_file("Sample text", "sample.txt", config)
mem2 = MemoryItem.from_text_file(
"Grocery list:\n- Pancake mix", "groceries.txt", config
)
mem3 = MemoryItem.from_text_file(
"What is your favorite color?", "color.txt", config
)
lipsum = "Lorem ipsum dolor sit amet"
mem4 = MemoryItem.from_text_file(" ".join([lipsum] * 100), "lipsum.txt")
mem4 = MemoryItem.from_text_file(" ".join([lipsum] * 100), "lipsum.txt", config)
index.add(mem1)
index.add(mem2)
index.add(mem3)
index.add(mem4)

assert index.get_relevant(mem1.raw_content, 1)[0].memory_item == mem1
assert index.get_relevant(mem2.raw_content, 1)[0].memory_item == mem2
assert index.get_relevant(mem3.raw_content, 1)[0].memory_item == mem3
assert [mr.memory_item for mr in index.get_relevant(lipsum, 2)] == [mem4, mem1]
assert index.get_relevant(mem1.raw_content, 1, config)[0].memory_item == mem1
assert index.get_relevant(mem2.raw_content, 1, config)[0].memory_item == mem2
assert index.get_relevant(mem3.raw_content, 1, config)[0].memory_item == mem3
assert [mr.memory_item for mr in index.get_relevant(lipsum, 2, config)] == [
mem4,
mem1,
]


def test_json_memory_get_stats(config: Config, memory_item: MemoryItem) -> None:

@@ -9,10 +9,10 @@ from tests.utils import requires_api_key

@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_generate_aiconfig_automatic_default(patched_api_requestor):
def test_generate_aiconfig_automatic_default(patched_api_requestor, config):
user_inputs = [""]
with patch("autogpt.utils.session.prompt", side_effect=user_inputs):
ai_config = prompt_user()
ai_config = prompt_user(config)

assert isinstance(ai_config, AIConfig)
assert ai_config.ai_name is not None
@@ -22,9 +22,9 @@ def test_generate_aiconfig_automatic_default(patched_api_requestor):

@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_generate_aiconfig_automatic_typical(patched_api_requestor):
def test_generate_aiconfig_automatic_typical(patched_api_requestor, config):
user_prompt = "Help me create a rock opera about cybernetic giraffes"
ai_config = generate_aiconfig_automatic(user_prompt)
ai_config = generate_aiconfig_automatic(user_prompt, config)

assert isinstance(ai_config, AIConfig)
assert ai_config.ai_name is not None
@@ -34,7 +34,7 @@ def test_generate_aiconfig_automatic_typical(patched_api_requestor):

@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_generate_aiconfig_automatic_fallback(patched_api_requestor):
def test_generate_aiconfig_automatic_fallback(patched_api_requestor, config):
user_inputs = [
"T&GF£OIBECC()!*",
"Chef-GPT",
@@ -45,7 +45,7 @@ def test_generate_aiconfig_automatic_fallback(patched_api_requestor):
"",
]
with patch("autogpt.utils.session.prompt", side_effect=user_inputs):
ai_config = prompt_user()
ai_config = prompt_user(config)

assert isinstance(ai_config, AIConfig)
assert ai_config.ai_name == "Chef-GPT"
@@ -55,7 +55,7 @@ def test_generate_aiconfig_automatic_fallback(patched_api_requestor):

@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_prompt_user_manual_mode(patched_api_requestor):
def test_prompt_user_manual_mode(patched_api_requestor, config):
user_inputs = [
"--manual",
"Chef-GPT",
@@ -66,7 +66,7 @@ def test_prompt_user_manual_mode(patched_api_requestor):
"",
]
with patch("autogpt.utils.session.prompt", side_effect=user_inputs):
ai_config = prompt_user()
ai_config = prompt_user(config)

assert isinstance(ai_config, AIConfig)
assert ai_config.ai_name == "Chef-GPT"

@@ -5,9 +5,9 @@ from autogpt.llm.chat import create_chat_completion


@pytest.fixture
def agent_manager():
def agent_manager(config):
# Hack, real gross. Singletons are not good times.
yield AgentManager()
yield AgentManager(config)
del AgentManager._instances[AgentManager]

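
Aside: the fixture hunk above shows the test-side half of the refactor — agent_manager now depends on a config fixture and constructs AgentManager(config) rather than letting the manager pull in a singleton. A minimal, self-contained sketch of that fixture pattern with invented names (AppConfig and Manager are toys, not the project's real conftest):

from dataclasses import dataclass

import pytest


@dataclass
class AppConfig:
    plugins: tuple = ()


class Manager:
    def __init__(self, config: AppConfig):
        # Dependencies arrive through the constructor, so each test
        # controls exactly which settings the manager sees.
        self.config = config
        self.agents = {}


@pytest.fixture
def config() -> AppConfig:
    return AppConfig()


@pytest.fixture
def manager(config: AppConfig) -> Manager:
    return Manager(config)


def test_manager_uses_injected_config(manager: Manager, config: AppConfig):
    assert manager.config is config
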
@@ -19,10 +19,10 @@ ai_name: McFamished
ai_role: A hungry AI
api_budget: 0.0
"""
config_file = tmp_path / "ai_settings.yaml"
config_file.write_text(yaml_content)
ai_settings_file = tmp_path / "ai_settings.yaml"
ai_settings_file.write_text(yaml_content)

ai_config = AIConfig.load(config_file)
ai_config = AIConfig.load(ai_settings_file)

assert len(ai_config.ai_goals) == 4
assert ai_config.ai_goals[0] == "Goal 1: Make a sandwich"
@@ -30,8 +30,8 @@ api_budget: 0.0
assert ai_config.ai_goals[2] == "Goal 3 - Go to sleep"
assert ai_config.ai_goals[3] == "Goal 4: Wake up"

config_file.write_text("")
ai_config.save(config_file)
ai_settings_file.write_text("")
ai_config.save(ai_settings_file)

yaml_content2 = """ai_goals:
- 'Goal 1: Make a sandwich'
@@ -42,15 +42,15 @@ ai_name: McFamished
ai_role: A hungry AI
api_budget: 0.0
"""
assert config_file.read_text() == yaml_content2
assert ai_settings_file.read_text() == yaml_content2


def test_ai_config_file_not_exists(workspace):
"""Test if file does not exist."""

config_file = workspace.get_path("ai_settings.yaml")
ai_settings_file = workspace.get_path("ai_settings.yaml")

ai_config = AIConfig.load(str(config_file))
ai_config = AIConfig.load(str(ai_settings_file))
assert ai_config.ai_name == ""
assert ai_config.ai_role == ""
assert ai_config.ai_goals == []
@@ -62,10 +62,10 @@ def test_ai_config_file_not_exists(workspace):
def test_ai_config_file_is_empty(workspace):
"""Test if file does not exist."""

config_file = workspace.get_path("ai_settings.yaml")
config_file.write_text("")
ai_settings_file = workspace.get_path("ai_settings.yaml")
ai_settings_file.write_text("")

ai_config = AIConfig.load(str(config_file))
ai_config = AIConfig.load(str(ai_settings_file))
assert ai_config.ai_name == ""
assert ai_config.ai_role == ""
assert ai_config.ai_goals == []

@@ -1,5 +1,5 @@
"""
Test cases for the Config class, which handles the configuration settings
Test cases for the config class, which handles the configuration settings
for the AI and ensures it behaves as a singleton.
"""
from unittest import mock
@@ -7,14 +7,14 @@ from unittest.mock import patch

import pytest

from autogpt.config.config import Config
from autogpt.config import Config
from autogpt.configurator import GPT_3_MODEL, GPT_4_MODEL, create_config
from autogpt.workspace.workspace import Workspace


def test_initial_values(config: Config):
"""
Test if the initial values of the Config class attributes are set correctly.
Test if the initial values of the config class attributes are set correctly.
"""
assert config.debug_mode == False
assert config.continuous_mode == False

@@ -13,6 +13,7 @@ from pytest_mock import MockerFixture

import autogpt.commands.file_operations as file_ops
from autogpt.agent.agent import Agent
from autogpt.config import Config
from autogpt.memory.vector.memory_item import MemoryItem
from autogpt.memory.vector.utils import Embedding
from autogpt.workspace import Workspace
@@ -24,11 +25,13 @@ def file_content():


@pytest.fixture()
def mock_MemoryItem_from_text(mocker: MockerFixture, mock_embedding: Embedding):
def mock_MemoryItem_from_text(
mocker: MockerFixture, mock_embedding: Embedding, config: Config
):
mocker.patch.object(
file_ops.MemoryItem,
"from_text",
new=lambda content, source_type, metadata: MemoryItem(
new=lambda content, source_type, config, metadata: MemoryItem(
raw_content=content,
summary=f"Summary of content '{content}'",
chunk_summaries=[f"Summary of content '{content}'"],

@@ -38,8 +38,7 @@ def agent(config: Config):
return agent


def test_message_history_batch_summary(mocker, agent):
config = Config()
def test_message_history_batch_summary(mocker, agent, config):
history = MessageHistory(agent)
model = config.fast_llm_model
message_tlength = 0
@@ -114,7 +113,7 @@ def test_message_history_batch_summary(mocker, agent):
history.append(user_input_msg)

# only take the last cycle of the message history, trim the rest of previous messages, and generate a summary for them
for cycle in reversed(list(history.per_cycle())):
for cycle in reversed(list(history.per_cycle(config))):
messages_to_add = [msg for msg in cycle if msg is not None]
message_sequence.insert(insertion_index, *messages_to_add)
break
@@ -127,7 +126,7 @@ def test_message_history_batch_summary(mocker, agent):

# test the main trim_message function
new_summary_message, trimmed_messages = history.trim_messages(
current_message_chain=list(message_sequence),
current_message_chain=list(message_sequence), config=config
)

expected_call_count = math.ceil(

@@ -23,10 +23,10 @@ performance_evaluations:
- Another test performance evaluation
- A third test performance evaluation
"""
config_file = tmp_path / "test_prompt_settings.yaml"
config_file.write_text(yaml_content)
prompt_settings_file = tmp_path / "test_prompt_settings.yaml"
prompt_settings_file.write_text(yaml_content)

prompt_config = PromptConfig(config_file)
prompt_config = PromptConfig(prompt_settings_file)

assert len(prompt_config.constraints) == 3
assert prompt_config.constraints[0] == "A test constraint"

@@ -4,6 +4,7 @@ from unittest.mock import patch
import pytest
import requests

from autogpt.config import Config
from autogpt.json_utils.utilities import extract_json_from_response, validate_json
from autogpt.utils import (
get_bulletin_from_web,
@@ -185,12 +186,12 @@ def test_get_current_git_branch_failure(mock_repo):
assert branch_name == ""


def test_validate_json_valid(valid_json_response):
assert validate_json(valid_json_response)
def test_validate_json_valid(valid_json_response, config: Config):
assert validate_json(valid_json_response, config)


def test_validate_json_invalid(invalid_json_response):
assert not validate_json(valid_json_response)
def test_validate_json_invalid(invalid_json_response, config: Config):
assert not validate_json(valid_json_response, config)


def test_extract_json_from_response(valid_json_response: dict):