Release v0.4.6 (#5039)

Committed by Luke on 2023-07-28 14:37:32 +02:00 via GitHub
67 changed files with 1512 additions and 1222 deletions


@@ -16,13 +16,13 @@ OPENAI_API_KEY=your-openai-api-key
## USER_AGENT - Define the user-agent used by the requests library to browse websites (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
## AI_SETTINGS_FILE - Specifies which AI Settings file to use, relative to the Auto-GPT root directory. (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml
## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file (Default plugins_config.yaml)
## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file, relative to the Auto-GPT root directory. (Default plugins_config.yaml)
# PLUGINS_CONFIG_FILE=plugins_config.yaml
## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use (defaults to prompt_settings.yaml)
## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use, relative to the Auto-GPT root directory. (defaults to prompt_settings.yaml)
# PROMPT_SETTINGS_FILE=prompt_settings.yaml
## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
@@ -58,7 +58,7 @@ OPENAI_API_KEY=your-openai-api-key
## USE_AZURE - Use Azure OpenAI or not (Default: False)
# USE_AZURE=False
## AZURE_CONFIG_FILE - The path to the azure.yaml file (Default: azure.yaml)
## AZURE_CONFIG_FILE - The path to the azure.yaml file, relative to the Auto-GPT root directory. (Default: azure.yaml)
# AZURE_CONFIG_FILE=azure.yaml
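All of these settings files now resolve relative to the Auto-GPT root directory rather than the current working directory. As a rough sketch of that resolution (the helper name here is hypothetical; the real pattern, config.workdir / config.ai_settings_file, appears in the Config changes further down):

from pathlib import Path

def resolve_config_path(workdir: Path, setting: str) -> Path:
    # Hypothetical helper: keep absolute paths as-is, join relative ones onto the root.
    path = Path(setting)
    return path if path.is_absolute() else workdir / path

# resolve_config_path(Path("/opt/Auto-GPT"), "ai_settings.yaml")
# -> Path("/opt/Auto-GPT/ai_settings.yaml")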


@@ -162,7 +162,7 @@ jobs:
PROXY: ${{ github.event_name == 'pull_request_target' && secrets.PROXY || '' }}
AGENT_MODE: ${{ github.event_name == 'pull_request_target' && secrets.AGENT_MODE || '' }}
AGENT_TYPE: ${{ github.event_name == 'pull_request_target' && secrets.AGENT_TYPE || '' }}
OPENAI_API_KEY: ${{ github.event_name == 'pull_request' && secrets.OPENAI_API_KEY || '' }}
OPENAI_API_KEY: ${{ github.event_name != 'pull_request_target' && secrets.OPENAI_API_KEY || '' }}
PLAIN_OUTPUT: True
- name: Upload coverage reports to Codecov
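With the inverted condition, the workflow now supplies the real OPENAI_API_KEY for every event except pull_request_target (the event used for PRs from forks, which still receive an empty string); previously only pull_request events received the key.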

.gitignore

@@ -1,7 +1,7 @@
## Original ignores
autogpt/keys.py
autogpt/*.json
**/auto_gpt_workspace/*
auto_gpt_workspace/*
*.mpeg
.env
azure.yaml


@@ -4,23 +4,24 @@
📖 *User Guide*: https://docs.agpt.co.
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing.
# v0.4.5 RELEASE HIGHLIGHTS! 🚀
# v0.4.6 RELEASE HIGHLIGHTS! 🚀
# -----------------------------
This release includes under-the-hood improvements and bug fixes, such as more
accurate token counts for OpenAI functions, faster CI builds, improved plugin
handling, and refactoring of the Config class for better maintainability.
This release includes under-the-hood improvements and bug fixes, including better UTF-8
special character support, workspace write access for sandboxed Python execution,
more robust path resolution for config files and the workspace, and a full restructure
of the Agent class, the "brain" of Auto-GPT, to make it more extensible.
We have also released some documentation updates, including:
- *How to share your system logs*
Visit [docs/share-your-logs.md] to learn how to share logs with us
via a log analyzer graciously contributed by https://www.e2b.dev/
- *Auto-GPT re-architecture documentation*
You can learn more about the inner-workings of the Auto-GPT re-architecture
released last cycle, via these links:
* [autogpt/core/README.md]
* [autogpt/core/ARCHITECTURE_NOTES.md]
Take a look at the Release Notes on GitHub for the full changelog!
https://github.com/Significant-Gravitas/Auto-GPT/releases.


@@ -1,5 +1,5 @@
"""Auto-GPT: A GPT powered AI Assistant"""
import autogpt.cli
import autogpt.app.cli
if __name__ == "__main__":
autogpt.cli.main()
autogpt.app.cli.main()


@@ -1,3 +1,4 @@
from .agent import Agent
from .base import AgentThoughts, BaseAgent, CommandArgs, CommandName
__all__ = ["Agent"]
__all__ = ["BaseAgent", "Agent", "CommandName", "CommandArgs", "AgentThoughts"]


@@ -1,315 +1,213 @@
from __future__ import annotations
import json
import signal
import sys
import time
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional
from colorama import Fore, Style
if TYPE_CHECKING:
from autogpt.config import AIConfig, Config
from autogpt.llm.base import ChatModelResponse, ChatSequence
from autogpt.memory.vector import VectorMemory
from autogpt.models.command_registry import CommandRegistry
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.json_utils.utilities import extract_json_from_response, validate_json
from autogpt.llm import ChatModelResponse
from autogpt.llm.chat import chat_with_ai
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
from autogpt.json_utils.utilities import extract_dict_from_response, validate_dict
from autogpt.llm.api_manager import ApiManager
from autogpt.llm.base import Message
from autogpt.llm.utils import count_string_tokens
from autogpt.logs import (
from autogpt.logs import logger
from autogpt.logs.log_cycle import (
FULL_MESSAGE_HISTORY_FILE_NAME,
NEXT_ACTION_FILE_NAME,
USER_INPUT_FILE_NAME,
LogCycleHandler,
logger,
print_assistant_thoughts,
remove_ansi_escape,
)
from autogpt.memory.message_history import MessageHistory
from autogpt.memory.vector import VectorMemory
from autogpt.models.command_registry import CommandRegistry
from autogpt.speech import say_text
from autogpt.spinner import Spinner
from autogpt.utils import clean_input
from autogpt.workspace import Workspace
from .base import AgentThoughts, BaseAgent, CommandArgs, CommandName
class Agent:
"""Agent class for interacting with Auto-GPT.
Attributes:
ai_name: The name of the agent.
memory: The memory object to use.
next_action_count: The number of actions to execute.
system_prompt: The system prompt is the initial prompt that defines everything
the AI needs to know to achieve its task successfully.
Currently, the dynamic and customizable information in the system prompt are
ai_name, description and goals.
triggering_prompt: The last sentence the AI will see before answering.
For Auto-GPT, this prompt is:
Determine exactly one command to use, and respond using the format specified
above:
The triggering prompt is not part of the system prompt because between the
system prompt and the triggering
prompt we have contextual information that can distract the AI and make it
forget that its goal is to find the next task to achieve.
SYSTEM PROMPT
CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
TRIGGERING PROMPT
The triggering prompt reminds the AI about its short term meta task
(defining the next task)
"""
class Agent(BaseAgent):
"""Agent class for interacting with Auto-GPT."""
def __init__(
self,
ai_name: str,
memory: VectorMemory,
next_action_count: int,
command_registry: CommandRegistry,
ai_config: AIConfig,
system_prompt: str,
command_registry: CommandRegistry,
memory: VectorMemory,
triggering_prompt: str,
workspace_directory: str | Path,
config: Config,
cycle_budget: Optional[int] = None,
):
self.ai_name = ai_name
super().__init__(
ai_config=ai_config,
command_registry=command_registry,
config=config,
default_cycle_instruction=triggering_prompt,
cycle_budget=cycle_budget,
)
self.memory = memory
self.history = MessageHistory.for_model(config.smart_llm, agent=self)
self.next_action_count = next_action_count
self.command_registry = command_registry
self.config = config
self.ai_config = ai_config
self.system_prompt = system_prompt
self.triggering_prompt = triggering_prompt
self.workspace = Workspace(workspace_directory, config.restrict_to_workspace)
"""VectorMemoryProvider used to manage the agent's context (TODO)"""
self.workspace = Workspace(config.workspace_path, config.restrict_to_workspace)
"""Workspace that the agent has access to, e.g. for reading/writing files."""
self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
self.cycle_count = 0
"""Timestamp the agent was created; only used for structured debug logging."""
self.log_cycle_handler = LogCycleHandler()
self.smart_token_limit = OPEN_AI_CHAT_MODELS.get(config.smart_llm).max_tokens
"""LogCycleHandler for structured debug logging."""
def start_interaction_loop(self):
# Interaction Loop
self.cycle_count = 0
command_name = None
arguments = None
user_input = ""
def construct_base_prompt(self, *args, **kwargs) -> ChatSequence:
if kwargs.get("prepend_messages") is None:
kwargs["prepend_messages"] = []
# Signal handler for interrupting y -N
def signal_handler(signum, frame):
if self.next_action_count == 0:
sys.exit()
else:
print(
Fore.RED
+ "Interrupt signal received. Stopping continuous command execution."
+ Style.RESET_ALL
)
self.next_action_count = 0
# Clock
kwargs["prepend_messages"].append(
Message("system", f"The current time and date is {time.strftime('%c')}"),
)
signal.signal(signal.SIGINT, signal_handler)
# Add budget information (if any) to prompt
api_manager = ApiManager()
if api_manager.get_total_budget() > 0.0:
remaining_budget = (
api_manager.get_total_budget() - api_manager.get_total_cost()
)
if remaining_budget < 0:
remaining_budget = 0
while True:
# Discontinue if continuous limit is reached
self.cycle_count += 1
self.log_cycle_handler.log_count_within_cycle = 0
budget_msg = Message(
"system",
f"Your remaining API budget is ${remaining_budget:.3f}"
+ (
" BUDGET EXCEEDED! SHUT DOWN!\n\n"
if remaining_budget == 0
else " Budget very nearly exceeded! Shut down gracefully!\n\n"
if remaining_budget < 0.005
else " Budget nearly exceeded. Finish up.\n\n"
if remaining_budget < 0.01
else ""
),
)
logger.debug(budget_msg)
if kwargs.get("append_messages") is None:
kwargs["append_messages"] = []
kwargs["append_messages"].append(budget_msg)
return super().construct_base_prompt(*args, **kwargs)
def on_before_think(self, *args, **kwargs) -> ChatSequence:
prompt = super().on_before_think(*args, **kwargs)
self.log_cycle_handler.log_count_within_cycle = 0
self.log_cycle_handler.log_cycle(
self.ai_config.ai_name,
self.created_at,
self.cycle_count,
self.history.raw(),
FULL_MESSAGE_HISTORY_FILE_NAME,
)
return prompt
def execute(
self,
command_name: str | None,
command_args: dict[str, str] | None,
user_input: str | None,
) -> str:
# Execute command
if command_name is not None and command_name.lower().startswith("error"):
result = f"Could not execute command: {command_name}{command_args}"
elif command_name == "human_feedback":
result = f"Human feedback: {user_input}"
self.log_cycle_handler.log_cycle(
self.ai_config.ai_name,
self.created_at,
self.cycle_count,
[m.raw() for m in self.history],
FULL_MESSAGE_HISTORY_FILE_NAME,
user_input,
USER_INPUT_FILE_NAME,
)
if (
self.config.continuous_mode
and self.config.continuous_limit > 0
and self.cycle_count > self.config.continuous_limit
):
logger.typewriter_log(
"Continuous Limit Reached: ",
Fore.YELLOW,
f"{self.config.continuous_limit}",
)
break
# Send message to AI, get response
with Spinner("Thinking... ", plain_output=self.config.plain_output):
assistant_reply = chat_with_ai(
self.config,
self,
self.system_prompt,
self.triggering_prompt,
self.smart_token_limit,
self.config.smart_llm,
)
try:
assistant_reply_json = extract_json_from_response(
assistant_reply.content
)
validate_json(assistant_reply_json, self.config)
except json.JSONDecodeError as e:
logger.error(f"Exception while validating assistant reply JSON: {e}")
assistant_reply_json = {}
else:
for plugin in self.config.plugins:
if not plugin.can_handle_pre_command():
continue
command_name, arguments = plugin.pre_command(command_name, command_args)
command_result = execute_command(
command_name=command_name,
arguments=command_args,
agent=self,
)
result = f"Command {command_name} returned: " f"{command_result}"
result_tlength = count_string_tokens(str(command_result), self.llm.name)
memory_tlength = count_string_tokens(
str(self.history.summary_message()), self.llm.name
)
if result_tlength + memory_tlength > self.send_token_limit:
result = f"Failure: command {command_name} returned too much output. \
Do not execute this command again with the same arguments."
for plugin in self.config.plugins:
if not plugin.can_handle_post_planning():
if not plugin.can_handle_post_command():
continue
assistant_reply_json = plugin.post_planning(assistant_reply_json)
result = plugin.post_command(command_name, result)
# If the command returned a result, append it to the message history
if result is None:
self.history.add("system", "Unable to execute command", "action_result")
else:
self.history.add("system", result, "action_result")
# Print Assistant thoughts
if assistant_reply_json != {}:
# Get command name and arguments
try:
print_assistant_thoughts(
self.ai_name, assistant_reply_json, self.config
)
command_name, arguments = extract_command(
assistant_reply_json, assistant_reply, self.config
)
if self.config.speak_mode:
say_text(f"I want to execute {command_name}", self.config)
return result
except Exception as e:
logger.error("Error: \n", str(e))
self.log_cycle_handler.log_cycle(
self.ai_config.ai_name,
self.created_at,
self.cycle_count,
assistant_reply_json,
NEXT_ACTION_FILE_NAME,
def parse_and_process_response(
self, llm_response: ChatModelResponse, *args, **kwargs
) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
if not llm_response.content:
raise SyntaxError("Assistant response has no text content")
assistant_reply_dict = extract_dict_from_response(llm_response.content)
valid, errors = validate_dict(assistant_reply_dict, self.config)
if not valid:
raise SyntaxError(
"Validation of response failed:\n "
+ ";\n ".join([str(e) for e in errors])
)
# First log new-line so user can differentiate sections better in console
logger.typewriter_log("\n")
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL} "
f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
)
for plugin in self.config.plugins:
if not plugin.can_handle_post_planning():
continue
assistant_reply_dict = plugin.post_planning(assistant_reply_dict)
if not self.config.continuous_mode and self.next_action_count == 0:
# ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
self.user_input = ""
logger.info(
f"Enter '{self.config.authorise_key}' to authorise command, "
f"'{self.config.authorise_key} -N' to run N continuous commands, "
f"'{self.config.exit_key}' to exit program, or enter feedback for "
f"{self.ai_name}..."
)
while True:
if self.config.chat_messages_enabled:
console_input = clean_input(
self.config, "Waiting for your response..."
)
else:
console_input = clean_input(
self.config, Fore.MAGENTA + "Input:" + Style.RESET_ALL
)
if console_input.lower().strip() == self.config.authorise_key:
user_input = "GENERATE NEXT COMMAND JSON"
break
elif console_input.lower().strip() == "":
logger.warn("Invalid input format.")
continue
elif console_input.lower().startswith(
f"{self.config.authorise_key} -"
):
try:
self.next_action_count = abs(
int(console_input.split(" ")[1])
)
user_input = "GENERATE NEXT COMMAND JSON"
except ValueError:
logger.warn(
f"Invalid input format. Please enter '{self.config.authorise_key} -n' "
"where n is the number of continuous tasks."
)
continue
break
elif console_input.lower() == self.config.exit_key:
user_input = "EXIT"
break
else:
user_input = console_input
command_name = "human_feedback"
self.log_cycle_handler.log_cycle(
self.ai_config.ai_name,
self.created_at,
self.cycle_count,
user_input,
USER_INPUT_FILE_NAME,
)
break
response = None, None, assistant_reply_dict
if user_input == "GENERATE NEXT COMMAND JSON":
logger.typewriter_log(
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
Fore.MAGENTA,
"",
)
elif user_input == "EXIT":
logger.info("Exiting...")
break
else:
# First log new-line so user can differentiate sections better in console
logger.typewriter_log("\n")
# Print authorized commands left value
logger.typewriter_log(
f"{Fore.CYAN}AUTHORISED COMMANDS LEFT: {Style.RESET_ALL}{self.next_action_count}"
# Print Assistant thoughts
if assistant_reply_dict != {}:
# Get command name and arguments
try:
command_name, arguments = extract_command(
assistant_reply_dict, llm_response, self.config
)
response = command_name, arguments, assistant_reply_dict
except Exception as e:
logger.error("Error: \n", str(e))
# Execute command
if command_name is not None and command_name.lower().startswith("error"):
result = f"Could not execute command: {arguments}"
elif command_name == "human_feedback":
result = f"Human feedback: {user_input}"
else:
for plugin in self.config.plugins:
if not plugin.can_handle_pre_command():
continue
command_name, arguments = plugin.pre_command(
command_name, arguments
)
command_result = execute_command(
command_name=command_name,
arguments=arguments,
agent=self,
)
result = f"Command {command_name} returned: " f"{command_result}"
result_tlength = count_string_tokens(
str(command_result), self.config.smart_llm
)
memory_tlength = count_string_tokens(
str(self.history.summary_message()), self.config.smart_llm
)
if result_tlength + memory_tlength + 600 > self.smart_token_limit:
result = f"Failure: command {command_name} returned too much output. \
Do not execute this command again with the same arguments."
for plugin in self.config.plugins:
if not plugin.can_handle_post_command():
continue
result = plugin.post_command(command_name, result)
if self.next_action_count > 0:
self.next_action_count -= 1
# If the command returned a result, append it to the message history
if result is not None:
self.history.add("system", result, "action_result")
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
else:
self.history.add("system", "Unable to execute command", "action_result")
logger.typewriter_log(
"SYSTEM: ", Fore.YELLOW, "Unable to execute command"
)
self.log_cycle_handler.log_cycle(
self.ai_config.ai_name,
self.created_at,
self.cycle_count,
assistant_reply_dict,
NEXT_ACTION_FILE_NAME,
)
return response
def extract_command(
assistant_reply_json: dict, assistant_reply: ChatModelResponse, config: Config
):
) -> tuple[str, dict[str, str]]:
"""Parse the response and return the command name and arguments
Args:
@@ -327,27 +225,29 @@ def extract_command(
"""
if config.openai_functions:
if assistant_reply.function_call is None:
return "Error:", "No 'function_call' in assistant reply"
return "Error:", {"message": "No 'function_call' in assistant reply"}
assistant_reply_json["command"] = {
"name": assistant_reply.function_call.name,
"args": json.loads(assistant_reply.function_call.arguments),
}
try:
if "command" not in assistant_reply_json:
return "Error:", "Missing 'command' object in JSON"
return "Error:", {"message": "Missing 'command' object in JSON"}
if not isinstance(assistant_reply_json, dict):
return (
"Error:",
f"The previous message sent was not a dictionary {assistant_reply_json}",
{
"message": f"The previous message sent was not a dictionary {assistant_reply_json}"
},
)
command = assistant_reply_json["command"]
if not isinstance(command, dict):
return "Error:", "'command' object is not a dictionary"
return "Error:", {"message": "'command' object is not a dictionary"}
if "name" not in command:
return "Error:", "Missing 'name' field in 'command' object"
return "Error:", {"message": "Missing 'name' field in 'command' object"}
command_name = command["name"]
@@ -356,17 +256,17 @@ def extract_command(
return command_name, arguments
except json.decoder.JSONDecodeError:
return "Error:", "Invalid JSON"
return "Error:", {"message": "Invalid JSON"}
# All other errors, return "Error: + error message"
except Exception as e:
return "Error:", str(e)
return "Error:", {"message": str(e)}
def execute_command(
command_name: str,
arguments: dict[str, str],
agent: Agent,
):
) -> Any:
"""Execute the command and return the result
Args:

autogpt/agents/base.py (new file)

@@ -0,0 +1,318 @@
from __future__ import annotations
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Any, Optional
if TYPE_CHECKING:
from autogpt.config import AIConfig, Config
from autogpt.models.command_registry import CommandRegistry
from autogpt.llm.base import ChatModelResponse, ChatSequence, Message
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS, get_openai_command_specs
from autogpt.llm.utils import count_message_tokens, create_chat_completion
from autogpt.logs import logger
from autogpt.memory.message_history import MessageHistory
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
CommandName = str
CommandArgs = dict[str, str]
AgentThoughts = dict[str, Any]
class BaseAgent(metaclass=ABCMeta):
"""Base class for all Auto-GPT agents."""
def __init__(
self,
ai_config: AIConfig,
command_registry: CommandRegistry,
config: Config,
big_brain: bool = True,
default_cycle_instruction: str = DEFAULT_TRIGGERING_PROMPT,
cycle_budget: Optional[int] = 1,
send_token_limit: Optional[int] = None,
summary_max_tlength: Optional[int] = None,
):
self.ai_config = ai_config
"""The AIConfig or "personality" object associated with this agent."""
self.command_registry = command_registry
"""The registry containing all commands available to the agent."""
self.config = config
"""The applicable application configuration."""
self.big_brain = big_brain
"""
Whether this agent uses the configured smart LLM (default) to think,
as opposed to the configured fast LLM.
"""
self.default_cycle_instruction = default_cycle_instruction
"""The default instruction passed to the AI for a thinking cycle."""
self.cycle_budget = cycle_budget
"""
The number of cycles that the agent is allowed to run unsupervised.
`None` for unlimited continuous execution,
`1` to require user approval for every step,
`0` to stop the agent.
"""
self.cycles_remaining = cycle_budget
"""The number of cycles remaining within the `cycle_budget`."""
self.cycle_count = 0
"""The number of cycles that the agent has run since its initialization."""
self.system_prompt = ai_config.construct_full_prompt(config)
"""
The system prompt sets up the AI's personality and explains its goals,
available resources, and restrictions.
"""
llm_name = self.config.smart_llm if self.big_brain else self.config.fast_llm
self.llm = OPEN_AI_CHAT_MODELS[llm_name]
"""The LLM that the agent uses to think."""
self.send_token_limit = send_token_limit or self.llm.max_tokens * 3 // 4
"""
The token limit for prompt construction. Should leave room for the completion;
defaults to 75% of `llm.max_tokens`.
"""
self.history = MessageHistory(
self.llm,
max_summary_tlength=summary_max_tlength or self.send_token_limit // 6,
)
def think(
self,
instruction: Optional[str] = None,
) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
"""Runs the agent for one cycle.
Params:
instruction: The instruction to put at the end of the prompt.
Returns:
The command name and arguments, if any, and the agent's thoughts.
"""
instruction = instruction or self.default_cycle_instruction
prompt: ChatSequence = self.construct_prompt(instruction)
prompt = self.on_before_think(prompt, instruction)
raw_response = create_chat_completion(
prompt,
self.config,
functions=get_openai_command_specs(self.command_registry)
if self.config.openai_functions
else None,
)
self.cycle_count += 1
return self.on_response(raw_response, prompt, instruction)
@abstractmethod
def execute(
self,
command_name: str | None,
command_args: dict[str, str] | None,
user_input: str | None,
) -> str:
"""Executes the given command, if any, and returns the agent's response.
Params:
command_name: The name of the command to execute, if any.
command_args: The arguments to pass to the command, if any.
user_input: The user's input, if any.
Returns:
The results of the command.
"""
...
def construct_base_prompt(
self,
prepend_messages: list[Message] = [],
append_messages: list[Message] = [],
reserve_tokens: int = 0,
) -> ChatSequence:
"""Constructs and returns a prompt with the following structure:
1. System prompt
2. `prepend_messages`
3. Message history of the agent, truncated & prepended with running summary as needed
4. `append_messages`
Params:
prepend_messages: Messages to insert between the system prompt and message history
append_messages: Messages to insert after the message history
reserve_tokens: Number of tokens to reserve for content that is added later
"""
prompt = ChatSequence.for_model(
self.llm.name,
[Message("system", self.system_prompt)] + prepend_messages,
)
# Reserve tokens for messages to be appended later, if any
reserve_tokens += self.history.max_summary_tlength
if append_messages:
reserve_tokens += count_message_tokens(append_messages, self.llm.name)
# Fill message history, up to a margin of reserve_tokens.
# Trim remaining historical messages and add them to the running summary.
history_start_index = len(prompt)
trimmed_history = add_history_upto_token_limit(
prompt, self.history, self.send_token_limit - reserve_tokens
)
if trimmed_history:
new_summary_msg, _ = self.history.trim_messages(list(prompt), self.config)
prompt.insert(history_start_index, new_summary_msg)
if append_messages:
prompt.extend(append_messages)
return prompt
def construct_prompt(self, cycle_instruction: str) -> ChatSequence:
"""Constructs and returns a prompt with the following structure:
1. System prompt
2. Message history of the agent, truncated & prepended with running summary as needed
3. `cycle_instruction`
Params:
cycle_instruction: The final instruction for a thinking cycle
"""
if not cycle_instruction:
raise ValueError("No instruction given")
cycle_instruction_msg = Message("user", cycle_instruction)
cycle_instruction_tlength = count_message_tokens(
cycle_instruction_msg, self.llm.name
)
prompt = self.construct_base_prompt(reserve_tokens=cycle_instruction_tlength)
# ADD user input message ("triggering prompt")
prompt.append(cycle_instruction_msg)
return prompt
def on_before_think(self, prompt: ChatSequence, instruction: str) -> ChatSequence:
"""Called after constructing the prompt but before executing it.
Calls the `on_planning` hook of any enabled and capable plugins, adding their
output to the prompt.
Params:
instruction: The instruction for the current cycle, also used in constructing the prompt
Returns:
The prompt to execute
"""
current_tokens_used = prompt.token_length
plugin_count = len(self.config.plugins)
for i, plugin in enumerate(self.config.plugins):
if not plugin.can_handle_on_planning():
continue
plugin_response = plugin.on_planning(
self.ai_config.prompt_generator, prompt.raw()
)
if not plugin_response or plugin_response == "":
continue
message_to_add = Message("system", plugin_response)
tokens_to_add = count_message_tokens(message_to_add, self.llm.name)
if current_tokens_used + tokens_to_add > self.send_token_limit:
logger.debug(f"Plugin response too long, skipping: {plugin_response}")
logger.debug(f"Plugins remaining at stop: {plugin_count - i}")
break
prompt.insert(
-1, message_to_add
) # HACK: assumes cycle instruction to be at the end
current_tokens_used += tokens_to_add
return prompt
def on_response(
self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str
) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
"""Called upon receiving a response from the chat model.
Adds the last/newest message in the prompt and the response to `history`,
and calls `self.parse_and_process_response()` to do the rest.
Params:
llm_response: The raw response from the chat model
prompt: The prompt that was executed
instruction: The instruction for the current cycle, also used in constructing the prompt
Returns:
The parsed command name and command args, if any, and the agent thoughts.
"""
# Save assistant reply to message history
self.history.append(prompt[-1])
self.history.add(
"assistant", llm_response.content, "ai_response"
) # FIXME: support function calls
try:
return self.parse_and_process_response(llm_response, prompt, instruction)
except SyntaxError as e:
logger.error(f"Response could not be parsed: {e}")
# TODO: tune this message
self.history.add(
"system",
f"Your response could not be parsed: {e}"
"\n\nRemember to only respond using the specified format above!",
)
return None, None, {}
# TODO: update memory/context
@abstractmethod
def parse_and_process_response(
self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str
) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
"""Validate, parse & process the LLM's response.
Must be implemented by derivative classes: no base implementation is provided,
since the implementation depends on the role of the derivative Agent.
Params:
llm_response: The raw response from the chat model
prompt: The prompt that was executed
instruction: The instruction for the current cycle, also used in constructing the prompt
Returns:
The parsed command name and command args, if any, and the agent thoughts.
"""
pass
def add_history_upto_token_limit(
prompt: ChatSequence, history: MessageHistory, t_limit: int
) -> list[Message]:
current_prompt_length = prompt.token_length
insertion_index = len(prompt)
limit_reached = False
trimmed_messages: list[Message] = []
for cycle in reversed(list(history.per_cycle())):
messages_to_add = [msg for msg in cycle if msg is not None]
tokens_to_add = count_message_tokens(messages_to_add, prompt.model.name)
if current_prompt_length + tokens_to_add > t_limit:
limit_reached = True
if not limit_reached:
# Add the most recent message to the start of the chain,
# after the system prompts.
prompt.insert(insertion_index, *messages_to_add)
current_prompt_length += tokens_to_add
else:
trimmed_messages = messages_to_add + trimmed_messages
return trimmed_messages
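BaseAgent thus defines the think/execute contract that a front end drives; the cycle-budget bookkeeping lives in run_interaction_loop in the new autogpt/app/main.py below. A heavily simplified, hypothetical sketch of that driving loop, assuming a concrete agent instance:

# Hypothetical driver, condensed from run_interaction_loop below:
while agent.cycle_budget is None or agent.cycle_count < agent.cycle_budget:
    command_name, command_args, thoughts = agent.think()      # plan one cycle
    result = agent.execute(command_name, command_args, None)  # act on it
    print(result)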

autogpt/app/__init__.py (new, empty file)


@@ -1,4 +1,5 @@
"""Main script for the autogpt package."""
from pathlib import Path
from typing import Optional
import click
@@ -15,7 +16,10 @@ import click
@click.option(
"--ai-settings",
"-C",
help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
help=(
"Specifies which ai_settings.yaml file to use, relative to the Auto-GPT"
" root directory. Will also automatically skip the re-prompt."
),
)
@click.option(
"--prompt-settings",
@@ -111,28 +115,31 @@ def main(
Start an Auto-GPT assistant.
"""
# Put imports inside function to avoid importing everything when starting the CLI
from autogpt.main import run_auto_gpt
from autogpt.app.main import run_auto_gpt
if ctx.invoked_subcommand is None:
run_auto_gpt(
continuous,
continuous_limit,
ai_settings,
prompt_settings,
skip_reprompt,
speak,
debug,
gpt3only,
gpt4only,
memory_type,
browser_name,
allow_downloads,
skip_news,
workspace_directory,
install_plugin_deps,
ai_name,
ai_role,
ai_goal,
continuous=continuous,
continuous_limit=continuous_limit,
ai_settings=ai_settings,
prompt_settings=prompt_settings,
skip_reprompt=skip_reprompt,
speak=speak,
debug=debug,
gpt3only=gpt3only,
gpt4only=gpt4only,
memory_type=memory_type,
browser_name=browser_name,
allow_downloads=allow_downloads,
skip_news=skip_news,
working_directory=Path(
__file__
).parent.parent.parent, # TODO: make this an option
workspace_directory=workspace_directory,
install_plugin_deps=install_plugin_deps,
ai_name=ai_name,
ai_role=ai_role,
ai_goals=ai_goal,
)


@@ -1,20 +1,18 @@
"""Configurator module."""
from __future__ import annotations
from typing import TYPE_CHECKING
from typing import Literal
import click
from colorama import Back, Fore, Style
from autogpt import utils
from autogpt.config import Config
from autogpt.config.config import GPT_3_MODEL, GPT_4_MODEL
from autogpt.llm.utils import check_model
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import logger
from autogpt.memory.vector import get_supported_memory_backends
if TYPE_CHECKING:
from autogpt.config import Config
def create_config(
config: Config,
@@ -165,3 +163,25 @@ def create_config(
if skip_news:
config.skip_news = True
def check_model(
model_name: str,
model_type: Literal["smart_llm", "fast_llm"],
config: Config,
) -> str:
"""Check if model is available for use. If not, return gpt-3.5-turbo."""
openai_credentials = config.get_openai_credentials(model_name)
api_manager = ApiManager()
models = api_manager.get_models(**openai_credentials)
if any(model_name in m["id"] for m in models):
return model_name
logger.typewriter_log(
"WARNING: ",
Fore.YELLOW,
f"You do not have access to {model_name}. Setting {model_type} to "
f"gpt-3.5-turbo.",
)
return "gpt-3.5-turbo"

autogpt/app/main.py (new file)

@@ -0,0 +1,597 @@
"""The application entry point. Can be invoked by a CLI or any other front end application."""
import enum
import logging
import math
import signal
import sys
from pathlib import Path
from types import FrameType
from typing import Optional
from colorama import Fore, Style
from autogpt.agents import Agent, AgentThoughts, CommandArgs, CommandName
from autogpt.app.configurator import create_config
from autogpt.app.setup import prompt_user
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, Config, ConfigBuilder, check_openai_api_key
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import logger
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.plugins import scan_plugins
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.speech import say_text
from autogpt.spinner import Spinner
from autogpt.utils import (
clean_input,
get_current_git_branch,
get_latest_bulletin,
get_legal_warning,
markdown_to_ansi_style,
)
from autogpt.workspace import Workspace
from scripts.install_plugin_deps import install_plugin_dependencies
def run_auto_gpt(
continuous: bool,
continuous_limit: int,
ai_settings: str,
prompt_settings: str,
skip_reprompt: bool,
speak: bool,
debug: bool,
gpt3only: bool,
gpt4only: bool,
memory_type: str,
browser_name: str,
allow_downloads: bool,
skip_news: bool,
working_directory: Path,
workspace_directory: str | Path,
install_plugin_deps: bool,
ai_name: Optional[str] = None,
ai_role: Optional[str] = None,
ai_goals: tuple[str] = tuple(),
):
# Configure logging before we do anything else.
logger.set_level(logging.DEBUG if debug else logging.INFO)
config = ConfigBuilder.build_config_from_env(workdir=working_directory)
# HACK: This is a hack to allow the config into the logger without having to pass it around everywhere
# or import it directly.
logger.config = config
# TODO: fill in llm values here
check_openai_api_key(config)
create_config(
config,
continuous,
continuous_limit,
ai_settings,
prompt_settings,
skip_reprompt,
speak,
debug,
gpt3only,
gpt4only,
memory_type,
browser_name,
allow_downloads,
skip_news,
)
if config.continuous_mode:
for line in get_legal_warning().split("\n"):
logger.warn(markdown_to_ansi_style(line), "LEGAL:", Fore.RED)
if not config.skip_news:
motd, is_new_motd = get_latest_bulletin()
if motd:
motd = markdown_to_ansi_style(motd)
for motd_line in motd.split("\n"):
logger.info(motd_line, "NEWS:", Fore.GREEN)
if is_new_motd and not config.chat_messages_enabled:
input(
Fore.MAGENTA
+ Style.BRIGHT
+ "NEWS: Bulletin was updated! Press Enter to continue..."
+ Style.RESET_ALL
)
git_branch = get_current_git_branch()
if git_branch and git_branch != "stable":
logger.typewriter_log(
"WARNING: ",
Fore.RED,
f"You are running on `{git_branch}` branch "
"- this is not a supported branch.",
)
if sys.version_info < (3, 10):
logger.typewriter_log(
"WARNING: ",
Fore.RED,
"You are running on an older version of Python. "
"Some people have observed problems with certain "
"parts of Auto-GPT with this version. "
"Please consider upgrading to Python 3.10 or higher.",
)
if install_plugin_deps:
install_plugin_dependencies()
# TODO: have this directory live outside the repository (e.g. in a user's
# home directory) and have it come in as a command line argument or part of
# the env file.
Workspace.set_workspace_directory(config, workspace_directory)
# HACK: doing this here to collect some globals that depend on the workspace.
Workspace.set_file_logger_path(config, config.workspace_path)
config.plugins = scan_plugins(config, config.debug_mode)
# Create a CommandRegistry instance and scan default folder
command_registry = CommandRegistry()
logger.debug(
f"The following command categories are disabled: {config.disabled_command_categories}"
)
enabled_command_categories = [
x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories
]
logger.debug(
f"The following command categories are enabled: {enabled_command_categories}"
)
for command_category in enabled_command_categories:
command_registry.import_commands(command_category)
# Unregister commands that are incompatible with the current config
incompatible_commands = []
for command in command_registry.commands.values():
if callable(command.enabled) and not command.enabled(config):
command.enabled = False
incompatible_commands.append(command)
for command in incompatible_commands:
command_registry.unregister(command)
logger.debug(
f"Unregistering incompatible command: {command.name}, "
f"reason - {command.disabled_reason or 'Disabled by current config.'}"
)
ai_config = construct_main_ai_config(
config,
name=ai_name,
role=ai_role,
goals=ai_goals,
)
ai_config.command_registry = command_registry
# print(prompt)
# add chat plugins capable of report to logger
if config.chat_messages_enabled:
for plugin in config.plugins:
if hasattr(plugin, "can_handle_report") and plugin.can_handle_report():
logger.info(f"Loaded plugin into logger: {plugin.__class__.__name__}")
logger.chat_plugins.append(plugin)
# Initialize memory and make sure it is empty.
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(config)
memory.clear()
logger.typewriter_log(
"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
)
logger.typewriter_log("Using Browser:", Fore.GREEN, config.selenium_web_browser)
agent = Agent(
memory=memory,
command_registry=command_registry,
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
ai_config=ai_config,
config=config,
)
run_interaction_loop(agent)
def _get_cycle_budget(continuous_mode: bool, continuous_limit: int) -> int | None:
# Translate from the continuous_mode/continuous_limit config
# to a cycle_budget (maximum number of cycles to run without checking in with the
# user) and a count of cycles_remaining before we check in.
if continuous_mode:
cycle_budget = continuous_limit if continuous_limit else math.inf
else:
cycle_budget = 1
return cycle_budget
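For illustration, the mapping this produces:

# continuous_mode=False                     -> cycle_budget = 1 (check in every cycle)
# continuous_mode=True, continuous_limit=0  -> cycle_budget = math.inf (unlimited)
# continuous_mode=True, continuous_limit=5  -> cycle_budget = 5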
class UserFeedback(str, enum.Enum):
"""Enum for user feedback."""
AUTHORIZE = "GENERATE NEXT COMMAND JSON"
EXIT = "EXIT"
TEXT = "TEXT"
def run_interaction_loop(
agent: Agent,
) -> None:
"""Run the main interaction loop for the agent.
Args:
agent: The agent to run the interaction loop for.
Returns:
None
"""
# These contain both application config and agent config, so grab them here.
config = agent.config
ai_config = agent.ai_config
logger.debug(f"{ai_config.ai_name} System Prompt: {agent.system_prompt}")
cycle_budget = cycles_remaining = _get_cycle_budget(
config.continuous_mode, config.continuous_limit
)
spinner = Spinner("Thinking...", plain_output=config.plain_output)
def graceful_agent_interrupt(signum: int, frame: Optional[FrameType]) -> None:
nonlocal cycle_budget, cycles_remaining, spinner
if cycles_remaining in [0, 1, math.inf]:
logger.typewriter_log(
"Interrupt signal received. Stopping continuous command execution "
"immediately.",
Fore.RED,
)
sys.exit()
else:
restart_spinner = spinner.running
if spinner.running:
spinner.stop()
logger.typewriter_log(
"Interrupt signal received. Stopping continuous command execution.",
Fore.RED,
)
cycles_remaining = 1
if restart_spinner:
spinner.start()
# Set up an interrupt signal for the agent.
signal.signal(signal.SIGINT, graceful_agent_interrupt)
#########################
# Application Main Loop #
#########################
while cycles_remaining > 0:
logger.debug(f"Cycle budget: {cycle_budget}; remaining: {cycles_remaining}")
########
# Plan #
########
# Have the agent determine the next action to take.
with spinner:
command_name, command_args, assistant_reply_dict = agent.think()
###############
# Update User #
###############
# Print the assistant's thoughts and the next command to the user.
update_user(config, ai_config, command_name, command_args, assistant_reply_dict)
##################
# Get user input #
##################
if cycles_remaining == 1: # Last cycle
user_feedback, user_input, new_cycles_remaining = get_user_feedback(
config,
ai_config,
)
if user_feedback == UserFeedback.AUTHORIZE:
if new_cycles_remaining is not None:
# Case 1: User is altering the cycle budget.
if cycle_budget > 1:
cycle_budget = new_cycles_remaining + 1
# Case 2: User is running iteratively and
# has initiated a one-time continuous cycle
cycles_remaining = new_cycles_remaining + 1
else:
# Case 1: Continuous iteration was interrupted -> resume
if cycle_budget > 1:
logger.typewriter_log(
"RESUMING CONTINUOUS EXECUTION: ",
Fore.MAGENTA,
f"The cycle budget is {cycle_budget}.",
)
# Case 2: The agent used up its cycle budget -> reset
cycles_remaining = cycle_budget + 1
logger.typewriter_log(
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
Fore.MAGENTA,
"",
)
elif user_feedback == UserFeedback.EXIT:
logger.typewriter_log("Exiting...", Fore.YELLOW)
exit()
else: # user_feedback == UserFeedback.TEXT
command_name = "human_feedback"
else:
user_input = None
# First log new-line so user can differentiate sections better in console
logger.typewriter_log("\n")
if cycles_remaining != math.inf:
# Print authorized commands left value
logger.typewriter_log(
"AUTHORISED COMMANDS LEFT: ", Fore.CYAN, f"{cycles_remaining}"
)
###################
# Execute Command #
###################
# Decrement the cycle counter first to reduce the likelihood of a SIGINT
# happening during command execution, setting the cycles remaining to 1,
# and then having the decrement set it to 0, exiting the application.
if command_name != "human_feedback":
cycles_remaining -= 1
result = agent.execute(command_name, command_args, user_input)
if result is not None:
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
else:
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
def update_user(
config: Config,
ai_config: AIConfig,
command_name: CommandName | None,
command_args: CommandArgs | None,
assistant_reply_dict: AgentThoughts,
) -> None:
"""Prints the assistant's thoughts and the next command to the user.
Args:
config: The program's configuration.
ai_config: The AI's configuration.
command_name: The name of the command to execute.
command_args: The arguments for the command.
assistant_reply_dict: The assistant's reply.
"""
print_assistant_thoughts(ai_config.ai_name, assistant_reply_dict, config)
    # Handle the None and error cases before announcing a next action;
    # otherwise the error branch is unreachable and the None case crashes.
    if command_name is None:
        logger.typewriter_log(
            "NO ACTION SELECTED: ",
            Fore.RED,
            "The Agent failed to select an action.",
        )
    elif command_name.lower().startswith("error"):
        logger.typewriter_log(
            "ERROR: ",
            Fore.RED,
            f"The Agent failed to select an action. Error message: {command_name}",
        )
    else:
        if config.speak_mode:
            say_text(f"I want to execute {command_name}", config)
        # First log new-line so user can differentiate sections better in console
        logger.typewriter_log("\n")
        logger.typewriter_log(
            "NEXT ACTION: ",
            Fore.CYAN,
            f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL} "
            f"ARGUMENTS = {Fore.CYAN}{command_args}{Style.RESET_ALL}",
        )
def get_user_feedback(
config: Config,
ai_config: AIConfig,
) -> tuple[UserFeedback, str, int | None]:
"""Gets the user's feedback on the assistant's reply.
Args:
config: The program's configuration.
ai_config: The AI's configuration.
Returns:
A tuple of the user's feedback, the user's input, and the number of
cycles remaining if the user has initiated a continuous cycle.
"""
# ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
logger.info(
f"Enter '{config.authorise_key}' to authorise command, "
f"'{config.authorise_key} -N' to run N continuous commands, "
f"'{config.exit_key}' to exit program, or enter feedback for "
f"{ai_config.ai_name}..."
)
user_feedback = None
user_input = ""
new_cycles_remaining = None
while user_feedback is None:
# Get input from user
if config.chat_messages_enabled:
console_input = clean_input(config, "Waiting for your response...")
else:
console_input = clean_input(
config, Fore.MAGENTA + "Input:" + Style.RESET_ALL
)
# Parse user input
if console_input.lower().strip() == config.authorise_key:
user_feedback = UserFeedback.AUTHORIZE
elif console_input.lower().strip() == "":
logger.warn("Invalid input format.")
elif console_input.lower().startswith(f"{config.authorise_key} -"):
try:
user_feedback = UserFeedback.AUTHORIZE
new_cycles_remaining = abs(int(console_input.split(" ")[1]))
except ValueError:
logger.warn(
f"Invalid input format. "
f"Please enter '{config.authorise_key} -N'"
" where N is the number of continuous tasks."
)
elif console_input.lower() in [config.exit_key, "exit"]:
user_feedback = UserFeedback.EXIT
else:
user_feedback = UserFeedback.TEXT
user_input = console_input
return user_feedback, user_input, new_cycles_remaining
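For illustration, how console input maps onto the returned tuple, assuming the default authorise key "y" and exit key "n" (the defaults are an assumption here):

#   "y"              -> (UserFeedback.AUTHORIZE, "", None)
#   "y -5"           -> (UserFeedback.AUTHORIZE, "", 5)
#   "n" or "exit"    -> (UserFeedback.EXIT, "", None)
#   "try X instead"  -> (UserFeedback.TEXT, "try X instead", None)
#   ""               -> warns "Invalid input format." and prompts again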
def construct_main_ai_config(
config: Config,
name: Optional[str] = None,
role: Optional[str] = None,
goals: tuple[str] = tuple(),
) -> AIConfig:
"""Construct the prompt for the AI to respond to
Returns:
str: The prompt string
"""
ai_config = AIConfig.load(config.workdir / config.ai_settings_file)
# Apply overrides
if name:
ai_config.ai_name = name
if role:
ai_config.ai_role = role
if goals:
ai_config.ai_goals = list(goals)
if (
all([name, role, goals])
or config.skip_reprompt
and all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals])
):
logger.typewriter_log("Name :", Fore.GREEN, ai_config.ai_name)
logger.typewriter_log("Role :", Fore.GREEN, ai_config.ai_role)
logger.typewriter_log("Goals:", Fore.GREEN, f"{ai_config.ai_goals}")
logger.typewriter_log(
"API Budget:",
Fore.GREEN,
"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}",
)
elif all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals]):
logger.typewriter_log(
"Welcome back! ",
Fore.GREEN,
f"Would you like me to return to being {ai_config.ai_name}?",
speak_text=True,
)
should_continue = clean_input(
config,
f"""Continue with the last settings?
Name: {ai_config.ai_name}
Role: {ai_config.ai_role}
Goals: {ai_config.ai_goals}
API Budget: {"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}"}
Continue ({config.authorise_key}/{config.exit_key}): """,
)
if should_continue.lower() == config.exit_key:
ai_config = AIConfig()
if any([not ai_config.ai_name, not ai_config.ai_role, not ai_config.ai_goals]):
ai_config = prompt_user(config)
ai_config.save(config.workdir / config.ai_settings_file)
if config.restrict_to_workspace:
logger.typewriter_log(
"NOTE:All files/directories created by this agent can be found inside its workspace at:",
Fore.YELLOW,
f"{config.workspace_path}",
)
# set the total api budget
api_manager = ApiManager()
api_manager.set_total_budget(ai_config.api_budget)
# Agent Created, print message
logger.typewriter_log(
ai_config.ai_name,
Fore.LIGHTBLUE_EX,
"has been created with the following details:",
speak_text=True,
)
# Print the ai_config details
# Name
logger.typewriter_log("Name:", Fore.GREEN, ai_config.ai_name, speak_text=False)
# Role
logger.typewriter_log("Role:", Fore.GREEN, ai_config.ai_role, speak_text=False)
# Goals
logger.typewriter_log("Goals:", Fore.GREEN, "", speak_text=False)
for goal in ai_config.ai_goals:
logger.typewriter_log("-", Fore.GREEN, goal, speak_text=False)
return ai_config
def print_assistant_thoughts(
ai_name: str,
assistant_reply_json_valid: dict,
config: Config,
) -> None:
from autogpt.speech import say_text
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
assistant_thoughts_speak = None
assistant_thoughts_criticism = None
assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
assistant_thoughts_text = remove_ansi_escape(assistant_thoughts.get("text", ""))
if assistant_thoughts:
assistant_thoughts_reasoning = remove_ansi_escape(
assistant_thoughts.get("reasoning", "")
)
assistant_thoughts_plan = remove_ansi_escape(assistant_thoughts.get("plan", ""))
assistant_thoughts_criticism = remove_ansi_escape(
assistant_thoughts.get("criticism", "")
)
assistant_thoughts_speak = remove_ansi_escape(
assistant_thoughts.get("speak", "")
)
logger.typewriter_log(
f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text
)
logger.typewriter_log("REASONING:", Fore.YELLOW, str(assistant_thoughts_reasoning))
if assistant_thoughts_plan:
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
# If it's a list, join it into a string
if isinstance(assistant_thoughts_plan, list):
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
elif isinstance(assistant_thoughts_plan, dict):
assistant_thoughts_plan = str(assistant_thoughts_plan)
# Split the input_string using the newline character and dashes
lines = assistant_thoughts_plan.split("\n")
for line in lines:
line = line.lstrip("- ")
logger.typewriter_log("- ", Fore.GREEN, line.strip())
logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
# Speak the assistant's thoughts
if assistant_thoughts_speak:
if config.speak_mode:
say_text(assistant_thoughts_speak, config)
else:
logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")
def remove_ansi_escape(s: str) -> str:
return s.replace("\x1B", "")


@@ -9,7 +9,7 @@ from autogpt import utils
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.llm.base import ChatSequence, Message
from autogpt.llm.chat import create_chat_completion
from autogpt.llm.utils import create_chat_completion
from autogpt.logs import logger
from autogpt.prompts.default_prompts import (
DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC,


@@ -0,0 +1,7 @@
COMMAND_CATEGORIES = [
"autogpt.commands.execute_code",
"autogpt.commands.file_operations",
"autogpt.commands.web_search",
"autogpt.commands.web_selenium",
"autogpt.commands.task_statuses",
]


@@ -145,11 +145,14 @@ def execute_python_file(filename: str, agent: Agent) -> str:
logger.debug(f"Running {file_path} in a {image_name} container...")
container: DockerContainer = client.containers.run(
image_name,
["python", str(file_path.relative_to(agent.workspace.root))],
[
"python",
file_path.relative_to(agent.workspace.root).as_posix(),
],
volumes={
agent.config.workspace_path: {
str(agent.config.workspace_path): {
"bind": "/workspace",
"mode": "ro",
"mode": "rw",
}
},
working_dir="/workspace",
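Two fixes here: the workspace is now mounted read-write, giving sandboxed Python scripts write access to the workspace (per the release highlights), and as_posix() keeps the in-container path valid when the host is Windows, where relative_to() yields a backslash-separated path. For example:

from pathlib import PureWindowsPath

p = PureWindowsPath("auto_gpt_workspace", "sub", "script.py")
str(p)        # 'auto_gpt_workspace\\sub\\script.py' -- not a valid path in the Linux container
p.as_posix()  # 'auto_gpt_workspace/sub/script.py'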


@@ -37,7 +37,7 @@ def generate_image(prompt: str, agent: Agent, size: int = 256) -> str:
Returns:
str: The filename of the image
"""
filename = f"{agent.config.workspace_path}/{str(uuid.uuid4())}.jpg"
filename = agent.config.workspace_path / f"{str(uuid.uuid4())}.jpg"
# DALL-E
if agent.config.image_provider == "dalle":


@@ -4,7 +4,6 @@ A module that contains the AIConfig class object that contains the configuration
"""
from __future__ import annotations
import os
import platform
from pathlib import Path
from typing import TYPE_CHECKING, Optional
@@ -16,9 +15,6 @@ if TYPE_CHECKING:
from autogpt.models.command_registry import CommandRegistry
from autogpt.prompts.generator import PromptGenerator
# Soon this will go in a folder where it remembers more stuff about the run(s)
SAVE_FILE = str(Path(os.getcwd()) / "ai_settings.yaml")
class AIConfig:
"""
@@ -57,14 +53,13 @@ class AIConfig:
self.command_registry: CommandRegistry | None = None
@staticmethod
def load(ai_settings_file: str = SAVE_FILE) -> "AIConfig":
def load(ai_settings_file: str | Path) -> "AIConfig":
"""
Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget)
loaded from yaml file if yaml file exists, else returns class with no parameters.
Parameters:
ai_settings_file (int): The path to the config yaml file.
DEFAULT: "../ai_settings.yaml"
ai_settings_file (Path): The path to the config yaml file.
Returns:
cls (object): An instance of given cls object
@@ -85,16 +80,15 @@ class AIConfig:
for goal in config_params.get("ai_goals", [])
]
api_budget = config_params.get("api_budget", 0.0)
# type: Type[AIConfig]
return AIConfig(ai_name, ai_role, ai_goals, api_budget)
def save(self, ai_settings_file: str = SAVE_FILE) -> None:
def save(self, ai_settings_file: str | Path) -> None:
"""
Saves the class parameters to the specified file yaml file path as a yaml file.
Parameters:
ai_settings_file(str): The path to the config yaml file.
DEFAULT: "../ai_settings.yaml"
ai_settings_file (Path): The path to the config yaml file.
Returns:
None
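With the CWD-dependent SAVE_FILE default gone, callers now pass the settings path explicitly, e.g. config.workdir / config.ai_settings_file as seen in autogpt/app/main.py above. A minimal usage sketch (the root path is hypothetical):

from pathlib import Path
from autogpt.config.ai_config import AIConfig

settings_path = Path("/opt/Auto-GPT") / "ai_settings.yaml"  # hypothetical root
ai_config = AIConfig.load(settings_path)
ai_config.save(settings_path)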


@@ -4,6 +4,7 @@ from __future__ import annotations
import contextlib
import os
import re
from pathlib import Path
from typing import Any, Dict, Optional, Union
import yaml
@@ -14,10 +15,11 @@ from pydantic import Field, validator
from autogpt.core.configuration.schema import Configurable, SystemSettings
from autogpt.plugins.plugins_config import PluginsConfig
AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml")
PLUGINS_CONFIG_FILE = os.path.join(
os.path.dirname(__file__), "../..", "plugins_config.yaml"
)
AI_SETTINGS_FILE = "ai_settings.yaml"
AZURE_CONFIG_FILE = "azure.yaml"
PLUGINS_CONFIG_FILE = "plugins_config.yaml"
PROMPT_SETTINGS_FILE = "prompt_settings.yaml"
GPT_4_MODEL = "gpt-4"
GPT_3_MODEL = "gpt-3.5-turbo"
@@ -45,9 +47,10 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
# Agent Control Settings #
##########################
# Paths
ai_settings_file: str = "ai_settings.yaml"
prompt_settings_file: str = "prompt_settings.yaml"
workspace_path: Optional[str] = None
ai_settings_file: str = AI_SETTINGS_FILE
prompt_settings_file: str = PROMPT_SETTINGS_FILE
workdir: Path = None
workspace_path: Optional[Path] = None
file_logger_path: Optional[str] = None
# Model configuration
fast_llm: str = "gpt-3.5-turbo"
@@ -210,15 +213,18 @@ class ConfigBuilder(Configurable[Config]):
default_settings = Config()
@classmethod
def build_config_from_env(cls) -> Config:
def build_config_from_env(cls, workdir: Path) -> Config:
"""Initialize the Config class"""
config_dict = {
"workdir": workdir,
"authorise_key": os.getenv("AUTHORISE_COMMAND_KEY"),
"exit_key": os.getenv("EXIT_KEY"),
"plain_output": os.getenv("PLAIN_OUTPUT", "False") == "True",
"shell_command_control": os.getenv("SHELL_COMMAND_CONTROL"),
"ai_settings_file": os.getenv("AI_SETTINGS_FILE"),
"prompt_settings_file": os.getenv("PROMPT_SETTINGS_FILE"),
"ai_settings_file": os.getenv("AI_SETTINGS_FILE", AI_SETTINGS_FILE),
"prompt_settings_file": os.getenv(
"PROMPT_SETTINGS_FILE", PROMPT_SETTINGS_FILE
),
"fast_llm": os.getenv("FAST_LLM", os.getenv("FAST_LLM_MODEL")),
"smart_llm": os.getenv("SMART_LLM", os.getenv("SMART_LLM_MODEL")),
"embedding_model": os.getenv("EMBEDDING_MODEL"),
@@ -255,7 +261,9 @@ class ConfigBuilder(Configurable[Config]):
"redis_password": os.getenv("REDIS_PASSWORD"),
"wipe_redis_on_start": os.getenv("WIPE_REDIS_ON_START", "True") == "True",
"plugins_dir": os.getenv("PLUGINS_DIR"),
"plugins_config_file": os.getenv("PLUGINS_CONFIG_FILE"),
"plugins_config_file": os.getenv(
"PLUGINS_CONFIG_FILE", PLUGINS_CONFIG_FILE
),
"chat_messages_enabled": os.getenv("CHAT_MESSAGES_ENABLED") == "True",
}
@@ -277,16 +285,16 @@ class ConfigBuilder(Configurable[Config]):
config_dict["elevenlabs_voice_id"] = os.getenv(
"ELEVENLABS_VOICE_ID", os.getenv("ELEVENLABS_VOICE_1_ID")
)
elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
if os.getenv("USE_MAC_OS_TTS"):
default_tts_provider = "macos"
elif elevenlabs_api_key:
default_tts_provider = "elevenlabs"
elif os.getenv("USE_BRIAN_TTS"):
default_tts_provider = "streamelements"
else:
default_tts_provider = "gtts"
config_dict["text_to_speech_provider"] = default_tts_provider
if not config_dict["text_to_speech_provider"]:
if os.getenv("USE_MAC_OS_TTS"):
default_tts_provider = "macos"
elif config_dict["elevenlabs_api_key"]:
default_tts_provider = "elevenlabs"
elif os.getenv("USE_BRIAN_TTS"):
default_tts_provider = "streamelements"
else:
default_tts_provider = "gtts"
config_dict["text_to_speech_provider"] = default_tts_provider
config_dict["plugins_allowlist"] = _safe_split(os.getenv("ALLOWLISTED_PLUGINS"))
config_dict["plugins_denylist"] = _safe_split(os.getenv("DENYLISTED_PLUGINS"))
@@ -299,7 +307,9 @@ class ConfigBuilder(Configurable[Config]):
config_dict["temperature"] = float(os.getenv("TEMPERATURE"))
if config_dict["use_azure"]:
azure_config = cls.load_azure_config(config_dict["azure_config_file"])
azure_config = cls.load_azure_config(
workdir / config_dict["azure_config_file"]
)
config_dict.update(azure_config)
elif os.getenv("OPENAI_API_BASE_URL"):
@@ -318,7 +328,7 @@ class ConfigBuilder(Configurable[Config]):
# Set secondary config variables (that depend on other config variables)
config.plugins_config = PluginsConfig.load_config(
config.plugins_config_file,
config.workdir / config.plugins_config_file,
config.plugins_denylist,
config.plugins_allowlist,
)
@@ -326,13 +336,13 @@ class ConfigBuilder(Configurable[Config]):
return config
@classmethod
def load_azure_config(cls, config_file: str = AZURE_CONFIG_FILE) -> Dict[str, str]:
def load_azure_config(cls, config_file: Path) -> Dict[str, str]:
"""
Loads the configuration parameters for Azure hosting from the specified file
path as a yaml file.
Parameters:
config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"
config_file (Path): The path to the config yaml file.
Returns:
Dict
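
Taken together, the new module-level constants and the `workdir` parameter mean that every config file name taken from the environment is now interpreted relative to the Auto-GPT root directory. A minimal sketch of that resolution, assuming a hypothetical `resolve_config_file` helper (in the actual builder this logic is inline):

```python
import os
from pathlib import Path

AZURE_CONFIG_FILE = "azure.yaml"  # default name, as defined above


def resolve_config_file(workdir: Path, env_var: str, default: str) -> Path:
    """Hypothetical helper: env vars hold names relative to the Auto-GPT root."""
    return workdir / os.getenv(env_var, default)


# e.g. with AZURE_CONFIG_FILE=config/azure.yaml in the environment,
# this resolves to <workdir>/config/azure.yaml
azure_config_path = resolve_config_file(Path.cwd(), "AZURE_CONFIG_FILE", AZURE_CONFIG_FILE)
```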

View File

@@ -2,6 +2,12 @@ import click
from autogpt.core.agent import AgentSettings, SimpleAgent
from autogpt.core.runner.client_lib.logging import get_client_logger
from autogpt.core.runner.client_lib.parser import (
parse_ability_result,
parse_agent_name_and_goals,
parse_agent_plan,
parse_next_ability,
)
async def run_auto_gpt(user_configuration: dict):
@@ -61,50 +67,3 @@ async def run_auto_gpt(user_configuration: dict):
)
ability_result = await agent.execute_next_ability(user_input)
print(parse_ability_result(ability_result))
def parse_agent_name_and_goals(name_and_goals: dict) -> str:
parsed_response = f"Agent Name: {name_and_goals['agent_name']}\n"
parsed_response += f"Agent Role: {name_and_goals['agent_role']}\n"
parsed_response += "Agent Goals:\n"
for i, goal in enumerate(name_and_goals["agent_goals"]):
parsed_response += f"{i+1}. {goal}\n"
return parsed_response
def parse_agent_plan(plan: dict) -> str:
parsed_response = f"Agent Plan:\n"
for i, task in enumerate(plan["task_list"]):
parsed_response += f"{i+1}. {task['objective']}\n"
parsed_response += f"Task type: {task['type']} "
parsed_response += f"Priority: {task['priority']}\n"
parsed_response += f"Ready Criteria:\n"
for j, criteria in enumerate(task["ready_criteria"]):
parsed_response += f" {j+1}. {criteria}\n"
parsed_response += f"Acceptance Criteria:\n"
for j, criteria in enumerate(task["acceptance_criteria"]):
parsed_response += f" {j+1}. {criteria}\n"
parsed_response += "\n"
return parsed_response
def parse_next_ability(current_task, next_ability: dict) -> str:
parsed_response = f"Current Task: {current_task.objective}\n"
ability_args = ", ".join(
f"{k}={v}" for k, v in next_ability["ability_arguments"].items()
)
parsed_response += f"Next Ability: {next_ability['next_ability']}({ability_args})\n"
parsed_response += f"Motivation: {next_ability['motivation']}\n"
parsed_response += f"Self-criticism: {next_ability['self_criticism']}\n"
parsed_response += f"Reasoning: {next_ability['reasoning']}\n"
return parsed_response
def parse_ability_result(ability_result) -> str:
parsed_response = f"Ability: {ability_result['ability_name']}\n"
parsed_response += f"Ability Arguments: {ability_result['ability_args']}\n"
parsed_response = f"Ability Result: {ability_result['success']}\n"
parsed_response += f"Message: {ability_result['message']}\n"
parsed_response += f"Data: {ability_result['new_knowledge']}\n"
return parsed_response

View File

@@ -0,0 +1,45 @@
def parse_agent_name_and_goals(name_and_goals: dict) -> str:
parsed_response = f"Agent Name: {name_and_goals['agent_name']}\n"
parsed_response += f"Agent Role: {name_and_goals['agent_role']}\n"
parsed_response += "Agent Goals:\n"
for i, goal in enumerate(name_and_goals["agent_goals"]):
parsed_response += f"{i+1}. {goal}\n"
return parsed_response
def parse_agent_plan(plan: dict) -> str:
parsed_response = f"Agent Plan:\n"
for i, task in enumerate(plan["task_list"]):
parsed_response += f"{i+1}. {task['objective']}\n"
parsed_response += f"Task type: {task['type']} "
parsed_response += f"Priority: {task['priority']}\n"
parsed_response += f"Ready Criteria:\n"
for j, criteria in enumerate(task["ready_criteria"]):
parsed_response += f" {j+1}. {criteria}\n"
parsed_response += f"Acceptance Criteria:\n"
for j, criteria in enumerate(task["acceptance_criteria"]):
parsed_response += f" {j+1}. {criteria}\n"
parsed_response += "\n"
return parsed_response
def parse_next_ability(current_task, next_ability: dict) -> str:
parsed_response = f"Current Task: {current_task.objective}\n"
ability_args = ", ".join(
f"{k}={v}" for k, v in next_ability["ability_arguments"].items()
)
parsed_response += f"Next Ability: {next_ability['next_ability']}({ability_args})\n"
parsed_response += f"Motivation: {next_ability['motivation']}\n"
parsed_response += f"Self-criticism: {next_ability['self_criticism']}\n"
parsed_response += f"Reasoning: {next_ability['reasoning']}\n"
return parsed_response
def parse_ability_result(ability_result) -> str:
parsed_response = f"Ability: {ability_result['ability_name']}\n"
parsed_response += f"Ability Arguments: {ability_result['ability_args']}\n"
parsed_response += f"Ability Result: {ability_result['success']}\n"
parsed_response += f"Message: {ability_result['message']}\n"
parsed_response += f"Data: {ability_result['new_knowledge']}\n"
return parsed_response
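
For reference, a hedged usage sketch of the relocated parsers; the dict shape below is inferred from the f-strings above, not from a documented schema:

```python
from autogpt.core.runner.client_lib.parser import parse_agent_plan

# Sample plan shaped the way parse_agent_plan() reads it (shape inferred, illustrative only).
sample_plan = {
    "task_list": [
        {
            "objective": "Summarize the quarterly report",
            "type": "write",
            "priority": 1,
            "ready_criteria": ["Report has been downloaded"],
            "acceptance_criteria": ["Summary is under 200 words"],
        }
    ]
}
print(parse_agent_plan(sample_plan))
```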

View File

@@ -2,7 +2,7 @@
import ast
import json
import os.path
from typing import Any
from typing import Any, Literal
from jsonschema import Draft7Validator
@@ -12,7 +12,7 @@ from autogpt.logs import logger
LLM_DEFAULT_RESPONSE_FORMAT = "llm_response_format_1"
def extract_json_from_response(response_content: str) -> dict:
def extract_dict_from_response(response_content: str) -> dict[str, Any]:
# Sometimes the response includes the JSON in a code block with ```
if response_content.startswith("```") and response_content.endswith("```"):
# Discard the first and last ```, then re-join in case the response naturally included ```
@@ -33,16 +33,19 @@ def llm_response_schema(
) -> dict[str, Any]:
filename = os.path.join(os.path.dirname(__file__), f"{schema_name}.json")
with open(filename, "r") as f:
json_schema = json.load(f)
try:
json_schema = json.load(f)
except Exception as e:
raise RuntimeError(f"Failed to load JSON schema: {e}")
if config.openai_functions:
del json_schema["properties"]["command"]
json_schema["required"].remove("command")
return json_schema
def validate_json(
json_object: object, config: Config, schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT
) -> bool:
def validate_dict(
object: object, config: Config, schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT
) -> tuple[Literal[True], None] | tuple[Literal[False], list]:
"""
:param schema_name: the name of the response schema to validate against
:type schema_name: str
@@ -50,24 +53,23 @@ def validate_json(
Returns:
bool: Whether the json_object is valid or not
list: Errors found in the json_object, or None if the object is valid
"""
schema = llm_response_schema(config, schema_name)
validator = Draft7Validator(schema)
if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path):
if errors := sorted(validator.iter_errors(object), key=lambda e: e.path):
for error in errors:
logger.debug(f"JSON Validation Error: {error}")
if config.debug_mode:
logger.error(
json.dumps(json_object, indent=4)
) # Replace 'json_object' with the variable containing the JSON data
logger.error(json.dumps(object, indent=4))
logger.error("The following issues were found:")
for error in errors:
logger.error(f"Error: {error.message}")
return False
return False, errors
logger.debug("The JSON object is valid.")
return True
return True, None
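
Call sites change shape accordingly: instead of a bare boolean they now unpack a `(valid, errors)` tuple. A hedged call-site sketch, where `response_content` and `config` are assumed to exist in the caller's scope:

```python
from autogpt.json_utils.utilities import extract_dict_from_response, validate_dict

assistant_reply_dict = extract_dict_from_response(response_content)
valid, errors = validate_dict(assistant_reply_dict, config)
if not valid:
    # errors is a list of jsonschema validation errors
    raise ValueError(f"Invalid LLM response: {[e.message for e in errors]}")
```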

View File

@@ -1,6 +1,7 @@
from autogpt.llm.base import (
ChatModelInfo,
ChatModelResponse,
ChatSequence,
EmbeddingModelInfo,
EmbeddingModelResponse,
LLMResponse,
@@ -10,6 +11,7 @@ from autogpt.llm.base import (
__all__ = [
"Message",
"ChatSequence",
"ModelInfo",
"ChatModelInfo",
"EmbeddingModelInfo",

View File

@@ -1,203 +0,0 @@
from __future__ import annotations
import time
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from autogpt.agents.agent import Agent
from autogpt.config import Config
from autogpt.llm.api_manager import ApiManager
from autogpt.llm.base import ChatSequence, Message
from autogpt.llm.providers.openai import (
count_openai_functions_tokens,
get_openai_command_specs,
)
from autogpt.llm.utils import count_message_tokens, create_chat_completion
from autogpt.logs import CURRENT_CONTEXT_FILE_NAME, logger
# TODO: Change debug from hardcode to argument
def chat_with_ai(
config: Config,
agent: Agent,
system_prompt: str,
triggering_prompt: str,
token_limit: int,
model: str | None = None,
):
"""
Interact with the OpenAI API, sending the prompt, user input,
message history, and permanent memory.
Args:
config (Config): The config to use.
agent (Agent): The agent to use.
system_prompt (str): The prompt explaining the rules to the AI.
triggering_prompt (str): The input from the user.
token_limit (int): The maximum number of tokens allowed in the API call.
model (str, optional): The model to use. By default, the config.smart_llm will be used.
Returns:
str: The AI's response.
"""
if model is None:
model = config.smart_llm
# Reserve 1000 tokens for the response
logger.debug(f"Token limit: {token_limit}")
send_token_limit = token_limit - 1000
# if len(agent.history) == 0:
# relevant_memory = ""
# else:
# recent_history = agent.history[-5:]
# shuffle(recent_history)
# relevant_memories = agent.memory.get_relevant(
# str(recent_history), 5
# )
# if relevant_memories:
# shuffle(relevant_memories)
# relevant_memory = str(relevant_memories)
# logger.debug(f"Memory Stats: {agent.memory.get_stats()}")
relevant_memory = []
message_sequence = ChatSequence.for_model(
model,
[
Message("system", system_prompt),
Message("system", f"The current time and date is {time.strftime('%c')}"),
# Message(
# "system",
# f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
# ),
],
)
# Count the currently used tokens
current_tokens_used = message_sequence.token_length
insertion_index = len(message_sequence)
# Account for tokens used by OpenAI functions
openai_functions = None
if agent.config.openai_functions:
openai_functions = get_openai_command_specs(agent.command_registry)
functions_tlength = count_openai_functions_tokens(openai_functions, model)
current_tokens_used += functions_tlength
logger.debug(f"OpenAI Functions take up {functions_tlength} tokens in API call")
# Account for user input (appended later)
user_input_msg = Message("user", triggering_prompt)
current_tokens_used += count_message_tokens(user_input_msg, model)
current_tokens_used += agent.history.max_summary_tlength # Reserve space
current_tokens_used += 500 # Reserve space for the openai functions TODO improve
# Add historical Messages until the token limit is reached
# or there are no more messages to add.
for cycle in reversed(list(agent.history.per_cycle())):
messages_to_add = [msg for msg in cycle if msg is not None]
tokens_to_add = count_message_tokens(messages_to_add, model)
if current_tokens_used + tokens_to_add > send_token_limit:
break
# Add the most recent message to the start of the chain,
# after the system prompts.
message_sequence.insert(insertion_index, *messages_to_add)
current_tokens_used += tokens_to_add
# Update & add summary of trimmed messages
if len(agent.history) > 0:
new_summary_message, trimmed_messages = agent.history.trim_messages(
current_message_chain=list(message_sequence), config=agent.config
)
tokens_to_add = count_message_tokens(new_summary_message, model)
message_sequence.insert(insertion_index, new_summary_message)
current_tokens_used += tokens_to_add - agent.history.max_summary_tlength
# FIXME: uncomment when memory is back in use
# memory_store = get_memory(config)
# for _, ai_msg, result_msg in agent.history.per_cycle(trimmed_messages):
# memory_to_add = MemoryItem.from_ai_action(ai_msg, result_msg)
# logger.debug(f"Storing the following memory:\n{memory_to_add.dump()}")
# memory_store.add(memory_to_add)
api_manager = ApiManager()
# inform the AI about its remaining budget (if it has one)
if api_manager.get_total_budget() > 0.0:
remaining_budget = api_manager.get_total_budget() - api_manager.get_total_cost()
if remaining_budget < 0:
remaining_budget = 0
budget_message = f"Your remaining API budget is ${remaining_budget:.3f}" + (
" BUDGET EXCEEDED! SHUT DOWN!\n\n"
if remaining_budget == 0
else " Budget very nearly exceeded! Shut down gracefully!\n\n"
if remaining_budget < 0.005
else " Budget nearly exceeded. Finish up.\n\n"
if remaining_budget < 0.01
else "\n\n"
)
logger.debug(budget_message)
message_sequence.add("system", budget_message)
current_tokens_used += count_message_tokens(message_sequence[-1], model)
# Append user input, the length of this is accounted for above
message_sequence.append(user_input_msg)
plugin_count = len(config.plugins)
for i, plugin in enumerate(config.plugins):
if not plugin.can_handle_on_planning():
continue
plugin_response = plugin.on_planning(
agent.ai_config.prompt_generator, message_sequence.raw()
)
if not plugin_response or plugin_response == "":
continue
tokens_to_add = count_message_tokens(Message("system", plugin_response), model)
if current_tokens_used + tokens_to_add > send_token_limit:
logger.debug(f"Plugin response too long, skipping: {plugin_response}")
logger.debug(f"Plugins remaining at stop: {plugin_count - i}")
break
message_sequence.add("system", plugin_response)
current_tokens_used += tokens_to_add
# Calculate remaining tokens
tokens_remaining = token_limit - current_tokens_used
# assert tokens_remaining >= 0, "Tokens remaining is negative.
# This should never happen, please submit a bug report at
# https://www.github.com/Torantulino/Auto-GPT"
# Debug print the current context
logger.debug(f"Token limit: {token_limit}")
logger.debug(f"Send Token Count: {current_tokens_used}")
logger.debug(f"Tokens remaining for response: {tokens_remaining}")
logger.debug("------------ CONTEXT SENT TO AI ---------------")
for message in message_sequence:
# Skip printing the prompt
if message.role == "system" and message.content == system_prompt:
continue
logger.debug(f"{message.role.capitalize()}: {message.content}")
logger.debug("")
logger.debug("----------- END OF CONTEXT ----------------")
agent.log_cycle_handler.log_cycle(
agent.ai_name,
agent.created_at,
agent.cycle_count,
message_sequence.raw(),
CURRENT_CONTEXT_FILE_NAME,
)
# TODO: use a model defined elsewhere, so that model can contain
# temperature and other settings we care about
assistant_reply = create_chat_completion(
prompt=message_sequence,
config=agent.config,
functions=openai_functions,
max_tokens=tokens_remaining,
)
# Update full message history
agent.history.append(user_input_msg)
agent.history.add("assistant", assistant_reply.content, "ai_response")
return assistant_reply

View File

@@ -53,7 +53,7 @@ OPEN_AI_CHAT_MODELS = {
name="gpt-4-0613",
prompt_token_cost=0.03,
completion_token_cost=0.06,
max_tokens=8192,
max_tokens=8191,
),
ChatModelInfo(
name="gpt-4-32k-0314",

View File

@@ -183,25 +183,3 @@ def create_chat_completion(
if function_call
else None,
)
def check_model(
model_name: str,
model_type: Literal["smart_llm", "fast_llm"],
config: Config,
) -> str:
"""Check if model is available for use. If not, return gpt-3.5-turbo."""
openai_credentials = config.get_openai_credentials(model_name)
api_manager = ApiManager()
models = api_manager.get_models(**openai_credentials)
if any(model_name in m["id"] for m in models):
return model_name
logger.typewriter_log(
"WARNING: ",
Fore.YELLOW,
f"You do not have access to {model_name}. Setting {model_type} to "
f"gpt-3.5-turbo.",
)
return "gpt-3.5-turbo"

View File

@@ -12,4 +12,3 @@ from .log_cycle import (
LogCycleHandler,
)
from .logger import Logger, logger
from .utils import print_assistant_thoughts, remove_ansi_escape

View File

@@ -2,6 +2,7 @@ import json
import logging
import random
import time
from pathlib import Path
class ConsoleHandler(logging.StreamHandler):
@@ -38,7 +39,7 @@ class TypingConsoleHandler(logging.StreamHandler):
class JsonFileHandler(logging.FileHandler):
def __init__(self, filename: str, mode="a", encoding=None, delay=False):
def __init__(self, filename: str | Path, mode="a", encoding=None, delay=False):
super().__init__(filename, mode, encoding, delay)
def emit(self, record: logging.LogRecord):

View File

@@ -1,5 +1,6 @@
import json
import os
from pathlib import Path
from typing import Any, Dict, Union
from .logger import logger
@@ -23,38 +24,33 @@ class LogCycleHandler:
def __init__(self):
self.log_count_within_cycle = 0
@staticmethod
def create_directory_if_not_exists(directory_path: str) -> None:
if not os.path.exists(directory_path):
os.makedirs(directory_path, exist_ok=True)
def create_outer_directory(self, ai_name: str, created_at: str) -> str:
log_directory = logger.get_log_directory()
def create_outer_directory(self, ai_name: str, created_at: str) -> Path:
if os.environ.get("OVERWRITE_DEBUG") == "1":
outer_folder_name = "auto_gpt"
else:
ai_name_short = self.get_agent_short_name(ai_name)
outer_folder_name = f"{created_at}_{ai_name_short}"
outer_folder_path = os.path.join(log_directory, "DEBUG", outer_folder_name)
self.create_directory_if_not_exists(outer_folder_path)
outer_folder_path = logger.log_dir / "DEBUG" / outer_folder_name
if not outer_folder_path.exists():
outer_folder_path.mkdir(parents=True)
return outer_folder_path
def get_agent_short_name(self, ai_name: str) -> str:
return ai_name[:15].rstrip() if ai_name else DEFAULT_PREFIX
def create_inner_directory(self, outer_folder_path: str, cycle_count: int) -> str:
def create_inner_directory(self, outer_folder_path: Path, cycle_count: int) -> Path:
nested_folder_name = str(cycle_count).zfill(3)
nested_folder_path = os.path.join(outer_folder_path, nested_folder_name)
self.create_directory_if_not_exists(nested_folder_path)
nested_folder_path = outer_folder_path / nested_folder_name
if not nested_folder_path.exists():
nested_folder_path.mkdir()
return nested_folder_path
def create_nested_directory(
self, ai_name: str, created_at: str, cycle_count: int
) -> str:
) -> Path:
outer_folder_path = self.create_outer_directory(ai_name, created_at)
nested_folder_path = self.create_inner_directory(outer_folder_path, cycle_count)
@@ -75,14 +71,10 @@ class LogCycleHandler:
data (Any): The data to be logged.
file_name (str): The name of the file to save the logged data.
"""
nested_folder_path = self.create_nested_directory(
ai_name, created_at, cycle_count
)
cycle_log_dir = self.create_nested_directory(ai_name, created_at, cycle_count)
json_data = json.dumps(data, ensure_ascii=False, indent=4)
log_file_path = os.path.join(
nested_folder_path, f"{self.log_count_within_cycle}_{file_name}"
)
log_file_path = cycle_log_dir / f"{self.log_count_within_cycle}_{file_name}"
logger.log_json(json_data, log_file_path)
self.log_count_within_cycle += 1
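
The pattern replacing `os.path.join` + `create_directory_if_not_exists` throughout this file is plain `pathlib`; the equivalent idiom in isolation, with illustrative names:

```python
from pathlib import Path

# mkdir(parents=True, exist_ok=True) subsumes the old exists()/makedirs() check
# and is safe to call on every cycle.
cycle_log_dir = Path("logs") / "DEBUG" / "2023-07-28_Auto-GPT" / "001"
cycle_log_dir.mkdir(parents=True, exist_ok=True)
(cycle_log_dir / "0_current_context.json").write_text("{}", encoding="utf-8")
```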

View File

@@ -2,7 +2,7 @@
from __future__ import annotations
import logging
import os
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional
from colorama import Fore
@@ -25,10 +25,10 @@ class Logger(metaclass=Singleton):
def __init__(self):
# create log directory if it doesn't exist
this_files_dir_path = os.path.dirname(__file__)
log_dir = os.path.join(this_files_dir_path, "../logs")
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# TODO: use workdir from config
self.log_dir = Path(__file__).parent.parent.parent / "logs"
if not self.log_dir.exists():
self.log_dir.mkdir()
log_file = "activity.log"
error_file = "error.log"
@@ -46,9 +46,7 @@ class Logger(metaclass=Singleton):
self.console_handler.setFormatter(console_formatter)
# Info handler in activity.log
self.file_handler = logging.FileHandler(
os.path.join(log_dir, log_file), "a", "utf-8"
)
self.file_handler = logging.FileHandler(self.log_dir / log_file, "a", "utf-8")
self.file_handler.setLevel(logging.DEBUG)
info_formatter = AutoGptFormatter(
"%(asctime)s %(levelname)s %(title)s %(message_no_color)s"
@@ -56,9 +54,7 @@ class Logger(metaclass=Singleton):
self.file_handler.setFormatter(info_formatter)
# Error handler error.log
error_handler = logging.FileHandler(
os.path.join(log_dir, error_file), "a", "utf-8"
)
error_handler = logging.FileHandler(self.log_dir / error_file, "a", "utf-8")
error_handler.setLevel(logging.ERROR)
error_formatter = AutoGptFormatter(
"%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s"
@@ -179,13 +175,9 @@ class Logger(metaclass=Singleton):
self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText)
def log_json(self, data: Any, file_name: str) -> None:
# Define log directory
this_files_dir_path = os.path.dirname(__file__)
log_dir = os.path.join(this_files_dir_path, "../logs")
def log_json(self, data: Any, file_name: str | Path) -> None:
# Create a handler for JSON files
json_file_path = os.path.join(log_dir, file_name)
json_file_path = self.log_dir / file_name
json_data_handler = JsonFileHandler(json_file_path)
json_data_handler.setFormatter(JsonFormatter())
@@ -194,10 +186,5 @@ class Logger(metaclass=Singleton):
self.json_logger.debug(data)
self.json_logger.removeHandler(json_data_handler)
def get_log_directory(self) -> str:
this_files_dir_path = os.path.dirname(__file__)
log_dir = os.path.join(this_files_dir_path, "../../logs")
return os.path.abspath(log_dir)
logger = Logger()

View File

@@ -1,65 +0,0 @@
from __future__ import annotations
from typing import TYPE_CHECKING
from colorama import Fore
if TYPE_CHECKING:
from autogpt.config import Config
from .logger import logger
def print_assistant_thoughts(
ai_name: str,
assistant_reply_json_valid: dict,
config: Config,
) -> None:
from autogpt.speech import say_text
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
assistant_thoughts_speak = None
assistant_thoughts_criticism = None
assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
assistant_thoughts_text = remove_ansi_escape(assistant_thoughts.get("text", ""))
if assistant_thoughts:
assistant_thoughts_reasoning = remove_ansi_escape(
assistant_thoughts.get("reasoning", "")
)
assistant_thoughts_plan = remove_ansi_escape(assistant_thoughts.get("plan", ""))
assistant_thoughts_criticism = remove_ansi_escape(
assistant_thoughts.get("criticism", "")
)
assistant_thoughts_speak = remove_ansi_escape(
assistant_thoughts.get("speak", "")
)
logger.typewriter_log(
f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text
)
logger.typewriter_log("REASONING:", Fore.YELLOW, str(assistant_thoughts_reasoning))
if assistant_thoughts_plan:
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
# If it's a list, join it into a string
if isinstance(assistant_thoughts_plan, list):
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
elif isinstance(assistant_thoughts_plan, dict):
assistant_thoughts_plan = str(assistant_thoughts_plan)
# Split the input_string using the newline character and dashes
lines = assistant_thoughts_plan.split("\n")
for line in lines:
line = line.lstrip("- ")
logger.typewriter_log("- ", Fore.GREEN, line.strip())
logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
# Speak the assistant's thoughts
if assistant_thoughts_speak:
if config.speak_mode:
say_text(assistant_thoughts_speak, config)
else:
logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")
def remove_ansi_escape(s: str) -> str:
return s.replace("\x1B", "")

View File

@@ -1,204 +0,0 @@
"""The application entry point. Can be invoked by a CLI or any other front end application."""
import logging
import sys
from pathlib import Path
from typing import Optional
from colorama import Fore, Style
from autogpt.agents import Agent
from autogpt.config.config import ConfigBuilder, check_openai_api_key
from autogpt.configurator import create_config
from autogpt.logs import logger
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.plugins import scan_plugins
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT, construct_main_ai_config
from autogpt.utils import (
get_current_git_branch,
get_latest_bulletin,
get_legal_warning,
markdown_to_ansi_style,
)
from autogpt.workspace import Workspace
from scripts.install_plugin_deps import install_plugin_dependencies
COMMAND_CATEGORIES = [
"autogpt.commands.execute_code",
"autogpt.commands.file_operations",
"autogpt.commands.web_search",
"autogpt.commands.web_selenium",
"autogpt.commands.task_statuses",
]
def run_auto_gpt(
continuous: bool,
continuous_limit: int,
ai_settings: str,
prompt_settings: str,
skip_reprompt: bool,
speak: bool,
debug: bool,
gpt3only: bool,
gpt4only: bool,
memory_type: str,
browser_name: str,
allow_downloads: bool,
skip_news: bool,
workspace_directory: str | Path,
install_plugin_deps: bool,
ai_name: Optional[str] = None,
ai_role: Optional[str] = None,
ai_goals: tuple[str] = tuple(),
):
# Configure logging before we do anything else.
logger.set_level(logging.DEBUG if debug else logging.INFO)
config = ConfigBuilder.build_config_from_env()
# HACK: This is a hack to allow the config into the logger without having to pass it around everywhere
# or import it directly.
logger.config = config
# TODO: fill in llm values here
check_openai_api_key(config)
create_config(
config,
continuous,
continuous_limit,
ai_settings,
prompt_settings,
skip_reprompt,
speak,
debug,
gpt3only,
gpt4only,
memory_type,
browser_name,
allow_downloads,
skip_news,
)
if config.continuous_mode:
for line in get_legal_warning().split("\n"):
logger.warn(markdown_to_ansi_style(line), "LEGAL:", Fore.RED)
if not config.skip_news:
motd, is_new_motd = get_latest_bulletin()
if motd:
motd = markdown_to_ansi_style(motd)
for motd_line in motd.split("\n"):
logger.info(motd_line, "NEWS:", Fore.GREEN)
if is_new_motd and not config.chat_messages_enabled:
input(
Fore.MAGENTA
+ Style.BRIGHT
+ "NEWS: Bulletin was updated! Press Enter to continue..."
+ Style.RESET_ALL
)
git_branch = get_current_git_branch()
if git_branch and git_branch != "stable":
logger.typewriter_log(
"WARNING: ",
Fore.RED,
f"You are running on `{git_branch}` branch "
"- this is not a supported branch.",
)
if sys.version_info < (3, 10):
logger.typewriter_log(
"WARNING: ",
Fore.RED,
"You are running on an older version of Python. "
"Some people have observed problems with certain "
"parts of Auto-GPT with this version. "
"Please consider upgrading to Python 3.10 or higher.",
)
if install_plugin_deps:
install_plugin_dependencies()
# TODO: have this directory live outside the repository (e.g. in a user's
# home directory) and have it come in as a command line argument or part of
# the env file.
workspace_directory = Workspace.get_workspace_directory(config, workspace_directory)
# HACK: doing this here to collect some globals that depend on the workspace.
Workspace.build_file_logger_path(config, workspace_directory)
config.plugins = scan_plugins(config, config.debug_mode)
# Create a CommandRegistry instance and scan default folder
command_registry = CommandRegistry()
logger.debug(
f"The following command categories are disabled: {config.disabled_command_categories}"
)
enabled_command_categories = [
x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories
]
logger.debug(
f"The following command categories are enabled: {enabled_command_categories}"
)
for command_category in enabled_command_categories:
command_registry.import_commands(command_category)
# Unregister commands that are incompatible with the current config
incompatible_commands = []
for command in command_registry.commands.values():
if callable(command.enabled) and not command.enabled(config):
command.enabled = False
incompatible_commands.append(command)
for command in incompatible_commands:
command_registry.unregister(command)
logger.debug(
f"Unregistering incompatible command: {command.name}, "
f"reason - {command.disabled_reason or 'Disabled by current config.'}"
)
ai_config = construct_main_ai_config(
config,
name=ai_name,
role=ai_role,
goals=ai_goals,
)
ai_config.command_registry = command_registry
ai_name = ai_config.ai_name
# print(prompt)
# Initialize variables
next_action_count = 0
# add chat plugins capable of report to logger
if config.chat_messages_enabled:
for plugin in config.plugins:
if hasattr(plugin, "can_handle_report") and plugin.can_handle_report():
logger.info(f"Loaded plugin into logger: {plugin.__class__.__name__}")
logger.chat_plugins.append(plugin)
# Initialize memory and make sure it is empty.
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(config)
memory.clear()
logger.typewriter_log(
"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
)
logger.typewriter_log("Using Browser:", Fore.GREEN, config.selenium_web_browser)
system_prompt = ai_config.construct_full_prompt(config)
if config.debug_mode:
logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt)
agent = Agent(
ai_name=ai_name,
memory=memory,
next_action_count=next_action_count,
command_registry=command_registry,
system_prompt=system_prompt,
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
workspace_directory=workspace_directory,
ai_config=ai_config,
config=config,
)
agent.start_interaction_loop()

View File

@@ -3,13 +3,13 @@ from __future__ import annotations
import copy
import json
from dataclasses import dataclass
from typing import TYPE_CHECKING, Optional
from typing import TYPE_CHECKING, Iterator, Optional
if TYPE_CHECKING:
from autogpt.agents import Agent
from autogpt.agents import Agent, BaseAgent
from autogpt.config import Config
from autogpt.config import Config
from autogpt.json_utils.utilities import extract_json_from_response
from autogpt.json_utils.utilities import extract_dict_from_response
from autogpt.llm.base import ChatSequence, Message
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
from autogpt.llm.utils import (
@@ -17,13 +17,18 @@ from autogpt.llm.utils import (
count_string_tokens,
create_chat_completion,
)
from autogpt.logs import PROMPT_SUMMARY_FILE_NAME, SUMMARY_FILE_NAME, logger
from autogpt.logs import (
PROMPT_SUMMARY_FILE_NAME,
SUMMARY_FILE_NAME,
LogCycleHandler,
logger,
)
@dataclass
class MessageHistory(ChatSequence):
max_summary_tlength: int = 500
agent: Optional[Agent] = None
agent: Optional[BaseAgent | Agent] = None
summary: str = "I was created"
last_trimmed_index: int = 0
@@ -80,7 +85,9 @@ Latest Development:
return new_summary_message, new_messages_not_in_chain
def per_cycle(self, messages: list[Message] | None = None):
def per_cycle(
self, messages: Optional[list[Message]] = None
) -> Iterator[tuple[Message | None, Message, Message]]:
"""
Yields:
Message: a message containing user input
@@ -98,7 +105,7 @@ Latest Development:
result_message = messages[i + 1]
try:
assert (
extract_json_from_response(ai_message.content) != {}
extract_dict_from_response(ai_message.content) != {}
), "AI response is not a valid JSON object"
assert result_message.type == "action_result"
@@ -153,7 +160,7 @@ Latest Development:
# Remove "thoughts" dictionary from "content"
try:
content_dict = extract_json_from_response(event.content)
content_dict = extract_dict_from_response(event.content)
if "thoughts" in content_dict:
del content_dict["thoughts"]
event.content = json.dumps(content_dict)
@@ -177,7 +184,7 @@ Latest Development:
)
max_input_tokens = summ_model.max_tokens - max_summary_length
summary_tlength = count_string_tokens(self.summary, summ_model.name)
batch = []
batch: list[Message] = []
batch_tlength = 0
# TODO: Put a cap on length of total new events and drop some previous events to
@@ -190,7 +197,7 @@ Latest Development:
> max_input_tokens - prompt_template_length - summary_tlength
):
# The batch is full. Summarize it and start a new one.
self.summarize_batch(batch, config, max_summary_length)
self._update_summary_with_batch(batch, config, max_summary_length)
summary_tlength = count_string_tokens(self.summary, summ_model.name)
batch = [event]
batch_tlength = event_tlength
@@ -200,19 +207,25 @@ Latest Development:
if batch:
# There's an unprocessed batch. Summarize it.
self.summarize_batch(batch, config, max_summary_length)
self._update_summary_with_batch(batch, config, max_summary_length)
return self.summary_message()
def summarize_batch(
def _update_summary_with_batch(
self, new_events_batch: list[Message], config: Config, max_output_length: int
):
) -> None:
prompt = MessageHistory.SUMMARIZATION_PROMPT.format(
summary=self.summary, new_events=new_events_batch
)
prompt = ChatSequence.for_model(config.fast_llm, [Message("user", prompt)])
if self.agent:
if (
self.agent is not None
and hasattr(self.agent, "created_at")
and isinstance(
getattr(self.agent, "log_cycle_handler", None), LogCycleHandler
)
):
self.agent.log_cycle_handler.log_cycle(
self.agent.ai_config.ai_name,
self.agent.created_at,
@@ -225,7 +238,13 @@ Latest Development:
prompt, config, max_tokens=max_output_length
).content
if self.agent:
if (
self.agent is not None
and hasattr(self.agent, "created_at")
and isinstance(
getattr(self.agent, "log_cycle_handler", None), LogCycleHandler
)
):
self.agent.log_cycle_handler.log_cycle(
self.agent.ai_config.ai_name,
self.agent.created_at,
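
The guard duplicated above can be read as one predicate; in isolation, a sketch of the check (not new behaviour):

```python
from autogpt.logs import LogCycleHandler


def can_log_cycle(agent) -> bool:
    """The defensive check used before touching the agent's log-cycle machinery."""
    return (
        agent is not None
        and hasattr(agent, "created_at")
        and isinstance(getattr(agent, "log_cycle_handler", None), LogCycleHandler)
    )
```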

View File

@@ -4,6 +4,7 @@ import dataclasses
import json
from typing import Literal
import ftfy
import numpy as np
from autogpt.config import Config
@@ -43,6 +44,9 @@ class MemoryItem:
):
logger.debug(f"Memorizing text:\n{'-'*32}\n{text}\n{'-'*32}\n")
# Fix encoding, e.g. removing unicode surrogates (see issue #778)
text = ftfy.fix_text(text)
chunks = [
chunk
for chunk, _ in (
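
The `ftfy.fix_text` call above is what repairs mojibake and strips unpaired unicode surrogates before the text is chunked and memorized. A quick standalone demo:

```python
import ftfy

# Classic mojibake: UTF-8 bytes that were decoded as Latin-1 somewhere upstream.
print(ftfy.fix_text("âœ” No problems"))  # -> "✔ No problems"
```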

View File

@@ -29,8 +29,7 @@ class JSONFileMemory(VectorMemoryProvider):
Returns:
None
"""
workspace_path = Path(config.workspace_path)
self.file_path = workspace_path / f"{config.memory_index}.json"
self.file_path = config.workspace_path / f"{config.memory_index}.json"
self.file_path.touch()
logger.debug(
f"Initialized {__class__.__name__} with index path {self.file_path}"

View File

@@ -1,3 +1,4 @@
from contextlib import suppress
from typing import Any, overload
import numpy as np
@@ -12,12 +13,12 @@ Embedding = list[np.float32] | np.ndarray[Any, np.dtype[np.float32]]
@overload
def get_embedding(input: str | TText) -> Embedding:
def get_embedding(input: str | TText, config: Config) -> Embedding:
...
@overload
def get_embedding(input: list[str] | list[TText]) -> list[Embedding]:
def get_embedding(input: list[str] | list[TText], config: Config) -> list[Embedding]:
...
@@ -37,9 +38,16 @@ def get_embedding(
if isinstance(input, str):
input = input.replace("\n", " ")
with suppress(NotImplementedError):
return _get_embedding_with_plugin(input, config)
elif multiple and isinstance(input[0], str):
input = [text.replace("\n", " ") for text in input]
with suppress(NotImplementedError):
return [_get_embedding_with_plugin(i, config) for i in input]
model = config.embedding_model
kwargs = {"model": model}
kwargs.update(config.get_openai_credentials(model))
@@ -49,8 +57,6 @@ def get_embedding(
f" with model '{model}'"
+ (f" via Azure deployment '{kwargs['engine']}'" if config.use_azure else "")
)
if config.use_azure:
breakpoint()
embeddings = iopenai.create_embedding(
input,
@@ -62,3 +68,13 @@ def get_embedding(
embeddings = sorted(embeddings, key=lambda x: x["index"])
return [d["embedding"] for d in embeddings]
def _get_embedding_with_plugin(text: str, config: Config) -> Embedding:
for plugin in config.plugins:
if plugin.can_handle_text_embedding(text):
embedding = plugin.handle_text_embedding(text)
if embedding is not None:
return embedding
raise NotImplementedError
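
A hypothetical plugin implementing the two hooks this helper relies on might look like the sketch below; it is not a real plugin, and actual plugins also subclass `AutoGPTPluginTemplate`:

```python
class MyEmbeddingPlugin:
    """Hypothetical sketch of the hooks used by _get_embedding_with_plugin."""

    def can_handle_text_embedding(self, text: str) -> bool:
        # Opt in unconditionally; a real plugin could inspect `text` first.
        return True

    def handle_text_embedding(self, text: str) -> list[float]:
        # Return a fixed-size vector; a real plugin would call its own model.
        return [0.0] * 1536
```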

View File

@@ -198,18 +198,20 @@ class BaseOpenAIPlugin(AutoGPTPluginTemplate):
def can_handle_text_embedding(self, text: str) -> bool:
"""This method is called to check that the plugin can
handle the text_embedding method.
Args:
text (str): The text to be converted to an embedding.
Returns:
bool: True if the plugin can handle the text_embedding method."""
return False
def handle_text_embedding(self, text: str) -> list:
"""This method is called when the chat completion is done.
Args:
text (str): The text to be converted to an embedding.
Returns:
list: The text embedding.
bool: True if the plugin can handle the text_embedding method."""
return False
def handle_text_embedding(self, text: str) -> list[float]:
"""This method is called to create a text embedding.
Args:
text (str): The text to be converted to an embedding.
Returns:
list[float]: The created embedding vector.
"""
def can_handle_user_input(self, user_input: str) -> bool:

View File

@@ -23,10 +23,6 @@ if TYPE_CHECKING:
from autogpt.logs import logger
from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin
DEFAULT_PLUGINS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "..", "..", "plugins_config.yaml"
)
def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
"""
@@ -234,7 +230,11 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl
plugin_module_name = plugin_module_path[-1]
qualified_module_name = ".".join(plugin_module_path)
__import__(qualified_module_name)
try:
__import__(qualified_module_name)
except Exception as e:
logger.error(f"Failed to load {qualified_module_name}: {e}")
continue
plugin = sys.modules[qualified_module_name]
if not plugins_config.is_enabled(plugin_module_name):
@@ -258,7 +258,10 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl
module = Path(module)
logger.debug(f"Zipped Plugin: {plugin}, Module: {module}")
zipped_package = zipimporter(str(plugin))
zipped_module = zipped_package.load_module(str(module.parent))
try:
zipped_module = zipped_package.load_module(str(module.parent))
except Exception as e:
logger.error(f"Failed to load {str(module.parent)}: {e}")
continue
for key in dir(zipped_module):
if key.startswith("__"):
@@ -291,9 +294,11 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl
f"Zipped plugins should use the class name ({plugin_name}) as the key."
)
else:
if a_module.__name__ != "AutoGPTPluginTemplate":
if (
module_name := getattr(a_module, "__name__", str(a_module))
) != "AutoGPTPluginTemplate":
logger.debug(
f"Skipping '{key}' because it doesn't subclass AutoGPTPluginTemplate."
f"Skipping '{module_name}' because it doesn't subclass AutoGPTPluginTemplate."
)
# OpenAI plugins

View File

@@ -1,6 +1,6 @@
from __future__ import annotations
import os
from pathlib import Path
from typing import Union
import yaml
@@ -28,7 +28,7 @@ class PluginsConfig(BaseModel):
@classmethod
def load_config(
cls,
plugins_config_file: str,
plugins_config_file: Path,
plugins_denylist: list[str],
plugins_allowlist: list[str],
) -> "PluginsConfig":
@@ -56,11 +56,11 @@ class PluginsConfig(BaseModel):
@classmethod
def deserialize_config_file(
cls,
plugins_config_file: str,
plugins_config_file: Path,
plugins_denylist: list[str],
plugins_allowlist: list[str],
) -> dict[str, PluginConfig]:
if not os.path.exists(plugins_config_file):
if not plugins_config_file.is_file():
logger.warn("plugins_config.yaml does not exist, creating base config.")
cls.create_empty_plugins_config(
plugins_config_file,
@@ -87,7 +87,7 @@ class PluginsConfig(BaseModel):
@staticmethod
def create_empty_plugins_config(
plugins_config_file: str,
plugins_config_file: Path,
plugins_denylist: list[str],
plugins_allowlist: list[str],
):
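
With the `str` to `Path` switch, callers are expected to hand in an already-resolved path. A hedged call sketch (the list contents are placeholders):

```python
from pathlib import Path

from autogpt.plugins.plugins_config import PluginsConfig

# The config file path is resolved against the Auto-GPT root before loading.
plugins_config = PluginsConfig.load_config(
    Path.cwd() / "plugins_config.yaml",
    plugins_denylist=[],
    plugins_allowlist=[],
)
```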

View File

@@ -1,15 +1,6 @@
from typing import Optional
from colorama import Fore
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config
from autogpt.config.prompt_config import PromptConfig
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import logger
from autogpt.prompts.generator import PromptGenerator
from autogpt.setup import prompt_user
from autogpt.utils import clean_input
DEFAULT_TRIGGERING_PROMPT = "Determine exactly one command to use, and respond using the JSON schema specified previously:"
@@ -42,91 +33,3 @@ def build_default_prompt_generator(config: Config) -> PromptGenerator:
prompt_generator.add_performance_evaluation(performance_evaluation)
return prompt_generator
def construct_main_ai_config(
config: Config,
name: Optional[str] = None,
role: Optional[str] = None,
goals: tuple[str] = tuple(),
) -> AIConfig:
"""Construct the prompt for the AI to respond to
Returns:
str: The prompt string
"""
ai_config = AIConfig.load(config.ai_settings_file)
# Apply overrides
if name:
ai_config.ai_name = name
if role:
ai_config.ai_role = role
if goals:
ai_config.ai_goals = list(goals)
if (
all([name, role, goals])
or config.skip_reprompt
and all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals])
):
logger.typewriter_log("Name :", Fore.GREEN, ai_config.ai_name)
logger.typewriter_log("Role :", Fore.GREEN, ai_config.ai_role)
logger.typewriter_log("Goals:", Fore.GREEN, f"{ai_config.ai_goals}")
logger.typewriter_log(
"API Budget:",
Fore.GREEN,
"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}",
)
elif all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals]):
logger.typewriter_log(
"Welcome back! ",
Fore.GREEN,
f"Would you like me to return to being {ai_config.ai_name}?",
speak_text=True,
)
should_continue = clean_input(
config,
f"""Continue with the last settings?
Name: {ai_config.ai_name}
Role: {ai_config.ai_role}
Goals: {ai_config.ai_goals}
API Budget: {"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}"}
Continue ({config.authorise_key}/{config.exit_key}): """,
)
if should_continue.lower() == config.exit_key:
ai_config = AIConfig()
if any([not ai_config.ai_name, not ai_config.ai_role, not ai_config.ai_goals]):
ai_config = prompt_user(config)
ai_config.save(config.ai_settings_file)
if config.restrict_to_workspace:
logger.typewriter_log(
"NOTE:All files/directories created by this agent can be found inside its workspace at:",
Fore.YELLOW,
f"{config.workspace_path}",
)
# set the total api budget
api_manager = ApiManager()
api_manager.set_total_budget(ai_config.api_budget)
# Agent Created, print message
logger.typewriter_log(
ai_config.ai_name,
Fore.LIGHTBLUE_EX,
"has been created with the following details:",
speak_text=True,
)
# Print the ai_config details
# Name
logger.typewriter_log("Name:", Fore.GREEN, ai_config.ai_name, speak_text=False)
# Role
logger.typewriter_log("Role:", Fore.GREEN, ai_config.ai_role, speak_text=False)
# Goals
logger.typewriter_log("Goals:", Fore.GREEN, "", speak_text=False)
for goal in ai_config.ai_goals:
logger.typewriter_log("-", Fore.GREEN, goal, speak_text=False)
return ai_config

View File

@@ -42,12 +42,21 @@ class Spinner:
sys.stdout.write(f"{next(self.spinner)} {self.message}\r")
sys.stdout.flush()
def __enter__(self):
"""Start the spinner"""
def start(self):
self.running = True
self.spinner_thread = threading.Thread(target=self.spin)
self.spinner_thread.start()
def stop(self):
self.running = False
if self.spinner_thread is not None:
self.spinner_thread.join()
sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
sys.stdout.flush()
def __enter__(self):
"""Start the spinner"""
self.start()
return self
def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
@@ -58,19 +67,4 @@ class Spinner:
exc_value (Exception): The exception value.
exc_traceback (Exception): The exception traceback.
"""
self.running = False
if self.spinner_thread is not None:
self.spinner_thread.join()
sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
sys.stdout.flush()
def update_message(self, new_message, delay=0.1):
"""Update the spinner message
Args:
new_message (str): New message to display.
delay (float): The delay in seconds between each spinner update.
"""
self.delay = delay
self.message = new_message
if self.plain_output:
self.print_message()
self.stop()
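
With `start()`/`stop()` extracted, the context-manager protocol becomes a thin wrapper, and the spinner can also be driven manually. A usage sketch; the module path and constructor signature are assumed, as neither is shown in this hunk:

```python
import time

from autogpt.spinner import Spinner  # module path assumed

# Context-manager form: __enter__ calls start(), __exit__ calls stop().
with Spinner("Thinking..."):
    time.sleep(1)  # stand-in for a long-running call

# Manual form, now possible without invoking __enter__/__exit__ directly.
spinner = Spinner("Downloading...")
spinner.start()
time.sleep(1)
spinner.stop()
```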

View File

@@ -55,7 +55,11 @@ def clean_input(config: Config, prompt: str = "", talk=False):
# ask for input, default when just pressing Enter is y
logger.info("Asking user via keyboard...")
answer = session.prompt(ANSI(prompt))
# handle_sigint must be set to False so that the signal handler in
# autogpt/main.py can be employed properly. This refers to
# https://github.com/Significant-Gravitas/Auto-GPT/pull/4799/files/3966cdfd694c2a80c0333823c3bc3da090f85ed3#r1264278776
answer = session.prompt(ANSI(prompt), handle_sigint=False)
return answer
except KeyboardInterrupt:
logger.info("You interrupted Auto-GPT")
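
Passing `handle_sigint=False` keeps prompt_toolkit from swallowing Ctrl-C, so the process-wide handler referenced in the comment above fires instead. A hedged sketch of such a handler; the real one coordinates agent shutdown and is more involved:

```python
import signal
import sys


def graceful_sigint(signum, frame):
    # Placeholder behaviour only.
    print("\nInterrupted. Quitting...")
    sys.exit(0)


signal.signal(signal.SIGINT, graceful_sigint)
```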

View File

@@ -19,7 +19,7 @@ from autogpt.logs import logger
class Workspace:
"""A class that represents a workspace for an AutoGPT agent."""
NULL_BYTES = ["\0", "\000", "\x00", r"\z", "\u0000", "%00"]
NULL_BYTES = ["\0", "\000", "\x00", "\u0000"]
def __init__(self, workspace_root: str | Path, restrict_to_workspace: bool):
self._root = self._sanitize_path(workspace_root)
@@ -144,7 +144,7 @@ class Workspace:
return full_path
@staticmethod
def build_file_logger_path(config: Config, workspace_directory: Path):
def set_file_logger_path(config: Config, workspace_directory: Path):
file_logger_path = workspace_directory / "file_logger.txt"
if not file_logger_path.exists():
with file_logger_path.open(mode="w", encoding="utf-8") as f:
@@ -152,15 +152,13 @@ class Workspace:
config.file_logger_path = str(file_logger_path)
@staticmethod
def get_workspace_directory(
def set_workspace_directory(
config: Config, workspace_directory: Optional[str | Path] = None
):
) -> None:
if workspace_directory is None:
workspace_directory = Path(__file__).parent / "auto_gpt_workspace"
workspace_directory = config.workdir / "auto_gpt_workspace"
elif type(workspace_directory) == str:
workspace_directory = Path(workspace_directory)
# TODO: pass in the ai_settings file and the env file and have them cloned into
# the workspace directory so we can bind them to the agent.
workspace_directory = Workspace.make_workspace(workspace_directory)
config.workspace_path = str(workspace_directory)
return workspace_directory
config.workspace_path = Workspace.make_workspace(workspace_directory)
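
Both helpers now mutate the config in place instead of returning paths, which changes the bootstrapping order at call sites (mirrored by the benchmark changes below). A sketch:

```python
from pathlib import Path

from autogpt.config import ConfigBuilder
from autogpt.workspace import Workspace

config = ConfigBuilder.build_config_from_env(workdir=Path.cwd())
Workspace.set_workspace_directory(config)  # populates config.workspace_path
Workspace.set_file_logger_path(config, config.workspace_path)  # populates config.file_logger_path
```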

View File

@@ -1,44 +1,43 @@
from pathlib import Path
from autogpt.agents import Agent
from autogpt.app.main import run_interaction_loop
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, Config, ConfigBuilder
from autogpt.main import COMMAND_CATEGORIES
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.workspace import Workspace
PROJECT_DIR = Path().resolve()
def run_task(task) -> None:
agent = bootstrap_agent(task)
agent.start_interaction_loop()
run_interaction_loop(agent)
def bootstrap_agent(task):
config = ConfigBuilder.build_config_from_env()
config = ConfigBuilder.build_config_from_env(workdir=PROJECT_DIR)
config.continuous_mode = False
config.temperature = 0
config.plain_output = True
command_registry = get_command_registry(config)
config.memory_backend = "no_memory"
workspace_directory = Workspace.get_workspace_directory(config)
workspace_directory_path = Workspace.make_workspace(workspace_directory)
Workspace.build_file_logger_path(config, workspace_directory_path)
Workspace.set_workspace_directory(config)
Workspace.set_file_logger_path(config, config.workspace_path)
ai_config = AIConfig(
ai_name="Auto-GPT",
ai_role="a multi-purpose AI assistant.",
ai_goals=[task.user_input],
)
ai_config.command_registry = command_registry
system_prompt = ai_config.construct_full_prompt(config)
return Agent(
ai_name="Auto-GPT",
memory=get_memory(config),
command_registry=command_registry,
ai_config=ai_config,
config=config,
next_action_count=0,
system_prompt=system_prompt,
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
workspace_directory=str(workspace_directory_path),
)

View File

@@ -54,14 +54,10 @@ def kubernetes_agent(
system_prompt = ai_config.construct_full_prompt()
agent_test_config.set_continuous_mode(False)
agent = Agent(
# We also give the AI a name
ai_name="Kubernetes-Demo",
memory=memory_json_file,
full_message_history=[],
command_registry=command_registry,
config=ai_config,
next_action_count=0,
system_prompt=system_prompt,
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
workspace_directory=workspace.root,
)

View File

@@ -1,6 +1,6 @@
!!! warning
The Pinecone, Milvus and Weaviate memory backends were rendered incompatible
by work on the memory system, and have been removed in `master`.
The Pinecone, Milvus, Redis, and Weaviate memory backends were rendered incompatible
by work on the memory system, and have been removed.
Whether support will be added back in the future is subject to discussion,
feel free to pitch in: https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280
@@ -18,6 +18,12 @@ to the value that you want:
* `milvus` will use the milvus cache that you configured
* `weaviate` will use the weaviate cache that you configured
!!! warning
The Pinecone, Milvus, Redis, and Weaviate memory backends were rendered incompatible
by work on the memory system, and have been removed.
Whether support will be added back in the future is subject to discussion,
feel free to pitch in: https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280
## Memory Backend Setup
Links to memory backends
@@ -27,6 +33,12 @@ Links to memory backends
- [Redis](https://redis.io)
- [Weaviate](https://weaviate.io)
!!! warning
The Pinecone, Milvus, Redis, and Weaviate memory backends were rendered incompatible
by work on the memory system, and have been removed.
Whether support will be added back in the future is subject to discussion,
feel free to pitch in: https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280
### Redis Setup
!!! important
@@ -62,6 +74,12 @@ Links to memory backends
See [redis-stack-server](https://hub.docker.com/r/redis/redis-stack-server) for
setting a password and additional configuration.
!!! warning
The Pinecone, Milvus, Redis, and Weaviate memory backends were rendered incompatible
by work on the memory system, and have been removed.
Whether support will be added back in the future is subject to discussion,
feel free to pitch in: https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280
### 🌲 Pinecone API Key Setup
Pinecone lets you store vast amounts of vector-based memory, allowing the agent to load only relevant memories at any given time.
@@ -76,6 +94,12 @@ In the `.env` file set:
- `PINECONE_ENV` (example: `us-east4-gcp`)
- `MEMORY_BACKEND=pinecone`
!!! warning
The Pinecone, Milvus, Redis, and Weaviate memory backends were rendered incompatible
by work on the memory system, and have been removed.
Whether support will be added back in the future is subject to discussion,
feel free to pitch in: https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280
### Milvus Setup
[Milvus](https://milvus.io/) is an open-source, highly scalable vector database to store
@@ -114,6 +138,12 @@ deployed with docker, or as a cloud service provided by [Zilliz Cloud](https://z
- `MILVUS_COLLECTION` to change the collection name to use in Milvus.
Defaults to `autogpt`.
!!! warning
The Pinecone, Milvus, Redis, and Weaviate memory backends were rendered incompatible
by work on the memory system, and have been removed.
Whether support will be added back in the future is subject to discussion,
feel free to pitch in: https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280
### Weaviate Setup
[Weaviate](https://weaviate.io/) is an open-source vector database. It lets you store
data objects and vector embeddings from ML models, and scales seamlessly to billions of
@@ -154,6 +184,15 @@ View memory usage by using the `--debug` flag :)
## 🧠 Memory pre-seeding
!!! warning
Data ingestion is broken in v0.4.6 and possibly earlier versions. This is a known issue that will be addressed in future releases. Follow these issues for updates.
[Issue 4435](https://github.com/Significant-Gravitas/Auto-GPT/issues/4435)
[Issue 4024](https://github.com/Significant-Gravitas/Auto-GPT/issues/4024)
[Issue 2076](https://github.com/Significant-Gravitas/Auto-GPT/issues/2076)
Memory pre-seeding allows you to ingest files into memory and pre-seed it before running Auto-GPT.
``` shell
@@ -173,7 +212,7 @@ options:
# python data_ingestion.py --dir DataFolder --init --overlap 100 --max_length 2000
```
In the example above, the script initializes the memory, ingests all files within the `Auto-Gpt/autogpt/auto_gpt_workspace/DataFolder` directory into memory with an overlap between chunks of 100 and a maximum length of each chunk of 2000.
In the example above, the script initializes the memory, ingests all files within the `Auto-Gpt/auto_gpt_workspace/DataFolder` directory into memory with an overlap between chunks of 100 and a maximum length of each chunk of 2000.
Note that you can also use the `--file` argument to ingest a single file into memory and that data_ingestion.py will only ingest files within the `/auto_gpt_workspace` directory.

View File

@@ -4,9 +4,10 @@ Configuration is controlled through the `Config` object. You can set configurati
## Environment Variables
- `AI_SETTINGS_FILE`: Location of AI Settings file. Default: ai_settings.yaml
- `AI_SETTINGS_FILE`: Location of the AI Settings file relative to the Auto-GPT root directory. Default: ai_settings.yaml
- `AUDIO_TO_TEXT_PROVIDER`: Audio To Text Provider. Only option currently is `huggingface`. Default: huggingface
- `AUTHORISE_COMMAND_KEY`: Key response accepted when authorising commands. Default: y
- `AZURE_CONFIG_FILE`: Location of the Azure Config file relative to the Auto-GPT root directory. Default: azure.yaml
- `BROWSE_CHUNK_MAX_LENGTH`: When browsing website, define the length of chunks to summarize. Default: 3000
- `BROWSE_SPACY_LANGUAGE_MODEL`: [spaCy language model](https://spacy.io/usage/models) to use when creating chunks. Default: en_core_web_sm
- `CHAT_MESSAGES_ENABLED`: Enable chat messages. Optional
@@ -32,8 +33,8 @@ Configuration is controlled through the `Config` object. You can set configurati
- `OPENAI_API_KEY`: *REQUIRED*- Your [OpenAI API Key](https://platform.openai.com/account/api-keys).
- `OPENAI_ORGANIZATION`: Organization ID in OpenAI. Optional.
- `PLAIN_OUTPUT`: Plain output, which disables the spinner. Default: False
- `PLUGINS_CONFIG_FILE`: Path of plugins_config.yaml file. Default: plugins_config.yaml
- `PROMPT_SETTINGS_FILE`: Location of Prompt Settings file. Default: prompt_settings.yaml
- `PLUGINS_CONFIG_FILE`: Path of the Plugins Config file relative to the Auto-GPT root directory. Default: plugins_config.yaml
- `PROMPT_SETTINGS_FILE`: Location of the Prompt Settings file relative to the Auto-GPT root directory. Default: prompt_settings.yaml
- `REDIS_HOST`: Redis Host. Default: localhost
- `REDIS_PASSWORD`: Redis Password. Optional. Default:
- `REDIS_PORT`: Redis Port. Default: 6379

View File

@@ -51,7 +51,7 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt
- .env
profiles: ["exclude-from-up"]
volumes:
- ./auto_gpt_workspace:/app/autogpt/auto_gpt_workspace
- ./auto_gpt_workspace:/app/auto_gpt_workspace
- ./data:/app/data
## allow auto-gpt to write logs to disk
- ./logs:/app/logs

View File

@@ -1 +0,0 @@
from autogpt import main

View File

@@ -7,6 +7,7 @@ nav:
- Usage: usage.md
- Plugins: plugins.md
- Configuration:
- Options: configuration/options.md
- Search: configuration/search.md
- Memory: configuration/memory.md
- Voice: configuration/voice.md

View File

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
[project]
name = "agpt"
version = "0.4.5"
version = "0.4.6"
authors = [
{ name="Torantulino", email="support@agpt.co" },
]

View File

@@ -19,6 +19,7 @@ google-api-python-client #(https://developers.google.com/custom-search/v1/overvi
pinecone-client==2.2.1
redis
orjson==3.8.10
ftfy>=6.1.1
Pillow
selenium==4.1.4
webdriver-manager

View File

@@ -1,21 +0,0 @@
import unittest
import coverage
if __name__ == "__main__":
# Start coverage collection
cov = coverage.Coverage()
cov.start()
# Load all tests from the 'autogpt/tests' package
suite = unittest.defaultTestLoader.discover("./tests")
# Run the tests
unittest.TextTestRunner().run(suite)
# Stop coverage collection
cov.stop()
cov.save()
# Report the coverage
cov.report(show_missing=True)

View File

@@ -57,7 +57,7 @@ def test_debug_code_challenge_a(
output = execute_python_file(
get_workspace_path(workspace, TEST_FILE_PATH),
dummy_agent,
agent=dummy_agent,
)
assert "error" not in output.lower(), f"Errors found in output: {output}!"

View File

@@ -38,7 +38,7 @@ def setup_mock_input(monkeypatch: pytest.MonkeyPatch, cycle_count: int) -> None:
yield from input_sequence
gen = input_generator()
monkeypatch.setattr("autogpt.utils.session.prompt", lambda _: next(gen))
monkeypatch.setattr("autogpt.utils.session.prompt", lambda _, **kwargs: next(gen))
def setup_mock_log_cycle_agent_name(

View File

@@ -6,9 +6,8 @@ import pytest
import yaml
from pytest_mock import MockerFixture
from autogpt.agents.agent import Agent
from autogpt.agents import Agent
from autogpt.config import AIConfig, Config, ConfigBuilder
from autogpt.config.ai_config import AIConfig
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import logger
from autogpt.memory.vector import get_memory
@@ -49,10 +48,12 @@ def temp_plugins_config_file():
def config(
temp_plugins_config_file: str, mocker: MockerFixture, workspace: Workspace
) -> Config:
config = ConfigBuilder.build_config_from_env()
config = ConfigBuilder.build_config_from_env(workspace.root.parent)
if not os.environ.get("OPENAI_API_KEY"):
os.environ["OPENAI_API_KEY"] = "sk-dummy"
Workspace.set_workspace_directory(config, workspace.root)
# HACK: this is necessary to ensure PLAIN_OUTPUT takes effect
logger.config = config
@@ -85,7 +86,7 @@ def api_manager() -> ApiManager:
@pytest.fixture
def agent(config: Config, workspace: Workspace) -> Agent:
def agent(config: Config) -> Agent:
ai_config = AIConfig(
ai_name="Base",
ai_role="A base AI",
@@ -98,16 +99,10 @@ def agent(config: Config, workspace: Workspace) -> Agent:
memory_json_file = get_memory(config)
memory_json_file.clear()
system_prompt = ai_config.construct_full_prompt(config)
return Agent(
ai_name=ai_config.ai_name,
memory=memory_json_file,
command_registry=command_registry,
ai_config=ai_config,
config=config,
next_action_count=0,
system_prompt=system_prompt,
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
workspace_directory=workspace.root,
)

View File

@@ -4,7 +4,6 @@ from autogpt.agents import Agent
from autogpt.config import AIConfig, Config
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.workspace import Workspace
@pytest.fixture
@@ -20,7 +19,7 @@ def memory_json_file(config: Config):
@pytest.fixture
def dummy_agent(config: Config, memory_json_file, workspace: Workspace):
def dummy_agent(config: Config, memory_json_file):
command_registry = CommandRegistry()
ai_config = AIConfig(
@@ -33,15 +32,11 @@ def dummy_agent(config: Config, memory_json_file, workspace: Workspace):
ai_config.command_registry = command_registry
agent = Agent(
ai_name="Dummy Agent",
memory=memory_json_file,
command_registry=command_registry,
ai_config=ai_config,
config=config,
next_action_count=0,
system_prompt="dummy_prompt",
triggering_prompt="dummy triggering prompt",
workspace_directory=workspace.root,
)
return agent

View File

@@ -37,7 +37,7 @@ def test_execute_python_file(python_test_file: str, random_string: str, agent: A
def test_execute_python_code(random_code: str, random_string: str, agent: Agent):
ai_name = agent.ai_name
ai_name = agent.ai_config.ai_name
result: str = sut.execute_python_code(random_code, "test_code", agent=agent)
assert result.replace("\r", "") == f"Hello {random_string}!\n"
@@ -65,7 +65,7 @@ def test_execute_python_code_disallows_name_arg_path_traversal(
def test_execute_python_code_overwrites_file(random_code: str, agent: Agent):
ai_name = agent.ai_name
ai_name = agent.ai_config.ai_name
destination = os.path.join(
agent.config.workspace_path, ai_name, "executed_code", "test_code.py"
)

View File

@@ -2,8 +2,8 @@ from unittest.mock import patch
import pytest
from autogpt.app.setup import generate_aiconfig_automatic, prompt_user
from autogpt.config.ai_config import AIConfig
from autogpt.setup import generate_aiconfig_automatic, prompt_user
@pytest.mark.vcr

View File

@@ -54,6 +54,7 @@ def test_dummy_plugin_default_methods(dummy_plugin):
assert not dummy_plugin.can_handle_pre_command()
assert not dummy_plugin.can_handle_post_command()
assert not dummy_plugin.can_handle_chat_completion(None, None, None, None)
assert not dummy_plugin.can_handle_text_embedding(None)
assert dummy_plugin.on_response("hello") == "hello"
assert dummy_plugin.post_prompt(None) is None
@@ -77,3 +78,4 @@ def test_dummy_plugin_default_methods(dummy_plugin):
assert isinstance(post_command, str)
assert post_command == "upgraded successfully!"
assert dummy_plugin.handle_chat_completion(None, None, None, None) is None
assert dummy_plugin.handle_text_embedding(None) is None

View File

@@ -2,9 +2,10 @@ from autogpt.agents.agent import Agent, execute_command
def test_agent_initialization(agent: Agent):
assert agent.ai_name == "Base"
assert agent.ai_config.ai_name == "Base"
assert agent.history.messages == []
assert agent.next_action_count == 0
assert agent.cycle_budget is None
assert "You are Base" in agent.system_prompt
def test_execute_command_plugin(agent: Agent):

View File

@@ -8,8 +8,8 @@ from unittest.mock import patch
import pytest
from autogpt.app.configurator import GPT_3_MODEL, GPT_4_MODEL, create_config
from autogpt.config import Config, ConfigBuilder
from autogpt.configurator import GPT_3_MODEL, GPT_4_MODEL, create_config
from autogpt.workspace.workspace import Workspace
@@ -161,7 +161,7 @@ azure_model_map:
os.environ["USE_AZURE"] = "True"
os.environ["AZURE_CONFIG_FILE"] = str(config_file)
config = ConfigBuilder.build_config_from_env()
config = ConfigBuilder.build_config_from_env(workspace.root.parent)
assert config.openai_api_type == "azure"
assert config.openai_api_base == "https://dummy.openai.azure.com"

View File

@@ -15,31 +15,23 @@ from autogpt.memory.message_history import MessageHistory
@pytest.fixture
def agent(config: Config):
ai_name = "Test AI"
memory = MagicMock()
next_action_count = 0
command_registry = MagicMock()
ai_config = AIConfig(ai_name=ai_name)
system_prompt = "System prompt"
ai_config = AIConfig(ai_name="Test AI")
triggering_prompt = "Triggering prompt"
workspace_directory = "workspace_directory"
agent = Agent(
ai_name=ai_name,
memory=memory,
next_action_count=next_action_count,
command_registry=command_registry,
ai_config=ai_config,
config=config,
system_prompt=system_prompt,
triggering_prompt=triggering_prompt,
workspace_directory=workspace_directory,
)
return agent
def test_message_history_batch_summary(mocker, agent: Agent, config: Config):
history = MessageHistory.for_model(agent.config.smart_llm, agent=agent)
history = MessageHistory(agent.llm, agent=agent)
model = config.fast_llm
message_tlength = 0
message_count = 0

View File

@@ -71,7 +71,7 @@ def test_create_base_config(config: Config):
os.remove(config.plugins_config_file)
plugins_config = PluginsConfig.load_config(
plugins_config_file=config.plugins_config_file,
plugins_config_file=config.workdir / config.plugins_config_file,
plugins_denylist=config.plugins_denylist,
plugins_allowlist=config.plugins_allowlist,
)
@@ -107,7 +107,7 @@ def test_load_config(config: Config):
# Load the config from disk
plugins_config = PluginsConfig.load_config(
plugins_config_file=config.plugins_config_file,
plugins_config_file=config.workdir / config.plugins_config_file,
plugins_denylist=config.plugins_denylist,
plugins_allowlist=config.plugins_allowlist,
)

View File

@@ -47,24 +47,11 @@ def test_spinner_stops_spinning():
"""Tests that the spinner starts spinning and stops spinning without errors."""
with Spinner() as spinner:
time.sleep(1)
spinner.update_message(ALMOST_DONE_MESSAGE)
time.sleep(1)
assert spinner.running == False
def test_spinner_updates_message_and_still_spins():
"""Tests that the spinner message can be updated while the spinner is running and the spinner continues spinning."""
with Spinner() as spinner:
assert spinner.running == True
time.sleep(1)
spinner.update_message(ALMOST_DONE_MESSAGE)
time.sleep(1)
assert spinner.message == ALMOST_DONE_MESSAGE
assert spinner.running == False
assert not spinner.running
def test_spinner_can_be_used_as_context_manager():
"""Tests that the spinner can be used as a context manager."""
with Spinner() as spinner:
assert spinner.running == True
assert spinner.running == False
assert spinner.running
assert not spinner.running

View File

@@ -5,7 +5,7 @@ import pytest
import requests
from autogpt.config import Config
from autogpt.json_utils.utilities import extract_json_from_response, validate_json
from autogpt.json_utils.utilities import extract_dict_from_response, validate_dict
from autogpt.utils import (
get_bulletin_from_web,
get_current_git_branch,
@@ -187,22 +187,26 @@ def test_get_current_git_branch_failure(mock_repo):
def test_validate_json_valid(valid_json_response, config: Config):
assert validate_json(valid_json_response, config)
valid, errors = validate_dict(valid_json_response, config)
assert valid
assert errors is None
def test_validate_json_invalid(invalid_json_response, config: Config):
assert not validate_json(invalid_json_response, config)
valid, errors = validate_dict(invalid_json_response, config)
assert not valid
assert errors is not None
def test_extract_json_from_response(valid_json_response: dict):
emulated_response_from_openai = str(valid_json_response)
assert (
extract_json_from_response(emulated_response_from_openai) == valid_json_response
extract_dict_from_response(emulated_response_from_openai) == valid_json_response
)
def test_extract_json_from_response_wrapped_in_code_block(valid_json_response: dict):
emulated_response_from_openai = "```" + str(valid_json_response) + "```"
assert (
extract_json_from_response(emulated_response_from_openai) == valid_json_response
extract_dict_from_response(emulated_response_from_openai) == valid_json_response
)
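
Per the two tests above, the renamed `extract_dict_from_response` must accept a stringified Python dict, optionally wrapped in a Markdown code fence. A behavior-compatible sketch (not the project's actual implementation):

```python
import ast

def extract_dict_from_response_sketch(response: str) -> dict:
    # Strip a surrounding ``` code fence if the model added one.
    text = response.strip()
    if text.startswith("```") and text.endswith("```"):
        text = text[3:-3].strip()
    # str(dict) yields a Python literal (single quotes), not JSON,
    # so literal_eval is the safe way to parse it.
    return ast.literal_eval(text)
```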

View File

@@ -72,6 +72,10 @@ def patched_api_requestor(mocker: MockerFixture):
headers["AGENT-MODE"] = os.environ.get("AGENT_MODE")
headers["AGENT-TYPE"] = os.environ.get("AGENT_TYPE")
print(
f"[DEBUG] Outgoing API request: {headers}\n{data.decode() if data else None}"
)
# Add hash header for cheap & fast matching on cassette playback
headers["X-Content-Hash"] = sha256(
freeze_request_body(data), usedforsecurity=False