Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-08 22:58:01 -05:00)
refactor(forge): Clean forge (#7117)
Remove unused `forge` code and improve structure of `forge`.
* Put all Agent Protocol components together in `forge.agent_protocol` (new import paths are sketched after this list)
* ... including `forge.agent_protocol.database` (was `forge.db`)
* Remove duplicate/unused parts from `forge`
* `forge.actions`, containing old commands; replaced by `forge.components` from `autogpt`
* `forge/agent.py` (the old one, `ForgeAgent`)
* `forge/app.py`, which was used to serve and run the `ForgeAgent`
* `forge/db.py` (`ForgeDatabase`), which was used for `ForgeAgent`
* `forge/llm.py`, which has been replaced by the new `forge.llm` module, ported from `autogpt.core.resource.model_providers`
* `forge.memory`, which is not in use and not being maintained
* `forge.sdk`, much of which was moved into other modules and the rest is deprecated
* `AccessDeniedError`: unused
* `forge_log.py`: replaced with `logging`
* `validate_yaml_file`: not needed
* `ai_settings_file`, its loading logic, and the `AI_SETTINGS_FILE` env var: unused
* `prompt_settings_file`, its loading logic, and the `PROMPT_SETTINGS_FILE` env var: default directives are now provided by the `SystemComponent`
* `request_user_double_check`, which was only used in `AIDirectives.load`
* `TypingConsoleHandler`: not used
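
For code that still imports from the removed modules, the change mostly amounts to updating import paths. Below is a minimal sketch of the moves, assembled from the imports changed in the diff that follows; it is not an exhaustive list, and the exact symbol sets may differ per module.

```python
# Sketch of the import-path moves in this refactor, based on the imports
# changed in the diff below (not exhaustive).

# Agent Protocol pieces now live under forge.agent_protocol:
from forge.agent_protocol.api_router import base_router       # was forge.sdk.routes.agent_protocol
from forge.agent_protocol.database import AgentDB             # was forge.db / forge.sdk.db
from forge.agent_protocol.middlewares import AgentMiddleware  # was forge.sdk.middlewares
from forge.agent_protocol.models import Artifact, Step, TaskRequestBody  # was forge.sdk.model

# LLM providers come from the new forge.llm module
# (ported from autogpt.core.resource.model_providers):
from forge.llm.providers import ChatModelProvider, ModelProviderBudget
```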
Committed by GitHub
Parent: 2cca4fa47f
Commit: bcc5282aba

.gitignore (vendored, 2 changes)
@@ -6,8 +6,6 @@ auto_gpt_workspace/*
*.mpeg
.env
azure.yaml
ai_settings.yaml
last_run_ai_settings.yaml
.vscode
.idea/*
auto-gpt.json

@@ -4,7 +4,6 @@
*.template
*.yaml
*.yml
!prompt_settings.yaml

data/*
logs/*

@@ -44,12 +44,6 @@
## USER_AGENT - Define the user-agent used by the requests library to browse website (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"

## AI_SETTINGS_FILE - Specifies which AI Settings file to use, relative to the AutoGPT root directory. (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml

## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use, relative to the AutoGPT root directory. (defaults to prompt_settings.yaml)
# PROMPT_SETTINGS_FILE=prompt_settings.yaml

## AUTHORISE COMMAND KEY - Key to authorise commands
# AUTHORISE_COMMAND_KEY=y

autogpts/autogpt/.gitignore (vendored, 2 changes)
@@ -4,8 +4,6 @@ autogpt/*.json
*.mpeg
.env
azure.yaml
ai_settings.yaml
last_run_ai_settings.yaml
.vscode
.idea/*
auto-gpt.json

@@ -48,7 +48,6 @@ RUN poetry install --no-cache --no-root --without dev \
ONBUILD COPY autogpt/ ./autogpt
ONBUILD COPY scripts/ ./scripts
ONBUILD COPY plugins/ ./plugins
ONBUILD COPY prompt_settings.yaml ./prompt_settings.yaml
ONBUILD COPY README.md ./README.md
ONBUILD RUN mkdir ./data

@@ -64,11 +64,6 @@ Options:
-c, --continuous  Enable Continuous Mode
-y, --skip-reprompt  Skips the re-prompting messages at the
    beginning of the script
-C, --ai-settings FILE  Specifies which ai_settings.yaml file to
    use, relative to the AutoGPT root directory.
    Will also automatically skip the re-prompt.
-P, --prompt-settings FILE  Specifies which prompt_settings.yaml file to
    use.
-l, --continuous-limit INTEGER  Defines the number of times to run in
    continuous mode
--speak  Enable Speak Mode

@@ -118,8 +113,6 @@ Usage: python -m autogpt serve [OPTIONS]
agent for every task.

Options:
-P, --prompt-settings FILE  Specifies which prompt_settings.yaml file to
    use.
--debug  Enable Debug Mode
--gpt3only  Enable GPT3.5 Only Mode
--gpt4only  Enable GPT4 Only Mode

@@ -20,8 +20,7 @@ def create_agent(
) -> Agent:
if not task:
raise ValueError("No task specified for new agent")
if not directives:
directives = AIDirectives.from_file(app_config.prompt_settings_file)
directives = directives or AIDirectives()

agent = _configure_agent(
agent_id=agent_id,

@@ -2,7 +2,6 @@ from __future__ import annotations

from typing import TYPE_CHECKING

from forge.config.ai_directives import AIDirectives
from forge.file_storage.base import FileStorage

if TYPE_CHECKING:

@@ -21,7 +20,6 @@ async def generate_agent_for_task(
file_storage: FileStorage,
llm_provider: ChatModelProvider,
) -> Agent:
base_directives = AIDirectives.from_file(app_config.prompt_settings_file)
ai_profile, task_directives = await generate_agent_profile_for_task(
task=task,
app_config=app_config,

@@ -31,7 +29,7 @@ async def generate_agent_for_task(
agent_id=agent_id,
task=task,
ai_profile=ai_profile,
directives=base_directives + task_directives,
directives=task_directives,
app_config=app_config,
file_storage=file_storage,
llm_provider=llm_provider,

@@ -10,13 +10,10 @@ from fastapi import APIRouter, FastAPI, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import RedirectResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from forge.config.config import Config
from forge.file_storage import FileStorage
from forge.llm.providers import ChatModelProvider, ModelProviderBudget
from forge.models.action import ActionErrorResult, ActionSuccessResult
from forge.sdk.db import AgentDB
from forge.sdk.middlewares import AgentMiddleware
from forge.sdk.model import (
from forge.agent_protocol.api_router import base_router
from forge.agent_protocol.database import AgentDB
from forge.agent_protocol.middlewares import AgentMiddleware
from forge.agent_protocol.models import (
Artifact,
Step,
StepRequestBody,

@@ -26,7 +23,10 @@ from forge.sdk.model (
TaskRequestBody,
TaskStepsListResponse,
)
from forge.sdk.routes.agent_protocol import base_router
from forge.config.config import Config
from forge.file_storage import FileStorage
from forge.llm.providers import ChatModelProvider, ModelProviderBudget
from forge.models.action import ActionErrorResult, ActionSuccessResult
from forge.utils.const import ASK_COMMAND, FINISH_COMMAND
from forge.utils.exceptions import AgentFinished, NotFoundError
from hypercorn.asyncio import serve as hypercorn_serve

@@ -123,7 +123,7 @@ class AgentProtocolServer:
config.bind = [f"0.0.0.0:{port}"]

logger.info(f"AutoGPT server starting on http://localhost:{port}")
await hypercorn_serve(app, config)
await hypercorn_serve(app, config)  # type: ignore

async def create_task(self, task_request: TaskRequestBody) -> Task:
"""

@@ -64,15 +64,6 @@ def cli(ctx: click.Context):
is_flag=True,
help="Skips the re-prompting messages at the beginning of the script",
)
@click.option(
"--ai-settings",
"-C",
type=click.Path(exists=True, dir_okay=False, path_type=Path),
help=(
"Specifies which ai_settings.yaml file to use, relative to the AutoGPT"
" root directory. Will also automatically skip the re-prompt."
),
)
@click.option(
"--ai-name",
type=str,

@@ -83,12 +74,6 @@ def cli(ctx: click.Context):
type=str,
help="AI role override",
)
@click.option(
"--prompt-settings",
"-P",
type=click.Path(exists=True, dir_okay=False, path_type=Path),
help="Specifies which prompt_settings.yaml file to use.",
)
@click.option(
"--constraint",
type=str,

@@ -157,10 +142,8 @@ def run(
install_plugin_deps: bool,
skip_news: bool,
skip_reprompt: bool,
ai_settings: Optional[Path],
ai_name: Optional[str],
ai_role: Optional[str],
prompt_settings: Optional[Path],
resource: tuple[str],
constraint: tuple[str],
best_practice: tuple[str],

@@ -180,8 +163,6 @@ def run(
run_auto_gpt(
continuous=continuous,
continuous_limit=continuous_limit,
ai_settings=ai_settings,
prompt_settings=prompt_settings,
skip_reprompt=skip_reprompt,
speak=speak,
debug=debug,

@@ -205,12 +186,6 @@ def run(

@cli.command()
@click.option(
"--prompt-settings",
"-P",
type=click.Path(exists=True, dir_okay=False, path_type=Path),
help="Specifies which prompt_settings.yaml file to use.",
)
@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode")
@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode")
@click.option(

@@ -250,7 +225,6 @@ def run(
type=click.Choice([i.value for i in LogFormatName]),
)
def serve(
prompt_settings: Optional[Path],
gpt3only: bool,
gpt4only: bool,
browser_name: Optional[str],

@@ -269,7 +243,6 @@ def serve(
from autogpt.app.main import run_auto_gpt_server

run_auto_gpt_server(
prompt_settings=prompt_settings,
debug=debug,
log_level=log_level,
log_format=log_format,

@@ -2,15 +2,12 @@
from __future__ import annotations

import logging
from pathlib import Path
from typing import Literal, Optional

import click
from colorama import Back, Fore, Style
from forge.config.config import GPT_3_MODEL, GPT_4_MODEL, Config
from forge.llm.providers import ModelName, MultiProvider
from forge.logging.helpers import request_user_double_check
from forge.utils.yaml_validator import validate_yaml_file

from autogpt.memory.vector import get_supported_memory_backends

@@ -21,8 +18,6 @@ async def apply_overrides_to_config(
config: Config,
continuous: bool = False,
continuous_limit: Optional[int] = None,
ai_settings_file: Optional[Path] = None,
prompt_settings_file: Optional[Path] = None,
skip_reprompt: bool = False,
gpt3only: bool = False,
gpt4only: bool = False,

@@ -37,8 +32,6 @@ async def apply_overrides_to_config(
config (Config): The config object to update.
continuous (bool): Whether to run in continuous mode.
continuous_limit (int): The number of times to run in continuous mode.
ai_settings_file (Path): The path to the ai_settings.yaml file.
prompt_settings_file (Path): The path to the prompt_settings.yaml file.
skip_reprompt (bool): Whether to skip the re-prompting messages on start.
speak (bool): Whether to enable speak mode.
debug (bool): Whether to enable debug mode.

@@ -102,31 +95,6 @@ async def apply_overrides_to_config(
if skip_reprompt:
config.skip_reprompt = True

if ai_settings_file:
file = ai_settings_file

# Validate file
(validated, message) = validate_yaml_file(file)
if not validated:
logger.fatal(extra={"title": "FAILED FILE VALIDATION:"}, msg=message)
request_user_double_check()
exit(1)

config.ai_settings_file = config.project_root / file
config.skip_reprompt = True

if prompt_settings_file:
file = prompt_settings_file

# Validate file
(validated, message) = validate_yaml_file(file)
if not validated:
logger.fatal(extra={"title": "FAILED FILE VALIDATION:"}, msg=message)
request_user_double_check()
exit(1)

config.prompt_settings_file = config.project_root / file

if browser_name:
config.selenium_web_browser = browser_name

@@ -14,18 +14,17 @@ from types import FrameType
from typing import TYPE_CHECKING, Optional

from colorama import Fore, Style
from forge.agent_protocol.database import AgentDB
from forge.components.code_executor import (
is_docker_available,
we_are_running_in_a_docker_container,
)
from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.config.config import Config, ConfigBuilder, assert_config_has_openai_api_key
from forge.db import AgentDB
from forge.file_storage import FileStorageBackendName, get_storage
from forge.llm.providers import MultiProvider
from forge.logging.config import configure_logging
from forge.logging.helpers import print_attribute, speak
from forge.logging.utils import print_attribute, speak
from forge.models.action import ActionInterruptedByHuman, ActionProposal
from forge.models.utils import ModelWithSummary
from forge.utils.const import FINISH_COMMAND

@@ -57,8 +56,6 @@ from .utils import (
async def run_auto_gpt(
continuous: bool = False,
continuous_limit: Optional[int] = None,
ai_settings: Optional[Path] = None,
prompt_settings: Optional[Path] = None,
skip_reprompt: bool = False,
speak: bool = False,
debug: bool = False,

@@ -108,8 +105,6 @@ async def run_auto_gpt(
config=config,
continuous=continuous,
continuous_limit=continuous_limit,
ai_settings_file=ai_settings,
prompt_settings_file=prompt_settings,
skip_reprompt=skip_reprompt,
gpt3only=gpt3only,
gpt4only=gpt4only,

@@ -134,7 +129,7 @@ async def run_auto_gpt(
)

if not config.skip_news:
print_motd(config, logger)
print_motd(logger)
print_git_branch_info(logger)
print_python_version_info(logger)
print_attribute("Smart LLM", config.smart_llm)

@@ -146,10 +141,6 @@ async def run_auto_gpt(
print_attribute("Continuous Limit", config.continuous_limit)
if config.tts_config.speak_mode:
print_attribute("Speak Mode", "ENABLED")
if ai_settings:
print_attribute("Using AI Settings File", ai_settings)
if prompt_settings:
print_attribute("Using Prompt Settings File", prompt_settings)
if config.allow_downloads:
print_attribute("Native Downloading", "ENABLED")
if we_are_running_in_a_docker_container() or is_docker_available():

@@ -267,14 +258,12 @@ async def run_auto_gpt(
" with as much detail as possible:",
)

base_ai_directives = AIDirectives.from_file(config.prompt_settings_file)

ai_profile, task_oriented_ai_directives = await generate_agent_profile_for_task(
task,
app_config=config,
llm_provider=llm_provider,
)
ai_directives = base_ai_directives + task_oriented_ai_directives
ai_directives = task_oriented_ai_directives
apply_overrides_to_ai_settings(
ai_profile=ai_profile,
directives=ai_directives,

@@ -347,7 +336,6 @@ async def run_auto_gpt(

@coroutine
async def run_auto_gpt_server(
prompt_settings: Optional[Path] = None,
debug: bool = False,
log_level: Optional[str] = None,
log_format: Optional[str] = None,

@@ -384,7 +372,6 @@ async def run_auto_gpt_server(

await apply_overrides_to_config(
config=config,
prompt_settings_file=prompt_settings,
gpt3only=gpt3only,
gpt4only=gpt4only,
browser_name=browser_name,

@@ -5,7 +5,7 @@ from typing import Optional
from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.config.config import Config
from forge.logging.helpers import print_attribute
from forge.logging.utils import print_attribute

from .input import clean_input

@@ -5,15 +5,11 @@ import re
import socket
import sys
from pathlib import Path
from typing import TYPE_CHECKING

import requests
from colorama import Fore, Style
from git import InvalidGitRepositoryError, Repo

if TYPE_CHECKING:
from forge.config.config import Config

logger = logging.getLogger(__name__)

@@ -159,7 +155,7 @@ By using the System, you agree to indemnify, defend, and hold harmless the Proje
return legal_text

def print_motd(config: "Config", logger: logging.Logger):
def print_motd(logger: logging.Logger):
motd, is_new_motd = get_latest_bulletin()
if motd:
motd = markdown_to_ansi_style(motd)

autogpts/autogpt/poetry.lock (generated, 1 change)
@@ -5255,6 +5255,7 @@ files = [
(PyYAML 6.0.1 wheel file hash entries)

@@ -1,15 +0,0 @@
constraints: [
'Exclusively use the commands listed below.',
'You can only act proactively, and are unable to start background jobs or set up webhooks for yourself. Take this into account when planning your actions.',
'You are unable to interact with physical objects. If this is absolutely necessary to fulfill a task or objective or to complete a step, you must ask the user to do it for you. If the user refuses this, and there is no other way to achieve your goals, you must terminate to avoid wasting time and energy.'
]
resources: [
'You are a Large Language Model, trained on millions of pages of text, including a lot of factual knowledge. Make use of this factual knowledge to avoid unnecessary gathering of information.'
]
best_practices: [
'Continuously review and analyze your actions to ensure you are performing to the best of your abilities.',
'Constructively self-criticize your big-picture behavior constantly.',
'Reflect on past decisions and strategies to refine your approach.',
'Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.',
'Only make use of your information gathering abilities to find information that you don''t yet have knowledge of.'
]

@@ -1,71 +0,0 @@
(removed test module for AIProfile: loading and saving of ai_settings.yaml, covering test_goals_are_always_lists_of_strings, test_ai_profile_file_not_exists, and test_ai_profile_file_is_empty)

@@ -1,42 +0,0 @@
(removed test module for the prompt configuration: test_prompt_config_loading, which loaded constraints, resources, and best practices for AIDirectives from a YAML file)

@@ -6,7 +6,6 @@ from unittest.mock import patch
import pytest
import requests
from forge.json.parsing import extract_dict_from_json
from forge.utils.yaml_validator import validate_yaml_file
from git import InvalidGitRepositoryError

import autogpt.app.utils

@@ -58,41 +57,6 @@ def invalid_json_response() -> dict:
}


def test_validate_yaml_file_valid():
with open("valid_test_file.yaml", "w") as f:
f.write("setting: value")
result, message = validate_yaml_file("valid_test_file.yaml")
os.remove("valid_test_file.yaml")

assert result is True
assert "Successfully validated" in message


def test_validate_yaml_file_not_found():
result, message = validate_yaml_file("non_existent_file.yaml")

assert result is False
assert "wasn't found" in message


def test_validate_yaml_file_invalid():
with open("invalid_test_file.yaml", "w") as f:
f.write(
"settings:\n"
" first_setting: value\n"
" second_setting: value\n"
" nested_setting: value\n"
" third_setting: value\n"
"unindented_setting: value"
)
result, message = validate_yaml_file("invalid_test_file.yaml")
os.remove("invalid_test_file.yaml")
print(result)
print(message)
assert result is False
assert "There was an issue while trying to read" in message


@patch("requests.get")
def test_get_bulletin_from_web_success(mock_get):
expected_content = "Test bulletin from web"

@@ -5,4 +5,3 @@ OPENAI_API_KEY=abc
LOG_LEVEL=INFO
DATABASE_STRING="sqlite:///agent.db"
PORT=8000
AGENT_WORKSPACE="agbenchmark_config/workspace"

autogpts/forge/.gitignore (vendored, 2 changes)
@@ -4,8 +4,6 @@ autogpt/*.json
*.mpeg
.env
azure.yaml
ai_settings.yaml
last_run_ai_settings.yaml
.vscode
.idea/*
auto-gpt.json

@@ -1,11 +1,12 @@
import logging
import os

import uvicorn
from dotenv import load_dotenv

import forge.sdk.forge_log
from forge.logging.config import configure_logging

LOG = forge.sdk.forge_log.ForgeLogger(__name__)
logger = logging.getLogger(__name__)


logo = """\n\n

@@ -36,9 +37,9 @@ d88P 888 "Y88888 "Y888 "Y88P" "Y8888P88 888 888
if __name__ == "__main__":
print(logo)
port = os.getenv("PORT", 8000)
LOG.info(f"Agent server starting on http://localhost:{port}")
configure_logging()
logger.info(f"Agent server starting on http://localhost:{port}")
load_dotenv()
forge.sdk.forge_log.setup_logger()

uvicorn.run(
"forge.app:app",

@@ -1 +0,0 @@
from .registry import Action, ActionParameter, ActionRegister, action

@@ -1,78 +0,0 @@
(removed forge.actions file-system module: @action-decorated list_files, write_file, and read_file operating on the agent workspace)

@@ -1,38 +0,0 @@
(removed forge.actions finish module: the @action-decorated finish command that logged a reason and shut the agent down)

@@ -1,193 +0,0 @@
(removed forge.actions registry module: the ActionParameter and Action pydantic models, the @action decorator, and the ActionRegister class that discovered, listed, and ran registered actions)

@@ -1,72 +0,0 @@
(removed forge.actions web_search module: a DuckDuckGo-backed web_search action plus the safe_google_results helper)

@@ -1,366 +0,0 @@
(removed forge.actions web_selenium module, "Commands for browsing a website": the read_webpage action plus Selenium driver setup, URL validation and sanitization, and hyperlink/text scraping helpers)
@@ -1,146 +0,0 @@
|
||||
from forge.actions import ActionRegister
|
||||
from forge.sdk import (
|
||||
Agent,
|
||||
AgentDB,
|
||||
ForgeLogger,
|
||||
Step,
|
||||
StepRequestBody,
|
||||
Task,
|
||||
TaskRequestBody,
|
||||
Workspace,
|
||||
)
|
||||
|
||||
LOG = ForgeLogger(__name__)
|
||||
|
||||
|
||||
class ForgeAgent(Agent):
|
||||
"""
|
||||
The goal of the Forge is to take care of the boilerplate code, so you can focus on
|
||||
agent design.
|
||||
|
||||
There is a great paper surveying the agent landscape: https://arxiv.org/abs/2308.11432
|
||||
Which I would highly recommend reading as it will help you understand the possabilities.
|
||||
|
||||
Here is a summary of the key components of an agent:
|
||||
|
||||
Anatomy of an agent:
|
||||
- Profile
|
||||
- Memory
|
||||
- Planning
|
||||
- Action
|
||||
|
||||
Profile:
|
||||
|
||||
Agents typically perform a task by assuming specific roles. For example, a teacher,
|
||||
a coder, a planner etc. In using the profile in the llm prompt it has been shown to
|
||||
improve the quality of the output. https://arxiv.org/abs/2305.14688
|
||||
|
||||
Additionally, based on the profile selected, the agent could be configured to use a
|
||||
different llm. The possibilities are endless and the profile can be selected
|
||||
dynamically based on the task at hand.
|
||||
|
||||
Memory:
|
||||
|
||||
Memory is critical for the agent to accumulate experiences, self-evolve, and behave
|
||||
in a more consistent, reasonable, and effective manner. There are many approaches to
|
||||
memory. However, some thoughts: there is long term and short term or working memory.
|
||||
You may want different approaches for each. There has also been work exploring the
|
||||
idea of memory reflection, which is the ability to assess its memories and re-evaluate
|
||||
them. For example, condensing short term memories into long term memories.
|
||||
|
||||
Planning:
|
||||
|
||||
When humans face a complex task, they first break it down into simple subtasks and then
|
||||
solve each subtask one by one. The planning module empowers LLM-based agents with the ability
|
||||
to think and plan for solving complex tasks, which makes the agent more comprehensive,
|
||||
powerful, and reliable. The two key methods to consider are: Planning with feedback and planning
|
||||
without feedback.
|
||||
|
||||
Action:
|
||||
|
||||
Actions translate the agent's decisions into specific outcomes. For example, if the agent
|
||||
decides to write a file, the action would be to write the file. There are many approaches you
|
||||
could implement actions.
|
||||
|
||||
The Forge has a basic module for each of these areas. However, you are free to implement your own.
|
||||
This is just a starting point.
|
||||
"""
|
||||
|
||||
def __init__(self, database: AgentDB, workspace: Workspace):
|
||||
"""
|
||||
The database is used to store tasks, steps and artifact metadata. The workspace is used to
|
||||
store artifacts. The workspace is a directory on the file system.
|
||||
|
||||
Feel free to create subclasses of the database and workspace to implement your own storage
|
||||
"""
|
||||
super().__init__(database, workspace)
|
||||
self.abilities = ActionRegister(self)
|
||||
|
||||
async def create_task(self, task_request: TaskRequestBody) -> Task:
|
||||
"""
|
||||
The agent protocol, which is the core of the Forge, works by creating a task and then
|
||||
executing steps for that task. This method is called when the agent is asked to create
|
||||
a task.
|
||||
|
||||
We are hooking into function to add a custom log message. Though you can do anything you
|
||||
want here.
|
||||
"""
|
||||
task = await super().create_task(task_request)
|
||||
LOG.info(
|
||||
f"📦 Task created: {task.task_id} input: {task.input[:40]}{'...' if len(task.input) > 40 else ''}"
|
||||
)
|
||||
return task

async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Step:
"""
For a tutorial on how to add your own logic, please see the official tutorial series:
https://aiedge.medium.com/autogpt-forge-e3de53cc58ec

The agent protocol, which is the core of the Forge, works by creating a task and then
executing steps for that task. This method is called when the agent is asked to execute
a step.

The task that is created contains an input string; for the benchmarks this is the task
the agent has been asked to solve. It also carries additional input, which is a dictionary
that could contain anything.

If you want to get the task use:

```
task = await self.db.get_task(task_id)
```

The step request body is essentially the same as the task request: it contains an input
string (for the benchmarks, the task the agent has been asked to solve) and additional
input, which is a dictionary that could contain anything.

You need to implement logic that will take in this step input and output the completed step
as a step object. You can do everything in a single step, or you can break it down into
multiple steps. By returning a request to continue in the step output, you let the user
decide whether the agent should continue or not.
"""
# An example that creates a single, final step and writes a placeholder artifact:
step = await self.db.create_step(
task_id=task_id, input=step_request, is_last=True
)

self.workspace.write(task_id=task_id, path="output.txt", data=b"Washington D.C")

await self.db.create_artifact(
task_id=task_id,
step_id=step.step_id,
file_name="output.txt",
relative_path="",
agent_created=True,
)

step.output = "Washington D.C"

LOG.info(
f"\t✅ Final Step completed: {step.step_id}. \n"
+ f"Output should be placeholder text Washington D.C. You'll need to \n"
+ f"modify execute_step to include LLM behavior. Follow the tutorial "
+ f"if confused. "
)

return step
|
||||
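To see the placeholder behaviour above end to end, the agent can be driven over the Agent Protocol HTTP API once it is serving. This is an illustrative sketch only: the host, port, and `/ap/v1` route prefix follow typical Agent Protocol defaults and are assumptions, not something this diff specifies.

```
import requests

# Assumed base URL of a locally running agent; adjust host, port and prefix as needed.
BASE = "http://localhost:8000/ap/v1"

# Create a task, then execute a single step against it.
task = requests.post(f"{BASE}/agent/tasks", json={"input": "What is the capital of the USA?"}).json()
task_id = task["task_id"]

step = requests.post(f"{BASE}/agent/tasks/{task_id}/steps", json={"input": "y"}).json()
print(step["output"])   # the placeholder implementation above returns "Washington D.C"
print(step["is_last"])  # True: the example marks its single step as final
```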
@@ -1,3 +1,4 @@
|
||||
import logging
|
||||
import os
|
||||
import pathlib
|
||||
from io import BytesIO
|
||||
@@ -9,12 +10,10 @@ from fastapi.middleware.cors import CORSMiddleware
|
||||
from fastapi.responses import RedirectResponse, StreamingResponse
|
||||
from fastapi.staticfiles import StaticFiles
|
||||
|
||||
from forge.utils.exceptions import NotFoundError
|
||||
|
||||
from .db import AgentDB
|
||||
from .forge_log import ForgeLogger
|
||||
from .middlewares import AgentMiddleware
|
||||
from .model import (
|
||||
from forge.agent_protocol.api_router import base_router
|
||||
from forge.agent_protocol.database.db import AgentDB
|
||||
from forge.agent_protocol.middlewares import AgentMiddleware
|
||||
from forge.agent_protocol.models.task import (
|
||||
Artifact,
|
||||
Step,
|
||||
StepRequestBody,
|
||||
@@ -24,14 +23,14 @@ from .model import (
|
||||
TaskRequestBody,
|
||||
TaskStepsListResponse,
|
||||
)
|
||||
from .routes.agent_protocol import base_router
|
||||
from .workspace import Workspace
|
||||
from forge.file_storage.base import FileStorage
|
||||
from forge.utils.exceptions import NotFoundError
|
||||
|
||||
LOG = ForgeLogger(__name__)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Agent:
|
||||
def __init__(self, database: AgentDB, workspace: Workspace):
|
||||
def __init__(self, database: AgentDB, workspace: FileStorage):
|
||||
self.db = database
|
||||
self.workspace = workspace
|
||||
|
||||
@@ -187,7 +186,7 @@ class Agent:
|
||||
else:
|
||||
file_path = os.path.join(relative_path, file_name)
|
||||
|
||||
self.workspace.write(task_id, file_path, data)
|
||||
await self.workspace.write_file(file_path, data)
|
||||
|
||||
artifact = await self.db.create_artifact(
|
||||
task_id=task_id,
|
||||
@@ -209,7 +208,7 @@ class Agent:
|
||||
file_path = os.path.join(artifact.relative_path, artifact.file_name)
|
||||
else:
|
||||
file_path = artifact.relative_path
|
||||
retrieved_artifact = self.workspace.read(task_id=task_id, path=file_path)
|
||||
retrieved_artifact = self.workspace.read_file(file_path)
|
||||
except NotFoundError as e:
|
||||
raise
|
||||
except FileNotFoundError as e:
|
||||
@@ -1,15 +1,25 @@
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from forge.agent_protocol.database.db import AgentDB
|
||||
from forge.agent_protocol.models.task import (
|
||||
StepRequestBody,
|
||||
Task,
|
||||
TaskListResponse,
|
||||
TaskRequestBody,
|
||||
)
|
||||
from forge.file_storage.base import FileStorageConfiguration
|
||||
from forge.file_storage.local import LocalFileStorage
|
||||
|
||||
from .agent import Agent
|
||||
from .db import AgentDB
|
||||
from .model import StepRequestBody, Task, TaskListResponse, TaskRequestBody
|
||||
from .workspace import LocalWorkspace
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def agent():
|
||||
db = AgentDB("sqlite:///test.db")
|
||||
workspace = LocalWorkspace("./test_workspace")
|
||||
config = FileStorageConfiguration(root=Path("./test_workspace"))
|
||||
workspace = LocalFileStorage(config)
|
||||
return Agent(db, workspace)
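The fixture now builds the agent on top of the new FileStorage abstraction instead of the removed LocalWorkspace. As a minimal sketch (not part of this change set) of the storage API the agent relies on: `write_file` is awaited while `read_file` is called synchronously, matching the calls in the Agent diff above; the explicit `mkdir` is only there to keep the sketch self-contained.

```
import asyncio
from pathlib import Path

from forge.file_storage.base import FileStorageConfiguration
from forge.file_storage.local import LocalFileStorage

root = Path("./test_workspace")
root.mkdir(exist_ok=True)  # make sure the storage root exists for this sketch

storage = LocalFileStorage(FileStorageConfiguration(root=root))

async def main():
    await storage.write_file("output.txt", b"Washington D.C")  # coroutine, per the diff
    print(storage.read_file("output.txt"))                     # synchronous, per the diff

asyncio.run(main())
```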
|
||||
|
||||
|
||||
@@ -30,13 +30,7 @@ from forge.agent.components import (
|
||||
from forge.config.ai_directives import AIDirectives
|
||||
from forge.config.ai_profile import AIProfile
|
||||
from forge.config.config import ConfigBuilder
|
||||
from forge.llm.prompting.prompt import DEFAULT_TRIGGERING_PROMPT
|
||||
from forge.llm.providers import (
|
||||
CHAT_MODELS,
|
||||
AssistantFunctionCall,
|
||||
ModelName,
|
||||
OpenAIModelName,
|
||||
)
|
||||
from forge.llm.providers import CHAT_MODELS, ModelName, OpenAIModelName
|
||||
from forge.llm.providers.schema import ChatModelInfo
|
||||
from forge.models.config import (
|
||||
Configurable,
|
||||
@@ -50,6 +44,12 @@ logger = logging.getLogger(__name__)
|
||||
T = TypeVar("T")
|
||||
P = ParamSpec("P")
|
||||
|
||||
DEFAULT_TRIGGERING_PROMPT = (
|
||||
"Determine exactly one command to use next based on the given goals "
|
||||
"and the progress you have made so far, "
|
||||
"and respond using the JSON schema specified previously:"
|
||||
)
|
||||
|
||||
|
||||
class BaseAgentConfiguration(SystemConfiguration):
|
||||
allow_fs_access: bool = UserConfigurable(default=False)
|
||||
@@ -114,11 +114,7 @@ class BaseAgentSettings(SystemSettings):
|
||||
ai_profile: AIProfile = Field(default_factory=lambda: AIProfile(ai_name="AutoGPT"))
|
||||
"""The AI profile or "personality" of the agent."""
|
||||
|
||||
directives: AIDirectives = Field(
|
||||
default_factory=lambda: AIDirectives.from_file(
|
||||
ConfigBuilder.default_settings.prompt_settings_file
|
||||
)
|
||||
)
|
||||
directives: AIDirectives = Field(default_factory=AIDirectives)
|
||||
"""Directives (general instructional guidelines) for the agent."""
|
||||
|
||||
task: str = "Terminate immediately" # FIXME: placeholder for forge.sdk.schema.Task
|
||||
|
||||
@@ -23,13 +23,19 @@ Developers and contributors should be especially careful when making modificatio
|
||||
consistency and correctness in the system's behavior.
|
||||
"""
|
||||
import json
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import APIRouter, Query, Request, Response, UploadFile
|
||||
from fastapi.responses import FileResponse
|
||||
|
||||
from forge.sdk.forge_log import ForgeLogger
|
||||
from forge.sdk.model import (
|
||||
from forge.utils.exceptions import (
|
||||
NotFoundError,
|
||||
get_detailed_traceback,
|
||||
get_exception_message,
|
||||
)
|
||||
|
||||
from .models import (
|
||||
Artifact,
|
||||
Step,
|
||||
StepRequestBody,
|
||||
@@ -39,15 +45,9 @@ from forge.sdk.model import (
|
||||
TaskRequestBody,
|
||||
TaskStepsListResponse,
|
||||
)
|
||||
from forge.utils.exceptions import (
|
||||
NotFoundError,
|
||||
get_detailed_traceback,
|
||||
get_exception_message,
|
||||
)
|
||||
|
||||
base_router = APIRouter()
|
||||
|
||||
LOG = ForgeLogger(__name__)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@base_router.get("/", tags=["root"])
|
||||
@@ -103,7 +103,7 @@ async def create_agent_task(request: Request, task_request: TaskRequestBody) ->
|
||||
media_type="application/json",
|
||||
)
|
||||
except Exception:
|
||||
LOG.exception(f"Error whilst trying to create a task: {task_request}")
|
||||
logger.exception(f"Error whilst trying to create a task: {task_request}")
|
||||
return Response(
|
||||
content=json.dumps(
|
||||
{
|
||||
@@ -167,14 +167,14 @@ async def list_agent_tasks(
|
||||
media_type="application/json",
|
||||
)
|
||||
except NotFoundError:
|
||||
LOG.exception("Error whilst trying to list tasks")
|
||||
logger.exception("Error whilst trying to list tasks")
|
||||
return Response(
|
||||
content=json.dumps({"error": "Tasks not found"}),
|
||||
status_code=404,
|
||||
media_type="application/json",
|
||||
)
|
||||
except Exception:
|
||||
LOG.exception("Error whilst trying to list tasks")
|
||||
logger.exception("Error whilst trying to list tasks")
|
||||
return Response(
|
||||
content=json.dumps(
|
||||
{
|
||||
@@ -250,14 +250,14 @@ async def get_agent_task(request: Request, task_id: str) -> Task:
|
||||
media_type="application/json",
|
||||
)
|
||||
except NotFoundError:
|
||||
LOG.exception(f"Error whilst trying to get task: {task_id}")
|
||||
logger.exception(f"Error whilst trying to get task: {task_id}")
|
||||
return Response(
|
||||
content=json.dumps({"error": "Task not found"}),
|
||||
status_code=404,
|
||||
media_type="application/json",
|
||||
)
|
||||
except Exception:
|
||||
LOG.exception(f"Error whilst trying to get task: {task_id}")
|
||||
logger.exception(f"Error whilst trying to get task: {task_id}")
|
||||
return Response(
|
||||
content=json.dumps(
|
||||
{
|
||||
@@ -325,14 +325,14 @@ async def list_agent_task_steps(
|
||||
media_type="application/json",
|
||||
)
|
||||
except NotFoundError:
|
||||
LOG.exception("Error whilst trying to list steps")
|
||||
logger.exception("Error whilst trying to list steps")
|
||||
return Response(
|
||||
content=json.dumps({"error": "Steps not found"}),
|
||||
status_code=404,
|
||||
media_type="application/json",
|
||||
)
|
||||
except Exception:
|
||||
LOG.exception("Error whilst trying to list steps")
|
||||
logger.exception("Error whilst trying to list steps")
|
||||
return Response(
|
||||
content=json.dumps(
|
||||
{
|
||||
@@ -403,14 +403,14 @@ async def execute_agent_task_step(
|
||||
media_type="application/json",
|
||||
)
|
||||
except NotFoundError:
|
||||
LOG.exception(f"Error whilst trying to execute a task step: {task_id}")
|
||||
logger.exception(f"Error whilst trying to execute a task step: {task_id}")
|
||||
return Response(
|
||||
content=json.dumps({"error": f"Task not found {task_id}"}),
|
||||
status_code=404,
|
||||
media_type="application/json",
|
||||
)
|
||||
except Exception:
|
||||
LOG.exception(f"Error whilst trying to execute a task step: {task_id}")
|
||||
logger.exception(f"Error whilst trying to execute a task step: {task_id}")
|
||||
return Response(
|
||||
content=json.dumps(
|
||||
{
|
||||
@@ -456,14 +456,14 @@ async def get_agent_task_step(request: Request, task_id: str, step_id: str) -> S
|
||||
|
||||
return Response(content=step.json(), status_code=200)
|
||||
except NotFoundError:
|
||||
LOG.exception(f"Error whilst trying to get step: {step_id}")
|
||||
logger.exception(f"Error whilst trying to get step: {step_id}")
|
||||
return Response(
|
||||
content=json.dumps({"error": "Step not found"}),
|
||||
status_code=404,
|
||||
media_type="application/json",
|
||||
)
|
||||
except Exception:
|
||||
LOG.exception(f"Error whilst trying to get step: {step_id}")
|
||||
logger.exception(f"Error whilst trying to get step: {step_id}")
|
||||
return Response(
|
||||
content=json.dumps(
|
||||
{
|
||||
@@ -526,14 +526,14 @@ async def list_agent_task_artifacts(
|
||||
)
|
||||
return artifacts
|
||||
except NotFoundError:
|
||||
LOG.exception("Error whilst trying to list artifacts")
|
||||
logger.exception("Error whilst trying to list artifacts")
|
||||
return Response(
|
||||
content=json.dumps({"error": "Artifacts not found for task_id"}),
|
||||
status_code=404,
|
||||
media_type="application/json",
|
||||
)
|
||||
except Exception:
|
||||
LOG.exception("Error whilst trying to list artifacts")
|
||||
logger.exception("Error whilst trying to list artifacts")
|
||||
return Response(
|
||||
content=json.dumps(
|
||||
{
|
||||
@@ -596,7 +596,7 @@ async def upload_agent_task_artifacts(
|
||||
media_type="application/json",
|
||||
)
|
||||
except Exception:
|
||||
LOG.exception(f"Error whilst trying to upload artifact: {task_id}")
|
||||
logger.exception(f"Error whilst trying to upload artifact: {task_id}")
|
||||
return Response(
|
||||
content=json.dumps(
|
||||
{
|
||||
@@ -640,7 +640,7 @@ async def download_agent_task_artifact(
|
||||
try:
|
||||
return await agent.get_artifact(task_id, artifact_id)
|
||||
except NotFoundError:
|
||||
LOG.exception(f"Error whilst trying to download artifact: {task_id}")
|
||||
logger.exception(f"Error whilst trying to download artifact: {task_id}")
|
||||
return Response(
|
||||
content=json.dumps(
|
||||
{
|
||||
@@ -652,7 +652,7 @@ async def download_agent_task_artifact(
|
||||
media_type="application/json",
|
||||
)
|
||||
except Exception:
|
||||
LOG.exception(f"Error whilst trying to download artifact: {task_id}")
|
||||
logger.exception(f"Error whilst trying to download artifact: {task_id}")
|
||||
return Response(
|
||||
content=json.dumps(
|
||||
{
|
||||
1
autogpts/forge/forge/agent_protocol/database/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
from .db import AgentDB
|
||||
@@ -5,6 +5,7 @@ IT IS NOT ADVISED TO USE THIS IN PRODUCTION!
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import logging
|
||||
import math
|
||||
import uuid
|
||||
from typing import Any, Dict, List, Literal, Optional, Tuple
|
||||
@@ -23,10 +24,11 @@ from sqlalchemy.orm import DeclarativeBase, joinedload, relationship, sessionmak
|
||||
|
||||
from forge.utils.exceptions import NotFoundError
|
||||
|
||||
from .forge_log import ForgeLogger
|
||||
from .model import Artifact, Pagination, Status, Step, StepRequestBody, Task
|
||||
from ..models.artifact import Artifact
|
||||
from ..models.pagination import Pagination
|
||||
from ..models.task import Step, StepRequestBody, StepStatus, Task
|
||||
|
||||
LOG = ForgeLogger(__name__)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Base(DeclarativeBase):
|
||||
@@ -87,7 +89,7 @@ class ArtifactModel(Base):
|
||||
|
||||
def convert_to_task(task_obj: TaskModel, debug_enabled: bool = False) -> Task:
|
||||
if debug_enabled:
|
||||
LOG.debug(f"Converting TaskModel to Task for task_id: {task_obj.task_id}")
|
||||
logger.debug(f"Converting TaskModel to Task for task_id: {task_obj.task_id}")
|
||||
task_artifacts = [convert_to_artifact(artifact) for artifact in task_obj.artifacts]
|
||||
return Task(
|
||||
task_id=task_obj.task_id,
|
||||
@@ -101,11 +103,13 @@ def convert_to_task(task_obj: TaskModel, debug_enabled: bool = False) -> Task:
|
||||
|
||||
def convert_to_step(step_model: StepModel, debug_enabled: bool = False) -> Step:
|
||||
if debug_enabled:
|
||||
LOG.debug(f"Converting StepModel to Step for step_id: {step_model.step_id}")
|
||||
logger.debug(f"Converting StepModel to Step for step_id: {step_model.step_id}")
|
||||
step_artifacts = [
|
||||
convert_to_artifact(artifact) for artifact in step_model.artifacts
|
||||
]
|
||||
status = Status.completed if step_model.status == "completed" else Status.created
|
||||
status = (
|
||||
StepStatus.completed if step_model.status == "completed" else StepStatus.created
|
||||
)
|
||||
return Step(
|
||||
task_id=step_model.task_id,
|
||||
step_id=step_model.step_id,
|
||||
@@ -139,7 +143,9 @@ class AgentDB:
|
||||
super().__init__()
|
||||
self.debug_enabled = debug_enabled
|
||||
if self.debug_enabled:
|
||||
LOG.debug(f"Initializing AgentDB with database_string: {database_string}")
|
||||
logger.debug(
|
||||
f"Initializing AgentDB with database_string: {database_string}"
|
||||
)
|
||||
self.engine = create_engine(database_string)
|
||||
Base.metadata.create_all(self.engine)
|
||||
self.Session = sessionmaker(bind=self.engine)
|
||||
@@ -148,7 +154,7 @@ class AgentDB:
|
||||
self, input: Optional[str], additional_input: Optional[dict] = {}
|
||||
) -> Task:
|
||||
if self.debug_enabled:
|
||||
LOG.debug("Creating new task")
|
||||
logger.debug("Creating new task")
|
||||
|
||||
try:
|
||||
with self.Session() as session:
|
||||
@@ -161,15 +167,15 @@ class AgentDB:
|
||||
session.commit()
|
||||
session.refresh(new_task)
|
||||
if self.debug_enabled:
|
||||
LOG.debug(f"Created new task with task_id: {new_task.task_id}")
|
||||
logger.debug(f"Created new task with task_id: {new_task.task_id}")
|
||||
return convert_to_task(new_task, self.debug_enabled)
|
||||
except SQLAlchemyError as e:
|
||||
LOG.error(f"SQLAlchemy error while creating task: {e}")
|
||||
logger.error(f"SQLAlchemy error while creating task: {e}")
|
||||
raise
|
||||
except NotFoundError as e:
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.error(f"Unexpected error while creating task: {e}")
|
||||
logger.error(f"Unexpected error while creating task: {e}")
|
||||
raise
|
||||
|
||||
async def create_step(
|
||||
@@ -180,7 +186,7 @@ class AgentDB:
|
||||
additional_input: Optional[Dict[str, Any]] = {},
|
||||
) -> Step:
|
||||
if self.debug_enabled:
|
||||
LOG.debug(f"Creating new step for task_id: {task_id}")
|
||||
logger.debug(f"Creating new step for task_id: {task_id}")
|
||||
try:
|
||||
with self.Session() as session:
|
||||
new_step = StepModel(
|
||||
@@ -196,15 +202,15 @@ class AgentDB:
|
||||
session.commit()
|
||||
session.refresh(new_step)
|
||||
if self.debug_enabled:
|
||||
LOG.debug(f"Created new step with step_id: {new_step.step_id}")
|
||||
logger.debug(f"Created new step with step_id: {new_step.step_id}")
|
||||
return convert_to_step(new_step, self.debug_enabled)
|
||||
except SQLAlchemyError as e:
|
||||
LOG.error(f"SQLAlchemy error while creating step: {e}")
|
||||
logger.error(f"SQLAlchemy error while creating step: {e}")
|
||||
raise
|
||||
except NotFoundError as e:
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.error(f"Unexpected error while creating step: {e}")
|
||||
logger.error(f"Unexpected error while creating step: {e}")
|
||||
raise
|
||||
|
||||
async def create_artifact(
|
||||
@@ -216,7 +222,7 @@ class AgentDB:
|
||||
step_id: str | None = None,
|
||||
) -> Artifact:
|
||||
if self.debug_enabled:
|
||||
LOG.debug(f"Creating new artifact for task_id: {task_id}")
|
||||
logger.debug(f"Creating new artifact for task_id: {task_id}")
|
||||
try:
|
||||
with self.Session() as session:
|
||||
if (
|
||||
@@ -230,7 +236,7 @@ class AgentDB:
|
||||
):
|
||||
session.close()
|
||||
if self.debug_enabled:
|
||||
LOG.debug(
|
||||
logger.debug(
|
||||
f"Artifact already exists with relative_path: {relative_path}"
|
||||
)
|
||||
return convert_to_artifact(existing_artifact)
|
||||
@@ -247,23 +253,23 @@ class AgentDB:
|
||||
session.commit()
|
||||
session.refresh(new_artifact)
|
||||
if self.debug_enabled:
|
||||
LOG.debug(
|
||||
logger.debug(
|
||||
f"Created new artifact with artifact_id: {new_artifact.artifact_id}"
|
||||
)
|
||||
return convert_to_artifact(new_artifact)
|
||||
except SQLAlchemyError as e:
|
||||
LOG.error(f"SQLAlchemy error while creating step: {e}")
|
||||
logger.error(f"SQLAlchemy error while creating step: {e}")
|
||||
raise
|
||||
except NotFoundError as e:
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.error(f"Unexpected error while creating step: {e}")
|
||||
logger.error(f"Unexpected error while creating step: {e}")
|
||||
raise
|
||||
|
||||
async def get_task(self, task_id: str) -> Task:
|
||||
"""Get a task by its id"""
|
||||
if self.debug_enabled:
|
||||
LOG.debug(f"Getting task with task_id: {task_id}")
|
||||
logger.debug(f"Getting task with task_id: {task_id}")
|
||||
try:
|
||||
with self.Session() as session:
|
||||
if task_obj := (
|
||||
@@ -274,20 +280,20 @@ class AgentDB:
|
||||
):
|
||||
return convert_to_task(task_obj, self.debug_enabled)
|
||||
else:
|
||||
LOG.error(f"Task not found with task_id: {task_id}")
|
||||
logger.error(f"Task not found with task_id: {task_id}")
|
||||
raise NotFoundError("Task not found")
|
||||
except SQLAlchemyError as e:
|
||||
LOG.error(f"SQLAlchemy error while getting task: {e}")
|
||||
logger.error(f"SQLAlchemy error while getting task: {e}")
|
||||
raise
|
||||
except NotFoundError as e:
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.error(f"Unexpected error while getting task: {e}")
|
||||
logger.error(f"Unexpected error while getting task: {e}")
|
||||
raise
|
||||
|
||||
async def get_step(self, task_id: str, step_id: str) -> Step:
|
||||
if self.debug_enabled:
|
||||
LOG.debug(f"Getting step with task_id: {task_id} and step_id: {step_id}")
|
||||
logger.debug(f"Getting step with task_id: {task_id} and step_id: {step_id}")
|
||||
try:
|
||||
with self.Session() as session:
|
||||
if step := (
|
||||
@@ -299,22 +305,22 @@ class AgentDB:
|
||||
return convert_to_step(step, self.debug_enabled)
|
||||
|
||||
else:
|
||||
LOG.error(
|
||||
logger.error(
|
||||
f"Step not found with task_id: {task_id} and step_id: {step_id}"
|
||||
)
|
||||
raise NotFoundError("Step not found")
|
||||
except SQLAlchemyError as e:
|
||||
LOG.error(f"SQLAlchemy error while getting step: {e}")
|
||||
logger.error(f"SQLAlchemy error while getting step: {e}")
|
||||
raise
|
||||
except NotFoundError as e:
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.error(f"Unexpected error while getting step: {e}")
|
||||
logger.error(f"Unexpected error while getting step: {e}")
|
||||
raise
|
||||
|
||||
async def get_artifact(self, artifact_id: str) -> Artifact:
|
||||
if self.debug_enabled:
|
||||
LOG.debug(f"Getting artifact with and artifact_id: {artifact_id}")
|
||||
logger.debug(f"Getting artifact with and artifact_id: {artifact_id}")
|
||||
try:
|
||||
with self.Session() as session:
|
||||
if (
|
||||
@@ -324,15 +330,17 @@ class AgentDB:
|
||||
):
|
||||
return convert_to_artifact(artifact_model)
|
||||
else:
|
||||
LOG.error(f"Artifact not found with and artifact_id: {artifact_id}")
|
||||
logger.error(
|
||||
f"Artifact not found with and artifact_id: {artifact_id}"
|
||||
)
|
||||
raise NotFoundError("Artifact not found")
|
||||
except SQLAlchemyError as e:
|
||||
LOG.error(f"SQLAlchemy error while getting artifact: {e}")
|
||||
logger.error(f"SQLAlchemy error while getting artifact: {e}")
|
||||
raise
|
||||
except NotFoundError as e:
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.error(f"Unexpected error while getting artifact: {e}")
|
||||
logger.error(f"Unexpected error while getting artifact: {e}")
|
||||
raise
|
||||
|
||||
async def update_step(
|
||||
@@ -345,7 +353,9 @@ class AgentDB:
|
||||
additional_output: Optional[Dict[str, Any]] = None,
|
||||
) -> Step:
|
||||
if self.debug_enabled:
|
||||
LOG.debug(f"Updating step with task_id: {task_id} and step_id: {step_id}")
|
||||
logger.debug(
|
||||
f"Updating step with task_id: {task_id} and step_id: {step_id}"
|
||||
)
|
||||
try:
|
||||
with self.Session() as session:
|
||||
if (
|
||||
@@ -364,17 +374,17 @@ class AgentDB:
|
||||
session.commit()
|
||||
return await self.get_step(task_id, step_id)
|
||||
else:
|
||||
LOG.error(
|
||||
logger.error(
|
||||
f"Step not found for update with task_id: {task_id} and step_id: {step_id}"
|
||||
)
|
||||
raise NotFoundError("Step not found")
|
||||
except SQLAlchemyError as e:
|
||||
LOG.error(f"SQLAlchemy error while getting step: {e}")
|
||||
logger.error(f"SQLAlchemy error while getting step: {e}")
|
||||
raise
|
||||
except NotFoundError as e:
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.error(f"Unexpected error while getting step: {e}")
|
||||
logger.error(f"Unexpected error while getting step: {e}")
|
||||
raise
|
||||
|
||||
async def update_artifact(
|
||||
@@ -385,7 +395,7 @@ class AgentDB:
|
||||
relative_path: str = "",
|
||||
agent_created: Optional[Literal[True]] = None,
|
||||
) -> Artifact:
|
||||
LOG.debug(f"Updating artifact with artifact_id: {artifact_id}")
|
||||
logger.debug(f"Updating artifact with artifact_id: {artifact_id}")
|
||||
with self.Session() as session:
|
||||
if (
|
||||
artifact := session.query(ArtifactModel)
|
||||
@@ -401,14 +411,14 @@ class AgentDB:
|
||||
session.commit()
|
||||
return await self.get_artifact(artifact_id)
|
||||
else:
|
||||
LOG.error(f"Artifact not found with artifact_id: {artifact_id}")
|
||||
logger.error(f"Artifact not found with artifact_id: {artifact_id}")
|
||||
raise NotFoundError("Artifact not found")
|
||||
|
||||
async def list_tasks(
|
||||
self, page: int = 1, per_page: int = 10
|
||||
) -> Tuple[List[Task], Pagination]:
|
||||
if self.debug_enabled:
|
||||
LOG.debug("Listing tasks")
|
||||
logger.debug("Listing tasks")
|
||||
try:
|
||||
with self.Session() as session:
|
||||
tasks = (
|
||||
@@ -429,19 +439,19 @@ class AgentDB:
|
||||
convert_to_task(task, self.debug_enabled) for task in tasks
|
||||
], pagination
|
||||
except SQLAlchemyError as e:
|
||||
LOG.error(f"SQLAlchemy error while listing tasks: {e}")
|
||||
logger.error(f"SQLAlchemy error while listing tasks: {e}")
|
||||
raise
|
||||
except NotFoundError as e:
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.error(f"Unexpected error while listing tasks: {e}")
|
||||
logger.error(f"Unexpected error while listing tasks: {e}")
|
||||
raise
|
||||
|
||||
async def list_steps(
|
||||
self, task_id: str, page: int = 1, per_page: int = 10
|
||||
) -> Tuple[List[Step], Pagination]:
|
||||
if self.debug_enabled:
|
||||
LOG.debug(f"Listing steps for task_id: {task_id}")
|
||||
logger.debug(f"Listing steps for task_id: {task_id}")
|
||||
try:
|
||||
with self.Session() as session:
|
||||
steps = (
|
||||
@@ -463,19 +473,19 @@ class AgentDB:
|
||||
convert_to_step(step, self.debug_enabled) for step in steps
|
||||
], pagination
|
||||
except SQLAlchemyError as e:
|
||||
LOG.error(f"SQLAlchemy error while listing steps: {e}")
|
||||
logger.error(f"SQLAlchemy error while listing steps: {e}")
|
||||
raise
|
||||
except NotFoundError as e:
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.error(f"Unexpected error while listing steps: {e}")
|
||||
logger.error(f"Unexpected error while listing steps: {e}")
|
||||
raise
|
||||
|
||||
async def list_artifacts(
|
||||
self, task_id: str, page: int = 1, per_page: int = 10
|
||||
) -> Tuple[List[Artifact], Pagination]:
|
||||
if self.debug_enabled:
|
||||
LOG.debug(f"Listing artifacts for task_id: {task_id}")
|
||||
logger.debug(f"Listing artifacts for task_id: {task_id}")
|
||||
try:
|
||||
with self.Session() as session:
|
||||
artifacts = (
|
||||
@@ -497,10 +507,10 @@ class AgentDB:
|
||||
convert_to_artifact(artifact) for artifact in artifacts
|
||||
], pagination
|
||||
except SQLAlchemyError as e:
|
||||
LOG.error(f"SQLAlchemy error while listing artifacts: {e}")
|
||||
logger.error(f"SQLAlchemy error while listing artifacts: {e}")
|
||||
raise
|
||||
except NotFoundError as e:
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.error(f"Unexpected error while listing artifacts: {e}")
|
||||
logger.error(f"Unexpected error while listing artifacts: {e}")
|
||||
raise
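For reference, the relocated AgentDB can be exercised directly against a throwaway SQLite file. This sketch only uses the constructor, `create_task`, `create_step` and `list_tasks` signatures visible in the diff above; the database URL and inputs are placeholders.

```
import asyncio

from forge.agent_protocol.database import AgentDB
from forge.agent_protocol.models import StepRequestBody

async def main():
    db = AgentDB("sqlite:///demo.db", debug_enabled=True)

    task = await db.create_task(input="Write 'Washington D.C' to output.txt")
    step = await db.create_step(
        task_id=task.task_id, input=StepRequestBody(input="y"), is_last=True
    )

    tasks, pagination = await db.list_tasks(page=1, per_page=10)
    print(task.task_id, step.step_id, pagination.total_items)

asyncio.run(main())
```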
|
||||
@@ -4,7 +4,7 @@ from datetime import datetime
|
||||
|
||||
import pytest
|
||||
|
||||
from forge.sdk.db import (
|
||||
from forge.agent_protocol.database.db import (
|
||||
AgentDB,
|
||||
ArtifactModel,
|
||||
StepModel,
|
||||
@@ -13,7 +13,13 @@ from forge.sdk.db import (
|
||||
convert_to_step,
|
||||
convert_to_task,
|
||||
)
|
||||
from forge.sdk.model import Artifact, Status, Step, StepRequestBody, Task
|
||||
from forge.agent_protocol.models import (
|
||||
Artifact,
|
||||
Step,
|
||||
StepRequestBody,
|
||||
StepStatus,
|
||||
Task,
|
||||
)
|
||||
from forge.utils.exceptions import NotFoundError as DataNotFoundError
|
||||
|
||||
|
||||
@@ -77,7 +83,7 @@ async def test_step_schema():
|
||||
modified_at=now,
|
||||
name="Write to file",
|
||||
input="Write the words you receive to the file 'output.txt'.",
|
||||
status=Status.created,
|
||||
status=StepStatus.created,
|
||||
output="I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')>",
|
||||
artifacts=[
|
||||
Artifact(
|
||||
@@ -94,7 +100,7 @@ async def test_step_schema():
|
||||
assert step.task_id == "50da533e-3904-4401-8a07-c49adf88b5eb"
|
||||
assert step.step_id == "6bb1801a-fd80-45e8-899a-4dd723cc602e"
|
||||
assert step.name == "Write to file"
|
||||
assert step.status == Status.created
|
||||
assert step.status == StepStatus.created
|
||||
assert (
|
||||
step.output
|
||||
== "I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')>"
|
||||
@@ -157,7 +163,7 @@ async def test_convert_to_step():
|
||||
assert step.task_id == "50da533e-3904-4401-8a07-c49adf88b5eb"
|
||||
assert step.step_id == "6bb1801a-fd80-45e8-899a-4dd723cc602e"
|
||||
assert step.name == "Write to file"
|
||||
assert step.status == Status.created
|
||||
assert step.status == StepStatus.created
|
||||
assert len(step.artifacts) == 1
|
||||
assert step.artifacts[0].artifact_id == "b225e278-8b4c-4f99-a696-8facf19f0e56"
|
||||
assert step.is_last == False
|
||||
@@ -1,9 +1,4 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from fastapi import FastAPI
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .agent import Agent
|
||||
from starlette.types import ASGIApp
|
||||
|
||||
|
||||
class AgentMiddleware:
|
||||
@@ -11,7 +6,7 @@ class AgentMiddleware:
|
||||
Middleware that injects the agent instance into the request scope.
|
||||
"""
|
||||
|
||||
def __init__(self, app: FastAPI, agent: "Agent"):
|
||||
def __init__(self, app: ASGIApp, agent):
|
||||
"""
|
||||
|
||||
Args:
|
||||
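`AgentMiddleware` now accepts any ASGI app and an untyped agent. Below is a hedged sketch of how a middleware like this is typically registered and consumed; the `Agent` import path and the demo route are assumptions for illustration, while the storage and database setup follows the modules introduced in this commit.

```
from pathlib import Path

from fastapi import FastAPI, Request

from forge.agent_protocol.agent import Agent  # assumed module path for the Agent shown above
from forge.agent_protocol.database import AgentDB
from forge.agent_protocol.middlewares import AgentMiddleware
from forge.file_storage.base import FileStorageConfiguration
from forge.file_storage.local import LocalFileStorage

db = AgentDB("sqlite:///demo.db")
workspace = LocalFileStorage(FileStorageConfiguration(root=Path("./workspace")))
agent = Agent(db, workspace)

app = FastAPI()
app.add_middleware(AgentMiddleware, agent=agent)

@app.get("/whoami")
async def whoami(request: Request):
    # The middleware injects the agent into the request scope under the "agent" key.
    return {"agent": type(request["agent"]).__name__}
```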
12
autogpts/forge/forge/agent_protocol/models/__init__.py
Normal file
@@ -0,0 +1,12 @@
|
||||
from .artifact import Artifact, ArtifactUpload
|
||||
from .pagination import Pagination
|
||||
from .task import (
|
||||
Step,
|
||||
StepRequestBody,
|
||||
StepStatus,
|
||||
Task,
|
||||
TaskArtifactsListResponse,
|
||||
TaskListResponse,
|
||||
TaskRequestBody,
|
||||
TaskStepsListResponse,
|
||||
)
|
||||
47
autogpts/forge/forge/agent_protocol/models/artifact.py
Normal file
@@ -0,0 +1,47 @@
|
||||
from datetime import datetime
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class ArtifactUpload(BaseModel):
|
||||
file: str = Field(..., description="File to upload.", format="binary")
|
||||
relative_path: str = Field(
|
||||
...,
|
||||
description="Relative path of the artifact in the agent's workspace.",
|
||||
example="python/code",
|
||||
)
|
||||
|
||||
|
||||
class Artifact(BaseModel):
|
||||
created_at: datetime = Field(
|
||||
...,
|
||||
description="The creation datetime of the task.",
|
||||
example="2023-01-01T00:00:00Z",
|
||||
json_encoders={datetime: lambda v: v.isoformat()},
|
||||
)
|
||||
modified_at: datetime = Field(
|
||||
...,
|
||||
description="The modification datetime of the task.",
|
||||
example="2023-01-01T00:00:00Z",
|
||||
json_encoders={datetime: lambda v: v.isoformat()},
|
||||
)
|
||||
artifact_id: str = Field(
|
||||
...,
|
||||
description="ID of the artifact.",
|
||||
example="b225e278-8b4c-4f99-a696-8facf19f0e56",
|
||||
)
|
||||
agent_created: bool = Field(
|
||||
...,
|
||||
description="Whether the artifact has been created by the agent.",
|
||||
example=False,
|
||||
)
|
||||
relative_path: str = Field(
|
||||
...,
|
||||
description="Relative path of the artifact in the agents workspace.",
|
||||
example="/my_folder/my_other_folder/",
|
||||
)
|
||||
file_name: str = Field(
|
||||
...,
|
||||
description="Filename of the artifact.",
|
||||
example="main.py",
|
||||
)
|
||||
8
autogpts/forge/forge/agent_protocol/models/pagination.py
Normal file
@@ -0,0 +1,8 @@
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class Pagination(BaseModel):
|
||||
total_items: int = Field(..., description="Total number of items.", example=42)
|
||||
total_pages: int = Field(..., description="Total number of pages.", example=97)
|
||||
current_page: int = Field(..., description="Current_page page number.", example=1)
|
||||
page_size: int = Field(..., description="Number of items per page.", example=25)
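`Pagination` is the metadata object that the `AgentDB.list_*` methods return next to their results. As an illustration (the helper below is not part of this commit, and the `ceil` arithmetic is an assumption based on the `math` import and the return types shown in the database diff):

```
import math

from forge.agent_protocol.models.pagination import Pagination

def paginate(total_items: int, page: int, page_size: int) -> Pagination:
    # Derive page metadata the way a list_* method typically would.
    return Pagination(
        total_items=total_items,
        total_pages=math.ceil(total_items / page_size),
        current_page=page,
        page_size=page_size,
    )

print(paginate(total_items=42, page=1, page_size=10))  # total_pages == 5
```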
|
||||
@@ -1,7 +1,3 @@
|
||||
# generated by fastapi-codegen:
|
||||
# filename: ../../postman/schemas/openapi.yaml
|
||||
# timestamp: 2023-08-25T10:36:11+00:00
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import datetime
|
||||
@@ -10,60 +6,8 @@ from typing import List, Optional
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class ArtifactUpload(BaseModel):
|
||||
file: str = Field(..., description="File to upload.", format="binary")
|
||||
relative_path: str = Field(
|
||||
...,
|
||||
description="Relative path of the artifact in the agent's workspace.",
|
||||
example="python/code",
|
||||
)
|
||||
|
||||
|
||||
class Pagination(BaseModel):
|
||||
total_items: int = Field(..., description="Total number of items.", example=42)
|
||||
total_pages: int = Field(..., description="Total number of pages.", example=97)
|
||||
current_page: int = Field(..., description="Current_page page number.", example=1)
|
||||
page_size: int = Field(..., description="Number of items per page.", example=25)
|
||||
|
||||
|
||||
class Artifact(BaseModel):
|
||||
created_at: datetime = Field(
|
||||
...,
|
||||
description="The creation datetime of the task.",
|
||||
example="2023-01-01T00:00:00Z",
|
||||
json_encoders={datetime: lambda v: v.isoformat()},
|
||||
)
|
||||
modified_at: datetime = Field(
|
||||
...,
|
||||
description="The modification datetime of the task.",
|
||||
example="2023-01-01T00:00:00Z",
|
||||
json_encoders={datetime: lambda v: v.isoformat()},
|
||||
)
|
||||
artifact_id: str = Field(
|
||||
...,
|
||||
description="ID of the artifact.",
|
||||
example="b225e278-8b4c-4f99-a696-8facf19f0e56",
|
||||
)
|
||||
agent_created: bool = Field(
|
||||
...,
|
||||
description="Whether the artifact has been created by the agent.",
|
||||
example=False,
|
||||
)
|
||||
relative_path: str = Field(
|
||||
...,
|
||||
description="Relative path of the artifact in the agents workspace.",
|
||||
example="/my_folder/my_other_folder/",
|
||||
)
|
||||
file_name: str = Field(
|
||||
...,
|
||||
description="Filename of the artifact.",
|
||||
example="main.py",
|
||||
)
|
||||
|
||||
|
||||
class StepOutput(BaseModel):
|
||||
pass
|
||||
from .artifact import Artifact
|
||||
from .pagination import Pagination
|
||||
|
||||
|
||||
class TaskRequestBody(BaseModel):
|
||||
@@ -116,7 +60,7 @@ class StepRequestBody(BaseModel):
|
||||
additional_input: Optional[dict] = None
|
||||
|
||||
|
||||
class Status(Enum):
|
||||
class StepStatus(Enum):
|
||||
created = "created"
|
||||
running = "running"
|
||||
completed = "completed"
|
||||
@@ -148,7 +92,7 @@ class Step(StepRequestBody):
|
||||
name: Optional[str] = Field(
|
||||
None, description="The name of the task step.", example="Write to file"
|
||||
)
|
||||
status: Status = Field(
|
||||
status: StepStatus = Field(
|
||||
..., description="The status of the task step.", example="created"
|
||||
)
|
||||
output: Optional[str] = Field(
|
||||
@@ -1,13 +0,0 @@
|
||||
import os
|
||||
|
||||
from forge.agent import ForgeAgent
|
||||
from forge.sdk import LocalWorkspace
|
||||
|
||||
from .db import ForgeDatabase
|
||||
|
||||
database_name = os.getenv("DATABASE_STRING")
|
||||
workspace = LocalWorkspace(os.getenv("AGENT_WORKSPACE"))
|
||||
database = ForgeDatabase(database_name, debug_enabled=False)
|
||||
agent = ForgeAgent(database=database, workspace=workspace)
|
||||
|
||||
app = agent.get_agent_app()
|
||||
@@ -1,10 +1,9 @@
|
||||
import dataclasses
|
||||
from pydantic import BaseModel
|
||||
|
||||
from forge.models.json_schema import JSONSchema
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class CommandParameter:
|
||||
class CommandParameter(BaseModel):
|
||||
name: str
|
||||
spec: JSONSchema
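`CommandParameter` switches from a dataclass to a pydantic `BaseModel`, which gives it validation and serialization for free. A small, hedged illustration follows; the import path of `CommandParameter` and the `description` field on `JSONSchema` are assumptions, not shown in this hunk.

```
from forge.command.parameter import CommandParameter  # assumed module path
from forge.models.json_schema import JSONSchema

param = CommandParameter(
    name="filename",
    spec=JSONSchema(description="Path of the file to write"),  # assumed JSONSchema field
)
print(param.dict())  # pydantic serialization, not available on the old dataclass
```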
|
||||
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Callable, Generic, Iterator, Optional
|
||||
|
||||
from forge.agent.protocols import AfterExecute, AfterParse, MessageProvider
|
||||
@@ -18,7 +20,7 @@ class ActionHistoryComponent(MessageProvider, AfterParse, AfterExecute, Generic[
|
||||
event_history: EpisodicActionHistory[AP],
|
||||
max_tokens: int,
|
||||
count_tokens: Callable[[str], int],
|
||||
legacy_config: "Config",
|
||||
legacy_config: Config,
|
||||
llm_provider: ChatModelProvider,
|
||||
) -> None:
|
||||
self.event_history = event_history
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
from .code_executor import (
|
||||
ALLOWLIST_CONTROL,
|
||||
DENYLIST_CONTROL,
|
||||
CodeExecutionError,
|
||||
CodeExecutorComponent,
|
||||
is_docker_available,
|
||||
we_are_running_in_a_docker_container,
|
||||
|
||||
@@ -16,7 +16,6 @@ from forge.config.config import Config
|
||||
from forge.file_storage import FileStorage
|
||||
from forge.models.json_schema import JSONSchema
|
||||
from forge.utils.exceptions import (
|
||||
CodeExecutionError,
|
||||
CommandExecutionError,
|
||||
InvalidArgumentError,
|
||||
OperationNotAllowedError,
|
||||
@@ -51,6 +50,10 @@ def is_docker_available() -> bool:
|
||||
return False
|
||||
|
||||
|
||||
class CodeExecutionError(CommandExecutionError):
|
||||
"""The operation (an attempt to run arbitrary code) returned an error"""
|
||||
|
||||
|
||||
class CodeExecutorComponent(CommandProvider):
|
||||
"""Provides commands to execute Python code and shell commands."""
|
||||
|
||||
|
||||
@@ -27,6 +27,42 @@ class SystemComponent(DirectiveProvider, MessageProvider, CommandProvider):
|
||||
f"It takes money to let you run. "
|
||||
f"Your API budget is ${self.profile.api_budget:.3f}"
|
||||
)
|
||||
yield "Exclusively use the commands listed below."
|
||||
yield (
|
||||
"You can only act proactively, and are unable to start background jobs or "
|
||||
"set up webhooks for yourself. "
|
||||
"Take this into account when planning your actions."
|
||||
)
|
||||
yield (
|
||||
"You are unable to interact with physical objects. "
|
||||
"If this is absolutely necessary to fulfill a task or objective or "
|
||||
"to complete a step, you must ask the user to do it for you. "
|
||||
"If the user refuses this, and there is no other way to achieve your goals, "
|
||||
"you must terminate to avoid wasting time and energy."
|
||||
)
|
||||
|
||||
def get_resources(self) -> Iterator[str]:
|
||||
yield (
|
||||
"You are a Large Language Model, trained on millions of pages of text, "
|
||||
"including a lot of factual knowledge. Make use of this factual knowledge "
|
||||
"to avoid unnecessary gathering of information."
|
||||
)
|
||||
|
||||
def get_best_practices(self) -> Iterator[str]:
|
||||
yield (
|
||||
"Continuously review and analyze your actions to ensure "
|
||||
"you are performing to the best of your abilities."
|
||||
)
|
||||
yield "Constructively self-criticize your big-picture behavior constantly."
|
||||
yield "Reflect on past decisions and strategies to refine your approach."
|
||||
yield (
|
||||
"Every command has a cost, so be smart and efficient. "
|
||||
"Aim to complete tasks in the least number of steps."
|
||||
)
|
||||
yield (
|
||||
"Only make use of your information gathering abilities to find "
|
||||
"information that you don't yet have knowledge of."
|
||||
)
|
||||
|
||||
def get_messages(self) -> Iterator[ChatMessage]:
|
||||
# Clock
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
from .search import WebSearchComponent
|
||||
from .selenium import BrowsingError, WebSeleniumComponent
|
||||
from .selenium import BrowsingError, TooMuchOutputError, WebSeleniumComponent
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
import yaml
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from forge.utils.yaml_validator import validate_yaml_file
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -22,26 +20,7 @@ class AIDirectives(BaseModel):
|
||||
constraints: list[str] = Field(default_factory=list)
|
||||
best_practices: list[str] = Field(default_factory=list)
|
||||
|
||||
@staticmethod
|
||||
def from_file(prompt_settings_file: Path) -> "AIDirectives":
|
||||
from forge.logging.helpers import request_user_double_check
|
||||
|
||||
(validated, message) = validate_yaml_file(prompt_settings_file)
|
||||
if not validated:
|
||||
logger.error(message, extra={"title": "FAILED FILE VALIDATION"})
|
||||
request_user_double_check()
|
||||
raise RuntimeError(f"File validation failed: {message}")
|
||||
|
||||
with open(prompt_settings_file, encoding="utf-8") as file:
|
||||
config_params = yaml.load(file, Loader=yaml.SafeLoader)
|
||||
|
||||
return AIDirectives(
|
||||
constraints=config_params.get("constraints", []),
|
||||
resources=config_params.get("resources", []),
|
||||
best_practices=config_params.get("best_practices", []),
|
||||
)
|
||||
|
||||
def __add__(self, other: "AIDirectives") -> "AIDirectives":
|
||||
def __add__(self, other: AIDirectives) -> AIDirectives:
|
||||
return AIDirectives(
|
||||
resources=self.resources + other.resources,
|
||||
constraints=self.constraints + other.constraints,
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
from pathlib import Path
|
||||
|
||||
import yaml
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
@@ -19,50 +16,3 @@ class AIProfile(BaseModel):
|
||||
ai_role: str = ""
|
||||
ai_goals: list[str] = Field(default_factory=list[str])
|
||||
api_budget: float = 0.0
|
||||
|
||||
@staticmethod
|
||||
def load(ai_settings_file: str | Path) -> "AIProfile":
|
||||
"""
|
||||
Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget)
|
||||
loaded from yaml file if it exists, else returns class with no parameters.
|
||||
|
||||
Parameters:
|
||||
ai_settings_file (Path): The path to the config yaml file.
|
||||
|
||||
Returns:
|
||||
cls (object): An instance of given cls object
|
||||
"""
|
||||
|
||||
try:
|
||||
with open(ai_settings_file, encoding="utf-8") as file:
|
||||
config_params = yaml.load(file, Loader=yaml.SafeLoader) or {}
|
||||
except FileNotFoundError:
|
||||
config_params = {}
|
||||
|
||||
ai_name = config_params.get("ai_name", "")
|
||||
ai_role = config_params.get("ai_role", "")
|
||||
ai_goals = [
|
||||
str(goal).strip("{}").replace("'", "").replace('"', "")
|
||||
if isinstance(goal, dict)
|
||||
else str(goal)
|
||||
for goal in config_params.get("ai_goals", [])
|
||||
]
|
||||
api_budget = config_params.get("api_budget", 0.0)
|
||||
|
||||
return AIProfile(
|
||||
ai_name=ai_name, ai_role=ai_role, ai_goals=ai_goals, api_budget=api_budget
|
||||
)
|
||||
|
||||
def save(self, ai_settings_file: str | Path) -> None:
|
||||
"""
|
||||
Saves the class parameters to the specified file yaml file path as a yaml file.
|
||||
|
||||
Parameters:
|
||||
ai_settings_file (Path): The path to the config yaml file.
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
|
||||
with open(ai_settings_file, "w", encoding="utf-8") as file:
|
||||
yaml.dump(self.dict(), file, allow_unicode=True)
|
||||
|
||||
@@ -22,9 +22,7 @@ from forge.speech.say import TTSConfig
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
PROJECT_ROOT = Path(forge.__file__).parent.parent
|
||||
AI_SETTINGS_FILE = Path("ai_settings.yaml")
|
||||
AZURE_CONFIG_FILE = Path("azure.yaml")
|
||||
PROMPT_SETTINGS_FILE = Path("prompt_settings.yaml")
|
||||
|
||||
GPT_4_MODEL = OpenAIModelName.GPT4
|
||||
GPT_3_MODEL = OpenAIModelName.GPT3
|
||||
@@ -57,15 +55,6 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
|
||||
##########################
|
||||
# Agent Control Settings #
|
||||
##########################
|
||||
# Paths
|
||||
ai_settings_file: Path = UserConfigurable(
|
||||
default=AI_SETTINGS_FILE, from_env="AI_SETTINGS_FILE"
|
||||
)
|
||||
prompt_settings_file: Path = UserConfigurable(
|
||||
default=PROMPT_SETTINGS_FILE,
|
||||
from_env="PROMPT_SETTINGS_FILE",
|
||||
)
|
||||
|
||||
# Model configuration
|
||||
fast_llm: ModelName = UserConfigurable(
|
||||
default=OpenAIModelName.GPT3,
|
||||
@@ -218,8 +207,6 @@ class ConfigBuilder(Configurable[Config]):
|
||||
|
||||
# Make relative paths absolute
|
||||
for k in {
|
||||
"ai_settings_file", # TODO: deprecate or repurpose
|
||||
"prompt_settings_file", # TODO: deprecate or repurpose
|
||||
"azure_config_file", # TODO: move from project root
|
||||
}:
|
||||
setattr(config, k, project_root / getattr(config, k))
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Text processing functions"""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import math
|
||||
@@ -56,7 +57,7 @@ def chunk_content(
|
||||
async def summarize_text(
|
||||
text: str,
|
||||
llm_provider: ChatModelProvider,
|
||||
config: "Config",
|
||||
config: Config,
|
||||
question: Optional[str] = None,
|
||||
instruction: Optional[str] = None,
|
||||
) -> tuple[str, list[tuple[str, str]]]:
|
||||
@@ -89,7 +90,7 @@ async def extract_information(
|
||||
source_text: str,
|
||||
topics_of_interest: list[str],
|
||||
llm_provider: ChatModelProvider,
|
||||
config: "Config",
|
||||
config: Config,
|
||||
) -> list[str]:
|
||||
fmt_topics_list = "\n".join(f"* {topic}." for topic in topics_of_interest)
|
||||
instruction = (
|
||||
@@ -113,7 +114,7 @@ async def _process_text(
|
||||
text: str,
|
||||
instruction: str,
|
||||
llm_provider: ChatModelProvider,
|
||||
config: "Config",
|
||||
config: Config,
|
||||
output_type: type[str | list[str]] = str,
|
||||
) -> tuple[str, list[tuple[str, str]]] | list[str]:
|
||||
"""Process text using the OpenAI API for summarization or information extraction
|
||||
@@ -122,7 +123,7 @@ async def _process_text(
|
||||
text (str): The text to process.
|
||||
instruction (str): Additional instruction for processing.
|
||||
llm_provider: LLM provider to use.
|
||||
config ("Config"): The global application config.
|
||||
config (Config): The global application config.
|
||||
output_type: `str` for summaries or `list[str]` for piece-wise info extraction.
|
||||
|
||||
Returns:
|
||||
@@ -220,7 +221,7 @@ async def _process_text(
|
||||
|
||||
def split_text(
|
||||
text: str,
|
||||
config: "Config",
|
||||
config: Config,
|
||||
max_chunk_length: int,
|
||||
tokenizer: ModelTokenizer,
|
||||
with_overlap: bool = True,
|
||||
@@ -230,7 +231,7 @@ def split_text(
|
||||
|
||||
Args:
|
||||
text (str): The text to split.
|
||||
config ("Config"): "Config" object containing the Spacy model setting.
|
||||
config (Config): Config object containing the Spacy model setting.
|
||||
max_chunk_length (int, optional): The maximum length of a chunk.
|
||||
tokenizer (ModelTokenizer): Tokenizer to use for determining chunk length.
|
||||
with_overlap (bool, optional): Whether to allow overlap between chunks.
|
||||
|
||||
@@ -1,143 +0,0 @@
|
||||
import datetime
|
||||
import uuid
|
||||
|
||||
from sqlalchemy import Column, DateTime, String
|
||||
from sqlalchemy.exc import SQLAlchemyError
|
||||
|
||||
from .sdk import AgentDB, Base, ForgeLogger, NotFoundError
|
||||
|
||||
LOG = ForgeLogger(__name__)
|
||||
|
||||
|
||||
class ChatModel(Base):
|
||||
__tablename__ = "chat"
|
||||
msg_id = Column(String, primary_key=True, index=True)
|
||||
task_id = Column(String)
|
||||
role = Column(String)
|
||||
content = Column(String)
|
||||
created_at = Column(DateTime, default=datetime.datetime.utcnow)
|
||||
modified_at = Column(
|
||||
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
|
||||
)
|
||||
|
||||
|
||||
class ActionModel(Base):
|
||||
__tablename__ = "action"
|
||||
action_id = Column(String, primary_key=True, index=True)
|
||||
task_id = Column(String)
|
||||
name = Column(String)
|
||||
args = Column(String)
|
||||
created_at = Column(DateTime, default=datetime.datetime.utcnow)
|
||||
modified_at = Column(
|
||||
DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow
|
||||
)
|
||||
|
||||
|
||||
class ForgeDatabase(AgentDB):
|
||||
async def add_chat_history(self, task_id, messages):
|
||||
for message in messages:
|
||||
await self.add_chat_message(task_id, message["role"], message["content"])
|
||||
|
||||
async def add_chat_message(self, task_id, role, content):
|
||||
if self.debug_enabled:
|
||||
LOG.debug("Creating new task")
|
||||
try:
|
||||
with self.Session() as session:
|
||||
mew_msg = ChatModel(
|
||||
msg_id=str(uuid.uuid4()),
|
||||
task_id=task_id,
|
||||
role=role,
|
||||
content=content,
|
||||
)
|
||||
session.add(mew_msg)
|
||||
session.commit()
|
||||
session.refresh(mew_msg)
|
||||
if self.debug_enabled:
|
||||
LOG.debug(
|
||||
f"Created new Chat message with task_id: {mew_msg.msg_id}"
|
||||
)
|
||||
return mew_msg
|
||||
except SQLAlchemyError as e:
|
||||
LOG.error(f"SQLAlchemy error while creating task: {e}")
|
||||
raise
|
||||
except NotFoundError as e:
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.error(f"Unexpected error while creating task: {e}")
|
||||
raise
|
||||
|
||||
async def get_chat_history(self, task_id):
|
||||
if self.debug_enabled:
|
||||
LOG.debug(f"Getting chat history with task_id: {task_id}")
|
||||
try:
|
||||
with self.Session() as session:
|
||||
if messages := (
|
||||
session.query(ChatModel)
|
||||
.filter(ChatModel.task_id == task_id)
|
||||
.order_by(ChatModel.created_at)
|
||||
.all()
|
||||
):
|
||||
return [{"role": m.role, "content": m.content} for m in messages]
|
||||
|
||||
else:
|
||||
LOG.error(f"Chat history not found with task_id: {task_id}")
|
||||
raise NotFoundError("Chat history not found")
|
||||
except SQLAlchemyError as e:
|
||||
LOG.error(f"SQLAlchemy error while getting chat history: {e}")
|
||||
raise
|
||||
except NotFoundError as e:
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.error(f"Unexpected error while getting chat history: {e}")
|
||||
raise
|
||||
|
||||
async def create_action(self, task_id, name, args):
|
||||
try:
|
||||
with self.Session() as session:
|
||||
new_action = ActionModel(
|
||||
action_id=str(uuid.uuid4()),
|
||||
task_id=task_id,
|
||||
name=name,
|
||||
args=str(args),
|
||||
)
|
||||
session.add(new_action)
|
||||
session.commit()
|
||||
session.refresh(new_action)
|
||||
if self.debug_enabled:
|
||||
LOG.debug(
|
||||
f"Created new Action with task_id: {new_action.action_id}"
|
||||
)
|
||||
return new_action
|
||||
except SQLAlchemyError as e:
|
||||
LOG.error(f"SQLAlchemy error while creating action: {e}")
|
||||
raise
|
||||
except NotFoundError as e:
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.error(f"Unexpected error while creating action: {e}")
|
||||
raise
|
||||
|
||||
async def get_action_history(self, task_id):
|
||||
if self.debug_enabled:
|
||||
LOG.debug(f"Getting action history with task_id: {task_id}")
|
||||
try:
|
||||
with self.Session() as session:
|
||||
if actions := (
|
||||
session.query(ActionModel)
|
||||
.filter(ActionModel.task_id == task_id)
|
||||
.order_by(ActionModel.created_at)
|
||||
.all()
|
||||
):
|
||||
return [{"name": a.name, "args": a.args} for a in actions]
|
||||
|
||||
else:
|
||||
LOG.error(f"Action history not found with task_id: {task_id}")
|
||||
raise NotFoundError("Action history not found")
|
||||
except SQLAlchemyError as e:
|
||||
LOG.error(f"SQLAlchemy error while getting action history: {e}")
|
||||
raise
|
||||
except NotFoundError as e:
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.error(f"Unexpected error while getting action history: {e}")
|
||||
raise
|
||||
@@ -1,61 +0,0 @@
|
||||
from pathlib import Path
|
||||
|
||||
from litellm import AuthenticationError, InvalidRequestError, ModelResponse, acompletion
|
||||
from openai import OpenAI
|
||||
from openai.types import CreateEmbeddingResponse
|
||||
from openai.types.audio import Transcription
|
||||
from tenacity import retry, stop_after_attempt, wait_random_exponential
|
||||
|
||||
from .sdk.forge_log import ForgeLogger
|
||||
|
||||
LOG = ForgeLogger(__name__)
|
||||
|
||||
|
||||
@retry(wait=wait_random_exponential(min=1, max=40), stop=stop_after_attempt(3))
|
||||
async def chat_completion_request(model, messages, **kwargs) -> ModelResponse:
|
||||
"""Generate a response to a list of messages using OpenAI's API"""
|
||||
try:
|
||||
kwargs["model"] = model
|
||||
kwargs["messages"] = messages
|
||||
|
||||
resp = await acompletion(**kwargs)
|
||||
return resp
|
||||
except AuthenticationError as e:
|
||||
LOG.exception("Authentication Error")
|
||||
raise
|
||||
except InvalidRequestError as e:
|
||||
LOG.exception("Invalid Request Error")
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.error("Unable to generate ChatCompletion response")
|
||||
LOG.error(f"Exception: {e}")
|
||||
raise
|
||||
|
||||
|
||||
@retry(wait=wait_random_exponential(min=1, max=40), stop=stop_after_attempt(3))
|
||||
async def create_embedding_request(
|
||||
messages, model="text-embedding-ada-002"
|
||||
) -> CreateEmbeddingResponse:
|
||||
"""Generate an embedding for a list of messages using OpenAI's API"""
|
||||
try:
|
||||
return OpenAI().embeddings.create(
|
||||
input=[f"{m['role']}: {m['content']}" for m in messages],
|
||||
model=model,
|
||||
)
|
||||
except Exception as e:
|
||||
LOG.error("Unable to generate ChatCompletion response")
|
||||
LOG.error(f"Exception: {e}")
|
||||
raise
|
||||
|
||||
|
||||
@retry(wait=wait_random_exponential(min=1, max=40), stop=stop_after_attempt(3))
|
||||
async def transcribe_audio(audio_file: Path) -> Transcription:
|
||||
"""Transcribe an audio file using OpenAI's API"""
|
||||
try:
|
||||
return OpenAI().audio.transcriptions.create(
|
||||
model="whisper-1", file=audio_file.open(mode="rb")
|
||||
)
|
||||
except Exception as e:
|
||||
LOG.error("Unable to generate ChatCompletion response")
|
||||
LOG.error(f"Exception: {e}")
|
||||
raise
|
||||
@@ -1,5 +0,0 @@
|
||||
DEFAULT_TRIGGERING_PROMPT = (
|
||||
"Determine exactly one command to use next based on the given goals "
|
||||
"and the progress you have made so far, "
|
||||
"and respond using the JSON schema specified previously:"
|
||||
)
|
||||
@@ -1,11 +1,9 @@
|
||||
from .config import configure_logging
|
||||
from .filters import BelowLevelFilter
|
||||
from .formatters import FancyConsoleFormatter
|
||||
from .helpers import user_friendly_output
|
||||
|
||||
__all__ = [
|
||||
"configure_logging",
|
||||
"BelowLevelFilter",
|
||||
"FancyConsoleFormatter",
|
||||
"user_friendly_output",
|
||||
]
|
||||
|
||||
@@ -17,7 +17,7 @@ if TYPE_CHECKING:
|
||||
|
||||
from .filters import BelowLevelFilter
|
||||
from .formatters import ForgeFormatter, StructuredLoggingFormatter
|
||||
from .handlers import TTSHandler, TypingConsoleHandler
|
||||
from .handlers import TTSHandler
|
||||
|
||||
LOG_DIR = Path(__file__).parent.parent.parent / "logs"
|
||||
LOG_FILE = "activity.log"
|
||||
@@ -153,22 +153,6 @@ def configure_logging(
|
||||
stderr.setFormatter(console_formatter)
|
||||
log_handlers += [stdout, stderr]
|
||||
|
||||
# Console output handler which simulates typing
|
||||
typing_console_handler = TypingConsoleHandler(stream=sys.stdout)
|
||||
typing_console_handler.setLevel(logging.INFO)
|
||||
typing_console_handler.setFormatter(console_formatter)
|
||||
|
||||
# User friendly output logger (text + speech)
|
||||
user_friendly_output_logger = logging.getLogger(USER_FRIENDLY_OUTPUT_LOGGER)
|
||||
user_friendly_output_logger.setLevel(logging.INFO)
|
||||
user_friendly_output_logger.addHandler(
|
||||
typing_console_handler if not config.plain_console_output else stdout
|
||||
)
|
||||
if tts_config:
|
||||
user_friendly_output_logger.addHandler(TTSHandler(tts_config))
|
||||
user_friendly_output_logger.addHandler(stderr)
|
||||
user_friendly_output_logger.propagate = False
|
||||
|
||||
# File output handlers
|
||||
if config.log_file_format is not None:
|
||||
if config.level < logging.ERROR:
|
||||
@@ -184,7 +168,6 @@ def configure_logging(
|
||||
activity_log_handler.setLevel(config.level)
|
||||
activity_log_handler.setFormatter(file_output_formatter)
|
||||
log_handlers += [activity_log_handler]
|
||||
user_friendly_output_logger.addHandler(activity_log_handler)
|
||||
|
||||
# ERROR log file handler
|
||||
error_log_handler = logging.FileHandler(
|
||||
@@ -193,7 +176,6 @@ def configure_logging(
|
||||
error_log_handler.setLevel(logging.ERROR)
|
||||
error_log_handler.setFormatter(ForgeFormatter(DEBUG_LOG_FORMAT, no_color=True))
|
||||
log_handlers += [error_log_handler]
|
||||
user_friendly_output_logger.addHandler(error_log_handler)
|
||||
|
||||
# Configure the root logger
|
||||
logging.basicConfig(
|
||||
|
||||
@@ -2,9 +2,6 @@ from __future__ import annotations

import json
import logging
import random
import re
import time
from typing import TYPE_CHECKING

from forge.logging.utils import remove_color_codes
@@ -14,39 +11,6 @@ if TYPE_CHECKING:
    from forge.speech import TTSConfig


class TypingConsoleHandler(logging.StreamHandler):
    """Output stream to console using simulated typing"""

    # Typing speed settings in WPS (Words Per Second)
    MIN_WPS = 25
    MAX_WPS = 100

    def emit(self, record: logging.LogRecord) -> None:
        min_typing_interval = 1 / TypingConsoleHandler.MAX_WPS
        max_typing_interval = 1 / TypingConsoleHandler.MIN_WPS

        msg = self.format(record)
        try:
            # Split without discarding whitespace
            words = re.findall(r"\S+\s*", msg)

            for i, word in enumerate(words):
                self.stream.write(word)
                self.flush()
                if i >= len(words) - 1:
                    self.stream.write(self.terminator)
                    self.flush()
                    break

                interval = random.uniform(min_typing_interval, max_typing_interval)
                # type faster after each word
                min_typing_interval = min_typing_interval * 0.95
                max_typing_interval = max_typing_interval * 0.95
                time.sleep(interval)
        except Exception:
            self.handleError(record)


class TTSHandler(logging.Handler):
    """Output messages to the configured TTS engine (if any)"""


@@ -1,66 +0,0 @@
import logging
from typing import Any, Optional

from colorama import Fore

from .config import SPEECH_OUTPUT_LOGGER, USER_FRIENDLY_OUTPUT_LOGGER


def user_friendly_output(
    message: str,
    level: int = logging.INFO,
    title: str = "",
    title_color: str = "",
    preserve_message_color: bool = False,
) -> None:
    """Outputs a message to the user in a user-friendly way.

    This function outputs on up to two channels:
    1. The console, in typewriter style
    2. Text To Speech, if configured
    """
    logger = logging.getLogger(USER_FRIENDLY_OUTPUT_LOGGER)

    logger.log(
        level,
        message,
        extra={
            "title": title,
            "title_color": title_color,
            "preserve_color": preserve_message_color,
        },
    )


def print_attribute(
    title: str, value: Any, title_color: str = Fore.GREEN, value_color: str = ""
) -> None:
    logger = logging.getLogger()
    logger.info(
        str(value),
        extra={
            "title": f"{title.rstrip(':')}:",
            "title_color": title_color,
            "color": value_color,
        },
    )


def request_user_double_check(additionalText: Optional[str] = None) -> None:
    if not additionalText:
        additionalText = (
            "Please ensure you've setup and configured everything correctly. "
            "Read https://docs.agpt.co/autogpt/setup/ to double check. "
            "You can also create a github issue or join the discord and ask there!"
        )

    user_friendly_output(
        additionalText,
        level=logging.WARN,
        title="DOUBLE CHECK CONFIGURATION",
        preserve_message_color=True,
    )


def speak(message: str, level: int = logging.INFO) -> None:
    logging.getLogger(SPEECH_OUTPUT_LOGGER).log(level, message)
@@ -1,4 +1,8 @@
import logging
import re
from typing import Any

from colorama import Fore


def remove_color_codes(s: str) -> str:
@@ -7,3 +11,23 @@ def remove_color_codes(s: str) -> str:

def fmt_kwargs(kwargs: dict) -> str:
    return ", ".join(f"{n}={repr(v)}" for n, v in kwargs.items())


def print_attribute(
    title: str, value: Any, title_color: str = Fore.GREEN, value_color: str = ""
) -> None:
    logger = logging.getLogger()
    logger.info(
        str(value),
        extra={
            "title": f"{title.rstrip(':')}:",
            "title_color": title_color,
            "color": value_color,
        },
    )


def speak(message: str, level: int = logging.INFO) -> None:
    from .config import SPEECH_OUTPUT_LOGGER

    logging.getLogger(SPEECH_OUTPUT_LOGGER).log(level, message)

@@ -1,2 +0,0 @@
from .chroma_memstore import ChromaMemStore
from .memstore import MemStore
@@ -1,161 +0,0 @@
import hashlib

import chromadb
from chromadb.config import Settings

from .memstore import MemStore


class ChromaMemStore:
    """
    A class used to represent a Memory Store
    """

    def __init__(self, store_path: str):
        """
        Initialize the MemStore with a given store path.

        Args:
            store_path (str): The path to the store.
        """
        self.client = chromadb.PersistentClient(
            path=store_path, settings=Settings(anonymized_telemetry=False)
        )

    def add(self, task_id: str, document: str, metadatas: dict) -> None:
        """
        Add a document to the MemStore.

        Args:
            task_id (str): The ID of the task.
            document (str): The document to be added.
            metadatas (dict): The metadata of the document.
        """
        doc_id = hashlib.sha256(document.encode()).hexdigest()[:20]
        collection = self.client.get_or_create_collection(task_id)
        collection.add(documents=[document], metadatas=[metadatas], ids=[doc_id])

    def query(
        self,
        task_id: str,
        query: str,
        filters: dict = None,
        document_search: dict = None,
    ) -> dict:
        """
        Query the MemStore.

        Args:
            task_id (str): The ID of the task.
            query (str): The query string.
            filters (dict, optional): The filters to be applied. Defaults to None.
            search_string (str, optional): The search string. Defaults to None.

        Returns:
            dict: The query results.
        """
        collection = self.client.get_or_create_collection(task_id)

        kwargs = {
            "query_texts": [query],
            "n_results": 10,
        }

        if filters:
            kwargs["where"] = filters

        if document_search:
            kwargs["where_document"] = document_search

        return collection.query(**kwargs)

    def get(self, task_id: str, doc_ids: list = None, filters: dict = None) -> dict:
        """
        Get documents from the MemStore.

        Args:
            task_id (str): The ID of the task.
            doc_ids (list, optional): The IDs of the documents to be retrieved. Defaults to None.
            filters (dict, optional): The filters to be applied. Defaults to None.

        Returns:
            dict: The retrieved documents.
        """
        collection = self.client.get_or_create_collection(task_id)
        kwargs = {}
        if doc_ids:
            kwargs["ids"] = doc_ids
        if filters:
            kwargs["where"] = filters
        return collection.get(**kwargs)

    def update(self, task_id: str, doc_ids: list, documents: list, metadatas: list):
        """
        Update documents in the MemStore.

        Args:
            task_id (str): The ID of the task.
            doc_ids (list): The IDs of the documents to be updated.
            documents (list): The updated documents.
            metadatas (list): The updated metadata.
        """
        collection = self.client.get_or_create_collection(task_id)
        collection.update(ids=doc_ids, documents=documents, metadatas=metadatas)

    def delete(self, task_id: str, doc_id: str):
        """
        Delete a document from the MemStore.

        Args:
            task_id (str): The ID of the task.
            doc_id (str): The ID of the document to be deleted.
        """
        collection = self.client.get_or_create_collection(task_id)
        collection.delete(ids=[doc_id])


if __name__ == "__main__":
    print("#############################################")
    # Initialize MemStore
    mem = ChromaMemStore(".agent_mem_store")

    # Test add function
    task_id = "test_task"
    document = "This is a another new test document."
    metadatas = {"metadata": "test_metadata"}
    mem.add(task_id, document, metadatas)

    task_id = "test_task"
    document = "The quick brown fox jumps over the lazy dog."
    metadatas = {"metadata": "test_metadata"}
    mem.add(task_id, document, metadatas)

    task_id = "test_task"
    document = "AI is a new technology that will change the world."
    metadatas = {"timestamp": 1623936000}
    mem.add(task_id, document, metadatas)

    doc_id = hashlib.sha256(document.encode()).hexdigest()[:20]
    # Test query function
    query = "test"
    filters = {"metadata": {"$eq": "test"}}
    search_string = {"$contains": "test"}
    doc_ids = [doc_id]
    documents = ["This is an updated test document."]
    updated_metadatas = {"metadata": "updated_test_metadata"}

    print("Query:")
    print(mem.query(task_id, query))

    # Test get function
    print("Get:")

    print(mem.get(task_id))

    # Test update function
    print("Update:")
    print(mem.update(task_id, doc_ids, documents, updated_metadatas))

    print("Delete:")
    # Test delete function
    print(mem.delete(task_id, doc_ids[0]))
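
Note (not part of the diff above): a minimal sketch of how the removed `ChromaMemStore` was typically queried, using the `filters` / `document_search` arguments that it forwards to Chroma's `where` / `where_document` parameters. The task id, document text, and metadata keys below are illustrative placeholders.

```python
# Illustrative usage of the removed ChromaMemStore (import path as it existed
# before this commit; see forge/memory above). All values are placeholders.
from forge.memory.chroma_memstore import ChromaMemStore

mem = ChromaMemStore(".agent_mem_store")
mem.add("demo_task", "Forge cleanup notes", {"kind": "note"})

results = mem.query(
    "demo_task",
    "cleanup",
    filters={"kind": {"$eq": "note"}},       # forwarded as collection.query(where=...)
    document_search={"$contains": "Forge"},  # forwarded as where_document=...
)
print(results["documents"])
```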
@@ -1,151 +0,0 @@
import abc
import hashlib

import chromadb
from chromadb.config import Settings


class MemStore(abc.ABC):
    """
    An abstract class that represents a Memory Store
    """

    @abc.abstractmethod
    def __init__(self, store_path: str):
        """
        Initialize the MemStore with a given store path.

        Args:
            store_path (str): The path to the store.
        """
        pass

    @abc.abstractmethod
    def add_task_memory(self, task_id: str, document: str, metadatas: dict) -> None:
        """
        Add a document to the current tasks MemStore.
        This function calls the base version with the task_id as the collection_name.

        Args:
            task_id (str): The ID of the task.
            document (str): The document to be added.
            metadatas (dict): The metadata of the document.
        """
        self.add(collection_name=task_id, document=document, metadatas=metadatas)

    @abc.abstractmethod
    def query_task_memory(
        self,
        task_id: str,
        query: str,
        filters: dict = None,
        document_search: dict = None,
    ) -> dict:
        """
        Query the current tasks MemStore.
        This function calls the base version with the task_id as the collection_name.

        Args:
            task_id (str): The ID of the task.
            query (str): The query string.
            filters (dict, optional): The filters to be applied. Defaults to None.
            document_search (dict, optional): The search string. Defaults to None.

        Returns:
            dict: The query results.
        """
        return self.query(
            collection_name=task_id,
            query=query,
            filters=filters,
            document_search=document_search,
        )

    @abc.abstractmethod
    def get_task_memory(
        self, task_id: str, doc_ids: list = None, filters: dict = None
    ) -> dict:
        """
        Get documents from the current tasks MemStore.
        This function calls the base version with the task_id as the collection_name.

        Args:
            task_id (str): The ID of the task.
            doc_ids (list, optional): The IDs of the documents to be retrieved. Defaults to None.
            filters (dict, optional): The filters to be applied. Defaults to None.

        Returns:
            dict: The retrieved documents.
        """
        return self.get(collection_name=task_id, doc_ids=doc_ids, filters=filters)

    @abc.abstractmethod
    def update_task_memory(
        self, task_id: str, doc_ids: list, documents: list, metadatas: list
    ):
        """
        Update documents in the current tasks MemStore.
        This function calls the base version with the task_id as the collection_name.

        Args:
            task_id (str): The ID of the task.
            doc_ids (list): The IDs of the documents to be updated.
            documents (list): The updated documents.
            metadatas (list): The updated metadata.
        """
        self.update(
            collection_name=task_id,
            doc_ids=doc_ids,
            documents=documents,
            metadatas=metadatas,
        )

    @abc.abstractmethod
    def delete_task_memory(self, task_id: str, doc_id: str):
        """
        Delete a document from the current tasks MemStore.
        This function calls the base version with the task_id as the collection_name.

        Args:
            task_id (str): The ID of the task.
            doc_id (str): The ID of the document to be deleted.
        """
        self.delete(collection_name=task_id, doc_id=doc_id)

    @abc.abstractmethod
    def add(self, collection_name: str, document: str, metadatas: dict) -> None:
        """
        Add a document to the current collection's MemStore.

        Args:
            collection_name (str): The name of the collection.
            document (str): The document to be added.
            metadatas (dict): The metadata of the document.
        """
        pass

    @abc.abstractmethod
    def query(
        self,
        collection_name: str,
        query: str,
        filters: dict = None,
        document_search: dict = None,
    ) -> dict:
        pass

    @abc.abstractmethod
    def get(
        self, collection_name: str, doc_ids: list = None, filters: dict = None
    ) -> dict:
        pass

    @abc.abstractmethod
    def update(
        self, collection_name: str, doc_ids: list, documents: list, metadatas: list
    ):
        pass

    @abc.abstractmethod
    def delete(self, collection_name: str, doc_id: str):
        pass
@@ -1,58 +0,0 @@
import hashlib
import shutil

import pytest

from forge.memory.chroma_memstore import ChromaMemStore


@pytest.fixture
def memstore():
    mem = ChromaMemStore(".test_mem_store")
    yield mem
    shutil.rmtree(".test_mem_store")


def test_add(memstore):
    task_id = "test_task"
    document = "This is a test document."
    metadatas = {"metadata": "test_metadata"}
    memstore.add(task_id, document, metadatas)
    doc_id = hashlib.sha256(document.encode()).hexdigest()[:20]
    assert memstore.client.get_or_create_collection(task_id).count() == 1


def test_query(memstore):
    task_id = "test_task"
    document = "This is a test document."
    metadatas = {"metadata": "test_metadata"}
    memstore.add(task_id, document, metadatas)
    query = "test"
    assert len(memstore.query(task_id, query)["documents"]) == 1


def test_update(memstore):
    task_id = "test_task"
    document = "This is a test document."
    metadatas = {"metadata": "test_metadata"}
    memstore.add(task_id, document, metadatas)
    doc_id = hashlib.sha256(document.encode()).hexdigest()[:20]
    updated_document = "This is an updated test document."
    updated_metadatas = {"metadata": "updated_test_metadata"}
    memstore.update(task_id, [doc_id], [updated_document], [updated_metadatas])
    assert memstore.get(task_id, [doc_id]) == {
        "documents": [updated_document],
        "metadatas": [updated_metadatas],
        "embeddings": None,
        "ids": [doc_id],
    }


def test_delete(memstore):
    task_id = "test_task"
    document = "This is a test document."
    metadatas = {"metadata": "test_metadata"}
    memstore.add(task_id, document, metadatas)
    doc_id = hashlib.sha256(document.encode()).hexdigest()[:20]
    memstore.delete(task_id, doc_id)
    assert memstore.client.get_or_create_collection(task_id).count() == 0
@@ -1,41 +0,0 @@
"""
The Forge SDK. This is the core of the Forge. It contains the agent protocol, which is the
core of the Forge.
"""
from forge.utils.exceptions import (
    AccessDeniedError,
    AgentException,
    AgentFinished,
    AgentTerminated,
    CodeExecutionError,
    CommandExecutionError,
    ConfigurationError,
    InvalidAgentResponseError,
    InvalidArgumentError,
    NotFoundError,
    OperationNotAllowedError,
    TooMuchOutputError,
    UnknownCommandError,
    get_detailed_traceback,
    get_exception_message,
)

from .agent import Agent
from .db import AgentDB, Base
from .forge_log import ForgeLogger
from .model import (
    Artifact,
    ArtifactUpload,
    Pagination,
    Status,
    Step,
    StepOutput,
    StepRequestBody,
    Task,
    TaskArtifactsListResponse,
    TaskListResponse,
    TaskRequestBody,
    TaskStepsListResponse,
)
from .prompting import PromptEngine
from .workspace import LocalWorkspace, Workspace
@@ -1,203 +0,0 @@
import json
import logging
import logging.config
import logging.handlers
import os
import queue

JSON_LOGGING = os.environ.get("JSON_LOGGING", "false").lower() == "true"

CHAT = 29
logging.addLevelName(CHAT, "CHAT")

RESET_SEQ: str = "\033[0m"
COLOR_SEQ: str = "\033[1;%dm"
BOLD_SEQ: str = "\033[1m"
UNDERLINE_SEQ: str = "\033[04m"

ORANGE: str = "\033[33m"
YELLOW: str = "\033[93m"
WHITE: str = "\33[37m"
BLUE: str = "\033[34m"
LIGHT_BLUE: str = "\033[94m"
RED: str = "\033[91m"
GREY: str = "\33[90m"
GREEN: str = "\033[92m"

EMOJIS: dict[str, str] = {
    "DEBUG": "🐛",
    "INFO": "📝",
    "CHAT": "💬",
    "WARNING": "⚠️",
    "ERROR": "❌",
    "CRITICAL": "💥",
}

KEYWORD_COLORS: dict[str, str] = {
    "DEBUG": WHITE,
    "INFO": LIGHT_BLUE,
    "CHAT": GREEN,
    "WARNING": YELLOW,
    "ERROR": ORANGE,
    "CRITICAL": RED,
}


class JsonFormatter(logging.Formatter):
    def format(self, record):
        return json.dumps(record.__dict__)


def formatter_message(message: str, use_color: bool = True) -> str:
    """
    Syntax highlight certain keywords
    """
    if use_color:
        message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
    else:
        message = message.replace("$RESET", "").replace("$BOLD", "")
    return message


def format_word(
    message: str, word: str, color_seq: str, bold: bool = False, underline: bool = False
) -> str:
    """
    Surround the fiven word with a sequence
    """
    replacer = color_seq + word + RESET_SEQ
    if underline:
        replacer = UNDERLINE_SEQ + replacer
    if bold:
        replacer = BOLD_SEQ + replacer
    return message.replace(word, replacer)


class ConsoleFormatter(logging.Formatter):
    """
    This Formatted simply colors in the levelname i.e 'INFO', 'DEBUG'
    """

    def __init__(
        self, fmt: str, datefmt: str = None, style: str = "%", use_color: bool = True
    ):
        super().__init__(fmt, datefmt, style)
        self.use_color = use_color

    def format(self, record: logging.LogRecord) -> str:
        """
        Format and highlight certain keywords
        """
        rec = record
        levelname = rec.levelname
        if self.use_color and levelname in KEYWORD_COLORS:
            levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
            rec.levelname = levelname_color
        rec.name = f"{GREY}{rec.name:<15}{RESET_SEQ}"
        rec.msg = (
            KEYWORD_COLORS[levelname] + EMOJIS[levelname] + " " + rec.msg + RESET_SEQ
        )
        return logging.Formatter.format(self, rec)


class ForgeLogger(logging.Logger):
    """
    This adds extra logging functions such as logger.trade and also
    sets the logger to use the custom formatter
    """

    CONSOLE_FORMAT: str = (
        "[%(asctime)s] [$BOLD%(name)-15s$RESET] [%(levelname)-8s]\t%(message)s"
    )
    FORMAT: str = "%(asctime)s %(name)-15s %(levelname)-8s %(message)s"
    COLOR_FORMAT: str = formatter_message(CONSOLE_FORMAT, True)
    JSON_FORMAT: str = '{"time": "%(asctime)s", "name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}'

    def __init__(self, name: str, logLevel: str = "DEBUG"):
        logging.Logger.__init__(self, name, logLevel)

        # Queue Handler
        queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
        json_formatter = logging.Formatter(self.JSON_FORMAT)
        queue_handler.setFormatter(json_formatter)
        self.addHandler(queue_handler)

        if JSON_LOGGING:
            console_formatter = JsonFormatter()
        else:
            console_formatter = ConsoleFormatter(self.COLOR_FORMAT)
        console = logging.StreamHandler()
        console.setFormatter(console_formatter)
        self.addHandler(console)

    def chat(self, role: str, openai_repsonse: dict, messages=None, *args, **kws):
        """
        Parse the content, log the message and extract the usage into prometheus metrics
        """
        role_emojis = {
            "system": "🖥️",
            "user": "👤",
            "assistant": "🤖",
            "function": "⚙️",
        }
        if self.isEnabledFor(CHAT):
            if messages:
                for message in messages:
                    self._log(
                        CHAT,
                        f"{role_emojis.get(message['role'], '🔵')}: {message['content']}",
                    )
            else:
                response = json.loads(openai_repsonse)

                self._log(
                    CHAT,
                    f"{role_emojis.get(role, '🔵')}: {response['choices'][0]['message']['content']}",
                )


class QueueLogger(logging.Logger):
    """
    Custom logger class with queue
    """

    def __init__(self, name: str, level: int = logging.NOTSET):
        super().__init__(name, level)
        queue_handler = logging.handlers.QueueHandler(queue.Queue(-1))
        self.addHandler(queue_handler)


logging_config: dict = dict(
    version=1,
    formatters={
        "console": {
            "()": ConsoleFormatter,
            "format": ForgeLogger.COLOR_FORMAT,
        },
    },
    handlers={
        "h": {
            "class": "logging.StreamHandler",
            "formatter": "console",
            "level": logging.INFO,
        },
    },
    root={
        "handlers": ["h"],
        "level": logging.INFO,
    },
    loggers={
        "autogpt": {
            "handlers": ["h"],
            "level": logging.INFO,
            "propagate": False,
        },
    },
)


def setup_logger():
    """
    Setup the logger with the specified format
    """
    logging.config.dictConfig(logging_config)
@@ -1,117 +0,0 @@
"""
Relative to this file I will have a prompt directory its located ../prompts
In this directory there will be a techniques directory and a directory for each model - gpt-3.5-turbo gpt-4, llama-2-70B, code-llama-7B etc

Each directory will have jinga2 templates for the prompts.
prompts in the model directories can use the techniques in the techniques directory.

Write the code I'd need to load and populate the templates.

I want the following functions:

class PromptEngine:

    def __init__(self, model):
        pass

    def load_prompt(model, prompt_name, prompt_ags) -> str:
        pass
"""

import glob
import os
from difflib import get_close_matches
from typing import List

from jinja2 import Environment, FileSystemLoader

from .forge_log import ForgeLogger

LOG = ForgeLogger(__name__)


class PromptEngine:
    """
    Class to handle loading and populating Jinja2 templates for prompts.
    """

    def __init__(self, model: str, debug_enabled: bool = False):
        """
        Initialize the PromptEngine with the specified model.

        Args:
            model (str): The model to use for loading prompts.
            debug_enabled (bool): Enable or disable debug logging.
        """
        self.model = model
        self.debug_enabled = debug_enabled
        if self.debug_enabled:
            LOG.debug(f"Initializing PromptEngine for model: {model}")

        try:
            # Get the list of all model directories
            models_dir = os.path.abspath(
                os.path.join(os.path.dirname(__file__), "../prompts")
            )
            model_names = [
                os.path.basename(os.path.normpath(d))
                for d in glob.glob(os.path.join(models_dir, "*/"))
                if os.path.isdir(d) and "techniques" not in d
            ]

            self.model = self.get_closest_match(self.model, model_names)

            if self.debug_enabled:
                LOG.debug(f"Using the closest match model for prompts: {self.model}")

            self.env = Environment(loader=FileSystemLoader(models_dir))
        except Exception as e:
            LOG.error(f"Error initializing Environment: {e}")
            raise

    @staticmethod
    def get_closest_match(target: str, model_dirs: List[str]) -> str:
        """
        Find the closest match to the target in the list of model directories.

        Args:
            target (str): The target model.
            model_dirs (list): The list of available model directories.

        Returns:
            str: The closest match to the target.
        """
        try:
            matches = get_close_matches(target, model_dirs, n=1, cutoff=0.1)
            if matches:
                matches_str = ", ".join(matches)
                LOG.debug(matches_str)
            for m in matches:
                LOG.info(m)
            return matches[0]
        except Exception as e:
            LOG.error(f"Error finding closest match: {e}")
            raise

    def load_prompt(self, template: str, **kwargs) -> str:
        """
        Load and populate the specified template.

        Args:
            template (str): The name of the template to load.
            **kwargs: The arguments to populate the template with.

        Returns:
            str: The populated template.
        """
        try:
            template = os.path.join(self.model, template)
            if self.debug_enabled:
                LOG.debug(f"Loading template: {template}")
            template = self.env.get_template(f"{template}.j2")
            if self.debug_enabled:
                LOG.debug(f"Rendering template: {template} with args: {kwargs}")
            return template.render(**kwargs)
        except Exception as e:
            LOG.error(f"Error loading or rendering template: {e}")
            raise
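
Note (not part of the diff above): a minimal sketch of how the removed `PromptEngine` was driven. The model name and template name are illustrative only; templates were resolved against the Jinja2 `.j2` files under the `../prompts/<model>/` directories described in the module docstring.

```python
# Illustrative usage of the removed PromptEngine. "gpt-3.5-turbo" and
# "task-step" are example values, not taken from the diff.
from forge.sdk import PromptEngine  # exported by forge/sdk/__init__.py before this commit

engine = PromptEngine("gpt-3.5-turbo", debug_enabled=True)
# Renders <closest matching model dir>/task-step.j2 with the given kwargs
prompt = engine.load_prompt("task-step", task="Summarize the forge cleanup")
print(prompt)
```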
@@ -1,133 +0,0 @@
import abc
import os
import typing
from pathlib import Path

from google.cloud import storage


class Workspace(abc.ABC):
    @abc.abstractclassmethod
    def __init__(self, base_path: str) -> None:
        self.base_path = base_path

    @abc.abstractclassmethod
    def read(self, task_id: str, path: str) -> bytes:
        pass

    @abc.abstractclassmethod
    def write(self, task_id: str, path: str, data: bytes) -> None:
        pass

    @abc.abstractclassmethod
    def delete(
        self, task_id: str, path: str, directory: bool = False, recursive: bool = False
    ) -> None:
        pass

    @abc.abstractclassmethod
    def exists(self, task_id: str, path: str) -> bool:
        pass

    @abc.abstractclassmethod
    def list(self, task_id: str, path: str) -> typing.List[str]:
        pass


class LocalWorkspace(Workspace):
    def __init__(self, base_path: str):
        self.base_path = Path(base_path).resolve()

    def _resolve_path(self, task_id: str, path: str) -> Path:
        path = str(path)
        path = path if not path.startswith("/") else path[1:]
        abs_path = (self.base_path / task_id / path).resolve()
        if not str(abs_path).startswith(str(self.base_path)):
            print("Error")
            raise ValueError(f"Directory traversal is not allowed! - {abs_path}")
        try:
            abs_path.parent.mkdir(parents=True, exist_ok=True)
        except FileExistsError:
            pass
        return abs_path

    def read(self, task_id: str, path: str) -> bytes:
        with open(self._resolve_path(task_id, path), "rb") as f:
            return f.read()

    def write(self, task_id: str, path: str, data: bytes) -> None:
        file_path = self._resolve_path(task_id, path)
        with open(file_path, "wb") as f:
            f.write(data)

    def delete(
        self, task_id: str, path: str, directory: bool = False, recursive: bool = False
    ) -> None:
        path = self.base_path / task_id / path
        resolved_path = self._resolve_path(task_id, path)
        if directory:
            if recursive:
                os.rmdir(resolved_path)
            else:
                os.removedirs(resolved_path)
        else:
            os.remove(resolved_path)

    def exists(self, task_id: str, path: str) -> bool:
        path = self.base_path / task_id / path
        return self._resolve_path(task_id, path).exists()

    def list(self, task_id: str, path: str) -> typing.List[str]:
        path = self.base_path / task_id / path
        base = self._resolve_path(task_id, path)
        if not base.exists() or not base.is_dir():
            return []
        return [str(p.relative_to(self.base_path / task_id)) for p in base.iterdir()]


class GCSWorkspace(Workspace):
    def __init__(self, bucket_name: str, base_path: str = ""):
        self.bucket_name = bucket_name
        self.base_path = Path(base_path).resolve() if base_path else ""
        self.storage_client = storage.Client()
        self.bucket = self.storage_client.get_bucket(self.bucket_name)

    def _resolve_path(self, task_id: str, path: str) -> Path:
        path = str(path)
        path = path if not path.startswith("/") else path[1:]
        abs_path = (self.base_path / task_id / path).resolve()
        if not str(abs_path).startswith(str(self.base_path)):
            print("Error")
            raise ValueError(f"Directory traversal is not allowed! - {abs_path}")
        return abs_path

    def read(self, task_id: str, path: str) -> bytes:
        blob = self.bucket.blob(self._resolve_path(task_id, path))
        if not blob.exists():
            raise FileNotFoundError()
        return blob.download_as_bytes()

    def write(self, task_id: str, path: str, data: bytes) -> None:
        blob = self.bucket.blob(self._resolve_path(task_id, path))
        blob.upload_from_string(data)

    def delete(self, task_id: str, path: str, directory=False, recursive=False):
        if directory and not recursive:
            raise ValueError("recursive must be True when deleting a directory")
        blob = self.bucket.blob(self._resolve_path(task_id, path))
        if not blob.exists():
            return
        if directory:
            for b in list(self.bucket.list_blobs(prefix=blob.name)):
                b.delete()
        else:
            blob.delete()

    def exists(self, task_id: str, path: str) -> bool:
        blob = self.bucket.blob(self._resolve_path(task_id, path))
        return blob.exists()

    def list(self, task_id: str, path: str) -> typing.List[str]:
        prefix = os.path.join(task_id, self.base_path, path).replace("\\", "/") + "/"
        blobs = list(self.bucket.list_blobs(prefix=prefix))
        return [str(Path(b.name).relative_to(prefix[:-1])) for b in blobs]
@@ -1,47 +0,0 @@
import os

import pytest

# Assuming the classes are defined in a file named workspace.py
from .workspace import LocalWorkspace

# Constants
TEST_BASE_PATH = "/tmp/test_workspace"
TEST_FILE_CONTENT = b"Hello World"
TEST_TASK_ID = "1234"


# Setup and Teardown for LocalWorkspace


@pytest.fixture
def setup_local_workspace():
    os.makedirs(TEST_BASE_PATH, exist_ok=True)
    yield
    os.system(f"rm -rf {TEST_BASE_PATH}")  # Cleanup after tests


def test_local_read_write_delete_exists(setup_local_workspace):
    workspace = LocalWorkspace(TEST_BASE_PATH)

    # Write
    workspace.write(TEST_TASK_ID, "test_file.txt", TEST_FILE_CONTENT)

    # Exists
    assert workspace.exists(TEST_TASK_ID, "test_file.txt")

    # Read
    assert workspace.read(TEST_TASK_ID, "test_file.txt") == TEST_FILE_CONTENT

    # Delete
    workspace.delete(TEST_TASK_ID, "test_file.txt")
    assert not workspace.exists(TEST_TASK_ID, "test_file.txt")


def test_local_list(setup_local_workspace):
    workspace = LocalWorkspace(TEST_BASE_PATH)
    workspace.write(TEST_TASK_ID, "test1.txt", TEST_FILE_CONTENT)
    workspace.write(TEST_TASK_ID, "test2.txt", TEST_FILE_CONTENT)

    files = workspace.list(TEST_TASK_ID, ".")
    assert set(files) == {"test1.txt", "test2.txt"}
@@ -91,13 +91,5 @@ class OperationNotAllowedError(CommandExecutionError):
    """The agent is not allowed to execute the proposed operation"""


class AccessDeniedError(CommandExecutionError):
    """The operation failed because access to a required resource was denied"""


class CodeExecutionError(CommandExecutionError):
    """The operation (an attempt to run arbitrary code) returned an error"""


class TooMuchOutputError(CommandExecutionError):
    """The operation generated more output than what the Agent can process"""

@@ -1,19 +0,0 @@
from pathlib import Path

import yaml
from colorama import Fore


def validate_yaml_file(file: str | Path):
    try:
        with open(file, encoding="utf-8") as fp:
            yaml.load(fp.read(), Loader=yaml.SafeLoader)
    except FileNotFoundError:
        return (False, f"The file {Fore.CYAN}`{file}`{Fore.RESET} wasn't found")
    except yaml.YAMLError as e:
        return (
            False,
            f"There was an issue while trying to read with your AI Settings file: {e}",
        )

    return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!")
@@ -1,15 +0,0 @@
constraints: [
  'Exclusively use the commands listed below.',
  'You can only act proactively, and are unable to start background jobs or set up webhooks for yourself. Take this into account when planning your actions.',
  'You are unable to interact with physical objects. If this is absolutely necessary to fulfill a task or objective or to complete a step, you must ask the user to do it for you. If the user refuses this, and there is no other way to achieve your goals, you must terminate to avoid wasting time and energy.'
]
resources: [
  'You are a Large Language Model, trained on millions of pages of text, including a lot of factual knowledge. Make use of this factual knowledge to avoid unnecessary gathering of information.'
]
best_practices: [
  'Continuously review and analyze your actions to ensure you are performing to the best of your abilities.',
  'Constructively self-criticize your big-picture behavior constantly.',
  'Reflect on past decisions and strategies to refine your approach.',
  'Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.',
  'Only make use of your information gathering abilities to find information that you don''t yet have knowledge of.'
]
@@ -4,7 +4,6 @@ Configuration is controlled through the `Config` object. You can set configurati

## Environment Variables

- `AI_SETTINGS_FILE`: Location of the AI Settings file relative to the AutoGPT root directory. Default: ai_settings.yaml
- `AUDIO_TO_TEXT_PROVIDER`: Audio To Text Provider. Only option currently is `huggingface`. Default: huggingface
- `AUTHORISE_COMMAND_KEY`: Key response accepted when authorising commands. Default: y
- `ANTHROPIC_API_KEY`: Set this if you want to use Anthropic models with AutoGPT
@@ -34,7 +33,6 @@ Configuration is controlled through the `Config` object. You can set configurati
- `OPENAI_API_KEY`: *REQUIRED*- Your [OpenAI API Key](https://platform.openai.com/account/api-keys).
- `OPENAI_ORGANIZATION`: Organization ID in OpenAI. Optional.
- `PLAIN_OUTPUT`: Plain output, which disables the spinner. Default: False
- `PROMPT_SETTINGS_FILE`: Location of the Prompt Settings file relative to the AutoGPT root directory. Default: prompt_settings.yaml
- `REDIS_HOST`: Redis Host. Default: localhost
- `REDIS_PASSWORD`: Redis Password. Optional. Default:
- `REDIS_PORT`: Redis Port. Default: 6379

@@ -47,12 +47,6 @@
      #- type: bind
      #  source: ./azure.yaml
      #  target: /app/azure.yaml
      #- type: bind
      #  source: ./ai_settings.yaml
      #  target: /app/ai_settings.yaml
      #- type: bind
      #  source: ./prompt_settings.yaml
      #  target: /app/prompt_settings.yaml
```
</details>

@@ -77,12 +71,6 @@
      - ./logs:/app/logs
      ## uncomment following lines if you want to make use of these files
      ## you must have them existing in the same folder as this docker-compose.yml
      #- type: bind
      #  source: ./ai_settings.yaml
      #  target: /app/ai_settings.yaml
      #- type: bind
      #  source: ./prompt_settings.yaml
      #  target: /app/prompt_settings.yaml
```
</details>

@@ -117,9 +105,6 @@

5. Save and close the `.env` file.

Templates for the optional extra configuration files (e.g. `prompt_settings.yml`) can be
found in the [repository].

!!! info "Using a GPT Azure-instance"
    If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and
    make an Azure configuration file:
@@ -141,7 +126,6 @@ found in the [repository].

**Note:** Azure support has been dropped in `master`, so these instructions will only work with v0.4.7 (or earlier).

[repository]: https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpts/autogpt
[show hidden files/Windows]: https://support.microsoft.com/en-us/windows/view-hidden-files-and-folders-in-windows-97fbc472-c603-9d90-91d0-1166d1d9f4b5
[show hidden files/macOS]: https://www.pcmag.com/how-to/how-to-access-your-macs-hidden-files
[openai-python docs]: https://github.com/openai/openai-python#microsoft-azure-endpoints

@@ -54,11 +54,6 @@ Options:
  -c, --continuous                Enable Continuous Mode
  -y, --skip-reprompt             Skips the re-prompting messages at the
                                  beginning of the script
  -C, --ai-settings FILE          Specifies which ai_settings.yaml file to
                                  use, relative to the AutoGPT root directory.
                                  Will also automatically skip the re-prompt.
  -P, --prompt-settings FILE      Specifies which prompt_settings.yaml file to
                                  use.
  -l, --continuous-limit INTEGER  Defines the number of times to run in
                                  continuous mode
  --speak                         Enable Speak Mode
@@ -130,8 +125,6 @@ Usage: python -m autogpt serve [OPTIONS]
  agent for every task.

Options:
  -P, --prompt-settings FILE  Specifies which prompt_settings.yaml file to
                              use.
  --debug                     Enable Debug Mode
  --gpt3only                  Enable GPT3.5 Only Mode
  --gpt4only                  Enable GPT4 Only Mode
