Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-08 22:58:01 -05:00)
refactor(agent, forge): Move library code from autogpt to forge (#7106)
Moved from `autogpt` to `forge` (an illustrative before/after for the import updates follows the lists):
- `autogpt.config` -> `forge.config`
- `autogpt.processing` -> `forge.content_processing`
- `autogpt.file_storage` -> `forge.file_storage`
- `autogpt.logs` -> `forge.logging`
- `autogpt.speech` -> `forge.speech`
- `autogpt.agents.(base|components|protocols)` -> `forge.agent.*`
- `autogpt.command_decorator` -> `forge.command.decorator`
- `autogpt.models.(command|command_parameter)` -> `forge.command.(command|parameter)`
- `autogpt.(commands|components|features)` -> `forge.components`
- `autogpt.core.utils.json_utils` -> `forge.json.parsing`
- `autogpt.prompts.utils` -> `forge.llm.prompting.utils`
- `autogpt.core.prompting.(base|schema|utils)` -> `forge.llm.prompting.*`
- `autogpt.core.resource.model_providers` -> `forge.llm.providers`
- `autogpt.llm.providers.openai` + `autogpt.core.resource.model_providers.utils` -> `forge.llm.providers.utils`
- `autogpt.models.action_history:Action*` -> `forge.models.action`
- `autogpt.core.configuration.schema` -> `forge.models.config`
- `autogpt.core.utils.json_schema` -> `forge.models.json_schema`
- `autogpt.core.resource.schema` -> `forge.models.providers`
- `autogpt.models.utils` -> `forge.models.utils`
- `forge.sdk.(errors|utils)` + `autogpt.utils.(exceptions|file_operations_utils|validators)` -> `forge.utils.(exceptions|file_operations|url_validator)`
- `autogpt.utils.utils` -> `forge.utils.const` + `forge.utils.yaml_validator`
Moved within `forge`:
- `forge/prompts/*` -> `forge/llm/prompting/*`
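
For consumers of these modules, the import updates are mostly mechanical. A minimal before/after sketch (module paths are taken from the lists above; the particular imports shown are just examples from this diff):

# before
from autogpt.config import Config
from autogpt.core.resource.model_providers import ChatModelProvider
from autogpt.core.utils.json_schema import JSONSchema
from autogpt.utils.exceptions import AgentException

# after
from forge.config.config import Config
from forge.llm.providers import ChatModelProvider
from forge.models.json_schema import JSONSchema
from forge.utils.exceptions import AgentException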
The rest is mostly import updates, plus some sporadic removals and necessary changes (for example, to fix circular dependencies):
- Changed `CommandOutput` to `Any` to remove the coupling with `ContextItem` (no longer needed)
- Removed unused `Singleton` class
- Reluctantly moved `speech` to `forge` due to coupling (TTS needs to be turned into a component)
- Moved `function_specs_from_commands` and `core/resource/model_providers` to `llm/providers` (resources were a `core` thing and are no longer relevant)
- Kept tests in `autogpt` to reduce the number of changes in this PR
- Removed unused memory-related code from tests
- Removed duplicated classes: `FancyConsoleFormatter`, `BelowLevelFilter`
- `prompt_settings.yaml` is present in both `autogpt` and `forge` because, for some reason, it doesn't work when placed in just one directory (needs to be taken care of)
- Removed the `config` param from `clean_input`; it wasn't used and caused a circular dependency
- Renamed `BaseAgentActionProposal` to `ActionProposal`
- Updated `pyproject.toml` in `forge` and `autogpt`
- Moved the `Action*` models from `forge/components/action_history/model.py` to `forge/models/action.py`, since they are relevant to the entire agent and not just `EventHistoryComponent`, and to reduce coupling
- Renamed `DEFAULT_ASK_COMMAND` to `ASK_COMMAND` and `DEFAULT_FINISH_COMMAND` to `FINISH_COMMAND` (see the sketch after this list)
- Renamed `AutoGptFormatter` to `ForgeFormatter` and moved it to `forge`
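
A minimal sketch of what calling code looks like after the renames (the `use_tool.name` comparison mirrors the agent protocol server code in this diff; the helper function wrapping it is hypothetical):

from forge.models.action import ActionProposal  # formerly BaseAgentActionProposal
from forge.utils.const import ASK_COMMAND, FINISH_COMMAND  # formerly DEFAULT_ASK_COMMAND / DEFAULT_FINISH_COMMAND

def is_final_step(proposal: ActionProposal) -> bool:  # hypothetical helper
    # True when the agent proposed the (renamed) finish command
    return proposal.use_tool.name == FINISH_COMMAND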
Includes changes from PR https://github.com/Significant-Gravitas/AutoGPT/pull/7148
---------
Co-authored-by: Reinier van der Leer <pwuts@agpt.co>
Committed via GitHub. Parent: 4f81246fd4. Commit: e8d7dfa386.
@@ -3,12 +3,14 @@ import logging
 import sys
 from pathlib import Path

+from forge.config.ai_profile import AIProfile
+from forge.config.config import ConfigBuilder
+from forge.file_storage import FileStorageBackendName, get_storage
+from forge.logging.config import configure_logging
+
 from autogpt.agent_manager.agent_manager import AgentManager
 from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
 from autogpt.app.main import _configure_llm_provider, run_interaction_loop
-from autogpt.config import AIProfile, ConfigBuilder
-from autogpt.file_storage import FileStorageBackendName, get_storage
-from autogpt.logs.config import configure_logging

 LOG_DIR = Path(__file__).parent / "logs"

@@ -1,9 +1,12 @@
 from typing import Optional

+from forge.config.ai_directives import AIDirectives
+from forge.config.ai_profile import AIProfile
+from forge.config.config import Config
+from forge.file_storage.base import FileStorage
+from forge.llm.providers import ChatModelProvider
+
 from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
-from autogpt.config import AIDirectives, AIProfile, Config
-from autogpt.core.resource.model_providers import ChatModelProvider
-from autogpt.file_storage.base import FileStorage


 def create_agent(

@@ -2,16 +2,16 @@ from __future__ import annotations

 from typing import TYPE_CHECKING

-from autogpt.config.ai_directives import AIDirectives
-from autogpt.file_storage.base import FileStorage
-
-from .configurators import _configure_agent
-from .profile_generator import generate_agent_profile_for_task
+from forge.config.ai_directives import AIDirectives
+from forge.file_storage.base import FileStorage

 if TYPE_CHECKING:
     from autogpt.agents.agent import Agent
-    from autogpt.config import Config
-    from autogpt.core.resource.model_providers.schema import ChatModelProvider
+    from forge.config.config import Config
+    from forge.llm.providers.schema import ChatModelProvider
+
+from .configurators import _configure_agent
+from .profile_generator import generate_agent_profile_for_task


 async def generate_agent_for_task(

@@ -1,20 +1,18 @@
 import json
 import logging

-from autogpt.config import AIDirectives, AIProfile, Config
-from autogpt.core.configuration import SystemConfiguration, UserConfigurable
-from autogpt.core.prompting import (
-    ChatPrompt,
-    LanguageModelClassification,
-    PromptStrategy,
-)
-from autogpt.core.resource.model_providers.schema import (
+from forge.config.ai_directives import AIDirectives
+from forge.config.ai_profile import AIProfile
+from forge.config.config import Config
+from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
+from forge.llm.providers.schema import (
     AssistantChatMessage,
     ChatMessage,
     ChatModelProvider,
     CompletionModelFunction,
 )
-from autogpt.core.utils.json_schema import JSONSchema
+from forge.models.config import SystemConfiguration, UserConfigurable
+from forge.models.json_schema import JSONSchema

 logger = logging.getLogger(__name__)

@@ -3,8 +3,9 @@ from __future__ import annotations
 import uuid
 from pathlib import Path

+from forge.file_storage.base import FileStorage
+
 from autogpt.agents.agent import AgentSettings
-from autogpt.file_storage.base import FileStorage


 class AgentManager:

@@ -1,9 +1,7 @@
-from .agent import Agent, OneShotAgentActionProposal
-from .base import BaseAgent, BaseAgentActionProposal
+from .agent import Agent
+from .prompt_strategies.one_shot import OneShotAgentActionProposal

 __all__ = [
-    "BaseAgent",
     "Agent",
-    "BaseAgentActionProposal",
     "OneShotAgentActionProposal",
 ]

@@ -6,66 +6,67 @@ from datetime import datetime
 from typing import TYPE_CHECKING, Optional

 import sentry_sdk
-from pydantic import Field
-
-from autogpt.commands.execute_code import CodeExecutorComponent
-from autogpt.commands.git_operations import GitOperationsComponent
-from autogpt.commands.image_gen import ImageGeneratorComponent
-from autogpt.commands.system import SystemComponent
-from autogpt.commands.user_interaction import UserInteractionComponent
-from autogpt.commands.web_search import WebSearchComponent
-from autogpt.commands.web_selenium import WebSeleniumComponent
-from autogpt.components.event_history import EventHistoryComponent
-from autogpt.core.configuration import Configurable
-from autogpt.core.prompting import ChatPrompt
-from autogpt.core.resource.model_providers import (
-    AssistantFunctionCall,
-    ChatMessage,
-    ChatModelProvider,
-    ChatModelResponse,
-)
-from autogpt.core.runner.client_lib.logging.helpers import dump_prompt
-from autogpt.file_storage.base import FileStorage
-from autogpt.llm.providers.openai import function_specs_from_commands
-from autogpt.logs.log_cycle import (
-    CURRENT_CONTEXT_FILE_NAME,
-    NEXT_ACTION_FILE_NAME,
-    USER_INPUT_FILE_NAME,
-    LogCycleHandler,
-)
-from autogpt.models.action_history import (
-    ActionErrorResult,
-    ActionInterruptedByHuman,
-    ActionResult,
-    ActionSuccessResult,
-    EpisodicActionHistory,
-)
-from autogpt.models.command import Command, CommandOutput
-from autogpt.utils.exceptions import (
-    AgentException,
-    AgentTerminated,
-    CommandExecutionError,
-    UnknownCommandError,
-)
-
-from .base import BaseAgent, BaseAgentConfiguration, BaseAgentSettings
-from .features.agent_file_manager import FileManagerComponent
-from .features.context import AgentContext, ContextComponent
-from .features.watchdog import WatchdogComponent
-from .prompt_strategies.one_shot import (
-    OneShotAgentActionProposal,
-    OneShotAgentPromptStrategy,
-)
-from .protocols import (
+from forge.agent.base import BaseAgent, BaseAgentConfiguration, BaseAgentSettings
+from forge.agent.protocols import (
     AfterExecute,
     AfterParse,
     CommandProvider,
     DirectiveProvider,
     MessageProvider,
 )
+from forge.command.command import Command, CommandOutput
+from forge.components.action_history import (
+    ActionHistoryComponent,
+    EpisodicActionHistory,
+)
+from forge.components.code_executor.code_executor import CodeExecutorComponent
+from forge.components.context.context import AgentContext, ContextComponent
+from forge.components.file_manager import FileManagerComponent
+from forge.components.git_operations import GitOperationsComponent
+from forge.components.image_gen import ImageGeneratorComponent
+from forge.components.system import SystemComponent
+from forge.components.user_interaction import UserInteractionComponent
+from forge.components.watchdog import WatchdogComponent
+from forge.components.web import WebSearchComponent, WebSeleniumComponent
+from forge.file_storage.base import FileStorage
+from forge.llm.prompting.schema import ChatPrompt
+from forge.llm.providers import (
+    AssistantFunctionCall,
+    ChatMessage,
+    ChatModelProvider,
+    ChatModelResponse,
+)
+from forge.llm.providers.utils import function_specs_from_commands
+from forge.models.action import (
+    ActionErrorResult,
+    ActionInterruptedByHuman,
+    ActionResult,
+    ActionSuccessResult,
+)
+from forge.models.config import Configurable
+from forge.utils.exceptions import (
+    AgentException,
+    AgentTerminated,
+    CommandExecutionError,
+    UnknownCommandError,
+)
+from pydantic import Field

+from autogpt.app.log_cycle import (
+    CURRENT_CONTEXT_FILE_NAME,
+    NEXT_ACTION_FILE_NAME,
+    USER_INPUT_FILE_NAME,
+    LogCycleHandler,
+)
+from autogpt.core.runner.client_lib.logging.helpers import dump_prompt
+
+from .prompt_strategies.one_shot import (
+    OneShotAgentActionProposal,
+    OneShotAgentPromptStrategy,
+)

 if TYPE_CHECKING:
-    from autogpt.config import Config
+    from forge.config.config import Config

 logger = logging.getLogger(__name__)

@@ -114,13 +115,13 @@ class Agent(BaseAgent, Configurable[AgentSettings]):

         # Components
         self.system = SystemComponent(legacy_config, settings.ai_profile)
-        self.history = EventHistoryComponent(
+        self.history = ActionHistoryComponent(
             settings.history,
             self.send_token_limit,
             lambda x: self.llm_provider.count_tokens(x, self.llm.name),
             legacy_config,
             llm_provider,
-        )
+        ).run_after(WatchdogComponent)
         self.user_interaction = UserInteractionComponent(legacy_config)
         self.file_manager = FileManagerComponent(settings, file_storage)
         self.code_executor = CodeExecutorComponent(
@@ -135,7 +136,9 @@ class Agent(BaseAgent, Configurable[AgentSettings]):
         self.web_search = WebSearchComponent(legacy_config)
         self.web_selenium = WebSeleniumComponent(legacy_config, llm_provider, self.llm)
         self.context = ContextComponent(self.file_manager.workspace, settings.context)
-        self.watchdog = WatchdogComponent(settings.config, settings.history)
+        self.watchdog = WatchdogComponent(settings.config, settings.history).run_after(
+            ContextComponent
+        )

         self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
         """Timestamp the agent was created; only used for structured debug logging."""

@@ -6,26 +6,22 @@ import re
 from logging import Logger

 import distro
-from pydantic import Field
-
-from autogpt.agents.base import BaseAgentActionProposal
-from autogpt.config import AIDirectives, AIProfile
-from autogpt.core.configuration.schema import SystemConfiguration, UserConfigurable
-from autogpt.core.prompting import (
-    ChatPrompt,
-    LanguageModelClassification,
-    PromptStrategy,
-)
-from autogpt.core.resource.model_providers.schema import (
+from forge.config.ai_directives import AIDirectives
+from forge.config.ai_profile import AIProfile
+from forge.json.parsing import extract_dict_from_json
+from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
+from forge.llm.prompting.utils import format_numbered_list
+from forge.llm.providers.schema import (
     AssistantChatMessage,
     ChatMessage,
     CompletionModelFunction,
 )
-from autogpt.core.utils.json_schema import JSONSchema
-from autogpt.core.utils.json_utils import extract_dict_from_json
-from autogpt.models.utils import ModelWithSummary
-from autogpt.prompts.utils import format_numbered_list
-from autogpt.utils.exceptions import InvalidAgentResponseError
+from forge.models.action import ActionProposal
+from forge.models.config import SystemConfiguration, UserConfigurable
+from forge.models.json_schema import JSONSchema
+from forge.models.utils import ModelWithSummary
+from forge.utils.exceptions import InvalidAgentResponseError
+from pydantic import Field

 _RESPONSE_INTERFACE_NAME = "AssistantResponse"
@@ -46,7 +42,7 @@ class AssistantThoughts(ModelWithSummary):
         return self.text


-class OneShotAgentActionProposal(BaseAgentActionProposal):
+class OneShotAgentActionProposal(ActionProposal):
     thoughts: AssistantThoughts

@@ -10,8 +10,11 @@ from fastapi import APIRouter, FastAPI, UploadFile
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import RedirectResponse, StreamingResponse
 from fastapi.staticfiles import StaticFiles
+from forge.config.config import Config
+from forge.file_storage import FileStorage
+from forge.llm.providers import ChatModelProvider, ModelProviderBudget
+from forge.models.action import ActionErrorResult, ActionSuccessResult
 from forge.sdk.db import AgentDB
-from forge.sdk.errors import NotFoundError
 from forge.sdk.middlewares import AgentMiddleware
 from forge.sdk.model import (
     Artifact,
@@ -24,6 +27,8 @@ from forge.sdk.model import (
     TaskStepsListResponse,
 )
 from forge.sdk.routes.agent_protocol import base_router
+from forge.utils.const import ASK_COMMAND, FINISH_COMMAND
+from forge.utils.exceptions import AgentFinished, NotFoundError
 from hypercorn.asyncio import serve as hypercorn_serve
 from hypercorn.config import Config as HypercornConfig
 from sentry_sdk import set_user
@@ -32,12 +37,6 @@ from autogpt.agent_factory.configurators import configure_agent_with_state
 from autogpt.agent_factory.generators import generate_agent_for_task
 from autogpt.agent_manager import AgentManager
 from autogpt.app.utils import is_port_free
-from autogpt.config import Config
-from autogpt.core.resource.model_providers import ChatModelProvider, ModelProviderBudget
-from autogpt.file_storage import FileStorage
-from autogpt.models.action_history import ActionErrorResult, ActionSuccessResult
-from autogpt.utils.exceptions import AgentFinished
-from autogpt.utils.utils import DEFAULT_ASK_COMMAND, DEFAULT_FINISH_COMMAND

 logger = logging.getLogger(__name__)
@@ -226,7 +225,7 @@ class AgentProtocolServer:
                 input=step_request,
                 is_last=(
                     last_proposal is not None
-                    and last_proposal.use_tool.name == DEFAULT_FINISH_COMMAND
+                    and last_proposal.use_tool.name == FINISH_COMMAND
                     and execute_approved
                 ),
             )
@@ -240,7 +239,7 @@
                 )
             )

-        if last_proposal.use_tool.name == DEFAULT_ASK_COMMAND:
+        if last_proposal.use_tool.name == ASK_COMMAND:
            tool_result = ActionSuccessResult(outputs=user_input)
            agent.event_history.register_result(tool_result)
        elif execute_approved:
@@ -296,13 +295,13 @@
                + ("\n\n" if "\n" in str(tool_result) else " ")
                + f"{tool_result}\n\n"
            )
-            if last_proposal and last_proposal.use_tool.name != DEFAULT_ASK_COMMAND
+            if last_proposal and last_proposal.use_tool.name != ASK_COMMAND
            else ""
        )
        output += f"{assistant_response.thoughts.speak}\n\n"
        output += (
            f"Next Command: {next_tool_to_use}"
-            if next_tool_to_use.name != DEFAULT_ASK_COMMAND
+            if next_tool_to_use.name != ASK_COMMAND
            else next_tool_to_use.arguments["question"]
        )

@@ -4,8 +4,7 @@ from pathlib import Path
 from typing import Optional

 import click
-
-from autogpt.logs.config import LogFormatName
+from forge.logging.config import LogFormatName

 from .telemetry import setup_telemetry

@@ -7,13 +7,12 @@ from typing import Literal, Optional

 import click
 from colorama import Back, Fore, Style
+from forge.config.config import GPT_3_MODEL, GPT_4_MODEL, Config
+from forge.llm.providers import ModelName, MultiProvider
+from forge.logging.helpers import request_user_double_check
+from forge.utils.yaml_validator import validate_yaml_file

-from autogpt.config import Config
-from autogpt.config.config import GPT_3_MODEL, GPT_4_MODEL
-from autogpt.core.resource.model_providers import ModelName, MultiProvider
-from autogpt.logs.helpers import request_user_double_check
 from autogpt.memory.vector import get_supported_memory_backends
-from autogpt.utils import utils

 logger = logging.getLogger(__name__)
@@ -107,7 +106,7 @@ async def apply_overrides_to_config(
         file = ai_settings_file

         # Validate file
-        (validated, message) = utils.validate_yaml_file(file)
+        (validated, message) = validate_yaml_file(file)
         if not validated:
             logger.fatal(extra={"title": "FAILED FILE VALIDATION:"}, msg=message)
             request_user_double_check()
@@ -120,7 +119,7 @@ async def apply_overrides_to_config(
         file = prompt_settings_file

         # Validate file
-        (validated, message) = utils.validate_yaml_file(file)
+        (validated, message) = validate_yaml_file(file)
         if not validated:
             logger.fatal(extra={"title": "FAILED FILE VALIDATION:"}, msg=message)
             request_user_double_check()

autogpts/autogpt/autogpt/app/input.py (new file, 19 lines)
@@ -0,0 +1,19 @@
+import logging
+
+import click
+
+logger = logging.getLogger(__name__)
+
+
+def clean_input(prompt: str = ""):
+    try:
+        # ask for input, default when just pressing Enter is y
+        logger.debug("Asking user via keyboard...")
+
+        return click.prompt(
+            text=prompt, prompt_suffix=" ", default="", show_default=False
+        )
+    except KeyboardInterrupt:
+        logger.info("You interrupted AutoGPT")
+        logger.info("Quitting...")
+        exit(0)

@@ -3,7 +3,7 @@ import os
 from pathlib import Path
 from typing import Any, Dict, Union

-from .config import LOG_DIR
+from forge.logging.config import LOG_DIR

 DEFAULT_PREFIX = "agent"
 CURRENT_CONTEXT_FILE_NAME = "current_context.json"

@@ -14,42 +14,37 @@ from types import FrameType
 from typing import TYPE_CHECKING, Optional

 from colorama import Fore, Style
-from forge.sdk.db import AgentDB
-
-if TYPE_CHECKING:
-    from autogpt.agents.agent import Agent
-    from autogpt.agents.base import BaseAgentActionProposal
+from forge.components.code_executor import (
+    is_docker_available,
+    we_are_running_in_a_docker_container,
+)
+from forge.config.ai_directives import AIDirectives
+from forge.config.ai_profile import AIProfile
+from forge.config.config import Config, ConfigBuilder, assert_config_has_openai_api_key
+from forge.db import AgentDB
+from forge.file_storage import FileStorageBackendName, get_storage
+from forge.llm.providers import MultiProvider
+from forge.logging.config import configure_logging
+from forge.logging.helpers import print_attribute, speak
+from forge.models.action import ActionInterruptedByHuman, ActionProposal
+from forge.models.utils import ModelWithSummary
+from forge.utils.const import FINISH_COMMAND
+from forge.utils.exceptions import AgentTerminated, InvalidAgentResponseError

 from autogpt.agent_factory.configurators import configure_agent_with_state, create_agent
 from autogpt.agent_factory.profile_generator import generate_agent_profile_for_task
 from autogpt.agent_manager import AgentManager
 from autogpt.agents.prompt_strategies.one_shot import AssistantThoughts
-from autogpt.commands.execute_code import (
-    is_docker_available,
-    we_are_running_in_a_docker_container,
-)
-from autogpt.config import (
-    AIDirectives,
-    AIProfile,
-    Config,
-    ConfigBuilder,
-    assert_config_has_openai_api_key,
-)
-from autogpt.core.resource.model_providers import MultiProvider
 from autogpt.core.runner.client_lib.utils import coroutine
-from autogpt.file_storage import FileStorageBackendName, get_storage
-from autogpt.logs.config import configure_logging
-from autogpt.logs.helpers import print_attribute, speak
-from autogpt.models.action_history import ActionInterruptedByHuman
-from autogpt.models.utils import ModelWithSummary
-from autogpt.utils.exceptions import AgentTerminated, InvalidAgentResponseError
-from autogpt.utils.utils import DEFAULT_FINISH_COMMAND
+
+if TYPE_CHECKING:
+    from autogpt.agents.agent import Agent

 from .configurator import apply_overrides_to_config
+from .input import clean_input
 from .setup import apply_overrides_to_ai_settings, interactively_revise_ai_settings
 from .spinner import Spinner
 from .utils import (
-    clean_input,
     get_legal_warning,
     markdown_to_ansi_style,
     print_git_branch_info,

@@ -176,7 +171,6 @@ async def run_auto_gpt(
             + "\n".join(f"{i} - {id}" for i, id in enumerate(existing_agents, 1))
         )
         load_existing_agent = clean_input(
-            config,
             "Enter the number or name of the agent to run,"
             " or hit enter to create a new one:",
         )
@@ -203,7 +197,7 @@
     if load_existing_agent:
         agent_state = None
         while True:
-            answer = clean_input(config, "Resume? [Y/n]")
+            answer = clean_input("Resume? [Y/n]")
             if answer == "" or answer.lower() == "y":
                 agent_state = agent_manager.load_agent_state(load_existing_agent)
                 break
@@ -230,14 +224,14 @@

         if (
             (current_episode := agent.event_history.current_episode)
-            and current_episode.action.use_tool.name == DEFAULT_FINISH_COMMAND
+            and current_episode.action.use_tool.name == FINISH_COMMAND
             and not current_episode.result
         ):
             # Agent was resumed after `finish` -> rewrite result of `finish` action
             finish_reason = current_episode.action.use_tool.arguments["reason"]
             print(f"Agent previously self-terminated; reason: '{finish_reason}'")
             new_assignment = clean_input(
-                config, "Please give a follow-up question or assignment:"
+                "Please give a follow-up question or assignment:"
             )
             agent.event_history.register_result(
                 ActionInterruptedByHuman(feedback=new_assignment)
@@ -269,7 +263,6 @@
     task = ""
     while task.strip() == "":
         task = clean_input(
-            config,
            "Enter the task that you want AutoGPT to execute,"
            " with as much detail as possible:",
        )
@@ -343,7 +336,6 @@

         # Allow user to Save As other ID
         save_as_id = clean_input(
-            config,
             f"Press enter to save as '{agent_id}',"
             " or enter a different ID to save to:",
         )
@@ -626,7 +618,7 @@ async def run_interaction_loop(

 def update_user(
     ai_profile: AIProfile,
-    action_proposal: "BaseAgentActionProposal",
+    action_proposal: "ActionProposal",
     speak_mode: bool = False,
 ) -> None:
     """Prints the assistant's thoughts and the next command to the user.
@@ -695,7 +687,7 @@ async def get_user_feedback(

     while user_feedback is None:
         # Get input from user
-        console_input = clean_input(config, Fore.MAGENTA + "Input:" + Style.RESET_ALL)
+        console_input = clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)

         # Parse user input
         if console_input.lower().strip() == config.authorise_key:

@@ -2,9 +2,12 @@
 import logging
 from typing import Optional

-from autogpt.app.utils import clean_input
-from autogpt.config import AIDirectives, AIProfile, Config
-from autogpt.logs.helpers import print_attribute
+from forge.config.ai_directives import AIDirectives
+from forge.config.ai_profile import AIProfile
+from forge.config.config import Config
+from forge.logging.helpers import print_attribute
+
+from .input import clean_input

 logger = logging.getLogger(__name__)

@@ -69,20 +72,18 @@ async def interactively_revise_ai_settings(
         )

         if (
-            clean_input(app_config, "Continue with these settings? [Y/n]").lower()
+            clean_input("Continue with these settings? [Y/n]").lower()
             or app_config.authorise_key
         ) == app_config.authorise_key:
             break

         # Ask for revised ai_profile
         ai_profile.ai_name = (
-            clean_input(app_config, "Enter AI name (or press enter to keep current):")
+            clean_input("Enter AI name (or press enter to keep current):")
             or ai_profile.ai_name
         )
         ai_profile.ai_role = (
-            clean_input(
-                app_config, "Enter new AI role (or press enter to keep current):"
-            )
+            clean_input("Enter new AI role (or press enter to keep current):")
             or ai_profile.ai_role
         )
@@ -93,7 +94,6 @@ async def interactively_revise_ai_settings(
             print_attribute(f"Constraint {i+1}:", f'"{constraint}"')
             new_constraint = (
                 clean_input(
-                    app_config,
                     f"Enter new constraint {i+1}"
                     " (press enter to keep current, or '-' to remove):",
                 )
@@ -111,7 +111,6 @@ async def interactively_revise_ai_settings(
         # Add new constraints
         while True:
             new_constraint = clean_input(
-                app_config,
                 "Press enter to finish, or enter a constraint to add:",
             )
             if not new_constraint:
@@ -125,7 +124,6 @@ async def interactively_revise_ai_settings(
             print_attribute(f"Resource {i+1}:", f'"{resource}"')
             new_resource = (
                 clean_input(
-                    app_config,
                     f"Enter new resource {i+1}"
                     " (press enter to keep current, or '-' to remove):",
                 )
@@ -142,7 +140,6 @@ async def interactively_revise_ai_settings(
         # Add new resources
         while True:
             new_resource = clean_input(
-                app_config,
                 "Press enter to finish, or enter a resource to add:",
             )
             if not new_resource:
@@ -156,7 +153,6 @@ async def interactively_revise_ai_settings(
             print_attribute(f"Best Practice {i+1}:", f'"{best_practice}"')
             new_best_practice = (
                 clean_input(
-                    app_config,
                     f"Enter new best practice {i+1}"
                     " (press enter to keep current, or '-' to remove):",
                 )
@@ -173,7 +169,6 @@ async def interactively_revise_ai_settings(
         # Add new best practices
         while True:
             new_best_practice = clean_input(
-                app_config,
                 "Press enter to finish, or add a best practice to add:",
             )
             if not new_best_practice:

@@ -7,31 +7,16 @@ import sys
 from pathlib import Path
 from typing import TYPE_CHECKING

-import click
 import requests
 from colorama import Fore, Style
 from git import InvalidGitRepositoryError, Repo

 if TYPE_CHECKING:
-    from autogpt.config import Config
+    from forge.config.config import Config

 logger = logging.getLogger(__name__)


-def clean_input(config: "Config", prompt: str = ""):
-    try:
-        # ask for input, default when just pressing Enter is y
-        logger.debug("Asking user via keyboard...")
-
-        return click.prompt(
-            text=prompt, prompt_suffix=" ", default="", show_default=False
-        )
-    except KeyboardInterrupt:
-        logger.info("You interrupted AutoGPT")
-        logger.info("Quitting...")
-        exit(0)
-
-
 def get_bulletin_from_web():
     try:
         response = requests.get(

@@ -3,13 +3,13 @@ from pprint import pformat
 from typing import Any, ClassVar

 import inflection
+from forge.llm.providers import CompletionModelFunction
+from forge.models.config import SystemConfiguration
+from forge.models.json_schema import JSONSchema
 from pydantic import Field

-from autogpt.core.configuration import SystemConfiguration
 from autogpt.core.planning.simple import LanguageModelConfiguration
 from autogpt.core.plugin.base import PluginLocation
-from autogpt.core.resource.model_providers import CompletionModelFunction
-from autogpt.core.utils.json_schema import JSONSchema

 from .schema import AbilityResult

@@ -1,10 +1,11 @@
 import logging
 from typing import ClassVar

+from forge.models.json_schema import JSONSchema
+
 from autogpt.core.ability.base import Ability, AbilityConfiguration
 from autogpt.core.ability.schema import AbilityResult
 from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
-from autogpt.core.utils.json_schema import JSONSchema


 class CreateNewAbility(Ability):
@@ -2,10 +2,11 @@ import logging
 import os
 from typing import ClassVar

+from forge.models.json_schema import JSONSchema
+
 from autogpt.core.ability.base import Ability, AbilityConfiguration
 from autogpt.core.ability.schema import AbilityResult, ContentType, Knowledge
 from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
-from autogpt.core.utils.json_schema import JSONSchema
 from autogpt.core.workspace import Workspace

@@ -1,17 +1,18 @@
 import logging
 from typing import ClassVar

-from autogpt.core.ability.base import Ability, AbilityConfiguration
-from autogpt.core.ability.schema import AbilityResult
-from autogpt.core.planning.simple import LanguageModelConfiguration
-from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
-from autogpt.core.resource.model_providers import (
+from forge.llm.providers import (
     ChatMessage,
     ChatModelProvider,
     ModelProviderName,
     OpenAIModelName,
 )
-from autogpt.core.utils.json_schema import JSONSchema
+from forge.models.json_schema import JSONSchema
+
+from autogpt.core.ability.base import Ability, AbilityConfiguration
+from autogpt.core.ability.schema import AbilityResult
+from autogpt.core.planning.simple import LanguageModelConfiguration
+from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat


 class QueryLanguageModel(Ability):

@@ -1,16 +1,17 @@
 import logging

-from autogpt.core.ability.base import Ability, AbilityConfiguration, AbilityRegistry
-from autogpt.core.ability.builtins import BUILTIN_ABILITIES
-from autogpt.core.ability.schema import AbilityResult
-from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
-from autogpt.core.memory.base import Memory
-from autogpt.core.plugin.simple import SimplePluginService
-from autogpt.core.resource.model_providers import (
+from forge.llm.providers import (
     ChatModelProvider,
     CompletionModelFunction,
     ModelProviderName,
 )
+from forge.models.config import Configurable, SystemConfiguration, SystemSettings
+
+from autogpt.core.ability.base import Ability, AbilityConfiguration, AbilityRegistry
+from autogpt.core.ability.builtins import BUILTIN_ABILITIES
+from autogpt.core.ability.schema import AbilityResult
+from autogpt.core.memory.base import Memory
+from autogpt.core.plugin.simple import SimplePluginService
 from autogpt.core.workspace.base import Workspace

@@ -3,6 +3,8 @@ from datetime import datetime
 from pathlib import Path
 from typing import Any

+from forge.llm.providers import CompletionModelFunction, OpenAIProvider, OpenAISettings
+from forge.models.config import Configurable, SystemConfiguration, SystemSettings
 from pydantic import BaseModel

 from autogpt.core.ability import (
@@ -11,7 +13,6 @@ from autogpt.core.ability import (
     SimpleAbilityRegistry,
 )
 from autogpt.core.agent.base import Agent
-from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
 from autogpt.core.memory import MemorySettings, SimpleMemory
 from autogpt.core.planning import PlannerSettings, SimplePlanner, Task, TaskStatus
 from autogpt.core.plugin.simple import (
@@ -19,11 +20,6 @@ from autogpt.core.plugin.simple import (
     PluginStorageFormat,
     SimplePluginService,
 )
-from autogpt.core.resource.model_providers import (
-    CompletionModelFunction,
-    OpenAIProvider,
-    OpenAISettings,
-)
 from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings

@@ -92,9 +88,7 @@ class SimpleAgent(Agent, Configurable):
             ),
             openai_provider=PluginLocation(
                 storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
-                storage_route=(
-                    "autogpt.core.resource.model_providers.OpenAIProvider"
-                ),
+                storage_route=("forge.llm.model_providers.OpenAIProvider"),
             ),
             planning=PluginLocation(
                 storage_format=PluginStorageFormat.INSTALLED_PACKAGE,

@@ -1,14 +0,0 @@
-"""The configuration encapsulates settings for all Agent subsystems."""
-from autogpt.core.configuration.schema import (
-    Configurable,
-    SystemConfiguration,
-    SystemSettings,
-    UserConfigurable,
-)
-
-__all__ = [
-    "Configurable",
-    "SystemConfiguration",
-    "SystemSettings",
-    "UserConfigurable",
-]

@@ -1,7 +1,8 @@
 import json
 import logging

-from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
+from forge.models.config import Configurable, SystemConfiguration, SystemSettings
+
 from autogpt.core.memory.base import Memory
 from autogpt.core.workspace import Workspace

@@ -1,16 +1,16 @@
 import logging

-from autogpt.core.configuration import SystemConfiguration, UserConfigurable
-from autogpt.core.planning.schema import Task, TaskType
-from autogpt.core.prompting import PromptStrategy
-from autogpt.core.prompting.schema import ChatPrompt, LanguageModelClassification
-from autogpt.core.prompting.utils import to_numbered_list
-from autogpt.core.resource.model_providers import (
+from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
+from forge.llm.prompting.utils import to_numbered_list
+from forge.llm.providers import (
     AssistantChatMessage,
     ChatMessage,
     CompletionModelFunction,
 )
-from autogpt.core.utils.json_schema import JSONSchema
+from forge.models.config import SystemConfiguration, UserConfigurable
+from forge.models.json_schema import JSONSchema
+
+from autogpt.core.planning.schema import Task, TaskType

 logger = logging.getLogger(__name__)
@@ -1,14 +1,13 @@
 import logging

-from autogpt.core.configuration import SystemConfiguration, UserConfigurable
-from autogpt.core.prompting import PromptStrategy
-from autogpt.core.prompting.schema import ChatPrompt, LanguageModelClassification
-from autogpt.core.resource.model_providers import (
+from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
+from forge.llm.providers import (
     AssistantChatMessage,
     ChatMessage,
     CompletionModelFunction,
 )
-from autogpt.core.utils.json_schema import JSONSchema
+from forge.models.config import SystemConfiguration, UserConfigurable
+from forge.models.json_schema import JSONSchema

 logger = logging.getLogger(__name__)

@@ -1,16 +1,16 @@
 import logging

-from autogpt.core.configuration import SystemConfiguration, UserConfigurable
-from autogpt.core.planning.schema import Task
-from autogpt.core.prompting import PromptStrategy
-from autogpt.core.prompting.schema import ChatPrompt, LanguageModelClassification
-from autogpt.core.prompting.utils import to_numbered_list
-from autogpt.core.resource.model_providers import (
+from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
+from forge.llm.prompting.utils import to_numbered_list
+from forge.llm.providers import (
     AssistantChatMessage,
     ChatMessage,
     CompletionModelFunction,
 )
-from autogpt.core.utils.json_schema import JSONSchema
+from forge.models.config import SystemConfiguration, UserConfigurable
+from forge.models.json_schema import JSONSchema
+
+from autogpt.core.planning.schema import Task

 logger = logging.getLogger(__name__)

@@ -3,24 +3,24 @@ import platform
 import time

 import distro
-
-from autogpt.core.configuration import (
-    Configurable,
-    SystemConfiguration,
-    SystemSettings,
-    UserConfigurable,
-)
-from autogpt.core.planning import prompt_strategies
-from autogpt.core.planning.schema import Task
-from autogpt.core.prompting import PromptStrategy
-from autogpt.core.prompting.schema import LanguageModelClassification
-from autogpt.core.resource.model_providers import (
+from forge.llm.prompting import PromptStrategy
+from forge.llm.prompting.schema import LanguageModelClassification
+from forge.llm.providers import (
     ChatModelProvider,
     ChatModelResponse,
     CompletionModelFunction,
     ModelProviderName,
     OpenAIModelName,
 )
+from forge.models.config import (
+    Configurable,
+    SystemConfiguration,
+    SystemSettings,
+    UserConfigurable,
+)

+from autogpt.core.planning import prompt_strategies
+from autogpt.core.planning.schema import Task
 from autogpt.core.runner.client_lib.logging.helpers import dump_prompt
 from autogpt.core.workspace import Workspace

@@ -2,17 +2,14 @@ import abc
 import enum
 from typing import TYPE_CHECKING, Type

+from forge.models.config import SystemConfiguration, UserConfigurable
 from pydantic import BaseModel

-from autogpt.core.configuration import SystemConfiguration, UserConfigurable
-
 if TYPE_CHECKING:
+    from forge.llm.providers import ChatModelProvider, EmbeddingModelProvider
+
     from autogpt.core.ability import Ability, AbilityRegistry
     from autogpt.core.memory import Memory
-    from autogpt.core.resource.model_providers import (
-        ChatModelProvider,
-        EmbeddingModelProvider,
-    )

 # Expand to other types as needed
 PluginType = (

@@ -1,9 +0,0 @@
-def to_numbered_list(
-    items: list[str], no_items_response: str = "", **template_args
-) -> str:
-    if items:
-        return "\n".join(
-            f"{i+1}. {item.format(**template_args)}" for i, item in enumerate(items)
-        )
-    else:
-        return no_items_response

@@ -1,15 +0,0 @@
-from autogpt.core.resource.schema import (
-    ProviderBudget,
-    ProviderCredentials,
-    ProviderSettings,
-    ProviderUsage,
-    ResourceType,
-)
-
-__all__ = [
-    "ProviderBudget",
-    "ProviderCredentials",
-    "ProviderSettings",
-    "ProviderUsage",
-    "ResourceType",
-]

@@ -1,12 +1,13 @@
 import logging

 from agent_protocol import StepHandler, StepResult
+from forge.config.ai_profile import AIProfile
+from forge.config.config import ConfigBuilder
+from forge.llm.prompting.prompt import DEFAULT_TRIGGERING_PROMPT
+from forge.logging.helpers import user_friendly_output

 from autogpt.agents import Agent
 from autogpt.app.main import UserFeedback
-from autogpt.config import AIProfile, ConfigBuilder
-from autogpt.logs.helpers import user_friendly_output
-from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT


 async def task_handler(task_input) -> StepHandler:

@@ -1,7 +1,7 @@
 import logging
 import sys

-from colorama import Fore, Style
+from forge.logging import BelowLevelFilter, FancyConsoleFormatter
 from openai._base_client import log as openai_logger

 SIMPLE_LOG_FORMAT = "%(asctime)s %(levelname)s %(message)s"

@@ -25,58 +25,3 @@ def configure_root_logger():

     # Disable debug logging from OpenAI library
     openai_logger.setLevel(logging.WARNING)
-
-
-class FancyConsoleFormatter(logging.Formatter):
-    """
-    A custom logging formatter designed for console output.
-
-    This formatter enhances the standard logging output with color coding. The color
-    coding is based on the level of the log message, making it easier to distinguish
-    between different types of messages in the console output.
-
-    The color for each level is defined in the LEVEL_COLOR_MAP class attribute.
-    """
-
-    # level -> (level & text color, title color)
-    LEVEL_COLOR_MAP = {
-        logging.DEBUG: Fore.LIGHTBLACK_EX,
-        logging.INFO: Fore.BLUE,
-        logging.WARNING: Fore.YELLOW,
-        logging.ERROR: Fore.RED,
-        logging.CRITICAL: Fore.RED + Style.BRIGHT,
-    }
-
-    def format(self, record: logging.LogRecord) -> str:
-        # Make sure `msg` is a string
-        if not hasattr(record, "msg"):
-            record.msg = ""
-        elif not type(record.msg) is str:
-            record.msg = str(record.msg)
-
-        # Determine default color based on error level
-        level_color = ""
-        if record.levelno in self.LEVEL_COLOR_MAP:
-            level_color = self.LEVEL_COLOR_MAP[record.levelno]
-            record.levelname = f"{level_color}{record.levelname}{Style.RESET_ALL}"
-
-        # Determine color for message
-        color = getattr(record, "color", level_color)
-        color_is_specified = hasattr(record, "color")
-
-        # Don't color INFO messages unless the color is explicitly specified.
-        if color and (record.levelno != logging.INFO or color_is_specified):
-            record.msg = f"{color}{record.msg}{Style.RESET_ALL}"
-
-        return super().format(record)
-
-
-class BelowLevelFilter(logging.Filter):
-    """Filter for logging levels below a certain threshold."""
-
-    def __init__(self, below_level: int):
-        super().__init__()
-        self.below_level = below_level
-
-    def filter(self, record: logging.LogRecord):
-        return record.levelno < self.below_level

@@ -2,8 +2,9 @@ from math import ceil, floor
 from typing import TYPE_CHECKING

 if TYPE_CHECKING:
-    from autogpt.core.prompting import ChatPrompt
-    from autogpt.core.resource.model_providers import ChatMessage
+    from forge.llm.prompting import ChatPrompt
+    from forge.llm.providers.schema import ChatMessage


 SEPARATOR_LENGTH = 42

@@ -6,7 +6,7 @@ import typing
 from pathlib import Path

 if typing.TYPE_CHECKING:
-    from autogpt.core.configuration import AgentConfiguration
+    from autogpt.core.agent.simple import AgentConfiguration


 class Workspace(abc.ABC):

@@ -3,14 +3,14 @@ import logging
 import typing
 from pathlib import Path

-from pydantic import SecretField
-from autogpt.core.configuration import (
+from forge.models.config import (
     Configurable,
     SystemConfiguration,
     SystemSettings,
     UserConfigurable,
 )
+from pydantic import SecretField

 from autogpt.core.workspace.base import Workspace

 if typing.TYPE_CHECKING:

@@ -1,30 +0,0 @@
-from __future__ import annotations
-
-import logging
-from typing import TYPE_CHECKING, Callable, Iterable, TypeVar
-
-if TYPE_CHECKING:
-    from autogpt.models.command import Command
-
-from autogpt.core.resource.model_providers import CompletionModelFunction
-
-logger = logging.getLogger(__name__)
-
-
-T = TypeVar("T", bound=Callable)
-
-
-def function_specs_from_commands(
-    commands: Iterable[Command],
-) -> list[CompletionModelFunction]:
-    """Get OpenAI-consumable function specs for the agent's available commands.
-    see https://platform.openai.com/docs/guides/gpt/function-calling
-    """
-    return [
-        CompletionModelFunction(
-            name=command.names[0],
-            description=command.description,
-            parameters={param.name: param.spec for param in command.parameters},
-        )
-        for command in commands
-    ]

@@ -1,25 +0,0 @@
-from .config import configure_logging
-from .helpers import user_friendly_output
-from .log_cycle import (
-    CURRENT_CONTEXT_FILE_NAME,
-    NEXT_ACTION_FILE_NAME,
-    PROMPT_SUMMARY_FILE_NAME,
-    PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME,
-    SUMMARY_FILE_NAME,
-    SUPERVISOR_FEEDBACK_FILE_NAME,
-    USER_INPUT_FILE_NAME,
-    LogCycleHandler,
-)
-
-__all__ = [
-    "configure_logging",
-    "user_friendly_output",
-    "CURRENT_CONTEXT_FILE_NAME",
-    "NEXT_ACTION_FILE_NAME",
-    "PROMPT_SUMMARY_FILE_NAME",
-    "PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME",
-    "SUMMARY_FILE_NAME",
-    "SUPERVISOR_FEEDBACK_FILE_NAME",
-    "USER_INPUT_FILE_NAME",
-    "LogCycleHandler",
-]

@@ -1,4 +1,4 @@
-from autogpt.config import Config
+from forge.config.config import Config

 from .memory_item import MemoryItem, MemoryItemFactory, MemoryItemRelevance
 from .providers.base import VectorMemoryProvider as VectorMemory

@@ -6,16 +6,11 @@ from typing import Literal

 import ftfy
 import numpy as np
+from forge.config.config import Config
+from forge.content_processing.text import chunk_content, split_text, summarize_text
+from forge.llm.providers import ChatMessage, ChatModelProvider, EmbeddingModelProvider
 from pydantic import BaseModel

-from autogpt.config import Config
-from autogpt.core.resource.model_providers import (
-    ChatMessage,
-    ChatModelProvider,
-    EmbeddingModelProvider,
-)
-from autogpt.processing.text import chunk_content, split_text, summarize_text
-
 from .utils import Embedding, get_embedding

 logger = logging.getLogger(__name__)

@@ -4,8 +4,7 @@ import logging
 from typing import MutableSet, Sequence

 import numpy as np
-
-from autogpt.config.config import Config
+from forge.config.config import Config

 from .. import MemoryItem, MemoryItemRelevance
 from ..utils import Embedding, get_embedding

@@ -5,8 +5,7 @@ from pathlib import Path
 from typing import Iterator

 import orjson
-
-from autogpt.config import Config
+from forge.config.config import Config

 from ..memory_item import MemoryItem
 from .base import VectorMemoryProvider

@@ -3,7 +3,7 @@ from __future__ import annotations

 from typing import Iterator, Optional

-from autogpt.config.config import Config
+from forge.config.config import Config

 from .. import MemoryItem
 from .base import VectorMemoryProvider

@@ -2,9 +2,8 @@ import logging
 from typing import Any, Sequence, overload

 import numpy as np
-
-from autogpt.config import Config
-from autogpt.core.resource.model_providers import EmbeddingModelProvider
+from forge.config.config import Config
+from forge.llm.providers import EmbeddingModelProvider

 logger = logging.getLogger(__name__)

@@ -1,4 +0,0 @@
-"""This module contains the speech recognition and speech synthesis functions."""
-from autogpt.speech.say import TextToSpeechProvider, TTSConfig
-
-__all__ = ["TextToSpeechProvider", "TTSConfig"]

@@ -1,31 +0,0 @@
-import inspect
-from typing import Optional
-
-import sentry_sdk
-
-
-def retry(retry_count: int = 3, pass_exception: str = "exception"):
-    """Decorator to retry a function multiple times on failure.
-    Can pass the exception to the function as a keyword argument."""
-
-    def decorator(func):
-        params = inspect.signature(func).parameters
-
-        async def wrapper(*args, **kwargs):
-            exception: Optional[Exception] = None
-            attempts = 0
-            while attempts < retry_count:
-                try:
-                    if pass_exception in params:
-                        kwargs[pass_exception] = exception
-                    return await func(*args, **kwargs)
-                except Exception as e:
-                    attempts += 1
-                    exception = e
-                    sentry_sdk.capture_exception(e)
-                    if attempts >= retry_count:
-                        raise e
-
-        return wrapper
-
-    return decorator

@@ -1,16 +0,0 @@
-"""The singleton metaclass for ensuring only one instance of a class."""
-import abc
-
-
-class Singleton(abc.ABCMeta, type):
-    """
-    Singleton metaclass for ensuring only one instance of a class.
-    """
-
-    _instances = {}
-
-    def __call__(cls, *args, **kwargs):
-        """Call method for the singleton metaclass."""
-        if cls not in cls._instances:
-            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
-        return cls._instances[cls]

autogpts/autogpt/poetry.lock (generated, 151 lines changed)
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.

 [[package]]
 name = "agbenchmark"

@@ -311,36 +311,55 @@ description = ""
 optional = false
 python-versions = "^3.10"
 files = []
-develop = false
+develop = true

 [package.dependencies]
 aiohttp = "^3.8.5"
 anthropic = "^0.25.1"
 beautifulsoup4 = "^4.12.2"
 boto3 = "^1.33.6"
 bs4 = "^0.0.1"
 charset-normalizer = "^3.1.0"
 chromadb = "^0.4.10"
-colorlog = "^6.7.0"
+click = "*"
+colorama = "^0.4.6"
 demjson3 = "^3.0.0"
 docker = "*"
 duckduckgo-search = "^5.0.0"
 fastapi = "^0.109.1"
 gitpython = "^3.1.32"
 google-api-python-client = "*"
 google-cloud-storage = "^2.13.0"
 jinja2 = "^3.1.2"
 jsonschema = "*"
 litellm = "^1.17.9"
 openai = "^1.7.2"
 Pillow = "*"
 playsound = "~1.2.2"
 pydantic = "*"
 pylatexenc = "*"
 pypdf = "^3.1.0"
 python-docx = "*"
 python-dotenv = "^1.0.0"
 python-multipart = "^0.0.7"
 pyyaml = "^6.0"
 requests = "*"
 selenium = "^4.13.0"
 sentry-sdk = "^1.40.4"
 spacy = "^3.0.0"
 sqlalchemy = "^2.0.19"
 tenacity = "^8.2.2"
 tiktoken = "^0.5.0"
 toml = "^0.10.2"
 uvicorn = "^0.23.2"
 webdriver-manager = "^4.0.1"

 [package.extras]
-benchmark = ["agbenchmark @ git+https://github.com/Significant-Gravitas/AutoGPT.git#subdirectory=benchmark"]
+benchmark = ["agbenchmark @ file:///home/reinier/code/agpt/Auto-GPT/benchmark"]

 [package.source]
-type = "git"
-url = "https://github.com/Significant-Gravitas/AutoGPT.git"
-reference = "HEAD"
-resolved_reference = "fd3f8fa5fc86271e4e319258fefdb3065d1aa0d4"
-subdirectory = "autogpts/forge"
+type = "directory"
+url = "../forge"

 [[package]]
 name = "backoff"

@@ -1367,23 +1386,6 @@ humanfriendly = ">=9.1"
 [package.extras]
 cron = ["capturer (>=2.4)"]

-[[package]]
-name = "colorlog"
-version = "6.8.0"
-description = "Add colours to the output of Python's logging module."
-optional = false
-python-versions = ">=3.6"
-files = [
-    {file = "colorlog-6.8.0-py3-none-any.whl", hash = "sha256:4ed23b05a1154294ac99f511fabe8c1d6d4364ec1f7fc989c7fb515ccc29d375"},
-    {file = "colorlog-6.8.0.tar.gz", hash = "sha256:fbb6fdf9d5685f2517f388fb29bb27d54e8654dd31f58bc2a3b217e967a95ca6"},
-]
-
-[package.dependencies]
-colorama = {version = "*", markers = "sys_platform == \"win32\""}
-
-[package.extras]
-development = ["black", "flake8", "mypy", "pytest", "types-colorama"]
-
 [[package]]
 name = "confection"
 version = "0.1.4"

@@ -1682,25 +1684,6 @@ files = [
|
||||
{file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dnspython"
|
||||
version = "2.4.2"
|
||||
description = "DNS toolkit"
|
||||
optional = false
|
||||
python-versions = ">=3.8,<4.0"
|
||||
files = [
|
||||
{file = "dnspython-2.4.2-py3-none-any.whl", hash = "sha256:57c6fbaaeaaf39c891292012060beb141791735dbb4004798328fc2c467402d8"},
|
||||
{file = "dnspython-2.4.2.tar.gz", hash = "sha256:8dcfae8c7460a2f84b4072e26f1c9f4101ca20c071649cb7c34e8b6a93d58984"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
dnssec = ["cryptography (>=2.6,<42.0)"]
|
||||
doh = ["h2 (>=4.1.0)", "httpcore (>=0.17.3)", "httpx (>=0.24.1)"]
|
||||
doq = ["aioquic (>=0.9.20)"]
|
||||
idna = ["idna (>=2.1,<4.0)"]
|
||||
trio = ["trio (>=0.14,<0.23)"]
|
||||
wmi = ["wmi (>=1.5.1,<2.0.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "docker"
|
||||
version = "7.0.0"
|
||||
@@ -3210,24 +3193,6 @@ tokenizers = "*"
|
||||
extra-proxy = ["streamlit (>=1.29.0,<2.0.0)"]
|
||||
proxy = ["backoff", "fastapi (>=0.104.1,<0.105.0)", "gunicorn (>=21.2.0,<22.0.0)", "orjson (>=3.9.7,<4.0.0)", "pyyaml (>=6.0,<7.0)", "rq", "uvicorn (>=0.22.0,<0.23.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "loguru"
|
||||
version = "0.7.2"
|
||||
description = "Python logging made (stupidly) simple"
|
||||
optional = false
|
||||
python-versions = ">=3.5"
|
||||
files = [
|
||||
{file = "loguru-0.7.2-py3-none-any.whl", hash = "sha256:003d71e3d3ed35f0f8984898359d65b79e5b21943f78af86aa5491210429b8eb"},
|
||||
{file = "loguru-0.7.2.tar.gz", hash = "sha256:e671a53522515f34fd406340ee968cb9ecafbc4b36c679da03c18fd8d0bd51ac"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
colorama = {version = ">=0.3.4", markers = "sys_platform == \"win32\""}
|
||||
win32-setctime = {version = ">=1.0.0", markers = "sys_platform == \"win32\""}
|
||||
|
||||
[package.extras]
|
||||
dev = ["Sphinx (==7.2.5)", "colorama (==0.4.5)", "colorama (==0.4.6)", "exceptiongroup (==1.1.3)", "freezegun (==1.1.0)", "freezegun (==1.2.2)", "mypy (==v0.910)", "mypy (==v0.971)", "mypy (==v1.4.1)", "mypy (==v1.5.1)", "pre-commit (==3.4.0)", "pytest (==6.1.2)", "pytest (==7.4.0)", "pytest-cov (==2.12.1)", "pytest-cov (==4.1.0)", "pytest-mypy-plugins (==1.9.3)", "pytest-mypy-plugins (==3.0.0)", "sphinx-autobuild (==2021.3.14)", "sphinx-rtd-theme (==1.3.0)", "tox (==3.27.1)", "tox (==4.11.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "lxml"
|
||||
version = "5.1.0"
|
||||
@@ -4418,31 +4383,6 @@ tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "pa
|
||||
typing = ["typing-extensions"]
|
||||
xmp = ["defusedxml"]
|
||||
|
||||
[[package]]
|
||||
name = "pinecone-client"
|
||||
version = "2.2.4"
|
||||
description = "Pinecone client and SDK"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "pinecone-client-2.2.4.tar.gz", hash = "sha256:2c1cc1d6648b2be66e944db2ffa59166a37b9164d1135ad525d9cd8b1e298168"},
|
||||
{file = "pinecone_client-2.2.4-py3-none-any.whl", hash = "sha256:5bf496c01c2f82f4e5c2dc977cc5062ecd7168b8ed90743b09afcc8c7eb242ec"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
dnspython = ">=2.0.0"
|
||||
loguru = ">=0.5.0"
|
||||
numpy = ">=1.22.0"
|
||||
python-dateutil = ">=2.5.3"
|
||||
pyyaml = ">=5.4"
|
||||
requests = ">=2.19.0"
|
||||
tqdm = ">=4.64.1"
|
||||
typing-extensions = ">=3.7.4"
|
||||
urllib3 = ">=1.21.1"
|
||||
|
||||
[package.extras]
|
||||
grpc = ["googleapis-common-protos (>=1.53.0)", "grpc-gateway-protoc-gen-openapiv2 (==0.1.0)", "grpcio (>=1.44.0)", "lz4 (>=3.1.3)", "protobuf (>=3.20.0,<3.21.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "platformdirs"
|
||||
version = "4.1.0"
|
||||
@@ -5315,7 +5255,6 @@ files = [
|
||||
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
|
||||
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
|
||||
@@ -5369,24 +5308,6 @@ lxml = "*"
|
||||
[package.extras]
|
||||
test = ["timeout-decorator"]
|
||||
|
||||
[[package]]
|
||||
name = "redis"
|
||||
version = "5.0.1"
|
||||
description = "Python client for Redis database and key-value store"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "redis-5.0.1-py3-none-any.whl", hash = "sha256:ed4802971884ae19d640775ba3b03aa2e7bd5e8fb8dfaed2decce4d0fc48391f"},
|
||||
{file = "redis-5.0.1.tar.gz", hash = "sha256:0dab495cd5753069d3bc650a0dde8a8f9edde16fc5691b689a566eda58100d0f"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
async-timeout = {version = ">=4.0.2", markers = "python_full_version <= \"3.11.2\""}
|
||||
|
||||
[package.extras]
|
||||
hiredis = ["hiredis (>=1.0.0)"]
|
||||
ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "referencing"
|
||||
version = "0.32.1"
|
||||
@@ -7045,20 +6966,6 @@ files = [
|
||||
{file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "win32-setctime"
|
||||
version = "1.1.0"
|
||||
description = "A small Python utility to set file creation time on Windows"
|
||||
optional = false
|
||||
python-versions = ">=3.5"
|
||||
files = [
|
||||
{file = "win32_setctime-1.1.0-py3-none-any.whl", hash = "sha256:231db239e959c2fe7eb1d7dc129f11172354f98361c4fa2d6d2d7e278baa8aad"},
|
||||
{file = "win32_setctime-1.1.0.tar.gz", hash = "sha256:15cf5750465118d6929ae4de4eb46e8edae9a5634350c01ba582df868e932cb2"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"]
|
||||
|
||||
[[package]]
|
||||
name = "wrapt"
|
||||
version = "1.16.0"
|
||||
@@ -7276,4 +7183,4 @@ benchmark = ["agbenchmark"]
|
||||
[metadata]
|
||||
lock-version = "2.0"
|
||||
python-versions = "^3.10"
|
||||
content-hash = "32d6e9f337ee33e712c42a21a0abca1f5d8d18be44bb2a26c08b375070eda1f9"
|
||||
content-hash = "d79316409dd12b59677b9d5c31717f8147bac58ee96d42ce9fb0d01cdcf826b0"
|
||||
|
||||
@@ -23,21 +23,16 @@ serve = "autogpt.app.cli:serve"

[tool.poetry.dependencies]
python = "^3.10"
anthropic = "^0.25.1"
# autogpt-forge = { path = "../forge" }
autogpt-forge = {git = "https://github.com/Significant-Gravitas/AutoGPT.git", subdirectory = "autogpts/forge"}
autogpt-forge = { path = "../forge", develop = true }
# autogpt-forge = {git = "https://github.com/Significant-Gravitas/AutoGPT.git", subdirectory = "autogpts/forge"}
beautifulsoup4 = "^4.12.2"
boto3 = "^1.33.6"
charset-normalizer = "^3.1.0"
click = "*"
colorama = "^0.4.6"
demjson3 = "^3.0.0"
distro = "^1.8.0"
docker = "*"
duckduckgo-search = "^5.0.0"
en-core-web-sm = {url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1-py3-none-any.whl"}
fastapi = "^0.109.1"
ftfy = "^6.1.1"
gitpython = "^3.1.32"
google-api-python-client = "*"
gTTS = "^2.3.1"
hypercorn = "^0.14.4"
@@ -47,23 +42,16 @@ numpy = "*"
openai = "^1.7.2"
orjson = "^3.8.10"
Pillow = "*"
pinecone-client = "^2.2.1"
playsound = "~1.2.2"
pydantic = "*"
pylatexenc = "*"
pypdf = "^3.1.0"
python-docx = "*"
python-dotenv = "^1.0.0"
pyyaml = "^6.0"
readability-lxml = "^0.8.1"
redis = "*"
requests = "*"
selenium = "^4.11.2"
sentry-sdk = "^1.40.4"
spacy = "^3.7.4"
tenacity = "^8.2.2"
tiktoken = "^0.5.0"
webdriver-manager = "*"

# OpenAI and Generic plugins import
openapi-python-client = "^0.14.0"
@@ -5,10 +5,10 @@ from pathlib import Path
from typing import Optional

import click
from forge.llm.providers import ChatMessage, MultiProvider
from forge.llm.providers.anthropic import AnthropicModelName
from git import Repo, TagReference

from autogpt.core.resource.model_providers import ChatMessage, MultiProvider
from autogpt.core.resource.model_providers.anthropic import AnthropicModelName
from autogpt.core.runner.client_lib.utils import coroutine


@@ -132,8 +132,7 @@ Do not mention the changes in the example when writing your release notes!

if __name__ == "__main__":
    import dotenv

    from autogpt.logs.config import configure_logging
    from forge.logging.config import configure_logging

    configure_logging(debug=True)


@@ -5,22 +5,21 @@ import uuid
from pathlib import Path

import pytest
from pytest_mock import MockerFixture

from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.app.main import _configure_llm_provider
from autogpt.config import AIProfile, Config, ConfigBuilder
from autogpt.core.resource.model_providers import ChatModelProvider
from autogpt.file_storage.local import (
from forge.config.ai_profile import AIProfile
from forge.config.config import Config, ConfigBuilder
from forge.file_storage.local import (
    FileStorage,
    FileStorageConfiguration,
    LocalFileStorage,
)
from autogpt.logs.config import configure_logging
from forge.llm.providers import ChatModelProvider
from forge.logging.config import configure_logging

from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.app.main import _configure_llm_provider

pytest_plugins = [
    "tests.integration.agent_factory",
    "tests.integration.memory.utils",
    "tests.vcr",
]

@@ -50,7 +49,6 @@ def storage(app_data_dir: Path) -> FileStorage:
def config(
    tmp_project_root: Path,
    app_data_dir: Path,
    mocker: MockerFixture,
):
    if not os.environ.get("OPENAI_API_KEY"):
        os.environ["OPENAI_API_KEY"] = "sk-dummy"
@@ -1,22 +1,9 @@
import pytest
from forge.config.ai_profile import AIProfile
from forge.config.config import Config
from forge.file_storage import FileStorageBackendName, get_storage

from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.agents.prompt_strategies.one_shot import OneShotAgentPromptStrategy
from autogpt.config import AIProfile, Config
from autogpt.file_storage import FileStorageBackendName, get_storage
from autogpt.memory.vector import get_memory


@pytest.fixture
def memory_json_file(config: Config):
    was_memory_backend = config.memory_backend

    config.memory_backend = "json_file"
    memory = get_memory(config)
    memory.clear()
    yield memory

    config.memory_backend = was_memory_backend


@pytest.fixture
@@ -29,10 +16,6 @@ def dummy_agent(config: Config, llm_provider, memory_json_file):
        ],
    )

    agent_prompt_config = OneShotAgentPromptStrategy.default_configuration.copy(
        deep=True
    )
    agent_prompt_config.use_functions_api = config.openai_functions
    agent_settings = AgentSettings(
        name=Agent.default_settings.name,
        description=Agent.default_settings.description,
@@ -42,7 +25,6 @@ def dummy_agent(config: Config, llm_provider, memory_json_file):
            smart_llm=config.smart_llm,
            use_functions_api=config.openai_functions,
        ),
        prompt_config=agent_prompt_config,
        history=Agent.default_settings.history.copy(deep=True),
    )
@@ -4,15 +4,15 @@ import tempfile
from pathlib import Path

import pytest

from autogpt.agents.agent import Agent
from autogpt.commands.execute_code import (
from forge.components.code_executor import (
    ALLOWLIST_CONTROL,
    CodeExecutorComponent,
    is_docker_available,
    we_are_running_in_a_docker_container,
)
from autogpt.utils.exceptions import InvalidArgumentError, OperationNotAllowedError
from forge.utils.exceptions import InvalidArgumentError, OperationNotAllowedError

from autogpt.agents.agent import Agent


@pytest.fixture

@@ -4,10 +4,10 @@ from pathlib import Path
from unittest.mock import patch

import pytest
from forge.components.image_gen import ImageGeneratorComponent
from PIL import Image

from autogpt.agents.agent import Agent
from autogpt.commands.image_gen import ImageGeneratorComponent


@pytest.fixture

@@ -1,13 +1,14 @@
from unittest.mock import patch

import pytest
from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.config.config import Config

from autogpt.app.setup import (
    apply_overrides_to_ai_settings,
    interactively_revise_ai_settings,
)
from autogpt.config import AIDirectives, Config
from autogpt.config.ai_profile import AIProfile


@pytest.mark.asyncio

@@ -1,7 +1,7 @@
import pytest
from forge.components.web.selenium import BrowsingError, WebSeleniumComponent

from autogpt.agents.agent import Agent
from autogpt.commands.web_selenium import BrowsingError, WebSeleniumComponent


@pytest.fixture

@@ -2,9 +2,9 @@
"""Tests for JSONFileMemory class"""
import orjson
import pytest
from forge.config.config import Config
from forge.file_storage import FileStorage

from autogpt.config import Config
from autogpt.file_storage import FileStorage
from autogpt.memory.vector import JSONFileMemory, MemoryItem


@@ -1,11 +1,11 @@
import numpy
import pytest
from forge.config.config import Config
from forge.llm.providers import OPEN_AI_EMBEDDING_MODELS
from pytest_mock import MockerFixture

import autogpt.memory.vector.memory_item as vector_memory_item
import autogpt.memory.vector.providers.base as memory_provider_base
from autogpt.config.config import Config
from autogpt.core.resource.model_providers import OPEN_AI_EMBEDDING_MODELS
from autogpt.memory.vector import get_memory
from autogpt.memory.vector.utils import Embedding
@@ -1,5 +1,5 @@
from autogpt.config.ai_profile import AIProfile
from autogpt.file_storage.base import FileStorage
from forge.config.ai_profile import AIProfile
from forge.file_storage import FileStorage

"""
Test cases for the AIProfile class, which handles loading the AI configuration
@@ -8,16 +8,13 @@ from typing import Any
from unittest import mock

import pytest
from forge.config.config import Config, ConfigBuilder
from forge.llm.providers.schema import ChatModelInfo, ModelProviderName
from openai.pagination import AsyncPage
from openai.types import Model
from pydantic import SecretStr

from autogpt.app.configurator import GPT_3_MODEL, GPT_4_MODEL, apply_overrides_to_config
from autogpt.config import Config, ConfigBuilder
from autogpt.core.resource.model_providers.schema import (
    ChatModelInfo,
    ModelProviderName,
)


def test_initial_values(config: Config) -> None:
@@ -147,7 +144,7 @@ def test_azure_config(config_with_azure: Config) -> None:
@pytest.mark.asyncio
async def test_create_config_gpt4only(config: Config) -> None:
    with mock.patch(
        "autogpt.core.resource.model_providers.multi.MultiProvider.get_available_models"
        "forge.llm.providers.multi.MultiProvider.get_available_models"
    ) as mock_get_models:
        mock_get_models.return_value = [
            ChatModelInfo(
@@ -167,7 +164,7 @@ async def test_create_config_gpt4only(config: Config) -> None:
@pytest.mark.asyncio
async def test_create_config_gpt3only(config: Config) -> None:
    with mock.patch(
        "autogpt.core.resource.model_providers.multi.MultiProvider.get_available_models"
        "forge.llm.providers.multi.MultiProvider.get_available_models"
    ) as mock_get_models:
        mock_get_models.return_value = [
            ChatModelInfo(
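One detail worth noting in the test updates above: `unittest.mock.patch` resolves its dotted target string at patch time, so every patch target has to follow the module move from `autogpt.core.resource.model_providers` to `forge.llm.providers`. A minimal sketch of the pattern (assumes the `forge` package is importable; the empty return value is illustrative only):

```py
from unittest import mock

# patch() takes the path where the attribute is *looked up*, so after the
# refactor the target string must name the new forge module, not autogpt.
with mock.patch(
    "forge.llm.providers.multi.MultiProvider.get_available_models"
) as mock_get_models:
    mock_get_models.return_value = []  # illustrative stand-in for model info
```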
@@ -2,14 +2,9 @@ import os
from pathlib import Path

import pytest
from pytest_mock import MockerFixture
from forge.file_storage import FileStorage

import autogpt.agents.features.agent_file_manager as file_ops
from autogpt.agents.agent import Agent
from autogpt.config import Config
from autogpt.file_storage import FileStorage
from autogpt.memory.vector.memory_item import MemoryItem
from autogpt.memory.vector.utils import Embedding


@pytest.fixture()
@@ -17,25 +12,6 @@ def file_content():
    return "This is a test file.\n"


@pytest.fixture()
def mock_MemoryItem_from_text(
    mocker: MockerFixture, mock_embedding: Embedding, config: Config
):
    mocker.patch.object(
        file_ops.MemoryItemFactory,
        "from_text",
        new=lambda content, source_type, config, metadata: MemoryItem(
            raw_content=content,
            summary=f"Summary of content '{content}'",
            chunk_summaries=[f"Summary of content '{content}'"],
            chunks=[content],
            e_summary=mock_embedding,
            e_chunks=[mock_embedding],
            metadata=metadata | {"source_type": source_type},
        ),
    )


@pytest.fixture
def file_manager_component(agent: Agent):
    return agent.file_manager

@@ -4,12 +4,11 @@ from pathlib import Path

import pytest
import pytest_asyncio
from forge.file_storage import GCSFileStorage, GCSFileStorageConfiguration
from google.auth.exceptions import GoogleAuthError
from google.cloud import storage
from google.cloud.exceptions import NotFound

from autogpt.file_storage.gcs import GCSFileStorage, GCSFileStorageConfiguration

try:
    storage.Client()
except GoogleAuthError:

@@ -1,11 +1,11 @@
import pytest
from forge.components.git_operations import GitOperationsComponent
from forge.file_storage.base import FileStorage
from forge.utils.exceptions import CommandExecutionError
from git.exc import GitCommandError
from git.repo.base import Repo

from autogpt.agents.agent import Agent
from autogpt.commands.git_operations import GitOperationsComponent
from autogpt.file_storage.base import FileStorage
from autogpt.utils.exceptions import CommandExecutionError


@pytest.fixture

@@ -1,8 +1,7 @@
import json

import pytest

from autogpt.core.utils.json_utils import json_loads
from forge.json import json_loads

_JSON_FIXABLE: list[tuple[str, str]] = [
    # Missing comma
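The fixture list above pairs broken JSON strings with their repaired forms; the point of the `forge.json` move is that `json_loads` stays lenient where the stdlib parser is strict. A hypothetical illustration of the first fixture category (assumes `forge` and its `demjson3` dependency are installed; the exact input string is made up, not taken from the fixtures):

```py
from forge.json import json_loads

# A missing comma is fatal for the stdlib json.loads, but json_loads repairs it:
assert json_loads('{"a": 1 "b": 2}') == {"a": 1, "b": 2}
```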
@@ -1,8 +1,7 @@
from pathlib import Path

import pytest

from autogpt.file_storage.local import FileStorageConfiguration, LocalFileStorage
from forge.file_storage import FileStorageConfiguration, LocalFileStorage

_ACCESSIBLE_PATHS = [
    Path("."),

@@ -1,6 +1,5 @@
import pytest

from autogpt.logs.utils import remove_color_codes
from forge.logging.utils import remove_color_codes


@pytest.mark.parametrize(
@@ -1,4 +1,4 @@
from autogpt.config.ai_directives import AIDirectives
from forge.config.ai_directives import AIDirectives

"""
Test cases for the PromptConfig class, which handles loading the Prompts configuration
@@ -5,8 +5,7 @@ from pathlib import Path
import pytest
import pytest_asyncio
from botocore.exceptions import ClientError

from autogpt.file_storage.s3 import S3FileStorage, S3FileStorageConfiguration
from forge.file_storage import S3FileStorage, S3FileStorageConfiguration

if not (os.getenv("S3_ENDPOINT_URL") and os.getenv("AWS_ACCESS_KEY_ID")):
    pytest.skip("S3 environment variables are not set", allow_module_level=True)

@@ -9,8 +9,7 @@ import docx
import pytest
import yaml
from bs4 import BeautifulSoup

from autogpt.utils.file_operations_utils import decode_textual_file, is_file_binary_fn
from forge.utils.file_operations import decode_textual_file, is_file_binary_fn

logger = logging.getLogger(__name__)


@@ -1,8 +1,7 @@
import pytest
from forge.utils.url_validator import validate_url
from pytest import raises

from autogpt.url_utils.validators import validate_url


@validate_url
def dummy_method(url):

@@ -5,6 +5,8 @@ from unittest.mock import patch

import pytest
import requests
from forge.json.parsing import extract_dict_from_json
from forge.utils.yaml_validator import validate_yaml_file
from git import InvalidGitRepositoryError

import autogpt.app.utils
@@ -14,8 +16,6 @@ from autogpt.app.utils import (
    get_latest_bulletin,
    set_env_config_value,
)
from autogpt.core.utils.json_utils import extract_dict_from_json
from autogpt.utils.utils import validate_yaml_file
from tests.utils import skip_in_ci


@@ -1,11 +1,11 @@
import json

import pytest
from forge.components.web.search import WebSearchComponent
from forge.utils.exceptions import ConfigurationError
from googleapiclient.errors import HttpError

from autogpt.agents.agent import Agent
from autogpt.commands.web_search import WebSearchComponent
from autogpt.utils.exceptions import ConfigurationError


@pytest.fixture
@@ -56,7 +56,7 @@ def test_google_search(
    mock_ddg = mocker.Mock()
    mock_ddg.return_value = return_value

    mocker.patch("autogpt.commands.web_search.DDGS.text", mock_ddg)
    mocker.patch("forge.components.web.search.DDGS.text", mock_ddg)
    actual_output = web_search_component.web_search(query, num_results=num_results)
    for o in expected_output_parts:
        assert o in actual_output

@@ -36,7 +36,7 @@ from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
from webdriver_manager.microsoft import EdgeChromiumDriverManager as EdgeDriverManager

from forge.sdk.errors import CommandExecutionError
from forge.utils.exceptions import CommandExecutionError

from ..registry import action
autogpts/forge/forge/agent/__init__.py (new file, 15 lines)
@@ -0,0 +1,15 @@
from .base import AgentMeta, BaseAgent, BaseAgentConfiguration, BaseAgentSettings
from .components import (
    AgentComponent,
    ComponentEndpointError,
    ComponentSystemError,
    EndpointPipelineError,
)
from .protocols import (
    AfterExecute,
    AfterParse,
    CommandProvider,
    DirectiveProvider,
    ExecutionFailure,
    MessageProvider,
)
@@ -19,34 +19,31 @@ from colorama import Fore
from pydantic import BaseModel, Field, validator

if TYPE_CHECKING:
    from autogpt.core.resource.model_providers.schema import (
        ChatModelInfo,
    )
    from autogpt.models.action_history import ActionResult
    from forge.models.action import ActionProposal, ActionResult

from autogpt.agents import protocols as _protocols
from autogpt.agents.components import (
from forge.agent import protocols
from forge.agent.components import (
    AgentComponent,
    ComponentEndpointError,
    EndpointPipelineError,
)
from autogpt.config import ConfigBuilder
from autogpt.config.ai_directives import AIDirectives
from autogpt.config.ai_profile import AIProfile
from autogpt.core.configuration import (
from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.config.config import ConfigBuilder
from forge.llm.prompting.prompt import DEFAULT_TRIGGERING_PROMPT
from forge.llm.providers import (
    CHAT_MODELS,
    AssistantFunctionCall,
    ModelName,
    OpenAIModelName,
)
from forge.llm.providers.schema import ChatModelInfo
from forge.models.config import (
    Configurable,
    SystemConfiguration,
    SystemSettings,
    UserConfigurable,
)
from autogpt.core.resource.model_providers import (
    CHAT_MODELS,
    AssistantFunctionCall,
    ModelName,
)
from autogpt.core.resource.model_providers.openai import OpenAIModelName
from autogpt.models.utils import ModelWithSummary
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT

logger = logging.getLogger(__name__)

@@ -140,9 +137,7 @@ class AgentMeta(ABCMeta):
        return instance


class BaseAgentActionProposal(BaseModel):
    thoughts: str | ModelWithSummary
    use_tool: AssistantFunctionCall = None


class BaseAgent(Configurable[BaseAgentSettings], metaclass=AgentMeta):
@@ -182,13 +177,13 @@ class BaseAgent(Configurable[BaseAgentSettings], metaclass=AgentMeta):
        return self.config.send_token_limit or self.llm.max_tokens * 3 // 4

    @abstractmethod
    async def propose_action(self) -> BaseAgentActionProposal:
    async def propose_action(self) -> ActionProposal:
        ...

    @abstractmethod
    async def execute(
        self,
        proposal: BaseAgentActionProposal,
        proposal: ActionProposal,
        user_feedback: str = "",
    ) -> ActionResult:
        ...
@@ -196,7 +191,7 @@ class BaseAgent(Configurable[BaseAgentSettings], metaclass=AgentMeta):
    @abstractmethod
    async def do_not_execute(
        self,
        denied_proposal: BaseAgentActionProposal,
        denied_proposal: ActionProposal,
        user_feedback: str,
    ) -> ActionResult:
        ...
@@ -224,7 +219,7 @@ class BaseAgent(Configurable[BaseAgentSettings], metaclass=AgentMeta):
    ) -> list[T] | list[None]:
        method_name = protocol_method.__name__
        protocol_name = protocol_method.__qualname__.split(".")[0]
        protocol_class = getattr(_protocols, protocol_name)
        protocol_class = getattr(protocols, protocol_name)
        if not issubclass(protocol_class, AgentComponent):
            raise TypeError(f"{repr(protocol_method)} is not a protocol method")

@@ -300,7 +295,7 @@ class BaseAgent(Configurable[BaseAgentSettings], metaclass=AgentMeta):
        ]

        if self.components:
            # Check if any coponent is missed (added to Agent but not to components)
            # Check if any component is missing (added to Agent but not to components)
            for component in components:
                if component not in self.components:
                    logger.warning(
@@ -321,12 +316,11 @@ class BaseAgent(Configurable[BaseAgentSettings], metaclass=AgentMeta):
            if node in visited:
                return
            visited.add(node)
            for neighbor_class in node.__class__.run_after:
                # Find the instance of neighbor_class in components
            for neighbor_class in node._run_after:
                neighbor = next(
                    (m for m in components if isinstance(m, neighbor_class)), None
                )
                if neighbor:
                if neighbor and neighbor not in visited:
                    visit(neighbor)
            stack.append(node)
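The `visit` function in the hunk above is a depth-first topological sort over the `_run_after` edges: a node is appended to `stack` only after everything it must run after has been placed. A self-contained sketch of the same idea (the `topo_order` and `deps` names are hypothetical, not from this diff):

```py
def topo_order(components: list[str], deps: dict[str, list[str]]) -> list[str]:
    """Order components so each comes after everything it must run after."""
    visited: set[str] = set()
    stack: list[str] = []

    def visit(node: str) -> None:
        if node in visited:
            return
        visited.add(node)
        for neighbor in deps.get(node, []):  # the node's "run after" edges
            visit(neighbor)
        stack.append(node)  # appended only once its dependencies are placed

    for component in components:
        visit(component)
    return stack


# "b" must run after "a", so "a" is emitted first:
assert topo_order(["b", "a"], {"b": ["a"]}) == ["a", "b"]
```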
@@ -1,9 +1,15 @@
from __future__ import annotations

from abc import ABC
from typing import Callable
from typing import Callable, TypeVar

T = TypeVar("T", bound="AgentComponent")


class AgentComponent(ABC):
    run_after: list[type["AgentComponent"]] = []
    """Base class for all agent components."""

    _run_after: list[type[AgentComponent]] = []
    _enabled: Callable[[], bool] | bool = True
    _disabled_reason: str = ""

@@ -15,8 +21,17 @@ class AgentComponent(ABC):

    @property
    def disabled_reason(self) -> str:
        """Return the reason this component is disabled."""
        return self._disabled_reason

    def run_after(self: T, *components: type[AgentComponent] | AgentComponent) -> T:
        """Set the components that this component should run after."""
        for component in components:
            t = component if isinstance(component, type) else type(component)
            if t not in self._run_after and t is not self.__class__:
                self._run_after.append(t)
        return self


class ComponentEndpointError(Exception):
    """Error of a single protocol method on a component."""
@@ -1,13 +1,14 @@
from abc import abstractmethod
from typing import TYPE_CHECKING, Iterator

from autogpt.agents.components import AgentComponent
from .components import AgentComponent

if TYPE_CHECKING:
    from autogpt.agents.base import BaseAgentActionProposal
    from autogpt.core.resource.model_providers.schema import ChatMessage
    from autogpt.models.action_history import ActionResult
    from autogpt.models.command import Command
    from forge.command.command import Command
    from forge.llm.providers import ChatMessage
    from forge.models.action import ActionResult

    from .base import ActionProposal


class DirectiveProvider(AgentComponent):
@@ -35,7 +36,7 @@ class MessageProvider(AgentComponent):

class AfterParse(AgentComponent):
    @abstractmethod
    def after_parse(self, result: "BaseAgentActionProposal") -> None:
    def after_parse(self, result: "ActionProposal") -> None:
        ...
autogpts/forge/forge/command/__init__.py (new file, 3 lines)
@@ -0,0 +1,3 @@
from .command import Command, CommandOutput, CommandParameter
from .decorator import command
from .parameter import CommandParameter
@@ -3,11 +3,9 @@ from __future__ import annotations
import inspect
from typing import Any, Callable, Generic, ParamSpec, TypeVar

from .command_parameter import CommandParameter
from .context_item import ContextItem
from .parameter import CommandParameter

CommandReturnValue = Any
CommandOutput = CommandReturnValue | tuple[CommandReturnValue, ContextItem]
CommandOutput = Any

P = ParamSpec("P")
CO = TypeVar("CO", bound=CommandOutput)
@@ -1,9 +1,10 @@
import re
from typing import Callable, Concatenate, Optional, TypeVar

from autogpt.agents.protocols import CommandProvider
from autogpt.core.utils.json_schema import JSONSchema
from autogpt.models.command import CO, Command, CommandParameter, P
from forge.agent.protocols import CommandProvider
from forge.models.json_schema import JSONSchema

from .command import CO, Command, CommandParameter, P

_CP = TypeVar("_CP", bound=CommandProvider)

@@ -1,6 +1,6 @@
import dataclasses

from autogpt.core.utils.json_schema import JSONSchema
from forge.models.json_schema import JSONSchema


@dataclasses.dataclass
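To make the moved pieces concrete, here is a hedged sketch of how `Command`, the `@command` decorator, and `JSONSchema` typically fit together in a `CommandProvider`. The decorator signature (names, description, parameter schemas) is assumed from usage elsewhere in the codebase, and the `MathComponent`/`multiply` names are made up for illustration:

```py
from typing import Iterator

from forge.agent.protocols import CommandProvider
from forge.command import Command, command
from forge.models.json_schema import JSONSchema


class MathComponent(CommandProvider):  # hypothetical component
    def get_commands(self) -> Iterator[Command]:
        yield self.multiply

    @command(
        ["multiply"],
        "Multiply two integers",
        {
            "a": JSONSchema(type=JSONSchema.Type.INTEGER, required=True),
            "b": JSONSchema(type=JSONSchema.Type.INTEGER, required=True),
        },
    )
    def multiply(self, a: int, b: int) -> int:
        # The decorator wraps this method in a Command carrying the schema above
        return a * b
```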
@@ -12,8 +12,8 @@ You can use any valid Python variable name, what matters for the component to be
Visit [Built-in Components](./built-in-components.md) to see what components are available out of the box.

```py
from autogpt.agents import Agent
from autogpt.agents.components import AgentComponent
from forge.agent import BaseAgent
from forge.agent.components import AgentComponent

class HelloComponent(AgentComponent):
    pass
@@ -22,7 +22,7 @@ class SomeComponent(AgentComponent):
    def __init__(self, hello_component: HelloComponent):
        self.hello_component = hello_component

class MyAgent(Agent):
class MyAgent(BaseAgent):
    def __init__(self):
        # These components will be automatically discovered and used
        self.hello_component = HelloComponent()
@@ -32,21 +32,30 @@ class MyAgent(Agent):

## Ordering components

The execution order of components is important because the latter ones may depend on the results of the former ones.
The execution order of components is important because some may depend on the results of the previous ones.
**By default, components are ordered alphabetically.**

### Implicit order
### Ordering individual components

Components can be ordered implicitly by the agent; each component can set `run_after` list to specify which components should run before it. This is useful when components rely on each other or need to be executed in a specific order. Otherwise, the order of components is alphabetical.
You can order a single component by passing other components (or their types) to the `run_after` method. This way you can ensure that the component will be executed after the specified one.
The `run_after` method returns the component itself, so you can call it when assigning the component to a variable:

```py
# This component will run after HelloComponent
class CalculatorComponent(AgentComponent):
    run_after = [HelloComponent]
class MyAgent(Agent):
    def __init__(self):
        self.hello_component = HelloComponent()
        self.calculator_component = CalculatorComponent().run_after(self.hello_component)
        # This is equivalent to passing a type:
        # self.calculator_component = CalculatorComponent().run_after(HelloComponent)
```

### Explicit order
!!! warning
    Be sure not to make circular dependencies when ordering components!

Sometimes it may be easier to order components explicitly by setting `self.components` list in the agent's `__init__` method. This way you can also ensure there's no circular dependencies and `run_after` is ignored.
### Ordering all components

You can also order all components by setting the `self.components` list in the agent's `__init__` method.
This ensures that there are no circular dependencies, and any `run_after` calls are ignored.

!!! warning
    Be sure to include all components - by setting the `self.components` list, you're overriding the default behavior of discovering components automatically. Since this is usually not intended, the agent will inform you in the terminal if some components were skipped.
@@ -55,7 +64,7 @@ Sometimes it may be easier to order components explicitly by setting `self.compo
class MyAgent(Agent):
    def __init__(self):
        self.hello_component = HelloComponent()
        self.calculator_component = CalculatorComponent(self.hello_component)
        self.calculator_component = CalculatorComponent()
        # Explicitly set components list
        self.components = [self.hello_component, self.calculator_component]
```
@@ -116,8 +125,8 @@ All errors accept an optional `str` message. There are following errors ordered
**Example**

```py
from autogpt.agents.components import ComponentEndpointError
from autogpt.agents.protocols import MessageProvider
from forge.agent.components import ComponentEndpointError
from forge.agent.protocols import MessageProvider

# Example of raising an error
class MyComponent(MessageProvider):
@@ -0,0 +1,2 @@
from .action_history import ActionHistoryComponent
from .model import Episode, EpisodicActionHistory
@@ -1,29 +1,24 @@
from typing import Callable, Generic, Iterator, Optional
from typing import TYPE_CHECKING, Callable, Generic, Iterator, Optional

from autogpt.agents.features.watchdog import WatchdogComponent
from autogpt.agents.protocols import AfterExecute, AfterParse, MessageProvider
from autogpt.config.config import Config
from autogpt.core.resource.model_providers.schema import ChatMessage, ChatModelProvider
from autogpt.models.action_history import (
    AP,
    ActionResult,
    Episode,
    EpisodicActionHistory,
)
from autogpt.prompts.utils import indent
from forge.agent.protocols import AfterExecute, AfterParse, MessageProvider
from forge.llm.prompting.utils import indent
from forge.llm.providers import ChatMessage, ChatModelProvider

if TYPE_CHECKING:
    from forge.config.config import Config

from .model import AP, ActionResult, Episode, EpisodicActionHistory


class EventHistoryComponent(MessageProvider, AfterParse, AfterExecute, Generic[AP]):
class ActionHistoryComponent(MessageProvider, AfterParse, AfterExecute, Generic[AP]):
    """Keeps track of the event history and provides a summary of the steps."""

    run_after = [WatchdogComponent]

    def __init__(
        self,
        event_history: EpisodicActionHistory[AP],
        max_tokens: int,
        count_tokens: Callable[[str], int],
        legacy_config: Config,
        legacy_config: "Config",
        llm_provider: ChatModelProvider,
    ) -> None:
        self.event_history = event_history
@@ -1,83 +1,21 @@
from __future__ import annotations

import asyncio
from typing import TYPE_CHECKING, Any, Generic, Iterator, Literal, Optional, TypeVar
from typing import TYPE_CHECKING, Generic, Iterator, TypeVar

from pydantic import BaseModel, Field
from pydantic import Field
from pydantic.generics import GenericModel

from autogpt.agents.base import BaseAgentActionProposal
from autogpt.models.utils import ModelWithSummary
from autogpt.processing.text import summarize_text
from autogpt.prompts.utils import format_numbered_list, indent
from forge.content_processing.text import summarize_text
from forge.llm.prompting.utils import format_numbered_list, indent
from forge.models.action import ActionProposal, ActionResult
from forge.models.utils import ModelWithSummary

if TYPE_CHECKING:
    from autogpt.config.config import Config
    from autogpt.core.resource.model_providers import ChatModelProvider
    from forge.config.config import Config
    from forge.llm.providers import ChatModelProvider


class ActionSuccessResult(BaseModel):
    outputs: Any
    status: Literal["success"] = "success"

    def __str__(self) -> str:
        outputs = str(self.outputs).replace("```", r"\```")
        multiline = "\n" in outputs
        return f"```\n{self.outputs}\n```" if multiline else str(self.outputs)


class ErrorInfo(BaseModel):
    args: tuple
    message: str
    exception_type: str
    repr: str

    @staticmethod
    def from_exception(exception: Exception) -> ErrorInfo:
        return ErrorInfo(
            args=exception.args,
            message=getattr(exception, "message", exception.args[0]),
            exception_type=exception.__class__.__name__,
            repr=repr(exception),
        )

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return self.repr


class ActionErrorResult(BaseModel):
    reason: str
    error: Optional[ErrorInfo] = None
    status: Literal["error"] = "error"

    @staticmethod
    def from_exception(exception: Exception) -> ActionErrorResult:
        return ActionErrorResult(
            reason=getattr(exception, "message", exception.args[0]),
            error=ErrorInfo.from_exception(exception),
        )

    def __str__(self) -> str:
        return f"Action failed: '{self.reason}'"


class ActionInterruptedByHuman(BaseModel):
    feedback: str
    status: Literal["interrupted_by_human"] = "interrupted_by_human"

    def __str__(self) -> str:
        return (
            'The user interrupted the action with the following feedback: "%s"'
            % self.feedback
        )


ActionResult = ActionSuccessResult | ActionErrorResult | ActionInterruptedByHuman

AP = TypeVar("AP", bound=BaseAgentActionProposal)
AP = TypeVar("AP", bound=ActionProposal)


class Episode(GenericModel, Generic[AP]):
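The `Action*Result` classes removed above (now living in `forge.models.action`) each carry a `status: Literal[...]` field as a tag, which is what lets the `ActionResult` union be told apart at runtime. A minimal standalone sketch of that tagged-union pattern (the `Ok`/`Err` names are hypothetical, not from this diff):

```py
from typing import Literal, Union

from pydantic import BaseModel


class Ok(BaseModel):
    outputs: str
    status: Literal["success"] = "success"


class Err(BaseModel):
    reason: str
    status: Literal["error"] = "error"


Result = Union[Ok, Err]


def describe(result: Result) -> str:
    # The literal tag lets callers branch without isinstance checks
    if result.status == "success":
        return "succeeded"
    return f"failed: {result.reason}"


assert describe(Err(reason="timeout")) == "failed: timeout"
```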
@@ -0,0 +1,7 @@
from .code_executor import (
    ALLOWLIST_CONTROL,
    DENYLIST_CONTROL,
    CodeExecutorComponent,
    is_docker_available,
    we_are_running_in_a_docker_container,
)
@@ -10,14 +10,12 @@ import docker
from docker.errors import DockerException, ImageNotFound, NotFound
from docker.models.containers import Container as DockerContainer

from autogpt.agents.base import BaseAgentSettings
from autogpt.agents.protocols import CommandProvider
from autogpt.command_decorator import command
from autogpt.config import Config
from autogpt.core.utils.json_schema import JSONSchema
from autogpt.file_storage.base import FileStorage
from autogpt.models.command import Command
from autogpt.utils.exceptions import (
from forge.agent import BaseAgentSettings, CommandProvider
from forge.command import Command, command
from forge.config.config import Config
from forge.file_storage import FileStorage
from forge.models.json_schema import JSONSchema
from forge.utils.exceptions import (
    CodeExecutionError,
    CommandExecutionError,
    InvalidArgumentError,
@@ -175,7 +173,7 @@ class CodeExecutorComponent(CommandProvider):

        if we_are_running_in_a_docker_container():
            logger.debug(
                "AutoGPT is running in a Docker container; "
                "App is running in a Docker container; "
                f"executing {file_path} directly..."
            )
            result = subprocess.run(
@@ -189,7 +187,7 @@ class CodeExecutorComponent(CommandProvider):
            else:
                raise CodeExecutionError(result.stderr)

        logger.debug("AutoGPT is not running in a Docker container")
        logger.debug("App is not running in a Docker container")
        try:
            assert self.state.agent_id, "Need Agent ID to attach Docker container"
autogpts/forge/forge/components/context/__init__.py (new file, 7 lines)
@@ -0,0 +1,7 @@
from .context import ContextComponent
from .context_item import (
    ContextItem,
    FileContextItem,
    FolderContextItem,
    StaticContextItem,
)
@@ -5,14 +5,14 @@ from typing import Iterator
from pydantic import BaseModel, Field
from typing_extensions import Annotated

from autogpt.agents.protocols import CommandProvider, MessageProvider
from autogpt.command_decorator import command
from autogpt.core.resource.model_providers import ChatMessage
from autogpt.core.utils.json_schema import JSONSchema
from autogpt.file_storage.base import FileStorage
from autogpt.models.command import Command
from autogpt.models.context_item import ContextItem, FileContextItem, FolderContextItem
from autogpt.utils.exceptions import InvalidArgumentError
from forge.agent.protocols import CommandProvider, MessageProvider
from forge.command import Command, command
from forge.file_storage.base import FileStorage
from forge.llm.providers import ChatMessage
from forge.models.json_schema import JSONSchema
from forge.utils.exceptions import InvalidArgumentError

from .context_item import ContextItem, FileContextItem, FolderContextItem


class AgentContext(BaseModel):
@@ -5,8 +5,8 @@ from typing import Literal, Optional

from pydantic import BaseModel, Field

from autogpt.file_storage.base import FileStorage
from autogpt.utils.file_operations_utils import decode_textual_file
from forge.file_storage.base import FileStorage
from forge.utils.file_operations import decode_textual_file

logger = logging.getLogger(__name__)
autogpts/forge/forge/components/file_manager/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
from .file_manager import FileManagerComponent
@@ -3,14 +3,12 @@ import os
from pathlib import Path
from typing import Iterator, Optional

from autogpt.agents.protocols import CommandProvider, DirectiveProvider
from autogpt.command_decorator import command
from autogpt.core.utils.json_schema import JSONSchema
from autogpt.file_storage.base import FileStorage
from autogpt.models.command import Command
from autogpt.utils.file_operations_utils import decode_textual_file

from ..base import BaseAgentSettings
from forge.agent import BaseAgentSettings
from forge.agent.protocols import CommandProvider, DirectiveProvider
from forge.command import Command, command
from forge.file_storage.base import FileStorage
from forge.models.json_schema import JSONSchema
from forge.utils.file_operations import decode_textual_file

logger = logging.getLogger(__name__)

@@ -0,0 +1 @@
from .git_operations import GitOperationsComponent
@@ -3,13 +3,12 @@ from typing import Iterator

from git.repo import Repo

from autogpt.agents.protocols import CommandProvider
from autogpt.command_decorator import command
from autogpt.config.config import Config
from autogpt.core.utils.json_schema import JSONSchema
from autogpt.models.command import Command
from autogpt.url_utils.validators import validate_url
from autogpt.utils.exceptions import CommandExecutionError
from forge.agent.protocols import CommandProvider
from forge.command import Command, command
from forge.config.config import Config
from forge.models.json_schema import JSONSchema
from forge.utils.exceptions import CommandExecutionError
from forge.utils.url_validator import validate_url


class GitOperationsComponent(CommandProvider):
autogpts/forge/forge/components/image_gen/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
from .image_gen import ImageGeneratorComponent
Some files were not shown because too many files have changed in this diff.