From 18ca9a5d1c3836e0275a1d2bdd2800b666d004fb Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 11:12:25 -0600 Subject: [PATCH 01/30] fix(ci): undo autofix --- .github/workflows/platform-backend-ci.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/platform-backend-ci.yml b/.github/workflows/platform-backend-ci.yml index 7671919dba..938cb7b6f9 100644 --- a/.github/workflows/platform-backend-ci.yml +++ b/.github/workflows/platform-backend-ci.yml @@ -137,9 +137,9 @@ jobs: SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }} SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }} SUPABASE_JWT_SECRET: ${{ steps.supabase.outputs.JWT_SECRET }} - REDIS_HOST: "localhost" - REDIS_PORT: "6379" - REDIS_PASSWORD: "testpassword" + REDIS_HOST: 'localhost' + REDIS_PORT: '6379' + REDIS_PASSWORD: 'testpassword' env: CI: true @@ -152,8 +152,8 @@ jobs: # If you want to replace this, you can do so by making our entire system generate # new credentials for each local user and update the environment variables in # the backend service, docker composes, and examples - RABBITMQ_DEFAULT_USER: rabbitmq_user_default - RABBITMQ_DEFAULT_PASS: k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7 + RABBITMQ_DEFAULT_USER: 'rabbitmq_user_default' + RABBITMQ_DEFAULT_PASS: 'k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7' # - name: Upload coverage reports to Codecov # uses: codecov/codecov-action@v4 From fd41bcf4c5a5fcd34c58f6bcbf9e8585a3027e95 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 11:12:39 -0600 Subject: [PATCH 02/30] fix(backend): rename the service to be more reasonable --- autogpt_platform/backend/backend/util/test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/autogpt_platform/backend/backend/util/test.py b/autogpt_platform/backend/backend/util/test.py index bc60ce2d26..29b2cfdbc5 100644 --- a/autogpt_platform/backend/backend/util/test.py +++ 
b/autogpt_platform/backend/backend/util/test.py @@ -22,7 +22,7 @@ class SpinTestServer: self.exec_manager = ExecutionManager() self.agent_server = AgentServer() self.scheduler = ExecutionScheduler() - self.notifications = NotificationManager() + self.notif_manager = NotificationManager() @staticmethod def test_get_user_id(): @@ -34,7 +34,7 @@ class SpinTestServer: self.agent_server.__enter__() self.exec_manager.__enter__() self.scheduler.__enter__() - self.notifications.__enter__() + self.notif_manager.__enter__() await db.connect() await initialize_blocks() @@ -49,7 +49,7 @@ class SpinTestServer: self.exec_manager.__exit__(exc_type, exc_val, exc_tb) self.agent_server.__exit__(exc_type, exc_val, exc_tb) self.db_api.__exit__(exc_type, exc_val, exc_tb) - self.notifications.__exit__(exc_type, exc_val, exc_tb) + self.notif_manager.__exit__(exc_type, exc_val, exc_tb) def setup_dependency_overrides(self): # Override get_user_id for testing From 250316c946016af6079b2d15ab5150c4b5499981 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 11:13:56 -0600 Subject: [PATCH 03/30] fix(backend): fix indentation --- autogpt_platform/backend/backend/data/graph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index 2dde243bc2..0008a14f69 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -720,7 +720,7 @@ async def fix_llm_provider_credentials(): try: broken_nodes = await prisma.get_client().query_raw( """ - SELECT graph."userId" user_id, + SELECT graph."userId" user_id, node.id node_id, node."constantInput" node_preset_input FROM platform."AgentNode" node From 9ef35205b9cffbfa0abad1ee8f576e4586463430 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 11:19:38 -0600 Subject: [PATCH 04/30] fix(backend): drop old param --- 
autogpt_platform/backend/backend/notifications/notifications.py | 1 - 1 file changed, 1 deletion(-) diff --git a/autogpt_platform/backend/backend/notifications/notifications.py b/autogpt_platform/backend/backend/notifications/notifications.py index 4c97d951f2..202d5ac490 100644 --- a/autogpt_platform/backend/backend/notifications/notifications.py +++ b/autogpt_platform/backend/backend/notifications/notifications.py @@ -98,7 +98,6 @@ class NotificationManager(AppService): def __init__(self): super().__init__() self.use_db = True - self.use_async = False # Use async RabbitMQ client self.use_rabbitmq = create_notification_config() self.running = True From 9cb53c497b6457acbb35c83b30110d83e2cb87c5 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 11:26:55 -0600 Subject: [PATCH 05/30] refactor(backend): rename use_rabbitmq to rabbitmq_config --- .../backend/backend/notifications/notifications.py | 2 +- autogpt_platform/backend/backend/util/service.py | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/autogpt_platform/backend/backend/notifications/notifications.py b/autogpt_platform/backend/backend/notifications/notifications.py index 202d5ac490..d4e591ac8b 100644 --- a/autogpt_platform/backend/backend/notifications/notifications.py +++ b/autogpt_platform/backend/backend/notifications/notifications.py @@ -98,7 +98,7 @@ class NotificationManager(AppService): def __init__(self): super().__init__() self.use_db = True - self.use_rabbitmq = create_notification_config() + self.rabbitmq_config = create_notification_config() self.running = True @classmethod diff --git a/autogpt_platform/backend/backend/util/service.py b/autogpt_platform/backend/backend/util/service.py index 338f9fdb29..387f4758e0 100644 --- a/autogpt_platform/backend/backend/util/service.py +++ b/autogpt_platform/backend/backend/util/service.py @@ -117,7 +117,7 @@ class AppService(AppProcess, ABC): shared_event_loop: asyncio.AbstractEventLoop use_db: bool = False 
use_redis: bool = False - use_rabbitmq: Optional[rabbitmq.RabbitMQConfig] = None + rabbitmq_config: Optional[rabbitmq.RabbitMQConfig] = None rabbitmq_service: Optional[rabbitmq.AsyncRabbitMQ] = None use_supabase: bool = False @@ -143,9 +143,9 @@ class AppService(AppProcess, ABC): @property def rabbit_config(self) -> rabbitmq.RabbitMQConfig: """Access the RabbitMQ config. Will raise if not configured.""" - if not self.use_rabbitmq: + if not self.rabbitmq_config: raise RuntimeError("RabbitMQ not configured for this service") - return self.use_rabbitmq + return self.rabbitmq_config def run_service(self) -> None: while True: @@ -164,13 +164,13 @@ class AppService(AppProcess, ABC): self.shared_event_loop.run_until_complete(db.connect()) if self.use_redis: redis.connect() - if self.use_rabbitmq: + if self.rabbitmq_config: logger.info(f"[{self.__class__.__name__}] ⏳ Configuring RabbitMQ...") # if self.use_async: - self.rabbitmq_service = rabbitmq.AsyncRabbitMQ(self.use_rabbitmq) + self.rabbitmq_service = rabbitmq.AsyncRabbitMQ(self.rabbitmq_config) self.shared_event_loop.run_until_complete(self.rabbitmq_service.connect()) # else: - # self.rabbitmq_service = rabbitmq.SyncRabbitMQ(self.use_rabbitmq) + # self.rabbitmq_service = rabbitmq.SyncRabbitMQ(self.rabbitmq_config) # self.rabbitmq_service.connect() if self.use_supabase: from supabase import create_client @@ -200,7 +200,7 @@ class AppService(AppProcess, ABC): if self.use_redis: logger.info(f"[{self.__class__.__name__}] ⏳ Disconnecting Redis...") redis.disconnect() - if self.use_rabbitmq: + if self.rabbitmq_config: logger.info(f"[{self.__class__.__name__}] ⏳ Disconnecting RabbitMQ...") @conn_retry("Pyro", "Starting Pyro Service") From b4e46c0eb5d24c4ec97594379a70513eba101e66 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 11:28:04 -0600 Subject: [PATCH 06/30] Discard changes to autogpt_platform/backend/backend/data/graph.py --- autogpt_platform/backend/backend/data/graph.py | 12 ++++-------- 1 file 
changed, 4 insertions(+), 8 deletions(-) diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index 0008a14f69..3d9a107676 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -716,10 +716,8 @@ async def fix_llm_provider_credentials(): store = IntegrationCredentialsStore() - broken_nodes = [] - try: - broken_nodes = await prisma.get_client().query_raw( - """ + broken_nodes = await prisma.get_client().query_raw( + """ SELECT graph."userId" user_id, node.id node_id, node."constantInput" node_preset_input @@ -729,10 +727,8 @@ async def fix_llm_provider_credentials(): WHERE node."constantInput"::jsonb->'credentials'->>'provider' = 'llm' ORDER BY graph."userId"; """ - ) - logger.info(f"Fixing LLM credential inputs on {len(broken_nodes)} nodes") - except Exception as e: - logger.error(f"Error fixing LLM credential inputs: {e}") + ) + logger.info(f"Fixing LLM credential inputs on {len(broken_nodes)} nodes") user_id: str = "" user_integrations = None From ce1d63c51735fe36535e6db7e971ba558879db4a Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Thu, 13 Feb 2025 18:30:58 +0100 Subject: [PATCH 07/30] feat(backend): Library v2 Agents and Presets (#9258) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Blocked by #9267 This re-introduces changes from the following PRs with fixes: - #9218 - #9211 ### Changes 🏗️ - See #9218 - See #9211 Fixes: - Fix Prisma query statements in `v2.library.db` - Fix creation of (library) agents - Fix test cleanup of (library) agents - Fix handling and passing of `node_input` parameters ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Create & run a new agent - [x] Update & run an existing agent --- .../backend/backend/data/execution.py | 
2 + .../backend/backend/data/graph.py | 2 +- .../backend/backend/executor/manager.py | 8 +- .../backend/server/external/routes/v1.py | 8 +- .../backend/backend/server/model.py | 19 +- .../backend/backend/server/rest_api.py | 62 ++- .../backend/backend/server/routers/v1.py | 109 ++---- .../backend/backend/server/v2/library/db.py | 362 +++++++++++++----- .../backend/server/v2/library/db_test.py | 67 ++-- .../backend/server/v2/library/model.py | 107 +++++- .../backend/server/v2/library/model_test.py | 158 +++++++- .../backend/server/v2/library/routes.py | 123 ------ .../server/v2/library/routes/__init__.py | 9 + .../server/v2/library/routes/agents.py | 138 +++++++ .../server/v2/library/routes/presets.py | 130 +++++++ .../backend/server/v2/library/routes_test.py | 53 +-- .../backend/backend/usecases/block_autogen.py | 4 +- .../backend/usecases/reddit_marketing.py | 4 +- .../backend/backend/usecases/sample.py | 4 +- .../backend/backend/util/service.py | 4 +- .../migration.sql | 2 + .../migration.sql | 46 +++ .../migration.sql | 2 + autogpt_platform/backend/schema.prisma | 14 +- .../backend/test/executor/test_manager.py | 193 +++++++++- .../backend/test/test_data_creator.py | 4 +- .../src/lib/autogpt-server-api/client.ts | 6 +- 27 files changed, 1218 insertions(+), 422 deletions(-) delete mode 100644 autogpt_platform/backend/backend/server/v2/library/routes.py create mode 100644 autogpt_platform/backend/backend/server/v2/library/routes/__init__.py create mode 100644 autogpt_platform/backend/backend/server/v2/library/routes/agents.py create mode 100644 autogpt_platform/backend/backend/server/v2/library/routes/presets.py create mode 100644 autogpt_platform/backend/migrations/20250107095812_preset_soft_delete/migration.sql create mode 100644 autogpt_platform/backend/migrations/20250108084101_user_agent_to_library_agent/migration.sql create mode 100644 autogpt_platform/backend/migrations/20250108084305_use_graph_is_active_version/migration.sql diff --git 
a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py index 4396fcb423..eae46278d8 100644 --- a/autogpt_platform/backend/backend/data/execution.py +++ b/autogpt_platform/backend/backend/data/execution.py @@ -141,6 +141,7 @@ async def create_graph_execution( graph_version: int, nodes_input: list[tuple[str, BlockInput]], user_id: str, + preset_id: str | None = None, ) -> tuple[str, list[ExecutionResult]]: """ Create a new AgentGraphExecution record. @@ -168,6 +169,7 @@ async def create_graph_execution( ] }, "userId": user_id, + "agentPresetId": preset_id, }, include=GRAPH_EXECUTION_INCLUDE, ) diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index 3d9a107676..31bd1dee4b 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -534,7 +534,7 @@ async def get_execution(user_id: str, execution_id: str) -> GraphExecution | Non async def get_graph( graph_id: str, version: int | None = None, - template: bool = False, + template: bool = False, # note: currently not in use; TODO: remove from DB entirely user_id: str | None = None, for_export: bool = False, ) -> GraphModel | None: diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index 1d965e0121..5f1b8e62c0 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -803,6 +803,7 @@ class ExecutionManager(AppService): data: BlockInput, user_id: str, graph_version: Optional[int] = None, + preset_id: str | None = None, ) -> GraphExecutionEntry: graph: GraphModel | None = self.db_client.get_graph( graph_id=graph_id, user_id=user_id, version=graph_version @@ -824,9 +825,9 @@ class ExecutionManager(AppService): # Extract request input data, and assign it to the input pin. 
if block.block_type == BlockType.INPUT: - name = node.input_default.get("name") - if name in data.get("node_input", {}): - input_data = {"value": data["node_input"][name]} + input_name = node.input_default.get("name") + if input_name and input_name in data: + input_data = {"value": data[input_name]} # Extract webhook payload, and assign it to the input pin webhook_payload_key = f"webhook_{node.webhook_id}_payload" @@ -851,6 +852,7 @@ class ExecutionManager(AppService): graph_version=graph.version, nodes_input=nodes_input, user_id=user_id, + preset_id=preset_id, ) starting_node_execs = [] diff --git a/autogpt_platform/backend/backend/server/external/routes/v1.py b/autogpt_platform/backend/backend/server/external/routes/v1.py index bdd4cadf6f..1eebe82381 100644 --- a/autogpt_platform/backend/backend/server/external/routes/v1.py +++ b/autogpt_platform/backend/backend/server/external/routes/v1.py @@ -1,9 +1,9 @@ import logging from collections import defaultdict -from typing import Any, Dict, List, Optional, Sequence +from typing import Annotated, Any, Dict, List, Optional, Sequence from autogpt_libs.utils.cache import thread_cached -from fastapi import APIRouter, Depends, HTTPException +from fastapi import APIRouter, Body, Depends, HTTPException from prisma.enums import AgentExecutionStatus, APIKeyPermission from typing_extensions import TypedDict @@ -101,7 +101,7 @@ def execute_graph_block( def execute_graph( graph_id: str, graph_version: int, - node_input: dict[Any, Any], + node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)], api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)), ) -> dict[str, Any]: try: @@ -113,7 +113,7 @@ def execute_graph( ) return {"id": graph_exec.graph_exec_id} except Exception as e: - msg = e.__str__().encode().decode("unicode_escape") + msg = str(e).encode().decode("unicode_escape") raise HTTPException(status_code=400, detail=msg) diff --git 
a/autogpt_platform/backend/backend/server/model.py b/autogpt_platform/backend/backend/server/model.py index 9b1b2f2e98..34b0c229ed 100644 --- a/autogpt_platform/backend/backend/server/model.py +++ b/autogpt_platform/backend/backend/server/model.py @@ -33,9 +33,7 @@ class ExecuteGraphResponse(pydantic.BaseModel): class CreateGraph(pydantic.BaseModel): - template_id: str | None = None - template_version: int | None = None - graph: backend.data.graph.Graph | None = None + graph: backend.data.graph.Graph class CreateAPIKeyRequest(pydantic.BaseModel): @@ -57,5 +55,20 @@ class UpdatePermissionsRequest(pydantic.BaseModel): permissions: List[APIKeyPermission] +class Pagination(pydantic.BaseModel): + total_items: int = pydantic.Field( + description="Total number of items.", examples=[42] + ) + total_pages: int = pydantic.Field( + description="Total number of pages.", examples=[2] + ) + current_page: int = pydantic.Field( + description="Current_page page number.", examples=[1] + ) + page_size: int = pydantic.Field( + description="Number of items per page.", examples=[25] + ) + + class RequestTopUp(pydantic.BaseModel): credit_amount: int diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py index e247d2e8fd..ae756ee71f 100644 --- a/autogpt_platform/backend/backend/server/rest_api.py +++ b/autogpt_platform/backend/backend/server/rest_api.py @@ -17,6 +17,8 @@ import backend.data.db import backend.data.graph import backend.data.user import backend.server.routers.v1 +import backend.server.v2.library.db +import backend.server.v2.library.model import backend.server.v2.library.routes import backend.server.v2.store.model import backend.server.v2.store.routes @@ -123,15 +125,15 @@ class AgentServer(backend.util.service.AppProcess): @staticmethod async def test_execute_graph( graph_id: str, - node_input: dict[str, Any], user_id: str, graph_version: Optional[int] = None, + node_input: Optional[dict[str, Any]] = None, ): 
return backend.server.routers.v1.execute_graph( user_id=user_id, graph_id=graph_id, graph_version=graph_version, - node_input=node_input, + node_input=node_input or {}, ) @staticmethod @@ -170,8 +172,64 @@ class AgentServer(backend.util.service.AppProcess): @staticmethod async def test_delete_graph(graph_id: str, user_id: str): + await backend.server.v2.library.db.delete_library_agent_by_graph_id( + graph_id=graph_id, user_id=user_id + ) return await backend.server.routers.v1.delete_graph(graph_id, user_id) + @staticmethod + async def test_get_presets(user_id: str, page: int = 1, page_size: int = 10): + return await backend.server.v2.library.routes.presets.get_presets( + user_id=user_id, page=page, page_size=page_size + ) + + @staticmethod + async def test_get_preset(preset_id: str, user_id: str): + return await backend.server.v2.library.routes.presets.get_preset( + preset_id=preset_id, user_id=user_id + ) + + @staticmethod + async def test_create_preset( + preset: backend.server.v2.library.model.CreateLibraryAgentPresetRequest, + user_id: str, + ): + return await backend.server.v2.library.routes.presets.create_preset( + preset=preset, user_id=user_id + ) + + @staticmethod + async def test_update_preset( + preset_id: str, + preset: backend.server.v2.library.model.CreateLibraryAgentPresetRequest, + user_id: str, + ): + return await backend.server.v2.library.routes.presets.update_preset( + preset_id=preset_id, preset=preset, user_id=user_id + ) + + @staticmethod + async def test_delete_preset(preset_id: str, user_id: str): + return await backend.server.v2.library.routes.presets.delete_preset( + preset_id=preset_id, user_id=user_id + ) + + @staticmethod + async def test_execute_preset( + graph_id: str, + graph_version: int, + preset_id: str, + user_id: str, + node_input: Optional[dict[str, Any]] = None, + ): + return await backend.server.v2.library.routes.presets.execute_preset( + graph_id=graph_id, + graph_version=graph_version, + preset_id=preset_id, + 
node_input=node_input or {}, + user_id=user_id, + ) + @staticmethod async def test_create_store_listing( request: backend.server.v2.store.model.StoreSubmissionRequest, user_id: str diff --git a/autogpt_platform/backend/backend/server/routers/v1.py b/autogpt_platform/backend/backend/server/routers/v1.py index dd7f72e50b..9e6c2337d6 100644 --- a/autogpt_platform/backend/backend/server/routers/v1.py +++ b/autogpt_platform/backend/backend/server/routers/v1.py @@ -9,12 +9,13 @@ import stripe from autogpt_libs.auth.middleware import auth_middleware from autogpt_libs.feature_flag.client import feature_flag from autogpt_libs.utils.cache import thread_cached -from fastapi import APIRouter, Depends, HTTPException, Request, Response +from fastapi import APIRouter, Body, Depends, HTTPException, Request, Response from typing_extensions import Optional, TypedDict import backend.data.block import backend.server.integrations.router import backend.server.routers.analytics +import backend.server.v2.library.db as library_db from backend.data import execution as execution_db from backend.data import graph as graph_db from backend.data.api_key import ( @@ -310,11 +311,6 @@ async def get_graph( tags=["graphs"], dependencies=[Depends(auth_middleware)], ) -@v1_router.get( - path="/templates/{graph_id}/versions", - tags=["templates", "graphs"], - dependencies=[Depends(auth_middleware)], -) async def get_graph_all_versions( graph_id: str, user_id: Annotated[str, Depends(get_user_id)] ) -> Sequence[graph_db.GraphModel]: @@ -330,41 +326,18 @@ async def get_graph_all_versions( async def create_new_graph( create_graph: CreateGraph, user_id: Annotated[str, Depends(get_user_id)] ) -> graph_db.GraphModel: - return await do_create_graph(create_graph, is_template=False, user_id=user_id) - - -async def do_create_graph( - create_graph: CreateGraph, - is_template: bool, - # user_id doesn't have to be annotated like on other endpoints, - # because create_graph isn't used directly as an endpoint - 
user_id: str, -) -> graph_db.GraphModel: - if create_graph.graph: - graph = graph_db.make_graph_model(create_graph.graph, user_id) - elif create_graph.template_id: - # Create a new graph from a template - graph = await graph_db.get_graph( - create_graph.template_id, - create_graph.template_version, - template=True, - user_id=user_id, - ) - if not graph: - raise HTTPException( - 400, detail=f"Template #{create_graph.template_id} not found" - ) - graph.version = 1 - else: - raise HTTPException( - status_code=400, detail="Either graph or template_id must be provided." - ) - - graph.is_template = is_template - graph.is_active = not is_template + graph = graph_db.make_graph_model(create_graph.graph, user_id) graph.reassign_ids(user_id=user_id, reassign_graph_id=True) graph = await graph_db.create_graph(graph, user_id=user_id) + + # Create a library agent for the new graph + await library_db.create_library_agent( + graph.id, + graph.version, + user_id, + ) + graph = await on_graph_activate( graph, get_credentials=lambda id: integration_creds_manager.get(user_id, id), @@ -391,11 +364,6 @@ async def delete_graph( @v1_router.put( path="/graphs/{graph_id}", tags=["graphs"], dependencies=[Depends(auth_middleware)] ) -@v1_router.put( - path="/templates/{graph_id}", - tags=["templates", "graphs"], - dependencies=[Depends(auth_middleware)], -) async def update_graph( graph_id: str, graph: graph_db.Graph, @@ -427,6 +395,10 @@ async def update_graph( new_graph_version = await graph_db.create_graph(graph, user_id=user_id) if new_graph_version.is_active: + # Keep the library agent up to date with the new active version + await library_db.update_agent_version_in_library( + user_id, graph.id, graph.version + ) def get_credentials(credentials_id: str) -> "Credentials | None": return integration_creds_manager.get(user_id, credentials_id) @@ -483,6 +455,12 @@ async def set_graph_active_version( version=new_active_version, user_id=user_id, ) + + # Keep the library agent up to date with 
the new active version + await library_db.update_agent_version_in_library( + user_id, new_active_graph.id, new_active_graph.version + ) + if current_active_graph and current_active_graph.version != new_active_version: # Handle deactivation of the previously active version await on_graph_deactivate( @@ -498,7 +476,7 @@ async def set_graph_active_version( ) def execute_graph( graph_id: str, - node_input: dict[Any, Any], + node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)], user_id: Annotated[str, Depends(get_user_id)], graph_version: Optional[int] = None, ) -> ExecuteGraphResponse: @@ -508,7 +486,7 @@ def execute_graph( ) return ExecuteGraphResponse(graph_exec_id=graph_exec.graph_exec_id) except Exception as e: - msg = e.__str__().encode().decode("unicode_escape") + msg = str(e).encode().decode("unicode_escape") raise HTTPException(status_code=400, detail=msg) @@ -559,47 +537,6 @@ async def get_graph_run_node_execution_results( return await execution_db.get_execution_results(graph_exec_id) -######################################################## -##################### Templates ######################## -######################################################## - - -@v1_router.get( - path="/templates", - tags=["graphs", "templates"], - dependencies=[Depends(auth_middleware)], -) -async def get_templates( - user_id: Annotated[str, Depends(get_user_id)] -) -> Sequence[graph_db.GraphModel]: - return await graph_db.get_graphs(filter_by="template", user_id=user_id) - - -@v1_router.get( - path="/templates/{graph_id}", - tags=["templates", "graphs"], - dependencies=[Depends(auth_middleware)], -) -async def get_template( - graph_id: str, version: int | None = None -) -> graph_db.GraphModel: - graph = await graph_db.get_graph(graph_id, version, template=True) - if not graph: - raise HTTPException(status_code=404, detail=f"Template #{graph_id} not found.") - return graph - - -@v1_router.post( - path="/templates", - tags=["templates", "graphs"], - 
dependencies=[Depends(auth_middleware)], -) -async def create_new_template( - create_graph: CreateGraph, user_id: Annotated[str, Depends(get_user_id)] -) -> graph_db.GraphModel: - return await do_create_graph(create_graph, is_template=True, user_id=user_id) - - ######################################################## ##################### Schedules ######################## ######################################################## diff --git a/autogpt_platform/backend/backend/server/v2/library/db.py b/autogpt_platform/backend/backend/server/v2/library/db.py index 8d142ef40c..da106a9081 100644 --- a/autogpt_platform/backend/backend/server/v2/library/db.py +++ b/autogpt_platform/backend/backend/server/v2/library/db.py @@ -1,103 +1,166 @@ import logging -from typing import List import prisma.errors +import prisma.fields import prisma.models import prisma.types -import backend.data.graph -import backend.data.includes -import backend.server.v2.library.model -import backend.server.v2.store.exceptions +import backend.server.model +import backend.server.v2.library.model as library_model +import backend.server.v2.store.exceptions as store_exceptions logger = logging.getLogger(__name__) async def get_library_agents( - user_id: str, -) -> List[backend.server.v2.library.model.LibraryAgent]: - """ - Returns all agents (AgentGraph) that belong to the user and all agents in their library (UserAgent table) - """ - logger.debug(f"Getting library agents for user {user_id}") + user_id: str, search_query: str | None = None +) -> list[library_model.LibraryAgent]: + logger.debug( + f"Fetching library agents for user_id={user_id} search_query={search_query}" + ) - try: - # Get agents created by user with nodes and links - user_created = await prisma.models.AgentGraph.prisma().find_many( - where=prisma.types.AgentGraphWhereInput(userId=user_id, isActive=True), - include=backend.data.includes.AGENT_GRAPH_INCLUDE, - ) + if search_query and len(search_query.strip()) > 100: + 
logger.warning(f"Search query too long: {search_query}") + raise store_exceptions.DatabaseError("Search query is too long.") - # Get agents in user's library with nodes and links - library_agents = await prisma.models.UserAgent.prisma().find_many( - where=prisma.types.UserAgentWhereInput( - userId=user_id, isDeleted=False, isArchived=False - ), - include={ + where_clause: prisma.types.LibraryAgentWhereInput = { + "userId": user_id, + "isDeleted": False, + "isArchived": False, + } + + if search_query: + where_clause["OR"] = [ + { "Agent": { - "include": { - "AgentNodes": { - "include": { - "Input": True, - "Output": True, - "Webhook": True, - "AgentBlock": True, - } - } + "is": {"name": {"contains": search_query, "mode": "insensitive"}} + } + }, + { + "Agent": { + "is": { + "description": {"contains": search_query, "mode": "insensitive"} } } }, + ] + + try: + library_agents = await prisma.models.LibraryAgent.prisma().find_many( + where=where_clause, + include={ + "Agent": { + "include": { + "AgentNodes": {"include": {"Input": True, "Output": True}} + } + } + }, + order=[{"updatedAt": "desc"}], ) - - # Convert to Graph models first - graphs = [] - - # Add user created agents - for agent in user_created: - try: - graphs.append(backend.data.graph.GraphModel.from_db(agent)) - except Exception as e: - logger.error(f"Error processing user created agent {agent.id}: {e}") - continue - - # Add library agents - for agent in library_agents: - if agent.Agent: - try: - graphs.append(backend.data.graph.GraphModel.from_db(agent.Agent)) - except Exception as e: - logger.error(f"Error processing library agent {agent.agentId}: {e}") - continue - - # Convert Graph models to LibraryAgent models - result = [] - for graph in graphs: - result.append( - backend.server.v2.library.model.LibraryAgent( - id=graph.id, - version=graph.version, - is_active=graph.is_active, - name=graph.name, - description=graph.description, - isCreatedByUser=any(a.id == graph.id for a in user_created), - 
input_schema=graph.input_schema, - output_schema=graph.output_schema, - ) - ) - - logger.debug(f"Found {len(result)} library agents") - return result - + logger.debug(f"Retrieved {len(library_agents)} agents for user_id={user_id}.") + return [library_model.LibraryAgent.from_db(agent) for agent in library_agents] except prisma.errors.PrismaError as e: - logger.error(f"Database error getting library agents: {str(e)}") - raise backend.server.v2.store.exceptions.DatabaseError( - "Failed to fetch library agents" + logger.error(f"Database error fetching library agents: {e}") + raise store_exceptions.DatabaseError("Unable to fetch library agents.") + + +async def create_library_agent( + agent_id: str, agent_version: int, user_id: str +) -> prisma.models.LibraryAgent: + """ + Adds an agent to the user's library (LibraryAgent table) + """ + + try: + return await prisma.models.LibraryAgent.prisma().create( + data={ + "userId": user_id, + "agentId": agent_id, + "agentVersion": agent_version, + "isCreatedByUser": False, + "useGraphIsActiveVersion": True, + } + ) + except prisma.errors.PrismaError as e: + logger.error(f"Database error creating agent to library: {str(e)}") + raise store_exceptions.DatabaseError("Failed to create agent to library") from e + + +async def update_agent_version_in_library( + user_id: str, agent_id: str, agent_version: int +) -> None: + """ + Updates the agent version in the library + """ + try: + library_agent = await prisma.models.LibraryAgent.prisma().find_first_or_raise( + where={ + "userId": user_id, + "agentId": agent_id, + "useGraphIsActiveVersion": True, + }, + ) + await prisma.models.LibraryAgent.prisma().update( + where={"id": library_agent.id}, + data={ + "Agent": { + "connect": { + "graphVersionId": {"id": agent_id, "version": agent_version} + }, + }, + }, + ) + except prisma.errors.PrismaError as e: + logger.error(f"Database error updating agent version in library: {str(e)}") + raise store_exceptions.DatabaseError( + "Failed to update 
agent version in library" ) from e -async def add_agent_to_library(store_listing_version_id: str, user_id: str) -> None: +async def update_library_agent( + library_agent_id: str, + user_id: str, + auto_update_version: bool = False, + is_favorite: bool = False, + is_archived: bool = False, + is_deleted: bool = False, +) -> None: """ - Finds the agent from the store listing version and adds it to the user's library (UserAgent table) + Updates the library agent with the given fields + """ + try: + await prisma.models.LibraryAgent.prisma().update_many( + where={"id": library_agent_id, "userId": user_id}, + data={ + "useGraphIsActiveVersion": auto_update_version, + "isFavorite": is_favorite, + "isArchived": is_archived, + "isDeleted": is_deleted, + }, + ) + except prisma.errors.PrismaError as e: + logger.error(f"Database error updating library agent: {str(e)}") + raise store_exceptions.DatabaseError("Failed to update library agent") from e + + +async def delete_library_agent_by_graph_id(graph_id: str, user_id: str) -> None: + """ + Deletes a library agent for the given user + """ + try: + await prisma.models.LibraryAgent.prisma().delete_many( + where={"agentId": graph_id, "userId": user_id} + ) + except prisma.errors.PrismaError as e: + logger.error(f"Database error deleting library agent: {str(e)}") + raise store_exceptions.DatabaseError("Failed to delete library agent") from e + + +async def add_store_agent_to_library( + store_listing_version_id: str, user_id: str +) -> None: + """ + Finds the agent from the store listing version and adds it to the user's library (LibraryAgent table) if they don't already have it """ logger.debug( @@ -116,7 +179,7 @@ async def add_agent_to_library(store_listing_version_id: str, user_id: str) -> N logger.warning( f"Store listing version not found: {store_listing_version_id}" ) - raise backend.server.v2.store.exceptions.AgentNotFoundError( + raise store_exceptions.AgentNotFoundError( f"Store listing version {store_listing_version_id} 
not found" ) @@ -126,12 +189,10 @@ async def add_agent_to_library(store_listing_version_id: str, user_id: str) -> N logger.warning( f"User {user_id} cannot add their own agent to their library" ) - raise backend.server.v2.store.exceptions.DatabaseError( - "Cannot add own agent to library" - ) + raise store_exceptions.DatabaseError("Cannot add own agent to library") # Check if user already has this agent - existing_user_agent = await prisma.models.UserAgent.prisma().find_first( + existing_user_agent = await prisma.models.LibraryAgent.prisma().find_first( where={ "userId": user_id, "agentId": agent.id, @@ -145,21 +206,134 @@ async def add_agent_to_library(store_listing_version_id: str, user_id: str) -> N ) return - # Create UserAgent entry - await prisma.models.UserAgent.prisma().create( - data=prisma.types.UserAgentCreateInput( - userId=user_id, - agentId=agent.id, - agentVersion=agent.version, - isCreatedByUser=False, - ) + # Create LibraryAgent entry + await prisma.models.LibraryAgent.prisma().create( + data={ + "userId": user_id, + "agentId": agent.id, + "agentVersion": agent.version, + "isCreatedByUser": False, + } ) logger.debug(f"Added agent {agent.id} to library for user {user_id}") - except backend.server.v2.store.exceptions.AgentNotFoundError: + except store_exceptions.AgentNotFoundError: raise except prisma.errors.PrismaError as e: logger.error(f"Database error adding agent to library: {str(e)}") - raise backend.server.v2.store.exceptions.DatabaseError( - "Failed to add agent to library" - ) from e + raise store_exceptions.DatabaseError("Failed to add agent to library") from e + + +############################################## +########### Presets DB Functions ############# +############################################## + + +async def get_presets( + user_id: str, page: int, page_size: int +) -> library_model.LibraryAgentPresetResponse: + try: + presets = await prisma.models.AgentPreset.prisma().find_many( + where={"userId": user_id}, + skip=page * 
page_size, + take=page_size, + ) + + total_items = await prisma.models.AgentPreset.prisma().count( + where={"userId": user_id}, + ) + total_pages = (total_items + page_size - 1) // page_size + + presets = [ + library_model.LibraryAgentPreset.from_db(preset) for preset in presets + ] + + return library_model.LibraryAgentPresetResponse( + presets=presets, + pagination=backend.server.model.Pagination( + total_items=total_items, + total_pages=total_pages, + current_page=page, + page_size=page_size, + ), + ) + + except prisma.errors.PrismaError as e: + logger.error(f"Database error getting presets: {str(e)}") + raise store_exceptions.DatabaseError("Failed to fetch presets") from e + + +async def get_preset( + user_id: str, preset_id: str +) -> library_model.LibraryAgentPreset | None: + try: + preset = await prisma.models.AgentPreset.prisma().find_unique( + where={"id": preset_id}, include={"InputPresets": True} + ) + if not preset or preset.userId != user_id: + return None + return library_model.LibraryAgentPreset.from_db(preset) + except prisma.errors.PrismaError as e: + logger.error(f"Database error getting preset: {str(e)}") + raise store_exceptions.DatabaseError("Failed to fetch preset") from e + + +async def upsert_preset( + user_id: str, + preset: library_model.CreateLibraryAgentPresetRequest, + preset_id: str | None = None, +) -> library_model.LibraryAgentPreset: + try: + if preset_id: + # Update existing preset + new_preset = await prisma.models.AgentPreset.prisma().update( + where={"id": preset_id}, + data={ + "name": preset.name, + "description": preset.description, + "isActive": preset.is_active, + "InputPresets": { + "create": [ + {"name": name, "data": prisma.fields.Json(data)} + for name, data in preset.inputs.items() + ] + }, + }, + include={"InputPresets": True}, + ) + if not new_preset: + raise ValueError(f"AgentPreset #{preset_id} not found") + else: + # Create new preset + new_preset = await prisma.models.AgentPreset.prisma().create( + data={ + 
"userId": user_id, + "name": preset.name, + "description": preset.description, + "agentId": preset.agent_id, + "agentVersion": preset.agent_version, + "isActive": preset.is_active, + "InputPresets": { + "create": [ + {"name": name, "data": prisma.fields.Json(data)} + for name, data in preset.inputs.items() + ] + }, + }, + include={"InputPresets": True}, + ) + return library_model.LibraryAgentPreset.from_db(new_preset) + except prisma.errors.PrismaError as e: + logger.error(f"Database error creating preset: {str(e)}") + raise store_exceptions.DatabaseError("Failed to create preset") from e + + +async def delete_preset(user_id: str, preset_id: str) -> None: + try: + await prisma.models.AgentPreset.prisma().update_many( + where={"id": preset_id, "userId": user_id}, + data={"isDeleted": True}, + ) + except prisma.errors.PrismaError as e: + logger.error(f"Database error deleting preset: {str(e)}") + raise store_exceptions.DatabaseError("Failed to delete preset") from e diff --git a/autogpt_platform/backend/backend/server/v2/library/db_test.py b/autogpt_platform/backend/backend/server/v2/library/db_test.py index e06d4bfa9a..17e3bdb4ef 100644 --- a/autogpt_platform/backend/backend/server/v2/library/db_test.py +++ b/autogpt_platform/backend/backend/server/v2/library/db_test.py @@ -37,7 +37,7 @@ async def test_get_library_agents(mocker): ] mock_library_agents = [ - prisma.models.UserAgent( + prisma.models.LibraryAgent( id="ua1", userId="test-user", agentId="agent2", @@ -48,6 +48,7 @@ async def test_get_library_agents(mocker): createdAt=datetime.now(), updatedAt=datetime.now(), isFavorite=False, + useGraphIsActiveVersion=True, Agent=prisma.models.AgentGraph( id="agent2", version=1, @@ -67,8 +68,8 @@ async def test_get_library_agents(mocker): return_value=mock_user_created ) - mock_user_agent = mocker.patch("prisma.models.UserAgent.prisma") - mock_user_agent.return_value.find_many = mocker.AsyncMock( + mock_library_agent = mocker.patch("prisma.models.LibraryAgent.prisma") + 
mock_library_agent.return_value.find_many = mocker.AsyncMock( return_value=mock_library_agents ) @@ -76,40 +77,16 @@ async def test_get_library_agents(mocker): result = await db.get_library_agents("test-user") # Verify results - assert len(result) == 2 - assert result[0].id == "agent1" - assert result[0].name == "Test Agent 1" - assert result[0].description == "Test Description 1" - assert result[0].isCreatedByUser is True - assert result[1].id == "agent2" - assert result[1].name == "Test Agent 2" - assert result[1].description == "Test Description 2" - assert result[1].isCreatedByUser is False - - # Verify mocks called correctly - mock_agent_graph.return_value.find_many.assert_called_once_with( - where=prisma.types.AgentGraphWhereInput(userId="test-user", isActive=True), - include=backend.data.includes.AGENT_GRAPH_INCLUDE, - ) - mock_user_agent.return_value.find_many.assert_called_once_with( - where=prisma.types.UserAgentWhereInput( - userId="test-user", isDeleted=False, isArchived=False - ), - include={ - "Agent": { - "include": { - "AgentNodes": { - "include": { - "Input": True, - "Output": True, - "Webhook": True, - "AgentBlock": True, - } - } - } - } - }, - ) + assert len(result) == 1 + assert result[0].id == "ua1" + assert result[0].name == "Test Agent 2" + assert result[0].description == "Test Description 2" + assert result[0].is_created_by_user is False + assert result[0].is_latest_version is True + assert result[0].is_favorite is False + assert result[0].agent_id == "agent2" + assert result[0].agent_version == 1 + assert result[0].preset_id is None @pytest.mark.asyncio @@ -152,26 +129,26 @@ async def test_add_agent_to_library(mocker): return_value=mock_store_listing ) - mock_user_agent = mocker.patch("prisma.models.UserAgent.prisma") - mock_user_agent.return_value.find_first = mocker.AsyncMock(return_value=None) - mock_user_agent.return_value.create = mocker.AsyncMock() + mock_library_agent = mocker.patch("prisma.models.LibraryAgent.prisma") + 
mock_library_agent.return_value.find_first = mocker.AsyncMock(return_value=None) + mock_library_agent.return_value.create = mocker.AsyncMock() # Call function - await db.add_agent_to_library("version123", "test-user") + await db.add_store_agent_to_library("version123", "test-user") # Verify mocks called correctly mock_store_listing_version.return_value.find_unique.assert_called_once_with( where={"id": "version123"}, include={"Agent": True} ) - mock_user_agent.return_value.find_first.assert_called_once_with( + mock_library_agent.return_value.find_first.assert_called_once_with( where={ "userId": "test-user", "agentId": "agent1", "agentVersion": 1, } ) - mock_user_agent.return_value.create.assert_called_once_with( - data=prisma.types.UserAgentCreateInput( + mock_library_agent.return_value.create.assert_called_once_with( + data=prisma.types.LibraryAgentCreateInput( userId="test-user", agentId="agent1", agentVersion=1, isCreatedByUser=False ) ) @@ -189,7 +166,7 @@ async def test_add_agent_to_library_not_found(mocker): # Call function and verify exception with pytest.raises(backend.server.v2.store.exceptions.AgentNotFoundError): - await db.add_agent_to_library("version123", "test-user") + await db.add_store_agent_to_library("version123", "test-user") # Verify mock called correctly mock_store_listing_version.return_value.find_unique.assert_called_once_with( diff --git a/autogpt_platform/backend/backend/server/v2/library/model.py b/autogpt_platform/backend/backend/server/v2/library/model.py index 88a81f6d77..fc2c6c6757 100644 --- a/autogpt_platform/backend/backend/server/v2/library/model.py +++ b/autogpt_platform/backend/backend/server/v2/library/model.py @@ -1,16 +1,111 @@ -import typing +import datetime +from typing import Any +import prisma.models import pydantic +import backend.data.block as block_model +import backend.data.graph as graph_model +import backend.server.model as server_model + class LibraryAgent(pydantic.BaseModel): id: str # Changed from agent_id to 
match GraphMeta - version: int # Changed from agent_version to match GraphMeta - is_active: bool # Added to match GraphMeta + + agent_id: str + agent_version: int # Changed from agent_version to match GraphMeta + + preset_id: str | None + + updated_at: datetime.datetime + name: str description: str - isCreatedByUser: bool # Made input_schema and output_schema match GraphMeta's type - input_schema: dict[str, typing.Any] # Should be BlockIOObjectSubSchema in frontend - output_schema: dict[str, typing.Any] # Should be BlockIOObjectSubSchema in frontend + input_schema: dict[str, Any] # Should be BlockIOObjectSubSchema in frontend + output_schema: dict[str, Any] # Should be BlockIOObjectSubSchema in frontend + + is_favorite: bool + is_created_by_user: bool + + is_latest_version: bool + + @staticmethod + def from_db(agent: prisma.models.LibraryAgent): + if not agent.Agent: + raise ValueError("AgentGraph is required") + + graph = graph_model.GraphModel.from_db(agent.Agent) + + agent_updated_at = agent.Agent.updatedAt + lib_agent_updated_at = agent.updatedAt + + # Take the latest updated_at timestamp either when the graph was updated or the library agent was updated + updated_at = ( + max(agent_updated_at, lib_agent_updated_at) + if agent_updated_at + else lib_agent_updated_at + ) + + return LibraryAgent( + id=agent.id, + agent_id=agent.agentId, + agent_version=agent.agentVersion, + updated_at=updated_at, + name=graph.name, + description=graph.description, + input_schema=graph.input_schema, + output_schema=graph.output_schema, + is_favorite=agent.isFavorite, + is_created_by_user=agent.isCreatedByUser, + is_latest_version=graph.is_active, + preset_id=agent.AgentPreset.id if agent.AgentPreset else None, + ) + + +class LibraryAgentPreset(pydantic.BaseModel): + id: str + updated_at: datetime.datetime + + agent_id: str + agent_version: int + + name: str + description: str + + is_active: bool + + inputs: block_model.BlockInput + + @staticmethod + def from_db(preset: 
prisma.models.AgentPreset): + input_data: block_model.BlockInput = {} + + for preset_input in preset.InputPresets or []: + input_data[preset_input.name] = preset_input.data + + return LibraryAgentPreset( + id=preset.id, + updated_at=preset.updatedAt, + agent_id=preset.agentId, + agent_version=preset.agentVersion, + name=preset.name, + description=preset.description, + is_active=preset.isActive, + inputs=input_data, + ) + + +class LibraryAgentPresetResponse(pydantic.BaseModel): + presets: list[LibraryAgentPreset] + pagination: server_model.Pagination + + +class CreateLibraryAgentPresetRequest(pydantic.BaseModel): + name: str + description: str + inputs: block_model.BlockInput + agent_id: str + agent_version: int + is_active: bool diff --git a/autogpt_platform/backend/backend/server/v2/library/model_test.py b/autogpt_platform/backend/backend/server/v2/library/model_test.py index 81aa8fe07b..0eb700a7b3 100644 --- a/autogpt_platform/backend/backend/server/v2/library/model_test.py +++ b/autogpt_platform/backend/backend/server/v2/library/model_test.py @@ -1,23 +1,36 @@ +import datetime + +import prisma.fields +import prisma.models + +import backend.data.block +import backend.server.model import backend.server.v2.library.model def test_library_agent(): agent = backend.server.v2.library.model.LibraryAgent( id="test-agent-123", - version=1, - is_active=True, + agent_id="agent-123", + agent_version=1, + preset_id=None, + updated_at=datetime.datetime.now(), name="Test Agent", description="Test description", - isCreatedByUser=False, input_schema={"type": "object", "properties": {}}, output_schema={"type": "object", "properties": {}}, + is_favorite=False, + is_created_by_user=False, + is_latest_version=True, ) assert agent.id == "test-agent-123" - assert agent.version == 1 - assert agent.is_active is True + assert agent.agent_id == "agent-123" + assert agent.agent_version == 1 assert agent.name == "Test Agent" assert agent.description == "Test description" - assert 
agent.isCreatedByUser is False + assert agent.is_favorite is False + assert agent.is_created_by_user is False + assert agent.is_latest_version is True assert agent.input_schema == {"type": "object", "properties": {}} assert agent.output_schema == {"type": "object", "properties": {}} @@ -25,19 +38,140 @@ def test_library_agent(): def test_library_agent_with_user_created(): agent = backend.server.v2.library.model.LibraryAgent( id="user-agent-456", - version=2, - is_active=True, + agent_id="agent-456", + agent_version=2, + preset_id=None, + updated_at=datetime.datetime.now(), name="User Created Agent", description="An agent created by the user", - isCreatedByUser=True, input_schema={"type": "object", "properties": {}}, output_schema={"type": "object", "properties": {}}, + is_favorite=False, + is_created_by_user=True, + is_latest_version=True, ) assert agent.id == "user-agent-456" - assert agent.version == 2 - assert agent.is_active is True + assert agent.agent_id == "agent-456" + assert agent.agent_version == 2 assert agent.name == "User Created Agent" assert agent.description == "An agent created by the user" - assert agent.isCreatedByUser is True + assert agent.is_favorite is False + assert agent.is_created_by_user is True + assert agent.is_latest_version is True assert agent.input_schema == {"type": "object", "properties": {}} assert agent.output_schema == {"type": "object", "properties": {}} + + +def test_library_agent_preset(): + preset = backend.server.v2.library.model.LibraryAgentPreset( + id="preset-123", + name="Test Preset", + description="Test preset description", + agent_id="test-agent-123", + agent_version=1, + is_active=True, + inputs={ + "dictionary": {"key1": "Hello", "key2": "World"}, + "selected_value": "key2", + }, + updated_at=datetime.datetime.now(), + ) + assert preset.id == "preset-123" + assert preset.name == "Test Preset" + assert preset.description == "Test preset description" + assert preset.agent_id == "test-agent-123" + assert 
preset.agent_version == 1 + assert preset.is_active is True + assert preset.inputs == { + "dictionary": {"key1": "Hello", "key2": "World"}, + "selected_value": "key2", + } + + +def test_library_agent_preset_response(): + preset = backend.server.v2.library.model.LibraryAgentPreset( + id="preset-123", + name="Test Preset", + description="Test preset description", + agent_id="test-agent-123", + agent_version=1, + is_active=True, + inputs={ + "dictionary": {"key1": "Hello", "key2": "World"}, + "selected_value": "key2", + }, + updated_at=datetime.datetime.now(), + ) + + pagination = backend.server.model.Pagination( + total_items=1, total_pages=1, current_page=1, page_size=10 + ) + + response = backend.server.v2.library.model.LibraryAgentPresetResponse( + presets=[preset], pagination=pagination + ) + + assert len(response.presets) == 1 + assert response.presets[0].id == "preset-123" + assert response.pagination.total_items == 1 + assert response.pagination.total_pages == 1 + assert response.pagination.current_page == 1 + assert response.pagination.page_size == 10 + + +def test_create_library_agent_preset_request(): + request = backend.server.v2.library.model.CreateLibraryAgentPresetRequest( + name="New Preset", + description="New preset description", + agent_id="agent-123", + agent_version=1, + is_active=True, + inputs={ + "dictionary": {"key1": "Hello", "key2": "World"}, + "selected_value": "key2", + }, + ) + + assert request.name == "New Preset" + assert request.description == "New preset description" + assert request.agent_id == "agent-123" + assert request.agent_version == 1 + assert request.is_active is True + assert request.inputs == { + "dictionary": {"key1": "Hello", "key2": "World"}, + "selected_value": "key2", + } + + +def test_library_agent_from_db(): + # Create mock DB agent + db_agent = prisma.models.AgentPreset( + id="test-agent-123", + createdAt=datetime.datetime.now(), + updatedAt=datetime.datetime.now(), + agentId="agent-123", + agentVersion=1, + 
name="Test Agent", + description="Test agent description", + isActive=True, + userId="test-user-123", + isDeleted=False, + InputPresets=[ + prisma.models.AgentNodeExecutionInputOutput( + id="input-123", + time=datetime.datetime.now(), + name="input1", + data=prisma.fields.Json({"type": "string", "value": "test value"}), + ) + ], + ) + + # Convert to LibraryAgentPreset + agent = backend.server.v2.library.model.LibraryAgentPreset.from_db(db_agent) + + assert agent.id == "test-agent-123" + assert agent.agent_version == 1 + assert agent.is_active is True + assert agent.name == "Test Agent" + assert agent.description == "Test agent description" + assert agent.inputs == {"input1": {"type": "string", "value": "test value"}} diff --git a/autogpt_platform/backend/backend/server/v2/library/routes.py b/autogpt_platform/backend/backend/server/v2/library/routes.py deleted file mode 100644 index 3ee8680254..0000000000 --- a/autogpt_platform/backend/backend/server/v2/library/routes.py +++ /dev/null @@ -1,123 +0,0 @@ -import logging -import typing - -import autogpt_libs.auth.depends -import autogpt_libs.auth.middleware -import fastapi -import prisma - -import backend.data.graph -import backend.integrations.creds_manager -import backend.integrations.webhooks.graph_lifecycle_hooks -import backend.server.v2.library.db -import backend.server.v2.library.model - -logger = logging.getLogger(__name__) - -router = fastapi.APIRouter() -integration_creds_manager = ( - backend.integrations.creds_manager.IntegrationCredentialsManager() -) - - -@router.get( - "/agents", - tags=["library", "private"], - dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)], -) -async def get_library_agents( - user_id: typing.Annotated[ - str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id) - ] -) -> typing.Sequence[backend.server.v2.library.model.LibraryAgent]: - """ - Get all agents in the user's library, including both created and saved agents. 
- """ - try: - agents = await backend.server.v2.library.db.get_library_agents(user_id) - return agents - except Exception: - logger.exception("Exception occurred whilst getting library agents") - raise fastapi.HTTPException( - status_code=500, detail="Failed to get library agents" - ) - - -@router.post( - "/agents/{store_listing_version_id}", - tags=["library", "private"], - dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)], - status_code=201, -) -async def add_agent_to_library( - store_listing_version_id: str, - user_id: typing.Annotated[ - str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id) - ], -) -> fastapi.Response: - """ - Add an agent from the store to the user's library. - - Args: - store_listing_version_id (str): ID of the store listing version to add - user_id (str): ID of the authenticated user - - Returns: - fastapi.Response: 201 status code on success - - Raises: - HTTPException: If there is an error adding the agent to the library - """ - try: - # Get the graph from the store listing - store_listing_version = ( - await prisma.models.StoreListingVersion.prisma().find_unique( - where={"id": store_listing_version_id}, include={"Agent": True} - ) - ) - - if not store_listing_version or not store_listing_version.Agent: - raise fastapi.HTTPException( - status_code=404, - detail=f"Store listing version {store_listing_version_id} not found", - ) - - agent = store_listing_version.Agent - - if agent.userId == user_id: - raise fastapi.HTTPException( - status_code=400, detail="Cannot add own agent to library" - ) - - # Create a new graph from the template - graph = await backend.data.graph.get_graph( - agent.id, agent.version, user_id=user_id - ) - - if not graph: - raise fastapi.HTTPException( - status_code=404, detail=f"Agent {agent.id} not found" - ) - - # Create a deep copy with new IDs - graph.version = 1 - graph.is_template = False - graph.is_active = True - graph.reassign_ids(user_id=user_id, reassign_graph_id=True) - - # 
Save the new graph - graph = await backend.data.graph.create_graph(graph, user_id=user_id) - graph = ( - await backend.integrations.webhooks.graph_lifecycle_hooks.on_graph_activate( - graph, - get_credentials=lambda id: integration_creds_manager.get(user_id, id), - ) - ) - - return fastapi.Response(status_code=201) - - except Exception: - logger.exception("Exception occurred whilst adding agent to library") - raise fastapi.HTTPException( - status_code=500, detail="Failed to add agent to library" - ) diff --git a/autogpt_platform/backend/backend/server/v2/library/routes/__init__.py b/autogpt_platform/backend/backend/server/v2/library/routes/__init__.py new file mode 100644 index 0000000000..f62cbe7ff2 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/library/routes/__init__.py @@ -0,0 +1,9 @@ +import fastapi + +from .agents import router as agents_router +from .presets import router as presets_router + +router = fastapi.APIRouter() + +router.include_router(presets_router) +router.include_router(agents_router) diff --git a/autogpt_platform/backend/backend/server/v2/library/routes/agents.py b/autogpt_platform/backend/backend/server/v2/library/routes/agents.py new file mode 100644 index 0000000000..ba3523c552 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/library/routes/agents.py @@ -0,0 +1,138 @@ +import logging +from typing import Annotated, Sequence + +import autogpt_libs.auth as autogpt_auth_lib +import fastapi + +import backend.server.v2.library.db as library_db +import backend.server.v2.library.model as library_model +import backend.server.v2.store.exceptions as store_exceptions + +logger = logging.getLogger(__name__) + +router = fastapi.APIRouter() + + +@router.get( + "/agents", + tags=["library", "private"], + dependencies=[fastapi.Depends(autogpt_auth_lib.auth_middleware)], +) +async def get_library_agents( + user_id: Annotated[str, fastapi.Depends(autogpt_auth_lib.depends.get_user_id)] +) -> Sequence[library_model.LibraryAgent]: + 
""" + Get all agents in the user's library, including both created and saved agents. + """ + try: + agents = await library_db.get_library_agents(user_id) + return agents + except Exception as e: + logger.exception(f"Exception occurred whilst getting library agents: {e}") + raise fastapi.HTTPException( + status_code=500, detail="Failed to get library agents" + ) + + +@router.post( + "/agents/{store_listing_version_id}", + tags=["library", "private"], + dependencies=[fastapi.Depends(autogpt_auth_lib.auth_middleware)], + status_code=201, +) +async def add_agent_to_library( + store_listing_version_id: str, + user_id: Annotated[str, fastapi.Depends(autogpt_auth_lib.depends.get_user_id)], +) -> fastapi.Response: + """ + Add an agent from the store to the user's library. + + Args: + store_listing_version_id (str): ID of the store listing version to add + user_id (str): ID of the authenticated user + + Returns: + fastapi.Response: 201 status code on success + + Raises: + HTTPException: If there is an error adding the agent to the library + """ + try: + # Use the database function to add the agent to the library + await library_db.add_store_agent_to_library(store_listing_version_id, user_id) + return fastapi.Response(status_code=201) + + except store_exceptions.AgentNotFoundError: + raise fastapi.HTTPException( + status_code=404, + detail=f"Store listing version {store_listing_version_id} not found", + ) + except store_exceptions.DatabaseError as e: + logger.exception(f"Database error occurred whilst adding agent to library: {e}") + raise fastapi.HTTPException( + status_code=500, detail="Failed to add agent to library" + ) + except Exception as e: + logger.exception( + f"Unexpected exception occurred whilst adding agent to library: {e}" + ) + raise fastapi.HTTPException( + status_code=500, detail="Failed to add agent to library" + ) + + +@router.put( + "/agents/{library_agent_id}", + tags=["library", "private"], + 
dependencies=[fastapi.Depends(autogpt_auth_lib.auth_middleware)], + status_code=204, +) +async def update_library_agent( + library_agent_id: str, + user_id: Annotated[str, fastapi.Depends(autogpt_auth_lib.depends.get_user_id)], + auto_update_version: bool = False, + is_favorite: bool = False, + is_archived: bool = False, + is_deleted: bool = False, +) -> fastapi.Response: + """ + Update the library agent with the given fields. + + Args: + library_agent_id (str): ID of the library agent to update + user_id (str): ID of the authenticated user + auto_update_version (bool): Whether to auto-update the agent version + is_favorite (bool): Whether the agent is marked as favorite + is_archived (bool): Whether the agent is archived + is_deleted (bool): Whether the agent is deleted + + Returns: + fastapi.Response: 204 status code on success + + Raises: + HTTPException: If there is an error updating the library agent + """ + try: + # Use the database function to update the library agent + await library_db.update_library_agent( + library_agent_id, + user_id, + auto_update_version, + is_favorite, + is_archived, + is_deleted, + ) + return fastapi.Response(status_code=204) + + except store_exceptions.DatabaseError as e: + logger.exception(f"Database error occurred whilst updating library agent: {e}") + raise fastapi.HTTPException( + status_code=500, detail="Failed to update library agent" + ) + except Exception as e: + logger.exception( + f"Unexpected exception occurred whilst updating library agent: {e}" + ) + raise fastapi.HTTPException( + status_code=500, detail="Failed to update library agent" + ) diff --git a/autogpt_platform/backend/backend/server/v2/library/routes/presets.py b/autogpt_platform/backend/backend/server/v2/library/routes/presets.py new file mode 100644 index 0000000000..ff02a394bb --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/library/routes/presets.py @@ -0,0 +1,130 @@ +import logging +from typing import Annotated, Any + +import 
autogpt_libs.auth as autogpt_auth_lib +import autogpt_libs.utils.cache +import fastapi + +import backend.executor +import backend.server.v2.library.db as library_db +import backend.server.v2.library.model as library_model +import backend.util.service + +logger = logging.getLogger(__name__) + +router = fastapi.APIRouter() + + +@autogpt_libs.utils.cache.thread_cached +def execution_manager_client() -> backend.executor.ExecutionManager: + return backend.util.service.get_service_client(backend.executor.ExecutionManager) + + +@router.get("/presets") +async def get_presets( + user_id: Annotated[str, fastapi.Depends(autogpt_auth_lib.depends.get_user_id)], + page: int = 1, + page_size: int = 10, +) -> library_model.LibraryAgentPresetResponse: + try: + presets = await library_db.get_presets(user_id, page, page_size) + return presets + except Exception as e: + logger.exception(f"Exception occurred whilst getting presets: {e}") + raise fastapi.HTTPException(status_code=500, detail="Failed to get presets") + + +@router.get("/presets/{preset_id}") +async def get_preset( + preset_id: str, + user_id: Annotated[str, fastapi.Depends(autogpt_auth_lib.depends.get_user_id)], +) -> library_model.LibraryAgentPreset: + try: + preset = await library_db.get_preset(user_id, preset_id) + if not preset: + raise fastapi.HTTPException( + status_code=404, + detail=f"Preset {preset_id} not found", + ) + return preset + except Exception as e: + logger.exception(f"Exception occurred whilst getting preset: {e}") + raise fastapi.HTTPException(status_code=500, detail="Failed to get preset") + + +@router.post("/presets") +async def create_preset( + preset: library_model.CreateLibraryAgentPresetRequest, + user_id: Annotated[str, fastapi.Depends(autogpt_auth_lib.depends.get_user_id)], +) -> library_model.LibraryAgentPreset: + try: + return await library_db.upsert_preset(user_id, preset) + except Exception as e: + logger.exception(f"Exception occurred whilst creating preset: {e}") + raise 
fastapi.HTTPException(status_code=500, detail="Failed to create preset") + + +@router.put("/presets/{preset_id}") +async def update_preset( + preset_id: str, + preset: library_model.CreateLibraryAgentPresetRequest, + user_id: Annotated[str, fastapi.Depends(autogpt_auth_lib.depends.get_user_id)], +) -> library_model.LibraryAgentPreset: + try: + return await library_db.upsert_preset(user_id, preset, preset_id) + except Exception as e: + logger.exception(f"Exception occurred whilst updating preset: {e}") + raise fastapi.HTTPException(status_code=500, detail="Failed to update preset") + + +@router.delete("/presets/{preset_id}") +async def delete_preset( + preset_id: str, + user_id: Annotated[str, fastapi.Depends(autogpt_auth_lib.depends.get_user_id)], +): + try: + await library_db.delete_preset(user_id, preset_id) + return fastapi.Response(status_code=204) + except Exception as e: + logger.exception(f"Exception occurred whilst deleting preset: {e}") + raise fastapi.HTTPException(status_code=500, detail="Failed to delete preset") + + +@router.post( + path="/presets/{preset_id}/execute", + tags=["presets"], + dependencies=[fastapi.Depends(autogpt_auth_lib.auth_middleware)], +) +async def execute_preset( + graph_id: str, + graph_version: int, + preset_id: str, + node_input: Annotated[ + dict[str, Any], fastapi.Body(..., embed=True, default_factory=dict) + ], + user_id: Annotated[str, fastapi.Depends(autogpt_auth_lib.depends.get_user_id)], +) -> dict[str, Any]: # FIXME: add proper return type + try: + preset = await library_db.get_preset(user_id, preset_id) + if not preset: + raise fastapi.HTTPException(status_code=404, detail="Preset not found") + + logger.debug(f"Preset inputs: {preset.inputs}") + + # Merge input overrides with preset inputs + merged_node_input = preset.inputs | node_input + + execution = execution_manager_client().add_execution( + graph_id=graph_id, + graph_version=graph_version, + data=merged_node_input, + user_id=user_id, + preset_id=preset_id, + ) + 
+ logger.debug(f"Execution added: {execution} with input: {merged_node_input}") + + return {"id": execution.graph_exec_id} + except Exception as e: + msg = str(e).encode().decode("unicode_escape") + raise fastapi.HTTPException(status_code=400, detail=msg) diff --git a/autogpt_platform/backend/backend/server/v2/library/routes_test.py b/autogpt_platform/backend/backend/server/v2/library/routes_test.py index d793ce13b6..55efe586b4 100644 --- a/autogpt_platform/backend/backend/server/v2/library/routes_test.py +++ b/autogpt_platform/backend/backend/server/v2/library/routes_test.py @@ -1,16 +1,16 @@ -import autogpt_libs.auth.depends -import autogpt_libs.auth.middleware +import datetime + +import autogpt_libs.auth as autogpt_auth_lib import fastapi import fastapi.testclient import pytest import pytest_mock -import backend.server.v2.library.db -import backend.server.v2.library.model -import backend.server.v2.library.routes +import backend.server.v2.library.model as library_model +from backend.server.v2.library.routes import router as library_router app = fastapi.FastAPI() -app.include_router(backend.server.v2.library.routes.router) +app.include_router(library_router) client = fastapi.testclient.TestClient(app) @@ -25,31 +25,37 @@ def override_get_user_id(): return "test-user-id" -app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = ( - override_auth_middleware -) -app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = override_get_user_id +app.dependency_overrides[autogpt_auth_lib.auth_middleware] = override_auth_middleware +app.dependency_overrides[autogpt_auth_lib.depends.get_user_id] = override_get_user_id def test_get_library_agents_success(mocker: pytest_mock.MockFixture): mocked_value = [ - backend.server.v2.library.model.LibraryAgent( + library_model.LibraryAgent( id="test-agent-1", - version=1, - is_active=True, + agent_id="test-agent-1", + agent_version=1, + preset_id="preset-1", + updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0), + 
is_favorite=False, + is_created_by_user=True, + is_latest_version=True, name="Test Agent 1", description="Test Description 1", - isCreatedByUser=True, input_schema={"type": "object", "properties": {}}, output_schema={"type": "object", "properties": {}}, ), - backend.server.v2.library.model.LibraryAgent( + library_model.LibraryAgent( id="test-agent-2", - version=1, - is_active=True, + agent_id="test-agent-2", + agent_version=1, + preset_id="preset-2", + updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0), + is_favorite=False, + is_created_by_user=False, + is_latest_version=True, name="Test Agent 2", description="Test Description 2", - isCreatedByUser=False, input_schema={"type": "object", "properties": {}}, output_schema={"type": "object", "properties": {}}, ), @@ -61,14 +67,13 @@ def test_get_library_agents_success(mocker: pytest_mock.MockFixture): assert response.status_code == 200 data = [ - backend.server.v2.library.model.LibraryAgent.model_validate(agent) - for agent in response.json() + library_model.LibraryAgent.model_validate(agent) for agent in response.json() ] assert len(data) == 2 - assert data[0].id == "test-agent-1" - assert data[0].isCreatedByUser is True - assert data[1].id == "test-agent-2" - assert data[1].isCreatedByUser is False + assert data[0].agent_id == "test-agent-1" + assert data[0].is_created_by_user is True + assert data[1].agent_id == "test-agent-2" + assert data[1].is_created_by_user is False mock_db_call.assert_called_once_with("test-user-id") diff --git a/autogpt_platform/backend/backend/usecases/block_autogen.py b/autogpt_platform/backend/backend/usecases/block_autogen.py index 97435220da..9f046a2e02 100644 --- a/autogpt_platform/backend/backend/usecases/block_autogen.py +++ b/autogpt_platform/backend/backend/usecases/block_autogen.py @@ -253,7 +253,9 @@ async def block_autogen_agent(): test_graph = await create_graph(create_test_graph(), user_id=test_user.id) input_data = {"input": "Write me a block that writes a string into a file."} 
response = await server.agent_server.test_execute_graph( - test_graph.id, input_data, test_user.id + graph_id=test_graph.id, + user_id=test_user.id, + node_input=input_data, ) print(response) result = await wait_execution( diff --git a/autogpt_platform/backend/backend/usecases/reddit_marketing.py b/autogpt_platform/backend/backend/usecases/reddit_marketing.py index 0c7769bd1b..494b3a85ca 100644 --- a/autogpt_platform/backend/backend/usecases/reddit_marketing.py +++ b/autogpt_platform/backend/backend/usecases/reddit_marketing.py @@ -157,7 +157,9 @@ async def reddit_marketing_agent(): test_graph = await create_graph(create_test_graph(), user_id=test_user.id) input_data = {"subreddit": "AutoGPT"} response = await server.agent_server.test_execute_graph( - test_graph.id, input_data, test_user.id + graph_id=test_graph.id, + user_id=test_user.id, + node_input=input_data, ) print(response) result = await wait_execution( diff --git a/autogpt_platform/backend/backend/usecases/sample.py b/autogpt_platform/backend/backend/usecases/sample.py index 79819fb149..8171850a7c 100644 --- a/autogpt_platform/backend/backend/usecases/sample.py +++ b/autogpt_platform/backend/backend/usecases/sample.py @@ -86,7 +86,9 @@ async def sample_agent(): test_graph = await create_graph(create_test_graph(), test_user.id) input_data = {"input_1": "Hello", "input_2": "World"} response = await server.agent_server.test_execute_graph( - test_graph.id, input_data, test_user.id + graph_id=test_graph.id, + user_id=test_user.id, + node_input=input_data, ) print(response) result = await wait_execution( diff --git a/autogpt_platform/backend/backend/util/service.py b/autogpt_platform/backend/backend/util/service.py index 9b07ba88e8..ac7f437fff 100644 --- a/autogpt_platform/backend/backend/util/service.py +++ b/autogpt_platform/backend/backend/util/service.py @@ -62,7 +62,7 @@ def expose(func: C) -> C: try: return func(*args, **kwargs) except Exception as e: - msg = f"Error in {func.__name__}: {e.__str__()}" + 
msg = f"Error in {func.__name__}: {e}" if isinstance(e, ValueError): logger.warning(msg) else: @@ -80,7 +80,7 @@ def register_pydantic_serializers(func: Callable): try: pydantic_types = _pydantic_models_from_type_annotation(annotation) except Exception as e: - raise TypeError(f"Error while exposing {func.__name__}: {e.__str__()}") + raise TypeError(f"Error while exposing {func.__name__}: {e}") for model in pydantic_types: logger.debug( diff --git a/autogpt_platform/backend/migrations/20250107095812_preset_soft_delete/migration.sql b/autogpt_platform/backend/migrations/20250107095812_preset_soft_delete/migration.sql new file mode 100644 index 0000000000..e85cee9654 --- /dev/null +++ b/autogpt_platform/backend/migrations/20250107095812_preset_soft_delete/migration.sql @@ -0,0 +1,2 @@ +-- AlterTable +ALTER TABLE "AgentPreset" ADD COLUMN "isDeleted" BOOLEAN NOT NULL DEFAULT false; diff --git a/autogpt_platform/backend/migrations/20250108084101_user_agent_to_library_agent/migration.sql b/autogpt_platform/backend/migrations/20250108084101_user_agent_to_library_agent/migration.sql new file mode 100644 index 0000000000..a822161a17 --- /dev/null +++ b/autogpt_platform/backend/migrations/20250108084101_user_agent_to_library_agent/migration.sql @@ -0,0 +1,46 @@ +/* + Warnings: + + - You are about to drop the `UserAgent` table. If the table is not empty, all the data it contains will be lost. 
+ +*/ +-- DropForeignKey +ALTER TABLE "UserAgent" DROP CONSTRAINT "UserAgent_agentId_agentVersion_fkey"; + +-- DropForeignKey +ALTER TABLE "UserAgent" DROP CONSTRAINT "UserAgent_agentPresetId_fkey"; + +-- DropForeignKey +ALTER TABLE "UserAgent" DROP CONSTRAINT "UserAgent_userId_fkey"; + +-- DropTable +DROP TABLE "UserAgent"; + +-- CreateTable +CREATE TABLE "LibraryAgent" ( + "id" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "userId" TEXT NOT NULL, + "agentId" TEXT NOT NULL, + "agentVersion" INTEGER NOT NULL, + "agentPresetId" TEXT, + "isFavorite" BOOLEAN NOT NULL DEFAULT false, + "isCreatedByUser" BOOLEAN NOT NULL DEFAULT false, + "isArchived" BOOLEAN NOT NULL DEFAULT false, + "isDeleted" BOOLEAN NOT NULL DEFAULT false, + + CONSTRAINT "LibraryAgent_pkey" PRIMARY KEY ("id") +); + +-- CreateIndex +CREATE INDEX "LibraryAgent_userId_idx" ON "LibraryAgent"("userId"); + +-- AddForeignKey +ALTER TABLE "LibraryAgent" ADD CONSTRAINT "LibraryAgent_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "LibraryAgent" ADD CONSTRAINT "LibraryAgent_agentId_agentVersion_fkey" FOREIGN KEY ("agentId", "agentVersion") REFERENCES "AgentGraph"("id", "version") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "LibraryAgent" ADD CONSTRAINT "LibraryAgent_agentPresetId_fkey" FOREIGN KEY ("agentPresetId") REFERENCES "AgentPreset"("id") ON DELETE SET NULL ON UPDATE CASCADE; diff --git a/autogpt_platform/backend/migrations/20250108084305_use_graph_is_active_version/migration.sql b/autogpt_platform/backend/migrations/20250108084305_use_graph_is_active_version/migration.sql new file mode 100644 index 0000000000..5ffca8202f --- /dev/null +++ b/autogpt_platform/backend/migrations/20250108084305_use_graph_is_active_version/migration.sql @@ -0,0 +1,2 @@ +-- AlterTable +ALTER TABLE "LibraryAgent" ADD COLUMN 
"useGraphIsActiveVersion" BOOLEAN NOT NULL DEFAULT false; diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma index b24f770149..c91dccb3df 100644 --- a/autogpt_platform/backend/schema.prisma +++ b/autogpt_platform/backend/schema.prisma @@ -41,8 +41,8 @@ model User { AnalyticsMetrics AnalyticsMetrics[] CreditTransaction CreditTransaction[] - AgentPreset AgentPreset[] - UserAgent UserAgent[] + AgentPreset AgentPreset[] + LibraryAgent LibraryAgent[] Profile Profile[] StoreListing StoreListing[] @@ -78,7 +78,7 @@ model AgentGraph { AgentGraphExecution AgentGraphExecution[] AgentPreset AgentPreset[] - UserAgent UserAgent[] + LibraryAgent LibraryAgent[] StoreListing StoreListing[] StoreListingVersion StoreListingVersion? @@ -116,9 +116,11 @@ model AgentPreset { Agent AgentGraph @relation(fields: [agentId, agentVersion], references: [id, version], onDelete: Cascade) InputPresets AgentNodeExecutionInputOutput[] @relation("AgentPresetsInputData") - UserAgents UserAgent[] + LibraryAgents LibraryAgent[] AgentExecution AgentGraphExecution[] + isDeleted Boolean @default(false) + @@index([userId]) } @@ -163,7 +165,7 @@ model UserNotificationBatch { // For the library page // It is a user controlled list of agents, that they will see in there library -model UserAgent { +model LibraryAgent { id String @id @default(uuid()) createdAt DateTime @default(now()) updatedAt DateTime @default(now()) @updatedAt @@ -178,6 +180,8 @@ model UserAgent { agentPresetId String? AgentPreset AgentPreset? 
@relation(fields: [agentPresetId], references: [id]) + useGraphIsActiveVersion Boolean @default(false) + isFavorite Boolean @default(false) isCreatedByUser Boolean @default(false) isArchived Boolean @default(false) diff --git a/autogpt_platform/backend/test/executor/test_manager.py b/autogpt_platform/backend/test/executor/test_manager.py index 431c9e2c4b..3e4a7cacd2 100644 --- a/autogpt_platform/backend/test/executor/test_manager.py +++ b/autogpt_platform/backend/test/executor/test_manager.py @@ -5,8 +5,9 @@ import fastapi.responses import pytest from prisma.models import User +import backend.server.v2.library.model import backend.server.v2.store.model -from backend.blocks.basic import FindInDictionaryBlock, StoreValueBlock +from backend.blocks.basic import AgentInputBlock, FindInDictionaryBlock, StoreValueBlock from backend.blocks.maths import CalculatorBlock, Operation from backend.data import execution, graph from backend.server.model import CreateGraph @@ -131,7 +132,7 @@ async def test_agent_execution(server: SpinTestServer): logger.info("Starting test_agent_execution") test_user = await create_test_user() test_graph = await create_graph(server, create_test_graph(), test_user) - data = {"node_input": {"input_1": "Hello", "input_2": "World"}} + data = {"input_1": "Hello", "input_2": "World"} graph_exec_id = await execute_graph( server.agent_server, test_graph, @@ -295,6 +296,192 @@ async def test_static_input_link_on_graph(server: SpinTestServer): logger.info("Completed test_static_input_link_on_graph") +@pytest.mark.asyncio(scope="session") +async def test_execute_preset(server: SpinTestServer): + """ + Test executing a preset. + + This test ensures that: + 1. A preset can be successfully executed + 2. The execution results are correct + + Args: + server (SpinTestServer): The test server instance. 
+ """ + # Create test graph and user + nodes = [ + graph.Node( # 0 + block_id=AgentInputBlock().id, + input_default={"name": "dictionary"}, + ), + graph.Node( # 1 + block_id=AgentInputBlock().id, + input_default={"name": "selected_value"}, + ), + graph.Node( # 2 + block_id=StoreValueBlock().id, + input_default={"input": {"key1": "Hi", "key2": "Everyone"}}, + ), + graph.Node( # 3 + block_id=FindInDictionaryBlock().id, + input_default={"key": "", "input": {}}, + ), + ] + links = [ + graph.Link( + source_id=nodes[0].id, + sink_id=nodes[2].id, + source_name="result", + sink_name="input", + ), + graph.Link( + source_id=nodes[1].id, + sink_id=nodes[3].id, + source_name="result", + sink_name="key", + ), + graph.Link( + source_id=nodes[2].id, + sink_id=nodes[3].id, + source_name="output", + sink_name="input", + ), + ] + test_graph = graph.Graph( + name="TestGraph", + description="Test graph", + nodes=nodes, + links=links, + ) + test_user = await create_test_user() + test_graph = await create_graph(server, test_graph, test_user) + + # Create preset with initial values + preset = backend.server.v2.library.model.CreateLibraryAgentPresetRequest( + name="Test Preset With Clash", + description="Test preset with clashing input values", + agent_id=test_graph.id, + agent_version=test_graph.version, + inputs={ + "dictionary": {"key1": "Hello", "key2": "World"}, + "selected_value": "key2", + }, + is_active=True, + ) + created_preset = await server.agent_server.test_create_preset(preset, test_user.id) + + # Execute preset with overriding values + result = await server.agent_server.test_execute_preset( + graph_id=test_graph.id, + graph_version=test_graph.version, + preset_id=created_preset.id, + user_id=test_user.id, + ) + + # Verify execution + assert result is not None + graph_exec_id = result["id"] + + # Wait for execution to complete + executions = await wait_execution(test_user.id, test_graph.id, graph_exec_id) + assert len(executions) == 4 + + # FindInDictionaryBlock should wait 
for the input pin to be provided, + # Hence executing extraction of "key" from {"key1": "value1", "key2": "value2"} + assert executions[3].status == execution.ExecutionStatus.COMPLETED + assert executions[3].output_data == {"output": ["World"]} + + +@pytest.mark.asyncio(scope="session") +async def test_execute_preset_with_clash(server: SpinTestServer): + """ + Test executing a preset with clashing input data. + """ + # Create test graph and user + nodes = [ + graph.Node( # 0 + block_id=AgentInputBlock().id, + input_default={"name": "dictionary"}, + ), + graph.Node( # 1 + block_id=AgentInputBlock().id, + input_default={"name": "selected_value"}, + ), + graph.Node( # 2 + block_id=StoreValueBlock().id, + input_default={"input": {"key1": "Hi", "key2": "Everyone"}}, + ), + graph.Node( # 3 + block_id=FindInDictionaryBlock().id, + input_default={"key": "", "input": {}}, + ), + ] + links = [ + graph.Link( + source_id=nodes[0].id, + sink_id=nodes[2].id, + source_name="result", + sink_name="input", + ), + graph.Link( + source_id=nodes[1].id, + sink_id=nodes[3].id, + source_name="result", + sink_name="key", + ), + graph.Link( + source_id=nodes[2].id, + sink_id=nodes[3].id, + source_name="output", + sink_name="input", + ), + ] + test_graph = graph.Graph( + name="TestGraph", + description="Test graph", + nodes=nodes, + links=links, + ) + test_user = await create_test_user() + test_graph = await create_graph(server, test_graph, test_user) + + # Create preset with initial values + preset = backend.server.v2.library.model.CreateLibraryAgentPresetRequest( + name="Test Preset With Clash", + description="Test preset with clashing input values", + agent_id=test_graph.id, + agent_version=test_graph.version, + inputs={ + "dictionary": {"key1": "Hello", "key2": "World"}, + "selected_value": "key2", + }, + is_active=True, + ) + created_preset = await server.agent_server.test_create_preset(preset, test_user.id) + + # Execute preset with overriding values + result = await 
server.agent_server.test_execute_preset( + graph_id=test_graph.id, + graph_version=test_graph.version, + preset_id=created_preset.id, + node_input={"selected_value": "key1"}, + user_id=test_user.id, + ) + + # Verify execution + assert result is not None + graph_exec_id = result["id"] + + # Wait for execution to complete + executions = await wait_execution(test_user.id, test_graph.id, graph_exec_id) + assert len(executions) == 4 + + # FindInDictionaryBlock should wait for the input pin to be provided, + # Hence executing extraction of "key" from {"key1": "value1", "key2": "value2"} + assert executions[3].status == execution.ExecutionStatus.COMPLETED + assert executions[3].output_data == {"output": ["Hello"]} + + @pytest.mark.asyncio(scope="session") async def test_store_listing_graph(server: SpinTestServer): logger.info("Starting test_agent_execution") @@ -344,7 +531,7 @@ async def test_store_listing_graph(server: SpinTestServer): ) alt_test_user = admin_user - data = {"node_input": {"input_1": "Hello", "input_2": "World"}} + data = {"input_1": "Hello", "input_2": "World"} graph_exec_id = await execute_graph( server.agent_server, test_graph, diff --git a/autogpt_platform/backend/test/test_data_creator.py b/autogpt_platform/backend/test/test_data_creator.py index 45596b7d90..0dc42fd335 100644 --- a/autogpt_platform/backend/test/test_data_creator.py +++ b/autogpt_platform/backend/test/test_data_creator.py @@ -140,10 +140,10 @@ async def main(): print(f"Inserting {NUM_USERS * MAX_AGENTS_PER_USER} user agents") for user in users: num_agents = random.randint(MIN_AGENTS_PER_USER, MAX_AGENTS_PER_USER) - for _ in range(num_agents): # Create 1 UserAgent per user + for _ in range(num_agents): # Create 1 LibraryAgent per user graph = random.choice(agent_graphs) preset = random.choice(agent_presets) - user_agent = await db.useragent.create( + user_agent = await db.libraryagent.create( data={ "userId": user.id, "agentId": graph.id, diff --git 
a/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts index bd13e5a6cc..981ad04cb7 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts @@ -160,10 +160,8 @@ export default class BackendAPI { return this._get(`/graphs/${id}/versions`); } - createGraph(graphCreateBody: GraphCreatable): Promise; - - createGraph(graphID: GraphCreatable | string): Promise { - let requestBody = { graph: graphID } as GraphCreateRequestBody; + createGraph(graph: GraphCreatable): Promise { + let requestBody = { graph } as GraphCreateRequestBody; return this._request("POST", "/graphs", requestBody); } From 06495c9b02751d846c7f43e9397e8ef8227a81bb Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 11:37:26 -0600 Subject: [PATCH 08/30] fix(backend): improve docs --- autogpt_platform/backend/backend/notifications/email.py | 1 + autogpt_platform/backend/backend/notifications/notifications.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/autogpt_platform/backend/backend/notifications/email.py b/autogpt_platform/backend/backend/notifications/email.py index 9ede68a6f7..6468dc65e2 100644 --- a/autogpt_platform/backend/backend/notifications/email.py +++ b/autogpt_platform/backend/backend/notifications/email.py @@ -42,6 +42,7 @@ class EmailSender: user_email: str, data: NotificationEventModel[T_co] | list[NotificationEventModel[T_co]], ): + """Send an email to a user using a template pulled from the notification type""" if not self.postmark: logger.warning("Postmark client not initialized, email not sent") return diff --git a/autogpt_platform/backend/backend/notifications/notifications.py b/autogpt_platform/backend/backend/notifications/notifications.py index be201ce2d7..b198b4c3db 100644 --- a/autogpt_platform/backend/backend/notifications/notifications.py +++ 
b/autogpt_platform/backend/backend/notifications/notifications.py @@ -152,7 +152,7 @@ class NotificationManager(AppService): return NotificationResult(success=False, message=str(e)) async def _process_immediate(self, message: str) -> bool: - """Process a single notification immediately""" + """Process a single notification immediately, returning whether to put into the failed queue""" try: event = NotificationEventDTO.model_validate_json(message) parsed_event = NotificationEventModel[ From 8e5cc830373f1c670662587613455f98d224db1d Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 15:15:54 -0600 Subject: [PATCH 09/30] fix(backend): the agent block was not attached so substitute the id directly --- autogpt_platform/backend/backend/data/graph.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index 31bd1dee4b..6078d21762 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -70,11 +70,9 @@ class NodeModel(Node): @staticmethod def from_db(node: AgentNode): - if not node.AgentBlock: - raise ValueError(f"Invalid node {node.id}, invalid AgentBlock.") obj = NodeModel( id=node.id, - block_id=node.AgentBlock.id, + block_id=node.agentBlockId, input_default=type.convert(node.constantInput, dict[str, Any]), metadata=type.convert(node.metadata, dict[str, Any]), graph_id=node.agentGraphId, From ed54af9dc88841b43e1c49a3cf028e9fbbbfe5d6 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 17:11:17 -0600 Subject: [PATCH 10/30] fix(frontend): add a timeout on waiting for complete --- autogpt_platform/frontend/src/tests/pages/build.page.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/autogpt_platform/frontend/src/tests/pages/build.page.ts b/autogpt_platform/frontend/src/tests/pages/build.page.ts index 0e76a045c7..28cf0b3b17 100644 --- 
a/autogpt_platform/frontend/src/tests/pages/build.page.ts +++ b/autogpt_platform/frontend/src/tests/pages/build.page.ts @@ -310,8 +310,10 @@ export class BuildPage extends BasePage { async waitForCompletionBadge(): Promise { console.log(`waiting for completion badge`); + // data-id="badge-155a41a3-8e98-4cfb-880e-53392777b41a-COMPLETED" await this.page.waitForSelector( '[data-id^="badge-"][data-id$="-COMPLETED"]', + { timeout: 60_000 }, ); } From f28968c5a944b2153c71e7db9e2ef199a0595e3c Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 18:07:55 -0600 Subject: [PATCH 11/30] fix: change stuff --- autogpt_platform/frontend/playwright.config.ts | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/autogpt_platform/frontend/playwright.config.ts b/autogpt_platform/frontend/playwright.config.ts index ed0bdc445c..b20133ecbf 100644 --- a/autogpt_platform/frontend/playwright.config.ts +++ b/autogpt_platform/frontend/playwright.config.ts @@ -25,6 +25,7 @@ export default defineConfig({ reporter: "html", /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */ use: { + // viewport: { width: 2560, height: 1440 }, /* Base URL to use in actions like `await page.goto('/')`. */ baseURL: "http://localhost:3000/", @@ -40,7 +41,10 @@ export default defineConfig({ projects: [ { name: "chromium", - use: { ...devices["Desktop Chrome"] }, + use: { + ...devices["Desktop Chrome"], + viewport: { width: 1920, height: 1080 }, + }, }, // { @@ -50,7 +54,10 @@ export default defineConfig({ { name: "webkit", - use: { ...devices["Desktop Safari"] }, + use: { + ...devices["Desktop Safari"], + viewport: { width: 1920, height: 1080 }, + }, }, // /* Test against mobile viewports. 
*/ From f208840ba05caee3c3136a31dfdb65a74285b163 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 18:49:14 -0600 Subject: [PATCH 12/30] Revert "fix: change stuff" This reverts commit f28968c5a944b2153c71e7db9e2ef199a0595e3c. --- autogpt_platform/frontend/playwright.config.ts | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/autogpt_platform/frontend/playwright.config.ts b/autogpt_platform/frontend/playwright.config.ts index b20133ecbf..ed0bdc445c 100644 --- a/autogpt_platform/frontend/playwright.config.ts +++ b/autogpt_platform/frontend/playwright.config.ts @@ -25,7 +25,6 @@ export default defineConfig({ reporter: "html", /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */ use: { - // viewport: { width: 2560, height: 1440 }, /* Base URL to use in actions like `await page.goto('/')`. */ baseURL: "http://localhost:3000/", @@ -41,10 +40,7 @@ export default defineConfig({ projects: [ { name: "chromium", - use: { - ...devices["Desktop Chrome"], - viewport: { width: 1920, height: 1080 }, - }, + use: { ...devices["Desktop Chrome"] }, }, // { @@ -54,10 +50,7 @@ export default defineConfig({ { name: "webkit", - use: { - ...devices["Desktop Safari"], - viewport: { width: 1920, height: 1080 }, - }, + use: { ...devices["Desktop Safari"] }, }, // /* Test against mobile viewports. */ From fcdc769566e2ba7967205f2f406c0f4e2dcd441e Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 19:06:11 -0600 Subject: [PATCH 13/30] Revert "fix(frontend): add a timeout on waiting for complete" This reverts commit ed54af9dc88841b43e1c49a3cf028e9fbbbfe5d6. 
--- autogpt_platform/frontend/src/tests/pages/build.page.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/autogpt_platform/frontend/src/tests/pages/build.page.ts b/autogpt_platform/frontend/src/tests/pages/build.page.ts index 28cf0b3b17..0e76a045c7 100644 --- a/autogpt_platform/frontend/src/tests/pages/build.page.ts +++ b/autogpt_platform/frontend/src/tests/pages/build.page.ts @@ -310,10 +310,8 @@ export class BuildPage extends BasePage { async waitForCompletionBadge(): Promise { console.log(`waiting for completion badge`); - // data-id="badge-155a41a3-8e98-4cfb-880e-53392777b41a-COMPLETED" await this.page.waitForSelector( '[data-id^="badge-"][data-id$="-COMPLETED"]', - { timeout: 60_000 }, ); } From dbeca6f3c3c4f8019fcbb3b33d3560376d7dc591 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 19:06:29 -0600 Subject: [PATCH 14/30] feat: maybe try spawning the containers? --- .github/workflows/platform-frontend-ci.yml | 28 ++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index 8fc27fde56..e89b4b0616 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -62,6 +62,30 @@ jobs: fail-fast: false matrix: browser: [chromium, webkit] + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + # We know these are here, don't report this as a security vulnerability + # This is used as the default credential for the entire system's RabbitMQ instance + # If you want to replace this, you can do so by making our entire system generate + # new credentials for each local user and update the environment variables in + # the backend service, docker composes, and examples + RABBITMQ_DEFAULT_USER: "rabbitmq_user_default" + RABBITMQ_DEFAULT_PASS: "k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7" + services: + redis: + image: bitnami/redis:6.2 + env: + REDIS_PASSWORD: testpassword + ports: + - 6379:6379 + 
rabbitmq: + image: rabbitmq:3.12-management + ports: + - 5672:5672 + - 15672:15672 + env: + RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }} + RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }} steps: - name: Checkout repository @@ -77,8 +101,8 @@ jobs: - name: Free Disk Space (Ubuntu) uses: jlumbroso/free-disk-space@main with: - large-packages: false # slow - docker-images: false # limited benefit + large-packages: false # slow + docker-images: false # limited benefit - name: Copy default supabase .env run: | From 8eb0cbb05b45434ba9a908cd7f8b8cd359ec2c69 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 19:06:57 -0600 Subject: [PATCH 15/30] use logs --- .github/workflows/platform-frontend-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index e89b4b0616..6329b9cf4c 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -132,7 +132,7 @@ jobs: yarn test --project=${{ matrix.browser }} - name: Print Docker Compose logs in debug mode - if: runner.debug + if: always() run: | docker compose -f ../docker-compose.yml logs From 6ddac1465d8cad60cad47497274e2180589e0c95 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 19:23:55 -0600 Subject: [PATCH 16/30] Update platform-frontend-ci.yml --- .github/workflows/platform-frontend-ci.yml | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index 6329b9cf4c..a12a102918 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -71,22 +71,7 @@ jobs: # the backend service, docker composes, and examples RABBITMQ_DEFAULT_USER: "rabbitmq_user_default" RABBITMQ_DEFAULT_PASS: "k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7" - services: - redis: - image: 
bitnami/redis:6.2 - env: - REDIS_PASSWORD: testpassword - ports: - - 6379:6379 - rabbitmq: - image: rabbitmq:3.12-management - ports: - - 5672:5672 - - 15672:15672 - env: - RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }} - RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }} - + steps: - name: Checkout repository uses: actions/checkout@v4 @@ -116,6 +101,10 @@ jobs: run: | docker compose -f ../docker-compose.yml up -d + - name: Run docker compose logs + run: | + docker compose logs -n all + - name: Install dependencies run: | yarn install --frozen-lockfile From 88b82c506bec7dbb479ea647795a1598b453200a Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 19:25:05 -0600 Subject: [PATCH 17/30] Update platform-frontend-ci.yml --- .github/workflows/platform-frontend-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index a12a102918..7344b5df9e 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -103,7 +103,7 @@ jobs: - name: Run docker compose logs run: | - docker compose logs -n all + docker compose -f ../docker-compose.yml logs -n all - name: Install dependencies run: | From c6a4809191bc5466a25f61b167b2ac07f1bb4ecc Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 19:44:07 -0600 Subject: [PATCH 18/30] Update platform-frontend-ci.yml --- .github/workflows/platform-frontend-ci.yml | 31 +++++++++++++--------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index 7344b5df9e..80746ca3a2 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -62,16 +62,11 @@ jobs: fail-fast: false matrix: browser: [chromium, webkit] - env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - # We know these are here, don't report 
this as a security vulnerability - # This is used as the default credential for the entire system's RabbitMQ instance - # If you want to replace this, you can do so by making our entire system generate - # new credentials for each local user and update the environment variables in - # the backend service, docker composes, and examples - RABBITMQ_DEFAULT_USER: "rabbitmq_user_default" - RABBITMQ_DEFAULT_PASS: "k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7" - + # env: + # OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + # RABBITMQ_DEFAULT_USER: "rabbitmq_user_default" + # RABBITMQ_DEFAULT_PASS: "k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7" + steps: - name: Checkout repository uses: actions/checkout@v4 @@ -101,9 +96,11 @@ jobs: run: | docker compose -f ../docker-compose.yml up -d - - name: Run docker compose logs + - name: Watch Docker Logs run: | - docker compose -f ../docker-compose.yml logs -n all + docker compose -f ../docker-compose.yml logs -f & + LOGS_PID=$! + echo "LOGS_PID=$LOGS_PID" >> $GITHUB_ENV - name: Install dependencies run: | @@ -117,10 +114,18 @@ jobs: run: yarn playwright install --with-deps ${{ matrix.browser }} - name: Run tests + timeout-minutes: 10 run: | yarn test --project=${{ matrix.browser }} - - name: Print Docker Compose logs in debug mode + - name: Stop Docker Logs + if: always() + run: | + if [ ! 
-z "$LOGS_PID" ]; then + kill $LOGS_PID || true + fi + + - name: Print Final Docker Compose logs if: always() run: | docker compose -f ../docker-compose.yml logs From cd03f86a5c018f80aeb6ea4577eac7646e4d8a0e Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 19:51:14 -0600 Subject: [PATCH 19/30] Update platform-frontend-ci.yml --- .github/workflows/platform-frontend-ci.yml | 32 +++++++++------------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index 80746ca3a2..e104b5048f 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -96,12 +96,6 @@ jobs: run: | docker compose -f ../docker-compose.yml up -d - - name: Watch Docker Logs - run: | - docker compose -f ../docker-compose.yml logs -f & - LOGS_PID=$! - echo "LOGS_PID=$LOGS_PID" >> $GITHUB_ENV - - name: Install dependencies run: | yarn install --frozen-lockfile @@ -113,22 +107,22 @@ jobs: - name: Install Browser '${{ matrix.browser }}' run: yarn playwright install --with-deps ${{ matrix.browser }} - - name: Run tests + - name: Run tests with Docker logs timeout-minutes: 10 run: | + # Start streaming Docker logs and save its PID + docker compose -f ../docker-compose.yml logs -f & + LOGS_PID=$! + + # Run the tests yarn test --project=${{ matrix.browser }} - - - name: Stop Docker Logs - if: always() - run: | - if [ ! -z "$LOGS_PID" ]; then - kill $LOGS_PID || true - fi - - - name: Print Final Docker Compose logs - if: always() - run: | - docker compose -f ../docker-compose.yml logs + TEST_EXIT_CODE=$? 
+ + # Kill the logs process + kill $LOGS_PID || true + + # Exit with the test's exit code + exit $TEST_EXIT_CODE - uses: actions/upload-artifact@v4 if: ${{ !cancelled() }} From 0886ae973b130d9782bcd90f63453a2eda8dcfd7 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 20:02:28 -0600 Subject: [PATCH 20/30] expose port --- autogpt_platform/docker-compose.platform.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/autogpt_platform/docker-compose.platform.yml b/autogpt_platform/docker-compose.platform.yml index 2ba1987612..7802c457c3 100644 --- a/autogpt_platform/docker-compose.platform.yml +++ b/autogpt_platform/docker-compose.platform.yml @@ -93,6 +93,7 @@ services: - ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw= # DO NOT USE IN PRODUCTION!! ports: - "8006:8006" + - "8007:8007" - "8003:8003" # execution scheduler networks: - app-network @@ -157,7 +158,7 @@ services: # rabbitmq: # condition: service_healthy migrate: - condition: service_completed_successfully + condition: service_completed_successfully environment: - SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long - DATABASE_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform From c6173a2b7cf2a132b9ceaf94ef106faab6801f29 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 20:03:08 -0600 Subject: [PATCH 21/30] Revert "Update platform-frontend-ci.yml" This reverts commit cd03f86a5c018f80aeb6ea4577eac7646e4d8a0e. 
--- .github/workflows/platform-frontend-ci.yml | 32 +++++++++++++--------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index e104b5048f..80746ca3a2 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -96,6 +96,12 @@ jobs: run: | docker compose -f ../docker-compose.yml up -d + - name: Watch Docker Logs + run: | + docker compose -f ../docker-compose.yml logs -f & + LOGS_PID=$! + echo "LOGS_PID=$LOGS_PID" >> $GITHUB_ENV + - name: Install dependencies run: | yarn install --frozen-lockfile @@ -107,22 +113,22 @@ jobs: - name: Install Browser '${{ matrix.browser }}' run: yarn playwright install --with-deps ${{ matrix.browser }} - - name: Run tests with Docker logs + - name: Run tests timeout-minutes: 10 run: | - # Start streaming Docker logs and save its PID - docker compose -f ../docker-compose.yml logs -f & - LOGS_PID=$! - - # Run the tests yarn test --project=${{ matrix.browser }} - TEST_EXIT_CODE=$? - - # Kill the logs process - kill $LOGS_PID || true - - # Exit with the test's exit code - exit $TEST_EXIT_CODE + + - name: Stop Docker Logs + if: always() + run: | + if [ ! 
-z "$LOGS_PID" ]; then + kill $LOGS_PID || true + fi + + - name: Print Final Docker Compose logs + if: always() + run: | + docker compose -f ../docker-compose.yml logs - uses: actions/upload-artifact@v4 if: ${{ !cancelled() }} From a8f8dd3da279d9ceaf77a3bd028ac5a28ab2226d Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 20:20:21 -0600 Subject: [PATCH 22/30] Update docker-compose.platform.yml --- autogpt_platform/docker-compose.platform.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/autogpt_platform/docker-compose.platform.yml b/autogpt_platform/docker-compose.platform.yml index 7802c457c3..993b6a1eb2 100644 --- a/autogpt_platform/docker-compose.platform.yml +++ b/autogpt_platform/docker-compose.platform.yml @@ -88,6 +88,7 @@ services: - PYRO_HOST=0.0.0.0 - EXECUTIONSCHEDULER_HOST=rest_server - EXECUTIONMANAGER_HOST=executor + - NOTIFICATION_HOST=rest_server - FRONTEND_BASE_URL=http://localhost:3000 - BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"] - ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw= # DO NOT USE IN PRODUCTION!! @@ -133,6 +134,7 @@ services: - ENABLE_AUTH=true - PYRO_HOST=0.0.0.0 - AGENTSERVER_HOST=rest_server + - NOTIFICATION_HOST=rest_server - ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw= # DO NOT USE IN PRODUCTION!! 
ports: - "8002:8000" From 95eb77e93cb70d1e046169e49927a9fe19714659 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 20:32:15 -0600 Subject: [PATCH 23/30] fix: name --- autogpt_platform/docker-compose.platform.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/autogpt_platform/docker-compose.platform.yml b/autogpt_platform/docker-compose.platform.yml index 993b6a1eb2..8231533f6a 100644 --- a/autogpt_platform/docker-compose.platform.yml +++ b/autogpt_platform/docker-compose.platform.yml @@ -88,7 +88,7 @@ services: - PYRO_HOST=0.0.0.0 - EXECUTIONSCHEDULER_HOST=rest_server - EXECUTIONMANAGER_HOST=executor - - NOTIFICATION_HOST=rest_server + - NOTIFICATIONMANAGER_HOST=rest_server - FRONTEND_BASE_URL=http://localhost:3000 - BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"] - ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw= # DO NOT USE IN PRODUCTION!! @@ -134,7 +134,7 @@ services: - ENABLE_AUTH=true - PYRO_HOST=0.0.0.0 - AGENTSERVER_HOST=rest_server - - NOTIFICATION_HOST=rest_server + - NOTIFICATIONMANAGER_HOST=rest_server - ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw= # DO NOT USE IN PRODUCTION!! 
ports: - "8002:8000" From ef1d4757eb3b66b477a3454ba998239471b4d339 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 21:19:40 -0600 Subject: [PATCH 24/30] fix(infra): recompose ourselves --- autogpt_platform/docker-compose.platform.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/autogpt_platform/docker-compose.platform.yml b/autogpt_platform/docker-compose.platform.yml index 8231533f6a..948ade616e 100644 --- a/autogpt_platform/docker-compose.platform.yml +++ b/autogpt_platform/docker-compose.platform.yml @@ -81,8 +81,8 @@ services: - REDIS_PORT=6379 - RABBITMQ_HOST=rabbitmq - RABBITMQ_PORT=5672 - - RABBITMQ_USER=rabbitmq_user_default - - RABBITMQ_PASSWORD=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7 + - RABBITMQ_DEFAULT_USER=rabbitmq_user_default + - RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7 - REDIS_PASSWORD=password - ENABLE_AUTH=true - PYRO_HOST=0.0.0.0 @@ -129,8 +129,8 @@ services: - REDIS_PASSWORD=password - RABBITMQ_HOST=rabbitmq - RABBITMQ_PORT=5672 - - RABBITMQ_USER=rabbitmq_user_default - - RABBITMQ_PASSWORD=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7 + - RABBITMQ_DEFAULT_USER=rabbitmq_user_default + - RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7 - ENABLE_AUTH=true - PYRO_HOST=0.0.0.0 - AGENTSERVER_HOST=rest_server @@ -167,10 +167,10 @@ services: - REDIS_HOST=redis - REDIS_PORT=6379 - REDIS_PASSWORD=password - # - RABBITMQ_HOST=rabbitmq # TODO: Uncomment this when we have a need for it in websocket (like nofifying when stuff went down) + # - RABBITMQ_HOST=rabbitmq # - RABBITMQ_PORT=5672 - # - RABBITMQ_USER=rabbitmq_user_default - # - RABBITMQ_PASSWORD=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7 + # - RABBITMQ_DEFAULT_USER=rabbitmq_user_default + # - RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7 - ENABLE_AUTH=true - PYRO_HOST=0.0.0.0 - BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"] From 62a8b925a0bffb7530923a4b31ff1aa221272aba Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 
21:36:38 -0600 Subject: [PATCH 25/30] Update platform-frontend-ci.yml --- .github/workflows/platform-frontend-ci.yml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index 80746ca3a2..dd2a1d600f 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -96,12 +96,6 @@ jobs: run: | docker compose -f ../docker-compose.yml up -d - - name: Watch Docker Logs - run: | - docker compose -f ../docker-compose.yml logs -f & - LOGS_PID=$! - echo "LOGS_PID=$LOGS_PID" >> $GITHUB_ENV - - name: Install dependencies run: | yarn install --frozen-lockfile @@ -118,13 +112,6 @@ jobs: run: | yarn test --project=${{ matrix.browser }} - - name: Stop Docker Logs - if: always() - run: | - if [ ! -z "$LOGS_PID" ]; then - kill $LOGS_PID || true - fi - - name: Print Final Docker Compose logs if: always() run: | From 67af37135ffb8d0c05be7ac32e7ae7c1fb0a5294 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 21:37:10 -0600 Subject: [PATCH 26/30] Update platform-frontend-ci.yml --- .github/workflows/platform-frontend-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index dd2a1d600f..05a1e91326 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -108,7 +108,7 @@ jobs: run: yarn playwright install --with-deps ${{ matrix.browser }} - name: Run tests - timeout-minutes: 10 + timeout-minutes: 20 run: | yarn test --project=${{ matrix.browser }} From e1e585292852c5ebeb3f36fe27baec93014d1b6b Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 21:37:47 -0600 Subject: [PATCH 27/30] Update platform-frontend-ci.yml --- .github/workflows/platform-frontend-ci.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/platform-frontend-ci.yml 
b/.github/workflows/platform-frontend-ci.yml index 05a1e91326..f134687525 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -62,10 +62,6 @@ jobs: fail-fast: false matrix: browser: [chromium, webkit] - # env: - # OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - # RABBITMQ_DEFAULT_USER: "rabbitmq_user_default" - # RABBITMQ_DEFAULT_PASS: "k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7" steps: - name: Checkout repository From 15275e2ce105e0403c341d717daf3ab6a90c4d54 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 21:58:12 -0600 Subject: [PATCH 28/30] feat(backend): spawn the notifications service + basic test (#9464) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want to send emails on a schedule, in response to events, and be expandable without being overbearing on the amount of effort to implement. We also want this to use rabbitmq and be easy for other services to send messages into. 
This PR adds the first use of the service to simply show a log message ### Changes 🏗️ - Adds a new backend service for notifications - Adds first notification into the service -> Agent Execution - Adds spawning the notification service Also - Adds RabbitMQ to CI so we can test stuff - Adds a minor fix for one of the migrations that I thought was causing failures; it wasn't, but the change is still useful ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Built and ran an agent and ensured the following log line appeared which shows the event would have sent an email ``` 2025-02-10 15:52:02,232 INFO Processing notification: user_id='96b8d2f5-a036-437f-bd8e-ba8856028553' type= data=AgentRunData(agent_name='CalculatorBlock', credits_used=0.0, execution_time=0.0, graph_id='30e5f332-a092-4795-892a-b063a8c7bdd9', node_count=1) created_at=datetime.datetime(2025, 2, 10, 15, 52, 2, 162865) ``` #### For configuration changes: - [ ] `.env.example` is updated or already compatible with my changes - [ ] `docker-compose.yml` is updated or already compatible with my changes - [ ] I have included a list of my configuration changes in the PR description (under **Changes**) None of the other ports are configurable via the `.env.example` listing, so they were left as is
Examples of configuration changes - Changing ports - Adding new services that need to communicate with each other - Secrets or environment variable changes - New or infrastructure changes such as databases
--------- Co-authored-by: Reinier van der Leer --- .github/workflows/platform-backend-ci.yml | 15 ++ .github/workflows/platform-frontend-ci.yml | 9 +- autogpt_platform/backend/backend/app.py | 2 + .../backend/backend/data/graph.py | 4 +- .../backend/backend/executor/manager.py | 39 +++- .../backend/backend/notifications/__init__.py | 5 + .../backend/notifications/notifications.py | 213 ++++++++++++++++++ autogpt_platform/backend/backend/rest.py | 2 + .../backend/backend/util/service.py | 29 ++- .../backend/backend/util/settings.py | 5 + autogpt_platform/backend/backend/util/test.py | 4 + autogpt_platform/docker-compose.platform.yml | 19 +- 12 files changed, 313 insertions(+), 33 deletions(-) create mode 100644 autogpt_platform/backend/backend/notifications/__init__.py create mode 100644 autogpt_platform/backend/backend/notifications/notifications.py diff --git a/.github/workflows/platform-backend-ci.yml b/.github/workflows/platform-backend-ci.yml index cdab6ed300..938cb7b6f9 100644 --- a/.github/workflows/platform-backend-ci.yml +++ b/.github/workflows/platform-backend-ci.yml @@ -42,6 +42,14 @@ jobs: REDIS_PASSWORD: testpassword ports: - 6379:6379 + rabbitmq: + image: rabbitmq:3.12-management + ports: + - 5672:5672 + - 15672:15672 + env: + RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }} + RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }} steps: - name: Checkout repository @@ -139,6 +147,13 @@ jobs: RUN_ENV: local PORT: 8080 OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + # We know these are here, don't report this as a security vulnerability + # This is used as the default credential for the entire system's RabbitMQ instance + # If you want to replace this, you can do so by making our entire system generate + # new credentials for each local user and update the environment variables in + # the backend service, docker composes, and examples + RABBITMQ_DEFAULT_USER: 'rabbitmq_user_default' + RABBITMQ_DEFAULT_PASS: 'k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7' # - 
name: Upload coverage reports to Codecov # uses: codecov/codecov-action@v4 diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index 8fc27fde56..f134687525 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -77,8 +77,8 @@ jobs: - name: Free Disk Space (Ubuntu) uses: jlumbroso/free-disk-space@main with: - large-packages: false # slow - docker-images: false # limited benefit + large-packages: false # slow + docker-images: false # limited benefit - name: Copy default supabase .env run: | @@ -104,11 +104,12 @@ jobs: run: yarn playwright install --with-deps ${{ matrix.browser }} - name: Run tests + timeout-minutes: 20 run: | yarn test --project=${{ matrix.browser }} - - name: Print Docker Compose logs in debug mode - if: runner.debug + - name: Print Final Docker Compose logs + if: always() run: | docker compose -f ../docker-compose.yml logs diff --git a/autogpt_platform/backend/backend/app.py b/autogpt_platform/backend/backend/app.py index 5d77ea9632..eb0c6370c6 100644 --- a/autogpt_platform/backend/backend/app.py +++ b/autogpt_platform/backend/backend/app.py @@ -25,6 +25,7 @@ def main(**kwargs): """ from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler + from backend.notifications import NotificationManager from backend.server.rest_api import AgentServer from backend.server.ws_api import WebsocketServer @@ -32,6 +33,7 @@ def main(**kwargs): DatabaseManager(), ExecutionManager(), ExecutionScheduler(), + NotificationManager(), WebsocketServer(), AgentServer(), **kwargs, diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index 31bd1dee4b..6078d21762 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -70,11 +70,9 @@ class NodeModel(Node): @staticmethod def from_db(node: AgentNode): - if not node.AgentBlock: - raise 
ValueError(f"Invalid node {node.id}, invalid AgentBlock.") obj = NodeModel( id=node.id, - block_id=node.AgentBlock.id, + block_id=node.agentBlockId, input_default=type.convert(node.constantInput, dict[str, Any]), metadata=type.convert(node.metadata, dict[str, Any]), graph_id=node.agentGraphId, diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index 5f1b8e62c0..513e384b08 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -12,8 +12,15 @@ from typing import TYPE_CHECKING, Any, Generator, Optional, TypeVar, cast from redis.lock import Lock as RedisLock +from backend.data.notifications import ( + AgentRunData, + NotificationEventDTO, + NotificationType, +) + if TYPE_CHECKING: from backend.executor import DatabaseManager + from backend.notifications.notifications import NotificationManager from autogpt_libs.utils.cache import thread_cached @@ -108,6 +115,7 @@ ExecutionStream = Generator[NodeExecutionEntry, None, None] def execute_node( db_client: "DatabaseManager", creds_manager: IntegrationCredentialsManager, + notification_service: "NotificationManager", data: NodeExecutionEntry, execution_stats: dict[str, Any] | None = None, ) -> ExecutionStream: @@ -196,7 +204,7 @@ def execute_node( # Charge the user for the execution before running the block. # TODO: We assume the block is executed within 0 seconds. # This is fine because for now, there is no block that is charged by time. 
- db_client.spend_credits(data, input_size + output_size, 0) + cost = db_client.spend_credits(data, input_size + output_size, 0) for output_name, output_data in node_block.execute( input_data, **extra_exec_kwargs @@ -216,7 +224,22 @@ def execute_node( ): yield execution + # Update execution status and spend credits update_execution(ExecutionStatus.COMPLETED) + event = NotificationEventDTO( + user_id=user_id, + type=NotificationType.AGENT_RUN, + data=AgentRunData( + agent_name=node_block.name, + credits_used=cost, + execution_time=0, + graph_id=graph_id, + node_count=1, + ).model_dump(), + ) + + logger.info(f"Sending notification for {event}") + notification_service.queue_notification(event) except Exception as e: error_msg = str(e) @@ -480,6 +503,7 @@ class Executor: cls.pid = os.getpid() cls.db_client = get_db_client() cls.creds_manager = IntegrationCredentialsManager() + cls.notification_service = get_notification_service() # Set up shutdown handlers cls.shutdown_lock = threading.Lock() @@ -554,7 +578,11 @@ class Executor: try: log_metadata.info(f"Start node execution {node_exec.node_exec_id}") for execution in execute_node( - cls.db_client, cls.creds_manager, node_exec, stats + db_client=cls.db_client, + creds_manager=cls.creds_manager, + notification_service=cls.notification_service, + data=node_exec, + execution_stats=stats, ): q.add(execution) log_metadata.info(f"Finished node execution {node_exec.node_exec_id}") @@ -968,6 +996,13 @@ def get_db_client() -> "DatabaseManager": return get_service_client(DatabaseManager) +@thread_cached +def get_notification_service() -> "NotificationManager": + from backend.notifications import NotificationManager + + return get_service_client(NotificationManager) + + @contextmanager def synchronized(key: str, timeout: int = 60): lock: RedisLock = redis.get_redis().lock(f"lock:{key}", timeout=timeout) diff --git a/autogpt_platform/backend/backend/notifications/__init__.py 
b/autogpt_platform/backend/backend/notifications/__init__.py new file mode 100644 index 0000000000..e791d49b3c --- /dev/null +++ b/autogpt_platform/backend/backend/notifications/__init__.py @@ -0,0 +1,5 @@ +from .notifications import NotificationManager + +__all__ = [ + "NotificationManager", +] diff --git a/autogpt_platform/backend/backend/notifications/notifications.py b/autogpt_platform/backend/backend/notifications/notifications.py new file mode 100644 index 0000000000..d4e591ac8b --- /dev/null +++ b/autogpt_platform/backend/backend/notifications/notifications.py @@ -0,0 +1,213 @@ +import logging +import time +from typing import TYPE_CHECKING + +from aio_pika.exceptions import QueueEmpty +from autogpt_libs.utils.cache import thread_cached + +from backend.data.notifications import ( + BatchingStrategy, + NotificationEventDTO, + NotificationEventModel, + NotificationResult, + get_data_type, +) +from backend.data.rabbitmq import Exchange, ExchangeType, Queue, RabbitMQConfig +from backend.executor.database import DatabaseManager +from backend.util.service import AppService, expose, get_service_client +from backend.util.settings import Settings + +if TYPE_CHECKING: + from backend.executor import DatabaseManager + +logger = logging.getLogger(__name__) +settings = Settings() + + +def create_notification_config() -> RabbitMQConfig: + """Create RabbitMQ configuration for notifications""" + notification_exchange = Exchange(name="notifications", type=ExchangeType.TOPIC) + + summary_exchange = Exchange(name="summaries", type=ExchangeType.TOPIC) + + dead_letter_exchange = Exchange(name="dead_letter", type=ExchangeType.DIRECT) + delay_exchange = Exchange(name="delay", type=ExchangeType.DIRECT) + + queues = [ + # Main notification queues + Queue( + name="immediate_notifications", + exchange=notification_exchange, + routing_key="notification.immediate.#", + arguments={ + "x-dead-letter-exchange": dead_letter_exchange.name, + "x-dead-letter-routing-key": "failed.immediate", + 
}, + ), + Queue( + name="backoff_notifications", + exchange=notification_exchange, + routing_key="notification.backoff.#", + arguments={ + "x-dead-letter-exchange": dead_letter_exchange.name, + "x-dead-letter-routing-key": "failed.backoff", + }, + ), + # Summary queues + Queue( + name="daily_summary_trigger", + exchange=summary_exchange, + routing_key="summary.daily", + arguments={"x-message-ttl": 86400000}, # 24 hours + ), + Queue( + name="weekly_summary_trigger", + exchange=summary_exchange, + routing_key="summary.weekly", + arguments={"x-message-ttl": 604800000}, # 7 days + ), + Queue( + name="monthly_summary_trigger", + exchange=summary_exchange, + routing_key="summary.monthly", + arguments={"x-message-ttl": 2592000000}, # 30 days + ), + # Failed notifications queue + Queue( + name="failed_notifications", + exchange=dead_letter_exchange, + routing_key="failed.#", + ), + ] + + return RabbitMQConfig( + exchanges=[ + notification_exchange, + # batch_exchange, + summary_exchange, + dead_letter_exchange, + delay_exchange, + ], + queues=queues, + ) + + +class NotificationManager(AppService): + """Service for handling notifications with batching support""" + + def __init__(self): + super().__init__() + self.use_db = True + self.rabbitmq_config = create_notification_config() + self.running = True + + @classmethod + def get_port(cls) -> int: + return settings.config.notification_service_port + + def get_routing_key(self, event: NotificationEventModel) -> str: + """Get the appropriate routing key for an event""" + if event.strategy == BatchingStrategy.IMMEDIATE: + return f"notification.immediate.{event.type.value}" + elif event.strategy == BatchingStrategy.BACKOFF: + return f"notification.backoff.{event.type.value}" + return f"notification.{event.type.value}" + + @expose + def queue_notification(self, event: NotificationEventDTO) -> NotificationResult: + """Queue a notification - exposed method for other services to call""" + try: + logger.info(f"Recieved Request to 
queue {event=}") + # Workaround for not being able to seralize generics over the expose bus + parsed_event = NotificationEventModel[ + get_data_type(event.type) + ].model_validate(event.model_dump()) + routing_key = self.get_routing_key(parsed_event) + message = parsed_event.model_dump_json() + + logger.info(f"Recieved Request to queue {message=}") + + exchange = "notifications" + + # Publish to RabbitMQ + self.run_and_wait( + self.rabbit.publish_message( + routing_key=routing_key, + message=message, + exchange=next( + ex for ex in self.rabbit_config.exchanges if ex.name == exchange + ), + ) + ) + + return NotificationResult( + success=True, + message=(f"Notification queued with routing key: {routing_key}"), + ) + + except Exception as e: + logger.error(f"Error queueing notification: {e}") + return NotificationResult(success=False, message=str(e)) + + async def _process_immediate(self, message: str) -> bool: + """Process a single notification immediately""" + try: + event = NotificationEventDTO.model_validate_json(message) + parsed_event = NotificationEventModel[ + get_data_type(event.type) + ].model_validate_json(message) + # Implementation of actual notification sending would go here + # self.email_sender.send_templated(event.type, event.user_id, parsed_event) + logger.info(f"Processing notification: {parsed_event}") + return True + except Exception as e: + logger.error(f"Error processing notification: {e}") + return False + + def run_service(self): + logger.info(f"[{self.service_name}] Started notification service") + + # Set up queue consumers + channel = self.run_and_wait(self.rabbit.get_channel()) + + immediate_queue = self.run_and_wait( + channel.get_queue("immediate_notifications") + ) + + while self.running: + try: + # Process immediate notifications + try: + message = self.run_and_wait(immediate_queue.get()) + + if message: + success = self.run_and_wait( + self._process_immediate(message.body.decode()) + ) + if success: + self.run_and_wait(message.ack()) 
+ else: + self.run_and_wait(message.reject(requeue=True)) + except QueueEmpty: + logger.debug("Immediate queue empty") + + time.sleep(0.1) + + except QueueEmpty as e: + logger.debug(f"Queue empty: {e}") + except Exception as e: + logger.error(f"Error in notification service loop: {e}") + + def cleanup(self): + """Cleanup service resources""" + self.running = False + super().cleanup() + + # ------- UTILITIES ------- # + + +@thread_cached +def get_db_client() -> "DatabaseManager": + from backend.executor import DatabaseManager + + return get_service_client(DatabaseManager) diff --git a/autogpt_platform/backend/backend/rest.py b/autogpt_platform/backend/backend/rest.py index e0da452ca2..ecff860bc5 100644 --- a/autogpt_platform/backend/backend/rest.py +++ b/autogpt_platform/backend/backend/rest.py @@ -1,5 +1,6 @@ from backend.app import run_processes from backend.executor import DatabaseManager, ExecutionScheduler +from backend.notifications.notifications import NotificationManager from backend.server.rest_api import AgentServer @@ -8,6 +9,7 @@ def main(): Run all the processes required for the AutoGPT-server REST API. 
""" run_processes( + NotificationManager(), DatabaseManager(), ExecutionScheduler(), AgentServer(), diff --git a/autogpt_platform/backend/backend/util/service.py b/autogpt_platform/backend/backend/util/service.py index ac7f437fff..96bdc74bc3 100644 --- a/autogpt_platform/backend/backend/util/service.py +++ b/autogpt_platform/backend/backend/util/service.py @@ -117,9 +117,8 @@ class AppService(AppProcess, ABC): shared_event_loop: asyncio.AbstractEventLoop use_db: bool = False use_redis: bool = False - use_async: bool = False - use_rabbitmq: Optional[rabbitmq.RabbitMQConfig] = None - rabbitmq_service: Optional[rabbitmq.SyncRabbitMQ | rabbitmq.AsyncRabbitMQ] = None + rabbitmq_config: Optional[rabbitmq.RabbitMQConfig] = None + rabbitmq_service: Optional[rabbitmq.AsyncRabbitMQ] = None use_supabase: bool = False def __init__(self): @@ -135,7 +134,7 @@ class AppService(AppProcess, ABC): return os.environ.get(f"{cls.service_name.upper()}_HOST", config.pyro_host) @property - def rabbit(self) -> rabbitmq.SyncRabbitMQ | rabbitmq.AsyncRabbitMQ: + def rabbit(self) -> rabbitmq.AsyncRabbitMQ: """Access the RabbitMQ service. Will raise if not configured.""" if not self.rabbitmq_service: raise RuntimeError("RabbitMQ not configured for this service") @@ -144,9 +143,9 @@ class AppService(AppProcess, ABC): @property def rabbit_config(self) -> rabbitmq.RabbitMQConfig: """Access the RabbitMQ config. 
Will raise if not configured.""" - if not self.use_rabbitmq: + if not self.rabbitmq_config: raise RuntimeError("RabbitMQ not configured for this service") - return self.use_rabbitmq + return self.rabbitmq_config def run_service(self) -> None: while True: @@ -165,16 +164,14 @@ class AppService(AppProcess, ABC): self.shared_event_loop.run_until_complete(db.connect()) if self.use_redis: redis.connect() - if self.use_rabbitmq: + if self.rabbitmq_config: logger.info(f"[{self.__class__.__name__}] ⏳ Configuring RabbitMQ...") - if self.use_async: - self.rabbitmq_service = rabbitmq.AsyncRabbitMQ(self.use_rabbitmq) - self.shared_event_loop.run_until_complete( - self.rabbitmq_service.connect() - ) - else: - self.rabbitmq_service = rabbitmq.SyncRabbitMQ(self.use_rabbitmq) - self.rabbitmq_service.connect() + # if self.use_async: + self.rabbitmq_service = rabbitmq.AsyncRabbitMQ(self.rabbitmq_config) + self.shared_event_loop.run_until_complete(self.rabbitmq_service.connect()) + # else: + # self.rabbitmq_service = rabbitmq.SyncRabbitMQ(self.rabbitmq_config) + # self.rabbitmq_service.connect() if self.use_supabase: from supabase import create_client @@ -203,7 +200,7 @@ class AppService(AppProcess, ABC): if self.use_redis: logger.info(f"[{self.__class__.__name__}] ⏳ Disconnecting Redis...") redis.disconnect() - if self.use_rabbitmq: + if self.rabbitmq_config: logger.info(f"[{self.__class__.__name__}] ⏳ Disconnecting RabbitMQ...") @conn_retry("Pyro", "Starting Pyro Service") diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index c965572631..7eead6f735 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -140,6 +140,11 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): description="The port for agent server API to run on", ) + notification_service_port: int = Field( + default=8007, + description="The port for notification service daemon to 
run on", + ) + platform_base_url: str = Field( default="", description="Must be set so the application knows where it's hosted at. " diff --git a/autogpt_platform/backend/backend/util/test.py b/autogpt_platform/backend/backend/util/test.py index ed4a343ab3..29b2cfdbc5 100644 --- a/autogpt_platform/backend/backend/util/test.py +++ b/autogpt_platform/backend/backend/util/test.py @@ -9,6 +9,7 @@ from backend.data.execution import ExecutionResult, ExecutionStatus from backend.data.model import _BaseCredentials from backend.data.user import create_default_user from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler +from backend.notifications.notifications import NotificationManager from backend.server.rest_api import AgentServer from backend.server.utils import get_user_id @@ -21,6 +22,7 @@ class SpinTestServer: self.exec_manager = ExecutionManager() self.agent_server = AgentServer() self.scheduler = ExecutionScheduler() + self.notif_manager = NotificationManager() @staticmethod def test_get_user_id(): @@ -32,6 +34,7 @@ class SpinTestServer: self.agent_server.__enter__() self.exec_manager.__enter__() self.scheduler.__enter__() + self.notif_manager.__enter__() await db.connect() await initialize_blocks() @@ -46,6 +49,7 @@ class SpinTestServer: self.exec_manager.__exit__(exc_type, exc_val, exc_tb) self.agent_server.__exit__(exc_type, exc_val, exc_tb) self.db_api.__exit__(exc_type, exc_val, exc_tb) + self.notif_manager.__exit__(exc_type, exc_val, exc_tb) def setup_dependency_overrides(self): # Override get_user_id for testing diff --git a/autogpt_platform/docker-compose.platform.yml b/autogpt_platform/docker-compose.platform.yml index 2ba1987612..948ade616e 100644 --- a/autogpt_platform/docker-compose.platform.yml +++ b/autogpt_platform/docker-compose.platform.yml @@ -81,18 +81,20 @@ services: - REDIS_PORT=6379 - RABBITMQ_HOST=rabbitmq - RABBITMQ_PORT=5672 - - RABBITMQ_USER=rabbitmq_user_default - - 
RABBITMQ_PASSWORD=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7 + - RABBITMQ_DEFAULT_USER=rabbitmq_user_default + - RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7 - REDIS_PASSWORD=password - ENABLE_AUTH=true - PYRO_HOST=0.0.0.0 - EXECUTIONSCHEDULER_HOST=rest_server - EXECUTIONMANAGER_HOST=executor + - NOTIFICATIONMANAGER_HOST=rest_server - FRONTEND_BASE_URL=http://localhost:3000 - BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"] - ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw= # DO NOT USE IN PRODUCTION!! ports: - "8006:8006" + - "8007:8007" - "8003:8003" # execution scheduler networks: - app-network @@ -127,11 +129,12 @@ services: - REDIS_PASSWORD=password - RABBITMQ_HOST=rabbitmq - RABBITMQ_PORT=5672 - - RABBITMQ_USER=rabbitmq_user_default - - RABBITMQ_PASSWORD=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7 + - RABBITMQ_DEFAULT_USER=rabbitmq_user_default + - RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7 - ENABLE_AUTH=true - PYRO_HOST=0.0.0.0 - AGENTSERVER_HOST=rest_server + - NOTIFICATIONMANAGER_HOST=rest_server - ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw= # DO NOT USE IN PRODUCTION!! 
ports: - "8002:8000" @@ -157,17 +160,17 @@ services: # rabbitmq: # condition: service_healthy migrate: - condition: service_completed_successfully + condition: service_completed_successfully environment: - SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long - DATABASE_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform - REDIS_HOST=redis - REDIS_PORT=6379 - REDIS_PASSWORD=password - # - RABBITMQ_HOST=rabbitmq # TODO: Uncomment this when we have a need for it in websocket (like nofifying when stuff went down) + # - RABBITMQ_HOST=rabbitmq # - RABBITMQ_PORT=5672 - # - RABBITMQ_USER=rabbitmq_user_default - # - RABBITMQ_PASSWORD=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7 + # - RABBITMQ_DEFAULT_USER=rabbitmq_user_default + # - RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7 - ENABLE_AUTH=true - PYRO_HOST=0.0.0.0 - BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"] From 7414b87c330b310bc98cd0c74e38252a07ec62d5 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Thu, 13 Feb 2025 23:59:36 -0600 Subject: [PATCH 29/30] feat(backend): rebuild templating system completely to send better emails --- .../backend/backend/data/notifications.py | 19 +- .../backend/backend/executor/manager.py | 4 +- .../backend/backend/notifications/email.py | 45 ++- .../templates/agent_run.html.jinja2 | 81 +++- .../notifications/templates/base.html.jinja2 | 352 ++++++++++++++++++ autogpt_platform/backend/backend/util/text.py | 57 ++- autogpt_platform/backend/poetry.lock | 32 +- autogpt_platform/backend/pyproject.toml | 1 + 8 files changed, 565 insertions(+), 26 deletions(-) create mode 100644 autogpt_platform/backend/backend/notifications/templates/base.html.jinja2 diff --git a/autogpt_platform/backend/backend/data/notifications.py b/autogpt_platform/backend/backend/data/notifications.py index a4549de632..1dbfb8bc4d 100644 --- a/autogpt_platform/backend/backend/data/notifications.py +++ 
b/autogpt_platform/backend/backend/data/notifications.py @@ -1,7 +1,7 @@ import logging from datetime import datetime, timedelta from enum import Enum -from typing import Annotated, Generic, Optional, TypeVar, Union +from typing import Annotated, Any, Generic, Optional, TypeVar, Union from prisma import Json from prisma.enums import NotificationType @@ -35,10 +35,10 @@ class BaseNotificationData(BaseModel): class AgentRunData(BaseNotificationData): agent_name: str credits_used: float - # remaining_balance: float execution_time: float - graph_id: str node_count: int = Field(..., description="Number of nodes executed") + graph_id: str + outputs: dict[str, Any] = Field(..., description="Outputs of the agent") class ZeroBalanceData(BaseNotificationData): @@ -203,6 +203,19 @@ class NotificationTypeOverride: NotificationType.MONTHLY_SUMMARY: "monthly_summary.html", }[self.notification_type] + @property + def subject(self) -> str: + return { + NotificationType.AGENT_RUN: "Agent Run Report", + NotificationType.ZERO_BALANCE: "You're out of credits!", + NotificationType.LOW_BALANCE: "Low Balance Warning!", + NotificationType.BLOCK_EXECUTION_FAILED: "Uh oh! Block Execution Failed", + NotificationType.CONTINUOUS_AGENT_ERROR: "Shoot! Continuous Agent Error", + NotificationType.DAILY_SUMMARY: "Here's your daily summary!", + NotificationType.WEEKLY_SUMMARY: "Look at all the cool stuff you did last week!", + NotificationType.MONTHLY_SUMMARY: "We did a lot this month!", + }[self.notification_type] + class NotificationPreference(BaseModel): user_id: str diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index 513e384b08..65c84ee1bc 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -206,13 +206,14 @@ def execute_node( # This is fine because for now, there is no block that is charged by time. 
cost = db_client.spend_credits(data, input_size + output_size, 0) + outputs: dict[str, Any] = {} for output_name, output_data in node_block.execute( input_data, **extra_exec_kwargs ): output_size += len(json.dumps(output_data)) log_metadata.info("Node produced output", **{output_name: output_data}) db_client.upsert_execution_output(node_exec_id, output_name, output_data) - + outputs[output_name] = output_data for execution in _enqueue_next_nodes( db_client=db_client, node=node, @@ -230,6 +231,7 @@ def execute_node( user_id=user_id, type=NotificationType.AGENT_RUN, data=AgentRunData( + outputs=outputs, agent_name=node_block.name, credits_used=cost, execution_time=0, diff --git a/autogpt_platform/backend/backend/notifications/email.py b/autogpt_platform/backend/backend/notifications/email.py index 6468dc65e2..748aa6c9c4 100644 --- a/autogpt_platform/backend/backend/notifications/email.py +++ b/autogpt_platform/backend/backend/notifications/email.py @@ -12,6 +12,7 @@ from backend.data.notifications import ( ) from backend.util.settings import Settings from backend.util.text import TextFormatter +from pydantic import BaseModel logger = logging.getLogger(__name__) settings = Settings() @@ -24,6 +25,12 @@ class TypedPostmarkClient(PostmarkClient): emails: EmailManager +class Template(BaseModel): + subject: str + body: str + base_template: str + + class EmailSender: def __init__(self): if settings.secrets.postmark_server_api_token: @@ -46,30 +53,44 @@ class EmailSender: if not self.postmark: logger.warning("Postmark client not initialized, email not sent") return - body = self._get_template(notification) - # use the jinja2 library to render the template - body = self.formatter.format_string(body, data) - logger.info( - f"Sending email to {user_email} with subject {"subject"} and body {body}" - ) - self._send_email(user_email, "subject", body) + template = self._get_template(notification) + + try: + subject, full_message = self.formatter.format_email( + 
base_template=template.base_template, + subject_template=template.subject, + content_template=template.body, + data=data, + unsubscribe_link="https://autogpt.com/unsubscribe", + ) + + except Exception as e: + logger.error(f"Error formatting full message: {e}") + raise e + + self._send_email(user_email, subject, full_message) def _get_template(self, notification: NotificationType): # convert the notification type to a notification type override notification_type_override = NotificationTypeOverride(notification) # find the template in templates/name.html (the .template returns with the .html) template_path = f"templates/{notification_type_override.template}.jinja2" - logger.info( + logger.debug( f"Template full path: {pathlib.Path(__file__).parent / template_path}" ) + base_template_path = f"templates/base.html.jinja2" + with open(pathlib.Path(__file__).parent / base_template_path, "r") as file: + base_template = file.read() with open(pathlib.Path(__file__).parent / template_path, "r") as file: template = file.read() - return template + return Template( + subject=notification_type_override.subject, + body=template, + base_template=base_template, + ) def _send_email(self, user_email: str, subject: str, body: str): - logger.info( - f"Sending email to {user_email} with subject {subject} and body {body}" - ) + logger.debug(f"Sending email to {user_email} with subject {subject}") self.postmark.emails.send( From=settings.config.postmark_sender_email, To=user_email, diff --git a/autogpt_platform/backend/backend/notifications/templates/agent_run.html.jinja2 b/autogpt_platform/backend/backend/notifications/templates/agent_run.html.jinja2 index 21204bc631..f4416fa9ff 100644 --- a/autogpt_platform/backend/backend/notifications/templates/agent_run.html.jinja2 +++ b/autogpt_platform/backend/backend/notifications/templates/agent_run.html.jinja2 @@ -1,6 +1,75 @@ -AGENT RUN - -{{data.name}} -{{data.node_count}} -{{data.execution_time}} -{{data.graph_id}} +{# Agent Run #} +{# 
Template variables: +data.name: the name of the agent +data.credits_used: the number of credits used by the agent +data.node_count: the number of nodes the agent ran on +data.execution_time: the time it took to run the agent +data.graph_id: the id of the graph the agent ran on +data.outputs: the dict[str, Any] of outputs of the agent +#} +

+ Hi, +

+

+ We've run your agent {{ data.name }} and it took {{ data.execution_time }} seconds to complete. +

+

+ It ran on {{ data.node_count }} nodes and used {{ data.credits_used }} credits. +

+
    + It output the following: + {# jinja2 list iteration through data.outputs #} + {% for key, value in data.outputs.items() %} +
  • {{ key }}: {{ value }}
  • + {% endfor %} +
+

+ Your feedback has been instrumental in shaping AutoGPT, and we couldn't have + done it without you. We look forward to continuing this journey together as we + bring AI-powered automation to the world. +

+

+ Thank you again for your time and support. +

\ No newline at end of file diff --git a/autogpt_platform/backend/backend/notifications/templates/base.html.jinja2 b/autogpt_platform/backend/backend/notifications/templates/base.html.jinja2 new file mode 100644 index 0000000000..3ed4035f83 --- /dev/null +++ b/autogpt_platform/backend/backend/notifications/templates/base.html.jinja2 @@ -0,0 +1,352 @@ +{# Base Template #} +{# Template variables: + data.message: the message to display in the email + data.title: the title of the email + data.unsubscribe_link: the link to unsubscribe from the email +#} + + + + + + + + + + + + + + + {{data.title}} + + + +
+ + + + + +
+ + + + + +
+ + + + + + + + +
+
+ + + + +
+ +
+
+ + + + + + +
+ {{data.message|safe}} +
+ + + + + + +
+ + + + + + +
+ + +

+ John Ababseh
Product Manager
+ john.ababseh@agpt.co +

+
+
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + +
+ + x + + + + discord + + + + website + +
+
+
+
+ AutoGPT +
+
+

+ 3rd Floor 1 Ashley Road, Cheshire, United Kingdom, WA14 2DT, Altrincham
United Kingdom +

+
+

+ You received this email because you signed up on our website.

+
+

+ Unsubscribe +

+
+
+
+
+
+ + + \ No newline at end of file diff --git a/autogpt_platform/backend/backend/util/text.py b/autogpt_platform/backend/backend/util/text.py index c867b7a40f..1eddba8563 100644 --- a/autogpt_platform/backend/backend/util/text.py +++ b/autogpt_platform/backend/backend/util/text.py @@ -1,17 +1,68 @@ from jinja2 import BaseLoader +from markupsafe import Markup from jinja2.sandbox import SandboxedEnvironment +import bleach +import logging + +logger = logging.getLogger(__name__) class TextFormatter: def __init__(self): - # Create a sandboxed environment self.env = SandboxedEnvironment(loader=BaseLoader(), autoescape=True) - - # Clear any registered filters, tests, and globals to minimize attack surface self.env.filters.clear() self.env.tests.clear() self.env.globals.clear() + self.allowed_tags = ["p", "b", "i", "u", "ul", "li", "br", "strong", "em"] + self.allowed_attributes = {"*": ["style", "class"]} + def format_string(self, template_str: str, values=None, **kwargs) -> str: + """Regular template rendering with escaping""" template = self.env.from_string(template_str) return template.render(values or {}, **kwargs) + + def format_email( + self, + subject_template: str, + base_template: str, + content_template: str, + data=None, + **kwargs, + ) -> tuple[str, str]: + """ + Special handling for email templates where content needs to be rendered as HTML + """ + # First render the content template + content = self.format_string(content_template, data, **kwargs) + + # Clean the HTML but don't escape it + clean_content = bleach.clean( + content, + tags=self.allowed_tags, + attributes=self.allowed_attributes, + strip=True, + ) + + # Mark the cleaned HTML as safe using Markup + safe_content = Markup(clean_content) + + rendered_subject_template = self.format_string(subject_template, data, **kwargs) + + # Create new env just for HTML template + html_env = SandboxedEnvironment(loader=BaseLoader(), autoescape=True) + html_env.filters["safe"] = lambda x: ( + x if isinstance(x, 
Markup) else Markup(str(x)) + ) + + # Render base template with the safe content + template = html_env.from_string(base_template) + rendered_base_template = template.render( + data={ + "message": safe_content, + "title": rendered_subject_template, + "unsubscribe_link": kwargs.get("unsubscribe_link", ""), + } + ) + + return rendered_subject_template, rendered_base_template diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock index 3645897287..f1ddaac2fa 100644 --- a/autogpt_platform/backend/poetry.lock +++ b/autogpt_platform/backend/poetry.lock @@ -365,6 +365,24 @@ d = ["aiohttp (>=3.10)"] jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] uvloop = ["uvloop (>=0.15.2)"] +[[package]] +name = "bleach" +version = "6.2.0" +description = "An easy safelist-based HTML-sanitizing tool." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e"}, + {file = "bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f"}, +] + +[package.dependencies] +webencodings = "*" + +[package.extras] +css = ["tinycss2 (>=1.1.0,<1.5)"] + [[package]] name = "cachetools" version = "5.5.1" @@ -4799,6 +4817,18 @@ files = [ [package.dependencies] anyio = ">=3.0.0" +[[package]] +name = "webencodings" +version = "0.5.1" +description = "Character encoding aliases for legacy web content" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, + {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, +] + [[package]] name = "websocket-client" version = "1.8.0" @@ -5137,4 +5167,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = ">=3.10,<3.13" 
-content-hash = "e06603a4c5a82e6e40e8b76c7c4a5dc0b60b31fbe5e47cce3b89d66c53bb704c" +content-hash = "fd396e770353328ac3fe71aaba5b73b5a147dd445e4866e5328d6173a13bf7a3" diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml index 0568f2e8f1..42fbe65a6e 100644 --- a/autogpt_platform/backend/pyproject.toml +++ b/autogpt_platform/backend/pyproject.toml @@ -62,6 +62,7 @@ uvicorn = { extras = ["standard"], version = "^0.34.0" } websockets = "^13.1" youtube-transcript-api = "^0.6.2" # NOTE: please insert new dependencies in their alphabetical location +bleach = "^6.2.0" [tool.poetry.group.dev.dependencies] aiohappyeyeballs = "^2.4.4" From c7b4a939ef6b6b29bb2efdf3d47f29bfd964cd0e Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Fri, 14 Feb 2025 00:00:23 -0600 Subject: [PATCH 30/30] fix(backend): linting --- autogpt_platform/backend/backend/notifications/email.py | 4 ++-- autogpt_platform/backend/backend/util/text.py | 9 +++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/autogpt_platform/backend/backend/notifications/email.py b/autogpt_platform/backend/backend/notifications/email.py index 748aa6c9c4..5bb09c85fd 100644 --- a/autogpt_platform/backend/backend/notifications/email.py +++ b/autogpt_platform/backend/backend/notifications/email.py @@ -4,6 +4,7 @@ import pathlib from postmarker.core import PostmarkClient from postmarker.models.emails import EmailManager from prisma.enums import NotificationType +from pydantic import BaseModel from backend.data.notifications import ( NotificationEventModel, @@ -12,7 +13,6 @@ from backend.data.notifications import ( ) from backend.util.settings import Settings from backend.util.text import TextFormatter -from pydantic import BaseModel logger = logging.getLogger(__name__) settings = Settings() @@ -78,7 +78,7 @@ class EmailSender: logger.debug( f"Template full path: {pathlib.Path(__file__).parent / template_path}" ) - base_template_path = f"templates/base.html.jinja2" + 
base_template_path = "templates/base.html.jinja2" with open(pathlib.Path(__file__).parent / base_template_path, "r") as file: base_template = file.read() with open(pathlib.Path(__file__).parent / template_path, "r") as file: diff --git a/autogpt_platform/backend/backend/util/text.py b/autogpt_platform/backend/backend/util/text.py index 1eddba8563..04ac5a0942 100644 --- a/autogpt_platform/backend/backend/util/text.py +++ b/autogpt_platform/backend/backend/util/text.py @@ -1,9 +1,10 @@ -from jinja2 import BaseLoader -from markupsafe import Markup -from jinja2.sandbox import SandboxedEnvironment -import bleach import logging +import bleach +from jinja2 import BaseLoader +from jinja2.sandbox import SandboxedEnvironment +from markupsafe import Markup + logger = logging.getLogger(__name__)