From af65058bb71985f64e26ecd91d944f92e8e22ab6 Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Wed, 12 Mar 2025 12:17:38 +0700 Subject: [PATCH 01/61] fix(platform): Make LibraryAgent image as initial StoreListing image (#9610) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes 🏗️ We've been auto-generating the thumbnail image for the agent in the library, this PR is propagating that image as an initial image for the store listing. This PR also removes the fetch all query for getting the count for paginating library agent. ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Create an library agent and try to create a store listing. image --- .../backend/backend/server/v2/library/db.py | 8 +--- .../backend/backend/server/v2/store/db.py | 40 +++++++------------ .../backend/server/v2/store/image_gen.py | 10 ++++- .../backend/backend/server/v2/store/model.py | 1 + .../components/agptui/PublishAgentSelect.tsx | 2 - .../agptui/composite/PublishAgentPopout.tsx | 5 ++- .../src/lib/autogpt-server-api/types.ts | 1 + 7 files changed, 30 insertions(+), 37 deletions(-) diff --git a/autogpt_platform/backend/backend/server/v2/library/db.py b/autogpt_platform/backend/backend/server/v2/library/db.py index b878e8d2dc..7f947625e8 100644 --- a/autogpt_platform/backend/backend/server/v2/library/db.py +++ b/autogpt_platform/backend/backend/server/v2/library/db.py @@ -1,4 +1,3 @@ -import asyncio import logging from typing import Optional @@ -186,12 +185,7 @@ async def add_generated_agent_image( try: if not (image_url := await store_media.check_media_exists(user_id, filename)): # Generate agent image as JPEG - if config.use_agent_image_generation_v2: - image = await asyncio.to_thread( - store_image_gen.generate_agent_image_v2, graph=graph - ) - else: - image = await store_image_gen.generate_agent_image(agent=graph) + image = await store_image_gen.generate_agent_image(graph) # Create UploadFile with the correct filename and content_type image_file = fastapi.UploadFile(file=image, filename=filename) diff --git a/autogpt_platform/backend/backend/server/v2/store/db.py b/autogpt_platform/backend/backend/server/v2/store/db.py index 657a4c32cf..662275d5b1 100644 --- a/autogpt_platform/backend/backend/server/v2/store/db.py +++ b/autogpt_platform/backend/backend/server/v2/store/db.py @@ -706,36 +706,24 @@ async def get_my_agents( logger.debug(f"Getting my agents for user {user_id}, page={page}") try: - agents_with_max_version = await prisma.models.AgentGraph.prisma().find_many( - where=prisma.types.AgentGraphWhereInput( - userId=user_id, StoreListing={"none": {"isDeleted": False}} - ), - order=[{"version": "desc"}], - distinct=["id"], + search_filter = prisma.types.LibraryAgentWhereInput( + userId=user_id, + Agent={"is": {"StoreListing": {"none": {"isDeleted": False}}}}, + isArchived=False, + isDeleted=False, + ) + + library_agents = await prisma.models.LibraryAgent.prisma().find_many( + where=search_filter, + order=[{"agentVersion": "desc"}], skip=(page - 1) * page_size, take=page_size, + include={"Agent": True}, ) - # store_listings = await prisma.models.StoreListing.prisma().find_many( - # where=prisma.types.StoreListingWhereInput( - # isDeleted=False, - # ), - # ) - - total = len( - await prisma.models.AgentGraph.prisma().find_many( - where=prisma.types.AgentGraphWhereInput( - userId=user_id, StoreListing={"none": {"isDeleted": 
False}} - ), - order=[{"version": "desc"}], - distinct=["id"], - ) - ) - + total = await prisma.models.LibraryAgent.prisma().count(where=search_filter) total_pages = (total + page_size - 1) // page_size - agents = agents_with_max_version - my_agents = [ backend.server.v2.store.model.MyAgent( agent_id=agent.id, @@ -743,8 +731,10 @@ async def get_my_agents( agent_name=agent.name or "", last_edited=agent.updatedAt or agent.createdAt, description=agent.description or "", + agent_image=entry.imageUrl, ) - for agent in agents + for entry in library_agents + if (agent := entry.Agent) ] return backend.server.v2.store.model.MyAgentsResponse( diff --git a/autogpt_platform/backend/backend/server/v2/store/image_gen.py b/autogpt_platform/backend/backend/server/v2/store/image_gen.py index 4f7ac353fb..1da4129fe1 100644 --- a/autogpt_platform/backend/backend/server/v2/store/image_gen.py +++ b/autogpt_platform/backend/backend/server/v2/store/image_gen.py @@ -1,3 +1,4 @@ +import asyncio import io import logging from enum import Enum @@ -34,6 +35,13 @@ class ImageStyle(str, Enum): DIGITAL_ART = "digital art" +async def generate_agent_image(agent: Graph | AgentGraph) -> io.BytesIO: + if settings.config.use_agent_image_generation_v2: + return await asyncio.to_thread(generate_agent_image_v2, graph=agent) + else: + return await generate_agent_image_v1(agent=agent) + + def generate_agent_image_v2(graph: Graph | AgentGraph) -> io.BytesIO: """ Generate an image for an agent using Ideogram model. @@ -91,7 +99,7 @@ def generate_agent_image_v2(graph: Graph | AgentGraph) -> io.BytesIO: return io.BytesIO(requests.get(url).content) -async def generate_agent_image(agent: Graph | AgentGraph) -> io.BytesIO: +async def generate_agent_image_v1(agent: Graph | AgentGraph) -> io.BytesIO: """ Generate an image for an agent using Flux model via Replicate API. 
diff --git a/autogpt_platform/backend/backend/server/v2/store/model.py b/autogpt_platform/backend/backend/server/v2/store/model.py index 7f09e853ae..9ed564dc53 100644 --- a/autogpt_platform/backend/backend/server/v2/store/model.py +++ b/autogpt_platform/backend/backend/server/v2/store/model.py @@ -24,6 +24,7 @@ class MyAgent(pydantic.BaseModel): agent_id: str agent_version: int agent_name: str + agent_image: str | None = None description: str last_edited: datetime.datetime diff --git a/autogpt_platform/frontend/src/components/agptui/PublishAgentSelect.tsx b/autogpt_platform/frontend/src/components/agptui/PublishAgentSelect.tsx index a71a8930eb..36ecc424f3 100644 --- a/autogpt_platform/frontend/src/components/agptui/PublishAgentSelect.tsx +++ b/autogpt_platform/frontend/src/components/agptui/PublishAgentSelect.tsx @@ -30,7 +30,6 @@ export const PublishAgentSelect: React.FC = ({ onClose, onOpenBuilder, }) => { - const [selectedAgent, setSelectedAgent] = React.useState(null); const [selectedAgentId, setSelectedAgentId] = React.useState( null, ); @@ -43,7 +42,6 @@ export const PublishAgentSelect: React.FC = ({ agentId: string, agentVersion: number, ) => { - setSelectedAgent(agentName); setSelectedAgentId(agentId); setSelectedAgentVersion(agentVersion); onSelect(agentId, agentVersion); diff --git a/autogpt_platform/frontend/src/components/agptui/composite/PublishAgentPopout.tsx b/autogpt_platform/frontend/src/components/agptui/composite/PublishAgentPopout.tsx index f410090234..4992ce3f86 100644 --- a/autogpt_platform/frontend/src/components/agptui/composite/PublishAgentPopout.tsx +++ b/autogpt_platform/frontend/src/components/agptui/composite/PublishAgentPopout.tsx @@ -130,7 +130,7 @@ export const PublishAgentPopout: React.FC = ({ title: name, subheader: "", description: description, - thumbnailSrc: "", + thumbnailSrc: selectedAgentData?.agent_image || "", youtubeLink: "", category: "", slug: name.replace(/ /g, "-"), @@ -222,7 +222,8 @@ export const PublishAgentPopout: React.FC = ({ id: agent.agent_id, version: agent.agent_version, lastEdited: agent.last_edited, - imageSrc: "https://picsum.photos/300/200", // Fallback image if none provided + imageSrc: + agent.agent_image || "https://picsum.photos/300/200", })) || [] } onSelect={handleAgentSelect} diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index 6e4e7c03f8..a9995b377e 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -649,6 +649,7 @@ export type MyAgent = { agent_id: GraphID; agent_version: number; agent_name: string; + agent_image: string | null; last_edited: string; description: string; }; From 942ac0bae43e31c2083da75319ad21ae2692d55d Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Wed, 12 Mar 2025 00:19:23 -0500 Subject: [PATCH 02/61] feat(backend): baseline summary processing (#9596) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want to be able to process emails on a scheduled basis for summaries. 
This adds the baselines for that ### Changes 🏗️ - Adds new tooling to Scheduluer to handle the in-memory schedule for the weekly processing - Adds new exposes to notification manager to handle the different data models for scheduled emails - adds new models to the notification data models to handle the different requirements for scheduled emails, closely paralleling the existing notification ones - Adds new email template Note: After testing, email sending was disabled until the template and data filling are done later down the line. We don't want to email people random stuff, ya know? ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Test sending an email on the scheduled basis - [x] Make sure you get the email, ignoring the fact that all the data isn't real inside it --------- Co-authored-by: Zamil Majdy --- .../backend/backend/data/notifications.py | 104 ++++++- .../backend/backend/executor/scheduler.py | 33 +++ .../backend/backend/notifications/email.py | 7 +- .../backend/notifications/notifications.py | 265 ++++++++++++++++-- .../templates/weekly_summary.html.jinja2 | 27 ++ 5 files changed, 403 insertions(+), 33 deletions(-) create mode 100644 autogpt_platform/backend/backend/notifications/templates/weekly_summary.html.jinja2 diff --git a/autogpt_platform/backend/backend/data/notifications.py b/autogpt_platform/backend/backend/data/notifications.py index f61517d1f5..b42605a2d9 100644 --- a/autogpt_platform/backend/backend/data/notifications.py +++ b/autogpt_platform/backend/backend/data/notifications.py @@ -1,5 +1,5 @@ import logging -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone from enum import Enum from typing import Annotated, Any, Generic, Optional, TypeVar, Union @@ -18,7 +18,12 @@ from .db import transaction logger = logging.getLogger(__name__) -T_co = TypeVar("T_co", bound="BaseNotificationData", covariant=True) +NotificationDataType_co = TypeVar( + "NotificationDataType_co", bound="BaseNotificationData", covariant=True +) +SummaryParamsType_co = TypeVar( + "SummaryParamsType_co", bound="BaseSummaryParams", covariant=True +) class QueueType(Enum): @@ -47,6 +52,13 @@ class ZeroBalanceData(BaseNotificationData): last_transaction_time: datetime top_up_link: str + @field_validator("last_transaction_time") + @classmethod + def validate_timezone(cls, value: datetime): + if value.tzinfo is None: + raise ValueError("datetime must have timezone information") + return value + class LowBalanceData(BaseNotificationData): agent_name: str = Field(..., description="Name of the agent") @@ -75,6 +87,13 @@ class ContinuousAgentErrorData(BaseNotificationData): error_time: datetime attempts: int = Field(..., description="Number of retry attempts made") + @field_validator("start_time", "error_time") + @classmethod + def validate_timezone(cls, value: datetime): + if value.tzinfo is None: + raise ValueError("datetime must have timezone information") + return value + class BaseSummaryData(BaseNotificationData): total_credits_used: float @@ -87,18 +106,53 @@ class BaseSummaryData(BaseNotificationData): cost_breakdown: dict[str, float] +class BaseSummaryParams(BaseModel): + pass + + +class DailySummaryParams(BaseSummaryParams): + date: datetime + + @field_validator("date") + def validate_timezone(cls, value): + if value.tzinfo is None: + raise ValueError("datetime must have timezone information") + return value + 
+ +class WeeklySummaryParams(BaseSummaryParams): + start_date: datetime + end_date: datetime + + @field_validator("start_date", "end_date") + def validate_timezone(cls, value): + if value.tzinfo is None: + raise ValueError("datetime must have timezone information") + return value + + class DailySummaryData(BaseSummaryData): date: datetime + @field_validator("date") + def validate_timezone(cls, value): + if value.tzinfo is None: + raise ValueError("datetime must have timezone information") + return value + class WeeklySummaryData(BaseSummaryData): start_date: datetime end_date: datetime - week_number: int - year: int + + @field_validator("start_date", "end_date") + def validate_timezone(cls, value): + if value.tzinfo is None: + raise ValueError("datetime must have timezone information") + return value -class MonthlySummaryData(BaseSummaryData): +class MonthlySummaryData(BaseNotificationData): month: int year: int @@ -125,6 +179,7 @@ NotificationData = Annotated[ WeeklySummaryData, DailySummaryData, RefundRequestData, + BaseSummaryData, ], Field(discriminator="type"), ] @@ -134,15 +189,22 @@ class NotificationEventDTO(BaseModel): user_id: str type: NotificationType data: dict - created_at: datetime = Field(default_factory=datetime.now) + created_at: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc)) retry_count: int = 0 -class NotificationEventModel(BaseModel, Generic[T_co]): +class SummaryParamsEventDTO(BaseModel): user_id: str type: NotificationType - data: T_co - created_at: datetime = Field(default_factory=datetime.now) + data: dict + created_at: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc)) + + +class NotificationEventModel(BaseModel, Generic[NotificationDataType_co]): + user_id: str + type: NotificationType + data: NotificationDataType_co + created_at: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc)) @property def strategy(self) -> QueueType: @@ -159,7 +221,14 @@ class NotificationEventModel(BaseModel, Generic[T_co]): return NotificationTypeOverride(self.type).template -def get_data_type( +class SummaryParamsEventModel(BaseModel, Generic[SummaryParamsType_co]): + user_id: str + type: NotificationType + data: SummaryParamsType_co + created_at: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc)) + + +def get_notif_data_type( notification_type: NotificationType, ) -> type[BaseNotificationData]: return { @@ -176,11 +245,20 @@ def get_data_type( }[notification_type] +def get_summary_params_type( + notification_type: NotificationType, +) -> type[BaseSummaryParams]: + return { + NotificationType.DAILY_SUMMARY: DailySummaryParams, + NotificationType.WEEKLY_SUMMARY: WeeklySummaryParams, + }[notification_type] + + class NotificationBatch(BaseModel): user_id: str events: list[NotificationEvent] strategy: QueueType - last_update: datetime = datetime.now() + last_update: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc)) class NotificationResult(BaseModel): @@ -258,7 +336,9 @@ class NotificationPreference(BaseModel): ) daily_limit: int = 10 # Max emails per day emails_sent_today: int = 0 - last_reset_date: datetime = Field(default_factory=datetime.now) + last_reset_date: datetime = Field( + default_factory=lambda: datetime.now(timezone.utc) + ) def get_batch_delay(notification_type: NotificationType) -> timedelta: diff --git a/autogpt_platform/backend/backend/executor/scheduler.py b/autogpt_platform/backend/backend/executor/scheduler.py index 922d18a663..e2ac74e0af 100644 --- 
a/autogpt_platform/backend/backend/executor/scheduler.py +++ b/autogpt_platform/backend/backend/executor/scheduler.py @@ -5,6 +5,7 @@ from urllib.parse import parse_qs, urlencode, urlparse, urlunparse from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED from apscheduler.job import Job as JobObj +from apscheduler.jobstores.memory import MemoryJobStore from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore from apscheduler.schedulers.blocking import BlockingScheduler from apscheduler.triggers.cron import CronTrigger @@ -93,9 +94,18 @@ def process_existing_batches(**kwargs): logger.exception(f"Error processing existing batches: {e}") +def process_weekly_summary(**kwargs): + try: + log("Processing weekly summary") + get_notification_client().queue_weekly_summary() + except Exception as e: + logger.exception(f"Error processing weekly summary: {e}") + + class Jobstores(Enum): EXECUTION = "execution" BATCHED_NOTIFICATIONS = "batched_notifications" + WEEKLY_NOTIFICATIONS = "weekly_notifications" class ExecutionJobArgs(BaseModel): @@ -189,6 +199,8 @@ class Scheduler(AppService): metadata=MetaData(schema=db_schema), tablename="apscheduler_jobs_batched_notifications", ), + # These don't really need persistence + Jobstores.WEEKLY_NOTIFICATIONS.value: MemoryJobStore(), } ) self.scheduler.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR) @@ -242,6 +254,9 @@ class Scheduler(AppService): ) -> list[ExecutionJobInfo]: schedules = [] for job in self.scheduler.get_jobs(jobstore=Jobstores.EXECUTION.value): + logger.info( + f"Found job {job.id} with cron schedule {job.trigger} and args {job.kwargs}" + ) job_args = ExecutionJobArgs(**job.kwargs) if ( job.next_run_time is not None @@ -271,3 +286,21 @@ class Scheduler(AppService): ) log(f"Added job {job.id} with cron schedule '{cron}' input data: {data}") return NotificationJobInfo.from_db(job_args, job) + + @expose + def add_weekly_notification_schedule(self, cron: str) -> NotificationJobInfo: + + job = self.scheduler.add_job( + process_weekly_summary, + CronTrigger.from_crontab(cron), + kwargs={}, + replace_existing=True, + jobstore=Jobstores.WEEKLY_NOTIFICATIONS.value, + ) + log(f"Added job {job.id} with cron schedule '{cron}'") + return NotificationJobInfo.from_db( + NotificationJobArgs( + cron=cron, notification_types=[NotificationType.WEEKLY_SUMMARY] + ), + job, + ) diff --git a/autogpt_platform/backend/backend/notifications/email.py b/autogpt_platform/backend/backend/notifications/email.py index 58144c0ae0..4ac0c07760 100644 --- a/autogpt_platform/backend/backend/notifications/email.py +++ b/autogpt_platform/backend/backend/notifications/email.py @@ -7,9 +7,9 @@ from prisma.enums import NotificationType from pydantic import BaseModel from backend.data.notifications import ( + NotificationDataType_co, NotificationEventModel, NotificationTypeOverride, - T_co, ) from backend.util.settings import Settings from backend.util.text import TextFormatter @@ -48,7 +48,10 @@ class EmailSender: self, notification: NotificationType, user_email: str, - data: NotificationEventModel[T_co] | list[NotificationEventModel[T_co]], + data: ( + NotificationEventModel[NotificationDataType_co] + | list[NotificationEventModel[NotificationDataType_co]] + ), user_unsub_link: str | None = None, ): """Send an email to a user using a template pulled from the notification type""" diff --git a/autogpt_platform/backend/backend/notifications/notifications.py b/autogpt_platform/backend/backend/notifications/notifications.py index b085f407d9..5222e5c2d4 
100644 --- a/autogpt_platform/backend/backend/notifications/notifications.py +++ b/autogpt_platform/backend/backend/notifications/notifications.py @@ -1,6 +1,6 @@ import logging import time -from datetime import datetime, timezone +from datetime import datetime, timedelta, timezone from typing import Callable import aio_pika @@ -10,21 +10,32 @@ from prisma.enums import NotificationType from pydantic import BaseModel from backend.data.notifications import ( + BaseSummaryData, + BaseSummaryParams, + DailySummaryData, + DailySummaryParams, NotificationEventDTO, NotificationEventModel, NotificationResult, + NotificationTypeOverride, QueueType, + SummaryParamsEventDTO, + SummaryParamsEventModel, + WeeklySummaryData, + WeeklySummaryParams, create_or_add_to_user_notification_batch, empty_user_notification_batch, get_all_batches_by_type, get_batch_delay, - get_data_type, + get_notif_data_type, + get_summary_params_type, get_user_notification_batch, get_user_notification_oldest_message_in_batch, ) from backend.data.rabbitmq import Exchange, ExchangeType, Queue, RabbitMQConfig from backend.data.user import ( generate_unsubscribe_link, + get_active_user_ids_in_timerange, get_user_email_by_id, get_user_email_verification, get_user_notification_preference, @@ -68,6 +79,17 @@ def create_notification_config() -> RabbitMQConfig: "x-dead-letter-routing-key": "failed.admin", }, ), + # Summary notification queues + Queue( + name="summary_notifications", + exchange=notification_exchange, + routing_key="notification.summary.#", + arguments={ + "x-dead-letter-exchange": dead_letter_exchange.name, + "x-dead-letter-routing-key": "failed.summary.weekly", + "x-dead-letter-routing-key": "failed.summary", + }, + ), # Batch Queue Queue( name="batch_notifications", @@ -116,19 +138,53 @@ class NotificationManager(AppService): def get_port(cls) -> int: return settings.config.notification_service_port - def get_routing_key(self, event: NotificationEventModel) -> str: + def get_routing_key(self, event_type: NotificationType) -> str: + strategy = NotificationTypeOverride(event_type).strategy """Get the appropriate routing key for an event""" - if event.strategy == QueueType.IMMEDIATE: - return f"notification.immediate.{event.type.value}" - elif event.strategy == QueueType.BACKOFF: - return f"notification.backoff.{event.type.value}" - elif event.strategy == QueueType.ADMIN: - return f"notification.admin.{event.type.value}" - elif event.strategy == QueueType.BATCH: - return f"notification.batch.{event.type.value}" - elif event.strategy == QueueType.SUMMARY: - return f"notification.summary.{event.type.value}" - return f"notification.{event.type.value}" + if strategy == QueueType.IMMEDIATE: + return f"notification.immediate.{event_type.value}" + elif strategy == QueueType.BACKOFF: + return f"notification.backoff.{event_type.value}" + elif strategy == QueueType.ADMIN: + return f"notification.admin.{event_type.value}" + elif strategy == QueueType.BATCH: + return f"notification.batch.{event_type.value}" + elif strategy == QueueType.SUMMARY: + return f"notification.summary.{event_type.value}" + return f"notification.{event_type.value}" + + @expose + def queue_weekly_summary(self): + """Process weekly summary for specified notification types""" + try: + logger.info("Processing weekly summary queuing operation") + processed_count = 0 + current_time = datetime.now(tz=timezone.utc) + start_time = current_time - timedelta(days=7) + users = self.run_and_wait( + get_active_user_ids_in_timerange( + end_time=current_time.isoformat(), + 
start_time=start_time.isoformat(), + ) + ) + for user in users: + + self._queue_scheduled_notification( + SummaryParamsEventDTO( + user_id=user, + type=NotificationType.WEEKLY_SUMMARY, + data=WeeklySummaryParams( + start_date=start_time, + end_date=current_time, + ).model_dump(), + ), + ) + processed_count += 1 + + logger.info(f"Processed {processed_count} weekly summaries into queue") + + except Exception as e: + logger.exception(f"Error processing weekly summary: {e}") @expose def process_existing_batches(self, notification_types: list[NotificationType]): @@ -206,7 +262,7 @@ class NotificationManager(AppService): events = [ NotificationEventModel[ - get_data_type(db_event.type) + get_notif_data_type(db_event.type) ].model_validate( { "user_id": batch.userId, @@ -259,9 +315,9 @@ class NotificationManager(AppService): logger.info(f"Received Request to queue {event=}") # Workaround for not being able to serialize generics over the expose bus parsed_event = NotificationEventModel[ - get_data_type(event.type) + get_notif_data_type(event.type) ].model_validate(event.model_dump()) - routing_key = self.get_routing_key(parsed_event) + routing_key = self.get_routing_key(parsed_event.type) message = parsed_event.model_dump_json() logger.info(f"Received Request to queue {message=}") @@ -288,6 +344,36 @@ class NotificationManager(AppService): logger.exception(f"Error queueing notification: {e}") return NotificationResult(success=False, message=str(e)) + def _queue_scheduled_notification(self, event: SummaryParamsEventDTO): + """Queue a scheduled notification - exposed method for other services to call""" + try: + logger.info(f"Received Request to queue scheduled notification {event=}") + + parsed_event = SummaryParamsEventModel[ + get_summary_params_type(event.type) + ].model_validate(event.model_dump()) + + routing_key = self.get_routing_key(event.type) + message = parsed_event.model_dump_json() + + logger.info(f"Received Request to queue {message=}") + + exchange = "notifications" + + # Publish to RabbitMQ + self.run_and_wait( + self.rabbit.publish_message( + routing_key=routing_key, + message=message, + exchange=next( + ex for ex in self.rabbit_config.exchanges if ex.name == exchange + ), + ) + ) + + except Exception as e: + logger.exception(f"Error queueing notification: {e}") + def _should_email_user_based_on_preference( self, user_id: str, event_type: NotificationType ) -> bool: @@ -299,6 +385,86 @@ class NotificationManager(AppService): # only if both are true, should we email this person return validated_email and preference + async def _gather_summary_data( + self, user_id: str, event_type: NotificationType, params: BaseSummaryParams + ) -> BaseSummaryData: + """Gathers the data to build a summary notification""" + + logger.info( + f"Gathering summary data for {user_id} and {event_type} wiht {params=}" + ) + + # total_credits_used = self.run_and_wait( + # get_total_credits_used(user_id, start_time, end_time) + # ) + + # total_executions = self.run_and_wait( + # get_total_executions(user_id, start_time, end_time) + # ) + + # most_used_agent = self.run_and_wait( + # get_most_used_agent(user_id, start_time, end_time) + # ) + + # execution_times = self.run_and_wait( + # get_execution_time(user_id, start_time, end_time) + # ) + + # runs = self.run_and_wait( + # get_runs(user_id, start_time, end_time) + # ) + total_credits_used = 3.0 + total_executions = 2 + most_used_agent = {"name": "Some"} + execution_times = [1, 2, 3] + runs = [{"status": "COMPLETED"}, {"status": "FAILED"}] + + successful_runs 
= len([run for run in runs if run["status"] == "COMPLETED"]) + failed_runs = len([run for run in runs if run["status"] != "COMPLETED"]) + average_execution_time = ( + sum(execution_times) / len(execution_times) if execution_times else 0 + ) + # cost_breakdown = self.run_and_wait( + # get_cost_breakdown(user_id, start_time, end_time) + # ) + + cost_breakdown = { + "agent1": 1.0, + "agent2": 2.0, + } + + if event_type == NotificationType.DAILY_SUMMARY and isinstance( + params, DailySummaryParams + ): + return DailySummaryData( + total_credits_used=total_credits_used, + total_executions=total_executions, + most_used_agent=most_used_agent["name"], + total_execution_time=sum(execution_times), + successful_runs=successful_runs, + failed_runs=failed_runs, + average_execution_time=average_execution_time, + cost_breakdown=cost_breakdown, + date=params.date, + ) + elif event_type == NotificationType.WEEKLY_SUMMARY and isinstance( + params, WeeklySummaryParams + ): + return WeeklySummaryData( + total_credits_used=total_credits_used, + total_executions=total_executions, + most_used_agent=most_used_agent["name"], + total_execution_time=sum(execution_times), + successful_runs=successful_runs, + failed_runs=failed_runs, + average_execution_time=average_execution_time, + cost_breakdown=cost_breakdown, + start_date=params.start_date, + end_date=params.end_date, + ) + else: + raise ValueError("Invalid event type or params") + async def _should_batch( self, user_id: str, event_type: NotificationType, event: NotificationEventModel ) -> bool: @@ -329,7 +495,7 @@ class NotificationManager(AppService): try: event = NotificationEventDTO.model_validate_json(message) model = NotificationEventModel[ - get_data_type(event.type) + get_notif_data_type(event.type) ].model_validate_json(message) return NotificationEvent(event=event, model=model) except Exception as e: @@ -429,7 +595,9 @@ class NotificationManager(AppService): unsub_link = generate_unsubscribe_link(event.user_id) batch_messages = [ - NotificationEventModel[get_data_type(db_event.type)].model_validate( + NotificationEventModel[ + get_notif_data_type(db_event.type) + ].model_validate( { "user_id": event.user_id, "type": db_event.type, @@ -453,6 +621,53 @@ class NotificationManager(AppService): logger.exception(f"Error processing notification for batch queue: {e}") return False + def _process_summary(self, message: str) -> bool: + """Process a single notification with a summary strategy, returning whether to put into the failed queue""" + try: + logger.info(f"Processing summary notification: {message}") + event = SummaryParamsEventDTO.model_validate_json(message) + model = SummaryParamsEventModel[ + get_summary_params_type(event.type) + ].model_validate_json(message) + + logger.info(f"Processing summary notification: {model}") + + recipient_email = self.run_and_wait(get_user_email_by_id(event.user_id)) + if not recipient_email: + logger.error(f"User email not found for user {event.user_id}") + return False + should_send = self._should_email_user_based_on_preference( + event.user_id, event.type + ) + if not should_send: + logger.info( + f"User {event.user_id} does not want to receive {event.type} notifications" + ) + return True + + summary_data = self.run_and_wait( + self._gather_summary_data(event.user_id, event.type, model.data) + ) + + unsub_link = generate_unsubscribe_link(event.user_id) + + data = NotificationEventModel( + user_id=event.user_id, + type=event.type, + data=summary_data, + ) + + self.email_sender.send_templated( + notification=event.type, 
+ user_email=recipient_email, + data=data, + user_unsub_link=unsub_link, + ) + return True + except Exception as e: + logger.exception(f"Error processing notification for summary queue: {e}") + return False + def _run_queue( self, queue: aio_pika.abc.AbstractQueue, @@ -493,6 +708,10 @@ class NotificationManager(AppService): data={}, cron="0 * * * *", ) + # get_scheduler().add_weekly_notification_schedule( + # # weekly on Friday at 12pm + # cron="0 12 * * 5", + # ) logger.info("Scheduled notification cleanup") except Exception as e: logger.error(f"Error scheduling notification cleanup: {e}") @@ -507,6 +726,8 @@ class NotificationManager(AppService): admin_queue = self.run_and_wait(channel.get_queue("admin_notifications")) + summary_queue = self.run_and_wait(channel.get_queue("summary_notifications")) + while self.running: try: self._run_queue( @@ -525,6 +746,12 @@ class NotificationManager(AppService): error_queue_name="batch_notifications", ) + self._run_queue( + queue=summary_queue, + process_func=self._process_summary, + error_queue_name="summary_notifications", + ) + time.sleep(0.1) except QueueEmpty as e: diff --git a/autogpt_platform/backend/backend/notifications/templates/weekly_summary.html.jinja2 b/autogpt_platform/backend/backend/notifications/templates/weekly_summary.html.jinja2 new file mode 100644 index 0000000000..c561851a60 --- /dev/null +++ b/autogpt_platform/backend/backend/notifications/templates/weekly_summary.html.jinja2 @@ -0,0 +1,27 @@ +{# Weekly Summary #} +{# Template variables: +data: the stuff below +data.start_date: the start date of the summary +data.end_date: the end date of the summary +data.total_credits_used: the total credits used during the summary +data.total_executions: the total number of executions during the summary +data.most_used_agent: the most used agent's nameduring the summary +data.total_execution_time: the total execution time during the summary +data.successful_runs: the total number of successful runs during the summary +data.failed_runs: the total number of failed runs during the summary +data.average_execution_time: the average execution time during the summary +data.cost_breakdown: the cost breakdown during the summary +#} + +

+Weekly Summary
+
+Start Date: {{ data.start_date }}
+End Date: {{ data.end_date }}
+Total Credits Used: {{ data.total_credits_used }}
+Total Executions: {{ data.total_executions }}
+Most Used Agent: {{ data.most_used_agent }}
+Total Execution Time: {{ data.total_execution_time }}
+Successful Runs: {{ data.successful_runs }}
+Failed Runs: {{ data.failed_runs }}
+Average Execution Time: {{ data.average_execution_time }}
+Cost Breakdown: {{ data.cost_breakdown }}
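The new `weekly_summary.html.jinja2` template above is what `EmailSender.send_templated` fills in for `WEEKLY_SUMMARY` notifications. As an illustration only, here is a minimal Jinja2 rendering sketch; the template directory and the sample values (echoing the placeholder data in `_gather_summary_data`) are assumptions for illustration, not part of this patch:

```python
# Standalone sketch (not part of this patch): render weekly_summary.html.jinja2
# with sample data shaped like WeeklySummaryData. The loader path and the values
# below are assumptions taken from the diff above.
from datetime import datetime, timezone

from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("backend/notifications/templates"))
template = env.get_template("weekly_summary.html.jinja2")

html = template.render(
    data={
        "start_date": datetime(2025, 3, 3, tzinfo=timezone.utc),
        "end_date": datetime(2025, 3, 10, tzinfo=timezone.utc),
        "total_credits_used": 3.0,
        "total_executions": 2,
        "most_used_agent": "Some",
        "total_execution_time": 6,
        "successful_runs": 1,
        "failed_runs": 1,
        "average_execution_time": 2.0,
        "cost_breakdown": {"agent1": 1.0, "agent2": 2.0},
    }
)
print(html)
```

Running this prints the HTML body that would be emailed once real summary data replaces the placeholder values.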

From c179a492186b6b8ff14939d775397cf8b02b4208 Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Wed, 12 Mar 2025 13:12:09 +0700 Subject: [PATCH 03/61] fix(platform): Make LibraryAgent image as initial StoreListing image - Followup (#9617) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes 🏗️ This is a follow-up of https://github.com/Significant-Gravitas/AutoGPT/pull/9610 * Addressing the PR comments described in the mentioned PR * Removed debug logging * Fix image state loading logic on agent upload process ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Create an library agent and try to create a store listing. image --- .../backend/notifications/notifications.py | 1 - .../backend/backend/server/v2/store/db.py | 28 ++--- .../src/app/reset_password/actions.ts | 1 - .../src/components/agptui/AgentInfo.tsx | 1 - .../src/components/agptui/BecomeACreator.tsx | 1 - .../src/components/agptui/ProfileInfoForm.tsx | 2 - .../agptui/PublishAgentSelectInfo.tsx | 22 ++-- .../src/components/agptui/StoreCard.tsx | 26 ++-- .../agptui/composite/PublishAgentPopout.tsx | 3 - .../components/library/library-agent-card.tsx | 116 +++++++++--------- .../components/library/library-agent-list.tsx | 1 - .../frontend/src/hooks/useAgentGraph.tsx | 7 +- .../src/lib/autogpt-server-api/client.ts | 6 +- 13 files changed, 103 insertions(+), 112 deletions(-) diff --git a/autogpt_platform/backend/backend/notifications/notifications.py b/autogpt_platform/backend/backend/notifications/notifications.py index 5222e5c2d4..9cdcebda88 100644 --- a/autogpt_platform/backend/backend/notifications/notifications.py +++ b/autogpt_platform/backend/backend/notifications/notifications.py @@ -86,7 +86,6 @@ def create_notification_config() -> RabbitMQConfig: routing_key="notification.summary.#", arguments={ "x-dead-letter-exchange": dead_letter_exchange.name, - "x-dead-letter-routing-key": "failed.summary.weekly", "x-dead-letter-routing-key": "failed.summary", }, ), diff --git a/autogpt_platform/backend/backend/server/v2/store/db.py b/autogpt_platform/backend/backend/server/v2/store/db.py index 662275d5b1..1bb2cb983b 100644 --- a/autogpt_platform/backend/backend/server/v2/store/db.py +++ b/autogpt_platform/backend/backend/server/v2/store/db.py @@ -706,12 +706,12 @@ async def get_my_agents( logger.debug(f"Getting my agents for user {user_id}, page={page}") try: - search_filter = prisma.types.LibraryAgentWhereInput( - userId=user_id, - Agent={"is": {"StoreListing": {"none": {"isDeleted": False}}}}, - isArchived=False, - isDeleted=False, - ) + search_filter: prisma.types.LibraryAgentWhereInput = { + "userId": user_id, + "Agent": {"is": {"StoreListing": {"none": {"isDeleted": False}}}}, + "isArchived": False, + "isDeleted": False, + } library_agents = await prisma.models.LibraryAgent.prisma().find_many( where=search_filter, @@ -726,15 +726,15 @@ async def get_my_agents( my_agents = [ backend.server.v2.store.model.MyAgent( - agent_id=agent.id, - agent_version=agent.version, - agent_name=agent.name or "", - last_edited=agent.updatedAt or agent.createdAt, - description=agent.description or "", - agent_image=entry.imageUrl, + agent_id=graph.id, + agent_version=graph.version, + agent_name=graph.name or "", + last_edited=graph.updatedAt or graph.createdAt, + description=graph.description or "", + agent_image=library_agent.imageUrl, ) - for entry in 
library_agents - if (agent := entry.Agent) + for library_agent in library_agents + if (graph := library_agent.Agent) ] return backend.server.v2.store.model.MyAgentsResponse( diff --git a/autogpt_platform/frontend/src/app/reset_password/actions.ts b/autogpt_platform/frontend/src/app/reset_password/actions.ts index eebea08b97..496667035a 100644 --- a/autogpt_platform/frontend/src/app/reset_password/actions.ts +++ b/autogpt_platform/frontend/src/app/reset_password/actions.ts @@ -29,7 +29,6 @@ export async function sendResetEmail(email: string) { return error.message; } - console.log("Reset email sent"); redirect("/reset_password"); }, ); diff --git a/autogpt_platform/frontend/src/components/agptui/AgentInfo.tsx b/autogpt_platform/frontend/src/components/agptui/AgentInfo.tsx index ec96f0cfb4..a9531221f1 100644 --- a/autogpt_platform/frontend/src/components/agptui/AgentInfo.tsx +++ b/autogpt_platform/frontend/src/components/agptui/AgentInfo.tsx @@ -47,7 +47,6 @@ export const AgentInfo: React.FC = ({ const newLibraryAgent = await api.addMarketplaceAgentToLibrary( storeListingVersionId, ); - console.log("Agent added to library successfully"); router.push(`/library/agents/${newLibraryAgent.id}`); } catch (error) { console.error("Failed to add agent to library:", error); diff --git a/autogpt_platform/frontend/src/components/agptui/BecomeACreator.tsx b/autogpt_platform/frontend/src/components/agptui/BecomeACreator.tsx index c30c3df915..e4448c25bd 100644 --- a/autogpt_platform/frontend/src/components/agptui/BecomeACreator.tsx +++ b/autogpt_platform/frontend/src/components/agptui/BecomeACreator.tsx @@ -17,7 +17,6 @@ export const BecomeACreator: React.FC = ({ }) => { const handleButtonClick = () => { onButtonClick?.(); - console.log("Become A Creator clicked"); }; return ( diff --git a/autogpt_platform/frontend/src/components/agptui/ProfileInfoForm.tsx b/autogpt_platform/frontend/src/components/agptui/ProfileInfoForm.tsx index 302287788e..eebe428f56 100644 --- a/autogpt_platform/frontend/src/components/agptui/ProfileInfoForm.tsx +++ b/autogpt_platform/frontend/src/components/agptui/ProfileInfoForm.tsx @@ -49,8 +49,6 @@ export const ProfileInfoForm = ({ profile }: { profile: CreatorDetails }) => { const formData = new FormData(); formData.append("file", file); - console.log(formData); - // Get auth token if (!supabase) { throw new Error("Supabase client not initialized"); diff --git a/autogpt_platform/frontend/src/components/agptui/PublishAgentSelectInfo.tsx b/autogpt_platform/frontend/src/components/agptui/PublishAgentSelectInfo.tsx index 23c4ebfdbb..dff239c19a 100644 --- a/autogpt_platform/frontend/src/components/agptui/PublishAgentSelectInfo.tsx +++ b/autogpt_platform/frontend/src/components/agptui/PublishAgentSelectInfo.tsx @@ -5,6 +5,7 @@ import Image from "next/image"; import { Button } from "../agptui/Button"; import { IconClose, IconPlus } from "../ui/icons"; import BackendAPI from "@/lib/autogpt-server-api"; +import { toast } from "../ui/use-toast"; export interface PublishAgentInfoInitialData { agent_id: string; @@ -40,13 +41,7 @@ export const PublishAgentInfo: React.FC = ({ initialData, }) => { const [agentId, setAgentId] = React.useState(null); - const [images, setImages] = React.useState( - initialData?.additionalImages - ? [initialData.thumbnailSrc, ...initialData.additionalImages] - : initialData?.thumbnailSrc - ? 
[initialData.thumbnailSrc] - : [], - ); + const [images, setImages] = React.useState([]); const [selectedImage, setSelectedImage] = React.useState( initialData?.thumbnailSrc || null, ); @@ -66,7 +61,10 @@ export const PublishAgentInfo: React.FC = ({ React.useEffect(() => { if (initialData) { setAgentId(initialData.agent_id); - setImagesWithValidation(initialData.additionalImages || []); + setImagesWithValidation([ + ...(initialData?.thumbnailSrc ? [initialData.thumbnailSrc] : []), + ...(initialData.additionalImages || []), + ]); setSelectedImage(initialData.thumbnailSrc || null); setTitle(initialData.title); setSubheader(initialData.subheader); @@ -94,8 +92,6 @@ export const PublishAgentInfo: React.FC = ({ } if (newImages.length === 0) { setSelectedImage(null); - } else { - console.log("images", newImages); } }; @@ -134,7 +130,10 @@ export const PublishAgentInfo: React.FC = ({ setSelectedImage(imageUrl); } } catch (error) { - console.error("Error uploading image:", error); + toast({ + title: "Failed to upload image", + description: `Error: ${error}`, + }); } }; @@ -150,7 +149,6 @@ export const PublishAgentInfo: React.FC = ({ throw new Error("Agent ID is required"); } const { image_url } = await api.generateStoreSubmissionImage(agentId); - console.log("image_url", image_url); setImagesWithValidation([...images, image_url]); } catch (error) { console.error("Failed to generate image:", error); diff --git a/autogpt_platform/frontend/src/components/agptui/StoreCard.tsx b/autogpt_platform/frontend/src/components/agptui/StoreCard.tsx index a498064dc1..28c29794aa 100644 --- a/autogpt_platform/frontend/src/components/agptui/StoreCard.tsx +++ b/autogpt_platform/frontend/src/components/agptui/StoreCard.tsx @@ -46,20 +46,24 @@ export const StoreCard: React.FC = ({ > {/* Header Image Section with Avatar */}
- {`${agentName} + {agentImage && ( + {`${agentName} + )} {!hideAvatar && (
- + {avatarSrc && ( + + )} {(creatorName || agentName).charAt(0)} diff --git a/autogpt_platform/frontend/src/components/agptui/composite/PublishAgentPopout.tsx b/autogpt_platform/frontend/src/components/agptui/composite/PublishAgentPopout.tsx index 4992ce3f86..5e826194d1 100644 --- a/autogpt_platform/frontend/src/components/agptui/composite/PublishAgentPopout.tsx +++ b/autogpt_platform/frontend/src/components/agptui/composite/PublishAgentPopout.tsx @@ -76,14 +76,12 @@ export const PublishAgentPopout: React.FC = ({ const { toast } = useToast(); React.useEffect(() => { - console.log("PublishAgentPopout Effect"); setOpen(openPopout); setStep(inputStep); setPublishData(submissionData); }, [openPopout]); // eslint-disable-line react-hooks/exhaustive-deps React.useEffect(() => { - console.log("LoadMyAgents Effect"); if (open) { const loadMyAgents = async () => { try { @@ -193,7 +191,6 @@ export const PublishAgentPopout: React.FC = ({ slug: slug.replace(/\s+/g, "-"), categories: categories, }); - console.log("Store submission created:", submission); } catch (error) { console.error("Error creating store submission:", error); } diff --git a/autogpt_platform/frontend/src/components/library/library-agent-card.tsx b/autogpt_platform/frontend/src/components/library/library-agent-card.tsx index 199d2f9b21..89e79f7ab8 100644 --- a/autogpt_platform/frontend/src/components/library/library-agent-card.tsx +++ b/autogpt_platform/frontend/src/components/library/library-agent-card.tsx @@ -17,50 +17,53 @@ export default function LibraryAgentCard({ agent: LibraryAgent; }): React.ReactNode { return ( - -
-
- {!image_url ? ( -
+ + {!image_url ? ( +
+ ) : ( + {`${name} + )} +
+ + - ) : ( - {`${name} - )} -
- - - {name.charAt(0)} - -
+ {name.charAt(0)} +
+ -
+
+

{name}

@@ -68,23 +71,26 @@ export default function LibraryAgentCard({

{description}

+ -
- - See runs - +
+ + See runs + - {can_access_graph && ( - - Open in builder - - )} -
+ {can_access_graph && ( + + Open in builder + + )}
- +
); } diff --git a/autogpt_platform/frontend/src/components/library/library-agent-list.tsx b/autogpt_platform/frontend/src/components/library/library-agent-list.tsx index 9a6a302a6c..60b9b14658 100644 --- a/autogpt_platform/frontend/src/components/library/library-agent-list.tsx +++ b/autogpt_platform/frontend/src/components/library/library-agent-list.tsx @@ -30,7 +30,6 @@ export default function LibraryAgentList(): React.ReactNode { } else { setAgents(response.agents); } - console.log(response); setHasMore( response.pagination.current_page * response.pagination.page_size < response.pagination.total_items, diff --git a/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx b/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx index 84cc3b15f4..02012651d7 100644 --- a/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx +++ b/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx @@ -406,7 +406,6 @@ export default function useAgentGraph( errorMessage = error.message || "Invalid input"; if (path && error.message) { const key = path.slice(1); - console.log("Error", key, error.message); setNestedProperty( errors, key, @@ -495,7 +494,6 @@ export default function useAgentGraph( // Display error message if (saveRunRequest.state === "error") { if (saveRunRequest.request === "save") { - console.error("Error saving agent"); toast({ variant: "destructive", title: `Error saving agent`, @@ -507,9 +505,7 @@ export default function useAgentGraph( title: `Error saving&running agent`, duration: 2000, }); - console.error(`Error saving&running agent`); } else if (saveRunRequest.request === "stop") { - console.error(`Error stopping agent`); toast({ variant: "destructive", title: `Error stopping agent`, @@ -539,7 +535,6 @@ export default function useAgentGraph( } else if (saveRunRequest.request === "run") { const validationError = validateNodes(); if (validationError) { - console.error("Validation failed; aborting run"); toast({ title: `Validation failed: ${validationError}`, variant: "destructive", @@ -1034,7 +1029,7 @@ export default function useAgentGraph( return; } } catch (error) { - console.log(error); + console.error(error); toast({ variant: "destructive", title: "Error scheduling agent", diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts index 7562c45d55..0e3f94fe2c 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts @@ -482,7 +482,6 @@ export default class BackendAPI { agentName: string, review: StoreReviewCreate, ): Promise { - console.log("Reviewing agent: ", username, agentName, review); return this._request( "POST", `/store/agents/${encodeURIComponent(username)}/${encodeURIComponent( @@ -785,7 +784,7 @@ export default class BackendAPI { ); this.heartbeatTimeoutId = window.setTimeout(() => { - console.log("Heartbeat timeout - reconnecting"); + console.warn("Heartbeat timeout - reconnecting"); this.webSocket?.close(); this.connectWebSocket(); }, this.HEARTBEAT_TIMEOUT); @@ -821,13 +820,12 @@ export default class BackendAPI { this.webSocket = new WebSocket(wsUrlWithToken); this.webSocket.onopen = () => { - console.log("WebSocket connection established"); this.startHeartbeat(); // Start heartbeat when connection opens resolve(); }; this.webSocket.onclose = (event) => { - console.log("WebSocket connection closed", event); + console.warn("WebSocket connection closed", event); this.stopHeartbeat(); // Stop heartbeat when 
connection closes this.webSocket = null; // Attempt to reconnect after a delay From c1e329497c9aa19f0515cab1392c74428fefdd24 Mon Sep 17 00:00:00 2001 From: Bently Date: Wed, 12 Mar 2025 07:36:55 +0000 Subject: [PATCH 04/61] fix(otto): prevent breaking when failing to make api call (#9613) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These are some tweaks for Otto to try prevent the frontend from breaking when/if it fails to make the otto api call. ### Changes 🏗️ Removed unused reference to ``OTTO_API_URL`` in [backend/server/v2/otto/routes.py](https://github.com/Significant-Gravitas/AutoGPT/compare/dev...otto-fixes?expand=1#diff-6545b2ed01d619cc095b8d0ca6d5baa86d1448dc970cff508669ec3430675d28) Removed un-needed ``revalidatePath("/build");`` from [frontend/src/app/build/actions.ts](https://github.com/Significant-Gravitas/AutoGPT/compare/dev...otto-fixes?expand=1#diff-6861f6b90ce91138b3821c0a82abfffbc09f425c26b7335fac60f54894e353e9) Added a 60 second timeout to the api call in [backend/server/v2/otto/service.py](https://github.com/Significant-Gravitas/AutoGPT/compare/dev...otto-fixes?expand=1#diff-63c9a1a5337cd5e4ddec7544a258916d4998a6cb5c4181f621d7e24f654bd5c8) Added a better error handler in [frontend/src/components/OttoChatWidget.tsx](https://github.com/Significant-Gravitas/AutoGPT/compare/dev...otto-fixes?expand=1#diff-7351568d5c588b77f35d80994ca6800a7faa1b3b0ca229970cfa491eab4b4b33) Made it so errors return a structured error response for better handling in [frontend/src/app/build/actions.ts](https://github.com/Significant-Gravitas/AutoGPT/compare/dev...otto-fixes?expand=1#diff-6861f6b90ce91138b3821c0a82abfffbc09f425c26b7335fac60f54894e353e9) ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] do not set the ENV var for ``otto_api_url`` - [x] Set the ENV var for ``NEXT_PUBLIC_BEHAVE_AS`` to ``CLOUD`` to have the chat ui show in the build page - [x] Send otto a message from the build page frontend, it should fail and not break the frontend - [x] Send otto a message that contains graph data to see if the sending graph data works, this should also fail and not break the frontend - [x] now we set the ENV for ``otto_api_url`` - [x] Send otto a message from the build page frontend, it should work - [x] Send otto a message that contains graph data to see if the sending graph data works, it should work --- .../backend/backend/server/v2/otto/routes.py | 4 - .../backend/backend/server/v2/otto/service.py | 18 ++- .../frontend/src/app/build/actions.ts | 8 +- .../src/components/OttoChatWidget.tsx | 104 ++++++++++-------- .../src/lib/autogpt-server-api/types.ts | 1 + 5 files changed, 81 insertions(+), 54 deletions(-) diff --git a/autogpt_platform/backend/backend/server/v2/otto/routes.py b/autogpt_platform/backend/backend/server/v2/otto/routes.py index eb63aacad5..6f409e9a6c 100644 --- a/autogpt_platform/backend/backend/server/v2/otto/routes.py +++ b/autogpt_platform/backend/backend/server/v2/otto/routes.py @@ -4,15 +4,11 @@ from autogpt_libs.auth.middleware import auth_middleware from fastapi import APIRouter, Depends from backend.server.utils import get_user_id -from backend.util.settings import Settings from .models import ApiResponse, ChatRequest from .service import OttoService logger = logging.getLogger(__name__) -settings = Settings() - -OTTO_API_URL = settings.config.otto_api_url router = APIRouter() diff 
--git a/autogpt_platform/backend/backend/server/v2/otto/service.py b/autogpt_platform/backend/backend/server/v2/otto/service.py index f8be312249..8efa4f642f 100644 --- a/autogpt_platform/backend/backend/server/v2/otto/service.py +++ b/autogpt_platform/backend/backend/server/v2/otto/service.py @@ -1,3 +1,4 @@ +import asyncio import logging from typing import Optional @@ -67,6 +68,13 @@ class OttoService: """ Send request to Otto API and handle the response. """ + # Check if Otto API URL is configured + if not OTTO_API_URL: + logger.error("Otto API URL is not configured") + raise HTTPException( + status_code=503, detail="Otto service is not configured" + ) + try: async with aiohttp.ClientSession() as session: headers = { @@ -94,7 +102,10 @@ class OttoService: logger.debug(f"Request payload: {payload}") async with session.post( - OTTO_API_URL, json=payload, headers=headers + OTTO_API_URL, + json=payload, + headers=headers, + timeout=aiohttp.ClientTimeout(total=60), ) as response: if response.status != 200: error_text = await response.text() @@ -115,6 +126,11 @@ class OttoService: raise HTTPException( status_code=503, detail="Failed to connect to Otto service" ) + except asyncio.TimeoutError: + logger.error("Timeout error connecting to Otto API after 60 seconds") + raise HTTPException( + status_code=504, detail="Request to Otto service timed out" + ) except Exception as e: logger.error(f"Unexpected error in Otto API proxy: {str(e)}") raise HTTPException( diff --git a/autogpt_platform/frontend/src/app/build/actions.ts b/autogpt_platform/frontend/src/app/build/actions.ts index 6c1ca0d505..16c577d16c 100644 --- a/autogpt_platform/frontend/src/app/build/actions.ts +++ b/autogpt_platform/frontend/src/app/build/actions.ts @@ -24,10 +24,14 @@ export async function askOtto( try { const response = await api.askOtto(ottoQuery); - revalidatePath("/build"); return response; } catch (error) { console.error("Error in askOtto server action:", error); - throw error; + return { + answer: error instanceof Error ? error.message : "Unknown error occurred", + documents: [], + success: false, + error: true, + }; } } diff --git a/autogpt_platform/frontend/src/components/OttoChatWidget.tsx b/autogpt_platform/frontend/src/components/OttoChatWidget.tsx index efbb6d73cc..e03fcfbf62 100644 --- a/autogpt_platform/frontend/src/components/OttoChatWidget.tsx +++ b/autogpt_platform/frontend/src/components/OttoChatWidget.tsx @@ -56,29 +56,30 @@ const OttoChatWidget = () => { // Add user message to chat setMessages((prev) => [...prev, { type: "user", content: userMessage }]); + // Add temporary processing message + setMessages((prev) => [ + ...prev, + { type: "assistant", content: "Processing your question..." }, + ]); + + const conversationHistory = messages.reduce< + { query: string; response: string }[] + >((acc, msg, i, arr) => { + if ( + msg.type === "user" && + i + 1 < arr.length && + arr[i + 1].type === "assistant" && + arr[i + 1].content !== "Processing your question..." + ) { + acc.push({ + query: msg.content, + response: arr[i + 1].content, + }); + } + return acc; + }, []); + try { - // Add temporary processing message - setMessages((prev) => [ - ...prev, - { type: "assistant", content: "Processing your question..." 
}, - ]); - - const conversationHistory = messages.reduce< - { query: string; response: string }[] - >((acc, msg, i, arr) => { - if ( - msg.type === "user" && - i + 1 < arr.length && - arr[i + 1].type === "assistant" - ) { - acc.push({ - query: msg.content, - response: arr[i + 1].content, - }); - } - return acc; - }, []); - const data = await askOtto( userMessage, conversationHistory, @@ -86,34 +87,43 @@ const OttoChatWidget = () => { flowID || undefined, ); - // Remove processing message and add actual response - setMessages((prev) => [ - ...prev.slice(0, -1), - { type: "assistant", content: data.answer }, - ]); - } catch (error) { - console.error("Error calling API:", error); - // Remove processing message and add error message - const errorMessage = - error instanceof Error && error.message === "Authentication required" - ? "Please sign in to use the chat feature." - : "Sorry, there was an error processing your message. Please try again."; + // Check if the response contains an error + if ("error" in data && data.error === true) { + // Handle different error types + let errorMessage = + "Sorry, there was an error processing your message. Please try again."; - setMessages((prev) => [ - ...prev.slice(0, -1), - { type: "assistant", content: errorMessage }, - ]); + if (data.answer === "Authentication required") { + errorMessage = "Please sign in to use the chat feature."; + } else if (data.answer === "Failed to connect to Otto service") { + errorMessage = + "Otto service is currently unavailable. Please try again later."; + } else if (data.answer.includes("timed out")) { + errorMessage = "Request timed out. Please try again later."; + } - if ( - error instanceof Error && - error.message === "Authentication required" - ) { - toast({ - title: "Authentication Error", - description: "Please sign in to use the chat feature.", - variant: "destructive", - }); + // Remove processing message and add error message + setMessages((prev) => [ + ...prev.slice(0, -1), + { type: "assistant", content: errorMessage }, + ]); + } else { + // Remove processing message and add actual response + setMessages((prev) => [ + ...prev.slice(0, -1), + { type: "assistant", content: data.answer }, + ]); } + } catch (error) { + console.error("Unexpected error in chat widget:", error); + setMessages((prev) => [ + ...prev.slice(0, -1), + { + type: "assistant", + content: + "An unexpected error occurred. 
Please refresh the page and try again.", + }, + ]); } finally { setIsProcessing(false); setIncludeGraphData(false); diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index a9995b377e..26561c889e 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -760,6 +760,7 @@ export interface OttoResponse { answer: string; documents: OttoDocument[]; success: boolean; + error: boolean; } export interface OttoQuery { From 02618e1a5247c31ce72ad6cbb0c67cd8ecc19afb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Mar 2025 21:33:06 +0700 Subject: [PATCH 05/61] chore(frontend/deps): bump the production-dependencies group across 1 directory with 13 updates (#9611) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps the production-dependencies group with 13 updates in the /autogpt_platform/frontend directory: | Package | From | To | | --- | --- | --- | | [@faker-js/faker](https://github.com/faker-js/faker) | `9.4.0` | `9.6.0` | | [@next/third-parties](https://github.com/vercel/next.js/tree/HEAD/packages/third-parties) | `15.1.6` | `15.2.1` | | [@supabase/supabase-js](https://github.com/supabase/supabase-js) | `2.48.1` | `2.49.1` | | [@tanstack/react-table](https://github.com/TanStack/table/tree/HEAD/packages/react-table) | `8.20.6` | `8.21.2` | | [@xyflow/react](https://github.com/xyflow/xyflow/tree/HEAD/packages/react) | `12.4.2` | `12.4.4` | | [framer-motion](https://github.com/motiondivision/motion) | `12.3.1` | `12.4.11` | | [lucide-react](https://github.com/lucide-icons/lucide/tree/HEAD/packages/lucide-react) | `0.474.0` | `0.479.0` | | [next-themes](https://github.com/pacocoursey/next-themes) | `0.4.4` | `0.4.5` | | [react-day-picker](https://github.com/gpbl/react-day-picker) | `9.5.1` | `9.6.1` | | [react-icons](https://github.com/react-icons/react-icons) | `5.4.0` | `5.5.0` | | [react-shepherd](https://github.com/shepherd-pro/shepherd) | `6.1.7` | `6.1.8` | | [uuid](https://github.com/uuidjs/uuid) | `11.0.5` | `11.1.0` | | [zod](https://github.com/colinhacks/zod) | `3.24.1` | `3.24.2` | Updates `@faker-js/faker` from 9.4.0 to 9.6.0
Release notes

Sourced from @​faker-js/faker's releases.

v9.6.0

What's Changed

New Contributors

Full Changelog: https://github.com/faker-js/faker/compare/v9.5.1...v9.6.0

v9.5.1

What's Changed

New Contributors

Full Changelog: https://github.com/faker-js/faker/compare/v9.5.0...v9.5.1

v9.5.0

What's Changed

... (truncated)

Changelog

Sourced from @​faker-js/faker's changelog.

9.6.0 (2025-03-06)

Features

9.5.1 (2025-02-28)

Bug Fixes

9.5.0 (2025-02-10)

Features

Commits
Maintainer changes

This version was pushed to npm by st-ddt, a new releaser for @​faker-js/faker since your current version.


Updates `@next/third-parties` from 15.1.6 to 15.2.1
Release notes

Sourced from @​next/third-parties's releases.

v15.2.1

Core Changes

  • Unify Link and Form prefetching: #76184
  • Turbopack: Ensure server actions sourcemaps tests pass: #76157
  • [dev-overlay] control dark theme in one place: #76528
  • [dev-overlay] change css var for terminal: #76590
  • [dev-overlay] Discriminate stack frame settled typed: #76517
  • Remove obsolete sourcePackage references: #76550
  • refactor: remove unused variable in externals handling: #76599
  • fix: Add popular embedding libraries to serverExternalPackages: #76574
  • [Segment Cache] Implement hash-only navigations: #76179
  • Webpack: abstract away getting compilation spans: #76579
  • report compiler duration for webpack and improve numbers: #76665
  • [dev-overlay] fix dark theme missing close bracket: #76672
  • Remove revalidate property from incremental cache ctx for FETCH kind: #76500
  • [dev-overlay] fix: env name label style was out of sync with error type label: #76668
  • Turbopack: avoid celling source maps before minify: #76626
  • refactor(CI): Merge all four bundler test manifest scripts into one: #76652
  • [metadata] fix duplicate metadata for parallel routes: #76669
  • [Segment Cache] Omit from bundle if flag disabled: #76622
  • [Segment Cache] Support output: "export" mode: #75671
  • [Segment Cache] Refresh on same-page navigation: #76223
  • [metadata] re-enable streaming metadata with PPR: #76119
  • [Segment Cache] Search param fallback handling: #75990
  • [Segment Cache] Fix: canonicalURL omits origin: #76444
  • fix metadata basePath for manifest: #76681
  • Propagate expire time to cache-control header and prerender manifest: #76207
  • Show revalidate/expire columns in build output: #76343
  • Gate alternate bundler behind canary only: #76634
  • [dynamicIO] routes with dynamic segments should be able to be static in dev: #76691
  • [repo] upgrade ts 5.8.2: #76709
  • [metadata]: ensure metadata boundary is only rendered once on client nav: #76692
  • [metadata] clean up redudant options: #76712
  • Fix uniqueness detection for generateStaticParams: #76713
  • Upgrade React from 22e39ea7-20250225 to d55cc79b-20250228: #76680
  • [Turbopack] Compute module batches and use them for chunking: #76133
  • [Dev Tools] Improve keyboard interactions for menu & overlays: #76754
  • Keep server code out of browser chunks: #76660
  • Turbopack: inline minify into code generation and make it a plain function instead of a turbo tasks function: #76628
  • fix edge runtime asset fetch in pages api: #76750
  • Update use-cache-unknown-cache-kind.test.ts snapshot for alternate bundler: #76682

Example Changes

  • docs: fix reading params code blocks: #76705

Misc Changes

  • fix(rustdoc): Fix rustdoc warnings, block on rustdoc failures in CI: #76448

... (truncated)

Commits

Updates `@supabase/supabase-js` from 2.48.1 to 2.49.1
Release notes

Sourced from @​supabase/supabase-js's releases.

v2.49.1

2.49.1 (2025-02-24)

Bug Fixes

  • deps: upgrade postgrest-js 1.19.2 (3f01c3f)

v2.49.0

2.49.0 (2025-02-24)

Features

Commits
  • fceca48 Merge pull request #1369 from supabase/avallete/chore-bump-postgrest-js-1-19-2
  • a9ece9a feat: bump @supabase/auth-js to 2.68.0 (#1359)
  • 3f01c3f fix(deps): upgrade postgrest-js 1.19.2
  • See full diff in compare view

Updates `@tanstack/react-table` from 8.20.6 to 8.21.2
Release notes

Sourced from @​tanstack/react-table's releases.

v8.21.2

Version 8.21.2 - 2/11/25, 8:59 PM

Changes

Fix

  • arrIncludes autoremove filterFn (#5623) (2efaf57) by lukebui
  • lit-table: spread table options in lit adapter (#5904) (36dede1) by @​kadoshms

Docs

  • row accessor bug in example code block (#5893) (b1506a7) by Valerii Petryniak
  • virtualizer tbody from onchange (827b098) by Kevin Van Cott
  • exp virtual - remeasure when table state changes (9e6987d) by Kevin Van Cott
  • angular: add expanding and sub components examples (#5898) (099e1a4) by @​riccardoperra
  • example name (57703a4) by Kevin Van Cott

Packages

  • @​tanstack/table-core@​8.21.2
  • @​tanstack/lit-table@​8.21.2
  • @​tanstack/angular-table@​8.21.2
  • @​tanstack/qwik-table@​8.21.2
  • @​tanstack/react-table@​8.21.2
  • @​tanstack/solid-table@​8.21.2
  • @​tanstack/svelte-table@​8.21.2
  • @​tanstack/vue-table@​8.21.2
  • @​tanstack/react-table-devtools@​8.21.2

v8.21.1

Version 8.21.1 - 2/3/25, 5:37 AM

Changes

Fix

  • lit-table: dynamic data updates in the Lit Table Adapter (#5884) (9763877) by Luke Schierer

Docs

  • add experimental virtualization example (#5895) (8d6e19f) by Kevin Van Cott
  • angular: add missing faker-js deps (#5883) (190c669) by @​riccardoperra
  • angular: add editable, row-dnd and performant column resizing example (#5881) (0baabdd) by @​riccardoperra

Packages

  • @​tanstack/lit-table@​8.21.1

v8.21.0

... (truncated)

Commits

Updates `@xyflow/react` from 12.4.2 to 12.4.4
Release notes

Sourced from @​xyflow/react's releases.

@​xyflow/react@​12.4.4

Patch Changes

@​xyflow/react@​12.4.3

Patch Changes

Changelog

Sourced from @​xyflow/react's changelog.

12.4.4

Patch Changes

12.4.3

Patch Changes

Commits
  • d045503 chore(packages): bump
  • 7a00fe3 chore(getNodeConnections): remove deprecation #5051
  • 08b99e8 Merge pull request #5043 from xyflow/refactor/dynamic-expand-parent
  • 8dd2b4f chore(updateNodePositions): cleanup
  • 0b67a6c refactor(errors): show error when user drags uninitialized node #5014
  • 27df80b fix(selection-listener): pass generics
  • d094ef0 fix(OnSelectionChangeFunc): pass node and edge type generics #5023
  • 3969758 refactor(expandParent): use current value on drag #5039
  • 43f188d fix(click-connections): handle isConnectableStart correctly #5041
  • 68591a8 Merge branch 'main' into no-deprecated
  • Additional commits viewable in compare view

Updates `framer-motion` from 12.3.1 to 12.4.11
Changelog

Sourced from framer-motion's changelog.

[12.4.11] 2025-03-10

Fixed

  • Preventing flattening of scroll animations when type or ease are explicitly set.

[12.4.10] 2025-03-03

Fixed

  • Adding UMD bundles for motion-dom and motion-utils.

[12.4.9] 2025-03-03

Fixed

  • Fixed Reorder.Item reordering causing lostpointercapture event to fire.

[12.4.8] 2025-02-26

Fixed

  • Fixed exiting children with layoutDependency not animating layout changes because of a stale layout dependency.

[12.4.7] 2025-02-20

Fixed

  • Fixed AnimatePresence not triggering exit animations when a child with layout or drag is removed.

[12.4.6] 2025-02-20

Fixed

  • Fixed drag gesture on child elements.

[12.4.5] 2025-02-19

Fixed

  • Fixed onClick handlers not working inside press events.

[12.4.4] 2025-02-18

Fixed

  • Changed press, drag and pan gestures to use pointer capturing for better usage within iframe embeds.

[12.4.3] 2025-02-12

... (truncated)

Commits

Updates `lucide-react` from 0.474.0 to 0.479.0
Release notes

Sourced from lucide-react's releases.

Version 0.479.0

What's Changed

Full Changelog: https://github.com/lucide-icons/lucide/compare/0.478.0...0.479.0

Version 0.478.0

What's Changed

Full Changelog: https://github.com/lucide-icons/lucide/compare/0.477.0...0.478.0

New icons 0.477.0

New icons 🎨

Modified Icons 🔨

Fixes and new icons 0.476.0

Fixes

New icons 🎨

Modified Icons 🔨

New icons 0.475.0

New icons 🎨

... (truncated)

Commits
  • 1787b82 build(deps-dev): bump vite from 5.4.13 to 5.4.14 in /packages/lucide (#2804)
  • b46927e fix(lucide-react): Revert exports property package.json, fixing edge worker e...
  • 3ab6c37 build(deps-dev): bump vite from 5.4.12 to 5.4.13 (#2798)
  • ba2c4b5 build(deps-dev): bump vite from 5.1.8 to 5.4.12 (#2786)
  • 50630b3 ci: Improve build speeds (#2778)
  • See full diff in compare view

Updates `next-themes` from 0.4.4 to 0.4.5
Release notes

Sourced from next-themes's releases.

v0.4.5

What's Changed

New Contributors

Full Changelog: https://github.com/pacocoursey/next-themes/compare/v0.4.4...v0.4.5

Commits
  • d12996b chore: Fix corepack errors in CI (#342)
  • b77db23 Bump the npm_and_yarn group across 1 directory with 7 updates (#341)
  • d3fa4ee Bump next from 14.2.10 to 14.2.15 in the npm_and_yarn group across 1 director...
  • ad83567 Reduce number of renders by pre-setting resolvedTheme (#338)
  • 1b51044 fix: map theme to class using ValueObject in injected script (#330)
  • See full diff in compare view

Updates `react-day-picker` from 9.5.1 to 9.6.1
Release notes

Sourced from react-day-picker's releases.

v9.6.1

This release addresses an accessibility issue, adds a new animate prop and fixes other minor bugs.

Possible Breaking Change in Custom Styles

To address a focus lost bug affecting navigation buttons, we updated the buttons to use aria-disabled instead of the disabled attribute.

This change may cause custom styles for those disabled buttons to break. To fix it in your code, update the CSS selector to target [aria-disabled="true"]:

- .rdp-button_next:disabled,
+ .rdp-button_next[aria-disabled="true"] {
  /* your custom CSS */
}
- .rdp-button_previous:disabled,
+ .rdp-button_previous[aria-disabled="true"] {
  /* your custom CSS */
}

Animating Month Transitions

Thanks to the work by @​rodgobbi, we have added animations to DayPicker. The new animate prop enables CSS transitions for captions and weeks when navigating between months:

<DayPicker animate />

Customizing the animation style can be challenging due to the HTML table structure of the grid. We may address this in the future. Please leave your feedback in DayPicker Discussions.
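For reference, a minimal sketch of opting into the new prop in a typical v9 setup, assuming the bundled stylesheet import; the `AnimatedCalendar` component name is illustrative, not part of the library:

```tsx
// Minimal sketch: enabling month-transition animations with the new `animate` prop.
// Assumes react-day-picker v9 and its default stylesheet.
import { DayPicker } from "react-day-picker";
import "react-day-picker/style.css";

export function AnimatedCalendar() {
  return <DayPicker mode="single" animate />;
}
```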

What's Changed

v9.6.1

New Contributors

Full Changelog: https://github.com/gpbl/react-day-picker/compare/v9.5.1...v9.6.1

v9.6.0

... (truncated)

Commits

Updates `react-icons` from 5.4.0 to 5.5.0
Release notes

Sourced from react-icons's releases.

v5.5.0

What's Changed

New Contributors

Full Changelog: https://github.com/react-icons/react-icons/compare/v5.4.0...v5.5.0

| Icon Library | License | Version | Count |
| --- | --- | --- | --- |
| Circum Icons | MPL-2.0 license | 1.0.0 | 288 |
| Font Awesome 5 | CC BY 4.0 License | 5.15.4-3-gafecf2a | 1612 |
Font Awesome 6 CC BY 4.0 License 6.6.0 2050 Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Zamil Majdy --- autogpt_platform/frontend/package.json | 26 ++-- autogpt_platform/frontend/yarn.lock | 164 ++++++++++++------------- 2 files changed, 95 insertions(+), 95 deletions(-) diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json index a8765aa8ce..889e2ff265 100644 --- a/autogpt_platform/frontend/package.json +++ b/autogpt_platform/frontend/package.json @@ -23,9 +23,9 @@ "defaults" ], "dependencies": { - "@faker-js/faker": "^9.4.0", + "@faker-js/faker": "^9.6.0", "@hookform/resolvers": "^3.10.0", - "@next/third-parties": "^15.1.6", + "@next/third-parties": "^15.2.1", "@radix-ui/react-alert-dialog": "^1.1.5", "@radix-ui/react-avatar": "^1.1.1", "@radix-ui/react-checkbox": "^1.1.2", @@ -46,9 +46,9 @@ "@radix-ui/react-tooltip": "^1.1.7", "@sentry/nextjs": "^8", "@supabase/ssr": "^0.5.2", - "@supabase/supabase-js": "^2.48.1", - "@tanstack/react-table": "^8.20.6", - "@xyflow/react": "^12.4.2", + "@supabase/supabase-js": "^2.49.1", + "@tanstack/react-table": "^8.21.2", + "@xyflow/react": "12.4.2", "ajv": "^8.17.1", "boring-avatars": "^1.11.2", "canvas-confetti": "^1.9.3", @@ -60,28 +60,28 @@ "dotenv": "^16.4.7", "elliptic": "6.6.1", "embla-carousel-react": "^8.5.2", - "framer-motion": "^12.0.11", + "framer-motion": "^12.4.11", "geist": "^1.3.1", "launchdarkly-react-client-sdk": "^3.6.1", "lodash.debounce": "^4.0.8", - "lucide-react": "^0.474.0", + "lucide-react": "^0.479.0", "moment": "^2.30.1", "next": "^14.2.21", - "next-themes": "^0.4.4", + "next-themes": "^0.4.5", "react": "^18", - "react-day-picker": "^9.5.1", + "react-day-picker": "^9.6.1", "react-dom": "^18", "react-drag-drop-files": "^2.4.0", "react-hook-form": "^7.54.0", - "react-icons": "^5.4.0", + "react-icons": "^5.5.0", "react-markdown": "^9.0.3", "react-modal": "^3.16.3", - "react-shepherd": "^6.1.7", + "react-shepherd": "^6.1.8", "recharts": "^2.15.1", "tailwind-merge": "^2.6.0", "tailwindcss-animate": "^1.0.7", - "uuid": "^11.0.5", - "zod": "^3.23.8" + "uuid": "^11.1.0", + "zod": "^3.24.2" }, "devDependencies": { "@chromatic-com/storybook": "^3.2.4", diff --git a/autogpt_platform/frontend/yarn.lock b/autogpt_platform/frontend/yarn.lock index 09b08b117e..1bbe3be0aa 100644 --- a/autogpt_platform/frontend/yarn.lock +++ b/autogpt_platform/frontend/yarn.lock @@ -1215,10 +1215,10 @@ resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.57.1.tgz#de633db3ec2ef6a3c89e2f19038063e8a122e2c2" integrity sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q== -"@faker-js/faker@^9.4.0": - version "9.4.0" - resolved "https://registry.yarnpkg.com/@faker-js/faker/-/faker-9.4.0.tgz#3e85604df3a318729436677565e9433d964276d2" - integrity sha512-85+k0AxaZSTowL0gXp8zYWDIrWclTbRPg/pm/V0dSFZ6W6D4lhcG3uuZl4zLsEKfEvs69xDbLN2cHQudwp95JA== +"@faker-js/faker@^9.6.0": + version "9.6.0" + resolved "https://registry.yarnpkg.com/@faker-js/faker/-/faker-9.6.0.tgz#64235d20330b142eef3d1d1638ba56c083b4bf1d" + integrity sha512-3vm4by+B5lvsFPSyep3ELWmZfE3kicDtmemVpuwl1yH7tqtnHdsA6hG8fbXedMVdkzgtvzWoRgjSB4Q+FHnZiw== "@floating-ui/core@^1.6.0": version "1.6.9" @@ -1771,10 +1771,10 @@ resolved "https://registry.yarnpkg.com/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.23.tgz#c81838f02f2f16a321b7533890fb63c1edec68e1" integrity sha512-xCtq5BD553SzOgSZ7UH5LH+OATQihydObTrCTvVzOro8QiWYKdBVwcB2Mn2MLMo6DGW9yH1LSPw7jS7HhgJgjw== 
-"@next/third-parties@^15.1.6": - version "15.1.6" - resolved "https://registry.yarnpkg.com/@next/third-parties/-/third-parties-15.1.6.tgz#2ff58ff6424b835b20fab86414b4d13342b49fa6" - integrity sha512-F0uemUqFwD3lLx5SrWXYRe9dZvMVkO0rFuMnvLiPBcagxNc23Ufl5cNXEm4Yuo8O1Mu8dgh+VjExMz1Td4vBew== +"@next/third-parties@^15.2.1": + version "15.2.1" + resolved "https://registry.yarnpkg.com/@next/third-parties/-/third-parties-15.2.1.tgz#15cac0221fa889518268c3b8bb6538060b8171cb" + integrity sha512-XIOqG5NptLoH7MUBuxZP58DMjgX/XeDhY2UTcLHjMMMXfvu2kckGOUYFfoueWzkLmmfKbj7e8nq03pkcN6vXUA== dependencies: third-party-capital "1.0.20" @@ -2646,7 +2646,7 @@ resolved "https://registry.yarnpkg.com/@rushstack/eslint-patch/-/eslint-patch-1.10.5.tgz#3a1c12c959010a55c17d46b395ed3047b545c246" integrity sha512-kkKUDVlII2DQiKy7UstOR1ErJP8kUKAQ4oa+SQtM0K+lPdmmjj0YnnxBgtTVYH7mUKtbsxeFC9y0AmK7Yb78/A== -"@scarf/scarf@^1.3.0": +"@scarf/scarf@^1.4.0": version "1.4.0" resolved "https://registry.yarnpkg.com/@scarf/scarf/-/scarf-1.4.0.tgz#3bbb984085dbd6d982494538b523be1ce6562972" integrity sha512-xxeapPiUXdZAE3che6f3xogoJPeZgig6omHEy1rIY5WVsB3H2BHNnZH+gHG6x91SCWyQCzWGsuL2Hh3ClO5/qQ== @@ -3281,10 +3281,10 @@ resolved "https://registry.yarnpkg.com/@storybook/theming/-/theming-8.5.3.tgz#44462cc59a0ce66d2e714330399fae4672afda2e" integrity sha512-Jvzw+gT1HNarkJo21WZBq5pU89qDN8u/pD3woSh/1c2h5RS6UylWjQHotPFpcBIQiUSrDFtvCU9xugJm4MD0+w== -"@supabase/auth-js@2.67.3": - version "2.67.3" - resolved "https://registry.yarnpkg.com/@supabase/auth-js/-/auth-js-2.67.3.tgz#a1f5eb22440b0cdbf87fe2ecae662a8dd8bb2028" - integrity sha512-NJDaW8yXs49xMvWVOkSIr8j46jf+tYHV0wHhrwOaLLMZSFO4g6kKAf+MfzQ2RaD06OCUkUHIzctLAxjTgEVpzw== +"@supabase/auth-js@2.68.0": + version "2.68.0" + resolved "https://registry.yarnpkg.com/@supabase/auth-js/-/auth-js-2.68.0.tgz#e1fb51ed577952d16faf86ee47db1fd3d1c4e7db" + integrity sha512-odG7nb7aOmZPUXk6SwL2JchSsn36Ppx11i2yWMIc/meUO2B2HK9YwZHPK06utD9Ql9ke7JKDbwGin/8prHKxxQ== dependencies: "@supabase/node-fetch" "^2.6.14" @@ -3302,10 +3302,10 @@ dependencies: whatwg-url "^5.0.0" -"@supabase/postgrest-js@1.18.1": - version "1.18.1" - resolved "https://registry.yarnpkg.com/@supabase/postgrest-js/-/postgrest-js-1.18.1.tgz#7ceac0dfbe567d953343c5ae9750f848cc6db744" - integrity sha512-dWDnoC0MoDHKhaEOrsEKTadWQcBNknZVQcSgNE/Q2wXh05mhCL1ut/jthRUrSbYcqIw/CEjhaeIPp7dLarT0bg== +"@supabase/postgrest-js@1.19.2": + version "1.19.2" + resolved "https://registry.yarnpkg.com/@supabase/postgrest-js/-/postgrest-js-1.19.2.tgz#cb721860fefd9ec2818bbafc56de4314c0ebca81" + integrity sha512-MXRbk4wpwhWl9IN6rIY1mR8uZCCG4MZAEji942ve6nMwIqnBgBnZhZlON6zTTs6fgveMnoCILpZv1+K91jN+ow== dependencies: "@supabase/node-fetch" "^2.6.14" @@ -3334,15 +3334,15 @@ dependencies: "@supabase/node-fetch" "^2.6.14" -"@supabase/supabase-js@^2.48.1": - version "2.48.1" - resolved "https://registry.yarnpkg.com/@supabase/supabase-js/-/supabase-js-2.48.1.tgz#6bbe6bd799115bd1237510e1c2de6f8ccdd84cd0" - integrity sha512-VMD+CYk/KxfwGbI4fqwSUVA7CLr1izXpqfFerhnYPSi6LEKD8GoR4kuO5Cc8a+N43LnfSQwLJu4kVm2e4etEmA== +"@supabase/supabase-js@^2.49.1": + version "2.49.1" + resolved "https://registry.yarnpkg.com/@supabase/supabase-js/-/supabase-js-2.49.1.tgz#457f7b19722d2cff064a1923399a42b855c4a9b8" + integrity sha512-lKaptKQB5/juEF5+jzmBeZlz69MdHZuxf+0f50NwhL+IE//m4ZnOeWlsKRjjsM0fVayZiQKqLvYdBn0RLkhGiQ== dependencies: - "@supabase/auth-js" "2.67.3" + "@supabase/auth-js" "2.68.0" "@supabase/functions-js" "2.4.4" "@supabase/node-fetch" "2.6.15" - "@supabase/postgrest-js" "1.18.1" + 
"@supabase/postgrest-js" "1.19.2" "@supabase/realtime-js" "2.11.2" "@supabase/storage-js" "2.7.1" @@ -3444,17 +3444,17 @@ dependencies: "@swc/counter" "^0.1.3" -"@tanstack/react-table@^8.20.6": - version "8.20.6" - resolved "https://registry.yarnpkg.com/@tanstack/react-table/-/react-table-8.20.6.tgz#a1f3103327aa59aa621931f4087a7604a21054d0" - integrity sha512-w0jluT718MrOKthRcr2xsjqzx+oEM7B7s/XXyfs19ll++hlId3fjTm+B2zrR3ijpANpkzBAr15j1XGVOMxpggQ== +"@tanstack/react-table@^8.21.2": + version "8.21.2" + resolved "https://registry.yarnpkg.com/@tanstack/react-table/-/react-table-8.21.2.tgz#6a7fce828b64547e33f4606ada8114db496007cc" + integrity sha512-11tNlEDTdIhMJba2RBH+ecJ9l1zgS2kjmexDPAraulc8jeNA4xocSNeyzextT0XJyASil4XsCYlJmf5jEWAtYg== dependencies: - "@tanstack/table-core" "8.20.5" + "@tanstack/table-core" "8.21.2" -"@tanstack/table-core@8.20.5": - version "8.20.5" - resolved "https://registry.yarnpkg.com/@tanstack/table-core/-/table-core-8.20.5.tgz#3974f0b090bed11243d4107283824167a395cf1d" - integrity sha512-P9dF7XbibHph2PFRz8gfBKEXEY/HJPOhym8CHmjF8y3q5mWpKx9xtZapXQUWCgkqvsK0R46Azuz+VaxD4Xl+Tg== +"@tanstack/table-core@8.21.2": + version "8.21.2" + resolved "https://registry.yarnpkg.com/@tanstack/table-core/-/table-core-8.21.2.tgz#dd57595a1773652bb6fb437e90a5f5386a49fd7e" + integrity sha512-uvXk/U4cBiFMxt+p9/G7yUWI/UbHYbyghLCjlpWZ3mLeIZiUBSKcUnw9UnKkdRz7Z/N4UBuFLWQdJCjUe7HjvA== "@testing-library/dom@10.4.0": version "10.4.0" @@ -4172,7 +4172,7 @@ resolved "https://registry.yarnpkg.com/@xtuc/long/-/long-4.2.2.tgz#d291c6a4e97989b5c61d9acf396ae4fe133a718d" integrity sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ== -"@xyflow/react@^12.4.2": +"@xyflow/react@12.4.2": version "12.4.2" resolved "https://registry.yarnpkg.com/@xyflow/react/-/react-12.4.2.tgz#669ab18923d93a8d8fb526241a2affc0d50abf9d" integrity sha512-AFJKVc/fCPtgSOnRst3xdYJwiEcUN9lDY7EO/YiRvFHYCJGgfzg+jpvZjkTOnBLGyrMJre9378pRxAc3fsR06A== @@ -6775,13 +6775,13 @@ forwarded-parse@2.1.2: resolved "https://registry.yarnpkg.com/forwarded-parse/-/forwarded-parse-2.1.2.tgz#08511eddaaa2ddfd56ba11138eee7df117a09325" integrity sha512-alTFZZQDKMporBH77856pXgzhEzaUVmLCDk+egLgIgHst3Tpndzz8MnKe+GzRJRfvVdn69HhpW7cmXzvtLvJAw== -framer-motion@^12.0.11: - version "12.3.1" - resolved "https://registry.yarnpkg.com/framer-motion/-/framer-motion-12.3.1.tgz#041be06a3e1041cf6820a0895b704e8ea588e3df" - integrity sha512-Br4a2YhdeEvdzMDX8ToVO+/Jbqm2Loo/8rMjizX1nejjBvRJGMfz+NW1qBo4WCHF26uIdES5MBztUlB4pIyfqQ== +framer-motion@^12.4.11: + version "12.4.11" + resolved "https://registry.yarnpkg.com/framer-motion/-/framer-motion-12.4.11.tgz#c903d07d41222f7c2ec303ab4326eff86260b504" + integrity sha512-MHeZlgzo9DnQ6+TFgRqJiOk4vWwsDcXFtxeXlVawVs1nwgcZW3966foGIgkIiIrBSPHB9RlbqspAxiYWosFT9g== dependencies: - motion-dom "^12.0.0" - motion-utils "^12.0.0" + motion-dom "^12.4.11" + motion-utils "^12.4.10" tslib "^2.4.0" fromentries@^1.2.0: @@ -8494,10 +8494,10 @@ lru-cache@^5.1.1: dependencies: yallist "^3.0.2" -lucide-react@^0.474.0: - version "0.474.0" - resolved "https://registry.yarnpkg.com/lucide-react/-/lucide-react-0.474.0.tgz#9fcaa96250fa2de0b3e2803d4ad744eaea572247" - integrity sha512-CmghgHkh0OJNmxGKWc0qfPJCYHASPMVSyGY8fj3xgk4v84ItqDg64JNKFZn5hC6E0vHi6gxnbCgwhyVB09wQtA== +lucide-react@^0.479.0: + version "0.479.0" + resolved "https://registry.yarnpkg.com/lucide-react/-/lucide-react-0.479.0.tgz#7321f979a389ec5dd86747b2deb6444cf0922f8d" + integrity 
sha512-aBhNnveRhorBOK7uA4gDjgaf+YlHMdMhQ/3cupk6exM10hWlEU+2QtWYOfhXhjAsmdb6LeKR+NZnow4UxRRiTQ== lz-string@^1.5.0: version "1.5.0" @@ -8980,17 +8980,17 @@ moment@^2.30.1: resolved "https://registry.yarnpkg.com/moment/-/moment-2.30.1.tgz#f8c91c07b7a786e30c59926df530b4eac96974ae" integrity sha512-uEmtNhbDOrWPFS+hdjFCBfy9f2YoyzRpwcl+DqpC6taX21FzsTLQVbMV/W7PzNSX6x/bhC1zA3c2UQ5NzH6how== -motion-dom@^12.0.0: - version "12.0.0" - resolved "https://registry.yarnpkg.com/motion-dom/-/motion-dom-12.0.0.tgz#7045c63642eecbcc04c40b4457ebb07b3c2b3d0c" - integrity sha512-CvYd15OeIR6kHgMdonCc1ihsaUG4MYh/wrkz8gZ3hBX/uamyZCXN9S9qJoYF03GqfTt7thTV/dxnHYX4+55vDg== +motion-dom@^12.4.11: + version "12.4.11" + resolved "https://registry.yarnpkg.com/motion-dom/-/motion-dom-12.4.11.tgz#0419c8686cda4d523f08249deeb8fa6683a9b9d3" + integrity sha512-wstlyV3pktgFjqsjbXMo1NX9hQD9XTVqxQNvfc+FREAgxr3GVzgWIEKvbyyNlki3J1jmmh+et9X3aCKeqFPcxA== dependencies: - motion-utils "^12.0.0" + motion-utils "^12.4.10" -motion-utils@^12.0.0: - version "12.0.0" - resolved "https://registry.yarnpkg.com/motion-utils/-/motion-utils-12.0.0.tgz#fabf79f4f1c818720a1b70f615e2a1768f396ac0" - integrity sha512-MNFiBKbbqnmvOjkPyOKgHUp3Q6oiokLkI1bEwm5QA28cxMZrv0CbbBGDNmhF6DIXsi1pCQBSs0dX8xjeER1tmA== +motion-utils@^12.4.10: + version "12.4.10" + resolved "https://registry.yarnpkg.com/motion-utils/-/motion-utils-12.4.10.tgz#3d93acea5454419eaaad8d5e5425cb71cbfa1e7f" + integrity sha512-NPwZd94V013SwRf++jMrk2+HEBgPkeIE2RiOzhAuuQlqxMJPkKt/LXVh6Upl+iN8oarSGD2dlY5/bqgsYXDABA== ms@^2.1.1, ms@^2.1.3: version "2.1.3" @@ -9062,10 +9062,10 @@ neo-async@^2.6.2: resolved "https://registry.yarnpkg.com/neo-async/-/neo-async-2.6.2.tgz#b4aafb93e3aeb2d8174ca53cf163ab7d7308305f" integrity sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw== -next-themes@^0.4.4: - version "0.4.4" - resolved "https://registry.yarnpkg.com/next-themes/-/next-themes-0.4.4.tgz#ce6f68a4af543821bbc4755b59c0d3ced55c2d13" - integrity sha512-LDQ2qIOJF0VnuVrrMSMLrWGjRMkq+0mpgl6e0juCLqdJ+oo8Q84JRWT6Wh11VDQKkMMe+dVzDKLWs5n87T+PkQ== +next-themes@^0.4.5: + version "0.4.5" + resolved "https://registry.yarnpkg.com/next-themes/-/next-themes-0.4.5.tgz#267178c45798df6adfca0843bfc968269fcb7198" + integrity sha512-E8/gYKBxZknOXBiDk/sRokAvkOw35PTUD4Gxtq1eBhd0r4Dx5S42zU65/q8ozR5rcSG2ZlE1E3+ShlUpC7an+A== next@^14.2.21: version "14.2.23" @@ -10014,10 +10014,10 @@ react-confetti@^6.1.0: dependencies: tween-functions "^1.2.0" -react-day-picker@^9.5.1: - version "9.5.1" - resolved "https://registry.yarnpkg.com/react-day-picker/-/react-day-picker-9.5.1.tgz#ec40acdcc3ffbf7c0b9bfea8b6f97924249ea974" - integrity sha512-PxuK8inYLlYgM2zZUVBPsaBM5jI40suPeG+naKyx7kpyF032RRlEAUEjkpW9/poTASh/vyWAOVqjGuGw+47isw== +react-day-picker@^9.6.1: + version "9.6.1" + resolved "https://registry.yarnpkg.com/react-day-picker/-/react-day-picker-9.6.1.tgz#e4dcf562a2b0c083dd6de12e7dca087c36116508" + integrity sha512-PiRT/l6yk+fLpSmyMFUHIep8dbKAlilJGfDB0N2krXFhnxbitZf/t+ePDLk8kou/lYUVWAfIIxBJjFuvrNy7Hw== dependencies: "@date-fns/tz" "^1.2.0" date-fns "^4.1.0" @@ -10065,10 +10065,10 @@ react-hook-form@^7.54.0: resolved "https://registry.yarnpkg.com/react-hook-form/-/react-hook-form-7.54.2.tgz#8c26ed54c71628dff57ccd3c074b1dd377cfb211" integrity sha512-eHpAUgUjWbZocoQYUHposymRb4ZP6d0uwUnooL2uOybA9/3tPUvoAKqEWK1WaSiTxxOfTpffNZP7QwlnM3/gEg== -react-icons@^5.4.0: - version "5.4.0" - resolved "https://registry.yarnpkg.com/react-icons/-/react-icons-5.4.0.tgz#443000f6e5123ee1b21ea8c0a716f6e7797f7416" - integrity 
sha512-7eltJxgVt7X64oHh6wSWNwwbKTCtMfK35hcjvJS0yxEAhPM8oUKdS3+kqaW1vicIltw+kR2unHaa12S9pPALoQ== +react-icons@^5.5.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/react-icons/-/react-icons-5.5.0.tgz#8aa25d3543ff84231685d3331164c00299cdfaf2" + integrity sha512-MEFcXdkP3dLo8uumGI5xN3lDFNsRtrjbOEKDLD7yv76v4wpnEq2Lt2qeHaQOr34I/wPN3s3+N08WkQ+CW37Xiw== react-is@^16.13.1, react-is@^16.7.0: version "16.13.1" @@ -10140,12 +10140,12 @@ react-remove-scroll@^2.6.3: use-callback-ref "^1.3.3" use-sidecar "^1.1.3" -react-shepherd@^6.1.7: - version "6.1.7" - resolved "https://registry.yarnpkg.com/react-shepherd/-/react-shepherd-6.1.7.tgz#58b5291128155b4903830bc42e5ac1ef4bea8427" - integrity sha512-CjlJYuUrToNLUJm/cwsFWyIou0bqwAoVTnLaT7rYYnhSSv1Jr6yDQonmsXmN7+7Ntfvr3l2+X+k21pIAkLWTdA== +react-shepherd@^6.1.8: + version "6.1.8" + resolved "https://registry.yarnpkg.com/react-shepherd/-/react-shepherd-6.1.8.tgz#9ccacbcc9f1835a9ef6a3782a76ba0d5cf6a1dd9" + integrity sha512-AA/ZqSbhkztCnRtNS5V9+V+lBJc1tjyYBGO6Gkjb41OX/jhGiFO0dJpfPnWYuHwAloYAXR0UuFq/lGqlXRWkrw== dependencies: - shepherd.js "14.4.0" + shepherd.js "14.5.0" react-smooth@^4.0.4: version "4.0.4" @@ -10722,13 +10722,13 @@ shell-quote@^1.8.1: resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.8.2.tgz#d2d83e057959d53ec261311e9e9b8f51dcb2934a" integrity sha512-AzqKpGKjrj7EM6rKVQEPpB288oCfnrEIuyoT9cyF4nmGa7V8Zk6f7RRqYisX8X9m+Q7bd632aZW4ky7EhbQztA== -shepherd.js@14.4.0: - version "14.4.0" - resolved "https://registry.yarnpkg.com/shepherd.js/-/shepherd.js-14.4.0.tgz#4d061a1e0a92f13caa5b7f3c8d1095c1215d4c08" - integrity sha512-pXuyhtHPj47Wp6vYESIdhcRrpva67+AolbvVBnJlZNZDa4VBPTwoN+x3R7h1C+RV+z5Tvk3JuLB/8ZgEKMeyEQ== +shepherd.js@14.5.0: + version "14.5.0" + resolved "https://registry.yarnpkg.com/shepherd.js/-/shepherd.js-14.5.0.tgz#200a77ac7197ef0ae2ecc74afbf99569222cca7e" + integrity sha512-23yBjWnrEeaCHFVUukPNol/K0pdvq6NgyqxDeq1qfJuNhxTHpiAvqTB9ULUogndBcGxfkyTRud95PpUyZwGAGQ== dependencies: "@floating-ui/dom" "^1.6.12" - "@scarf/scarf" "^1.3.0" + "@scarf/scarf" "^1.4.0" deepmerge-ts "^7.1.1" shimmer@^1.2.1: @@ -11726,10 +11726,10 @@ utila@~0.4: resolved "https://registry.yarnpkg.com/utila/-/utila-0.4.0.tgz#8a16a05d445657a3aea5eecc5b12a4fa5379772c" integrity sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA== -uuid@^11.0.5: - version "11.0.5" - resolved "https://registry.yarnpkg.com/uuid/-/uuid-11.0.5.tgz#07b46bdfa6310c92c3fb3953a8720f170427fc62" - integrity sha512-508e6IcKLrhxKdBbcA2b4KQZlLVp2+J5UwQ6F7Drckkc5N9ZJwFa4TgWtsww9UG8fGHbm6gbV19TdM5pQ4GaIA== +uuid@^11.1.0: + version "11.1.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-11.1.0.tgz#9549028be1753bb934fc96e2bca09bb4105ae912" + integrity sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A== uuid@^8.0.0, uuid@^8.3.2: version "8.3.2" @@ -12160,10 +12160,10 @@ yoctocolors-cjs@^2.1.2: resolved "https://registry.yarnpkg.com/yoctocolors-cjs/-/yoctocolors-cjs-2.1.2.tgz#f4b905a840a37506813a7acaa28febe97767a242" integrity sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA== -zod@^3.23.8: - version "3.24.1" - resolved "https://registry.yarnpkg.com/zod/-/zod-3.24.1.tgz#27445c912738c8ad1e9de1bea0359fa44d9d35ee" - integrity sha512-muH7gBL9sI1nciMZV67X5fTKKBLtwpZ5VBp1vsOQzj1MhrBZ4wlVCm3gedKZWLp0Oyel8sIGfeiz54Su+OVT+A== +zod@^3.24.2: + version "3.24.2" + resolved "https://registry.yarnpkg.com/zod/-/zod-3.24.2.tgz#8efa74126287c675e92f46871cfc8d15c34372b3" + 
integrity sha512-lY7CDW43ECgW9u1TcT3IoXHflywfVqDYze4waEz812jR/bZ8FHDsl7pFQoSZTz5N+2NqRXs8GBwnAwo3ZNxqhQ== zustand@^4.4.0: version "4.5.6" From f4d4bb83b0e660a88d459913ca549796c4f77980 Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Thu, 13 Mar 2025 10:14:46 +0700 Subject: [PATCH 06/61] fix(backend): Move Notification service to DB manager (#9626) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DatabaseManager is already provisioned in RestApiService, and NotificationService lives within the same instance as the Rest Server. ### Changes 🏗️ Moving the DB calls of NotificationService to DatabaseManager. ### Checklist 📋 #### For code changes: - [ ] I have clearly listed my changes in the PR description - [ ] I have made a test plan - [ ] I have tested my changes according to the test plan: - [ ] ...
Example test plan

- [ ] Create from scratch and execute an agent with at least 3 blocks
- [ ] Import an agent from file upload, and confirm it executes correctly
- [ ] Upload agent to marketplace
- [ ] Import an agent from marketplace and confirm it executes correctly
- [ ] Edit an agent from monitor, and confirm it executes correctly

#### For configuration changes:

- [ ] `.env.example` is updated or already compatible with my changes
- [ ] `docker-compose.yml` is updated or already compatible with my changes
- [ ] I have included a list of my configuration changes in the PR description (under **Changes**)

Examples of configuration changes

- Changing ports
- Adding new services that need to communicate with each other
- Secrets or environment variable changes
- New or infrastructure changes such as databases
--- .../backend/backend/data/graph.py | 24 ++--- .../backend/backend/executor/database.py | 32 ++++++ .../backend/notifications/notifications.py | 99 ++++++++----------- .../backend/backend/server/routers/v1.py | 2 +- 4 files changed, 87 insertions(+), 70 deletions(-) diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index a6a04b0ceb..4fcd13c964 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -14,7 +14,7 @@ from prisma.models import ( AgentNodeLink, StoreListingVersion, ) -from prisma.types import AgentGraphWhereInput +from prisma.types import AgentGraphExecutionWhereInput, AgentGraphWhereInput from pydantic.fields import Field, computed_field from backend.blocks.agent import AgentExecutorBlock @@ -597,18 +597,20 @@ async def get_graphs( return graph_models -# TODO: move execution stuff to .execution -async def get_graphs_executions(user_id: str) -> list[GraphExecutionMeta]: - executions = await AgentGraphExecution.prisma().find_many( - where={"isDeleted": False, "userId": user_id}, - order={"createdAt": "desc"}, - ) - return [GraphExecutionMeta.from_db(execution) for execution in executions] +async def get_graph_executions( + graph_id: Optional[str] = None, + user_id: Optional[str] = None, +) -> list[GraphExecutionMeta]: + where_filter: AgentGraphExecutionWhereInput = { + "isDeleted": False, + } + if user_id: + where_filter["userId"] = user_id + if graph_id: + where_filter["agentGraphId"] = graph_id - -async def get_graph_executions(graph_id: str, user_id: str) -> list[GraphExecutionMeta]: executions = await AgentGraphExecution.prisma().find_many( - where={"agentGraphId": graph_id, "isDeleted": False, "userId": user_id}, + where=where_filter, order={"createdAt": "desc"}, ) return [GraphExecutionMeta.from_db(execution) for execution in executions] diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py index faa525b4db..e906e5e674 100644 --- a/autogpt_platform/backend/backend/executor/database.py +++ b/autogpt_platform/backend/backend/executor/database.py @@ -20,9 +20,20 @@ from backend.data.graph import ( get_graph_metadata, get_node, ) +from backend.data.notifications import ( + create_or_add_to_user_notification_batch, + empty_user_notification_batch, + get_all_batches_by_type, + get_user_notification_batch, + get_user_notification_oldest_message_in_batch, +) from backend.data.user import ( + get_active_user_ids_in_timerange, + get_user_email_by_id, + get_user_email_verification, get_user_integrations, get_user_metadata, + get_user_notification_preference, update_user_integrations, update_user_metadata, ) @@ -80,3 +91,24 @@ class DatabaseManager(AppService): update_user_metadata = exposed_run_and_wait(update_user_metadata) get_user_integrations = exposed_run_and_wait(get_user_integrations) update_user_integrations = exposed_run_and_wait(update_user_integrations) + + # User Comms - async + get_active_user_ids_in_timerange = exposed_run_and_wait( + get_active_user_ids_in_timerange + ) + get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id) + get_user_email_verification = exposed_run_and_wait(get_user_email_verification) + get_user_notification_preference = exposed_run_and_wait( + get_user_notification_preference + ) + + # Notifications - async + create_or_add_to_user_notification_batch = exposed_run_and_wait( + create_or_add_to_user_notification_batch + ) + empty_user_notification_batch 
= exposed_run_and_wait(empty_user_notification_batch) + get_all_batches_by_type = exposed_run_and_wait(get_all_batches_by_type) + get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch) + get_user_notification_oldest_message_in_batch = exposed_run_and_wait( + get_user_notification_oldest_message_in_batch + ) diff --git a/autogpt_platform/backend/backend/notifications/notifications.py b/autogpt_platform/backend/backend/notifications/notifications.py index 9cdcebda88..3de577f32f 100644 --- a/autogpt_platform/backend/backend/notifications/notifications.py +++ b/autogpt_platform/backend/backend/notifications/notifications.py @@ -23,23 +23,12 @@ from backend.data.notifications import ( SummaryParamsEventModel, WeeklySummaryData, WeeklySummaryParams, - create_or_add_to_user_notification_batch, - empty_user_notification_batch, - get_all_batches_by_type, get_batch_delay, get_notif_data_type, get_summary_params_type, - get_user_notification_batch, - get_user_notification_oldest_message_in_batch, ) from backend.data.rabbitmq import Exchange, ExchangeType, Queue, RabbitMQConfig -from backend.data.user import ( - generate_unsubscribe_link, - get_active_user_ids_in_timerange, - get_user_email_by_id, - get_user_email_verification, - get_user_notification_preference, -) +from backend.data.user import generate_unsubscribe_link from backend.notifications.email import EmailSender from backend.util.service import AppService, expose, get_service_client from backend.util.settings import Settings @@ -123,12 +112,18 @@ def get_scheduler(): return get_service_client(Scheduler) +@thread_cached +def get_db(): + from backend.executor.database import DatabaseManager + + return get_service_client(DatabaseManager) + + class NotificationManager(AppService): """Service for handling notifications with batching support""" def __init__(self): super().__init__() - self.use_db = True self.rabbitmq_config = create_notification_config() self.running = True self.email_sender = EmailSender() @@ -160,11 +155,9 @@ class NotificationManager(AppService): processed_count = 0 current_time = datetime.now(tz=timezone.utc) start_time = current_time - timedelta(days=7) - users = self.run_and_wait( - get_active_user_ids_in_timerange( - end_time=current_time.isoformat(), - start_time=start_time.isoformat(), - ) + users = get_db().get_active_user_ids_in_timerange( + end_time=current_time.isoformat(), + start_time=start_time.isoformat(), ) for user in users: @@ -194,12 +187,12 @@ class NotificationManager(AppService): for notification_type in notification_types: # Get all batches for this notification type - batches = self.run_and_wait(get_all_batches_by_type(notification_type)) + batches = get_db().get_all_batches_by_type(notification_type) for batch in batches: # Check if batch has aged out - oldest_message = self.run_and_wait( - get_user_notification_oldest_message_in_batch( + oldest_message = ( + get_db().get_user_notification_oldest_message_in_batch( batch.userId, notification_type ) ) @@ -215,9 +208,7 @@ class NotificationManager(AppService): # If batch has aged out, process it if oldest_message.createdAt + max_delay < current_time: - recipient_email = self.run_and_wait( - get_user_email_by_id(batch.userId) - ) + recipient_email = get_db().get_user_email_by_id(batch.userId) if not recipient_email: logger.error( @@ -234,15 +225,13 @@ class NotificationManager(AppService): f"User {batch.userId} does not want to receive {notification_type} notifications" ) # Clear the batch - self.run_and_wait( - 
empty_user_notification_batch( - batch.userId, notification_type - ) + get_db().empty_user_notification_batch( + batch.userId, notification_type ) continue - batch_data = self.run_and_wait( - get_user_notification_batch(batch.userId, notification_type) + batch_data = get_db().get_user_notification_batch( + batch.userId, notification_type ) if not batch_data or not batch_data.notifications: @@ -250,10 +239,8 @@ class NotificationManager(AppService): f"Batch data not found for user {batch.userId}" ) # Clear the batch - self.run_and_wait( - empty_user_notification_batch( - batch.userId, notification_type - ) + get_db().empty_user_notification_batch( + batch.userId, notification_type ) continue @@ -282,10 +269,8 @@ class NotificationManager(AppService): ) # Clear the batch - self.run_and_wait( - empty_user_notification_batch( - batch.userId, notification_type - ) + get_db().empty_user_notification_batch( + batch.userId, notification_type ) processed_count += 1 @@ -377,14 +362,16 @@ class NotificationManager(AppService): self, user_id: str, event_type: NotificationType ) -> bool: """Check if a user wants to receive a notification based on their preferences and email verification status""" - validated_email = self.run_and_wait(get_user_email_verification(user_id)) - preference = self.run_and_wait( - get_user_notification_preference(user_id) - ).preferences.get(event_type, True) + validated_email = get_db().get_user_email_verification(user_id) + preference = ( + get_db() + .get_user_notification_preference(user_id) + .preferences.get(event_type, True) + ) # only if both are true, should we email this person return validated_email and preference - async def _gather_summary_data( + def _gather_summary_data( self, user_id: str, event_type: NotificationType, params: BaseSummaryParams ) -> BaseSummaryData: """Gathers the data to build a summary notification""" @@ -464,13 +451,13 @@ class NotificationManager(AppService): else: raise ValueError("Invalid event type or params") - async def _should_batch( + def _should_batch( self, user_id: str, event_type: NotificationType, event: NotificationEventModel ) -> bool: - await create_or_add_to_user_notification_batch(user_id, event_type, event) + get_db().create_or_add_to_user_notification_batch(user_id, event_type, event) - oldest_message = await get_user_notification_oldest_message_in_batch( + oldest_message = get_db().get_user_notification_oldest_message_in_batch( user_id, event_type ) if not oldest_message: @@ -527,7 +514,7 @@ class NotificationManager(AppService): model = parsed.model logger.debug(f"Processing immediate notification: {model}") - recipient_email = self.run_and_wait(get_user_email_by_id(event.user_id)) + recipient_email = get_db().get_user_email_by_id(event.user_id) if not recipient_email: logger.error(f"User email not found for user {event.user_id}") return False @@ -564,7 +551,7 @@ class NotificationManager(AppService): model = parsed.model logger.info(f"Processing batch notification: {model}") - recipient_email = self.run_and_wait(get_user_email_by_id(event.user_id)) + recipient_email = get_db().get_user_email_by_id(event.user_id) if not recipient_email: logger.error(f"User email not found for user {event.user_id}") return False @@ -578,16 +565,12 @@ class NotificationManager(AppService): ) return True - should_send = self.run_and_wait( - self._should_batch(event.user_id, event.type, model) - ) + should_send = self._should_batch(event.user_id, event.type, model) if not should_send: logger.info("Batch not old enough to send") return False 
- batch = self.run_and_wait( - get_user_notification_batch(event.user_id, event.type) - ) + batch = get_db().get_user_notification_batch(event.user_id, event.type) if not batch or not batch.notifications: logger.error(f"Batch not found for user {event.user_id}") return False @@ -614,7 +597,7 @@ class NotificationManager(AppService): user_unsub_link=unsub_link, ) # only empty the batch if we sent the email successfully - self.run_and_wait(empty_user_notification_batch(event.user_id, event.type)) + get_db().empty_user_notification_batch(event.user_id, event.type) return True except Exception as e: logger.exception(f"Error processing notification for batch queue: {e}") @@ -631,7 +614,7 @@ class NotificationManager(AppService): logger.info(f"Processing summary notification: {model}") - recipient_email = self.run_and_wait(get_user_email_by_id(event.user_id)) + recipient_email = get_db().get_user_email_by_id(event.user_id) if not recipient_email: logger.error(f"User email not found for user {event.user_id}") return False @@ -644,8 +627,8 @@ class NotificationManager(AppService): ) return True - summary_data = self.run_and_wait( - self._gather_summary_data(event.user_id, event.type, model.data) + summary_data = self._gather_summary_data( + event.user_id, event.type, model.data ) unsub_link = generate_unsubscribe_link(event.user_id) diff --git a/autogpt_platform/backend/backend/server/routers/v1.py b/autogpt_platform/backend/backend/server/routers/v1.py index 686365d0ab..f695642dda 100644 --- a/autogpt_platform/backend/backend/server/routers/v1.py +++ b/autogpt_platform/backend/backend/server/routers/v1.py @@ -630,7 +630,7 @@ async def stop_graph_run( async def get_graphs_executions( user_id: Annotated[str, Depends(get_user_id)], ) -> list[graph_db.GraphExecutionMeta]: - return await graph_db.get_graphs_executions(user_id=user_id) + return await graph_db.get_graph_executions(user_id=user_id) @v1_router.get( From 90f9e4e94aa0b5d2e6c009245b7214b40a50493b Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Thu, 13 Mar 2025 10:14:46 +0700 Subject: [PATCH 07/61] fix(backend): Move Notification service to DB manager (#9626) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DatabaseManager is already provisioned in RestApiService, and NotificationService lives within the same instance as the Rest Server. ### Changes 🏗️ Moving the DB calls of NotificationService to DatabaseManager. ### Checklist 📋 #### For code changes: - [ ] I have clearly listed my changes in the PR description - [ ] I have made a test plan - [ ] I have tested my changes according to the test plan: - [ ] ...
Example test plan

- [ ] Create from scratch and execute an agent with at least 3 blocks
- [ ] Import an agent from file upload, and confirm it executes correctly
- [ ] Upload agent to marketplace
- [ ] Import an agent from marketplace and confirm it executes correctly
- [ ] Edit an agent from monitor, and confirm it executes correctly

#### For configuration changes:

- [ ] `.env.example` is updated or already compatible with my changes
- [ ] `docker-compose.yml` is updated or already compatible with my changes
- [ ] I have included a list of my configuration changes in the PR description (under **Changes**)

Examples of configuration changes

- Changing ports
- Adding new services that need to communicate with each other
- Secrets or environment variable changes
- New or infrastructure changes such as databases
(cherry picked from commit f4d4bb83b0e660a88d459913ca549796c4f77980) --- .../backend/backend/data/graph.py | 24 ++--- .../backend/backend/executor/database.py | 32 ++++++ .../backend/notifications/notifications.py | 99 ++++++++----------- .../backend/backend/server/routers/v1.py | 2 +- 4 files changed, 87 insertions(+), 70 deletions(-) diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index a6a04b0ceb..4fcd13c964 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -14,7 +14,7 @@ from prisma.models import ( AgentNodeLink, StoreListingVersion, ) -from prisma.types import AgentGraphWhereInput +from prisma.types import AgentGraphExecutionWhereInput, AgentGraphWhereInput from pydantic.fields import Field, computed_field from backend.blocks.agent import AgentExecutorBlock @@ -597,18 +597,20 @@ async def get_graphs( return graph_models -# TODO: move execution stuff to .execution -async def get_graphs_executions(user_id: str) -> list[GraphExecutionMeta]: - executions = await AgentGraphExecution.prisma().find_many( - where={"isDeleted": False, "userId": user_id}, - order={"createdAt": "desc"}, - ) - return [GraphExecutionMeta.from_db(execution) for execution in executions] +async def get_graph_executions( + graph_id: Optional[str] = None, + user_id: Optional[str] = None, +) -> list[GraphExecutionMeta]: + where_filter: AgentGraphExecutionWhereInput = { + "isDeleted": False, + } + if user_id: + where_filter["userId"] = user_id + if graph_id: + where_filter["agentGraphId"] = graph_id - -async def get_graph_executions(graph_id: str, user_id: str) -> list[GraphExecutionMeta]: executions = await AgentGraphExecution.prisma().find_many( - where={"agentGraphId": graph_id, "isDeleted": False, "userId": user_id}, + where=where_filter, order={"createdAt": "desc"}, ) return [GraphExecutionMeta.from_db(execution) for execution in executions] diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py index faa525b4db..e906e5e674 100644 --- a/autogpt_platform/backend/backend/executor/database.py +++ b/autogpt_platform/backend/backend/executor/database.py @@ -20,9 +20,20 @@ from backend.data.graph import ( get_graph_metadata, get_node, ) +from backend.data.notifications import ( + create_or_add_to_user_notification_batch, + empty_user_notification_batch, + get_all_batches_by_type, + get_user_notification_batch, + get_user_notification_oldest_message_in_batch, +) from backend.data.user import ( + get_active_user_ids_in_timerange, + get_user_email_by_id, + get_user_email_verification, get_user_integrations, get_user_metadata, + get_user_notification_preference, update_user_integrations, update_user_metadata, ) @@ -80,3 +91,24 @@ class DatabaseManager(AppService): update_user_metadata = exposed_run_and_wait(update_user_metadata) get_user_integrations = exposed_run_and_wait(get_user_integrations) update_user_integrations = exposed_run_and_wait(update_user_integrations) + + # User Comms - async + get_active_user_ids_in_timerange = exposed_run_and_wait( + get_active_user_ids_in_timerange + ) + get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id) + get_user_email_verification = exposed_run_and_wait(get_user_email_verification) + get_user_notification_preference = exposed_run_and_wait( + get_user_notification_preference + ) + + # Notifications - async + create_or_add_to_user_notification_batch = exposed_run_and_wait( + 
create_or_add_to_user_notification_batch + ) + empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch) + get_all_batches_by_type = exposed_run_and_wait(get_all_batches_by_type) + get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch) + get_user_notification_oldest_message_in_batch = exposed_run_and_wait( + get_user_notification_oldest_message_in_batch + ) diff --git a/autogpt_platform/backend/backend/notifications/notifications.py b/autogpt_platform/backend/backend/notifications/notifications.py index 9cdcebda88..3de577f32f 100644 --- a/autogpt_platform/backend/backend/notifications/notifications.py +++ b/autogpt_platform/backend/backend/notifications/notifications.py @@ -23,23 +23,12 @@ from backend.data.notifications import ( SummaryParamsEventModel, WeeklySummaryData, WeeklySummaryParams, - create_or_add_to_user_notification_batch, - empty_user_notification_batch, - get_all_batches_by_type, get_batch_delay, get_notif_data_type, get_summary_params_type, - get_user_notification_batch, - get_user_notification_oldest_message_in_batch, ) from backend.data.rabbitmq import Exchange, ExchangeType, Queue, RabbitMQConfig -from backend.data.user import ( - generate_unsubscribe_link, - get_active_user_ids_in_timerange, - get_user_email_by_id, - get_user_email_verification, - get_user_notification_preference, -) +from backend.data.user import generate_unsubscribe_link from backend.notifications.email import EmailSender from backend.util.service import AppService, expose, get_service_client from backend.util.settings import Settings @@ -123,12 +112,18 @@ def get_scheduler(): return get_service_client(Scheduler) +@thread_cached +def get_db(): + from backend.executor.database import DatabaseManager + + return get_service_client(DatabaseManager) + + class NotificationManager(AppService): """Service for handling notifications with batching support""" def __init__(self): super().__init__() - self.use_db = True self.rabbitmq_config = create_notification_config() self.running = True self.email_sender = EmailSender() @@ -160,11 +155,9 @@ class NotificationManager(AppService): processed_count = 0 current_time = datetime.now(tz=timezone.utc) start_time = current_time - timedelta(days=7) - users = self.run_and_wait( - get_active_user_ids_in_timerange( - end_time=current_time.isoformat(), - start_time=start_time.isoformat(), - ) + users = get_db().get_active_user_ids_in_timerange( + end_time=current_time.isoformat(), + start_time=start_time.isoformat(), ) for user in users: @@ -194,12 +187,12 @@ class NotificationManager(AppService): for notification_type in notification_types: # Get all batches for this notification type - batches = self.run_and_wait(get_all_batches_by_type(notification_type)) + batches = get_db().get_all_batches_by_type(notification_type) for batch in batches: # Check if batch has aged out - oldest_message = self.run_and_wait( - get_user_notification_oldest_message_in_batch( + oldest_message = ( + get_db().get_user_notification_oldest_message_in_batch( batch.userId, notification_type ) ) @@ -215,9 +208,7 @@ class NotificationManager(AppService): # If batch has aged out, process it if oldest_message.createdAt + max_delay < current_time: - recipient_email = self.run_and_wait( - get_user_email_by_id(batch.userId) - ) + recipient_email = get_db().get_user_email_by_id(batch.userId) if not recipient_email: logger.error( @@ -234,15 +225,13 @@ class NotificationManager(AppService): f"User {batch.userId} does not want to receive {notification_type} 
notifications" ) # Clear the batch - self.run_and_wait( - empty_user_notification_batch( - batch.userId, notification_type - ) + get_db().empty_user_notification_batch( + batch.userId, notification_type ) continue - batch_data = self.run_and_wait( - get_user_notification_batch(batch.userId, notification_type) + batch_data = get_db().get_user_notification_batch( + batch.userId, notification_type ) if not batch_data or not batch_data.notifications: @@ -250,10 +239,8 @@ class NotificationManager(AppService): f"Batch data not found for user {batch.userId}" ) # Clear the batch - self.run_and_wait( - empty_user_notification_batch( - batch.userId, notification_type - ) + get_db().empty_user_notification_batch( + batch.userId, notification_type ) continue @@ -282,10 +269,8 @@ class NotificationManager(AppService): ) # Clear the batch - self.run_and_wait( - empty_user_notification_batch( - batch.userId, notification_type - ) + get_db().empty_user_notification_batch( + batch.userId, notification_type ) processed_count += 1 @@ -377,14 +362,16 @@ class NotificationManager(AppService): self, user_id: str, event_type: NotificationType ) -> bool: """Check if a user wants to receive a notification based on their preferences and email verification status""" - validated_email = self.run_and_wait(get_user_email_verification(user_id)) - preference = self.run_and_wait( - get_user_notification_preference(user_id) - ).preferences.get(event_type, True) + validated_email = get_db().get_user_email_verification(user_id) + preference = ( + get_db() + .get_user_notification_preference(user_id) + .preferences.get(event_type, True) + ) # only if both are true, should we email this person return validated_email and preference - async def _gather_summary_data( + def _gather_summary_data( self, user_id: str, event_type: NotificationType, params: BaseSummaryParams ) -> BaseSummaryData: """Gathers the data to build a summary notification""" @@ -464,13 +451,13 @@ class NotificationManager(AppService): else: raise ValueError("Invalid event type or params") - async def _should_batch( + def _should_batch( self, user_id: str, event_type: NotificationType, event: NotificationEventModel ) -> bool: - await create_or_add_to_user_notification_batch(user_id, event_type, event) + get_db().create_or_add_to_user_notification_batch(user_id, event_type, event) - oldest_message = await get_user_notification_oldest_message_in_batch( + oldest_message = get_db().get_user_notification_oldest_message_in_batch( user_id, event_type ) if not oldest_message: @@ -527,7 +514,7 @@ class NotificationManager(AppService): model = parsed.model logger.debug(f"Processing immediate notification: {model}") - recipient_email = self.run_and_wait(get_user_email_by_id(event.user_id)) + recipient_email = get_db().get_user_email_by_id(event.user_id) if not recipient_email: logger.error(f"User email not found for user {event.user_id}") return False @@ -564,7 +551,7 @@ class NotificationManager(AppService): model = parsed.model logger.info(f"Processing batch notification: {model}") - recipient_email = self.run_and_wait(get_user_email_by_id(event.user_id)) + recipient_email = get_db().get_user_email_by_id(event.user_id) if not recipient_email: logger.error(f"User email not found for user {event.user_id}") return False @@ -578,16 +565,12 @@ class NotificationManager(AppService): ) return True - should_send = self.run_and_wait( - self._should_batch(event.user_id, event.type, model) - ) + should_send = self._should_batch(event.user_id, event.type, model) if not 
should_send: logger.info("Batch not old enough to send") return False - batch = self.run_and_wait( - get_user_notification_batch(event.user_id, event.type) - ) + batch = get_db().get_user_notification_batch(event.user_id, event.type) if not batch or not batch.notifications: logger.error(f"Batch not found for user {event.user_id}") return False @@ -614,7 +597,7 @@ class NotificationManager(AppService): user_unsub_link=unsub_link, ) # only empty the batch if we sent the email successfully - self.run_and_wait(empty_user_notification_batch(event.user_id, event.type)) + get_db().empty_user_notification_batch(event.user_id, event.type) return True except Exception as e: logger.exception(f"Error processing notification for batch queue: {e}") @@ -631,7 +614,7 @@ class NotificationManager(AppService): logger.info(f"Processing summary notification: {model}") - recipient_email = self.run_and_wait(get_user_email_by_id(event.user_id)) + recipient_email = get_db().get_user_email_by_id(event.user_id) if not recipient_email: logger.error(f"User email not found for user {event.user_id}") return False @@ -644,8 +627,8 @@ class NotificationManager(AppService): ) return True - summary_data = self.run_and_wait( - self._gather_summary_data(event.user_id, event.type, model.data) + summary_data = self._gather_summary_data( + event.user_id, event.type, model.data ) unsub_link = generate_unsubscribe_link(event.user_id) diff --git a/autogpt_platform/backend/backend/server/routers/v1.py b/autogpt_platform/backend/backend/server/routers/v1.py index 686365d0ab..f695642dda 100644 --- a/autogpt_platform/backend/backend/server/routers/v1.py +++ b/autogpt_platform/backend/backend/server/routers/v1.py @@ -630,7 +630,7 @@ async def stop_graph_run( async def get_graphs_executions( user_id: Annotated[str, Depends(get_user_id)], ) -> list[graph_db.GraphExecutionMeta]: - return await graph_db.get_graphs_executions(user_id=user_id) + return await graph_db.get_graph_executions(user_id=user_id) @v1_router.get( From b9f31a9c44e8babc09c08efbc2d6d5dd162ab5ad Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Fri, 14 Mar 2025 08:54:18 +0700 Subject: [PATCH 08/61] feat(backend): Fix failed RPC on Notification Service (#9630) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Although returning a Prisma object on an RPC is a bad practice, we have instances where we do so and the type contains a `prisma.Json` field. This Json field can't be seamlessly serialized and then converted back into the Prisma object. ### Changes 🏗️ Replacing prisma object as return type on notification service with a plain pydantic object as DTO. ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Calling notification APIs through the RPC client. 
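To illustrate why the DTO swap below helps: once the RPC return value is a plain pydantic model, it can be dumped to JSON-safe primitives and rebuilt losslessly on the other side of the service boundary. A minimal sketch, assuming pydantic v2 and a simplified DTO (the real one uses the NotificationType enum rather than a bare string):

```python
# Minimal sketch of the DTO round trip across the RPC boundary.
from datetime import datetime, timezone

from pydantic import BaseModel


class UserNotificationEventDTO(BaseModel):
    type: str  # simplified; the real field is a NotificationType enum
    data: dict
    created_at: datetime
    updated_at: datetime


now = datetime.now(tz=timezone.utc)
dto = UserNotificationEventDTO(
    type="AGENT_RUN", data={"agent_name": "demo"}, created_at=now, updated_at=now
)

# Serialize to JSON-safe primitives on the service side...
payload = dto.model_dump(mode="json")
# ...and rebuild an identical DTO on the client side of the RPC call.
restored = UserNotificationEventDTO.model_validate(payload)
assert restored == dto
```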
--- .../backend/backend/data/notifications.py | 62 ++++++++++++++++--- .../backend/notifications/notifications.py | 34 +++++----- .../backend/backend/util/service.py | 2 +- 3 files changed, 71 insertions(+), 27 deletions(-) diff --git a/autogpt_platform/backend/backend/data/notifications.py b/autogpt_platform/backend/backend/data/notifications.py index b42605a2d9..bf5599bbaa 100644 --- a/autogpt_platform/backend/backend/data/notifications.py +++ b/autogpt_platform/backend/backend/data/notifications.py @@ -341,6 +341,43 @@ class NotificationPreference(BaseModel): ) +class UserNotificationEventDTO(BaseModel): + type: NotificationType + data: dict + created_at: datetime + updated_at: datetime + + @staticmethod + def from_db(model: NotificationEvent) -> "UserNotificationEventDTO": + return UserNotificationEventDTO( + type=model.type, + data=dict(model.data), + created_at=model.createdAt, + updated_at=model.updatedAt, + ) + + +class UserNotificationBatchDTO(BaseModel): + user_id: str + type: NotificationType + notifications: list[UserNotificationEventDTO] + created_at: datetime + updated_at: datetime + + @staticmethod + def from_db(model: UserNotificationBatch) -> "UserNotificationBatchDTO": + return UserNotificationBatchDTO( + user_id=model.userId, + type=model.type, + notifications=[ + UserNotificationEventDTO.from_db(notification) + for notification in model.notifications or [] + ], + created_at=model.createdAt, + updated_at=model.updatedAt, + ) + + def get_batch_delay(notification_type: NotificationType) -> timedelta: return { NotificationType.AGENT_RUN: timedelta(minutes=1), @@ -355,7 +392,7 @@ async def create_or_add_to_user_notification_batch( user_id: str, notification_type: NotificationType, notification_data: NotificationEventModel, -) -> UserNotificationBatch: +) -> UserNotificationBatchDTO: try: logger.info( f"Creating or adding to notification batch for {user_id} with type {notification_type} and data {notification_data}" @@ -393,7 +430,7 @@ async def create_or_add_to_user_notification_batch( }, include={"notifications": True}, ) - return resp + return UserNotificationBatchDTO.from_db(resp) else: async with transaction() as tx: notification_event = await tx.notificationevent.create( @@ -415,7 +452,7 @@ async def create_or_add_to_user_notification_batch( raise DatabaseError( f"Failed to add notification event {notification_event.id} to existing batch {existing_batch.id}" ) - return resp + return UserNotificationBatchDTO.from_db(resp) except Exception as e: raise DatabaseError( f"Failed to create or add to notification batch for user {user_id} and type {notification_type}: {e}" @@ -425,7 +462,7 @@ async def create_or_add_to_user_notification_batch( async def get_user_notification_oldest_message_in_batch( user_id: str, notification_type: NotificationType, -) -> NotificationEvent | None: +) -> UserNotificationEventDTO | None: try: batch = await UserNotificationBatch.prisma().find_first( where={"userId": user_id, "type": notification_type}, @@ -436,7 +473,12 @@ async def get_user_notification_oldest_message_in_batch( if not batch.notifications: return None sorted_notifications = sorted(batch.notifications, key=lambda x: x.createdAt) - return sorted_notifications[0] + + return ( + UserNotificationEventDTO.from_db(sorted_notifications[0]) + if sorted_notifications + else None + ) except Exception as e: raise DatabaseError( f"Failed to get user notification last message in batch for user {user_id} and type {notification_type}: {e}" @@ -471,12 +513,13 @@ async def 
empty_user_notification_batch( async def get_user_notification_batch( user_id: str, notification_type: NotificationType, -) -> UserNotificationBatch | None: +) -> UserNotificationBatchDTO | None: try: - return await UserNotificationBatch.prisma().find_first( + batch = await UserNotificationBatch.prisma().find_first( where={"userId": user_id, "type": notification_type}, include={"notifications": True}, ) + return UserNotificationBatchDTO.from_db(batch) if batch else None except Exception as e: raise DatabaseError( f"Failed to get user notification batch for user {user_id} and type {notification_type}: {e}" @@ -485,9 +528,9 @@ async def get_user_notification_batch( async def get_all_batches_by_type( notification_type: NotificationType, -) -> list[UserNotificationBatch]: +) -> list[UserNotificationBatchDTO]: try: - return await UserNotificationBatch.prisma().find_many( + batches = await UserNotificationBatch.prisma().find_many( where={ "type": notification_type, "notifications": { @@ -496,6 +539,7 @@ async def get_all_batches_by_type( }, include={"notifications": True}, ) + return [UserNotificationBatchDTO.from_db(batch) for batch in batches] except Exception as e: raise DatabaseError( f"Failed to get all batches by type {notification_type}: {e}" diff --git a/autogpt_platform/backend/backend/notifications/notifications.py b/autogpt_platform/backend/backend/notifications/notifications.py index 3de577f32f..5210d47033 100644 --- a/autogpt_platform/backend/backend/notifications/notifications.py +++ b/autogpt_platform/backend/backend/notifications/notifications.py @@ -193,68 +193,68 @@ class NotificationManager(AppService): # Check if batch has aged out oldest_message = ( get_db().get_user_notification_oldest_message_in_batch( - batch.userId, notification_type + batch.user_id, notification_type ) ) if not oldest_message: # this should never happen logger.error( - f"Batch for user {batch.userId} and type {notification_type} has no oldest message whichshould never happen!!!!!!!!!!!!!!!!" + f"Batch for user {batch.user_id} and type {notification_type} has no oldest message whichshould never happen!!!!!!!!!!!!!!!!" 
) continue max_delay = get_batch_delay(notification_type) # If batch has aged out, process it - if oldest_message.createdAt + max_delay < current_time: - recipient_email = get_db().get_user_email_by_id(batch.userId) + if oldest_message.created_at + max_delay < current_time: + recipient_email = get_db().get_user_email_by_id(batch.user_id) if not recipient_email: logger.error( - f"User email not found for user {batch.userId}" + f"User email not found for user {batch.user_id}" ) continue should_send = self._should_email_user_based_on_preference( - batch.userId, notification_type + batch.user_id, notification_type ) if not should_send: logger.debug( - f"User {batch.userId} does not want to receive {notification_type} notifications" + f"User {batch.user_id} does not want to receive {notification_type} notifications" ) # Clear the batch get_db().empty_user_notification_batch( - batch.userId, notification_type + batch.user_id, notification_type ) continue batch_data = get_db().get_user_notification_batch( - batch.userId, notification_type + batch.user_id, notification_type ) if not batch_data or not batch_data.notifications: logger.error( - f"Batch data not found for user {batch.userId}" + f"Batch data not found for user {batch.user_id}" ) # Clear the batch get_db().empty_user_notification_batch( - batch.userId, notification_type + batch.user_id, notification_type ) continue - unsub_link = generate_unsubscribe_link(batch.userId) + unsub_link = generate_unsubscribe_link(batch.user_id) events = [ NotificationEventModel[ get_notif_data_type(db_event.type) ].model_validate( { - "user_id": batch.userId, + "user_id": batch.user_id, "type": db_event.type, "data": db_event.data, - "created_at": db_event.createdAt, + "created_at": db_event.created_at, } ) for db_event in batch_data.notifications @@ -270,7 +270,7 @@ class NotificationManager(AppService): # Clear the batch get_db().empty_user_notification_batch( - batch.userId, notification_type + batch.user_id, notification_type ) processed_count += 1 @@ -465,7 +465,7 @@ class NotificationManager(AppService): f"Batch for user {user_id} and type {event_type} has no oldest message whichshould never happen!!!!!!!!!!!!!!!!" 
) return False - oldest_age = oldest_message.createdAt + oldest_age = oldest_message.created_at max_delay = get_batch_delay(event_type) @@ -584,7 +584,7 @@ class NotificationManager(AppService): "user_id": event.user_id, "type": db_event.type, "data": db_event.data, - "created_at": db_event.createdAt, + "created_at": db_event.created_at, } ) for db_event in batch.notifications diff --git a/autogpt_platform/backend/backend/util/service.py b/autogpt_platform/backend/backend/util/service.py index 3a45581c05..0f557ce02b 100644 --- a/autogpt_platform/backend/backend/util/service.py +++ b/autogpt_platform/backend/backend/util/service.py @@ -455,7 +455,7 @@ def fastapi_get_service_client( return response.json() except httpx.HTTPStatusError as e: logger.error(f"HTTP error in {method_name}: {e.response.text}") - error = RemoteCallError.model_validate(e.response.json(), strict=False) + error = RemoteCallError.model_validate(e.response.json()) # DEBUG HELP: if you made a custom exception, make sure you override self.args to be how to make your exception raise EXCEPTION_MAPPING.get(error.type, Exception)( *(error.args or [str(e)]) From b0fed439714af970ef8a36bb9485d20d97027496 Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Fri, 14 Mar 2025 08:54:18 +0700 Subject: [PATCH 09/61] feat(backend): Fix failed RPC on Notification Service (#9630) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Although returning a Prisma object on an RPC is a bad practice, we have instances where we do so and the type contains a `prisma.Json` field. This Json field can't be seamlessly serialized and then converted back into the Prisma object. ### Changes 🏗️ Replacing prisma object as return type on notification service with a plain pydantic object as DTO. ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Calling notification APIs through the RPC client. 
(cherry picked from commit b9f31a9c44e8babc09c08efbc2d6d5dd162ab5ad) --- .../backend/backend/data/notifications.py | 62 ++++++++++++++++--- .../backend/notifications/notifications.py | 34 +++++----- .../backend/backend/util/service.py | 2 +- 3 files changed, 71 insertions(+), 27 deletions(-) diff --git a/autogpt_platform/backend/backend/data/notifications.py b/autogpt_platform/backend/backend/data/notifications.py index b42605a2d9..bf5599bbaa 100644 --- a/autogpt_platform/backend/backend/data/notifications.py +++ b/autogpt_platform/backend/backend/data/notifications.py @@ -341,6 +341,43 @@ class NotificationPreference(BaseModel): ) +class UserNotificationEventDTO(BaseModel): + type: NotificationType + data: dict + created_at: datetime + updated_at: datetime + + @staticmethod + def from_db(model: NotificationEvent) -> "UserNotificationEventDTO": + return UserNotificationEventDTO( + type=model.type, + data=dict(model.data), + created_at=model.createdAt, + updated_at=model.updatedAt, + ) + + +class UserNotificationBatchDTO(BaseModel): + user_id: str + type: NotificationType + notifications: list[UserNotificationEventDTO] + created_at: datetime + updated_at: datetime + + @staticmethod + def from_db(model: UserNotificationBatch) -> "UserNotificationBatchDTO": + return UserNotificationBatchDTO( + user_id=model.userId, + type=model.type, + notifications=[ + UserNotificationEventDTO.from_db(notification) + for notification in model.notifications or [] + ], + created_at=model.createdAt, + updated_at=model.updatedAt, + ) + + def get_batch_delay(notification_type: NotificationType) -> timedelta: return { NotificationType.AGENT_RUN: timedelta(minutes=1), @@ -355,7 +392,7 @@ async def create_or_add_to_user_notification_batch( user_id: str, notification_type: NotificationType, notification_data: NotificationEventModel, -) -> UserNotificationBatch: +) -> UserNotificationBatchDTO: try: logger.info( f"Creating or adding to notification batch for {user_id} with type {notification_type} and data {notification_data}" @@ -393,7 +430,7 @@ async def create_or_add_to_user_notification_batch( }, include={"notifications": True}, ) - return resp + return UserNotificationBatchDTO.from_db(resp) else: async with transaction() as tx: notification_event = await tx.notificationevent.create( @@ -415,7 +452,7 @@ async def create_or_add_to_user_notification_batch( raise DatabaseError( f"Failed to add notification event {notification_event.id} to existing batch {existing_batch.id}" ) - return resp + return UserNotificationBatchDTO.from_db(resp) except Exception as e: raise DatabaseError( f"Failed to create or add to notification batch for user {user_id} and type {notification_type}: {e}" @@ -425,7 +462,7 @@ async def create_or_add_to_user_notification_batch( async def get_user_notification_oldest_message_in_batch( user_id: str, notification_type: NotificationType, -) -> NotificationEvent | None: +) -> UserNotificationEventDTO | None: try: batch = await UserNotificationBatch.prisma().find_first( where={"userId": user_id, "type": notification_type}, @@ -436,7 +473,12 @@ async def get_user_notification_oldest_message_in_batch( if not batch.notifications: return None sorted_notifications = sorted(batch.notifications, key=lambda x: x.createdAt) - return sorted_notifications[0] + + return ( + UserNotificationEventDTO.from_db(sorted_notifications[0]) + if sorted_notifications + else None + ) except Exception as e: raise DatabaseError( f"Failed to get user notification last message in batch for user {user_id} and type 
{notification_type}: {e}" @@ -471,12 +513,13 @@ async def empty_user_notification_batch( async def get_user_notification_batch( user_id: str, notification_type: NotificationType, -) -> UserNotificationBatch | None: +) -> UserNotificationBatchDTO | None: try: - return await UserNotificationBatch.prisma().find_first( + batch = await UserNotificationBatch.prisma().find_first( where={"userId": user_id, "type": notification_type}, include={"notifications": True}, ) + return UserNotificationBatchDTO.from_db(batch) if batch else None except Exception as e: raise DatabaseError( f"Failed to get user notification batch for user {user_id} and type {notification_type}: {e}" @@ -485,9 +528,9 @@ async def get_user_notification_batch( async def get_all_batches_by_type( notification_type: NotificationType, -) -> list[UserNotificationBatch]: +) -> list[UserNotificationBatchDTO]: try: - return await UserNotificationBatch.prisma().find_many( + batches = await UserNotificationBatch.prisma().find_many( where={ "type": notification_type, "notifications": { @@ -496,6 +539,7 @@ async def get_all_batches_by_type( }, include={"notifications": True}, ) + return [UserNotificationBatchDTO.from_db(batch) for batch in batches] except Exception as e: raise DatabaseError( f"Failed to get all batches by type {notification_type}: {e}" diff --git a/autogpt_platform/backend/backend/notifications/notifications.py b/autogpt_platform/backend/backend/notifications/notifications.py index 3de577f32f..5210d47033 100644 --- a/autogpt_platform/backend/backend/notifications/notifications.py +++ b/autogpt_platform/backend/backend/notifications/notifications.py @@ -193,68 +193,68 @@ class NotificationManager(AppService): # Check if batch has aged out oldest_message = ( get_db().get_user_notification_oldest_message_in_batch( - batch.userId, notification_type + batch.user_id, notification_type ) ) if not oldest_message: # this should never happen logger.error( - f"Batch for user {batch.userId} and type {notification_type} has no oldest message whichshould never happen!!!!!!!!!!!!!!!!" + f"Batch for user {batch.user_id} and type {notification_type} has no oldest message whichshould never happen!!!!!!!!!!!!!!!!" 
) continue max_delay = get_batch_delay(notification_type) # If batch has aged out, process it - if oldest_message.createdAt + max_delay < current_time: - recipient_email = get_db().get_user_email_by_id(batch.userId) + if oldest_message.created_at + max_delay < current_time: + recipient_email = get_db().get_user_email_by_id(batch.user_id) if not recipient_email: logger.error( - f"User email not found for user {batch.userId}" + f"User email not found for user {batch.user_id}" ) continue should_send = self._should_email_user_based_on_preference( - batch.userId, notification_type + batch.user_id, notification_type ) if not should_send: logger.debug( - f"User {batch.userId} does not want to receive {notification_type} notifications" + f"User {batch.user_id} does not want to receive {notification_type} notifications" ) # Clear the batch get_db().empty_user_notification_batch( - batch.userId, notification_type + batch.user_id, notification_type ) continue batch_data = get_db().get_user_notification_batch( - batch.userId, notification_type + batch.user_id, notification_type ) if not batch_data or not batch_data.notifications: logger.error( - f"Batch data not found for user {batch.userId}" + f"Batch data not found for user {batch.user_id}" ) # Clear the batch get_db().empty_user_notification_batch( - batch.userId, notification_type + batch.user_id, notification_type ) continue - unsub_link = generate_unsubscribe_link(batch.userId) + unsub_link = generate_unsubscribe_link(batch.user_id) events = [ NotificationEventModel[ get_notif_data_type(db_event.type) ].model_validate( { - "user_id": batch.userId, + "user_id": batch.user_id, "type": db_event.type, "data": db_event.data, - "created_at": db_event.createdAt, + "created_at": db_event.created_at, } ) for db_event in batch_data.notifications @@ -270,7 +270,7 @@ class NotificationManager(AppService): # Clear the batch get_db().empty_user_notification_batch( - batch.userId, notification_type + batch.user_id, notification_type ) processed_count += 1 @@ -465,7 +465,7 @@ class NotificationManager(AppService): f"Batch for user {user_id} and type {event_type} has no oldest message whichshould never happen!!!!!!!!!!!!!!!!" 
) return False - oldest_age = oldest_message.createdAt + oldest_age = oldest_message.created_at max_delay = get_batch_delay(event_type) @@ -584,7 +584,7 @@ class NotificationManager(AppService): "user_id": event.user_id, "type": db_event.type, "data": db_event.data, - "created_at": db_event.createdAt, + "created_at": db_event.created_at, } ) for db_event in batch.notifications diff --git a/autogpt_platform/backend/backend/util/service.py b/autogpt_platform/backend/backend/util/service.py index 3a45581c05..0f557ce02b 100644 --- a/autogpt_platform/backend/backend/util/service.py +++ b/autogpt_platform/backend/backend/util/service.py @@ -455,7 +455,7 @@ def fastapi_get_service_client( return response.json() except httpx.HTTPStatusError as e: logger.error(f"HTTP error in {method_name}: {e.response.text}") - error = RemoteCallError.model_validate(e.response.json(), strict=False) + error = RemoteCallError.model_validate(e.response.json()) # DEBUG HELP: if you made a custom exception, make sure you override self.args to be how to make your exception raise EXCEPTION_MAPPING.get(error.type, Exception)( *(error.args or [str(e)]) From 801f3a3a246ae2ca0a3b8e7d9eba89b5350fdf8f Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Fri, 14 Mar 2025 14:36:44 +0700 Subject: [PATCH 10/61] feat(backend): Fix failed RPC on Notification Service --- autogpt_platform/backend/backend/data/notifications.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/autogpt_platform/backend/backend/data/notifications.py b/autogpt_platform/backend/backend/data/notifications.py index bf5599bbaa..0fa7d26ee0 100644 --- a/autogpt_platform/backend/backend/data/notifications.py +++ b/autogpt_platform/backend/backend/data/notifications.py @@ -35,7 +35,8 @@ class QueueType(Enum): class BaseNotificationData(BaseModel): - pass + class Config: + extra = "allow" class AgentRunData(BaseNotificationData): From aa17872667ac5836fb51e0b2c0f49aea79201b62 Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Fri, 14 Mar 2025 14:36:44 +0700 Subject: [PATCH 11/61] feat(backend): Fix failed RPC on Notification Service (cherry picked from commit 801f3a3a246ae2ca0a3b8e7d9eba89b5350fdf8f) --- autogpt_platform/backend/backend/data/notifications.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/autogpt_platform/backend/backend/data/notifications.py b/autogpt_platform/backend/backend/data/notifications.py index bf5599bbaa..0fa7d26ee0 100644 --- a/autogpt_platform/backend/backend/data/notifications.py +++ b/autogpt_platform/backend/backend/data/notifications.py @@ -35,7 +35,8 @@ class QueueType(Enum): class BaseNotificationData(BaseModel): - pass + class Config: + extra = "allow" class AgentRunData(BaseNotificationData): From b67c2e166b9cb4f3daf44673252257188e9c2688 Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Fri, 14 Mar 2025 21:09:34 +0700 Subject: [PATCH 12/61] fix(platform): Fallback front-end-url to platform-url for billing page --- autogpt_platform/backend/backend/data/credit.py | 15 +++++++++++---- .../backend/backend/server/routers/v1.py | 11 +---------- .../src/app/profile/(user)/credits/page.tsx | 1 - 3 files changed, 12 insertions(+), 15 deletions(-) diff --git a/autogpt_platform/backend/backend/data/credit.py b/autogpt_platform/backend/backend/data/credit.py index 452b60d750..78463f9364 100644 --- a/autogpt_platform/backend/backend/data/credit.py +++ b/autogpt_platform/backend/backend/data/credit.py @@ -39,6 +39,7 @@ from backend.util.settings import Settings settings = Settings() stripe.api_key = 
settings.secrets.stripe_api_key logger = logging.getLogger(__name__) +base_url = settings.config.frontend_base_url or settings.config.platform_base_url class UserCreditBase(ABC): @@ -185,6 +186,14 @@ class UserCreditBase(ABC): """ pass + @staticmethod + async def create_billing_portal_session(user_id: str) -> str: + session = stripe.billing_portal.Session.create( + customer=await get_stripe_customer_id(user_id), + return_url=base_url + "/profile/credits", + ) + return session.url + @staticmethod def time_now() -> datetime: return datetime.now(timezone.utc) @@ -765,10 +774,8 @@ class UserCredit(UserCreditBase): ui_mode="hosted", payment_intent_data={"setup_future_usage": "off_session"}, saved_payment_method_options={"payment_method_save": "enabled"}, - success_url=settings.config.frontend_base_url - + "/profile/credits?topup=success", - cancel_url=settings.config.frontend_base_url - + "/profile/credits?topup=cancel", + success_url=base_url + "/profile/credits?topup=success", + cancel_url=base_url + "/profile/credits?topup=cancel", allow_promotion_codes=True, ) diff --git a/autogpt_platform/backend/backend/server/routers/v1.py b/autogpt_platform/backend/backend/server/routers/v1.py index f695642dda..54cd33f959 100644 --- a/autogpt_platform/backend/backend/server/routers/v1.py +++ b/autogpt_platform/backend/backend/server/routers/v1.py @@ -38,7 +38,6 @@ from backend.data.credit import ( TransactionHistory, get_auto_top_up, get_block_costs, - get_stripe_customer_id, get_user_credit_model, set_auto_top_up, ) @@ -341,15 +340,7 @@ async def stripe_webhook(request: Request): async def manage_payment_method( user_id: Annotated[str, Depends(get_user_id)], ) -> dict[str, str]: - session = stripe.billing_portal.Session.create( - customer=await get_stripe_customer_id(user_id), - return_url=settings.config.frontend_base_url + "/profile/credits", - ) - if not session: - raise HTTPException( - status_code=400, detail="Failed to create billing portal session" - ) - return {"url": session.url} + return {"url": await _user_credit_model.create_billing_portal_session(user_id)} @v1_router.get(path="/credits/transactions", dependencies=[Depends(auth_middleware)]) diff --git a/autogpt_platform/frontend/src/app/profile/(user)/credits/page.tsx b/autogpt_platform/frontend/src/app/profile/(user)/credits/page.tsx index 06ed54459e..7b76b70951 100644 --- a/autogpt_platform/frontend/src/app/profile/(user)/credits/page.tsx +++ b/autogpt_platform/frontend/src/app/profile/(user)/credits/page.tsx @@ -29,7 +29,6 @@ export default function CreditsPage() { formatCredits, refundTopUp, refundRequests, - fetchRefundRequests, } = useCredits({ fetchInitialAutoTopUpConfig: true, fetchInitialRefundRequests: true, From 9c84dbddca43422a005470882ae2dba83211e099 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Fri, 14 Mar 2025 11:37:15 -0500 Subject: [PATCH 13/61] fix: backend admin page logic was broken (#9616) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We're building out admin utilities so we need to bring back the `/admin` route with RBAC. 
This PR goes through re-enabling that to work with the latest changes ### Changes 🏗️ - Adds back removed logic - Refactors the role checks to fix minor bug for admin page and more importantly clarify - Updates routes to the latest ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Test with admin and authenticated user roles - [x] Test with logged out user role - [x] For the above check the all the existing routes + new ones in the `middleware.ts` --- .../frontend/src/lib/supabase/middleware.ts | 62 ++++++++++--------- .../frontend/src/lib/withRoleAccess.ts | 10 ++- 2 files changed, 41 insertions(+), 31 deletions(-) diff --git a/autogpt_platform/frontend/src/lib/supabase/middleware.ts b/autogpt_platform/frontend/src/lib/supabase/middleware.ts index 2c00831285..7680323558 100644 --- a/autogpt_platform/frontend/src/lib/supabase/middleware.ts +++ b/autogpt_platform/frontend/src/lib/supabase/middleware.ts @@ -5,10 +5,10 @@ import { NextResponse, type NextRequest } from "next/server"; const PROTECTED_PAGES = [ "/monitor", "/build", - "/marketplace/profile", - "/marketplace/settings", - "/marketplace/dashboard", "/onboarding", + "/profile", + "/library", + "/monitoring", ]; const ADMIN_PAGES = ["/admin"]; @@ -62,34 +62,38 @@ export async function updateSession(request: NextRequest) { // Get the user role const userRole = user?.role; const url = request.nextUrl.clone(); - const pathname = request.nextUrl.pathname; // AUTH REDIRECTS - // If not logged in and trying to access a protected page, redirect to login - if ( - (!user && - PROTECTED_PAGES.some((page) => { - const combinedPath = `${page}`; - // console.log("Checking pathname:", request.nextUrl.pathname, "against:", combinedPath); - return request.nextUrl.pathname.startsWith(combinedPath); - })) || - ADMIN_PAGES.some((page) => { - const combinedPath = `${page}`; - // console.log("Checking pathname:", request.nextUrl.pathname, "against:", combinedPath); - return request.nextUrl.pathname.startsWith(combinedPath); - }) - ) { - // no user, potentially respond by redirecting the user to the login page - url.pathname = `/login`; - return NextResponse.redirect(url); + // 1. Check if user is not authenticated but trying to access protected content + if (!user) { + // Check if the user is trying to access either a protected page or an admin page + const isAttemptingProtectedPage = PROTECTED_PAGES.some((page) => + request.nextUrl.pathname.startsWith(page), + ); + + const isAttemptingAdminPage = ADMIN_PAGES.some((page) => + request.nextUrl.pathname.startsWith(page), + ); + + // If trying to access any protected content without being logged in, + // redirect to login page + if (isAttemptingProtectedPage || isAttemptingAdminPage) { + url.pathname = `/login`; + return NextResponse.redirect(url); + } } - if ( - user && - userRole != "admin" && - ADMIN_PAGES.some((page) => request.nextUrl.pathname.startsWith(`${page}`)) - ) { - // no user, potentially respond by redirecting the user to the login page - url.pathname = `/marketplace`; - return NextResponse.redirect(url); + + // 2. 
Check if user is authenticated but lacks admin role when accessing admin pages + if (user && userRole !== "admin") { + const isAttemptingAdminPage = ADMIN_PAGES.some((page) => + request.nextUrl.pathname.startsWith(page), + ); + + // If a non-admin user is trying to access admin pages, + // redirect to marketplace + if (isAttemptingAdminPage) { + url.pathname = `/marketplace`; + return NextResponse.redirect(url); + } } // IMPORTANT: You *must* return the supabaseResponse object as it is. If you're diff --git a/autogpt_platform/frontend/src/lib/withRoleAccess.ts b/autogpt_platform/frontend/src/lib/withRoleAccess.ts index fdf59a0cfc..685e9aa9f5 100644 --- a/autogpt_platform/frontend/src/lib/withRoleAccess.ts +++ b/autogpt_platform/frontend/src/lib/withRoleAccess.ts @@ -1,14 +1,20 @@ import React from "react"; import * as Sentry from "@sentry/nextjs"; +import { redirect } from "next/navigation"; +import getServerUser from "./supabase/getServerUser"; export async function withRoleAccess(allowedRoles: string[]) { - console.log("withRoleAccess called:", allowedRoles); - ("use server"); + "use server"; return await Sentry.withServerActionInstrumentation( "withRoleAccess", {}, async () => { return async function >(Component: T) { + const { user, role, error } = await getServerUser(); + + if (error || !user || !role || !allowedRoles.includes(role)) { + redirect("/unauthorized"); + } return Component; }; }, From 1bc3041615b827f8c6ada352bafe4fcffc0f8bc2 Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Sat, 15 Mar 2025 00:16:13 +0700 Subject: [PATCH 14/61] feat(platform)!: Lock Supabase docker-compose code (#9620) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have been submoduling Supabase for provisioning local Supabase instances using docker-compose. Aside from the huge size of unrelated code being pulled, there is also the risk of pulling unintentional breaking change from the upstream to the platform. The latest Supabase changes hide the 5432 port from the supabase-db container and shift it to the supavisor, the instance that we are currently not using. This causes an error in the existing setup. ## BREAKING CHANGES This change will introduce different volume locations for the database content, pulling this change will make the data content fresh from the start. To keep your old data with this change, execute this command: ``` cp -r supabase/docker/volumes/db/data db/docker/volumes/db/data ``` ### Changes 🏗️ The scope of this PR is snapshotting the current docker-compose code obtained from the Supabase repository and embedding it into our repository. This will eliminate the need for submodule / recursive cloning and bringing the entire Supabase repository into the platform. 
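
For reference, the local setup flow after this change reduces to copying the env template and starting the stack from the in-repo compose file, roughly as described in the updated README in this PR:

```
cp .env.example .env
docker compose up -d
```
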
### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Existing CI --- .github/dependabot.yml | 24 - .github/workflows/platform-frontend-ci.yml | 2 +- .gitmodules | 3 - autogpt_platform/.env.example | 123 ++++ autogpt_platform/README.md | 18 +- autogpt_platform/db/docker/.env.example | 123 ++++ autogpt_platform/db/docker/.gitignore | 5 + autogpt_platform/db/docker/README.md | 3 + autogpt_platform/db/docker/dev/data.sql | 48 ++ .../db/docker/dev/docker-compose.dev.yml | 34 ++ .../db/docker/docker-compose.s3.yml | 94 ++++ autogpt_platform/db/docker/docker-compose.yml | 526 ++++++++++++++++++ autogpt_platform/db/docker/reset.sh | 44 ++ .../db/docker/volumes/api/kong.yml | 241 ++++++++ .../db/docker/volumes/db/_supabase.sql | 3 + .../db/docker/volumes/db/init/data.sql | 0 autogpt_platform/db/docker/volumes/db/jwt.sql | 5 + .../db/docker/volumes/db/logs.sql | 6 + .../db/docker/volumes/db/pooler.sql | 6 + .../db/docker/volumes/db/realtime.sql | 4 + .../db/docker/volumes/db/roles.sql | 8 + .../db/docker/volumes/db/webhooks.sql | 208 +++++++ .../docker/volumes/functions/hello/index.ts | 16 + .../db/docker/volumes/functions/main/index.ts | 94 ++++ .../db/docker/volumes/logs/vector.yml | 232 ++++++++ .../db/docker/volumes/pooler/pooler.exs | 30 + autogpt_platform/docker-compose.yml | 27 +- autogpt_platform/supabase | 1 - docs/content/platform/getting-started.md | 6 +- 29 files changed, 1878 insertions(+), 56 deletions(-) create mode 100644 autogpt_platform/.env.example create mode 100644 autogpt_platform/db/docker/.env.example create mode 100644 autogpt_platform/db/docker/.gitignore create mode 100644 autogpt_platform/db/docker/README.md create mode 100644 autogpt_platform/db/docker/dev/data.sql create mode 100644 autogpt_platform/db/docker/dev/docker-compose.dev.yml create mode 100644 autogpt_platform/db/docker/docker-compose.s3.yml create mode 100644 autogpt_platform/db/docker/docker-compose.yml create mode 100755 autogpt_platform/db/docker/reset.sh create mode 100644 autogpt_platform/db/docker/volumes/api/kong.yml create mode 100644 autogpt_platform/db/docker/volumes/db/_supabase.sql create mode 100755 autogpt_platform/db/docker/volumes/db/init/data.sql create mode 100644 autogpt_platform/db/docker/volumes/db/jwt.sql create mode 100644 autogpt_platform/db/docker/volumes/db/logs.sql create mode 100644 autogpt_platform/db/docker/volumes/db/pooler.sql create mode 100644 autogpt_platform/db/docker/volumes/db/realtime.sql create mode 100644 autogpt_platform/db/docker/volumes/db/roles.sql create mode 100644 autogpt_platform/db/docker/volumes/db/webhooks.sql create mode 100644 autogpt_platform/db/docker/volumes/functions/hello/index.ts create mode 100644 autogpt_platform/db/docker/volumes/functions/main/index.ts create mode 100644 autogpt_platform/db/docker/volumes/logs/vector.yml create mode 100644 autogpt_platform/db/docker/volumes/pooler/pooler.exs delete mode 160000 autogpt_platform/supabase diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 853791e2c1..8f4bad86b2 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -129,30 +129,6 @@ updates: - "minor" - "patch" - - # Submodules - - package-ecosystem: "gitsubmodule" - directory: "autogpt_platform/supabase" - schedule: - interval: "weekly" - open-pull-requests-limit: 1 - target-branch: "dev" - commit-message: - prefix: "chore(platform/deps)" - prefix-development: 
"chore(platform/deps-dev)" - groups: - production-dependencies: - dependency-type: "production" - update-types: - - "minor" - - "patch" - development-dependencies: - dependency-type: "development" - update-types: - - "minor" - - "patch" - - # Docs - package-ecosystem: 'pip' directory: "docs/" diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml index f134687525..4f729995a8 100644 --- a/.github/workflows/platform-frontend-ci.yml +++ b/.github/workflows/platform-frontend-ci.yml @@ -82,7 +82,7 @@ jobs: - name: Copy default supabase .env run: | - cp ../supabase/docker/.env.example ../.env + cp ../.env.example ../.env - name: Copy backend .env run: | diff --git a/.gitmodules b/.gitmodules index 4db81f42c0..112960b5b5 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,3 @@ [submodule "classic/forge/tests/vcr_cassettes"] path = classic/forge/tests/vcr_cassettes url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes -[submodule "autogpt_platform/supabase"] - path = autogpt_platform/supabase - url = https://github.com/supabase/supabase.git diff --git a/autogpt_platform/.env.example b/autogpt_platform/.env.example new file mode 100644 index 0000000000..bb74500874 --- /dev/null +++ b/autogpt_platform/.env.example @@ -0,0 +1,123 @@ +############ +# Secrets +# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION +############ + +POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password +JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long +ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE +SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q +DASHBOARD_USERNAME=supabase +DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated +SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq +VAULT_ENC_KEY=your-encryption-key-32-chars-min + + +############ +# Database - You can change these to any PostgreSQL database that has logical replication enabled. +############ + +POSTGRES_HOST=db +POSTGRES_DB=postgres +POSTGRES_PORT=5432 +# default user is postgres + + +############ +# Supavisor -- Database pooler +############ +POOLER_PROXY_PORT_TRANSACTION=6543 +POOLER_DEFAULT_POOL_SIZE=20 +POOLER_MAX_CLIENT_CONN=100 +POOLER_TENANT_ID=your-tenant-id + + +############ +# API Proxy - Configuration for the Kong Reverse proxy. +############ + +KONG_HTTP_PORT=8000 +KONG_HTTPS_PORT=8443 + + +############ +# API - Configuration for PostgREST. +############ + +PGRST_DB_SCHEMAS=public,storage,graphql_public + + +############ +# Auth - Configuration for the GoTrue authentication server. 
+############ + +## General +SITE_URL=http://localhost:3000 +ADDITIONAL_REDIRECT_URLS= +JWT_EXPIRY=3600 +DISABLE_SIGNUP=false +API_EXTERNAL_URL=http://localhost:8000 + +## Mailer Config +MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify" +MAILER_URLPATHS_INVITE="/auth/v1/verify" +MAILER_URLPATHS_RECOVERY="/auth/v1/verify" +MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify" + +## Email auth +ENABLE_EMAIL_SIGNUP=true +ENABLE_EMAIL_AUTOCONFIRM=false +SMTP_ADMIN_EMAIL=admin@example.com +SMTP_HOST=supabase-mail +SMTP_PORT=2500 +SMTP_USER=fake_mail_user +SMTP_PASS=fake_mail_password +SMTP_SENDER_NAME=fake_sender +ENABLE_ANONYMOUS_USERS=false + +## Phone auth +ENABLE_PHONE_SIGNUP=true +ENABLE_PHONE_AUTOCONFIRM=true + + +############ +# Studio - Configuration for the Dashboard +############ + +STUDIO_DEFAULT_ORGANIZATION=Default Organization +STUDIO_DEFAULT_PROJECT=Default Project + +STUDIO_PORT=3000 +# replace if you intend to use Studio outside of localhost +SUPABASE_PUBLIC_URL=http://localhost:8000 + +# Enable webp support +IMGPROXY_ENABLE_WEBP_DETECTION=true + +# Add your OpenAI API key to enable SQL Editor Assistant +OPENAI_API_KEY= + + +############ +# Functions - Configuration for Functions +############ +# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet. +FUNCTIONS_VERIFY_JWT=false + + +############ +# Logs - Configuration for Logflare +# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction +############ + +LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key + +# Change vector.toml sinks to reflect this change +LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key + +# Docker socket location - this value will differ depending on your OS +DOCKER_SOCKET_LOCATION=/var/run/docker.sock + +# Google Cloud Project details +GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID +GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER diff --git a/autogpt_platform/README.md b/autogpt_platform/README.md index 3fbb6519b1..92ea6ec950 100644 --- a/autogpt_platform/README.md +++ b/autogpt_platform/README.md @@ -22,35 +22,29 @@ To run the AutoGPT Platform, follow these steps: 2. Run the following command: ``` - git submodule update --init --recursive --progress + cp .env.example .env ``` - This command will initialize and update the submodules in the repository. The `supabase` folder will be cloned to the root directory. + This command will copy the `.env.example` file to `.env`. You can modify the `.env` file to add your own environment variables. 3. Run the following command: - ``` - cp supabase/docker/.env.example .env - ``` - This command will copy the `.env.example` file to `.env` in the `supabase/docker` directory. You can modify the `.env` file to add your own environment variables. - -4. Run the following command: ``` docker compose up -d ``` This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode. -5. Navigate to `frontend` within the `autogpt_platform` directory: +4. Navigate to `frontend` within the `autogpt_platform` directory: ``` cd frontend ``` You will need to run your frontend application separately on your local machine. -6. Run the following command: +5. Run the following command: ``` cp .env.example .env.local ``` This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` within this folder to add your own environment variables for the frontend application. -7. Run the following command: +6. 
Run the following command: ``` npm install npm run dev @@ -61,7 +55,7 @@ To run the AutoGPT Platform, follow these steps: yarn install && yarn dev ``` -8. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend. +7. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend. ### Docker Compose Commands diff --git a/autogpt_platform/db/docker/.env.example b/autogpt_platform/db/docker/.env.example new file mode 100644 index 0000000000..bb74500874 --- /dev/null +++ b/autogpt_platform/db/docker/.env.example @@ -0,0 +1,123 @@ +############ +# Secrets +# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION +############ + +POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password +JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long +ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE +SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q +DASHBOARD_USERNAME=supabase +DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated +SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq +VAULT_ENC_KEY=your-encryption-key-32-chars-min + + +############ +# Database - You can change these to any PostgreSQL database that has logical replication enabled. +############ + +POSTGRES_HOST=db +POSTGRES_DB=postgres +POSTGRES_PORT=5432 +# default user is postgres + + +############ +# Supavisor -- Database pooler +############ +POOLER_PROXY_PORT_TRANSACTION=6543 +POOLER_DEFAULT_POOL_SIZE=20 +POOLER_MAX_CLIENT_CONN=100 +POOLER_TENANT_ID=your-tenant-id + + +############ +# API Proxy - Configuration for the Kong Reverse proxy. +############ + +KONG_HTTP_PORT=8000 +KONG_HTTPS_PORT=8443 + + +############ +# API - Configuration for PostgREST. +############ + +PGRST_DB_SCHEMAS=public,storage,graphql_public + + +############ +# Auth - Configuration for the GoTrue authentication server. +############ + +## General +SITE_URL=http://localhost:3000 +ADDITIONAL_REDIRECT_URLS= +JWT_EXPIRY=3600 +DISABLE_SIGNUP=false +API_EXTERNAL_URL=http://localhost:8000 + +## Mailer Config +MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify" +MAILER_URLPATHS_INVITE="/auth/v1/verify" +MAILER_URLPATHS_RECOVERY="/auth/v1/verify" +MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify" + +## Email auth +ENABLE_EMAIL_SIGNUP=true +ENABLE_EMAIL_AUTOCONFIRM=false +SMTP_ADMIN_EMAIL=admin@example.com +SMTP_HOST=supabase-mail +SMTP_PORT=2500 +SMTP_USER=fake_mail_user +SMTP_PASS=fake_mail_password +SMTP_SENDER_NAME=fake_sender +ENABLE_ANONYMOUS_USERS=false + +## Phone auth +ENABLE_PHONE_SIGNUP=true +ENABLE_PHONE_AUTOCONFIRM=true + + +############ +# Studio - Configuration for the Dashboard +############ + +STUDIO_DEFAULT_ORGANIZATION=Default Organization +STUDIO_DEFAULT_PROJECT=Default Project + +STUDIO_PORT=3000 +# replace if you intend to use Studio outside of localhost +SUPABASE_PUBLIC_URL=http://localhost:8000 + +# Enable webp support +IMGPROXY_ENABLE_WEBP_DETECTION=true + +# Add your OpenAI API key to enable SQL Editor Assistant +OPENAI_API_KEY= + + +############ +# Functions - Configuration for Functions +############ +# NOTE: VERIFY_JWT applies to all functions. 
Per-function VERIFY_JWT is not supported yet. +FUNCTIONS_VERIFY_JWT=false + + +############ +# Logs - Configuration for Logflare +# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction +############ + +LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key + +# Change vector.toml sinks to reflect this change +LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key + +# Docker socket location - this value will differ depending on your OS +DOCKER_SOCKET_LOCATION=/var/run/docker.sock + +# Google Cloud Project details +GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID +GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER diff --git a/autogpt_platform/db/docker/.gitignore b/autogpt_platform/db/docker/.gitignore new file mode 100644 index 0000000000..a1e9dc61e0 --- /dev/null +++ b/autogpt_platform/db/docker/.gitignore @@ -0,0 +1,5 @@ +volumes/db/data +volumes/storage +.env +test.http +docker-compose.override.yml diff --git a/autogpt_platform/db/docker/README.md b/autogpt_platform/db/docker/README.md new file mode 100644 index 0000000000..9ab215b902 --- /dev/null +++ b/autogpt_platform/db/docker/README.md @@ -0,0 +1,3 @@ +# Supabase Docker + +This is a minimal Docker Compose setup for self-hosting Supabase. Follow the steps [here](https://supabase.com/docs/guides/hosting/docker) to get started. diff --git a/autogpt_platform/db/docker/dev/data.sql b/autogpt_platform/db/docker/dev/data.sql new file mode 100644 index 0000000000..2328004184 --- /dev/null +++ b/autogpt_platform/db/docker/dev/data.sql @@ -0,0 +1,48 @@ +create table profiles ( + id uuid references auth.users not null, + updated_at timestamp with time zone, + username text unique, + avatar_url text, + website text, + + primary key (id), + unique(username), + constraint username_length check (char_length(username) >= 3) +); + +alter table profiles enable row level security; + +create policy "Public profiles are viewable by the owner." + on profiles for select + using ( auth.uid() = id ); + +create policy "Users can insert their own profile." + on profiles for insert + with check ( auth.uid() = id ); + +create policy "Users can update own profile." + on profiles for update + using ( auth.uid() = id ); + +-- Set up Realtime +begin; + drop publication if exists supabase_realtime; + create publication supabase_realtime; +commit; +alter publication supabase_realtime add table profiles; + +-- Set up Storage +insert into storage.buckets (id, name) +values ('avatars', 'avatars'); + +create policy "Avatar images are publicly accessible." + on storage.objects for select + using ( bucket_id = 'avatars' ); + +create policy "Anyone can upload an avatar." + on storage.objects for insert + with check ( bucket_id = 'avatars' ); + +create policy "Anyone can update an avatar." + on storage.objects for update + with check ( bucket_id = 'avatars' ); diff --git a/autogpt_platform/db/docker/dev/docker-compose.dev.yml b/autogpt_platform/db/docker/dev/docker-compose.dev.yml new file mode 100644 index 0000000000..ca19a0ad78 --- /dev/null +++ b/autogpt_platform/db/docker/dev/docker-compose.dev.yml @@ -0,0 +1,34 @@ +version: "3.8" + +services: + studio: + build: + context: .. 
+ dockerfile: studio/Dockerfile + target: dev + ports: + - 8082:8082 + mail: + container_name: supabase-mail + image: inbucket/inbucket:3.0.3 + ports: + - '2500:2500' # SMTP + - '9000:9000' # web interface + - '1100:1100' # POP3 + auth: + environment: + - GOTRUE_SMTP_USER= + - GOTRUE_SMTP_PASS= + meta: + ports: + - 5555:8080 + db: + restart: 'no' + volumes: + # Always use a fresh database when developing + - /var/lib/postgresql/data + # Seed data should be inserted last (alphabetical order) + - ./dev/data.sql:/docker-entrypoint-initdb.d/seed.sql + storage: + volumes: + - /var/lib/storage diff --git a/autogpt_platform/db/docker/docker-compose.s3.yml b/autogpt_platform/db/docker/docker-compose.s3.yml new file mode 100644 index 0000000000..043691a607 --- /dev/null +++ b/autogpt_platform/db/docker/docker-compose.s3.yml @@ -0,0 +1,94 @@ +services: + + minio: + image: minio/minio + ports: + - '9000:9000' + - '9001:9001' + environment: + MINIO_ROOT_USER: supa-storage + MINIO_ROOT_PASSWORD: secret1234 + command: server --console-address ":9001" /data + healthcheck: + test: [ "CMD", "curl", "-f", "http://minio:9000/minio/health/live" ] + interval: 2s + timeout: 10s + retries: 5 + volumes: + - ./volumes/storage:/data:z + + minio-createbucket: + image: minio/mc + depends_on: + minio: + condition: service_healthy + entrypoint: > + /bin/sh -c " + /usr/bin/mc alias set supa-minio http://minio:9000 supa-storage secret1234; + /usr/bin/mc mb supa-minio/stub; + exit 0; + " + + storage: + container_name: supabase-storage + image: supabase/storage-api:v1.11.13 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + rest: + condition: service_started + imgproxy: + condition: service_started + minio: + condition: service_healthy + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://localhost:5000/status" + ] + timeout: 5s + interval: 5s + retries: 3 + restart: unless-stopped + environment: + ANON_KEY: ${ANON_KEY} + SERVICE_KEY: ${SERVICE_ROLE_KEY} + POSTGREST_URL: http://rest:3000 + PGRST_JWT_SECRET: ${JWT_SECRET} + DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + FILE_SIZE_LIMIT: 52428800 + STORAGE_BACKEND: s3 + GLOBAL_S3_BUCKET: stub + GLOBAL_S3_ENDPOINT: http://minio:9000 + GLOBAL_S3_PROTOCOL: http + GLOBAL_S3_FORCE_PATH_STYLE: true + AWS_ACCESS_KEY_ID: supa-storage + AWS_SECRET_ACCESS_KEY: secret1234 + AWS_DEFAULT_REGION: stub + FILE_STORAGE_BACKEND_PATH: /var/lib/storage + TENANT_ID: stub + # TODO: https://github.com/supabase/storage-api/issues/55 + REGION: stub + ENABLE_IMAGE_TRANSFORMATION: "true" + IMGPROXY_URL: http://imgproxy:5001 + volumes: + - ./volumes/storage:/var/lib/storage:z + + imgproxy: + container_name: supabase-imgproxy + image: darthsim/imgproxy:v3.8.0 + healthcheck: + test: [ "CMD", "imgproxy", "health" ] + timeout: 5s + interval: 5s + retries: 3 + environment: + IMGPROXY_BIND: ":5001" + IMGPROXY_USE_ETAG: "true" + IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION} diff --git a/autogpt_platform/db/docker/docker-compose.yml b/autogpt_platform/db/docker/docker-compose.yml new file mode 100644 index 0000000000..ff8888d982 --- /dev/null +++ b/autogpt_platform/db/docker/docker-compose.yml @@ -0,0 +1,526 @@ +# Usage +# Start: docker compose up +# With helpers: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml up +# Stop: docker compose down +# Destroy: docker compose -f 
docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans +# Reset everything: ./reset.sh + +name: supabase + +services: + + studio: + container_name: supabase-studio + image: supabase/studio:20250224-d10db0f + restart: unless-stopped + healthcheck: + test: + [ + "CMD", + "node", + "-e", + "fetch('http://studio:3000/api/platform/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})" + ] + timeout: 10s + interval: 5s + retries: 3 + depends_on: + analytics: + condition: service_healthy + environment: + STUDIO_PG_META_URL: http://meta:8080 + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + + DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION} + DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT} + OPENAI_API_KEY: ${OPENAI_API_KEY:-} + + SUPABASE_URL: http://kong:8000 + SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL} + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} + AUTH_JWT_SECRET: ${JWT_SECRET} + + LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} + LOGFLARE_URL: http://analytics:4000 + NEXT_PUBLIC_ENABLE_LOGS: true + # Comment to use Big Query backend for analytics + NEXT_ANALYTICS_BACKEND_PROVIDER: postgres + # Uncomment to use Big Query backend for analytics + # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery + + kong: + container_name: supabase-kong + image: kong:2.8.1 + restart: unless-stopped + ports: + - ${KONG_HTTP_PORT}:8000/tcp + - ${KONG_HTTPS_PORT}:8443/tcp + volumes: + # https://github.com/supabase/supabase/issues/12661 + - ./volumes/api/kong.yml:/home/kong/temp.yml:ro + depends_on: + analytics: + condition: service_healthy + environment: + KONG_DATABASE: "off" + KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml + # https://github.com/supabase/cli/issues/14 + KONG_DNS_ORDER: LAST,A,CNAME + KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth + KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k + KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} + DASHBOARD_USERNAME: ${DASHBOARD_USERNAME} + DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD} + # https://unix.stackexchange.com/a/294837 + entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start' + + auth: + container_name: supabase-auth + image: supabase/gotrue:v2.170.0 + restart: unless-stopped + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://localhost:9999/health" + ] + timeout: 5s + interval: 5s + retries: 3 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + environment: + GOTRUE_API_HOST: 0.0.0.0 + GOTRUE_API_PORT: 9999 + API_EXTERNAL_URL: ${API_EXTERNAL_URL} + + GOTRUE_DB_DRIVER: postgres + GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + + GOTRUE_SITE_URL: ${SITE_URL} + GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS} + GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP} + + GOTRUE_JWT_ADMIN_ROLES: service_role + GOTRUE_JWT_AUD: authenticated + GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated + GOTRUE_JWT_EXP: ${JWT_EXPIRY} + GOTRUE_JWT_SECRET: ${JWT_SECRET} + + GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP} + GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS} + GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM} + + # Uncomment to bypass nonce check in ID Token flow. Commonly set to true when using Google Sign In on mobile. 
+ # GOTRUE_EXTERNAL_SKIP_NONCE_CHECK: true + + # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true + # GOTRUE_SMTP_MAX_FREQUENCY: 1s + GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL} + GOTRUE_SMTP_HOST: ${SMTP_HOST} + GOTRUE_SMTP_PORT: ${SMTP_PORT} + GOTRUE_SMTP_USER: ${SMTP_USER} + GOTRUE_SMTP_PASS: ${SMTP_PASS} + GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME} + GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE} + GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION} + GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY} + GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE} + + GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP} + GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM} + # Uncomment to enable custom access token hook. Please see: https://supabase.com/docs/guides/auth/auth-hooks for full list of hooks and additional details about custom_access_token_hook + + # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED: "true" + # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI: "pg-functions://postgres/public/custom_access_token_hook" + # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRETS: "" + + # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED: "true" + # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/mfa_verification_attempt" + + # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED: "true" + # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/password_verification_attempt" + + # GOTRUE_HOOK_SEND_SMS_ENABLED: "false" + # GOTRUE_HOOK_SEND_SMS_URI: "pg-functions://postgres/public/custom_access_token_hook" + # GOTRUE_HOOK_SEND_SMS_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n" + + # GOTRUE_HOOK_SEND_EMAIL_ENABLED: "false" + # GOTRUE_HOOK_SEND_EMAIL_URI: "http://host.docker.internal:54321/functions/v1/email_sender" + # GOTRUE_HOOK_SEND_EMAIL_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n" + + rest: + container_name: supabase-rest + image: postgrest/postgrest:v12.2.8 + restart: unless-stopped + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + environment: + PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS} + PGRST_DB_ANON_ROLE: anon + PGRST_JWT_SECRET: ${JWT_SECRET} + PGRST_DB_USE_LEGACY_GUCS: "false" + PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET} + PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY} + command: + [ + "postgrest" + ] + + realtime: + # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain + container_name: realtime-dev.supabase-realtime + image: supabase/realtime:v2.34.40 + restart: unless-stopped + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + healthcheck: + test: + [ + "CMD", + "curl", + "-sSfL", + "--head", + "-o", + "/dev/null", + "-H", + "Authorization: Bearer ${ANON_KEY}", + "http://localhost:4000/api/tenants/realtime-dev/health" + ] + timeout: 5s + interval: 5s + retries: 3 + environment: + PORT: 4000 + DB_HOST: ${POSTGRES_HOST} + DB_PORT: ${POSTGRES_PORT} + DB_USER: supabase_admin + DB_PASSWORD: ${POSTGRES_PASSWORD} + DB_NAME: ${POSTGRES_DB} + DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime' + DB_ENC_KEY: supabaserealtime + API_JWT_SECRET: ${JWT_SECRET} + SECRET_KEY_BASE: 
${SECRET_KEY_BASE} + ERL_AFLAGS: -proto_dist inet_tcp + DNS_NODES: "''" + RLIMIT_NOFILE: "10000" + APP_NAME: realtime + SEED_SELF_HOST: true + RUN_JANITOR: true + + # To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up + storage: + container_name: supabase-storage + image: supabase/storage-api:v1.19.3 + restart: unless-stopped + volumes: + - ./volumes/storage:/var/lib/storage:z + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://storage:5000/status" + ] + timeout: 5s + interval: 5s + retries: 3 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + rest: + condition: service_started + imgproxy: + condition: service_started + environment: + ANON_KEY: ${ANON_KEY} + SERVICE_KEY: ${SERVICE_ROLE_KEY} + POSTGREST_URL: http://rest:3000 + PGRST_JWT_SECRET: ${JWT_SECRET} + DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + FILE_SIZE_LIMIT: 52428800 + STORAGE_BACKEND: file + FILE_STORAGE_BACKEND_PATH: /var/lib/storage + TENANT_ID: stub + # TODO: https://github.com/supabase/storage-api/issues/55 + REGION: stub + GLOBAL_S3_BUCKET: stub + ENABLE_IMAGE_TRANSFORMATION: "true" + IMGPROXY_URL: http://imgproxy:5001 + + imgproxy: + container_name: supabase-imgproxy + image: darthsim/imgproxy:v3.8.0 + restart: unless-stopped + volumes: + - ./volumes/storage:/var/lib/storage:z + healthcheck: + test: + [ + "CMD", + "imgproxy", + "health" + ] + timeout: 5s + interval: 5s + retries: 3 + environment: + IMGPROXY_BIND: ":5001" + IMGPROXY_LOCAL_FILESYSTEM_ROOT: / + IMGPROXY_USE_ETAG: "true" + IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION} + + meta: + container_name: supabase-meta + image: supabase/postgres-meta:v0.86.1 + restart: unless-stopped + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + environment: + PG_META_PORT: 8080 + PG_META_DB_HOST: ${POSTGRES_HOST} + PG_META_DB_PORT: ${POSTGRES_PORT} + PG_META_DB_NAME: ${POSTGRES_DB} + PG_META_DB_USER: supabase_admin + PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD} + + functions: + container_name: supabase-edge-functions + image: supabase/edge-runtime:v1.67.2 + restart: unless-stopped + volumes: + - ./volumes/functions:/home/deno/functions:Z + depends_on: + analytics: + condition: service_healthy + environment: + JWT_SECRET: ${JWT_SECRET} + SUPABASE_URL: http://kong:8000 + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY} + SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + # TODO: Allow configuring VERIFY_JWT per function. 
This PR might help: https://github.com/supabase/cli/pull/786 + VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}" + command: + [ + "start", + "--main-service", + "/home/deno/functions/main" + ] + + analytics: + container_name: supabase-analytics + image: supabase/logflare:1.12.5 + restart: unless-stopped + ports: + - 4000:4000 + # Uncomment to use Big Query backend for analytics + # volumes: + # - type: bind + # source: ${PWD}/gcloud.json + # target: /opt/app/rel/logflare/bin/gcloud.json + # read_only: true + healthcheck: + test: + [ + "CMD", + "curl", + "http://localhost:4000/health" + ] + timeout: 5s + interval: 5s + retries: 10 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + environment: + LOGFLARE_NODE_HOST: 127.0.0.1 + DB_USERNAME: supabase_admin + DB_DATABASE: _supabase + DB_HOSTNAME: ${POSTGRES_HOST} + DB_PORT: ${POSTGRES_PORT} + DB_PASSWORD: ${POSTGRES_PASSWORD} + DB_SCHEMA: _analytics + LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} + LOGFLARE_SINGLE_TENANT: true + LOGFLARE_SUPABASE_MODE: true + LOGFLARE_MIN_CLUSTER_SIZE: 1 + + # Comment variables to use Big Query backend for analytics + POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase + POSTGRES_BACKEND_SCHEMA: _analytics + LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true + # Uncomment to use Big Query backend for analytics + # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID} + # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER} + + # Comment out everything below this point if you are using an external Postgres database + db: + container_name: supabase-db + image: supabase/postgres:15.8.1.049 + restart: unless-stopped + volumes: + - ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z + # Must be superuser to create event trigger + - ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z + # Must be superuser to alter reserved role + - ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z + # Initialize the database settings with JWT_SECRET and JWT_EXP + - ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z + # PGDATA directory is persisted between restarts + - ./volumes/db/data:/var/lib/postgresql/data:Z + # Changes required for internal supabase data such as _analytics + - ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z + # Changes required for Analytics support + - ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z + # Changes required for Pooler support + - ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z + # Use named volume to persist pgsodium decryption key between restarts + - db-config:/etc/postgresql-custom + healthcheck: + test: + [ + "CMD", + "pg_isready", + "-U", + "postgres", + "-h", + "localhost" + ] + interval: 5s + timeout: 5s + retries: 10 + depends_on: + vector: + condition: service_healthy + environment: + POSTGRES_HOST: /var/run/postgresql + PGPORT: ${POSTGRES_PORT} + POSTGRES_PORT: ${POSTGRES_PORT} + PGPASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + PGDATABASE: ${POSTGRES_DB} + POSTGRES_DB: ${POSTGRES_DB} + JWT_SECRET: ${JWT_SECRET} + JWT_EXP: ${JWT_EXPIRY} + command: + [ + "postgres", + "-c", + "config_file=/etc/postgresql/postgresql.conf", + "-c", + "log_min_messages=fatal" # prevents Realtime polling queries from appearing in logs + ] + + vector: + container_name: supabase-vector + 
image: timberio/vector:0.28.1-alpine + restart: unless-stopped + volumes: + - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro + - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://vector:9001/health" + ] + timeout: 5s + interval: 5s + retries: 3 + environment: + LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} + command: + [ + "--config", + "/etc/vector/vector.yml" + ] + + # Update the DATABASE_URL if you are using an external Postgres database + supavisor: + container_name: supabase-pooler + image: supabase/supavisor:2.4.12 + restart: unless-stopped + ports: + - ${POSTGRES_PORT}:5432 + - ${POOLER_PROXY_PORT_TRANSACTION}:6543 + volumes: + - ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro + healthcheck: + test: + [ + "CMD", + "curl", + "-sSfL", + "--head", + "-o", + "/dev/null", + "http://127.0.0.1:4000/api/health" + ] + interval: 10s + timeout: 5s + retries: 5 + depends_on: + db: + condition: service_healthy + analytics: + condition: service_healthy + environment: + PORT: 4000 + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT}/_supabase + CLUSTER_POSTGRES: true + SECRET_KEY_BASE: ${SECRET_KEY_BASE} + VAULT_ENC_KEY: ${VAULT_ENC_KEY} + API_JWT_SECRET: ${JWT_SECRET} + METRICS_JWT_SECRET: ${JWT_SECRET} + REGION: local + ERL_AFLAGS: -proto_dist inet_tcp + POOLER_TENANT_ID: ${POOLER_TENANT_ID} + POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE} + POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN} + POOLER_POOL_MODE: transaction + command: + [ + "/bin/sh", + "-c", + "/app/bin/migrate && /app/bin/supavisor eval \"$$(cat /etc/pooler/pooler.exs)\" && /app/bin/server" + ] + +volumes: + db-config: diff --git a/autogpt_platform/db/docker/reset.sh b/autogpt_platform/db/docker/reset.sh new file mode 100755 index 0000000000..d5f3a41dae --- /dev/null +++ b/autogpt_platform/db/docker/reset.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +echo "WARNING: This will remove all containers and container data, and will reset the .env file. This action cannot be undone!" +read -p "Are you sure you want to proceed? (y/N) " -n 1 -r +echo # Move to a new line +if [[ ! $REPLY =~ ^[Yy]$ ]] +then + echo "Operation cancelled." + exit 1 +fi + +echo "Stopping and removing all containers..." +docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans + +echo "Cleaning up bind-mounted directories..." +BIND_MOUNTS=( + "./volumes/db/data" +) + +for DIR in "${BIND_MOUNTS[@]}"; do + if [ -d "$DIR" ]; then + echo "Deleting $DIR..." + rm -rf "$DIR" + else + echo "Directory $DIR does not exist. Skipping bind mount deletion step..." + fi +done + +echo "Resetting .env file..." +if [ -f ".env" ]; then + echo "Removing existing .env file..." + rm -f .env +else + echo "No .env file found. Skipping .env removal step..." +fi + +if [ -f ".env.example" ]; then + echo "Copying .env.example to .env..." + cp .env.example .env +else + echo ".env.example file not found. Skipping .env reset step..." +fi + +echo "Cleanup complete!" 
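With the `supavisor` pooler above publishing `${POSTGRES_PORT}` (session mode) and `${POOLER_PROXY_PORT_TRANSACTION}` (transaction mode), application code connects to Postgres through the pooler rather than to the `db` container directly. A minimal sketch, assuming the default ports from `.env` and Supavisor's usual `postgres.<POOLER_TENANT_ID>` username convention:

```python
# Sketch: connect through the Supavisor transaction pooler. The port and the
# "postgres.<tenant>" username format are assumptions based on the POOLER_*
# variables in .env; adjust them to match your configuration.
import os
import psycopg2

conn = psycopg2.connect(
    host="localhost",
    port=6543,  # ${POOLER_PROXY_PORT_TRANSACTION}
    user=f"postgres.{os.environ['POOLER_TENANT_ID']}",
    password=os.environ["POSTGRES_PASSWORD"],
    dbname=os.environ.get("POSTGRES_DB", "postgres"),
)
with conn, conn.cursor() as cur:
    cur.execute("SELECT now()")
    print(cur.fetchone())
conn.close()
```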
\ No newline at end of file diff --git a/autogpt_platform/db/docker/volumes/api/kong.yml b/autogpt_platform/db/docker/volumes/api/kong.yml new file mode 100644 index 0000000000..7abf42534c --- /dev/null +++ b/autogpt_platform/db/docker/volumes/api/kong.yml @@ -0,0 +1,241 @@ +_format_version: '2.1' +_transform: true + +### +### Consumers / Users +### +consumers: + - username: DASHBOARD + - username: anon + keyauth_credentials: + - key: $SUPABASE_ANON_KEY + - username: service_role + keyauth_credentials: + - key: $SUPABASE_SERVICE_KEY + +### +### Access Control List +### +acls: + - consumer: anon + group: anon + - consumer: service_role + group: admin + +### +### Dashboard credentials +### +basicauth_credentials: + - consumer: DASHBOARD + username: $DASHBOARD_USERNAME + password: $DASHBOARD_PASSWORD + +### +### API Routes +### +services: + ## Open Auth routes + - name: auth-v1-open + url: http://auth:9999/verify + routes: + - name: auth-v1-open + strip_path: true + paths: + - /auth/v1/verify + plugins: + - name: cors + - name: auth-v1-open-callback + url: http://auth:9999/callback + routes: + - name: auth-v1-open-callback + strip_path: true + paths: + - /auth/v1/callback + plugins: + - name: cors + - name: auth-v1-open-authorize + url: http://auth:9999/authorize + routes: + - name: auth-v1-open-authorize + strip_path: true + paths: + - /auth/v1/authorize + plugins: + - name: cors + + ## Secure Auth routes + - name: auth-v1 + _comment: 'GoTrue: /auth/v1/* -> http://auth:9999/*' + url: http://auth:9999/ + routes: + - name: auth-v1-all + strip_path: true + paths: + - /auth/v1/ + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: false + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + + ## Secure REST routes + - name: rest-v1 + _comment: 'PostgREST: /rest/v1/* -> http://rest:3000/*' + url: http://rest:3000/ + routes: + - name: rest-v1-all + strip_path: true + paths: + - /rest/v1/ + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: true + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + + ## Secure GraphQL routes + - name: graphql-v1 + _comment: 'PostgREST: /graphql/v1/* -> http://rest:3000/rpc/graphql' + url: http://rest:3000/rpc/graphql + routes: + - name: graphql-v1-all + strip_path: true + paths: + - /graphql/v1 + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: true + - name: request-transformer + config: + add: + headers: + - Content-Profile:graphql_public + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + + ## Secure Realtime routes + - name: realtime-v1-ws + _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*' + url: http://realtime-dev.supabase-realtime:4000/socket + protocol: ws + routes: + - name: realtime-v1-ws + strip_path: true + paths: + - /realtime/v1/ + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: false + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + - name: realtime-v1-rest + _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*' + url: http://realtime-dev.supabase-realtime:4000/api + protocol: http + routes: + - name: realtime-v1-rest + strip_path: true + paths: + - /realtime/v1/api + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: false + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + ## Storage routes: the storage server manages its own auth + - name: storage-v1 + _comment: 
'Storage: /storage/v1/* -> http://storage:5000/*' + url: http://storage:5000/ + routes: + - name: storage-v1-all + strip_path: true + paths: + - /storage/v1/ + plugins: + - name: cors + + ## Edge Functions routes + - name: functions-v1 + _comment: 'Edge Functions: /functions/v1/* -> http://functions:9000/*' + url: http://functions:9000/ + routes: + - name: functions-v1-all + strip_path: true + paths: + - /functions/v1/ + plugins: + - name: cors + + ## Analytics routes + - name: analytics-v1 + _comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*' + url: http://analytics:4000/ + routes: + - name: analytics-v1-all + strip_path: true + paths: + - /analytics/v1/ + + ## Secure Database routes + - name: meta + _comment: 'pg-meta: /pg/* -> http://pg-meta:8080/*' + url: http://meta:8080/ + routes: + - name: meta-all + strip_path: true + paths: + - /pg/ + plugins: + - name: key-auth + config: + hide_credentials: false + - name: acl + config: + hide_groups_header: true + allow: + - admin + + ## Protected Dashboard - catch all remaining routes + - name: dashboard + _comment: 'Studio: /* -> http://studio:3000/*' + url: http://studio:3000/ + routes: + - name: dashboard-all + strip_path: true + paths: + - / + plugins: + - name: cors + - name: basic-auth + config: + hide_credentials: true diff --git a/autogpt_platform/db/docker/volumes/db/_supabase.sql b/autogpt_platform/db/docker/volumes/db/_supabase.sql new file mode 100644 index 0000000000..6236ae1bcc --- /dev/null +++ b/autogpt_platform/db/docker/volumes/db/_supabase.sql @@ -0,0 +1,3 @@ +\set pguser `echo "$POSTGRES_USER"` + +CREATE DATABASE _supabase WITH OWNER :pguser; diff --git a/autogpt_platform/db/docker/volumes/db/init/data.sql b/autogpt_platform/db/docker/volumes/db/init/data.sql new file mode 100755 index 0000000000..e69de29bb2 diff --git a/autogpt_platform/db/docker/volumes/db/jwt.sql b/autogpt_platform/db/docker/volumes/db/jwt.sql new file mode 100644 index 0000000000..cfd3b16028 --- /dev/null +++ b/autogpt_platform/db/docker/volumes/db/jwt.sql @@ -0,0 +1,5 @@ +\set jwt_secret `echo "$JWT_SECRET"` +\set jwt_exp `echo "$JWT_EXP"` + +ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret'; +ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp'; diff --git a/autogpt_platform/db/docker/volumes/db/logs.sql b/autogpt_platform/db/docker/volumes/db/logs.sql new file mode 100644 index 0000000000..255c0f407b --- /dev/null +++ b/autogpt_platform/db/docker/volumes/db/logs.sql @@ -0,0 +1,6 @@ +\set pguser `echo "$POSTGRES_USER"` + +\c _supabase +create schema if not exists _analytics; +alter schema _analytics owner to :pguser; +\c postgres diff --git a/autogpt_platform/db/docker/volumes/db/pooler.sql b/autogpt_platform/db/docker/volumes/db/pooler.sql new file mode 100644 index 0000000000..162c5b96aa --- /dev/null +++ b/autogpt_platform/db/docker/volumes/db/pooler.sql @@ -0,0 +1,6 @@ +\set pguser `echo "$POSTGRES_USER"` + +\c _supabase +create schema if not exists _supavisor; +alter schema _supavisor owner to :pguser; +\c postgres diff --git a/autogpt_platform/db/docker/volumes/db/realtime.sql b/autogpt_platform/db/docker/volumes/db/realtime.sql new file mode 100644 index 0000000000..4d4b9ffb91 --- /dev/null +++ b/autogpt_platform/db/docker/volumes/db/realtime.sql @@ -0,0 +1,4 @@ +\set pguser `echo "$POSTGRES_USER"` + +create schema if not exists _realtime; +alter schema _realtime owner to :pguser; diff --git a/autogpt_platform/db/docker/volumes/db/roles.sql b/autogpt_platform/db/docker/volumes/db/roles.sql new file 
mode 100644 index 0000000000..8f7161a6db --- /dev/null +++ b/autogpt_platform/db/docker/volumes/db/roles.sql @@ -0,0 +1,8 @@ +-- NOTE: change to your own passwords for production environments +\set pgpass `echo "$POSTGRES_PASSWORD"` + +ALTER USER authenticator WITH PASSWORD :'pgpass'; +ALTER USER pgbouncer WITH PASSWORD :'pgpass'; +ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass'; +ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass'; +ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass'; diff --git a/autogpt_platform/db/docker/volumes/db/webhooks.sql b/autogpt_platform/db/docker/volumes/db/webhooks.sql new file mode 100644 index 0000000000..5837b86188 --- /dev/null +++ b/autogpt_platform/db/docker/volumes/db/webhooks.sql @@ -0,0 +1,208 @@ +BEGIN; + -- Create pg_net extension + CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions; + -- Create supabase_functions schema + CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin; + GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role; + ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role; + ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role; + ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role; + -- supabase_functions.migrations definition + CREATE TABLE supabase_functions.migrations ( + version text PRIMARY KEY, + inserted_at timestamptz NOT NULL DEFAULT NOW() + ); + -- Initial supabase_functions migration + INSERT INTO supabase_functions.migrations (version) VALUES ('initial'); + -- supabase_functions.hooks definition + CREATE TABLE supabase_functions.hooks ( + id bigserial PRIMARY KEY, + hook_table_id integer NOT NULL, + hook_name text NOT NULL, + created_at timestamptz NOT NULL DEFAULT NOW(), + request_id bigint + ); + CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id); + CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name); + COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.'; + CREATE FUNCTION supabase_functions.http_request() + RETURNS trigger + LANGUAGE plpgsql + AS $function$ + DECLARE + request_id bigint; + payload jsonb; + url text := TG_ARGV[0]::text; + method text := TG_ARGV[1]::text; + headers jsonb DEFAULT '{}'::jsonb; + params jsonb DEFAULT '{}'::jsonb; + timeout_ms integer DEFAULT 1000; + BEGIN + IF url IS NULL OR url = 'null' THEN + RAISE EXCEPTION 'url argument is missing'; + END IF; + + IF method IS NULL OR method = 'null' THEN + RAISE EXCEPTION 'method argument is missing'; + END IF; + + IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN + headers = '{"Content-Type": "application/json"}'::jsonb; + ELSE + headers = TG_ARGV[2]::jsonb; + END IF; + + IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN + params = '{}'::jsonb; + ELSE + params = TG_ARGV[3]::jsonb; + END IF; + + IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN + timeout_ms = 1000; + ELSE + timeout_ms = TG_ARGV[4]::integer; + END IF; + + CASE + WHEN method = 'GET' THEN + SELECT http_get INTO request_id FROM net.http_get( + url, + params, + headers, + timeout_ms + ); + WHEN method = 'POST' THEN + payload = jsonb_build_object( + 'old_record', OLD, + 'record', NEW, + 'type', TG_OP, + 'table', TG_TABLE_NAME, + 
'schema', TG_TABLE_SCHEMA + ); + + SELECT http_post INTO request_id FROM net.http_post( + url, + payload, + params, + headers, + timeout_ms + ); + ELSE + RAISE EXCEPTION 'method argument % is invalid', method; + END CASE; + + INSERT INTO supabase_functions.hooks + (hook_table_id, hook_name, request_id) + VALUES + (TG_RELID, TG_NAME, request_id); + + RETURN NEW; + END + $function$; + -- Supabase super admin + DO + $$ + BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_functions_admin' + ) + THEN + CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + END + $$; + GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin; + GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin; + ALTER USER supabase_functions_admin SET search_path = "supabase_functions"; + ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin; + ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin; + ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin; + GRANT supabase_functions_admin TO postgres; + -- Remove unused supabase_pg_net_admin role + DO + $$ + BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_pg_net_admin' + ) + THEN + REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin; + DROP OWNED BY supabase_pg_net_admin; + DROP ROLE supabase_pg_net_admin; + END IF; + END + $$; + -- pg_net grants when extension is already enabled + DO + $$ + BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_extension + WHERE extname = 'pg_net' + ) + THEN + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END + $$; + -- Event trigger for pg_net + CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access() + RETURNS event_trigger + LANGUAGE plpgsql + AS $$ + BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER 
function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END; + $$; + COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net'; + DO + $$ + BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM pg_event_trigger + WHERE evtname = 'issue_pg_net_access' + ) THEN + CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION') + EXECUTE PROCEDURE extensions.grant_pg_net_access(); + END IF; + END + $$; + INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants'); + ALTER function supabase_functions.http_request() SECURITY DEFINER; + ALTER function supabase_functions.http_request() SET search_path = supabase_functions; + REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC; + GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role; +COMMIT; diff --git a/autogpt_platform/db/docker/volumes/functions/hello/index.ts b/autogpt_platform/db/docker/volumes/functions/hello/index.ts new file mode 100644 index 0000000000..f1e20b90e0 --- /dev/null +++ b/autogpt_platform/db/docker/volumes/functions/hello/index.ts @@ -0,0 +1,16 @@ +// Follow this setup guide to integrate the Deno language server with your editor: +// https://deno.land/manual/getting_started/setup_your_environment +// This enables autocomplete, go to definition, etc. 
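The `supabase_functions.http_request()` trigger function defined in `webhooks.sql` above is what database webhooks attach to: its trigger arguments are the target URL, HTTP method, headers, params and timeout, and it queues the request through `pg_net`. As an illustrative sketch only (the table name and target URL are hypothetical, not part of this patch):

```python
# Sketch: attach supabase_functions.http_request() to a table so that inserts
# POST a JSON payload (old_record/record/type/table/schema) via pg_net.
# "public.notes" and the target URL are placeholders.
import os
import psycopg2

DDL = """
CREATE TRIGGER notes_webhook
AFTER INSERT ON public.notes
FOR EACH ROW
EXECUTE FUNCTION supabase_functions.http_request(
  'http://functions:9000/main/on-note-insert',  -- url        (TG_ARGV[0])
  'POST',                                        -- method     (TG_ARGV[1])
  '{"Content-Type": "application/json"}',        -- headers    (TG_ARGV[2])
  '{}',                                          -- params     (TG_ARGV[3])
  '5000'                                         -- timeout_ms (TG_ARGV[4])
);
"""

conn = psycopg2.connect(
    host="localhost",
    port=int(os.environ.get("POSTGRES_PORT", 5432)),
    user="postgres",
    password=os.environ["POSTGRES_PASSWORD"],
    dbname=os.environ.get("POSTGRES_DB", "postgres"),
)
with conn, conn.cursor() as cur:
    cur.execute(DDL)
conn.close()
```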
+ +import { serve } from "https://deno.land/std@0.177.1/http/server.ts" + +serve(async () => { + return new Response( + `"Hello from Edge Functions!"`, + { headers: { "Content-Type": "application/json" } }, + ) +}) + +// To invoke: +// curl 'http://localhost:/functions/v1/hello' \ +// --header 'Authorization: Bearer ' diff --git a/autogpt_platform/db/docker/volumes/functions/main/index.ts b/autogpt_platform/db/docker/volumes/functions/main/index.ts new file mode 100644 index 0000000000..a094010b9d --- /dev/null +++ b/autogpt_platform/db/docker/volumes/functions/main/index.ts @@ -0,0 +1,94 @@ +import { serve } from 'https://deno.land/std@0.131.0/http/server.ts' +import * as jose from 'https://deno.land/x/jose@v4.14.4/index.ts' + +console.log('main function started') + +const JWT_SECRET = Deno.env.get('JWT_SECRET') +const VERIFY_JWT = Deno.env.get('VERIFY_JWT') === 'true' + +function getAuthToken(req: Request) { + const authHeader = req.headers.get('authorization') + if (!authHeader) { + throw new Error('Missing authorization header') + } + const [bearer, token] = authHeader.split(' ') + if (bearer !== 'Bearer') { + throw new Error(`Auth header is not 'Bearer {token}'`) + } + return token +} + +async function verifyJWT(jwt: string): Promise { + const encoder = new TextEncoder() + const secretKey = encoder.encode(JWT_SECRET) + try { + await jose.jwtVerify(jwt, secretKey) + } catch (err) { + console.error(err) + return false + } + return true +} + +serve(async (req: Request) => { + if (req.method !== 'OPTIONS' && VERIFY_JWT) { + try { + const token = getAuthToken(req) + const isValidJWT = await verifyJWT(token) + + if (!isValidJWT) { + return new Response(JSON.stringify({ msg: 'Invalid JWT' }), { + status: 401, + headers: { 'Content-Type': 'application/json' }, + }) + } + } catch (e) { + console.error(e) + return new Response(JSON.stringify({ msg: e.toString() }), { + status: 401, + headers: { 'Content-Type': 'application/json' }, + }) + } + } + + const url = new URL(req.url) + const { pathname } = url + const path_parts = pathname.split('/') + const service_name = path_parts[1] + + if (!service_name || service_name === '') { + const error = { msg: 'missing function name in request' } + return new Response(JSON.stringify(error), { + status: 400, + headers: { 'Content-Type': 'application/json' }, + }) + } + + const servicePath = `/home/deno/functions/${service_name}` + console.error(`serving the request with ${servicePath}`) + + const memoryLimitMb = 150 + const workerTimeoutMs = 1 * 60 * 1000 + const noModuleCache = false + const importMapPath = null + const envVarsObj = Deno.env.toObject() + const envVars = Object.keys(envVarsObj).map((k) => [k, envVarsObj[k]]) + + try { + const worker = await EdgeRuntime.userWorkers.create({ + servicePath, + memoryLimitMb, + workerTimeoutMs, + noModuleCache, + importMapPath, + envVars, + }) + return await worker.fetch(req) + } catch (e) { + const error = { msg: e.toString() } + return new Response(JSON.stringify(error), { + status: 500, + headers: { 'Content-Type': 'application/json' }, + }) + } +}) diff --git a/autogpt_platform/db/docker/volumes/logs/vector.yml b/autogpt_platform/db/docker/volumes/logs/vector.yml new file mode 100644 index 0000000000..cce46df43d --- /dev/null +++ b/autogpt_platform/db/docker/volumes/logs/vector.yml @@ -0,0 +1,232 @@ +api: + enabled: true + address: 0.0.0.0:9001 + +sources: + docker_host: + type: docker_logs + exclude_containers: + - supabase-vector + +transforms: + project_logs: + type: remap + inputs: + - docker_host + 
source: |- + .project = "default" + .event_message = del(.message) + .appname = del(.container_name) + del(.container_created_at) + del(.container_id) + del(.source_type) + del(.stream) + del(.label) + del(.image) + del(.host) + del(.stream) + router: + type: route + inputs: + - project_logs + route: + kong: '.appname == "supabase-kong"' + auth: '.appname == "supabase-auth"' + rest: '.appname == "supabase-rest"' + realtime: '.appname == "supabase-realtime"' + storage: '.appname == "supabase-storage"' + functions: '.appname == "supabase-functions"' + db: '.appname == "supabase-db"' + # Ignores non nginx errors since they are related with kong booting up + kong_logs: + type: remap + inputs: + - router.kong + source: |- + req, err = parse_nginx_log(.event_message, "combined") + if err == null { + .timestamp = req.timestamp + .metadata.request.headers.referer = req.referer + .metadata.request.headers.user_agent = req.agent + .metadata.request.headers.cf_connecting_ip = req.client + .metadata.request.method = req.method + .metadata.request.path = req.path + .metadata.request.protocol = req.protocol + .metadata.response.status_code = req.status + } + if err != null { + abort + } + # Ignores non nginx errors since they are related with kong booting up + kong_err: + type: remap + inputs: + - router.kong + source: |- + .metadata.request.method = "GET" + .metadata.response.status_code = 200 + parsed, err = parse_nginx_log(.event_message, "error") + if err == null { + .timestamp = parsed.timestamp + .severity = parsed.severity + .metadata.request.host = parsed.host + .metadata.request.headers.cf_connecting_ip = parsed.client + url, err = split(parsed.request, " ") + if err == null { + .metadata.request.method = url[0] + .metadata.request.path = url[1] + .metadata.request.protocol = url[2] + } + } + if err != null { + abort + } + # Gotrue logs are structured json strings which frontend parses directly. But we keep metadata for consistency. + auth_logs: + type: remap + inputs: + - router.auth + source: |- + parsed, err = parse_json(.event_message) + if err == null { + .metadata.timestamp = parsed.time + .metadata = merge!(.metadata, parsed) + } + # PostgREST logs are structured so we separate timestamp from message using regex + rest_logs: + type: remap + inputs: + - router.rest + source: |- + parsed, err = parse_regex(.event_message, r'^(?P
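For reference, the `main` edge function shown earlier verifies the `Authorization: Bearer` JWT (when `VERIFY_JWT` is true) and then routes `/<function-name>` to the matching directory under `/home/deno/functions`; through Kong the public path is `/functions/v1/<name>`. A hedged sketch of invoking the bundled `hello` function (host and port are placeholders; the anon key works as a bearer token because it is itself a JWT signed with `${JWT_SECRET}`):

```python
# Sketch: call the "hello" edge function through Kong's /functions/v1 route.
# "localhost:8000" stands in for ${KONG_HTTP_PORT}.
import os
import requests

KONG_URL = "http://localhost:8000"
ANON_KEY = os.environ["ANON_KEY"]

resp = requests.post(
    f"{KONG_URL}/functions/v1/hello",
    headers={"Authorization": f"Bearer {ANON_KEY}"},
    timeout=10,
)
print(resp.status_code, resp.text)  # expected body: "Hello from Edge Functions!"
```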