From fa567991b3fee40fd140bc6b59744a2ccc5bdd19 Mon Sep 17 00:00:00 2001 From: "seer-by-sentry[bot]" <157164994+seer-by-sentry[bot]@users.noreply.github.com> Date: Tue, 2 Dec 2025 19:00:43 +0000 Subject: [PATCH 01/58] fix(backend): Handle HTTP errors in HTTP block by returning response objects (#11515) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes πŸ—οΈ - Modify the HTTP block to handle HTTP errors (4xx, 5xx) by returning response objects instead of raising exceptions. - This allows proper handling of client_error and server_error outputs. Fixes [AUTOGPT-SERVER-6VP](https://sentry.io/organizations/significant-gravitas/issues/7023985892/). The issue was that: HTTP errors are raised as exceptions by `Requests` default behavior, bypassing the block's intended error output handling, resulting in `BlockUnknownError`. This fix was generated by Seer in Sentry, triggered by Nicholas Tindle. πŸ‘οΈ Run ID: 4902617 Not quite right? [Click here to continue debugging with Seer.](https://sentry.io/organizations/significant-gravitas/issues/7023985892/?seerDrawer=true) ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Tested with a service that will return 4XX and 5XX errors to make sure the correct paths are followed --- > [!NOTE] > HTTP block now returns 4xx/5xx responses instead of raising, and Requests gains retry_max_attempts with last-result handling. > > - **Backend** > - **HTTP block (`backend/blocks/http.py`)**: > - Use `Requests(raise_for_status=False, retry_max_attempts=1)` so 4xx/5xx return response objects and route to `client_error`/`server_error` outputs. > - **HTTP client util (`backend/util/request.py`)**: > - Add `retry_max_attempts` option with `stop_after_attempt` and `_return_last_result` to return the final response when retries stop. > - Build `tenacity` retry config dynamically in `Requests.request()`; validate `retry_max_attempts >= 1` when provided. > > Written by [Cursor Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit fccae61c2624b5d3e3942a70352e94fe271feee9. This will update automatically on new commits. Configure [here](https://cursor.com/dashboard?tab=bugbot). 
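For reference, a minimal standalone sketch of the retry wiring described in the note above, using tenacity's `stop_after_attempt` and `retry_error_callback`. This is not the actual `backend/util/request.py` implementation; `FakeResponse` and `fetch` are hypothetical stand-ins for the real `Response` type and HTTP call, used only to show how the final attempt's result is returned instead of raising `RetryError`:

```python
from dataclasses import dataclass

from tenacity import (
    RetryCallState,
    retry,
    retry_if_result,
    stop_after_attempt,
    wait_exponential_jitter,
)

THROTTLE_RETRY_STATUS_CODES = {429, 503}


@dataclass
class FakeResponse:  # hypothetical stand-in for the real Response type
    status: int


def fetch() -> FakeResponse:  # hypothetical stand-in for the real request call
    return FakeResponse(status=503)


def _return_last_result(retry_state: RetryCallState) -> FakeResponse:
    # When retrying stops, surface the final attempt's outcome instead of
    # letting tenacity raise RetryError.
    if retry_state.outcome is None:
        raise RuntimeError("Retry state is missing an outcome.")
    exc = retry_state.outcome.exception()
    if exc is not None:
        raise exc
    return retry_state.outcome.result()


@retry(
    wait=wait_exponential_jitter(max=300.0),
    retry=retry_if_result(lambda r: r.status in THROTTLE_RETRY_STATUS_CODES),
    stop=stop_after_attempt(1),  # retry_max_attempts=1: give up after one attempt
    retry_error_callback=_return_last_result,
    reraise=True,
)
def make_request() -> FakeResponse:
    return fetch()


# With one attempt allowed, a throttled 503 is returned to the caller
# rather than raised, mirroring the block's new error-output handling.
print(make_request().status)  # 503
```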
--------- Co-authored-by: seer-by-sentry[bot] <157164994+seer-by-sentry[bot]@users.noreply.github.com> Co-authored-by: Cursor Agent Co-authored-by: nicholas.tindle --- .../backend/backend/blocks/http.py | 8 +++- .../backend/backend/util/request.py | 42 ++++++++++++++++--- 2 files changed, 43 insertions(+), 7 deletions(-) diff --git a/autogpt_platform/backend/backend/blocks/http.py b/autogpt_platform/backend/backend/blocks/http.py index 25cf775466..9b27a3b129 100644 --- a/autogpt_platform/backend/backend/blocks/http.py +++ b/autogpt_platform/backend/backend/blocks/http.py @@ -184,7 +184,13 @@ class SendWebRequestBlock(Block): ) # ─── Execute request ───────────────────────────────────────── - response = await Requests().request( + # Use raise_for_status=False so HTTP errors (4xx, 5xx) are returned + # as response objects instead of raising exceptions, allowing proper + # handling via client_error and server_error outputs + response = await Requests( + raise_for_status=False, + retry_max_attempts=1, # allow callers to handle HTTP errors immediately + ).request( input_data.method.value, input_data.url, headers=input_data.headers, diff --git a/autogpt_platform/backend/backend/util/request.py b/autogpt_platform/backend/backend/util/request.py index 6e5c6a46ae..4887cfa02d 100644 --- a/autogpt_platform/backend/backend/util/request.py +++ b/autogpt_platform/backend/backend/util/request.py @@ -11,7 +11,13 @@ from urllib.parse import quote, urljoin, urlparse import aiohttp import idna from aiohttp import FormData, abc -from tenacity import retry, retry_if_result, wait_exponential_jitter +from tenacity import ( + RetryCallState, + retry, + retry_if_result, + stop_after_attempt, + wait_exponential_jitter, +) from backend.util.json import loads @@ -285,6 +291,20 @@ class Response: return 200 <= self.status < 300 +def _return_last_result(retry_state: RetryCallState) -> "Response": + """ + Ensure the final attempt's response is returned when retrying stops. 
+ """ + if retry_state.outcome is None: + raise RuntimeError("Retry state is missing an outcome.") + + exception = retry_state.outcome.exception() + if exception is not None: + raise exception + + return retry_state.outcome.result() + + class Requests: """ A wrapper around an aiohttp ClientSession that validates URLs before @@ -299,6 +319,7 @@ class Requests: extra_url_validator: Callable[[URL], URL] | None = None, extra_headers: dict[str, str] | None = None, retry_max_wait: float = 300.0, + retry_max_attempts: int | None = None, ): self.trusted_origins = [] for url in trusted_origins or []: @@ -311,6 +332,9 @@ class Requests: self.extra_url_validator = extra_url_validator self.extra_headers = extra_headers self.retry_max_wait = retry_max_wait + if retry_max_attempts is not None and retry_max_attempts < 1: + raise ValueError("retry_max_attempts must be None or >= 1") + self.retry_max_attempts = retry_max_attempts async def request( self, @@ -325,11 +349,17 @@ class Requests: max_redirects: int = 10, **kwargs, ) -> Response: - @retry( - wait=wait_exponential_jitter(max=self.retry_max_wait), - retry=retry_if_result(lambda r: r.status in THROTTLE_RETRY_STATUS_CODES), - reraise=True, - ) + retry_kwargs: dict[str, Any] = { + "wait": wait_exponential_jitter(max=self.retry_max_wait), + "retry": retry_if_result(lambda r: r.status in THROTTLE_RETRY_STATUS_CODES), + "reraise": True, + } + + if self.retry_max_attempts is not None: + retry_kwargs["stop"] = stop_after_attempt(self.retry_max_attempts) + retry_kwargs["retry_error_callback"] = _return_last_result + + @retry(**retry_kwargs) async def _make_request() -> Response: return await self._request( method=method, From 233ff40a24dd90952e6f0eb34ef714d40579eabf Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Wed, 3 Dec 2025 07:57:55 +0100 Subject: [PATCH 02/58] fix(frontend/marketplace): Fix rendering creator links without schema (#11516) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - [OPEN-2871: TypeError: URL constructor: www.agpt.co is not a valid URL.](https://linear.app/autogpt/issue/OPEN-2871/typeerror-url-constructor-wwwagptco-is-not-a-valid-url) - [Sentry Issue BUILDER-56D: TypeError: URL constructor: www.agpt.co is not a valid URL.](https://significant-gravitas.sentry.io/issues/7081476631/) ### Changes πŸ—οΈ - Amend URL handling in `CreatorLinks` to correctly handle URLs with implicit schema ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - Trivial change, CI is sufficient --- .../components/CreatorLinks/CreatorLinks.tsx | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/autogpt_platform/frontend/src/app/(platform)/marketplace/components/CreatorLinks/CreatorLinks.tsx b/autogpt_platform/frontend/src/app/(platform)/marketplace/components/CreatorLinks/CreatorLinks.tsx index 5bbfa9a939..7466c3eb1f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/marketplace/components/CreatorLinks/CreatorLinks.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/marketplace/components/CreatorLinks/CreatorLinks.tsx @@ -5,6 +5,22 @@ interface CreatorLinksProps { links: string[]; } +function normalizeURL(url: string): string { + if (!url.startsWith("http://") && !url.startsWith("https://")) { + return `https://${url}`; + } + return url; +} + +function getHostnameFromURL(url: string): string { + try { + const normalizedURL 
= normalizeURL(url); + return new URL(normalizedURL).hostname.replace("www.", ""); + } catch { + return url.replace(/^(https?:\/\/)?(www\.)?/, ""); + } +} + export const CreatorLinks = ({ links }: CreatorLinksProps) => { if (!links || links.length === 0) { return null; @@ -12,13 +28,13 @@ export const CreatorLinks = ({ links }: CreatorLinksProps) => { const renderLinkButton = (url: string) => (
- {new URL(url).hostname.replace("www.", "")} + {getHostnameFromURL(url)}
{getIconForSocial(url, { From e62a8fb572643b41ea762f0bd9bec187d48b023b Mon Sep 17 00:00:00 2001 From: Ubbe Date: Wed, 3 Dec 2025 14:33:18 +0700 Subject: [PATCH 03/58] feat(frontend): design updates on new library page... (#11522) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes πŸ—οΈ ### Design updates Design updates on the new Library page. New empty views with illustration and overall changes on the sidebar and selected run sections... Screenshot 2025-12-03 at 14 03 45 Screenshot 2025-12-03 at 14 03 52 Screenshot 2025-12-03 at 14 03 57 Screenshot 2025-12-03 at 14 04 07 ### Architecture - Make selected tabs/items synced with the URL via `?activeTab=` and `?activeItem=`, so it is easy and predictable to change their state... - Some minor updates on the design system I missed on previous PRs ... ## Checklist πŸ“‹ ### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Run the app locally and check the new page ( still wip ) --- .../NewAgentLibraryView.tsx | 137 ++++---- .../components/other/EmptyAgentRuns.tsx | 106 ------ .../components/other/EmptySchedules.tsx | 323 ++++++++++++++++++ .../other/EmptySchedulesIllustration.tsx | 314 +++++++++++++++++ .../components/other/EmptyTasks.tsx | 107 ++++++ ...tration.tsx => EmptyTasksIllustration.tsx} | 2 +- .../components/other/EmptyTemplates.tsx | 323 ++++++++++++++++++ .../components/other/SectionWrap.tsx | 19 ++ .../RunDetailCard/RunDetailCard.tsx | 2 +- .../RunDetailHeader/RunDetailHeader.tsx | 28 +- .../SelectedRunView/SelectedRunView.tsx | 5 +- .../SelectedScheduleView.tsx | 2 +- .../sidebar/AgentRunsLists/AgentRunsLists.tsx | 128 ------- .../SidebarRunsList/SidebarRunsList.tsx | 179 ++++++++++ .../components/RunIconWrapper.tsx | 0 .../components/RunListItem.tsx | 8 +- .../components/RunSidebarCard.tsx | 12 +- .../components/ScheduleListItem.tsx | 0 .../helpers.ts | 0 .../useSidebarRunsList.ts} | 48 +-- .../components/NewAgentLibraryView/helpers.ts | 1 + .../useNewAgentLibraryView.ts | 55 ++- .../src/components/atoms/Text/helpers.ts | 32 +- .../molecules/Breadcrumbs/Breadcrumbs.tsx | 6 +- .../molecules/TabsLine/TabsLine.tsx | 4 +- .../frontend/src/components/styles/colors.ts | 2 +- 26 files changed, 1467 insertions(+), 376 deletions(-) delete mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyAgentRuns.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptySchedules.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptySchedulesIllustration.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx rename autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/{EmptyRunsIllustration.tsx => EmptyTasksIllustration.tsx} (99%) create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTemplates.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/SectionWrap.tsx delete mode 100644 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/AgentRunsLists.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx rename autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/{AgentRunsLists => SidebarRunsList}/components/RunIconWrapper.tsx (100%) rename autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/{AgentRunsLists => SidebarRunsList}/components/RunListItem.tsx (100%) rename autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/{AgentRunsLists => SidebarRunsList}/components/RunSidebarCard.tsx (72%) rename autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/{AgentRunsLists => SidebarRunsList}/components/ScheduleListItem.tsx (100%) rename autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/{AgentRunsLists => SidebarRunsList}/helpers.ts (100%) rename autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/{AgentRunsLists/useAgentRunsLists.ts => SidebarRunsList/useSidebarRunsList.ts} (71%) create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/helpers.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx index 7eaeeab644..5970607bb9 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx @@ -1,16 +1,21 @@ "use client"; +import { Skeleton } from "@/components/__legacy__/ui/skeleton"; import { Button } from "@/components/atoms/Button/Button"; import { Breadcrumbs } from "@/components/molecules/Breadcrumbs/Breadcrumbs"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import { cn } from "@/lib/utils"; import { PlusIcon } from "@phosphor-icons/react"; -import { useEffect } from "react"; import { RunAgentModal } from "./components/modals/RunAgentModal/RunAgentModal"; import { AgentRunsLoading } from "./components/other/AgentRunsLoading"; -import { EmptyAgentRuns } from "./components/other/EmptyAgentRuns"; +import { EmptySchedules } from "./components/other/EmptySchedules"; +import { EmptyTasks } from "./components/other/EmptyTasks"; +import { EmptyTemplates } from "./components/other/EmptyTemplates"; +import { SectionWrap } from "./components/other/SectionWrap"; import { SelectedRunView } from "./components/selected-views/SelectedRunView/SelectedRunView"; import { SelectedScheduleView } from "./components/selected-views/SelectedScheduleView/SelectedScheduleView"; -import { AgentRunsLists } from "./components/sidebar/AgentRunsLists/AgentRunsLists"; +import { SidebarRunsList } from "./components/sidebar/SidebarRunsList/SidebarRunsList"; +import { AGENT_LIBRARY_SECTION_PADDING_X } from "./helpers"; import { useNewAgentLibraryView } from 
"./useNewAgentLibraryView"; export function NewAgentLibraryView() { @@ -20,19 +25,15 @@ export function NewAgentLibraryView() { ready, error, agentId, - selectedRun, + activeItem, sidebarLoading, + activeTab, + setActiveTab, handleSelectRun, handleCountsChange, handleClearSelectedRun, } = useNewAgentLibraryView(); - useEffect(() => { - if (agent) { - document.title = `${agent.name} - Library - AutoGPT Platform`; - } - }, [agent]); - if (error) { return ( ; } - const shouldShowSidebar = sidebarLoading || hasAnyItems; - - return ( -
- {shouldShowSidebar && ( -
-
- - New Run - - } - agent={agent} - agentId={agent.id.toString()} - onRunCreated={(execution) => handleSelectRun(execution.id)} - onScheduleCreated={(schedule) => - handleSelectRun(`schedule:${schedule.id}`) - } - /> -
- - +
+
- )} +
+ +
+
+ ); + } - {/* Main Content - 70% */} -
-
+ return ( +
+ +
+ + New task + + } + agent={agent} + agentId={agent.id.toString()} + onRunCreated={(execution) => handleSelectRun(execution.id, "runs")} + onScheduleCreated={(schedule) => + handleSelectRun(schedule.id, "scheduled") + } + /> +
+ + +
+ + +
- {selectedRun ? ( - selectedRun.startsWith("schedule:") ? ( + {activeItem ? ( + activeTab === "scheduled" ? ( ) : ( ) ) : sidebarLoading ? ( - // Show loading state while sidebar is loading to prevent flash of empty state -
Loading runs...
- ) : hasAnyItems ? ( -
- Select a run to view its details +
+ + + +
+ ) : activeTab === "scheduled" ? ( + + ) : activeTab === "templates" ? ( + ) : ( - + )}
-
+
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyAgentRuns.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyAgentRuns.tsx deleted file mode 100644 index 578a95b7f7..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyAgentRuns.tsx +++ /dev/null @@ -1,106 +0,0 @@ -import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import { Button } from "@/components/atoms/Button/Button"; -import { Link } from "@/components/atoms/Link/Link"; -import { Text } from "@/components/atoms/Text/Text"; -import { ShowMoreText } from "@/components/molecules/ShowMoreText/ShowMoreText"; -import { formatDate } from "@/lib/utils/time"; -import { RunAgentModal } from "../modals/RunAgentModal/RunAgentModal"; -import { RunDetailCard } from "../selected-views/RunDetailCard/RunDetailCard"; -import { EmptyRunsIllustration } from "./EmptyRunsIllustration"; - -type Props = { - agent: LibraryAgent; -}; - -export function EmptyAgentRuns({ agent }: Props) { - const isPublished = Boolean(agent.marketplace_listing); - const createdAt = formatDate(agent.created_at); - const updatedAt = formatDate(agent.updated_at); - const isUpdated = updatedAt !== createdAt; - - return ( -
- -
- -
-
-
- - Ready to get started? - - - Run your agent and this space will fill with your agent's - activity - -
-
- - Setup your task - - } - agent={agent} - agentId={agent.id.toString()} - /> -
-
-
- {isPublished ? ( -
- - About this agent - -
- {agent.name} - - by{" "} - - {agent.marketplace_listing?.creator.name} - - -
- - {agent.description || - `Note: If you're using Docker Compose watch mode (docker compose watch), it will automatically rebuild on file changes. Since you're using docker compose up -d, manual rebuilds are needed. -You can test the endpoint from your frontend; it should return the marketplace_listing field when an agent has been published, or null if it hasn't.`} - -
-
-
- - Agent created on - - {createdAt} -
- {isUpdated ? ( -
- - Agent updated on - - {updatedAt} -
- ) : null} -
-
- - -
-
-
- ) : null} -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptySchedules.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptySchedules.tsx new file mode 100644 index 0000000000..97492d8a59 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptySchedules.tsx @@ -0,0 +1,323 @@ +import { Text } from "@/components/atoms/Text/Text"; + +export function EmptySchedules() { + return ( +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + Nothing scheduled yet + + + Create a new run, and you'll have the option to schedule your + agent to run automatically. + +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptySchedulesIllustration.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptySchedulesIllustration.tsx new file mode 100644 index 0000000000..51d33ba098 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptySchedulesIllustration.tsx @@ -0,0 +1,314 @@ +type Props = { + className?: string; +}; + +export function EmptySchedulesIllustration({ className }: Props) { + return ( +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx new file mode 100644 index 0000000000..c0c2c900a1 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx @@ -0,0 +1,107 @@ +import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { ShowMoreText } from "@/components/molecules/ShowMoreText/ShowMoreText"; +import { formatDate } from "@/lib/utils/time"; +import { RunAgentModal } from "../modals/RunAgentModal/RunAgentModal"; +import { RunDetailCard } from "../selected-views/RunDetailCard/RunDetailCard"; +import { EmptyTasksIllustration } from "./EmptyTasksIllustration"; + +type Props = { + agent: LibraryAgent; +}; + +export function EmptyTasks({ agent }: Props) { + const isPublished = Boolean(agent.marketplace_listing); + const createdAt = formatDate(agent.created_at); + const updatedAt = formatDate(agent.updated_at); + const isUpdated = updatedAt !== createdAt; + + return ( +
+ +
+ +
+
+
+ + Ready to get started? + + + Run your agent and this space will fill with your agent's + activity + +
+
+ + Setup your task + + } + agent={agent} + agentId={agent.id.toString()} + /> +
+
+
+ +
+ + About this agent + +
+ {agent.name} + {isPublished ? ( + + by {agent.marketplace_listing?.creator.name} + + ) : null} +
+ + {agent.description || + `This agent is not yet published. Once it is published, You can publish your agent by clicking the "Publish" button in the agent editor.`} + +
+
+
+ + Agent created on + + + {createdAt} + +
+ {isUpdated ? ( +
+ + Agent updated on + + + {updatedAt} + +
+ ) : null} +
+
+ + +
+
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyRunsIllustration.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasksIllustration.tsx similarity index 99% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyRunsIllustration.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasksIllustration.tsx index 13847884bc..978e6735df 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyRunsIllustration.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasksIllustration.tsx @@ -2,7 +2,7 @@ type Props = { className?: string; }; -export function EmptyRunsIllustration({ className }: Props) { +export function EmptyTasksIllustration({ className }: Props) { return (
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + No templates yet + + + Create a task first, then save it as a template to reuse later β€” + it'll show up here. + +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/SectionWrap.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/SectionWrap.tsx new file mode 100644 index 0000000000..75571dd856 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/SectionWrap.tsx @@ -0,0 +1,19 @@ +import { cn } from "@/lib/utils"; + +type Props = { + children: React.ReactNode; + className?: string; +}; + +export function SectionWrap({ children, className }: Props) { + return ( +
+ {children} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailCard/RunDetailCard.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailCard/RunDetailCard.tsx index d90a72e429..83df2d026a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailCard/RunDetailCard.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailCard/RunDetailCard.tsx @@ -9,7 +9,7 @@ export function RunDetailCard({ children, className }: Props) { return (
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx index 7f9c5065d1..811c9d4f55 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx @@ -3,6 +3,7 @@ import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { Button } from "@/components/atoms/Button/Button"; import { Text } from "@/components/atoms/Text/Text"; import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { FloatingSafeModeToggle } from "@/components/molecules/FloatingSafeModeToggle/FloatingSafeModeToggle"; import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; import { ArrowSquareOutIcon, @@ -11,10 +12,10 @@ import { TrashIcon, } from "@phosphor-icons/react"; import moment from "moment"; +import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../../helpers"; import { AgentActionsDropdown } from "../AgentActionsDropdown"; import { RunStatusBadge } from "../SelectedRunView/components/RunStatusBadge"; import { ShareRunButton } from "../ShareRunButton/ShareRunButton"; -import { FloatingSafeModeToggle } from "@/components/molecules/FloatingSafeModeToggle/FloatingSafeModeToggle"; import { useRunDetailHeader } from "./useRunDetailHeader"; type Props = { @@ -49,16 +50,13 @@ export function RunDetailHeader({ } = useRunDetailHeader(agent.graph_id, run, onSelectRun, onClearSelectedRun); return ( -
+
-
-
+
+
{run?.status ? : null} - + {agent.name}
@@ -120,18 +118,18 @@ export function RunDetailHeader({ ) : null}
{run ? ( -
- +
+ Started {moment(run.started_at).fromNow()} | - + Version: {run.graph_version} {run.stats?.node_exec_count !== undefined && ( <> | - + Steps: {run.stats.node_exec_count} @@ -139,7 +137,7 @@ export function RunDetailHeader({ {run.stats?.duration !== undefined && ( <> | - + Duration:{" "} {moment.duration(run.stats.duration, "seconds").humanize()} @@ -148,7 +146,7 @@ export function RunDetailHeader({ {run.stats?.cost !== undefined && ( <> | - + Cost: ${(run.stats.cost / 100).toFixed(2)} @@ -156,7 +154,7 @@ export function RunDetailHeader({ {run.stats?.activity_status && ( <> | - + {String(run.stats.activity_status)} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx index 52122edf8d..2a80aafed4 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx @@ -14,6 +14,7 @@ import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/Pe import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews"; import { parseAsString, useQueryState } from "nuqs"; import { useEffect } from "react"; +import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../../helpers"; import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly"; import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader"; @@ -78,7 +79,7 @@ export function SelectedRunView({ } return ( -
+
- + Output Your input {run?.status === AgentExecutionStatus.REVIEW && ( diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx index fb5a84a3b1..a431e68021 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx @@ -78,7 +78,7 @@ export function SelectedScheduleView({ } return ( -
+
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/AgentRunsLists.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/AgentRunsLists.tsx deleted file mode 100644 index bfd10d98b8..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/AgentRunsLists.tsx +++ /dev/null @@ -1,128 +0,0 @@ -"use client"; - -import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; -import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import { Skeleton } from "@/components/__legacy__/ui/skeleton"; -import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; -import { InfiniteList } from "@/components/molecules/InfiniteList/InfiniteList"; -import { - TabsLine, - TabsLineContent, - TabsLineList, - TabsLineTrigger, -} from "@/components/molecules/TabsLine/TabsLine"; -import { RunListItem } from "./components/RunListItem"; -import { ScheduleListItem } from "./components/ScheduleListItem"; -import { useAgentRunsLists } from "./useAgentRunsLists"; - -interface Props { - agent: LibraryAgent; - selectedRunId?: string; - onSelectRun: (id: string) => void; - onCountsChange?: (info: { - runsCount: number; - schedulesCount: number; - loading?: boolean; - }) => void; -} - -export function AgentRunsLists({ - agent, - selectedRunId, - onSelectRun, - onCountsChange, -}: Props) { - const { - runs, - schedules, - runsCount, - schedulesCount, - error, - loading, - fetchMoreRuns, - hasMoreRuns, - isFetchingMoreRuns, - tabValue, - setTabValue, - } = useAgentRunsLists({ - graphId: agent.graph_id, - onSelectRun, - onCountsChange, - }); - - if (error) { - return ; - } - - if (loading) { - return ( -
- - - -
- ); - } - - return ( - { - const value = v as "runs" | "scheduled"; - setTabValue(value); - if (value === "runs") { - if (runs && runs.length) onSelectRun(runs[0].id); - } else { - if (schedules && schedules.length) - onSelectRun(`schedule:${schedules[0].id}`); - } - }} - className="min-w-0 overflow-hidden" - > - - - Runs {runsCount} - - - Scheduled {schedulesCount} - - - - <> - - ( -
- onSelectRun && onSelectRun(run.id)} - /> -
- )} - /> -
- -
- {schedules.map((s: GraphExecutionJobInfo) => ( -
- onSelectRun(`schedule:${s.id}`)} - /> -
- ))} -
-
- -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx new file mode 100644 index 0000000000..ae4423931a --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx @@ -0,0 +1,179 @@ +"use client"; + +import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; +import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Skeleton } from "@/components/__legacy__/ui/skeleton"; +import { Text } from "@/components/atoms/Text/Text"; +import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import { InfiniteList } from "@/components/molecules/InfiniteList/InfiniteList"; +import { + TabsLine, + TabsLineContent, + TabsLineList, + TabsLineTrigger, +} from "@/components/molecules/TabsLine/TabsLine"; +import { cn } from "@/lib/utils"; +import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../../helpers"; +import { RunListItem } from "./components/RunListItem"; +import { ScheduleListItem } from "./components/ScheduleListItem"; +import { useSidebarRunsList } from "./useSidebarRunsList"; + +interface Props { + agent: LibraryAgent; + selectedRunId?: string; + onSelectRun: (id: string, tab?: "runs" | "scheduled") => void; + onClearSelectedRun?: () => void; + onTabChange?: (tab: "runs" | "scheduled") => void; + onCountsChange?: (info: { + runsCount: number; + schedulesCount: number; + loading?: boolean; + }) => void; +} + +export function SidebarRunsList({ + agent, + selectedRunId, + onSelectRun, + onClearSelectedRun, + onTabChange, + onCountsChange, +}: Props) { + const { + runs, + schedules, + runsCount, + schedulesCount, + error, + loading, + fetchMoreRuns, + hasMoreRuns, + isFetchingMoreRuns, + tabValue, + } = useSidebarRunsList({ + graphId: agent.graph_id, + onSelectRun, + onCountsChange, + }); + + if (error) { + return ; + } + + if (loading) { + return ( +
+ + + +
+ ); + } + + return ( + { + const value = v as "runs" | "scheduled"; + onTabChange?.(value); + if (value === "runs") { + if (runs && runs.length) { + onSelectRun(runs[0].id, "runs"); + } else { + onClearSelectedRun?.(); + } + } else { + if (schedules && schedules.length) { + onSelectRun(schedules[0].id, "scheduled"); + } else { + onClearSelectedRun?.(); + } + } + }} + className="flex min-h-0 flex-col overflow-hidden" + > + + + Tasks {runsCount} + + + Scheduled {schedulesCount} + + + Templates 0 + + + + <> + + ( +
+ onSelectRun && onSelectRun(run.id, "runs")} + /> +
+ )} + /> +
+ +
+ {schedules.length > 0 ? ( + schedules.map((s: GraphExecutionJobInfo) => ( +
+ onSelectRun(s.id, "scheduled")} + /> +
+ )) + ) : ( +
+ + No scheduled agents + +
+ )} +
+
+ +
+
+ + No templates saved + +
+
+
+ +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/RunIconWrapper.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunIconWrapper.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/RunIconWrapper.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunIconWrapper.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/RunListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunListItem.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/RunListItem.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunListItem.tsx index 89137cbaf7..c038217f72 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/RunListItem.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunListItem.tsx @@ -1,10 +1,7 @@ "use client"; -import React from "react"; -import moment from "moment"; -import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; -import { RunSidebarCard } from "./RunSidebarCard"; import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; +import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; import { CheckCircleIcon, ClockIcon, @@ -13,7 +10,10 @@ import { WarningCircleIcon, XCircleIcon, } from "@phosphor-icons/react"; +import moment from "moment"; +import React from "react"; import { IconWrapper } from "./RunIconWrapper"; +import { RunSidebarCard } from "./RunSidebarCard"; const statusIconMap: Record = { INCOMPLETE: ( diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/RunSidebarCard.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunSidebarCard.tsx similarity index 72% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/RunSidebarCard.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunSidebarCard.tsx index d5f0e09b65..eb163f7337 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/RunSidebarCard.tsx +++ 
b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunSidebarCard.tsx @@ -1,8 +1,8 @@ "use client"; -import React from "react"; -import { cn } from "@/lib/utils"; import { Text } from "@/components/atoms/Text/Text"; +import { cn } from "@/lib/utils"; +import React from "react"; interface RunListItemProps { title: string; @@ -22,21 +22,21 @@ export function RunSidebarCard({ return ( +
+ + + + + +
+ + + ); +}; diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget/TextInputWidget.tsx similarity index 52% rename from autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget.tsx rename to autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget/TextInputWidget.tsx index e10b09bcf9..d9fea28a8d 100644 --- a/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget.tsx +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget/TextInputWidget.tsx @@ -1,10 +1,21 @@ +"use client"; + +import { useState } from "react"; import { WidgetProps } from "@rjsf/utils"; import { InputType, mapJsonSchemaTypeToInputType, } from "@/app/(platform)/build/components/FlowEditor/nodes/helpers"; import { Input } from "@/components/atoms/Input/Input"; +import { Button } from "@/components/atoms/Button/Button"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/atoms/Tooltip/BaseTooltip"; import { BlockUIType } from "@/lib/autogpt-server-api/types"; +import { InputExpanderModal } from "./InputExpanderModal"; +import { ArrowsOutIcon } from "@phosphor-icons/react"; export const TextInputWidget = (props: WidgetProps) => { const { schema, formContext } = props; @@ -13,6 +24,8 @@ export const TextInputWidget = (props: WidgetProps) => { size?: string; }; + const [isModalOpen, setIsModalOpen] = useState(false); + const mapped = mapJsonSchemaTypeToInputType(schema); type InputConfig = { @@ -59,9 +72,25 @@ export const TextInputWidget = (props: WidgetProps) => { return props.onChange(config.handleChange(v)); }; + const handleModalSave = (value: string) => { + props.onChange(config.handleChange(value)); + setIsModalOpen(false); + }; + + const handleModalOpen = () => { + setIsModalOpen(true); + }; + // Determine input size based on context const inputSize = size === "large" ? "medium" : "small"; + // Check if this input type should show the expand button + // Show for text and password types, not for number/integer + const showExpandButton = + config.htmlType === "text" || + config.htmlType === "password" || + config.htmlType === "textarea"; + if (uiType === BlockUIType.NOTE) { return ( { } return ( - + <> +
+ + {showExpandButton && ( + + + + + Expand input + + )} +
+ + setIsModalOpen(false)} + onSave={handleModalSave} + title={schema.title || "Edit value"} + description={schema.description || ""} + defaultValue={props.value ?? ""} + placeholder={schema.placeholder || config.placeholder} + /> + ); }; diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/index.ts b/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/index.ts index cdb02c728c..3788e74fbf 100644 --- a/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/index.ts +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/index.ts @@ -1,6 +1,6 @@ import { RegistryWidgetsType } from "@rjsf/utils"; import { SelectWidget } from "./SelectWidget"; -import { TextInputWidget } from "./TextInputWidget"; +import { TextInputWidget } from "./TextInputWidget/TextInputWidget"; import { SwitchWidget } from "./SwitchWidget"; import { FileWidget } from "./FileWidget"; import { DateInputWidget } from "./DateInputWidget"; From 729400dbe1f2e42eec870a6d86bfff62fc09eaca Mon Sep 17 00:00:00 2001 From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> Date: Thu, 4 Dec 2025 20:42:51 +0530 Subject: [PATCH 13/58] feat(frontend): display graph validation errors inline on node fields (#11524) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When running a graph in the new builder, validation errors were only displayed in toast notifications, making it difficult for users to identify which specific fields had errors. Users needed to see validation errors directly next to the problematic fields within each node for better UX and faster debugging. Screenshot 2025-12-03 at 12 48
15β€―PM ### Changes πŸ—οΈ - **Error handling in graph execution** (`useRunGraph.ts`): - Added detection for graph validation errors using `ApiError.isGraphValidationError()` - Parse and store node-level errors from backend validation response - Clear all node errors on successful graph execution - Enhanced toast messages to guide users to fix validation errors on highlighted nodes - **Node store error management** (`nodeStore.ts`): - Added `errors` field to node data structure - Implemented `updateNodeErrors()` to set errors for a specific node - Implemented `clearNodeErrors()` to remove errors from a specific node - Implemented `getNodeErrors()` to retrieve errors for a specific node - Implemented `setNodeErrorsForBackendId()` to set errors by backend ID (supports matching by `metadata.backend_id` or node `id`) - Implemented `clearAllNodeErrors()` to clear all node errors across the graph - **Visual error indication** (`CustomNode.tsx`, `NodeContainer.tsx`): - Added error detection logic to identify both configuration errors and output errors - Applied error styling to nodes with validation errors (using `FAILED` status styling) - Nodes with errors now display with red border/ring to visually indicate issues - **Field-level error display** (`FieldTemplate.tsx`): - Fetch node errors from store for the current node - Match field IDs with error keys (handles both underscore and dot notation) - Display field-specific error messages below each field in red text - Added helper function `getFieldErrorKey()` to normalize field IDs for error matching - **Utility helpers** (`helpers.ts`): - Created `getFieldErrorKey()` function to extract field key from field ID (removes `root_` prefix) ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Create a graph with multiple nodes and intentionally leave required fields empty - [x] Run the graph and verify that validation errors appear in toast notification - [x] Verify that nodes with errors are highlighted with red border/ring styling - [x] Verify that field-specific error messages appear below each problematic field in red text - [x] Verify that error messages handle both underscore and dot notation in field keys - [x] Fix validation errors and run graph again - verify errors are cleared - [x] Verify that successful graph execution clears all node errors - [x] Test with nodes that have `backend_id` in metadata vs nodes without - [x] Verify that nodes without errors don't show error styling - [x] Test with nested fields and array fields to ensure error matching works correctly --- .../components/RunGraph/useRunGraph.ts | 51 +++++++++++++++--- .../components/FlowEditor/Flow/useFlow.ts | 2 +- .../nodes/CustomNode/CustomNode.tsx | 17 +++++- .../CustomNode/components/NodeContainer.tsx | 4 ++ .../app/(platform)/build/stores/nodeStore.ts | 52 +++++++++++++++++++ .../templates/FieldTemplate.tsx | 20 ++++++- .../renderers/input-renderer/utils/helpers.ts | 4 ++ 7 files changed, 141 insertions(+), 9 deletions(-) create mode 100644 autogpt_platform/frontend/src/components/renderers/input-renderer/utils/helpers.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts index 2b37ebe8a9..db3b6660df 100644 --- 
a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts @@ -9,6 +9,8 @@ import { useGraphStore } from "@/app/(platform)/build/stores/graphStore"; import { useShallow } from "zustand/react/shallow"; import { useState } from "react"; import { useSaveGraph } from "@/app/(platform)/build/hooks/useSaveGraph"; +import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore"; +import { ApiError } from "@/lib/autogpt-server-api/helpers"; // Check if this exists export const useRunGraph = () => { const { saveGraph, isSaving } = useSaveGraph({ @@ -24,6 +26,13 @@ export const useRunGraph = () => { ); const [openRunInputDialog, setOpenRunInputDialog] = useState(false); + const setNodeErrorsForBackendId = useNodeStore( + useShallow((state) => state.setNodeErrorsForBackendId), + ); + const clearAllNodeErrors = useNodeStore( + useShallow((state) => state.clearAllNodeErrors), + ); + const [{ flowID, flowVersion, flowExecutionID }, setQueryStates] = useQueryStates({ flowID: parseAsString, @@ -35,19 +44,49 @@ export const useRunGraph = () => { usePostV1ExecuteGraphAgent({ mutation: { onSuccess: (response: any) => { + clearAllNodeErrors(); const { id } = response.data as GraphExecutionMeta; setQueryStates({ flowExecutionID: id, }); }, onError: (error: any) => { - // Reset running state on error setIsGraphRunning(false); - toast({ - title: (error.detail as string) ?? "An unexpected error occurred.", - description: "An unexpected error occurred.", - variant: "destructive", - }); + if (error instanceof ApiError && error.isGraphValidationError?.()) { + const errorData = error.response?.detail; + + if (errorData?.node_errors) { + Object.entries(errorData.node_errors).forEach( + ([backendId, nodeErrors]) => { + setNodeErrorsForBackendId( + backendId, + nodeErrors as { [key: string]: string }, + ); + }, + ); + + useNodeStore.getState().nodes.forEach((node) => { + const backendId = node.data.metadata?.backend_id || node.id; + if (!errorData.node_errors[backendId as string]) { + useNodeStore.getState().updateNodeErrors(node.id, {}); + } + }); + } + + toast({ + title: errorData?.message || "Graph validation failed", + description: + "Please fix the validation errors on the highlighted nodes and try again.", + variant: "destructive", + }); + } else { + toast({ + title: + (error.detail as string) ?? 
"An unexpected error occurred.", + description: "An unexpected error occurred.", + variant: "destructive", + }); + } }, }, }); diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts index badb9784b8..64f00871d8 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts @@ -81,7 +81,7 @@ export const useFlow = () => { { query: { select: (res) => res.data as BlockInfo[], - enabled: !!flowID && !!blockIds, + enabled: !!flowID && !!blockIds && blockIds.length > 0, }, }, ); diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx index 52df3edbc4..974cbe3754 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx @@ -37,6 +37,7 @@ export type CustomNodeData = { costs: BlockCost[]; categories: BlockInfoCategoriesItem[]; metadata?: NodeModelMetadata; + errors?: { [key: string]: string }; }; export type CustomNode = XYNode; @@ -71,10 +72,24 @@ export const CustomNode: React.FC> = React.memo( ? (data.hardcodedValues.output_schema ?? {}) : data.outputSchema; + const hasConfigErrors = + data.errors && + Object.values(data.errors).some( + (value) => value !== null && value !== undefined && value !== "", + ); + + const outputData = data.nodeExecutionResult?.output_data; + const hasOutputError = + typeof outputData === "object" && + outputData !== null && + "error" in outputData; + + const hasErrors = hasConfigErrors || hasOutputError; + // Currently all blockTypes design are similar - that's why i am using the same component for all of them // If in future - if we need some drastic change in some blockTypes design - we can create separate components for them return ( - +
{isWebhook && } diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContainer.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContainer.tsx index 657f1ca048..f8d5b2e089 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContainer.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContainer.tsx @@ -3,15 +3,18 @@ import { nodeStyleBasedOnStatus } from "../helpers"; import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore"; import { useShallow } from "zustand/react/shallow"; +import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; export const NodeContainer = ({ children, nodeId, selected, + hasErrors, // these are configuration errors that occur before executing the graph -- more like validation errors }: { children: React.ReactNode; nodeId: string; selected: boolean; + hasErrors?: boolean; }) => { const status = useNodeStore( useShallow((state) => state.getNodeStatus(nodeId)), @@ -22,6 +25,7 @@ export const NodeContainer = ({ "z-12 max-w-[370px] rounded-xlarge ring-1 ring-slate-200/60", selected && "shadow-lg ring-2 ring-slate-200", status && nodeStyleBasedOnStatus[status], + hasErrors ? nodeStyleBasedOnStatus[AgentExecutionStatus.FAILED] : "", )} > {children} diff --git a/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts b/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts index 3beba0c615..2f41c3bb46 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts @@ -53,6 +53,15 @@ type NodeStore = { getNodeExecutionResult: (nodeId: string) => NodeExecutionResult | undefined; getNodeBlockUIType: (nodeId: string) => BlockUIType; hasWebhookNodes: () => boolean; + + updateNodeErrors: (nodeId: string, errors: { [key: string]: string }) => void; + clearNodeErrors: (nodeId: string) => void; + getNodeErrors: (nodeId: string) => { [key: string]: string } | undefined; + setNodeErrorsForBackendId: ( + backendId: string, + errors: { [key: string]: string }, + ) => void; + clearAllNodeErrors: () => void; // Add this }; export const useNodeStore = create((set, get) => ({ @@ -253,4 +262,47 @@ export const useNodeStore = create((set, get) => ({ [BlockUIType.WEBHOOK, BlockUIType.WEBHOOK_MANUAL].includes(n.data.uiType), ); }, + + updateNodeErrors: (nodeId: string, errors: { [key: string]: string }) => { + set((state) => ({ + nodes: state.nodes.map((n) => + n.id === nodeId ? { ...n, data: { ...n.data, errors } } : n, + ), + })); + }, + + clearNodeErrors: (nodeId: string) => { + set((state) => ({ + nodes: state.nodes.map((n) => + n.id === nodeId ? { ...n, data: { ...n.data, errors: undefined } } : n, + ), + })); + }, + + getNodeErrors: (nodeId: string) => { + return get().nodes.find((n) => n.id === nodeId)?.data?.errors; + }, + + setNodeErrorsForBackendId: ( + backendId: string, + errors: { [key: string]: string }, + ) => { + set((state) => ({ + nodes: state.nodes.map((n) => { + // Match by backend_id if nodes have it, or by id + const matches = + n.data.metadata?.backend_id === backendId || n.id === backendId; + return matches ? 
{ ...n, data: { ...n.data, errors } } : n; + }), + })); + }, + + clearAllNodeErrors: () => { + set((state) => ({ + nodes: state.nodes.map((n) => ({ + ...n, + data: { ...n.data, errors: undefined }, + })), + })); + }, })); diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx index b4db9d4159..a056782939 100644 --- a/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx @@ -23,6 +23,7 @@ import { cn } from "@/lib/utils"; import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api"; import { BlockUIType } from "@/lib/autogpt-server-api"; import NodeHandle from "@/app/(platform)/build/components/FlowEditor/handlers/NodeHandle"; +import { getFieldErrorKey } from "../utils/helpers"; const FieldTemplate: React.FC = ({ id: fieldId, @@ -42,6 +43,11 @@ const FieldTemplate: React.FC = ({ (state) => state.nodeAdvancedStates[nodeId] ?? false, ); + const nodeErrors = useNodeStore((state) => { + const node = state.nodes.find((n) => n.id === nodeId); + return node?.data?.errors; + }); + const { isArrayItem, arrayFieldHandleId } = useContext(ArrayEditorContext); const isAnyOf = @@ -89,6 +95,13 @@ const FieldTemplate: React.FC = ({ shouldShowHandle = false; } + const fieldErrorKey = getFieldErrorKey(fieldId); + const fieldError = + nodeErrors?.[fieldErrorKey] || + nodeErrors?.[fieldErrorKey.replace(/_/g, ".")] || + nodeErrors?.[fieldErrorKey.replace(/\./g, "_")] || + null; + return (
= ({
{children}
- )}{" "} + )} + {fieldError && ( + + {fieldError} + + )}
); }; diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/utils/helpers.ts b/autogpt_platform/frontend/src/components/renderers/input-renderer/utils/helpers.ts new file mode 100644 index 0000000000..51b628d923 --- /dev/null +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/utils/helpers.ts @@ -0,0 +1,4 @@ +export const getFieldErrorKey = (fieldId: string): string => { + const withoutRoot = fieldId.startsWith("root_") ? fieldId.slice(5) : fieldId; + return withoutRoot; +}; From 4e87f668e3b850de549e1fdf391711eb11820e5d Mon Sep 17 00:00:00 2001 From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> Date: Thu, 4 Dec 2025 20:43:01 +0530 Subject: [PATCH 14/58] feat(frontend): add file input widget with variants and base64 support (#11533) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR enhances the FileInput component to support multiple variants and modes, and integrates it into the form renderer as a file widget. The changes enable a more flexible file input experience with both server upload and local base64 conversion capabilities. ### Compact one Screenshot 2025-12-03 at 8 05 51β€―PM Screenshot 2025-12-03 at 8 06 11β€―PM ## Default one Screenshot 2025-12-03 at 8 05 08β€―PM Screenshot 2025-12-03 at 8 05 21β€―PM ### Changes πŸ—οΈ #### FileInput Component Enhancements - **Added variant support**: Introduced `default` and `compact` variants - `default`: Full-featured UI with drag & drop, progress bar, and storage note - `compact`: Minimal inline design suitable for tight spaces like node inputs - **Added mode support**: Introduced `upload` and `base64` modes - `upload`: Uploads files to server (requires `onUploadFile` and `uploadProgress`) - `base64`: Converts files to base64 locally without server upload - **Improved type safety**: Refactored props using discriminated unions (`UploadModeProps | Base64ModeProps`) to ensure type-safe usage - **Enhanced file handling**: Added `getFileLabelFromValue` helper to extract file labels from base64 data URIs or file paths - **Better UX**: Added `showStorageNote` prop to control visibility of storage disclaimer #### FileWidget Integration - **Replaced legacy Input**: Migrated from legacy `Input` component to new `FileInput` component - **Smart variant selection**: Automatically selects `default` or `compact` variant based on form context size - **Base64 mode**: Uses base64 mode for form inputs, eliminating need for server uploads in builder context - **Improved accessibility**: Better disabled/readonly state handling with visual feedback #### Form Renderer Updates - **Disabled validation**: Added `noValidate={true}` and `liveValidate={false}` to prevent premature form validation #### Storybook Updates - **Expanded stories**: Added comprehensive stories for all variant/mode combinations: - `Default`: Default variant with upload mode - `Compact`: Compact variant with base64 mode - `CompactWithUpload`: Compact variant with upload mode - `DefaultWithBase64`: Default variant with base64 mode - **Improved documentation**: Updated component descriptions to clearly explain variants and modes ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Test FileInput component in Storybook with all variant/mode combinations - [x] Test file upload flow in default variant with upload mode - [x] Test base64 conversion 
in compact variant with base64 mode - [x] Test file widget in form renderer (node inputs) - [x] Test file type validation (accept prop) - [x] Test file size validation (maxFileSize prop) - [x] Test error handling for invalid files - [x] Test disabled and readonly states - [x] Test file clearing/removal functionality - [x] Verify compact variant renders correctly in tight spaces - [x] Verify default variant shows storage note only in upload mode - [x] Test drag & drop functionality in default variant --- .../atoms/FileInput/FileInput.stories.tsx | 123 +++++-- .../components/atoms/FileInput/FileInput.tsx | 318 ++++++++++++++---- .../renderers/input-renderer/FormRenderer.tsx | 2 + .../input-renderer/widgets/FileWidget.tsx | 36 +- 4 files changed, 371 insertions(+), 108 deletions(-) diff --git a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.stories.tsx b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.stories.tsx index 303149084b..2df91f4c9e 100644 --- a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.stories.tsx +++ b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.stories.tsx @@ -1,8 +1,8 @@ -import type { Meta, StoryObj } from "@storybook/nextjs"; +import type { Meta } from "@storybook/nextjs"; import { useState } from "react"; import { FileInput } from "./FileInput"; -const meta: Meta = { +const meta: Meta = { title: "Atoms/FileInput", component: FileInput, tags: ["autodocs"], @@ -11,26 +11,13 @@ const meta: Meta = { docs: { description: { component: - "File upload input with progress and removable preview.\n\nProps:\n- accept: optional MIME/extensions filter (e.g. ['image/*', '.pdf']).\n- maxFileSize: optional maximum size in bytes; larger files are rejected with an inline error.", + "File upload input with two variants and two modes.\n\n**Variants:**\n- `default`: Full-featured with drag & drop, progress bar, and storage note.\n- `compact`: Minimal inline design for tight spaces like node inputs.\n\n**Modes:**\n- `upload`: Uploads file to server (requires `onUploadFile` and `uploadProgress`).\n- `base64`: Converts file to base64 locally (no server upload).\n\n**Props:**\n- `accept`: optional MIME/extensions filter (e.g. ['image/*', '.pdf']).\n- `maxFileSize`: optional maximum size in bytes; larger files are rejected with an inline error.", }, }, }, - argTypes: { - onUploadFile: { action: "upload" }, - accept: { - control: "object", - description: - "Optional accept filter. Supports MIME types (image/*) and extensions (.pdf).", - }, - maxFileSize: { - control: "number", - description: "Optional maximum file size in bytes.", - }, - }, }; export default meta; -type Story = StoryObj; function mockUpload(file: File): Promise<{ file_name: string; @@ -52,16 +39,16 @@ function mockUpload(file: File): Promise<{ ); } -export const Basic: Story = { +export const Default = { parameters: { docs: { description: { story: - "This example accepts images or PDFs only and limits size to 5MB. Oversized or disallowed file types show an inline error and do not upload.", + "Default variant with upload mode. Full-featured with drag & drop dropzone, progress bar, and storage note. Accepts images or PDFs only and limits size to 5MB.", }, }, }, - render: function BasicStory() { + render: function DefaultStory() { const [value, setValue] = useState(""); const [progress, setProgress] = useState(0); @@ -79,6 +66,8 @@ export const Basic: Story = { return (
(""); + + return ( +
+ +
+ ); + }, +}; + +export const CompactWithUpload = { + parameters: { + docs: { + description: { + story: + "Compact variant with upload mode. Useful when you need minimal UI but still want server uploads.", + }, + }, + }, + render: function CompactUploadStory() { + const [value, setValue] = useState(""); + const [progress, setProgress] = useState(0); + + async function onUploadFile(file: File) { + setProgress(0); + const interval = setInterval(() => { + setProgress((p) => (p >= 100 ? 100 : p + 20)); + }, 80); + const result = await mockUpload(file); + clearInterval(interval); + setProgress(100); + return result; + } + + return ( +
+ +
+ ); + }, +}; + +export const DefaultWithBase64 = { + parameters: { + docs: { + description: { + story: + "Default variant with base64 mode. Full-featured UI but converts to base64 locally instead of uploading.", + }, + }, + }, + render: function DefaultBase64Story() { + const [value, setValue] = useState(""); + + return ( +
+ +
+ ); + }, +}; diff --git a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx index 5eadf26410..8f855ad47d 100644 --- a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx +++ b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx @@ -1,9 +1,11 @@ import { FileTextIcon, TrashIcon, UploadIcon } from "@phosphor-icons/react"; +import { Cross2Icon } from "@radix-ui/react-icons"; import { useRef, useState } from "react"; import { Button } from "../Button/Button"; import { formatFileSize, getFileLabel } from "./helpers"; import { cn } from "@/lib/utils"; import { Progress } from "../Progress/Progress"; +import { Text } from "../Text/Text"; type UploadFileResult = { file_name: string; @@ -12,26 +14,51 @@ type UploadFileResult = { file_uri: string; }; -interface Props { - onUploadFile: (file: File) => Promise; - uploadProgress: number; - value?: string; // file URI or empty - placeholder?: string; // e.g. "Resume", "Document", etc. +type FileInputVariant = "default" | "compact"; + +interface BaseProps { + value?: string; + placeholder?: string; onChange: (value: string) => void; className?: string; - maxFileSize?: number; // bytes (optional) - accept?: string | string[]; // input accept filter (optional) + maxFileSize?: number; + accept?: string | string[]; + variant?: FileInputVariant; + showStorageNote?: boolean; } -export function FileInput({ - onUploadFile, - uploadProgress, - value, - onChange, - className, - maxFileSize, - accept, -}: Props) { +interface UploadModeProps extends BaseProps { + mode?: "upload"; + onUploadFile: (file: File) => Promise; + uploadProgress: number; +} + +interface Base64ModeProps extends BaseProps { + mode: "base64"; + onUploadFile?: never; + uploadProgress?: never; +} + +type Props = UploadModeProps | Base64ModeProps; + +export function FileInput(props: Props) { + const { + value, + onChange, + className, + maxFileSize, + accept, + placeholder, + variant = "default", + showStorageNote = true, + mode = "upload", + } = props; + + const onUploadFile = + mode === "upload" ? (props as UploadModeProps).onUploadFile : undefined; + const uploadProgress = + mode === "upload" ? (props as UploadModeProps).uploadProgress : 0; + const [isUploading, setIsUploading] = useState(false); const [uploadError, setUploadError] = useState(null); const [fileInfo, setFileInfo] = useState<{ @@ -40,7 +67,96 @@ export function FileInput({ content_type: string; } | null>(null); + const inputRef = useRef(null); + + const storageNote = + "Files are stored securely and will be automatically deleted at most 24 hours after upload."; + + function acceptToString(a?: string | string[]) { + if (!a) return "*/*"; + return Array.isArray(a) ? a.join(",") : a; + } + + function isAcceptedType(file: File, a?: string | string[]) { + if (!a) return true; + const list = Array.isArray(a) ? a : a.split(",").map((s) => s.trim()); + const fileType = file.type; + const fileExt = file.name.includes(".") + ? 
`.${file.name.split(".").pop()}`.toLowerCase() + : ""; + + for (const entry of list) { + if (!entry) continue; + const e = entry.toLowerCase(); + if (e.includes("/")) { + const [main, sub] = e.split("/"); + const [fMain, fSub] = fileType.toLowerCase().split("/"); + if (!fMain || !fSub) continue; + if (sub === "*") { + if (main === fMain) return true; + } else { + if (e === fileType.toLowerCase()) return true; + } + } else if (e.startsWith(".")) { + if (fileExt === e) return true; + } + } + return false; + } + + const getFileLabelFromValue = (val: string) => { + if (val.startsWith("data:")) { + const matches = val.match(/^data:([^;]+);/); + if (matches?.[1]) { + const mimeParts = matches[1].split("/"); + if (mimeParts.length > 1) { + return `${mimeParts[1].toUpperCase()} file`; + } + return `${matches[1]} file`; + } + } else { + const pathParts = val.split("."); + if (pathParts.length > 1) { + const ext = pathParts.pop(); + if (ext) return `${ext.toUpperCase()} file`; + } + } + return "File"; + }; + + const processFileBase64 = (file: File) => { + setIsUploading(true); + setUploadError(null); + + const reader = new FileReader(); + reader.onload = (e) => { + const base64String = e.target?.result as string; + setFileInfo({ + name: file.name, + size: file.size, + content_type: file.type || "application/octet-stream", + }); + onChange(base64String); + setIsUploading(false); + }; + reader.onerror = () => { + setUploadError("Failed to read file"); + setIsUploading(false); + }; + reader.readAsDataURL(file); + }; + const uploadFile = async (file: File) => { + if (mode === "base64") { + processFileBase64(file); + return; + } + + if (!onUploadFile) { + setUploadError("Upload handler not provided"); + return; + } + setIsUploading(true); setUploadError(null); @@ -53,7 +169,6 @@ export function FileInput({ content_type: result.content_type, }); - // Set the file URI as the value onChange(result.file_uri); } catch (error) { console.error("Upload failed:", error); @@ -87,43 +202,104 @@ export function FileInput({ if (file) uploadFile(file); }; - const inputRef = useRef(null); - - const storageNote = - "Files are stored securely and will be automatically deleted at most 24 hours after upload."; - - function acceptToString(a?: string | string[]) { - if (!a) return "*/*"; - return Array.isArray(a) ? a.join(",") : a; - } - - function isAcceptedType(file: File, a?: string | string[]) { - if (!a) return true; - const list = Array.isArray(a) ? a : a.split(",").map((s) => s.trim()); - const fileType = file.type; // e.g. image/png - const fileExt = file.name.includes(".") - ? `.${file.name.split(".").pop()}`.toLowerCase() - : ""; - - for (const entry of list) { - if (!entry) continue; - const e = entry.toLowerCase(); - if (e.includes("/")) { - // MIME type, support wildcards like image/* - const [main, sub] = e.split("/"); - const [fMain, fSub] = fileType.toLowerCase().split("/"); - if (!fMain || !fSub) continue; - if (sub === "*") { - if (main === fMain) return true; - } else { - if (e === fileType.toLowerCase()) return true; - } - } else if (e.startsWith(".")) { - // Extension match - if (fileExt === e) return true; - } + const handleClear = () => { + if (inputRef.current) { + inputRef.current.value = ""; } - return false; + onChange(""); + setFileInfo(null); + }; + + const displayName = placeholder || "File"; + + if (variant === "compact") { + return ( +
+
+ {isUploading ? ( +
+
+ + + {mode === "base64" ? "Processing..." : "Uploading..."} + + {mode === "upload" && ( + + {Math.round(uploadProgress)}% + + )} +
+ {mode === "upload" && ( + + )} +
+ ) : value ? ( +
+
+ + + + {fileInfo + ? getFileLabel(fileInfo.name, fileInfo.content_type) + : getFileLabelFromValue(value)} + + {fileInfo && ( + + {formatFileSize(fileInfo.size)} + + )} +
+ +
+ ) : ( +
+ +
+ )} + +
+ {uploadError && ( + + {uploadError} + + )} +
+ ); } return ( @@ -134,15 +310,23 @@ export function FileInput({
- Uploading... - - {Math.round(uploadProgress)}% + + {mode === "base64" ? "Processing..." : "Uploading..."} + {mode === "upload" && ( + + {Math.round(uploadProgress)}% + + )}
- + {mode === "upload" && ( + + )}
-

{storageNote}

+ {showStorageNote && mode === "upload" && ( +

{storageNote}

+ )}
) : value ? (
@@ -154,24 +338,20 @@ export function FileInput({ {fileInfo ? getFileLabel(fileInfo.name, fileInfo.content_type) - : "File"} + : getFileLabelFromValue(value)} {fileInfo ? formatFileSize(fileInfo.size) : ""}
{ - if (inputRef.current) { - inputRef.current.value = ""; - } - onChange(""); - setFileInfo(null); - }} + onClick={handleClear} />
-

{storageNote}

+ {showStorageNote && mode === "upload" && ( +

{storageNote}

+ )}
) : (
@@ -196,7 +376,9 @@ export function FileInput({
Error: {uploadError}
)} -

{storageNote}

+ {showStorageNote && mode === "upload" && ( +

{storageNote}

+ )}
)} diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/FormRenderer.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/FormRenderer.tsx index 935c9e4337..22c5496efb 100644 --- a/autogpt_platform/frontend/src/components/renderers/input-renderer/FormRenderer.tsx +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/FormRenderer.tsx @@ -45,6 +45,8 @@ export const FormRenderer = ({ onChange={handleChange} uiSchema={uiSchema} formData={initialValues} + noValidate={true} + liveValidate={false} />
); diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/FileWidget.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/FileWidget.tsx index e15d34a9ba..9d670add37 100644 --- a/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/FileWidget.tsx +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/FileWidget.tsx @@ -1,33 +1,27 @@ import { WidgetProps } from "@rjsf/utils"; -import { Input } from "@/components/__legacy__/ui/input"; +import { FileInput } from "@/components/atoms/FileInput/FileInput"; export const FileWidget = (props: WidgetProps) => { - const { onChange, multiple = false, disabled, readonly, id } = props; + const { onChange, disabled, readonly, value, schema, formContext } = props; - // TODO: It's temporary solution for file input, will complete it follow up prs - const handleChange = (event: React.ChangeEvent) => { - const files = event.target.files; - if (!files || files.length === 0) { - onChange(undefined); - return; - } + const { size } = formContext || {}; - const file = files[0]; - const reader = new FileReader(); - reader.onload = (e) => { - onChange(e.target?.result); - }; - reader.readAsDataURL(file); + const displayName = schema?.title || "File"; + + const handleChange = (fileUri: string) => { + onChange(fileUri); }; return ( - ); }; From 2b9816cfa5030eacb52d525f4462afb5a8b5436b Mon Sep 17 00:00:00 2001 From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> Date: Thu, 4 Dec 2025 20:43:13 +0530 Subject: [PATCH 15/58] fix(frontend): ensure node selection state is set before copying in context menu (#11535) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes an issue where copying a node via the context menu dialog fails on the first attempt in a new session. The problem occurs because the node selection state update and the copy operation happen in quick succession, causing a race condition where `copySelectedNodes()` reads the store before the selection state is properly updated. 
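For reference, a minimal sketch of the pattern involved, using hypothetical store shapes rather than the app's actual `nodeStore` and copy/paste store: Zustand's `set()` applies synchronously, so reading through `getState()` immediately afterwards already sees the new selection, whereas a snapshot captured before the update (or a selector that has not re-rendered yet) still sees the stale one.

```ts
// Illustrative only: simplified stores, not the real nodeStore/useCopyPasteStore.
import { create } from "zustand";

type SelectionStore = {
  selectedIds: string[];
  selectNode: (id: string) => void;
};

const useSelectionStore = create<SelectionStore>((set) => ({
  selectedIds: [],
  selectNode: (id) => set({ selectedIds: [id] }),
}));

type CopyPasteStore = {
  clipboard: string[];
  copySelectedNodes: () => void;
  pasteNodes: () => void;
};

const useCopyPasteStore = create<CopyPasteStore>((set, get) => ({
  clipboard: [],
  // Reads the selection store at call time, so it must run after the
  // selection update has actually been applied.
  copySelectedNodes: () =>
    set({ clipboard: useSelectionStore.getState().selectedIds }),
  pasteNodes: () => {
    console.log("pasting", get().clipboard);
  },
}));

function handleCopyFromContextMenu(nodeId: string) {
  // set() is synchronous, so getState() below already sees the new selection.
  useSelectionStore.getState().selectNode(nodeId);
  useCopyPasteStore.getState().copySelectedNodes();
  useCopyPasteStore.getState().pasteNodes();
}
```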
### Checklist πŸ“‹ - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Start a new browser session (or clear storage) - [x] Open the flow editor - [x] Right-click on a node and select "Copy Node" from the context menu - [x] Verify the node is successfully copied on the first attempt --- .../FlowEditor/nodes/CustomNode/components/NodeContextMenu.tsx | 1 + 1 file changed, 1 insertion(+) diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContextMenu.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContextMenu.tsx index 3aefb81d91..6e482122f6 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContextMenu.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContextMenu.tsx @@ -28,6 +28,7 @@ export const NodeContextMenu = ({ })), })); + useCopyPasteStore.getState().copySelectedNodes(); useCopyPasteStore.getState().pasteNodes(); }; From 3ccc712463ace5c2e76d4b96ef4d59ce923dbd9a Mon Sep 17 00:00:00 2001 From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> Date: Thu, 4 Dec 2025 20:43:23 +0530 Subject: [PATCH 16/58] feat(frontend): add host-scoped credentials support to CredentialField (#11546) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes πŸ—οΈ This PR adds support for `host_scoped` credential type in the new builder's `CredentialField` component. This enables blocks that require sensitive headers for custom API endpoints to configure host-scoped credentials directly from the credential field. 
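As a rough illustration of the host-matching rule described under "Key changes" below (simplified types only; the real code uses the generated `CredentialsMetaResponse` model and the `getHostFromUrl` util, whose normalization may differ from `new URL().host`):

```ts
// Sketch: a host_scoped credential is only offered when its host matches
// the host parsed from the block's URL field (the discriminator value).
type CredentialMeta = {
  provider: string;
  type: "api_key" | "oauth2" | "user_password" | "host_scoped";
  host?: string;
};

// Stand-in for the app's getHostFromUrl helper.
function hostFromUrl(value: string): string | null {
  try {
    return new URL(value).host;
  } catch {
    return null; // not a valid absolute URL yet
  }
}

function matchesHostScopedCredential(
  credential: CredentialMeta,
  discriminatorValue?: string,
): boolean {
  if (credential.type !== "host_scoped") return true; // other types handled elsewhere
  if (!discriminatorValue) return false; // no URL entered, nothing to match against
  return hostFromUrl(discriminatorValue) === credential.host;
}

// A credential scoped to api.example.com is offered for URLs on that host only:
matchesHostScopedCredential(
  { provider: "http", type: "host_scoped", host: "api.example.com" },
  "https://api.example.com/v1/items",
); // true
```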
Screenshot 2025-12-04 at 4 31 09β€―PM Screenshot 2025-12-04 at 4 36 02β€―PM **Key changes:** - **Added `HostScopedCredentialsModal` component** (`models/HostScopedCredentialsModal/`) - Modal dialog for creating host-scoped credentials with host pattern, optional title, and dynamic header pairs (key-value) - Auto-populates host from discriminator value (URL field) when available - Supports adding/removing multiple header pairs with validation - **Enhanced credential filtering logic** (`helpers.ts`) - Updated `filterCredentialsByProvider` to accept `schema` and `discriminatorValue` parameters - Added intelligent filtering for: - Credential types supported by the block - OAuth credentials with sufficient scopes - Host-scoped credentials matched by host from discriminator value - Extracted `getDiscriminatorValue` helper function for reusability - **Updated `CredentialField` component** - Added `supportsHostScoped` check in `useCredentialField` hook - Conditionally renders `HostScopedCredentialsModal` when `supportsHostScoped && discriminatorValue` is true - Exports `discriminatorValue` for use in child components - **Updated `useCredentialField` hook** - Calculates `discriminatorValue` using new `getDiscriminatorValue` helper - Passes `schema` and `discriminatorValue` to enhanced `filterCredentialsByProvider` function - Returns `supportsHostScoped` and `discriminatorValue` for component consumption **Technical details:** - Host extraction uses `getHostFromUrl` utility to parse host from discriminator value (URL) - Header pairs are managed as state with add/remove functionality - Form validation uses `react-hook-form` with `zod` schema - Credential creation integrates with existing API endpoints and query invalidation ### Checklist πŸ“‹ - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Verify `HostScopedCredentialsModal` appears when block supports `host_scoped` credentials and discriminator value is present - [x] Test host auto-population from discriminator value (URL field) - [x] Test manual host entry when discriminator value is not available - [x] Test adding/removing multiple header pairs - [x] Test form validation (host required, empty header pairs filtered out) - [x] Test credential creation and successful toast notification - [x] Verify credentials list refreshes after creation - [x] Test host-scoped credential filtering matches credentials by host from URL - [x] Verify existing credential types (api_key, oauth2, user_password) still work correctly - [x] Test OAuth scope filtering still works as expected - [x] Verify modal only shows when `supportsHostScoped && discriminatorValue` conditions are met --- .../CredentialField/CredentialField.tsx | 10 + .../fields/CredentialField/helpers.ts | 59 +++++- .../HostScopedCredentialsModal.tsx | 185 ++++++++++++++++++ .../useHostScopedCredentialsModal.ts | 167 ++++++++++++++++ .../CredentialField/useCredentialField.ts | 17 +- 5 files changed, 430 insertions(+), 8 deletions(-) create mode 100644 autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/models/HostScopedCredentialsModal/HostScopedCredentialsModal.tsx create mode 100644 autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/models/HostScopedCredentialsModal/useHostScopedCredentialsModal.ts diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/CredentialField.tsx 
b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/CredentialField.tsx index 879f9fe78e..a61d2f0b16 100644 --- a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/CredentialField.tsx +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/CredentialField.tsx @@ -7,6 +7,7 @@ import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api"; import { APIKeyCredentialsModal } from "./models/APIKeyCredentialModal/APIKeyCredentialModal"; import { OAuthCredentialModal } from "./models/OAuthCredentialModal/OAuthCredentialModal"; import { PasswordCredentialsModal } from "./models/PasswordCredentialModal/PasswordCredentialModal"; +import { HostScopedCredentialsModal } from "./models/HostScopedCredentialsModal/HostScopedCredentialsModal"; export const CredentialsField = (props: FieldProps) => { const { @@ -22,9 +23,11 @@ export const CredentialsField = (props: FieldProps) => { supportsApiKey, supportsOAuth2, supportsUserPassword, + supportsHostScoped, credentialsExists, credentialProvider, setCredential, + discriminatorValue, } = useCredentialField({ credentialSchema: schema as BlockIOCredentialsSubSchema, formData, @@ -71,6 +74,13 @@ export const CredentialsField = (props: FieldProps) => { {supportsUserPassword && ( )} + {supportsHostScoped && discriminatorValue && ( + + )}
); diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/helpers.ts b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/helpers.ts index 96137cd6b0..e2b1a05d54 100644 --- a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/helpers.ts +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/helpers.ts @@ -1,5 +1,6 @@ import { CredentialsMetaResponse } from "@/app/api/__generated__/models/credentialsMetaResponse"; import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api"; +import { getHostFromUrl } from "@/lib/utils/url"; import { GoogleLogoIcon, KeyholeIcon, @@ -14,9 +15,44 @@ import { export const filterCredentialsByProvider = ( credentials: CredentialsMetaResponse[] | undefined, provider: string, + schema?: BlockIOCredentialsSubSchema, + discriminatorValue?: string, ) => { const filtered = - credentials?.filter((credential) => provider === credential.provider) ?? []; + credentials?.filter((credential) => { + // First filter by provider + if (provider !== credential.provider) { + return false; + } + + // Check if credential type is supported by this block + if (schema && !schema.credentials_types.includes(credential.type)) { + return false; + } + + // Filter OAuth credentials that have sufficient scopes for this block + if (credential.type === "oauth2" && schema?.credentials_scopes) { + const credentialScopes = new Set(credential.scopes || []); + const requiredScopes = new Set(schema.credentials_scopes); + const hasAllScopes = [...requiredScopes].every((scope) => + credentialScopes.has(scope), + ); + if (!hasAllScopes) { + return false; + } + } + + // Filter host_scoped credentials by host matching + if (credential.type === "host_scoped") { + if (!discriminatorValue) { + return false; + } + const hostFromUrl = getHostFromUrl(discriminatorValue); + return hostFromUrl === credential.host; + } + + return true; + }) ?? []; return { credentials: filtered, exists: filtered.length > 0, @@ -96,22 +132,31 @@ export const providerIcons: Partial> = { zerobounce: KeyholeIcon, }; +export const getDiscriminatorValue = ( + formData: Record, + schema: BlockIOCredentialsSubSchema, +): string | undefined => { + const discriminator = schema.discriminator; + const discriminatorValues = schema.discriminator_values; + + return [ + discriminator ? formData[discriminator] : null, + ...(discriminatorValues || []), + ].find(Boolean); +}; + export const getCredentialProviderFromSchema = ( formData: Record, schema: BlockIOCredentialsSubSchema, ) => { const discriminator = schema.discriminator; const discriminatorMapping = schema.discriminator_mapping; - const discriminatorValues = schema.discriminator_values; const providers = schema.credentials_provider; - const discriminatorValue = [ - discriminator ? formData[discriminator] : null, - ...(discriminatorValues || []), - ].find(Boolean); + const discriminatorValue = getDiscriminatorValue(formData, schema); const discriminatedProvider = discriminatorMapping - ? discriminatorMapping[discriminatorValue] + ? discriminatorMapping[discriminatorValue ?? 
""] : null; if (providers.length > 1) { diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/models/HostScopedCredentialsModal/HostScopedCredentialsModal.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/models/HostScopedCredentialsModal/HostScopedCredentialsModal.tsx new file mode 100644 index 0000000000..3264fca76f --- /dev/null +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/models/HostScopedCredentialsModal/HostScopedCredentialsModal.tsx @@ -0,0 +1,185 @@ +import { Input } from "@/components/atoms/Input/Input"; +import { Button } from "@/components/atoms/Button/Button"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { + Form, + FormDescription, + FormField, + FormLabel, +} from "@/components/__legacy__/ui/form"; +import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api/types"; +import { useHostScopedCredentialsModal } from "./useHostScopedCredentialsModal"; +import { toDisplayName } from "../../helpers"; +import { GlobeIcon, PlusIcon, TrashIcon } from "@phosphor-icons/react"; +import { Text } from "@/components/atoms/Text/Text"; + +type Props = { + schema: BlockIOCredentialsSubSchema; + provider: string; + discriminatorValue?: string; +}; + +export function HostScopedCredentialsModal({ + schema, + provider, + discriminatorValue, +}: Props) { + const { + form, + schemaDescription, + onSubmit, + isOpen, + setIsOpen, + headerPairs, + addHeaderPair, + removeHeaderPair, + updateHeaderPair, + currentHost, + } = useHostScopedCredentialsModal({ schema, provider, discriminatorValue }); + + return ( + <> + { + if (!isOpen) setIsOpen(false); + }, + }} + onClose={() => setIsOpen(false)} + styling={{ + maxWidth: "38rem", + }} + > + +
+ {schemaDescription && ( +

{schemaDescription}

+ )} + +
+ + ( + + )} + /> + + ( + + )} + /> + +
+ Headers + + Add sensitive headers (like Authorization, X-API-Key) that + should be automatically included in requests to the + specified host. + + + {headerPairs.map((pair, index) => ( +
+ + updateHeaderPair(index, "key", e.target.value) + } + /> + + + updateHeaderPair(index, "value", e.target.value) + } + /> + +
+ ))} + + +
+ + + + +
+
+
+ + + ); +} diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/models/HostScopedCredentialsModal/useHostScopedCredentialsModal.ts b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/models/HostScopedCredentialsModal/useHostScopedCredentialsModal.ts new file mode 100644 index 0000000000..066bc05b51 --- /dev/null +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/models/HostScopedCredentialsModal/useHostScopedCredentialsModal.ts @@ -0,0 +1,167 @@ +import { z } from "zod"; +import { useForm, type UseFormReturn } from "react-hook-form"; +import { zodResolver } from "@hookform/resolvers/zod"; +import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api/types"; +import { + getGetV1ListCredentialsQueryKey, + usePostV1CreateCredentials, +} from "@/app/api/__generated__/endpoints/integrations/integrations"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { HostScopedCredentialsInput } from "@/app/api/__generated__/models/hostScopedCredentialsInput"; +import { useQueryClient } from "@tanstack/react-query"; +import { useState } from "react"; +import { getHostFromUrl } from "@/lib/utils/url"; + +export type HeaderPair = { + key: string; + value: string; +}; + +export type HostScopedFormValues = { + host: string; + title?: string; +}; + +type UseHostScopedCredentialsModalType = { + schema: BlockIOCredentialsSubSchema; + provider: string; + discriminatorValue?: string; +}; + +export function useHostScopedCredentialsModal({ + schema, + provider, + discriminatorValue, +}: UseHostScopedCredentialsModalType): { + form: UseFormReturn; + schemaDescription?: string; + onSubmit: (values: HostScopedFormValues) => Promise; + isOpen: boolean; + setIsOpen: (isOpen: boolean) => void; + headerPairs: HeaderPair[]; + addHeaderPair: () => void; + removeHeaderPair: (index: number) => void; + updateHeaderPair: ( + index: number, + field: "key" | "value", + value: string, + ) => void; + currentHost: string | null; +} { + const { toast } = useToast(); + const [isOpen, setIsOpen] = useState(false); + const [headerPairs, setHeaderPairs] = useState([ + { key: "", value: "" }, + ]); + const queryClient = useQueryClient(); + + // Get current host from discriminatorValue (URL field) + const currentHost = discriminatorValue + ? 
getHostFromUrl(discriminatorValue) + : null; + + const { mutateAsync: createCredentials } = usePostV1CreateCredentials({ + mutation: { + onSuccess: async () => { + form.reset(); + setHeaderPairs([{ key: "", value: "" }]); + setIsOpen(false); + toast({ + title: "Success", + description: "Host-scoped credentials created successfully", + variant: "default", + }); + + await queryClient.refetchQueries({ + queryKey: getGetV1ListCredentialsQueryKey(), + }); + }, + onError: () => { + toast({ + title: "Error", + description: "Failed to create host-scoped credentials.", + variant: "destructive", + }); + }, + }, + }); + + const formSchema = z.object({ + host: z.string().min(1, "Host is required"), + title: z.string().optional().default(""), + }); + + const form = useForm({ + resolver: zodResolver(formSchema), + defaultValues: { + host: currentHost || "", + title: currentHost || "Manual Entry", + }, + }); + + // Update form values when modal opens and discriminatorValue changes + const handleSetIsOpen = (open: boolean) => { + if (open && currentHost) { + form.setValue("host", currentHost); + form.setValue("title", currentHost); + } + setIsOpen(open); + }; + + const addHeaderPair = () => { + setHeaderPairs([...headerPairs, { key: "", value: "" }]); + }; + + const removeHeaderPair = (index: number) => { + if (headerPairs.length > 1) { + setHeaderPairs(headerPairs.filter((_, i) => i !== index)); + } + }; + + const updateHeaderPair = ( + index: number, + field: "key" | "value", + value: string, + ) => { + const newPairs = [...headerPairs]; + newPairs[index][field] = value; + setHeaderPairs(newPairs); + }; + + async function onSubmit(values: HostScopedFormValues) { + // Convert header pairs to object, filtering out empty pairs + const headers = headerPairs.reduce( + (acc, pair) => { + if (pair.key.trim() && pair.value.trim()) { + acc[pair.key.trim()] = pair.value.trim(); + } + return acc; + }, + {} as Record, + ); + + createCredentials({ + provider: provider, + data: { + provider: provider, + type: "host_scoped", + host: values.host, + title: values.title || values.host, + headers: headers, + } as HostScopedCredentialsInput, + }); + } + + return { + form, + schemaDescription: schema.description, + onSubmit, + isOpen, + setIsOpen: handleSetIsOpen, + headerPairs, + addHeaderPair, + removeHeaderPair, + updateHeaderPair, + currentHost, + }; +} diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/useCredentialField.ts b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/useCredentialField.ts index 3d9282d2f9..a90add8aeb 100644 --- a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/useCredentialField.ts +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/useCredentialField.ts @@ -4,6 +4,7 @@ import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api"; import { filterCredentialsByProvider, getCredentialProviderFromSchema, + getDiscriminatorValue, } from "./helpers"; import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore"; import { useEffect, useRef } from "react"; @@ -44,13 +45,25 @@ export const useCredentialField = ({ credentialSchema, ); + const discriminatorValue = getDiscriminatorValue( + hardcodedValues, + credentialSchema, + ); + const supportsApiKey = credentialSchema.credentials_types.includes("api_key"); const supportsOAuth2 = credentialSchema.credentials_types.includes("oauth2"); const supportsUserPassword = 
credentialSchema.credentials_types.includes("user_password"); + const supportsHostScoped = + credentialSchema.credentials_types.includes("host_scoped"); const { credentials: filteredCredentials, exists: credentialsExists } = - filterCredentialsByProvider(credentials, credentialProvider ?? ""); + filterCredentialsByProvider( + credentials, + credentialProvider ?? "", + credentialSchema, + discriminatorValue, + ); const setCredential = (credentialId: string) => { const selectedCredential = filteredCredentials.find( @@ -120,7 +133,9 @@ export const useCredentialField = ({ supportsApiKey, supportsOAuth2, supportsUserPassword, + supportsHostScoped, credentialsExists, credentialProvider, + discriminatorValue, }; }; From f7a8e372ddf6ecc6a73eeefdff082847a5d33e18 Mon Sep 17 00:00:00 2001 From: Ubbe Date: Thu, 4 Dec 2025 22:46:42 +0700 Subject: [PATCH 17/58] feat(frontend): implement new actions sidebar + summary (#11545) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes πŸ—οΈ Screenshot 2025-12-04 at 17 40 10 Screenshot 2025-12-04 at 17 40 16 On the new Agent Library page: - Implement the new actions sidebar ( main change... ) - Refactor the layout/components to accommodate that - Implement the missing "Summary" functionality - Update icon buttons in Design system with new designs ## Checklist πŸ“‹ ### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Run the app locally and test it --- .../NewAgentLibraryView.tsx | 74 +++-- .../selected-views/AgentActionsDropdown.tsx | 237 ++++++++++++++-- .../selected-views/LoadingSelectedContent.tsx | 24 ++ .../OutputRenderers/components/OutputItem.tsx | 8 +- .../RunDetailCard/RunDetailCard.tsx | 7 +- .../RunDetailHeader/RunDetailHeader.tsx | 127 +-------- .../SelectedRunView/SelectedRunView.tsx | 200 ++++++++------ .../SelectedRunView/components/RunOutputs.tsx | 4 +- .../SelectedRunView/components/RunSummary.tsx | 100 +++++++ .../SelectedRunActions/SelectedRunActions.tsx | 118 ++++++++ .../useSelectedRunActions.ts} | 89 ++---- .../SelectedScheduleView.tsx | 256 +++++++++--------- .../components/ScheduleActions.tsx | 119 -------- .../components/SelectedScheduleActions.tsx | 38 +++ .../selected-views/SelectedViewLayout.tsx | 29 ++ .../ShareRunButton/ShareRunButton.tsx | 19 +- .../SidebarRunsList/SidebarRunsList.tsx | 13 +- .../useNewAgentLibraryView.ts | 5 +- .../atoms/Button/Button.stories.tsx | 8 + .../src/components/atoms/Button/Button.tsx | 61 ++++- .../src/components/atoms/Button/helpers.ts | 2 +- .../components/ActivityItem.tsx | 8 +- .../FloatingSafeModeToggle.tsx | 30 +- 23 files changed, 942 insertions(+), 634 deletions(-) create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/LoadingSelectedContent.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunSummary.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx rename autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/{RunDetailHeader/useRunDetailHeader.ts => 
SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts} (55%) delete mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/ScheduleActions.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/SelectedScheduleActions.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedViewLayout.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx index 5970607bb9..7a7470a391 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx @@ -1,6 +1,5 @@ "use client"; -import { Skeleton } from "@/components/__legacy__/ui/skeleton"; import { Button } from "@/components/atoms/Button/Button"; import { Breadcrumbs } from "@/components/molecules/Breadcrumbs/Breadcrumbs"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; @@ -12,8 +11,10 @@ import { EmptySchedules } from "./components/other/EmptySchedules"; import { EmptyTasks } from "./components/other/EmptyTasks"; import { EmptyTemplates } from "./components/other/EmptyTemplates"; import { SectionWrap } from "./components/other/SectionWrap"; +import { LoadingSelectedContent } from "./components/selected-views/LoadingSelectedContent"; import { SelectedRunView } from "./components/selected-views/SelectedRunView/SelectedRunView"; import { SelectedScheduleView } from "./components/selected-views/SelectedScheduleView/SelectedScheduleView"; +import { SelectedViewLayout } from "./components/selected-views/SelectedViewLayout"; import { SidebarRunsList } from "./components/sidebar/SidebarRunsList/SidebarRunsList"; import { AGENT_LIBRARY_SECTION_PADDING_X } from "./helpers"; import { useNewAgentLibraryView } from "./useNewAgentLibraryView"; @@ -101,49 +102,36 @@ export function NewAgentLibraryView() { /> - -
- -
-
- {activeItem ? ( - activeTab === "scheduled" ? ( - - ) : ( - - ) - ) : sidebarLoading ? ( -
- - - - -
- ) : activeTab === "scheduled" ? ( - - ) : activeTab === "templates" ? ( - - ) : ( - - )} -
-
+ ) : ( + + ) + ) : sidebarLoading ? ( + + ) : activeTab === "scheduled" ? ( + + + + ) : activeTab === "templates" ? ( + + + + ) : ( + + + + )}
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/AgentActionsDropdown.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/AgentActionsDropdown.tsx index 0a6b8b8c36..e94878f070 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/AgentActionsDropdown.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/AgentActionsDropdown.tsx @@ -1,46 +1,83 @@ "use client"; -import React, { useState } from "react"; +import { + getGetV1ListGraphExecutionsInfiniteQueryOptions, + getV1GetGraphVersion, + useDeleteV1DeleteGraphExecution, +} from "@/app/api/__generated__/endpoints/graphs/graphs"; +import { + getGetV2ListLibraryAgentsQueryKey, + useDeleteV2DeleteLibraryAgent, +} from "@/app/api/__generated__/endpoints/library/library"; +import { + getGetV1ListExecutionSchedulesForAGraphQueryOptions, + useDeleteV1DeleteExecutionSchedule, +} from "@/app/api/__generated__/endpoints/schedules/schedules"; +import type { GraphExecution } from "@/app/api/__generated__/models/graphExecution"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, + DropdownMenuSeparator, DropdownMenuTrigger, } from "@/components/molecules/DropdownMenu/DropdownMenu"; -import Link from "next/link"; -import { - FileArrowDownIcon, - PencilSimpleIcon, - TrashIcon, -} from "@phosphor-icons/react"; -import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import { getV1GetGraphVersion } from "@/app/api/__generated__/endpoints/graphs/graphs"; -import { exportAsJSONFile } from "@/lib/utils"; import { useToast } from "@/components/molecules/Toast/use-toast"; -import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { exportAsJSONFile } from "@/lib/utils"; +import { DotsThreeIcon } from "@phosphor-icons/react"; +import { useQueryClient } from "@tanstack/react-query"; +import Link from "next/link"; import { useRouter } from "next/navigation"; -import { useDeleteV2DeleteLibraryAgent } from "@/app/api/__generated__/endpoints/library/library"; -import { Text } from "@/components/atoms/Text/Text"; +import { useState } from "react"; interface Props { agent: LibraryAgent; + scheduleId?: string; + run?: GraphExecution; + agentGraphId?: string; + onClearSelectedRun?: () => void; } -export function AgentActionsDropdown({ agent }: Props) { +export function AgentActionsDropdown({ + agent, + run, + agentGraphId, + scheduleId, + onClearSelectedRun, +}: Props) { const { toast } = useToast(); - const { mutateAsync: deleteAgent } = useDeleteV2DeleteLibraryAgent(); - const router = useRouter(); - const [isDeleting, setIsDeleting] = useState(false); - const [showDeleteDialog, setShowDeleteDialog] = useState(false); - async function handleDelete() { + const { mutateAsync: deleteAgent } = useDeleteV2DeleteLibraryAgent(); + + const { mutateAsync: deleteRun, isPending: isDeletingRun } = + useDeleteV1DeleteGraphExecution(); + + const queryClient = useQueryClient(); + const router = useRouter(); + const [isDeletingAgent, setIsDeletingAgent] = 
useState(false); + const [showDeleteDialog, setShowDeleteDialog] = useState(false); + const [showDeleteRunDialog, setShowDeleteRunDialog] = useState(false); + + const { mutateAsync: deleteSchedule } = useDeleteV1DeleteExecutionSchedule(); + const [isDeletingSchedule, setIsDeletingSchedule] = useState(false); + const [showDeleteScheduleDialog, setShowDeleteScheduleDialog] = + useState(false); + + async function handleDeleteAgent() { if (!agent.id) return; - setIsDeleting(true); + setIsDeletingAgent(true); try { await deleteAgent({ libraryAgentId: agent.id }); + + await queryClient.refetchQueries({ + queryKey: getGetV2ListLibraryAgentsQueryKey(), + }); + toast({ title: "Agent deleted" }); setShowDeleteDialog(false); router.push("/library"); @@ -54,7 +91,7 @@ export function AgentActionsDropdown({ agent }: Props) { variant: "destructive", }); } finally { - setIsDeleting(false); + setIsDeletingAgent(false); } } @@ -81,39 +118,145 @@ export function AgentActionsDropdown({ agent }: Props) { } } + async function handleDeleteRun() { + if (!run?.id || !agentGraphId) return; + + try { + await deleteRun({ graphExecId: run.id }); + + toast({ title: "Task deleted" }); + + await queryClient.refetchQueries({ + queryKey: + getGetV1ListGraphExecutionsInfiniteQueryOptions(agentGraphId) + .queryKey, + }); + + if (onClearSelectedRun) onClearSelectedRun(); + + setShowDeleteRunDialog(false); + } catch (error: unknown) { + toast({ + title: "Failed to delete task", + description: + error instanceof Error + ? error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } + } + + async function handleDeleteSchedule() { + setIsDeletingSchedule(true); + try { + await deleteSchedule({ scheduleId: scheduleId ?? "" }); + toast({ title: "Schedule deleted" }); + + await queryClient.invalidateQueries({ + queryKey: getGetV1ListExecutionSchedulesForAGraphQueryOptions( + agentGraphId ?? "", + ).queryKey, + }); + + setShowDeleteDialog(false); + } catch (error: unknown) { + toast({ + title: "Failed to delete schedule", + description: + error instanceof Error + ? error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } finally { + setIsDeletingSchedule(false); + } + } + return ( <> - + {run ? ( + <> + setShowDeleteRunDialog(true)} + className="flex items-center gap-2" + > + Delete this task + + + + ) : null} - Edit agent + Edit agent - Export agent + Export agent to file setShowDeleteDialog(true)} className="flex items-center gap-2" > - Delete agent + Delete agent + + +
+ + Are you sure you want to delete this task? This action cannot be + undone. + + + + + +
+
+
+ + +
+ + + + + +
+ + Are you sure you want to delete this schedule? This action cannot + be undone. + + + +
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/LoadingSelectedContent.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/LoadingSelectedContent.tsx new file mode 100644 index 0000000000..d239f57e31 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/LoadingSelectedContent.tsx @@ -0,0 +1,24 @@ +import { Skeleton } from "@/components/__legacy__/ui/skeleton"; +import { cn } from "@/lib/utils"; +import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../helpers"; +import { SelectedViewLayout } from "./SelectedViewLayout"; + +interface Props { + agentName: string; + agentId: string; +} + +export function LoadingSelectedContent(props: Props) { + return ( + +
+ + + + +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/components/OutputItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/components/OutputItem.tsx index 4aa3017c18..c5c91d5d48 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/components/OutputItem.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/components/OutputItem.tsx @@ -1,7 +1,7 @@ "use client"; -import React from "react"; -import { OutputRenderer, OutputMetadata } from "../types"; +import { Text } from "@/components/atoms/Text/Text"; +import { OutputMetadata, OutputRenderer } from "../types"; interface OutputItemProps { value: any; @@ -19,7 +19,9 @@ export function OutputItem({ return (
{label && ( - + + {label} + )}
{renderer.render(value, metadata)}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailCard/RunDetailCard.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailCard/RunDetailCard.tsx index 83df2d026a..88f4f17756 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailCard/RunDetailCard.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailCard/RunDetailCard.tsx @@ -1,18 +1,21 @@ +import { Text } from "@/components/atoms/Text/Text"; import { cn } from "@/lib/utils"; type Props = { children: React.ReactNode; className?: string; + title?: string; }; -export function RunDetailCard({ children, className }: Props) { +export function RunDetailCard({ children, className, title }: Props) { return (
+ {title && {title}} {children}
); diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx index 811c9d4f55..ce6814094b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx @@ -1,22 +1,9 @@ import { GraphExecution } from "@/app/api/__generated__/models/graphExecution"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import { Button } from "@/components/atoms/Button/Button"; import { Text } from "@/components/atoms/Text/Text"; -import { Dialog } from "@/components/molecules/Dialog/Dialog"; -import { FloatingSafeModeToggle } from "@/components/molecules/FloatingSafeModeToggle/FloatingSafeModeToggle"; -import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; -import { - ArrowSquareOutIcon, - PlayIcon, - StopIcon, - TrashIcon, -} from "@phosphor-icons/react"; import moment from "moment"; import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../../helpers"; -import { AgentActionsDropdown } from "../AgentActionsDropdown"; import { RunStatusBadge } from "../SelectedRunView/components/RunStatusBadge"; -import { ShareRunButton } from "../ShareRunButton/ShareRunButton"; -import { useRunDetailHeader } from "./useRunDetailHeader"; type Props = { agent: LibraryAgent; @@ -26,29 +13,7 @@ type Props = { onClearSelectedRun?: () => void; }; -export function RunDetailHeader({ - agent, - run, - scheduleRecurrence, - onSelectRun, - onClearSelectedRun, -}: Props) { - const shareExecutionResultsEnabled = useGetFlag(Flag.SHARE_EXECUTION_RESULTS); - - const { - canStop, - isStopping, - isDeleting, - isRunning, - isRunningAgain, - openInBuilderHref, - showDeleteDialog, - handleStopRun, - handleRunAgain, - handleDeleteRun, - handleShowDeleteDialog, - } = useRunDetailHeader(agent.graph_id, run, onSelectRun, onClearSelectedRun); - +export function RunDetailHeader({ agent, run, scheduleRecurrence }: Props) { return (
@@ -60,62 +25,6 @@ export function RunDetailHeader({ {agent.name}
- {run ? ( -
- - {shareExecutionResultsEnabled && ( - - )} - - {!isRunning ? ( - - ) : null} - {openInBuilderHref ? ( - - ) : null} - {canStop ? ( - - ) : null} - -
- ) : null}
{run ? (
@@ -167,40 +76,6 @@ export function RunDetailHeader({ ) : null}
- - - -
- - Are you sure you want to delete this run? This action cannot be - undone. - - - - - -
-
-
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx index 857127164d..83e2a031d9 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx @@ -2,25 +2,26 @@ import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import { Skeleton } from "@/components/__legacy__/ui/skeleton"; +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; +import { Text } from "@/components/atoms/Text/Text"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; -import { - TabsLine, - TabsLineContent, - TabsLineList, - TabsLineTrigger, -} from "@/components/molecules/TabsLine/TabsLine"; import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/PendingReviewsList"; import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews"; -import { parseAsString, useQueryState } from "nuqs"; import { useEffect } from "react"; import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../../helpers"; import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly"; +import { LoadingSelectedContent } from "../LoadingSelectedContent"; import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader"; +import { SelectedViewLayout } from "../SelectedViewLayout"; import { RunOutputs } from "./components/RunOutputs"; +import { RunSummary } from "./components/RunSummary"; +import { SelectedRunActions } from "./components/SelectedRunActions/SelectedRunActions"; import { useSelectedRunView } from "./useSelectedRunView"; +const anchorStyles = + "border-b-2 border-transparent pb-1 text-sm font-medium text-slate-600 transition-colors hover:text-slate-900 hover:border-slate-900"; + interface Props { agent: LibraryAgent; runId: string; @@ -45,18 +46,22 @@ export function SelectedRunView({ refetch: refetchReviews, } = usePendingReviewsForExecution(runId); - // Tab state management - const [activeTab, setActiveTab] = useQueryState( - "tab", - parseAsString.withDefault("output"), - ); - useEffect(() => { if (run?.status === AgentExecutionStatus.REVIEW && runId) { refetchReviews(); } }, [run?.status, runId, refetchReviews]); + const withSummary = run?.stats?.activity_status; + const withReviews = run?.status === AgentExecutionStatus.REVIEW; + + function scrollToSection(id: string) { + const element = document.getElementById(id); + if (element) { + element.scrollIntoView({ behavior: "smooth", block: "start" }); + } + } + if (responseError || httpError) { return ( - - - - -
- ); + return ; } return ( -
- +
+
+ +
+ - {/* Content */} - - - Output - Your input - {run?.status === AgentExecutionStatus.REVIEW && ( - - Reviews ({pendingReviews.length}) - - )} - + {/* Navigation Links */} +
+ +
- - - {isLoading ? ( -
Loading…
- ) : run && "outputs" in run ? ( - - ) : ( -
No output from this run.
+ {/* Summary Section */} + {withSummary && ( +
+ + + +
)} -
-
- - - - - + {/* Output Section */} +
+ + {isLoading ? ( +
+ +
+ ) : run && "outputs" in run ? ( + + ) : ( + + No output from this run. + + )} +
+
- {run?.status === AgentExecutionStatus.REVIEW && ( - - - {reviewsLoading ? ( -
Loading reviews…
- ) : pendingReviews.length > 0 ? ( - + + - ) : ( -
- No pending reviews for this execution -
- )} -
-
- )} -
+ +
+ + {/* Reviews Section */} + {withReviews && ( +
+ + {reviewsLoading ? ( +
Loading reviews…
+ ) : pendingReviews.length > 0 ? ( + + ) : ( +
+ No pending reviews for this execution +
+ )} +
+
+ )} +
+ +
+
+ +
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunOutputs.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunOutputs.tsx index f165c4c964..20e218abb2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunOutputs.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunOutputs.tsx @@ -83,8 +83,8 @@ export function RunOutputs({ outputs }: RunOutputsProps) { } return ( -
-
+
+
({ value: item.value, diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunSummary.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunSummary.tsx new file mode 100644 index 0000000000..7d8d28774b --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunSummary.tsx @@ -0,0 +1,100 @@ +"use client"; + +import type { GetV1GetExecutionDetails200 } from "@/app/api/__generated__/models/getV1GetExecutionDetails200"; +import { IconCircleAlert } from "@/components/__legacy__/ui/icons"; +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from "@/components/atoms/Tooltip/BaseTooltip"; +import { RunDetailCard } from "../../RunDetailCard/RunDetailCard"; + +interface Props { + run: GetV1GetExecutionDetails200; +} + +export function RunSummary({ run }: Props) { + if (!run.stats?.activity_status) return null; + + const correctnessScore = run.stats.correctness_score; + + return ( + +
+
+

Task Summary

+ + + + + + +

+ This AI-generated summary describes how the agent handled your + task. It's an experimental feature and may occasionally + be inaccurate. +

+
+
+
+
+ +

+ {run.stats.activity_status} +

+ + {typeof correctnessScore === "number" && ( +
+
+ + Success Estimate: + +
+
+
= 0.8 + ? "bg-green-500" + : correctnessScore >= 0.6 + ? "bg-yellow-500" + : correctnessScore >= 0.4 + ? "bg-orange-500" + : "bg-red-500" + }`} + style={{ + width: `${Math.round(correctnessScore * 100)}%`, + }} + /> +
+ + {Math.round(correctnessScore * 100)}% + +
+
+ + + + + + +

+ AI-generated estimate of how well this execution achieved + its intended purpose. This score indicates + {correctnessScore >= 0.8 + ? " the agent was highly successful." + : correctnessScore >= 0.6 + ? " the agent was mostly successful with minor issues." + : correctnessScore >= 0.4 + ? " the agent was partially successful with some gaps." + : " the agent had limited success with significant issues."} +

+
+
+
+
+ )} +
+ + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx new file mode 100644 index 0000000000..4d3bea526e --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx @@ -0,0 +1,118 @@ +import { GraphExecution } from "@/app/api/__generated__/models/graphExecution"; +import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Button } from "@/components/atoms/Button/Button"; +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; +import { FloatingSafeModeToggle } from "@/components/molecules/FloatingSafeModeToggle/FloatingSafeModeToggle"; +import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; +import { + ArrowBendLeftUpIcon, + ArrowBendRightDownIcon, + EyeIcon, + StopIcon, +} from "@phosphor-icons/react"; +import { AgentActionsDropdown } from "../../../AgentActionsDropdown"; +import { ShareRunButton } from "../../../ShareRunButton/ShareRunButton"; +import { useSelectedRunActions } from "./useSelectedRunActions"; + +type Props = { + agent: LibraryAgent; + run: GraphExecution | undefined; + scheduleRecurrence?: string; + onSelectRun?: (id: string) => void; + onClearSelectedRun?: () => void; +}; + +export function SelectedRunActions(props: Props) { + const { + handleRunAgain, + handleStopRun, + isRunningAgain, + canStop, + isStopping, + openInBuilderHref, + } = useSelectedRunActions({ + agentGraphId: props.agent.graph_id, + run: props.run, + onSelectRun: props.onSelectRun, + onClearSelectedRun: props.onClearSelectedRun, + }); + + const shareExecutionResultsEnabled = useGetFlag(Flag.SHARE_EXECUTION_RESULTS); + const isRunning = props.run?.status === "RUNNING"; + + if (!props.run || !props.agent) return null; + + return ( +
+ {!isRunning ? ( + + ) : null} + {canStop ? ( + + ) : null} + {openInBuilderHref ? ( + + ) : null} + {shareExecutionResultsEnabled && ( + + )} + + +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/useRunDetailHeader.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts similarity index 55% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/useRunDetailHeader.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts index 04cb844069..518affcee9 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/useRunDetailHeader.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts @@ -1,78 +1,50 @@ "use client"; -import { useToast } from "@/components/molecules/Toast/use-toast"; -import { useQueryClient } from "@tanstack/react-query"; import { - usePostV1StopGraphExecution, getGetV1ListGraphExecutionsInfiniteQueryOptions, - useDeleteV1DeleteGraphExecution, usePostV1ExecuteGraphAgent, + usePostV1StopGraphExecution, } from "@/app/api/__generated__/endpoints/graphs/graphs"; import type { GraphExecution } from "@/app/api/__generated__/models/graphExecution"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { useQueryClient } from "@tanstack/react-query"; import { useState } from "react"; -export function useRunDetailHeader( - agentGraphId: string, - run?: GraphExecution, - onSelectRun?: (id: string) => void, - onClearSelectedRun?: () => void, -) { +interface Args { + agentGraphId: string; + run?: GraphExecution; + onSelectRun?: (id: string) => void; + onClearSelectedRun?: () => void; +} + +export function useSelectedRunActions(args: Args) { const queryClient = useQueryClient(); const { toast } = useToast(); const [showDeleteDialog, setShowDeleteDialog] = useState(false); - const canStop = run?.status === "RUNNING" || run?.status === "QUEUED"; + const canStop = + args.run?.status === "RUNNING" || args.run?.status === "QUEUED"; const { mutateAsync: stopRun, isPending: isStopping } = usePostV1StopGraphExecution(); - const { mutateAsync: deleteRun, isPending: isDeleting } = - useDeleteV1DeleteGraphExecution(); - const { mutateAsync: executeRun, isPending: isRunningAgain } = usePostV1ExecuteGraphAgent(); - async function handleDeleteRun() { - try { - await deleteRun({ graphExecId: run?.id ?? "" }); - - toast({ title: "Run deleted" }); - - await queryClient.refetchQueries({ - queryKey: - getGetV1ListGraphExecutionsInfiniteQueryOptions(agentGraphId) - .queryKey, - }); - - if (onClearSelectedRun) onClearSelectedRun(); - - setShowDeleteDialog(false); - } catch (error: unknown) { - toast({ - title: "Failed to delete run", - description: - error instanceof Error - ? error.message - : "An unexpected error occurred.", - variant: "destructive", - }); - } - } - async function handleStopRun() { try { await stopRun({ - graphId: run?.graph_id ?? "", - graphExecId: run?.id ?? "", + graphId: args.run?.graph_id ?? "", + graphExecId: args.run?.id ?? 
"", }); toast({ title: "Run stopped" }); await queryClient.invalidateQueries({ - queryKey: - getGetV1ListGraphExecutionsInfiniteQueryOptions(agentGraphId) - .queryKey, + queryKey: getGetV1ListGraphExecutionsInfiniteQueryOptions( + args.agentGraphId, + ).queryKey, }); } catch (error: unknown) { toast({ @@ -87,7 +59,7 @@ export function useRunDetailHeader( } async function handleRunAgain() { - if (!run) { + if (!args.run) { toast({ title: "Run not found", description: "Run not found", @@ -100,23 +72,23 @@ export function useRunDetailHeader( toast({ title: "Run started" }); const res = await executeRun({ - graphId: run.graph_id, - graphVersion: run.graph_version, + graphId: args.run.graph_id, + graphVersion: args.run.graph_version, data: { - inputs: (run as any).inputs || {}, - credentials_inputs: (run as any).credential_inputs || {}, + inputs: args.run.inputs || {}, + credentials_inputs: args.run.credential_inputs || {}, }, }); const newRunId = res?.status === 200 ? (res?.data?.id ?? "") : ""; await queryClient.invalidateQueries({ - queryKey: - getGetV1ListGraphExecutionsInfiniteQueryOptions(agentGraphId) - .queryKey, + queryKey: getGetV1ListGraphExecutionsInfiniteQueryOptions( + args.agentGraphId, + ).queryKey, }); - if (newRunId && onSelectRun) onSelectRun(newRunId); + if (newRunId && args.onSelectRun) args.onSelectRun(newRunId); } catch (error: unknown) { toast({ title: "Failed to start run", @@ -134,8 +106,8 @@ export function useRunDetailHeader( } // Open in builder URL helper - const openInBuilderHref = run - ? `/build?flowID=${run.graph_id}&flowVersion=${run.graph_version}&flowExecutionID=${run.id}` + const openInBuilderHref = args.run + ? `/build?flowID=${args.run.graph_id}&flowVersion=${args.run.graph_version}&flowExecutionID=${args.run.id}` : undefined; return { @@ -143,11 +115,8 @@ export function useRunDetailHeader( showDeleteDialog, canStop, isStopping, - isDeleting, - isRunning: run?.status === "RUNNING", isRunningAgain, handleShowDeleteDialog, - handleDeleteRun, handleStopRun, handleRunAgain, } as const; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx index 48d93ec64d..6eda578f87 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx @@ -2,24 +2,23 @@ import { useGetV1GetUserTimezone } from "@/app/api/__generated__/endpoints/auth/auth"; import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import { Skeleton } from "@/components/__legacy__/ui/skeleton"; +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; import { Text } from "@/components/atoms/Text/Text"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; -import { - TabsLine, - TabsLineContent, - TabsLineList, - TabsLineTrigger, -} from "@/components/molecules/TabsLine/TabsLine"; import { humanizeCronExpression } from "@/lib/cron-expression-utils"; import { formatInTimezone, getTimezoneDisplayName } from "@/lib/timezone-utils"; import { 
AGENT_LIBRARY_SECTION_PADDING_X } from "../../../helpers"; import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly"; +import { LoadingSelectedContent } from "../LoadingSelectedContent"; import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader"; -import { ScheduleActions } from "./components/ScheduleActions"; +import { SelectedViewLayout } from "../SelectedViewLayout"; +import { SelectedScheduleActions } from "./components/SelectedScheduleActions"; import { useSelectedScheduleView } from "./useSelectedScheduleView"; +const anchorStyles = + "border-b-2 border-transparent pb-1 text-sm font-medium text-slate-600 transition-colors hover:text-slate-900 hover:border-slate-900"; + interface Props { agent: LibraryAgent; scheduleId: string; @@ -35,12 +34,20 @@ export function SelectedScheduleView({ agent.graph_id, scheduleId, ); + const { data: userTzRes } = useGetV1GetUserTimezone({ query: { select: (res) => (res.status === 200 ? res.data.timezone : undefined), }, }); + function scrollToSection(id: string) { + const element = document.getElementById(id); + if (element) { + element.scrollIntoView({ behavior: "smooth", block: "start" }); + } + } + if (error) { return ( - - - - -
- ); + return ; } return ( -
-
-
-
- -
- {schedule ? ( - - ) : null} -
-
- - - - Your input - Schedule - - - - -
- {/* {// TODO: re-enable edit inputs modal once the API supports it */} - {/* {schedule && Object.keys(schedule.input_data).length > 0 && ( - - )} */} - -
-
-
- - - - {isLoading || !schedule ? ( -
Loading…
- ) : ( -
- { - // TODO: re-enable edit schedule modal once the API supports it - /* */ - } -
- - Name - -

{schedule.name}

-
-
- - Recurrence - -

- {humanizeCronExpression(schedule.cron)} - {" β€’ "} - - {getTimezoneDisplayName( - schedule.timezone || userTzRes || "UTC", - )} - -

-
-
- - Next run - -

- {formatInTimezone( - schedule.next_run_time, - userTzRes || "UTC", - { - year: "numeric", - month: "long", - day: "numeric", - hour: "2-digit", - minute: "2-digit", - hour12: false, - }, - )}{" "} - β€’{" "} - - {getTimezoneDisplayName( - schedule.timezone || userTzRes || "UTC", - )} - -

+
+
+ +
+
+
+
+
- )} - - - +
+ + {/* Navigation Links */} +
+ +
+ + {/* Schedule Section */} +
+ + {isLoading || !schedule ? ( +
+ +
+ ) : ( +
+
+ Name + {schedule.name} +
+
+ Recurrence + + {humanizeCronExpression(schedule.cron)}{" "} + β€’{" "} + + {getTimezoneDisplayName( + schedule.timezone || userTzRes || "UTC", + )} + + +
+
+ Next run + + {formatInTimezone( + schedule.next_run_time, + userTzRes || "UTC", + { + year: "numeric", + month: "long", + day: "numeric", + hour: "2-digit", + minute: "2-digit", + hour12: false, + }, + )}{" "} + β€’{" "} + + {getTimezoneDisplayName( + schedule.timezone || userTzRes || "UTC", + )} + + +
+
+ )} +
+
+ + {/* Input Section */} +
+ +
+ {/* {// TODO: re-enable edit inputs modal once the API supports it */} + {/* {schedule && Object.keys(schedule.input_data).length > 0 && ( + + )} */} + +
+
+
+
+
+
+ {schedule ? ( +
+ +
+ ) : null}
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/ScheduleActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/ScheduleActions.tsx deleted file mode 100644 index 2e21ccc2b3..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/ScheduleActions.tsx +++ /dev/null @@ -1,119 +0,0 @@ -import { useScheduleDetailHeader } from "../../RunDetailHeader/useScheduleDetailHeader"; -import { useDeleteV1DeleteExecutionSchedule } from "@/app/api/__generated__/endpoints/schedules/schedules"; -import { useToast } from "@/components/molecules/Toast/use-toast"; -import { useState } from "react"; -import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import { Button } from "@/components/atoms/Button/Button"; -import { Text } from "@/components/atoms/Text/Text"; -import { Dialog } from "@/components/molecules/Dialog/Dialog"; -import { - ArrowSquareOut, - PencilSimpleIcon, - TrashIcon, -} from "@phosphor-icons/react"; - -type Props = { - agent: LibraryAgent; - scheduleId: string; - onDeleted?: () => void; -}; - -export function ScheduleActions({ agent, scheduleId, onDeleted }: Props) { - const { toast } = useToast(); - const { mutateAsync: deleteSchedule } = useDeleteV1DeleteExecutionSchedule(); - const [isDeleting, setIsDeleting] = useState(false); - const [showDeleteDialog, setShowDeleteDialog] = useState(false); - - const { openInBuilderHref } = useScheduleDetailHeader( - agent.graph_id, - scheduleId, - agent.graph_version, - ); - - async function handleDelete() { - setIsDeleting(true); - try { - await deleteSchedule({ scheduleId }); - toast({ title: "Schedule deleted" }); - setShowDeleteDialog(false); - if (onDeleted) onDeleted(); - } catch (error: unknown) { - toast({ - title: "Failed to delete schedule", - description: - error instanceof Error - ? error.message - : "An unexpected error occurred.", - variant: "destructive", - }); - } finally { - setIsDeleting(false); - } - } - - return ( - <> -
- {openInBuilderHref && ( - - )} - - -
- - - -
- - Are you sure you want to delete this schedule? This action cannot - be undone. - - - - - -
-
-
- - ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/SelectedScheduleActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/SelectedScheduleActions.tsx new file mode 100644 index 0000000000..16e08e48ba --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/SelectedScheduleActions.tsx @@ -0,0 +1,38 @@ +import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Button } from "@/components/atoms/Button/Button"; +import { EyeIcon } from "@phosphor-icons/react"; +import { AgentActionsDropdown } from "../../AgentActionsDropdown"; +import { useScheduleDetailHeader } from "../../RunDetailHeader/useScheduleDetailHeader"; + +type Props = { + agent: LibraryAgent; + scheduleId: string; + onDeleted?: () => void; +}; + +export function SelectedScheduleActions({ agent, scheduleId }: Props) { + const { openInBuilderHref } = useScheduleDetailHeader( + agent.graph_id, + scheduleId, + agent.graph_version, + ); + + return ( + <> +
+ {openInBuilderHref && ( + + )} + +
+ + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedViewLayout.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedViewLayout.tsx new file mode 100644 index 0000000000..7c1153374b --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedViewLayout.tsx @@ -0,0 +1,29 @@ +import { Breadcrumbs } from "@/components/molecules/Breadcrumbs/Breadcrumbs"; +import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../helpers"; +import { SectionWrap } from "../other/SectionWrap"; + +interface Props { + agentName: string; + agentId: string; + children: React.ReactNode; +} + +export function SelectedViewLayout(props: Props) { + return ( + +
+ +
+
+ {props.children} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/ShareRunButton/ShareRunButton.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/ShareRunButton/ShareRunButton.tsx index be356424bd..8756d2f4bd 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/ShareRunButton/ShareRunButton.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/ShareRunButton/ShareRunButton.tsx @@ -1,18 +1,17 @@ "use client"; -import React from "react"; import { Button } from "@/components/atoms/Button/Button"; -import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { Input } from "@/components/atoms/Input/Input"; +import { Text } from "@/components/atoms/Text/Text"; import { Alert, AlertDescription } from "@/components/molecules/Alert/Alert"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; import { - ShareFatIcon, - CopyIcon, CheckIcon, + CopyIcon, + ShareFatIcon, WarningIcon, } from "@phosphor-icons/react"; import { useShareRunButton } from "./useShareRunButton"; -import { Input } from "@/components/atoms/Input/Input"; -import { Text } from "@/components/atoms/Text/Text"; interface Props { graphId: string; @@ -49,12 +48,12 @@ export function ShareRunButton({ > diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx index 6d5f2f98a6..a8d0eeb8e9 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx @@ -62,7 +62,12 @@ export function SidebarRunsList({ if (loading) { return ( -
+
@@ -119,7 +124,7 @@ export function SidebarRunsList({ hasMore={!!hasMoreRuns} isFetchingMore={isFetchingMoreRuns} onEndReached={fetchMoreRuns} - className="flex flex-nowrap items-center justify-start gap-4 overflow-x-scroll px-1 pb-4 pt-1 lg:flex-col lg:gap-3 lg:overflow-x-hidden" + className="flex max-h-[76vh] flex-nowrap items-center justify-start gap-4 overflow-x-scroll px-1 pb-4 pt-1 scrollbar-thin scrollbar-track-transparent scrollbar-thumb-zinc-300 lg:flex-col lg:gap-3 lg:overflow-y-auto lg:overflow-x-hidden" itemWrapperClassName="w-auto lg:w-full" renderItem={(run) => (
@@ -140,7 +145,7 @@ export function SidebarRunsList({ AGENT_LIBRARY_SECTION_PADDING_X, )} > -
+
{schedules.length > 0 ? ( schedules.map((s: GraphExecutionJobInfo) => (
@@ -167,7 +172,7 @@ export function SidebarRunsList({ AGENT_LIBRARY_SECTION_PADDING_X, )} > -
+
No templates saved diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts index 011956cb40..46b9c9abc7 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts @@ -15,6 +15,7 @@ function parseTab(value: string | null): "runs" | "scheduled" | "templates" { export function useNewAgentLibraryView() { const { id } = useParams(); const agentId = id as string; + const { data: response, isSuccess, @@ -34,12 +35,12 @@ export function useNewAgentLibraryView() { const activeTab = useMemo(() => parseTab(activeTabRaw), [activeTabRaw]); useEffect(() => { - if (!activeTabRaw) { + if (!activeTabRaw && !activeItem) { setQueryStates({ activeTab: "runs", }); } - }, [activeTabRaw, setQueryStates]); + }, [activeTabRaw, activeItem, setQueryStates]); const [sidebarCounts, setSidebarCounts] = useState({ runsCount: 0, diff --git a/autogpt_platform/frontend/src/components/atoms/Button/Button.stories.tsx b/autogpt_platform/frontend/src/components/atoms/Button/Button.stories.tsx index 35ef38bacb..e525d313b5 100644 --- a/autogpt_platform/frontend/src/components/atoms/Button/Button.stories.tsx +++ b/autogpt_platform/frontend/src/components/atoms/Button/Button.stories.tsx @@ -1,11 +1,19 @@ import type { Meta, StoryObj } from "@storybook/nextjs"; import { Play, Plus } from "lucide-react"; +import { TooltipProvider } from "../Tooltip/BaseTooltip"; import { Button } from "./Button"; const meta: Meta = { title: "Atoms/Button", tags: ["autodocs"], component: Button, + decorators: [ + (Story) => ( + + + + ), + ], parameters: { layout: "centered", docs: { diff --git a/autogpt_platform/frontend/src/components/atoms/Button/Button.tsx b/autogpt_platform/frontend/src/components/atoms/Button/Button.tsx index 192e53fc00..29e86dd846 100644 --- a/autogpt_platform/frontend/src/components/atoms/Button/Button.tsx +++ b/autogpt_platform/frontend/src/components/atoms/Button/Button.tsx @@ -1,3 +1,8 @@ +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/atoms/Tooltip/BaseTooltip"; import { cn } from "@/lib/utils"; import { CircleNotchIcon } from "@phosphor-icons/react/dist/ssr"; import NextLink, { type LinkProps } from "next/link"; @@ -20,6 +25,24 @@ export function Button(props: ButtonProps) { const disabled = "disabled" in props ? props.disabled : false; const isDisabled = disabled; + // Extract aria-label for tooltip on icon variant + const ariaLabel = + "aria-label" in restProps ? 
restProps["aria-label"] : undefined; + const shouldShowTooltip = variant === "icon" && ariaLabel && !loading; + + // Helper to wrap button with tooltip if needed + const wrapWithTooltip = (buttonElement: React.ReactElement) => { + if (shouldShowTooltip) { + return ( + + {buttonElement} + {ariaLabel} + + ); + } + return buttonElement; + }; + const buttonContent = ( <> {loading && ( @@ -38,7 +61,7 @@ export function Button(props: ButtonProps) { delete buttonRest.href; } - return ( + const linkButton = ( ); + + return wrapWithTooltip(loadingButton); } if (as === "NextLink") { - return ( + const nextLinkButton = ( ); + + return wrapWithTooltip(nextLinkButton); } - return ( + const regularButton = ( - )} - {supportsApiKey && ( - - )} - {supportsUserPassword && ( - - )} - {supportsHostScoped && credentials.discriminatorValue && ( - - )} -
- {modals} - {oAuthError && ( -
Error: {oAuthError}
+ return ( +
+
+ {displayName} credentials + {schema.description && ( + )}
- ); - } - function handleValueChange(newValue: string) { - if (newValue === "sign-in") { - // Trigger OAuth2 sign in flow - handleOAuthLogin(); - } else if (newValue === "add-api-key") { - // Open API key dialog - setAPICredentialsModalOpen(true); - } else if (newValue === "add-user-password") { - // Open user password dialog - setUserPasswordCredentialsModalOpen(true); - } else if (newValue === "add-host-scoped") { - // Open host-scoped credentials dialog - setHostScopedCredentialsModalOpen(true); - } else { - const selectedCreds = savedCredentials.find((c) => c.id == newValue)!; + {hasCredentialsToShow ? ( + <> + {credentialsToShow.length > 1 && !readOnly ? ( + + ) : ( +
+ {credentialsToShow.map((credential) => { + return ( + handleCredentialSelect(credential.id)} + onDelete={() => + handleDeleteCredential({ + id: credential.id, + title: getCredentialDisplayName( + credential, + displayName, + ), + }) + } + readOnly={readOnly} + /> + ); + })} +
+ )} + {!readOnly && ( + + )} + + ) : ( + !readOnly && ( + + ) + )} - onSelectCredentials({ - id: selectedCreds.id, - type: selectedCreds.type, - provider: provider, - // title: customTitle, // TODO: add input for title - }); - } - } + {!readOnly && ( + <> + {supportsApiKey ? ( + setAPICredentialsModalOpen(false)} + onCredentialsCreate={(credsMeta) => { + onSelectCredentials(credsMeta); + setAPICredentialsModalOpen(false); + }} + siblingInputs={siblingInputs} + /> + ) : null} + {supportsOAuth2 ? ( + oAuthPopupController?.abort("canceled")} + providerName={providerName} + /> + ) : null} + {supportsUserPassword ? ( + setUserPasswordCredentialsModalOpen(false)} + onCredentialsCreate={(creds) => { + onSelectCredentials(creds); + setUserPasswordCredentialsModalOpen(false); + }} + siblingInputs={siblingInputs} + /> + ) : null} + {supportsHostScoped ? ( + setHostScopedCredentialsModalOpen(false)} + onCredentialsCreate={(creds) => { + onSelectCredentials(creds); + setHostScopedCredentialsModalOpen(false); + }} + siblingInputs={siblingInputs} + /> + ) : null} - // Saved credentials exist - return ( -
- {fieldHeader} + {oAuthError ? ( + + Error: {oAuthError} + + ) : null} - - {modals} - {oAuthError && ( -
Error: {oAuthError}
+ setCredentialToDelete(null)} + onConfirm={handleDeleteConfirm} + /> + )}
); -}; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/APIKeyCredentialsModal/APIKeyCredentialsModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/APIKeyCredentialsModal/APIKeyCredentialsModal.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/APIKeyCredentialsModal/APIKeyCredentialsModal.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/APIKeyCredentialsModal/APIKeyCredentialsModal.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/APIKeyCredentialsModal/useAPIKeyCredentialsModal.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/APIKeyCredentialsModal/useAPIKeyCredentialsModal.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/APIKeyCredentialsModal/useAPIKeyCredentialsModal.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/APIKeyCredentialsModal/useAPIKeyCredentialsModal.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialRow/CredentialRow.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialRow/CredentialRow.tsx new file mode 100644 index 0000000000..251b4cfbb4 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialRow/CredentialRow.tsx @@ -0,0 +1,102 @@ +import { IconKey } from "@/components/__legacy__/ui/icons"; +import { Text } from "@/components/atoms/Text/Text"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "@/components/molecules/DropdownMenu/DropdownMenu"; +import { cn } from "@/lib/utils"; +import { CaretDown, DotsThreeVertical } from "@phosphor-icons/react"; +import { + fallbackIcon, + getCredentialDisplayName, + MASKED_KEY_LENGTH, + providerIcons, +} from "../../helpers"; + +type CredentialRowProps = { + credential: { + id: string; + title?: string; + username?: string; + type: string; + provider: string; + }; + provider: string; + displayName: string; + onSelect: () => void; + onDelete: () => void; + readOnly?: boolean; + showCaret?: boolean; + asSelectTrigger?: boolean; +}; + +export function CredentialRow({ + credential, + provider, + displayName, + onSelect, + onDelete, + readOnly = false, + showCaret = false, + asSelectTrigger = false, +}: CredentialRowProps) { + const ProviderIcon = providerIcons[provider] || fallbackIcon; + + return ( +
+
+ +
+ +
+ + {getCredentialDisplayName(credential, displayName)} + + + {"*".repeat(MASKED_KEY_LENGTH)} + +
+ {showCaret && !asSelectTrigger && ( + + )} + {!readOnly && !showCaret && !asSelectTrigger && ( + + + + + + { + e.stopPropagation(); + onDelete(); + }} + > + Delete + + + + )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialsSelect/CredentialsSelect.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialsSelect/CredentialsSelect.tsx new file mode 100644 index 0000000000..29f9b09a22 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialsSelect/CredentialsSelect.tsx @@ -0,0 +1,86 @@ +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/__legacy__/ui/select"; +import { Text } from "@/components/atoms/Text/Text"; +import { CredentialsMetaInput } from "@/lib/autogpt-server-api/types"; +import { useEffect } from "react"; +import { getCredentialDisplayName } from "../../helpers"; +import { CredentialRow } from "../CredentialRow/CredentialRow"; + +interface Props { + credentials: Array<{ + id: string; + title?: string; + username?: string; + type: string; + provider: string; + }>; + provider: string; + displayName: string; + selectedCredentials?: CredentialsMetaInput; + onSelectCredential: (credentialId: string) => void; + readOnly?: boolean; +} + +export function CredentialsSelect({ + credentials, + provider, + displayName, + selectedCredentials, + onSelectCredential, + readOnly = false, +}: Props) { + // Auto-select first credential if none is selected + useEffect(() => { + if (!selectedCredentials && credentials.length > 0) { + onSelectCredential(credentials[0].id); + } + }, [selectedCredentials, credentials, onSelectCredential]); + + return ( +
+ +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/DeleteConfirmationModal/DeleteConfirmationModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/DeleteConfirmationModal/DeleteConfirmationModal.tsx new file mode 100644 index 0000000000..e3dd811ccc --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/DeleteConfirmationModal/DeleteConfirmationModal.tsx @@ -0,0 +1,49 @@ +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; + +interface Props { + credentialToDelete: { id: string; title: string } | null; + isDeleting: boolean; + onClose: () => void; + onConfirm: () => void; +} + +export function DeleteConfirmationModal({ + credentialToDelete, + isDeleting, + onClose, + onConfirm, +}: Props) { + return ( + { + if (!open) onClose(); + }, + }} + title="Delete credential" + styling={{ maxWidth: "32rem" }} + > + + + Are you sure you want to delete "{credentialToDelete?.title} + "? This action cannot be undone. + + + + + + + + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/HotScopedCredentialsModal/HotScopedCredentialsModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/HotScopedCredentialsModal/HotScopedCredentialsModal.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/HotScopedCredentialsModal/HotScopedCredentialsModal.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/HotScopedCredentialsModal/HotScopedCredentialsModal.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/OAuthWaitingModal/OAuthWaitingModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/OAuthWaitingModal/OAuthWaitingModal.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/OAuthWaitingModal/OAuthWaitingModal.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/OAuthWaitingModal/OAuthWaitingModal.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/PasswordCredentialsModal/PasswordCredentialsModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/PasswordCredentialsModal/PasswordCredentialsModal.tsx similarity index 94% rename from 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/PasswordCredentialsModal/PasswordCredentialsModal.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/PasswordCredentialsModal/PasswordCredentialsModal.tsx index 5fbea007cc..c75b7be988 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/PasswordCredentialsModal/PasswordCredentialsModal.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/PasswordCredentialsModal/PasswordCredentialsModal.tsx @@ -1,15 +1,15 @@ -import { z } from "zod"; -import { useForm } from "react-hook-form"; -import { zodResolver } from "@hookform/resolvers/zod"; -import { Input } from "@/components/atoms/Input/Input"; -import { Button } from "@/components/atoms/Button/Button"; -import { Dialog } from "@/components/molecules/Dialog/Dialog"; import { Form, FormField } from "@/components/__legacy__/ui/form"; +import { Button } from "@/components/atoms/Button/Button"; +import { Input } from "@/components/atoms/Input/Input"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; import useCredentials from "@/hooks/useCredentials"; import { BlockIOCredentialsSubSchema, CredentialsMetaInput, } from "@/lib/autogpt-server-api/types"; +import { zodResolver } from "@hookform/resolvers/zod"; +import { useForm } from "react-hook-form"; +import { z } from "zod"; type Props = { schema: BlockIOCredentialsSubSchema; @@ -85,7 +85,7 @@ export function PasswordCredentialsModal({
)} @@ -110,7 +109,6 @@ export function PasswordCredentialsModal({ label="Password" type="password" placeholder="Enter password..." - size="small" {...field} /> )} @@ -124,12 +122,12 @@ export function PasswordCredentialsModal({ label="Name" type="text" placeholder="Enter a name for this user login..." - size="small" + className="mb-8" {...field} /> )} /> - diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/helpers.ts new file mode 100644 index 0000000000..9e6f374437 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/helpers.ts @@ -0,0 +1,102 @@ +import { KeyIcon } from "@phosphor-icons/react"; +import { NotionLogoIcon } from "@radix-ui/react-icons"; +import { + FaDiscord, + FaGithub, + FaGoogle, + FaHubspot, + FaMedium, + FaTwitter, +} from "react-icons/fa"; + +export const fallbackIcon = KeyIcon; + +export const providerIcons: Partial< + Record> +> = { + aiml_api: fallbackIcon, + anthropic: fallbackIcon, + apollo: fallbackIcon, + e2b: fallbackIcon, + github: FaGithub, + google: FaGoogle, + groq: fallbackIcon, + http: fallbackIcon, + notion: NotionLogoIcon, + nvidia: fallbackIcon, + discord: FaDiscord, + d_id: fallbackIcon, + google_maps: FaGoogle, + jina: fallbackIcon, + ideogram: fallbackIcon, + linear: fallbackIcon, + medium: FaMedium, + mem0: fallbackIcon, + ollama: fallbackIcon, + openai: fallbackIcon, + openweathermap: fallbackIcon, + open_router: fallbackIcon, + llama_api: fallbackIcon, + pinecone: fallbackIcon, + enrichlayer: fallbackIcon, + slant3d: fallbackIcon, + screenshotone: fallbackIcon, + smtp: fallbackIcon, + replicate: fallbackIcon, + reddit: fallbackIcon, + fal: fallbackIcon, + revid: fallbackIcon, + twitter: FaTwitter, + unreal_speech: fallbackIcon, + exa: fallbackIcon, + hubspot: FaHubspot, + smartlead: fallbackIcon, + todoist: fallbackIcon, + zerobounce: fallbackIcon, +}; + +export type OAuthPopupResultMessage = { message_type: "oauth_popup_result" } & ( + | { + success: true; + code: string; + state: string; + } + | { + success: false; + message: string; + } +); + +export function getActionButtonText( + supportsOAuth2: boolean, + supportsApiKey: boolean, + supportsUserPassword: boolean, + supportsHostScoped: boolean, + hasExistingCredentials: boolean, +): string { + if (hasExistingCredentials) { + if (supportsOAuth2) return "Connect a different account"; + if (supportsApiKey) return "Use a different API key"; + if (supportsUserPassword) return "Use a different username and password"; + if (supportsHostScoped) return "Use different headers"; + return "Add credentials"; + } else { + if (supportsOAuth2) return "Add account"; + if (supportsApiKey) return "Add API key"; + if (supportsUserPassword) return "Add username and password"; + if (supportsHostScoped) return "Add headers"; + return "Add credentials"; + } +} + +export function getCredentialDisplayName( + credential: { title?: string; username?: string }, + displayName: string, +): string { + return ( + credential.title || credential.username || `Your ${displayName} account` + ); +} + +export const OAUTH_TIMEOUT_MS = 5 * 60 * 1000; +export const MASKED_KEY_LENGTH = 30; diff --git 
a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/useCredentialsInputs.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/useCredentialsInputs.ts new file mode 100644 index 0000000000..460980c10b --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/useCredentialsInputs.ts @@ -0,0 +1,318 @@ +import { useDeleteV1DeleteCredentials } from "@/app/api/__generated__/endpoints/integrations/integrations"; +import useCredentials from "@/hooks/useCredentials"; +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; +import { + BlockIOCredentialsSubSchema, + CredentialsMetaInput, +} from "@/lib/autogpt-server-api/types"; +import { CredentialsProvidersContext } from "@/providers/agent-credentials/credentials-provider"; +import { useQueryClient } from "@tanstack/react-query"; +import { useContext, useEffect, useMemo, useState } from "react"; +import { + getActionButtonText, + OAUTH_TIMEOUT_MS, + OAuthPopupResultMessage, +} from "./helpers"; + +type Args = { + schema: BlockIOCredentialsSubSchema; + selectedCredentials?: CredentialsMetaInput; + onSelectCredentials: (newValue?: CredentialsMetaInput) => void; + siblingInputs?: Record; + onLoaded?: (loaded: boolean) => void; + readOnly?: boolean; +}; + +export function useCredentialsInputs({ + schema, + selectedCredentials, + onSelectCredentials, + siblingInputs, + onLoaded, + readOnly = false, +}: Args) { + const [isAPICredentialsModalOpen, setAPICredentialsModalOpen] = + useState(false); + const [ + isUserPasswordCredentialsModalOpen, + setUserPasswordCredentialsModalOpen, + ] = useState(false); + const [isHostScopedCredentialsModalOpen, setHostScopedCredentialsModalOpen] = + useState(false); + const [isOAuth2FlowInProgress, setOAuth2FlowInProgress] = useState(false); + const [oAuthPopupController, setOAuthPopupController] = + useState(null); + const [oAuthError, setOAuthError] = useState(null); + const [credentialToDelete, setCredentialToDelete] = useState<{ + id: string; + title: string; + } | null>(null); + + const api = useBackendAPI(); + const queryClient = useQueryClient(); + const credentials = useCredentials(schema, siblingInputs); + const allProviders = useContext(CredentialsProvidersContext); + + const deleteCredentialsMutation = useDeleteV1DeleteCredentials({ + mutation: { + onSuccess: () => { + queryClient.invalidateQueries({ + queryKey: ["/api/integrations/credentials"], + }); + queryClient.invalidateQueries({ + queryKey: [`/api/integrations/${credentials?.provider}/credentials`], + }); + setCredentialToDelete(null); + if (selectedCredentials?.id === credentialToDelete?.id) { + onSelectCredentials(undefined); + } + }, + }, + }); + + const rawProvider = credentials + ? 
allProviders?.[credentials.provider as keyof typeof allProviders] + : null; + + useEffect(() => { + if (onLoaded) { + onLoaded(Boolean(credentials && credentials.isLoading === false)); + } + }, [credentials, onLoaded]); + + useEffect(() => { + if (readOnly) return; + if (!credentials || !("savedCredentials" in credentials)) return; + if ( + selectedCredentials && + !credentials.savedCredentials.some((c) => c.id === selectedCredentials.id) + ) { + onSelectCredentials(undefined); + } + }, [credentials, selectedCredentials, onSelectCredentials, readOnly]); + + const { singleCredential } = useMemo(() => { + if (!credentials || !("savedCredentials" in credentials)) { + return { + singleCredential: null, + }; + } + + const single = + credentials.savedCredentials.length === 1 + ? credentials.savedCredentials[0] + : null; + + return { + singleCredential: single, + }; + }, [credentials]); + + useEffect(() => { + if (readOnly) return; + if (singleCredential && !selectedCredentials) { + onSelectCredentials(singleCredential); + } + }, [singleCredential, selectedCredentials, onSelectCredentials, readOnly]); + + if ( + !credentials || + credentials.isLoading || + !("savedCredentials" in credentials) + ) { + return { + isLoading: true, + }; + } + + const { + provider, + providerName, + supportsApiKey, + supportsOAuth2, + supportsUserPassword, + supportsHostScoped, + savedCredentials, + oAuthCallback, + } = credentials; + + const allSavedCredentials = rawProvider?.savedCredentials || savedCredentials; + + const credentialsToShow = (() => { + const creds = [...allSavedCredentials]; + if ( + !readOnly && + selectedCredentials && + !creds.some((c) => c.id === selectedCredentials.id) + ) { + creds.push({ + id: selectedCredentials.id, + type: selectedCredentials.type, + title: selectedCredentials.title || "Selected credential", + provider: provider, + } as any); + } + return creds; + })(); + + async function handleOAuthLogin() { + setOAuthError(null); + const { login_url, state_token } = await api.oAuthLogin( + provider, + schema.credentials_scopes, + ); + setOAuth2FlowInProgress(true); + const popup = window.open(login_url, "_blank", "popup=true"); + + if (!popup) { + throw new Error( + "Failed to open popup window. 
Please allow popups for this site.", + ); + } + + const controller = new AbortController(); + setOAuthPopupController(controller); + controller.signal.onabort = () => { + console.debug("OAuth flow aborted"); + setOAuth2FlowInProgress(false); + popup.close(); + }; + + const handleMessage = async (e: MessageEvent) => { + console.debug("Message received:", e.data); + if ( + typeof e.data != "object" || + !("message_type" in e.data) || + e.data.message_type !== "oauth_popup_result" + ) { + console.debug("Ignoring irrelevant message"); + return; + } + + if (!e.data.success) { + console.error("OAuth flow failed:", e.data.message); + setOAuthError(`OAuth flow failed: ${e.data.message}`); + setOAuth2FlowInProgress(false); + return; + } + + if (e.data.state !== state_token) { + console.error("Invalid state token received"); + setOAuthError("Invalid state token received"); + setOAuth2FlowInProgress(false); + return; + } + + try { + console.debug("Processing OAuth callback"); + const credentials = await oAuthCallback(e.data.code, e.data.state); + console.debug("OAuth callback processed successfully"); + onSelectCredentials({ + id: credentials.id, + type: "oauth2", + title: credentials.title, + provider, + }); + } catch (error) { + console.error("Error in OAuth callback:", error); + setOAuthError( + `Error in OAuth callback: ${ + error instanceof Error ? error.message : String(error) + }`, + ); + } finally { + console.debug("Finalizing OAuth flow"); + setOAuth2FlowInProgress(false); + controller.abort("success"); + } + }; + + console.debug("Adding message event listener"); + window.addEventListener("message", handleMessage, { + signal: controller.signal, + }); + + setTimeout(() => { + console.debug("OAuth flow timed out"); + controller.abort("timeout"); + setOAuth2FlowInProgress(false); + setOAuthError("OAuth flow timed out"); + }, OAUTH_TIMEOUT_MS); + } + + function handleActionButtonClick() { + if (supportsOAuth2) { + handleOAuthLogin(); + } else if (supportsApiKey) { + setAPICredentialsModalOpen(true); + } else if (supportsUserPassword) { + setUserPasswordCredentialsModalOpen(true); + } else if (supportsHostScoped) { + setHostScopedCredentialsModalOpen(true); + } + } + + function handleCredentialSelect(credentialId: string) { + const selectedCreds = credentialsToShow.find((c) => c.id === credentialId); + if (selectedCreds) { + onSelectCredentials({ + id: selectedCreds.id, + type: selectedCreds.type, + provider: provider, + title: (selectedCreds as any).title, + }); + } + } + + function handleDeleteCredential(credential: { id: string; title: string }) { + setCredentialToDelete(credential); + } + + function handleDeleteConfirm() { + if (credentialToDelete && credentials) { + deleteCredentialsMutation.mutate({ + provider: credentials.provider, + credId: credentialToDelete.id, + }); + } + } + + return { + isLoading: false as const, + provider, + providerName, + supportsApiKey, + supportsOAuth2, + supportsUserPassword, + supportsHostScoped, + credentialsToShow, + selectedCredentials, + oAuthError, + isAPICredentialsModalOpen, + isUserPasswordCredentialsModalOpen, + isHostScopedCredentialsModalOpen, + isOAuth2FlowInProgress, + oAuthPopupController, + credentialToDelete, + deleteCredentialsMutation, + actionButtonText: getActionButtonText( + supportsOAuth2, + supportsApiKey, + supportsUserPassword, + supportsHostScoped, + credentialsToShow.length > 0, + ), + setAPICredentialsModalOpen, + setUserPasswordCredentialsModalOpen, + setHostScopedCredentialsModalOpen, + setCredentialToDelete, + 
handleActionButtonClick, + handleCredentialSelect, + handleDeleteCredential, + handleDeleteConfirm, + handleOAuthLogin, + onSelectCredentials, + schema, + siblingInputs, + }; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx index 810dfc9e64..eff83cf824 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx @@ -5,12 +5,8 @@ import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutio import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { Button } from "@/components/atoms/Button/Button"; import { Dialog } from "@/components/molecules/Dialog/Dialog"; -import { AlarmIcon } from "@phosphor-icons/react"; import { useState } from "react"; import { ScheduleAgentModal } from "../ScheduleAgentModal/ScheduleAgentModal"; -import { AgentCostSection } from "./components/AgentCostSection/AgentCostSection"; -import { AgentDetails } from "./components/AgentDetails/AgentDetails"; -import { AgentSectionHeader } from "./components/AgentSectionHeader/AgentSectionHeader"; import { ModalHeader } from "./components/ModalHeader/ModalHeader"; import { ModalRunSection } from "./components/ModalRunSection/ModalRunSection"; import { RunActions } from "./components/RunActions/RunActions"; @@ -122,59 +118,34 @@ export function RunAgentModal({ > {triggerSlot} -
- {/* Header */} -
- - -
+ {/* Header */} + - {/* Scrollable content */} -
- {/* Setup Section */} -
- {hasAnySetupFields ? ( - - <> - - - - - ) : null} -
- - {/* Agent Details Section */} -
- - -
+ {/* Content */} + {hasAnySetupFields ? ( +
+ + +
-
- + ) : null} + +
- {/* TODO: enable once we have an API to show estimated cost for an agent run */} - {/*
- Cost - {cost} -
*/} -
- - {/* TODO: enable once we can easily link to the agent listing page from the library agent response */} - {/* */} -
-
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/AgentDetails/AgentDetails.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/AgentDetails/AgentDetails.tsx deleted file mode 100644 index d88ad44cf3..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/AgentDetails/AgentDetails.tsx +++ /dev/null @@ -1,48 +0,0 @@ -import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import { Text } from "@/components/atoms/Text/Text"; -import { Badge } from "@/components/atoms/Badge/Badge"; -import { formatDate } from "@/lib/utils/time"; - -interface Props { - agent: LibraryAgent; -} - -export function AgentDetails({ agent }: Props) { - return ( -
-
- - Version - -
- - v{agent.graph_version} - - {agent.is_latest_version && ( - - Latest - - )} -
-
-
- - Last Updated - - - {formatDate(agent.updated_at)} - -
- {agent.has_external_trigger && ( -
- - Trigger Type - - - External Webhook - -
- )} -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/AgentSectionHeader/AgentSectionHeader.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/AgentSectionHeader/AgentSectionHeader.tsx deleted file mode 100644 index 6cd2f29d06..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/AgentSectionHeader/AgentSectionHeader.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import { Text } from "@/components/atoms/Text/Text"; - -interface Props { - title: string; -} - -export function AgentSectionHeader({ title }: Props) { - return ( -
- - {title} - -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalHeader/ModalHeader.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalHeader/ModalHeader.tsx index 527475fe2f..2a31f62f82 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalHeader/ModalHeader.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalHeader/ModalHeader.tsx @@ -1,8 +1,8 @@ -import { Badge } from "@/components/atoms/Badge/Badge"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Badge } from "@/components/atoms/Badge/Badge"; +import { Link } from "@/components/atoms/Link/Link"; import { Text } from "@/components/atoms/Text/Text"; import { ShowMoreText } from "@/components/molecules/ShowMoreText/ShowMoreText"; -import { ClockIcon, InfoIcon } from "@phosphor-icons/react"; import { humanizeCronExpression } from "@/lib/cron-expression-utils"; interface ModalHeaderProps { @@ -10,49 +10,56 @@ interface ModalHeaderProps { } export function ModalHeader({ agent }: ModalHeaderProps) { - const isUnknownCreator = agent.creator_name === "Unknown"; + const creator = agent.marketplace_listing?.creator; return ( -
-
- New Run -
+
+ + New Task +
- {agent.name} - {!isUnknownCreator ? ( - by {agent.creator_name} + {agent.name} + {creator ? ( + + by {creator.name} + ) : null} - - {agent.description} - - {/* Schedule recommendation tip */} - {agent.recommended_schedule_cron && !agent.has_external_trigger && ( -
- -

- Tip: For best results, run this agent{" "} + {agent.description ? ( + + {agent.description} + + ) : null} + + {agent.recommended_schedule_cron && !agent.has_external_trigger ? ( +

+ + Tip + + + For best results, run this agent{" "} {humanizeCronExpression( agent.recommended_schedule_cron, ).toLowerCase()} -

+
- )} + ) : null} - {/* Setup Instructions */} - {agent.instructions && ( -
- -
- Setup Instructions:{" "} - {agent.instructions} -
+ {agent.instructions ? ( +
+ + Instructions + + +
+ + {agent.instructions}
- )} + ) : null}
); diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx index f5d63852bf..d6fe7959b8 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx @@ -1,17 +1,13 @@ import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs"; import { Input } from "@/components/atoms/Input/Input"; -import { Text } from "@/components/atoms/Text/Text"; import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; -import { toDisplayName } from "@/providers/agent-credentials/helper"; -import { InfoIcon } from "@phosphor-icons/react"; import { RunAgentInputs } from "../../../RunAgentInputs/RunAgentInputs"; import { useRunAgentModalContext } from "../../context"; +import { ModalSection } from "../ModalSection/ModalSection"; import { WebhookTriggerBanner } from "../WebhookTriggerBanner/WebhookTriggerBanner"; -import { getCredentialTypeDisplayName } from "./helpers"; export function ModalRunSection() { const { - agent, defaultRunType, presetName, setPresetName, @@ -25,147 +21,104 @@ export function ModalRunSection() { agentCredentialsInputFields, } = useRunAgentModalContext(); + const inputFields = Object.entries(agentInputFields || {}); + const credentialFields = Object.entries(agentCredentialsInputFields || {}); + return ( -
- {defaultRunType === "automatic-trigger" && } - - {/* Preset/Trigger fields */} - {defaultRunType === "automatic-trigger" && ( -
-
- - setPresetName(e.target.value)} - /> +
+ {defaultRunType === "automatic-trigger" ? ( + + +
+
+ + setPresetName(e.target.value)} + /> +
+
+ + setPresetDescription(e.target.value)} + /> +
-
- - setPresetDescription(e.target.value)} - /> + + ) : null} + + {inputFields.length > 0 ? ( + + {/* Regular inputs */} + {inputFields.map(([key, inputSubSchema]) => ( +
+ + + setInputValue(key, value)} + data-testid={`agent-input-${key}`} + /> +
+ ))} +
+ ) : null} + + {credentialFields.length > 0 ? ( + +
+ {Object.entries(agentCredentialsInputFields || {}).map( + ([key, inputSubSchema]) => ( + + setInputCredentialsValue(key, value) + } + siblingInputs={inputValues} + /> + ), + )}
-
- )} - - {/* Instructions */} - {agent.instructions && ( -
- -
-

- How to use this agent -

-

- {agent.instructions} -

-
-
- )} - - {/* Credentials inputs */} - {Object.entries(agentCredentialsInputFields || {}).map( - ([key, inputSubSchema]) => ( - - setInputCredentialsValue(key, value) - } - siblingInputs={inputValues} - hideIfSingleCredentialAvailable={!agent.has_external_trigger} - /> - ), - )} - - {/* Regular inputs */} - {Object.entries(agentInputFields || {}).map(([key, inputSubSchema]) => ( -
- - - setInputValue(key, value)} - data-testid={`agent-input-${key}`} - /> -
- ))} - - {/* Selected Credentials Preview */} - {Object.keys(inputCredentials).length > 0 && ( -
- {Object.entries(agentCredentialsInputFields || {}).map( - ([key, _sub]) => { - const credential = inputCredentials[key]; - if (!credential) return null; - - return ( -
- - {toDisplayName(credential.provider)} credentials - -
-
- - Name - - - {getCredentialTypeDisplayName(credential.type)} - -
-
- - {credential.title || "Untitled"} - - - {"*".repeat(25)} - -
-
-
- ); - }, - )} -
- )} +
+ ) : null}
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalSection/ModalSection.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalSection/ModalSection.tsx new file mode 100644 index 0000000000..db859ccfe7 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalSection/ModalSection.tsx @@ -0,0 +1,21 @@ +import { Text } from "@/components/atoms/Text/Text"; + +interface Props { + title: string; + subtitle: string; + children: React.ReactNode; +} + +export function ModalSection({ title, subtitle, children }: Props) { + return ( +
+
+ {title} + + {subtitle} + +
+ {children} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/RunActions/RunActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/RunActions/RunActions.tsx index e4b47a7ea4..3eb9514cc4 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/RunActions/RunActions.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/RunActions/RunActions.tsx @@ -26,7 +26,7 @@ export function RunActions({ > {defaultRunType === "automatic-trigger" ? "Set up Trigger" - : "Run Agent"} + : "Start Task"}
); diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/errorHelpers.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/errorHelpers.tsx new file mode 100644 index 0000000000..3b35e5cc21 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/errorHelpers.tsx @@ -0,0 +1,100 @@ +import { ApiError } from "@/lib/autogpt-server-api/helpers"; +import Link from "next/link"; +import React from "react"; + +type ValidationErrorDetail = { + type: string; + message?: string; + node_errors?: Record>; +}; + +type AgentInfo = { + graph_id: string; + graph_version: number; +}; + +export function formatValidationError( + error: any, + agentInfo?: AgentInfo, +): string | React.ReactNode { + if ( + !(error instanceof ApiError) || + !error.isGraphValidationError() || + !error.response?.detail + ) { + return error.message || "An unexpected error occurred."; + } + + const detail: ValidationErrorDetail = error.response.detail; + + // Format validation errors nicely + if (detail.type === "validation_error" && detail.node_errors) { + const nodeErrors = detail.node_errors; + const errorItems: React.ReactNode[] = []; + + // Collect all field errors + Object.entries(nodeErrors).forEach(([nodeId, fields]) => { + if (fields && typeof fields === "object") { + Object.entries(fields).forEach(([fieldName, fieldError]) => { + errorItems.push( +
+ {fieldName}:{" "} + {String(fieldError)} +
, + ); + }); + } + }); + + if (errorItems.length > 0) { + return ( +
+
+ {detail.message || "Validation failed"} +
+
{errorItems}
+ {agentInfo && ( +
+ Check the agent graph and try to run from there for further + details.{" "} + + Open in builder + +
+ )} +
+ ); + } else { + return detail.message || "Validation failed"; + } + } + + return detail.message || error.message || "An unexpected error occurred."; +} + +export function showExecutionErrorToast( + toast: (options: { + title: string; + description: string | React.ReactNode; + variant: "destructive"; + duration: number; + dismissable: boolean; + }) => void, + error: any, + agentInfo?: AgentInfo, +) { + const errorMessage = formatValidationError(error, agentInfo); + + toast({ + title: "Failed to execute agent", + description: errorMessage, + variant: "destructive", + duration: 10000, // 10 seconds - long enough to read and close + dismissable: true, + }); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.tsx similarity index 97% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.tsx index f1920d66ad..20320427d4 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.tsx @@ -1,22 +1,23 @@ +import { useGetV1GetUserTimezone } from "@/app/api/__generated__/endpoints/auth/auth"; +import { + getGetV1ListGraphExecutionsInfiniteQueryOptions, + usePostV1ExecuteGraphAgent, +} from "@/app/api/__generated__/endpoints/graphs/graphs"; +import { usePostV2SetupTrigger } from "@/app/api/__generated__/endpoints/presets/presets"; +import { + getGetV1ListExecutionSchedulesForAGraphQueryKey, + usePostV1CreateExecutionSchedule as useCreateSchedule, +} from "@/app/api/__generated__/endpoints/schedules/schedules"; +import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; +import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import { useState, useCallback, useMemo } from "react"; -import { useQueryClient } from "@tanstack/react-query"; +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; import { useToast } from "@/components/molecules/Toast/use-toast"; import { isEmpty } from "@/lib/utils"; -import { - usePostV1ExecuteGraphAgent, - getGetV1ListGraphExecutionsInfiniteQueryOptions, -} from "@/app/api/__generated__/endpoints/graphs/graphs"; -import { - usePostV1CreateExecutionSchedule as useCreateSchedule, - getGetV1ListExecutionSchedulesForAGraphQueryKey, -} from "@/app/api/__generated__/endpoints/schedules/schedules"; -import { usePostV2SetupTrigger } from "@/app/api/__generated__/endpoints/presets/presets"; -import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; -import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; -import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; -import { useGetV1GetUserTimezone } from "@/app/api/__generated__/endpoints/auth/auth"; 
import { analytics } from "@/services/analytics"; +import { useQueryClient } from "@tanstack/react-query"; +import { useCallback, useMemo, useState } from "react"; +import { showExecutionErrorToast } from "./errorHelpers"; export type RunVariant = | "manual" @@ -85,14 +86,9 @@ export function useAgentRunModal( } }, onError: (error: any) => { - const errorMessage = error.isGraphValidationError() - ? error.response.detail.message - : error.message; - - toast({ - title: "❌ Failed to execute agent", - description: errorMessage || "An unexpected error occurred.", - variant: "destructive", + showExecutionErrorToast(toast, error, { + graph_id: agent.graph_id, + graph_version: agent.graph_version, }); }, }, diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailCard/RunDetailCard.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailCard/RunDetailCard.tsx index d559faa654..f60c1bf934 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailCard/RunDetailCard.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailCard/RunDetailCard.tsx @@ -15,7 +15,13 @@ export function RunDetailCard({ children, className, title }: Props) { className, )} > - {title && {title}} + {title ? ( + typeof title === "string" ? ( + {title} + ) : ( + title + ) + ) : null} {children}
); diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx index 72144bc5bd..7cce125e7c 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx @@ -129,7 +129,8 @@ export function SelectedRunView({
+
+ Summary diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx index 77cf28708b..0289bbdb5f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx @@ -40,8 +40,8 @@ import { cn, isEmpty } from "@/lib/utils"; import { ClockIcon, CopyIcon, InfoIcon } from "@phosphor-icons/react"; import { CalendarClockIcon, Trash2Icon } from "lucide-react"; -import { AgentStatus, AgentStatusChip } from "./agent-status-chip"; import { analytics } from "@/services/analytics"; +import { AgentStatus, AgentStatusChip } from "./agent-status-chip"; export function AgentRunDraftView({ graph, @@ -674,9 +674,6 @@ export function AgentRunDraftView({ prev.add("credentials"), ); }} - hideIfSingleCredentialAvailable={ - !agentPreset && !graph.has_external_trigger - } /> ), )} diff --git a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/integrations/page.tsx b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/integrations/page.tsx index c8b24b6d9f..7d8ead9df7 100644 --- a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/integrations/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/integrations/page.tsx @@ -1,5 +1,5 @@ "use client"; -import { providerIcons } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs"; + import { IconKey, IconUser } from "@/components/__legacy__/ui/icons"; import LoadingBox from "@/components/__legacy__/ui/loading"; import { @@ -13,6 +13,7 @@ import { import { Button } from "@/components/atoms/Button/Button"; import { Dialog } from "@/components/molecules/Dialog/Dialog"; import { useToast } from "@/components/molecules/Toast/use-toast"; +import { providerIcons } from "@/components/renderers/input-renderer/fields/CredentialField/helpers"; import { CredentialsProviderName } from "@/lib/autogpt-server-api"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { CredentialsProvidersContext } from "@/providers/agent-credentials/credentials-provider"; diff --git a/autogpt_platform/frontend/src/components/atoms/Button/Button.tsx b/autogpt_platform/frontend/src/components/atoms/Button/Button.tsx index 29e86dd846..de1dec2d25 100644 --- a/autogpt_platform/frontend/src/components/atoms/Button/Button.tsx +++ b/autogpt_platform/frontend/src/components/atoms/Button/Button.tsx @@ -15,6 +15,7 @@ export function Button(props: ButtonProps) { variant, size, loading = false, + withTooltip = true, leftIcon, rightIcon, children, @@ -28,7 +29,9 @@ export function Button(props: ButtonProps) { // Extract aria-label for tooltip on icon variant const ariaLabel = "aria-label" in restProps ? 
restProps["aria-label"] : undefined; - const shouldShowTooltip = variant === "icon" && ariaLabel && !loading; + + const shouldShowTooltip = + variant === "icon" && ariaLabel && !loading && withTooltip; // Helper to wrap button with tooltip if needed const wrapWithTooltip = (buttonElement: React.ReactElement) => { diff --git a/autogpt_platform/frontend/src/components/atoms/Button/helpers.ts b/autogpt_platform/frontend/src/components/atoms/Button/helpers.ts index cc733d3df6..db7938661c 100644 --- a/autogpt_platform/frontend/src/components/atoms/Button/helpers.ts +++ b/autogpt_platform/frontend/src/components/atoms/Button/helpers.ts @@ -16,7 +16,7 @@ export const extendedButtonVariants = cva( primary: "bg-zinc-800 border-zinc-800 text-white hover:bg-zinc-900 hover:border-zinc-900 rounded-full disabled:text-white disabled:bg-zinc-200 disabled:border-zinc-200 disabled:opacity-1", secondary: - "bg-zinc-200 border-zinc-200 text-black hover:bg-zinc-400 hover:border-zinc-400 rounded-full disabled:text-zinc-300 disabled:bg-zinc-50 disabled:border-zinc-50 disabled:opacity-1", + "bg-zinc-100 border-zinc-100 text-black hover:bg-zinc-300 hover:border-zinc-300 rounded-full disabled:text-zinc-300 disabled:bg-zinc-50 disabled:border-zinc-50 disabled:opacity-1", destructive: "bg-red-500 border-red-500 text-white hover:bg-red-600 hover:border-red-600 rounded-full disabled:text-white disabled:bg-zinc-200 disabled:border-zinc-200 disabled:opacity-1", outline: @@ -49,6 +49,7 @@ type BaseButtonProps = { leftIcon?: React.ReactNode; rightIcon?: React.ReactNode; asChild?: boolean; + withTooltip?: boolean; } & VariantProps; type ButtonAsButton = BaseButtonProps & diff --git a/autogpt_platform/frontend/src/components/atoms/Input/Input.tsx b/autogpt_platform/frontend/src/components/atoms/Input/Input.tsx index 68ad12e522..f59c01e77d 100644 --- a/autogpt_platform/frontend/src/components/atoms/Input/Input.tsx +++ b/autogpt_platform/frontend/src/components/atoms/Input/Input.tsx @@ -92,7 +92,7 @@ export function Input({ className={cn( baseStyles, errorStyles, - "-mb-1 h-auto min-h-[2.875rem]", + "-mb-1 h-auto min-h-[2.875rem] rounded-medium", // Size variants for textarea size === "small" && [ "min-h-[2.25rem]", // 36px minimum @@ -222,7 +222,7 @@ export function Input({ ) : (
-

Analytics Generation

+

+ Execution Analytics & Accuracy Monitoring +

- This tool will identify completed executions missing activity - summaries or success scores and generate them using AI. Only - executions that meet the criteria and are missing these fields will - be processed. + Generate missing activity summaries and success scores for agent + executions. After generation, accuracy trends and alerts will + automatically be displayed to help monitor agent health over time.

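For reference, the `withTooltip` prop added to `Button` in this patch defaults to `true`, so existing call sites are unaffected; icon buttons can now opt out of the automatic aria-label tooltip. A minimal usage sketch, assuming the existing icon-variant `Button` API shown above (the label, icon, and handler below are illustrative, not taken from the codebase):

```tsx
import { Button } from "@/components/atoms/Button/Button";
import { TrashIcon } from "@phosphor-icons/react";

// Default behaviour: icon buttons with an aria-label get an automatic tooltip.
export function DeleteRunButton({ onDelete }: { onDelete: () => void }) {
  return (
    <Button variant="icon" aria-label="Delete run" onClick={onDelete}>
      <TrashIcon />
    </Button>
  );
}

// Opt out when a parent component already renders its own tooltip.
export function DeleteRunButtonBare({ onDelete }: { onDelete: () => void }) {
  return (
    <Button
      variant="icon"
      aria-label="Delete run"
      withTooltip={false}
      onClick={onDelete}
    >
      <TrashIcon />
    </Button>
  );
}
```

Passing `withTooltip={false}` avoids doubled tooltips when a surrounding component supplies its own.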
Date: Tue, 9 Dec 2025 18:30:13 +0700 Subject: [PATCH 32/58] feat(frontend): add templates/triggers to new Library page view (#11580) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes πŸ—οΈ Add Templates to the new Agent Library page: Screenshot 2025-12-09 at 14 10 01 - You can create a template from a run ( new action button ) - Templates are listed and can be selected on the sidebar - When viewing a template, you can edit it, create a task or delete it Add Triggers to the new Agent Library page: Screenshot 2025-12-09 at 14 10 43 - When an agent contains a trigger block, on the modal it will create a trigger - When there are triggers, they are listed on the sidebar - A trigger can be viewed and edited ## Checklist πŸ“‹ ### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Run the new page locally --- .../NewAgentLibraryView.tsx | 22 ++ .../CredentialsSelect/CredentialsSelect.tsx | 12 +- .../modals/CredentialsInputs/helpers.ts | 10 +- .../modals/RunAgentModal/RunAgentModal.tsx | 54 ++- .../ModalRunSection/ModalRunSection.tsx | 4 +- .../WebhookTriggerBanner.tsx | 2 +- .../modals/RunAgentModal/useAgentRunModal.tsx | 182 ++-------- .../components/other/EmptyTriggers.tsx | 323 ++++++++++++++++++ .../RunDetailHeader/RunDetailHeader.tsx | 16 +- .../SelectedRunView/SelectedRunView.tsx | 19 +- .../CreateTemplateModal.tsx | 98 ++++++ .../SelectedRunActions/SelectedRunActions.tsx | 21 ++ .../useSelectedRunActions.ts | 61 ++++ .../components/WebhookTriggerSection.tsx | 92 +++++ .../SelectedRunView/useSelectedRunView.ts | 22 +- .../SelectedScheduleView.tsx | 24 +- .../SelectedTemplateView.tsx | 204 +++++++++++ .../components/SelectedTemplateActions.tsx | 172 ++++++++++ .../components/WebhookTriggerCard.tsx | 92 +++++ .../useSelectedTemplateView.ts | 189 ++++++++++ .../SelectedTriggerView.tsx | 196 +++++++++++ .../components/SelectedTriggerActions.tsx | 149 ++++++++ .../useSelectedTriggerView.ts | 141 ++++++++ .../SidebarRunsList/SidebarRunsList.tsx | 77 ++++- .../components/ScheduleListItem.tsx | 11 +- .../components/TemplateListItem.tsx | 33 ++ .../components/TriggerListItem.tsx | 33 ++ .../SidebarRunsList/useSidebarRunsList.ts | 81 ++++- .../useNewAgentLibraryView.ts | 42 ++- 29 files changed, 2156 insertions(+), 226 deletions(-) create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTriggers.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/CreateTemplateModal/CreateTemplateModal.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/WebhookTriggerSection.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/SelectedTemplateView.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/SelectedTemplateActions.tsx create mode 100644 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/WebhookTriggerCard.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/useSelectedTemplateView.ts create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/SelectedTriggerView.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/components/SelectedTriggerActions.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/useSelectedTriggerView.ts create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateListItem.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerListItem.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx index 7a7470a391..b06901f860 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx @@ -10,10 +10,13 @@ import { AgentRunsLoading } from "./components/other/AgentRunsLoading"; import { EmptySchedules } from "./components/other/EmptySchedules"; import { EmptyTasks } from "./components/other/EmptyTasks"; import { EmptyTemplates } from "./components/other/EmptyTemplates"; +import { EmptyTriggers } from "./components/other/EmptyTriggers"; import { SectionWrap } from "./components/other/SectionWrap"; import { LoadingSelectedContent } from "./components/selected-views/LoadingSelectedContent"; import { SelectedRunView } from "./components/selected-views/SelectedRunView/SelectedRunView"; import { SelectedScheduleView } from "./components/selected-views/SelectedScheduleView/SelectedScheduleView"; +import { SelectedTemplateView } from "./components/selected-views/SelectedTemplateView/SelectedTemplateView"; +import { SelectedTriggerView } from "./components/selected-views/SelectedTriggerView/SelectedTriggerView"; import { SelectedViewLayout } from "./components/selected-views/SelectedViewLayout"; import { SidebarRunsList } from "./components/sidebar/SidebarRunsList/SidebarRunsList"; import { AGENT_LIBRARY_SECTION_PADDING_X } from "./helpers"; @@ -109,6 +112,21 @@ export function NewAgentLibraryView() { scheduleId={activeItem} onClearSelectedRun={handleClearSelectedRun} /> + ) : activeTab === "templates" ? ( + handleSelectRun(execution.id, "runs")} + onSwitchToRunsTab={() => setActiveTab("runs")} + /> + ) : activeTab === "triggers" ? ( + setActiveTab("runs")} + /> ) : ( + ) : activeTab === "triggers" ? 
( + + + ) : ( diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialsSelect/CredentialsSelect.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialsSelect/CredentialsSelect.tsx index 29f9b09a22..7adfa5772b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialsSelect/CredentialsSelect.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialsSelect/CredentialsSelect.tsx @@ -48,8 +48,8 @@ export function CredentialsSelect({ onValueChange={(value) => onSelectCredential(value)} > - - {selectedCredentials ? ( + {selectedCredentials ? ( + - ) : ( - Select credential - )} - + + ) : ( + + )} {credentials.map((credential) => ( diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/helpers.ts index 9e6f374437..4cca825747 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/helpers.ts @@ -75,11 +75,11 @@ export function getActionButtonText( hasExistingCredentials: boolean, ): string { if (hasExistingCredentials) { - if (supportsOAuth2) return "Connect a different account"; - if (supportsApiKey) return "Use a different API key"; - if (supportsUserPassword) return "Use a different username and password"; - if (supportsHostScoped) return "Use different headers"; - return "Add credentials"; + if (supportsOAuth2) return "Connect another account"; + if (supportsApiKey) return "Use a new API key"; + if (supportsUserPassword) return "Add a new username and password"; + if (supportsHostScoped) return "Add new headers"; + return "Add new credentials"; } else { if (supportsOAuth2) return "Add account"; if (supportsApiKey) return "Add API key"; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx index eff83cf824..3818c05c45 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx @@ -4,6 +4,12 @@ import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecu import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { Button } from "@/components/atoms/Button/Button"; +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from "@/components/atoms/Tooltip/BaseTooltip"; 
import { Dialog } from "@/components/molecules/Dialog/Dialog"; import { useState } from "react"; import { ScheduleAgentModal } from "../ScheduleAgentModal/ScheduleAgentModal"; @@ -147,15 +153,45 @@ export function RunAgentModal({
- + {!allRequiredInputsAreSet ? ( + + + + + + + + +

+ Please set up all required inputs and credentials before + scheduling +

+
+
+
+ ) : ( + + )} 0 ? ( {/* Regular inputs */} {inputFields.map(([key, inputSubSchema]) => ( diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/WebhookTriggerBanner/WebhookTriggerBanner.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/WebhookTriggerBanner/WebhookTriggerBanner.tsx index bc69a5f633..23834cbd9d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/WebhookTriggerBanner/WebhookTriggerBanner.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/WebhookTriggerBanner/WebhookTriggerBanner.tsx @@ -1,6 +1,6 @@ export function WebhookTriggerBanner() { return ( -
+
>({}); const [inputCredentials, setInputCredentials] = useState>( {}, ); const [presetName, setPresetName] = useState(""); const [presetDescription, setPresetDescription] = useState(""); - const defaultScheduleName = useMemo(() => `Run ${agent.name}`, [agent.name]); - const [scheduleName, setScheduleName] = useState(defaultScheduleName); - const [cronExpression, setCronExpression] = useState( - agent.recommended_schedule_cron || "0 9 * * 1", - ); - - // Get user timezone for scheduling - const { data: userTimezone } = useGetV1GetUserTimezone({ - query: { - select: (res) => (res.status === 200 ? res.data.timezone : undefined), - }, - }); // Determine the default run type based on agent capabilities const defaultRunType: RunVariant = agent.has_external_trigger @@ -94,38 +79,6 @@ export function useAgentRunModal( }, }); - const createScheduleMutation = useCreateSchedule({ - mutation: { - onSuccess: (response) => { - if (response.status === 200) { - toast({ - title: "Schedule created", - }); - callbacks?.onCreateSchedule?.(response.data); - // Invalidate schedules list for this graph - queryClient.invalidateQueries({ - queryKey: getGetV1ListExecutionSchedulesForAGraphQueryKey( - agent.graph_id, - ), - }); - analytics.sendDatafastEvent("schedule_agent", { - name: agent.name, - id: agent.graph_id, - cronExpression: cronExpression, - }); - setIsOpen(false); - } - }, - onError: (error: any) => { - toast({ - title: "❌ Failed to create schedule", - description: error.message || "An unexpected error occurred.", - variant: "destructive", - }); - }, - }, - }); - const setupTriggerMutation = usePostV2SetupTrigger({ mutation: { onSuccess: (response: any) => { @@ -134,6 +87,11 @@ export function useAgentRunModal( title: "Trigger setup complete", }); callbacks?.onSetupTrigger?.(response.data); + queryClient.invalidateQueries({ + queryKey: getGetV2ListPresetsQueryKey({ + graph_id: agent.graph_id, + }), + }); setIsOpen(false); } }, @@ -220,33 +178,25 @@ export function useAgentRunModal( [allRequiredInputsAreSetRaw, credentialsRequired, allCredentialsAreSet], ); - const notifyMissingRequirements = useCallback( - (needScheduleName: boolean = false) => { - const allMissingFields = ( - needScheduleName && !scheduleName ? ["schedule_name"] : [] - ) - .concat(missingInputs) - .concat( - credentialsRequired && !allCredentialsAreSet - ? missingCredentials.map((k) => `credentials:${k}`) - : [], - ); + const notifyMissingRequirements = useCallback(() => { + const allMissingFields = missingInputs.concat( + credentialsRequired && !allCredentialsAreSet + ? 
missingCredentials.map((k) => `credentials:${k}`) + : [], + ); - toast({ - title: "⚠️ Missing required inputs", - description: `Please provide: ${allMissingFields.map((k) => `"${k}"`).join(", ")}`, - variant: "destructive", - }); - }, - [ - missingInputs, - scheduleName, - toast, - credentialsRequired, - allCredentialsAreSet, - missingCredentials, - ], - ); + toast({ + title: "⚠️ Missing required inputs", + description: `Please provide: ${allMissingFields.map((k) => `"${k}"`).join(", ")}`, + variant: "destructive", + }); + }, [ + missingInputs, + toast, + credentialsRequired, + allCredentialsAreSet, + missingCredentials, + ]); // Action handlers const handleRun = useCallback(() => { @@ -257,7 +207,7 @@ export function useAgentRunModal( if (defaultRunType === "automatic-trigger") { // Setup trigger - if (!scheduleName.trim()) { + if (!presetName.trim()) { toast({ title: "⚠️ Trigger name required", description: "Please provide a name for your trigger.", @@ -268,7 +218,7 @@ export function useAgentRunModal( setupTriggerMutation.mutate({ data: { - name: presetName || scheduleName, + name: presetName, description: presetDescription || `Trigger for ${agent.name}`, graph_id: agent.graph_id, graph_version: agent.graph_version, @@ -291,7 +241,6 @@ export function useAgentRunModal( }, [ allRequiredInputsAreSet, defaultRunType, - scheduleName, inputValues, inputCredentials, agent, @@ -303,70 +252,6 @@ export function useAgentRunModal( toast, ]); - const handleSchedule = useCallback(() => { - if (!allRequiredInputsAreSet) { - notifyMissingRequirements(true); - return; - } - - if (!scheduleName.trim()) { - toast({ - title: "⚠️ Schedule name required", - description: "Please provide a name for your schedule.", - variant: "destructive", - }); - return; - } - - createScheduleMutation.mutate({ - graphId: agent.graph_id, - data: { - name: presetName || scheduleName, - cron: cronExpression, - inputs: inputValues, - graph_version: agent.graph_version, - credentials: inputCredentials, - timezone: - userTimezone && userTimezone !== "not-set" ? 
userTimezone : undefined, - }, - }); - }, [ - allRequiredInputsAreSet, - scheduleName, - cronExpression, - inputValues, - inputCredentials, - agent, - notifyMissingRequirements, - createScheduleMutation, - toast, - userTimezone, - ]); - - function handleShowSchedule() { - // Initialize with sensible defaults when entering schedule view - setScheduleName((prev) => prev || defaultScheduleName); - setCronExpression( - (prev) => prev || agent.recommended_schedule_cron || "0 9 * * 1", - ); - setShowScheduleView(true); - } - - function handleGoBack() { - setShowScheduleView(false); - // Reset schedule fields on exit - setScheduleName(defaultScheduleName); - setCronExpression(agent.recommended_schedule_cron || "0 9 * * 1"); - } - - function handleSetScheduleName(name: string) { - setScheduleName(name); - } - - function handleSetCronExpression(expression: string) { - setCronExpression(expression); - } - const hasInputFields = useMemo(() => { return Object.keys(agentInputFields).length > 0; }, [agentInputFields]); @@ -375,7 +260,6 @@ export function useAgentRunModal( // UI state isOpen, setIsOpen, - showScheduleView, // Run mode defaultRunType, @@ -394,10 +278,6 @@ export function useAgentRunModal( setPresetName, setPresetDescription, - // Scheduling - scheduleName, - cronExpression, - // Validation/readiness allRequiredInputsAreSet, missingInputs, @@ -409,15 +289,9 @@ export function useAgentRunModal( // Async states isExecuting: executeGraphMutation.isPending, - isCreatingSchedule: createScheduleMutation.isPending, isSettingUpTrigger: setupTriggerMutation.isPending, // Actions handleRun, - handleSchedule, - handleShowSchedule, - handleGoBack, - handleSetScheduleName, - handleSetCronExpression, }; } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTriggers.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTriggers.tsx new file mode 100644 index 0000000000..0d9dc47fff --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTriggers.tsx @@ -0,0 +1,323 @@ +import { Text } from "@/components/atoms/Text/Text"; + +export function EmptyTriggers() { + return ( +
+
+ {/* decorative empty-state SVG illustration (path elements) */}
+
+ + No triggers yet + + + Set up automatic triggers for your agent to run tasks automatically — + they'll show up here. + +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx index 7c70b5e6aa..3d04234bb3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx @@ -1,6 +1,7 @@ import { GraphExecution } from "@/app/api/__generated__/models/graphExecution"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { Text } from "@/components/atoms/Text/Text"; +import { ClockClockwiseIcon } from "@phosphor-icons/react"; import moment from "moment"; import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../../helpers"; import { RunStatusBadge } from "../SelectedRunView/components/RunStatusBadge"; @@ -20,7 +21,20 @@ export function RunDetailHeader({ agent, run, scheduleRecurrence }: Props) {
- {run?.status ? : null} + {run?.status ? ( + + ) : scheduleRecurrence ? ( +
+ + + Scheduled + +
+ ) : null} {agent.name} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx index 7cce125e7c..1c7df0f680 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx @@ -24,6 +24,7 @@ import { SelectedViewLayout } from "../SelectedViewLayout"; import { RunOutputs } from "./components/RunOutputs"; import { RunSummary } from "./components/RunSummary"; import { SelectedRunActions } from "./components/SelectedRunActions/SelectedRunActions"; +import { WebhookTriggerSection } from "./components/WebhookTriggerSection"; import { useSelectedRunView } from "./useSelectedRunView"; const anchorStyles = @@ -42,10 +43,8 @@ export function SelectedRunView({ onSelectRun, onClearSelectedRun, }: Props) { - const { run, isLoading, responseError, httpError } = useSelectedRunView( - agent.graph_id, - runId, - ); + const { run, preset, isLoading, responseError, httpError } = + useSelectedRunView(agent.graph_id, runId); const { pendingReviews, @@ -90,6 +89,16 @@ export function SelectedRunView({
+ {preset && + agent.trigger_setup_info && + preset.webhook_id && + preset.webhook && ( + + )} + {/* Navigation Links */}
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts index e88a4d6ea7..462490b6da 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts @@ -5,7 +5,12 @@ import { usePostV1ExecuteGraphAgent, usePostV1StopGraphExecution, } from "@/app/api/__generated__/endpoints/graphs/graphs"; +import { + getGetV2ListPresetsQueryKey, + usePostV2CreateANewPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; import type { GraphExecution } from "@/app/api/__generated__/models/graphExecution"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { useToast } from "@/components/molecules/Toast/use-toast"; import { useQueryClient } from "@tanstack/react-query"; import { useState } from "react"; @@ -13,6 +18,7 @@ import { useState } from "react"; interface Args { agentGraphId: string; run?: GraphExecution; + agent?: LibraryAgent; onSelectRun?: (id: string) => void; onClearSelectedRun?: () => void; } @@ -22,6 +28,8 @@ export function useSelectedRunActions(args: Args) { const { toast } = useToast(); const [showDeleteDialog, setShowDeleteDialog] = useState(false); + const [isCreateTemplateModalOpen, setIsCreateTemplateModalOpen] = + useState(false); const canStop = args.run?.status === "RUNNING" || args.run?.status === "QUEUED"; @@ -32,6 +40,9 @@ export function useSelectedRunActions(args: Args) { const { mutateAsync: executeRun, isPending: isRunningAgain } = usePostV1ExecuteGraphAgent(); + const { mutateAsync: createPreset, isPending: isCreatingTemplate } = + usePostV2CreateANewPreset(); + async function handleStopRun() { try { await stopRun({ @@ -106,6 +117,52 @@ export function useSelectedRunActions(args: Args) { setShowDeleteDialog(open); } + async function handleCreateTemplate(name: string, description: string) { + if (!args.run) { + toast({ + title: "Run not found", + description: "Cannot create template from missing run", + variant: "destructive", + }); + return; + } + + try { + const res = await createPreset({ + data: { + name, + description, + graph_execution_id: args.run.id, + }, + }); + + if (res.status === 200) { + toast({ + title: "Template created", + }); + + if (args.agent) { + queryClient.invalidateQueries({ + queryKey: getGetV2ListPresetsQueryKey({ + graph_id: args.agent.graph_id, + }), + }); + } + + setIsCreateTemplateModalOpen(false); + } + } catch (error: unknown) { + toast({ + title: "Failed to create template", + description: + error instanceof Error + ? error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } + } + // Open in builder URL helper const openInBuilderHref = args.run ? 
`/build?flowID=${args.run.graph_id}&flowVersion=${args.run.graph_version}&flowExecutionID=${args.run.id}` @@ -120,5 +177,9 @@ export function useSelectedRunActions(args: Args) { handleShowDeleteDialog, handleStopRun, handleRunAgain, + handleCreateTemplate, + isCreatingTemplate, + isCreateTemplateModalOpen, + setIsCreateTemplateModalOpen, } as const; } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/WebhookTriggerSection.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/WebhookTriggerSection.tsx new file mode 100644 index 0000000000..0b24f38731 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/WebhookTriggerSection.tsx @@ -0,0 +1,92 @@ +"use client"; + +import { GraphTriggerInfo } from "@/app/api/__generated__/models/graphTriggerInfo"; +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { CopyIcon } from "@phosphor-icons/react"; +import { RunDetailCard } from "../../RunDetailCard/RunDetailCard"; + +interface Props { + preset: LibraryAgentPreset; + triggerSetupInfo: GraphTriggerInfo; +} + +function getTriggerStatus( + preset: LibraryAgentPreset, +): "active" | "inactive" | "broken" { + if (!preset.webhook_id || !preset.webhook) return "broken"; + return preset.is_active ? "active" : "inactive"; +} + +export function WebhookTriggerSection({ preset, triggerSetupInfo }: Props) { + const status = getTriggerStatus(preset); + const webhook = preset.webhook; + + function handleCopyWebhookUrl() { + if (webhook?.url) { + navigator.clipboard.writeText(webhook.url); + } + } + + return ( + +
+
+ Status + + {status === "active" + ? "Active" + : status === "inactive" + ? "Inactive" + : "Broken"} + +
+ + {!preset.webhook_id ? ( + + This trigger is not attached to a webhook. Use "Set up + trigger" to fix this. + + ) : !triggerSetupInfo.credentials_input_name && webhook ? ( +
+ + This trigger is ready to be used. Use the Webhook URL below to set + up the trigger connection with the service of your choosing. + +
+ Webhook URL: +
+ {webhook.url} + +
+
+
+ ) : ( + + This agent trigger is{" "} + {preset.is_active + ? "ready. When a trigger is received, it will run with the provided settings." + : "disabled. It will not respond to triggers until you enable it."} + + )} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/useSelectedRunView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/useSelectedRunView.ts index 276673d389..342241ef89 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/useSelectedRunView.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/useSelectedRunView.ts @@ -1,8 +1,11 @@ "use client"; import { useGetV1GetExecutionDetails } from "@/app/api/__generated__/endpoints/graphs/graphs"; -import type { GetV1GetExecutionDetails200 } from "@/app/api/__generated__/models/getV1GetExecutionDetails200"; +import { useGetV2GetASpecificPreset } from "@/app/api/__generated__/endpoints/presets/presets"; import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; +import type { GetV1GetExecutionDetails200 } from "@/app/api/__generated__/models/getV1GetExecutionDetails200"; +import type { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import { okData } from "@/app/api/helpers"; export function useSelectedRunView(graphId: string, runId: string) { const query = useGetV1GetExecutionDetails(graphId, runId, { @@ -37,6 +40,18 @@ export function useSelectedRunView(graphId: string, runId: string) { ? (query.data?.data as GetV1GetExecutionDetails200) : undefined; + const presetId = + run && "preset_id" in run && run.preset_id + ? (run.preset_id as string) + : undefined; + + const presetQuery = useGetV2GetASpecificPreset(presetId || "", { + query: { + enabled: !!presetId, + select: (res) => okData(res), + }, + }); + const httpError = status && status !== 200 ? { status, statusText: `Request failed: ${status}` } @@ -44,8 +59,9 @@ export function useSelectedRunView(graphId: string, runId: string) { return { run, - isLoading: query.isLoading, - responseError: query.error, + preset: presetQuery.data, + isLoading: query.isLoading || presetQuery.isLoading, + responseError: query.error || presetQuery.error, httpError, } as const; } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx index 6eda578f87..841ff04df9 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx @@ -83,19 +83,17 @@ export function SelectedScheduleView({
-
-
-
- -
+
+
+
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/SelectedTemplateView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/SelectedTemplateView.tsx new file mode 100644 index 0000000000..b1c89c1945 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/SelectedTemplateView.tsx @@ -0,0 +1,204 @@ +"use client"; + +import type { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Input } from "@/components/atoms/Input/Input"; +import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; +import { + getAgentCredentialsFields, + getAgentInputFields, +} from "../../modals/AgentInputsReadOnly/helpers"; +import { CredentialsInput } from "../../modals/CredentialsInputs/CredentialsInputs"; +import { RunAgentInputs } from "../../modals/RunAgentInputs/RunAgentInputs"; +import { LoadingSelectedContent } from "../LoadingSelectedContent"; +import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; +import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader"; +import { SelectedViewLayout } from "../SelectedViewLayout"; +import { SelectedTemplateActions } from "./components/SelectedTemplateActions"; +import { WebhookTriggerCard } from "./components/WebhookTriggerCard"; +import { useSelectedTemplateView } from "./useSelectedTemplateView"; + +interface Props { + agent: LibraryAgent; + templateId: string; + onClearSelectedRun?: () => void; + onRunCreated?: (execution: GraphExecutionMeta) => void; + onSwitchToRunsTab?: () => void; +} + +export function SelectedTemplateView({ + agent, + templateId, + onClearSelectedRun, + onRunCreated, + onSwitchToRunsTab, +}: Props) { + const { + template, + isLoading, + error, + name, + setName, + description, + setDescription, + inputs, + setInputValue, + credentials, + setCredentialValue, + handleSaveChanges, + handleStartTask, + isSaving, + isStarting, + } = useSelectedTemplateView({ + templateId, + graphId: agent.graph_id, + onRunCreated, + }); + + const agentInputFields = getAgentInputFields(agent); + const agentCredentialsFields = getAgentCredentialsFields(agent); + const inputFields = Object.entries(agentInputFields); + const credentialFields = Object.entries(agentCredentialsFields); + + if (error) { + return ( + + ); + } + + if (isLoading && !template) { + return ; + } + + if (!template) { + return null; + } + + const hasWebhook = !!template.webhook_id && template.webhook; + + return ( +
+
+ +
+ + + {hasWebhook && agent.trigger_setup_info && ( + + )} + + +
+ setName(e.target.value)} + placeholder="Enter template name" + /> + + setDescription(e.target.value)} + placeholder="Enter template description" + /> +
+
+ + {inputFields.length > 0 && ( + +
+ {inputFields.map(([key, inputSubSchema]) => ( +
+ + setInputValue(key, value)} + /> +
+ ))} +
+
+ )} + + {credentialFields.length > 0 && ( + +
+ {credentialFields.map(([key, inputSubSchema]) => ( + + setCredentialValue(key, value!) + } + siblingInputs={inputs} + /> + ))} +
+
+ )} +
+
+
+ {template ? ( +
+ +
+ ) : null} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/SelectedTemplateActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/SelectedTemplateActions.tsx new file mode 100644 index 0000000000..a3a2a9ac62 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/SelectedTemplateActions.tsx @@ -0,0 +1,172 @@ +"use client"; + +import { + getGetV2ListPresetsQueryKey, + useDeleteV2DeleteAPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import type { LibraryAgentPresetResponse } from "@/app/api/__generated__/models/libraryAgentPresetResponse"; +import { okData } from "@/app/api/helpers"; +import { Button } from "@/components/atoms/Button/Button"; +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { FloppyDiskIcon, PlayIcon, TrashIcon } from "@phosphor-icons/react"; +import { useQueryClient } from "@tanstack/react-query"; +import { useState } from "react"; + +interface Props { + agent: LibraryAgent; + templateId: string; + onDeleted?: () => void; + onSaveChanges?: () => void; + onStartTask?: () => void; + isSaving?: boolean; + isStarting?: boolean; + onSwitchToRunsTab?: () => void; +} + +export function SelectedTemplateActions({ + agent, + templateId, + onDeleted, + onSaveChanges, + onStartTask, + isSaving, + isStarting, + onSwitchToRunsTab, +}: Props) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + const [showDeleteDialog, setShowDeleteDialog] = useState(false); + + const deleteMutation = useDeleteV2DeleteAPreset({ + mutation: { + onSuccess: async () => { + toast({ + title: "Template deleted", + }); + const queryKey = getGetV2ListPresetsQueryKey({ + graph_id: agent.graph_id, + }); + + queryClient.invalidateQueries({ + queryKey, + }); + + const queryData = queryClient.getQueryData<{ + data: LibraryAgentPresetResponse; + }>(queryKey); + + const presets = + okData(queryData)?.presets ?? []; + const templates = presets.filter( + (preset) => !preset.webhook_id || !preset.webhook, + ); + + setShowDeleteDialog(false); + onDeleted?.(); + + if (templates.length === 0 && onSwitchToRunsTab) { + onSwitchToRunsTab(); + } + }, + onError: (error: any) => { + toast({ + title: "Failed to delete template", + description: error.message || "An unexpected error occurred.", + variant: "destructive", + }); + }, + }, + }); + + function handleDelete() { + deleteMutation.mutate({ presetId: templateId }); + } + + return ( + <> +
+ + {onStartTask && ( + + )} + +
+ + + + + Are you sure you want to delete this template? This action cannot be + undone. + + + + + + + + + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/WebhookTriggerCard.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/WebhookTriggerCard.tsx new file mode 100644 index 0000000000..d8a54f0474 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/WebhookTriggerCard.tsx @@ -0,0 +1,92 @@ +"use client"; + +import { GraphTriggerInfo } from "@/app/api/__generated__/models/graphTriggerInfo"; +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { CopyIcon } from "@phosphor-icons/react"; +import { RunDetailCard } from "../../RunDetailCard/RunDetailCard"; + +interface Props { + template: LibraryAgentPreset; + triggerSetupInfo: GraphTriggerInfo; +} + +function getTriggerStatus( + template: LibraryAgentPreset, +): "active" | "inactive" | "broken" { + if (!template.webhook_id || !template.webhook) return "broken"; + return template.is_active ? "active" : "inactive"; +} + +export function WebhookTriggerCard({ template, triggerSetupInfo }: Props) { + const status = getTriggerStatus(template); + const webhook = template.webhook; + + function handleCopyWebhookUrl() { + if (webhook?.url) { + navigator.clipboard.writeText(webhook.url); + } + } + + return ( + +
+
+ Status + + {status === "active" + ? "Active" + : status === "inactive" + ? "Inactive" + : "Broken"} + +
+ + {!template.webhook_id ? ( + + This trigger is not attached to a webhook. Use "Set up + trigger" to fix this. + + ) : !triggerSetupInfo.credentials_input_name && webhook ? ( +
+ + This trigger is ready to be used. Use the Webhook URL below to set + up the trigger connection with the service of your choosing. + +
+ Webhook URL: +
+ {webhook.url} + +
+
+
+ ) : ( + + This agent trigger is{" "} + {template.is_active + ? "ready. When a trigger is received, it will run with the provided settings." + : "disabled. It will not respond to triggers until you enable it."} + + )} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/useSelectedTemplateView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/useSelectedTemplateView.ts new file mode 100644 index 0000000000..58483fdc74 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/useSelectedTemplateView.ts @@ -0,0 +1,189 @@ +"use client"; + +import { getGetV1ListGraphExecutionsInfiniteQueryOptions } from "@/app/api/__generated__/endpoints/graphs/graphs"; +import { + getGetV2GetASpecificPresetQueryKey, + getGetV2ListPresetsQueryKey, + useGetV2GetASpecificPreset, + usePatchV2UpdateAnExistingPreset, + usePostV2ExecuteAPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; +import type { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; +import type { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import type { LibraryAgentPresetUpdatable } from "@/app/api/__generated__/models/libraryAgentPresetUpdatable"; +import { okData } from "@/app/api/helpers"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types"; +import { useQueryClient } from "@tanstack/react-query"; +import { useEffect, useState } from "react"; + +type Args = { + templateId: string; + graphId: string; + onRunCreated?: (execution: GraphExecutionMeta) => void; +}; + +export function useSelectedTemplateView({ + templateId, + graphId, + onRunCreated, +}: Args) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + + const query = useGetV2GetASpecificPreset(templateId, { + query: { + enabled: !!templateId, + select: (res) => okData(res), + }, + }); + + const [name, setName] = useState(""); + const [description, setDescription] = useState(""); + const [inputs, setInputs] = useState>({}); + const [credentials, setCredentials] = useState< + Record + >({}); + + useEffect(() => { + if (query.data) { + setName(query.data.name || ""); + setDescription(query.data.description || ""); + setInputs(query.data.inputs || {}); + setCredentials(query.data.credentials || {}); + } + }, [query.data]); + + const updateMutation = usePatchV2UpdateAnExistingPreset({ + mutation: { + onSuccess: (response) => { + if (response.status === 200) { + toast({ + title: "Template updated", + }); + queryClient.invalidateQueries({ + queryKey: getGetV2GetASpecificPresetQueryKey(templateId), + }); + queryClient.invalidateQueries({ + queryKey: getGetV2ListPresetsQueryKey({ graph_id: graphId }), + }); + } + }, + onError: (error: any) => { + toast({ + title: "Failed to update template", + description: error.message || "An unexpected error occurred.", + variant: "destructive", + }); + }, + }, + }); + + const executeMutation = usePostV2ExecuteAPreset({ + mutation: { + onSuccess: (response) => { + if (response.status === 200) { + const execution = okData(response); + if (execution) { + toast({ + title: "Task started", + }); + queryClient.invalidateQueries({ + queryKey: + getGetV1ListGraphExecutionsInfiniteQueryOptions(graphId) + .queryKey, + }); + onRunCreated?.(execution); + } + } + }, + onError: (error: any) => { + toast({ + title: "Failed to start task", + description: error.message || "An unexpected 
error occurred.", + variant: "destructive", + }); + }, + }, + }); + + function handleSaveChanges() { + if (!query.data) return; + + const updateData: LibraryAgentPresetUpdatable = {}; + if (name !== (query.data.name || "")) { + updateData.name = name; + } + + if (description !== (query.data.description || "")) { + updateData.description = description; + } + + const inputsChanged = + JSON.stringify(inputs) !== JSON.stringify(query.data.inputs || {}); + + const credentialsChanged = + JSON.stringify(credentials) !== + JSON.stringify(query.data.credentials || {}); + + if (inputsChanged || credentialsChanged) { + updateData.inputs = inputs; + updateData.credentials = credentials; + } + + updateMutation.mutate({ + presetId: templateId, + data: updateData, + }); + } + + function handleStartTask() { + executeMutation.mutate({ + presetId: templateId, + data: { + inputs: {}, + credential_inputs: {}, + }, + }); + } + + function setInputValue(key: string, value: any) { + setInputs((prev) => ({ ...prev, [key]: value })); + } + + function setCredentialValue(key: string, value: CredentialsMetaInput) { + setCredentials((prev) => ({ ...prev, [key]: value })); + } + + const httpError = + query.isSuccess && !query.data + ? { status: 404, statusText: "Not found" } + : undefined; + + useEffect(() => { + if (updateMutation.isSuccess && query.data) { + setName(query.data.name || ""); + setDescription(query.data.description || ""); + setInputs(query.data.inputs || {}); + setCredentials(query.data.credentials || {}); + } + }, [updateMutation.isSuccess, query.data]); + + return { + template: query.data, + isLoading: query.isLoading, + error: query.error || httpError, + name, + setName, + description, + setDescription, + inputs, + setInputValue, + credentials, + setCredentialValue, + handleSaveChanges, + handleStartTask, + isSaving: updateMutation.isPending, + isStarting: executeMutation.isPending, + } as const; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/SelectedTriggerView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/SelectedTriggerView.tsx new file mode 100644 index 0000000000..2021251ad2 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/SelectedTriggerView.tsx @@ -0,0 +1,196 @@ +"use client"; + +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Input } from "@/components/atoms/Input/Input"; +import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; +import { + getAgentCredentialsFields, + getAgentInputFields, +} from "../../modals/AgentInputsReadOnly/helpers"; +import { CredentialsInput } from "../../modals/CredentialsInputs/CredentialsInputs"; +import { RunAgentInputs } from "../../modals/RunAgentInputs/RunAgentInputs"; +import { LoadingSelectedContent } from "../LoadingSelectedContent"; +import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; +import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader"; +import { WebhookTriggerCard } from "../SelectedTemplateView/components/WebhookTriggerCard"; +import { SelectedViewLayout } from "../SelectedViewLayout"; +import { SelectedTriggerActions } from 
"./components/SelectedTriggerActions"; +import { useSelectedTriggerView } from "./useSelectedTriggerView"; + +interface Props { + agent: LibraryAgent; + triggerId: string; + onClearSelectedRun?: () => void; + onSwitchToRunsTab?: () => void; +} + +export function SelectedTriggerView({ + agent, + triggerId, + onClearSelectedRun, + onSwitchToRunsTab, +}: Props) { + const { + trigger, + isLoading, + error, + name, + setName, + description, + setDescription, + inputs, + setInputValue, + credentials, + setCredentialValue, + handleSaveChanges, + isSaving, + } = useSelectedTriggerView({ + triggerId, + graphId: agent.graph_id, + }); + + const agentInputFields = getAgentInputFields(agent); + const agentCredentialsFields = getAgentCredentialsFields(agent); + const inputFields = Object.entries(agentInputFields); + const credentialFields = Object.entries(agentCredentialsFields); + + if (error) { + return ( + + ); + } + + if (isLoading && !trigger) { + return ; + } + + if (!trigger) { + return null; + } + + const hasWebhook = !!trigger.webhook_id && trigger.webhook; + + return ( +
+
+ +
+ + + +
+ setName(e.target.value)} + placeholder="Enter trigger name" + /> + + setDescription(e.target.value)} + placeholder="Enter trigger description" + /> +
+
+ + {hasWebhook && agent.trigger_setup_info && ( + + )} + + {inputFields.length > 0 && ( + +
+ {inputFields.map(([key, inputSubSchema]) => ( +
+ + setInputValue(key, value)} + /> +
+ ))} +
+
+ )} + + {credentialFields.length > 0 && ( + +
+ {credentialFields.map(([key, inputSubSchema]) => ( + + setCredentialValue(key, value!) + } + siblingInputs={inputs} + /> + ))} +
+
+ )} +
+
+
+ {trigger ? ( +
+ +
+ ) : null} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/components/SelectedTriggerActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/components/SelectedTriggerActions.tsx new file mode 100644 index 0000000000..ed37c864a2 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/components/SelectedTriggerActions.tsx @@ -0,0 +1,149 @@ +"use client"; + +import { + getGetV2ListPresetsQueryKey, + useDeleteV2DeleteAPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import type { LibraryAgentPresetResponse } from "@/app/api/__generated__/models/libraryAgentPresetResponse"; +import { okData } from "@/app/api/helpers"; +import { Button } from "@/components/atoms/Button/Button"; +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { FloppyDiskIcon, TrashIcon } from "@phosphor-icons/react"; +import { useQueryClient } from "@tanstack/react-query"; +import { useState } from "react"; + +interface Props { + agent: LibraryAgent; + triggerId: string; + onDeleted?: () => void; + onSaveChanges?: () => void; + isSaving?: boolean; + onSwitchToRunsTab?: () => void; +} + +export function SelectedTriggerActions({ + agent, + triggerId, + onDeleted, + onSaveChanges, + isSaving, + onSwitchToRunsTab, +}: Props) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + const [showDeleteDialog, setShowDeleteDialog] = useState(false); + + const deleteMutation = useDeleteV2DeleteAPreset({ + mutation: { + onSuccess: async () => { + toast({ + title: "Trigger deleted", + }); + const queryKey = getGetV2ListPresetsQueryKey({ + graph_id: agent.graph_id, + }); + + queryClient.invalidateQueries({ + queryKey, + }); + + const queryData = queryClient.getQueryData<{ + data: LibraryAgentPresetResponse; + }>(queryKey); + + const presets = + okData(queryData)?.presets ?? []; + const triggers = presets.filter( + (preset) => preset.webhook_id && preset.webhook, + ); + + setShowDeleteDialog(false); + onDeleted?.(); + + if (triggers.length === 0 && onSwitchToRunsTab) { + onSwitchToRunsTab(); + } + }, + onError: (error: any) => { + toast({ + title: "Failed to delete trigger", + description: error.message || "An unexpected error occurred.", + variant: "destructive", + }); + }, + }, + }); + + function handleDelete() { + deleteMutation.mutate({ presetId: triggerId }); + } + + return ( + <> +
+ + +
+ + + + + Are you sure you want to delete this trigger? This action cannot be + undone. + + + + + + + + + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/useSelectedTriggerView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/useSelectedTriggerView.ts new file mode 100644 index 0000000000..4669d850b2 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/useSelectedTriggerView.ts @@ -0,0 +1,141 @@ +"use client"; + +import { + getGetV2GetASpecificPresetQueryKey, + getGetV2ListPresetsQueryKey, + useGetV2GetASpecificPreset, + usePatchV2UpdateAnExistingPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; +import type { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import type { LibraryAgentPresetUpdatable } from "@/app/api/__generated__/models/libraryAgentPresetUpdatable"; +import { okData } from "@/app/api/helpers"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types"; +import { useQueryClient } from "@tanstack/react-query"; +import { useEffect, useState } from "react"; + +type Args = { + triggerId: string; + graphId: string; +}; + +export function useSelectedTriggerView({ triggerId, graphId }: Args) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + + const query = useGetV2GetASpecificPreset(triggerId, { + query: { + enabled: !!triggerId, + select: (res) => okData(res), + }, + }); + + const [name, setName] = useState(""); + const [description, setDescription] = useState(""); + const [inputs, setInputs] = useState>({}); + const [credentials, setCredentials] = useState< + Record + >({}); + + useEffect(() => { + if (query.data) { + setName(query.data.name || ""); + setDescription(query.data.description || ""); + setInputs(query.data.inputs || {}); + setCredentials(query.data.credentials || {}); + } + }, [query.data]); + + const updateMutation = usePatchV2UpdateAnExistingPreset({ + mutation: { + onSuccess: (response) => { + if (response.status === 200) { + toast({ + title: "Trigger updated", + }); + queryClient.invalidateQueries({ + queryKey: getGetV2GetASpecificPresetQueryKey(triggerId), + }); + queryClient.invalidateQueries({ + queryKey: getGetV2ListPresetsQueryKey({ graph_id: graphId }), + }); + } + }, + onError: (error: any) => { + toast({ + title: "Failed to update trigger", + description: error.message || "An unexpected error occurred.", + variant: "destructive", + }); + }, + }, + }); + + function handleSaveChanges() { + if (!query.data) return; + + const updateData: LibraryAgentPresetUpdatable = {}; + if (name !== (query.data.name || "")) { + updateData.name = name; + } + + if (description !== (query.data.description || "")) { + updateData.description = description; + } + + const inputsChanged = + JSON.stringify(inputs) !== JSON.stringify(query.data.inputs || {}); + + const credentialsChanged = + JSON.stringify(credentials) !== + JSON.stringify(query.data.credentials || {}); + + if (inputsChanged || credentialsChanged) { + updateData.inputs = inputs; + updateData.credentials = credentials; + } + + updateMutation.mutate({ + presetId: triggerId, + data: updateData, + }); + } + + function setInputValue(key: string, 
value: any) { + setInputs((prev) => ({ ...prev, [key]: value })); + } + + function setCredentialValue(key: string, value: CredentialsMetaInput) { + setCredentials((prev) => ({ ...prev, [key]: value })); + } + + const httpError = + query.isSuccess && !query.data + ? { status: 404, statusText: "Not found" } + : undefined; + + useEffect(() => { + if (updateMutation.isSuccess && query.data) { + setName(query.data.name || ""); + setDescription(query.data.description || ""); + setInputs(query.data.inputs || {}); + setCredentials(query.data.credentials || {}); + } + }, [updateMutation.isSuccess, query.data]); + + return { + trigger: query.data, + isLoading: query.isLoading, + error: query.error || httpError, + name, + setName, + description, + setDescription, + inputs, + setInputValue, + credentials, + setCredentialValue, + handleSaveChanges, + isSaving: updateMutation.isPending, + } as const; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx index a8d0eeb8e9..e893f2101b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx @@ -16,17 +16,24 @@ import { cn } from "@/lib/utils"; import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../../helpers"; import { RunListItem } from "./components/RunListItem"; import { ScheduleListItem } from "./components/ScheduleListItem"; +import { TemplateListItem } from "./components/TemplateListItem"; +import { TriggerListItem } from "./components/TriggerListItem"; import { useSidebarRunsList } from "./useSidebarRunsList"; interface Props { agent: LibraryAgent; selectedRunId?: string; - onSelectRun: (id: string, tab?: "runs" | "scheduled") => void; + onSelectRun: ( + id: string, + tab?: "runs" | "scheduled" | "templates" | "triggers", + ) => void; onClearSelectedRun?: () => void; - onTabChange?: (tab: "runs" | "scheduled" | "templates") => void; + onTabChange?: (tab: "runs" | "scheduled" | "templates" | "triggers") => void; onCountsChange?: (info: { runsCount: number; schedulesCount: number; + templatesCount: number; + triggersCount: number; loading?: boolean; }) => void; } @@ -42,8 +49,12 @@ export function SidebarRunsList({ const { runs, schedules, + templates, + triggers, runsCount, schedulesCount, + templatesCount, + triggersCount, error, loading, fetchMoreRuns, @@ -79,7 +90,7 @@ export function SidebarRunsList({ { - const value = v as "runs" | "scheduled" | "templates"; + const value = v as "runs" | "scheduled" | "templates" | "triggers"; onTabChange?.(value); if (value === "runs") { if (runs && runs.length) { @@ -95,6 +106,8 @@ export function SidebarRunsList({ } } else if (value === "templates") { onClearSelectedRun?.(); + } else if (value === "triggers") { + onClearSelectedRun?.(); } }} className="flex min-h-0 flex-col overflow-hidden" @@ -106,8 +119,13 @@ export function SidebarRunsList({ Scheduled {schedulesCount} + {triggersCount > 0 && ( + + Triggers {triggersCount} + + )} - Templates 0 + Templates {templatesCount} @@ -165,6 +183,35 @@ export function SidebarRunsList({ )}
+ {triggersCount > 0 && ( + +
+ {triggers.length > 0 ? ( + triggers.map((trigger) => ( +
+ onSelectRun(trigger.id, "triggers")} + /> +
+ )) + ) : ( +
+ + No triggers set up + +
+ )} +
+
+ )}
-
- - No templates saved - -
+ {templates.length > 0 ? ( + templates.map((template) => ( +
+ onSelectRun(template.id, "templates")} + /> +
+ )) + ) : ( +
+ + No templates saved + +
+ )}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx index b06b67647d..2265a92f62 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx @@ -1,11 +1,10 @@ "use client"; -import React from "react"; import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; -import moment from "moment"; -import { RunSidebarCard } from "./RunSidebarCard"; -import { IconWrapper } from "./RunIconWrapper"; import { ClockClockwiseIcon } from "@phosphor-icons/react"; +import moment from "moment"; +import { IconWrapper } from "./RunIconWrapper"; +import { RunSidebarCard } from "./RunSidebarCard"; interface ScheduleListItemProps { schedule: GraphExecutionJobInfo; @@ -25,10 +24,10 @@ export function ScheduleListItem({ onClick={onClick} selected={selected} icon={ - + diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateListItem.tsx new file mode 100644 index 0000000000..ffdd746416 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateListItem.tsx @@ -0,0 +1,33 @@ +"use client"; + +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import { FileTextIcon } from "@phosphor-icons/react"; +import moment from "moment"; +import { IconWrapper } from "./RunIconWrapper"; +import { RunSidebarCard } from "./RunSidebarCard"; + +interface TemplateListItemProps { + template: LibraryAgentPreset; + selected?: boolean; + onClick?: () => void; +} + +export function TemplateListItem({ + template, + selected, + onClick, +}: TemplateListItemProps) { + return ( + + + + } + title={template.name} + description={moment(template.updated_at).fromNow()} + onClick={onClick} + selected={selected} + /> + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerListItem.tsx new file mode 100644 index 0000000000..a5d339ad36 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerListItem.tsx @@ -0,0 +1,33 @@ +"use client"; + +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import { LightningIcon } from "@phosphor-icons/react"; +import moment from "moment"; +import { IconWrapper } from "./RunIconWrapper"; +import { RunSidebarCard } from "./RunSidebarCard"; + +interface TriggerListItemProps { + trigger: 
LibraryAgentPreset; + selected?: boolean; + onClick?: () => void; +} + +export function TriggerListItem({ + trigger, + selected, + onClick, +}: TriggerListItemProps) { + return ( + + + + } + title={trigger.name} + description={moment(trigger.updated_at).fromNow()} + onClick={onClick} + selected={selected} + /> + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts index eecada463a..38ac1d79c2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts @@ -3,8 +3,10 @@ import { useEffect, useMemo } from "react"; import { useGetV1ListGraphExecutionsInfinite } from "@/app/api/__generated__/endpoints/graphs/graphs"; +import { useGetV2ListPresets } from "@/app/api/__generated__/endpoints/presets/presets"; import { useGetV1ListExecutionSchedulesForAGraph } from "@/app/api/__generated__/endpoints/schedules/schedules"; import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; +import type { LibraryAgentPresetResponse } from "@/app/api/__generated__/models/libraryAgentPresetResponse"; import { okData } from "@/app/api/helpers"; import { useExecutionEvents } from "@/hooks/useExecutionEvents"; import { useQueryClient } from "@tanstack/react-query"; @@ -15,8 +17,15 @@ import { getNextRunsPageParam, } from "./helpers"; -function parseTab(value: string | null): "runs" | "scheduled" | "templates" { - if (value === "runs" || value === "scheduled" || value === "templates") { +function parseTab( + value: string | null, +): "runs" | "scheduled" | "templates" | "triggers" { + if ( + value === "runs" || + value === "scheduled" || + value === "templates" || + value === "triggers" + ) { return value; } return "runs"; @@ -24,10 +33,15 @@ function parseTab(value: string | null): "runs" | "scheduled" | "templates" { type Args = { graphId?: string; - onSelectRun: (runId: string, tab?: "runs" | "scheduled") => void; + onSelectRun: ( + runId: string, + tab?: "runs" | "scheduled" | "templates" | "triggers", + ) => void; onCountsChange?: (info: { runsCount: number; schedulesCount: number; + templatesCount: number; + triggersCount: number; loading?: boolean; }) => void; }; @@ -67,16 +81,40 @@ export function useSidebarRunsList({ }, ); + const presetsQuery = useGetV2ListPresets( + { graph_id: graphId || null, page: 1, page_size: 100 }, + { + query: { + enabled: !!graphId, + select: (r) => okData(r)?.presets ?? 
[], + }, + }, + ); + const runs = useMemo( () => extractRunsFromPages(runsQuery.data), [runsQuery.data], ); const schedules = schedulesQuery.data || []; + const allPresets = presetsQuery.data || []; + const triggers = useMemo( + () => allPresets.filter((preset) => preset.webhook_id && preset.webhook), + [allPresets], + ); + const templates = useMemo( + () => allPresets.filter((preset) => !preset.webhook_id || !preset.webhook), + [allPresets], + ); const runsCount = computeRunsCount(runsQuery.data, runs.length); const schedulesCount = schedules.length; - const loading = !schedulesQuery.isSuccess || !runsQuery.isSuccess; + const templatesCount = templates.length; + const triggersCount = triggers.length; + const loading = + !schedulesQuery.isSuccess || + !runsQuery.isSuccess || + !presetsQuery.isSuccess; // Update query cache when execution events arrive via websocket useExecutionEvents({ @@ -94,9 +132,22 @@ export function useSidebarRunsList({ // Notify parent about counts and loading state useEffect(() => { if (onCountsChange) { - onCountsChange({ runsCount, schedulesCount, loading }); + onCountsChange({ + runsCount, + schedulesCount, + templatesCount, + triggersCount, + loading, + }); } - }, [runsCount, schedulesCount, loading, onCountsChange]); + }, [ + runsCount, + schedulesCount, + templatesCount, + triggersCount, + loading, + onCountsChange, + ]); useEffect(() => { if (runs.length > 0 && tabValue === "runs" && !activeItem) { @@ -111,15 +162,31 @@ export function useSidebarRunsList({ } }, [activeItem, runs.length, schedules, onSelectRun]); + useEffect(() => { + if (templates.length > 0 && tabValue === "templates" && !activeItem) { + onSelectRun(templates[0].id, "templates"); + } + }, [templates, activeItem, tabValue, onSelectRun]); + + useEffect(() => { + if (triggers.length > 0 && tabValue === "triggers" && !activeItem) { + onSelectRun(triggers[0].id, "triggers"); + } + }, [triggers, activeItem, tabValue, onSelectRun]); + return { runs, schedules, - error: schedulesQuery.error || runsQuery.error, + templates, + triggers, + error: schedulesQuery.error || runsQuery.error || presetsQuery.error, loading, runsQuery, tabValue, runsCount, schedulesCount, + templatesCount, + triggersCount, fetchMoreRuns: runsQuery.fetchNextPage, hasMoreRuns: runsQuery.hasNextPage, isFetchingMoreRuns: runsQuery.isFetchingNextPage, diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts index 46b9c9abc7..b7b6301ad6 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts @@ -5,8 +5,15 @@ import { useParams } from "next/navigation"; import { parseAsString, useQueryStates } from "nuqs"; import { useCallback, useEffect, useMemo, useState } from "react"; -function parseTab(value: string | null): "runs" | "scheduled" | "templates" { - if (value === "runs" || value === "scheduled" || value === "templates") { +function parseTab( + value: string | null, +): "runs" | "scheduled" | "templates" | "triggers" { + if ( + value === "runs" || + value === "scheduled" || + value === "templates" || + value === "triggers" + ) { return value; } return "runs"; @@ -45,6 +52,8 @@ export function useNewAgentLibraryView() { const 
[sidebarCounts, setSidebarCounts] = useState({ runsCount: 0, schedulesCount: 0, + templatesCount: 0, + triggersCount: 0, }); const [sidebarLoading, setSidebarLoading] = useState(true); @@ -52,7 +61,9 @@ export function useNewAgentLibraryView() { const hasAnyItems = useMemo( () => (sidebarCounts.runsCount ?? 0) > 0 || - (sidebarCounts.schedulesCount ?? 0) > 0, + (sidebarCounts.schedulesCount ?? 0) > 0 || + (sidebarCounts.templatesCount ?? 0) > 0 || + (sidebarCounts.triggersCount ?? 0) > 0, [sidebarCounts], ); @@ -65,7 +76,22 @@ export function useNewAgentLibraryView() { } }, [response]); - function handleSelectRun(id: string, tab?: "runs" | "scheduled") { + useEffect(() => { + if ( + activeTab === "triggers" && + sidebarCounts.triggersCount === 0 && + !sidebarLoading + ) { + setQueryStates({ + activeTab: "runs", + }); + } + }, [activeTab, sidebarCounts.triggersCount, sidebarLoading, setQueryStates]); + + function handleSelectRun( + id: string, + tab?: "runs" | "scheduled" | "templates" | "triggers", + ) { setQueryStates({ activeItem: id, activeTab: tab ?? "runs", @@ -78,7 +104,9 @@ export function useNewAgentLibraryView() { }); } - function handleSetActiveTab(tab: "runs" | "scheduled" | "templates") { + function handleSetActiveTab( + tab: "runs" | "scheduled" | "templates" | "triggers", + ) { setQueryStates({ activeTab: tab, }); @@ -88,11 +116,15 @@ export function useNewAgentLibraryView() { (counts: { runsCount: number; schedulesCount: number; + templatesCount: number; + triggersCount: number; loading?: boolean; }) => { setSidebarCounts({ runsCount: counts.runsCount, schedulesCount: counts.schedulesCount, + templatesCount: counts.templatesCount, + triggersCount: counts.triggersCount, }); if (counts.loading !== undefined) { setSidebarLoading(counts.loading); From 4f349281bdb929106184fea3b244c3f4effaa448 Mon Sep 17 00:00:00 2001 From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> Date: Tue, 9 Dec 2025 20:56:12 +0530 Subject: [PATCH 33/58] feat(frontend): switch copied graph storage from local storage to clipboard (#11578) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes πŸ—οΈ This PR migrates the copy/paste functionality for graph nodes and edges from local storage to the Clipboard API. This change addresses storage limitations and enables cross-tab copying. 
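A minimal sketch of the clipboard-tagging pattern this PR adopts (illustrative only; the real implementation lives in `useCopyPaste.ts` in the diff below, and the helper names here are hypothetical):

```ts
// Prefix used to tell our payload apart from unrelated clipboard text.
const CLIPBOARD_PREFIX = "autogpt-flow-data:";

async function copyFlowData(data: unknown): Promise<void> {
  // Serialize and tag the payload so the paste handler can identify it later.
  await navigator.clipboard.writeText(
    `${CLIPBOARD_PREFIX}${JSON.stringify(data)}`,
  );
}

async function readFlowData<T>(): Promise<T | null> {
  const text = await navigator.clipboard.readText();
  // Ignore clipboard content that we did not write ourselves.
  if (!text.startsWith(CLIPBOARD_PREFIX)) return null;
  return JSON.parse(text.slice(CLIPBOARD_PREFIX.length)) as T;
}
```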
https://github.com/user-attachments/assets/6ef55713-ca5b-4562-bb54-4c12db241d30 **Key changes:** - Replaced `localStorage` with `navigator.clipboard` API for copy/paste operations - Added `CLIPBOARD_PREFIX` constant (`"autogpt-flow-data:"`) to identify our clipboard data and prevent conflicts with other clipboard content - Added toast notifications to provide user feedback when copying nodes - Added error handling for clipboard read/write operations with console error logging - Removed dependency on `@/services/storage/local-storage` for copied flow data - Updated `useCopyPaste` hook to use async clipboard operations with proper promise handling **Benefits:** - βœ… Removes local storage size limitations (5-10MB) - βœ… Enables copying nodes between browser tabs/windows - βœ… Provides better user feedback through toast notifications - βœ… More standard approach using native browser Clipboard API ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Select one or more nodes in the flow editor - [x] Press `Ctrl+C` (or `Cmd+C` on Mac) to copy nodes - [x] Verify toast notification appears showing "Copied successfully" with node count - [x] Press `Ctrl+V` (or `Cmd+V` on Mac) to paste nodes - [x] Verify nodes are pasted at viewport center with new unique IDs - [x] Verify edges between copied nodes are also pasted correctly - [x] Test copying nodes in one browser tab and pasting in another tab (should work) - [x] Test copying non-flow data (e.g., text) and verify paste doesn't interfere with flow editor --- .../FlowEditor/Flow/useCopyPaste.ts | 155 ++++++++++-------- 1 file changed, 88 insertions(+), 67 deletions(-) diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useCopyPaste.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useCopyPaste.ts index 1114fb778c..7a8213da22 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useCopyPaste.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useCopyPaste.ts @@ -1,24 +1,25 @@ import { useCallback } from "react"; import { useReactFlow } from "@xyflow/react"; -import { Key, storage } from "@/services/storage/local-storage"; import { v4 as uuidv4 } from "uuid"; import { useNodeStore } from "../../../stores/nodeStore"; import { useEdgeStore } from "../../../stores/edgeStore"; import { CustomNode } from "../nodes/CustomNode/CustomNode"; import { CustomEdge } from "../edges/CustomEdge"; +import { useToast } from "@/components/molecules/Toast/use-toast"; interface CopyableData { nodes: CustomNode[]; edges: CustomEdge[]; } +const CLIPBOARD_PREFIX = "autogpt-flow-data:"; + export function useCopyPaste() { - // Only use useReactFlow for viewport (not managed by stores) const { getViewport } = useReactFlow(); + const { toast } = useToast(); const handleCopyPaste = useCallback( (event: KeyboardEvent) => { - // Prevent copy/paste if any modal is open or if the focus is on an input element const activeElement = document.activeElement; const isInputField = activeElement?.tagName === "INPUT" || @@ -28,7 +29,6 @@ export function useCopyPaste() { if (isInputField) return; if (event.ctrlKey || event.metaKey) { - // COPY: Ctrl+C or Cmd+C if (event.key === "c" || event.key === "C") { const { nodes } = useNodeStore.getState(); const { edges } = useEdgeStore.getState(); @@ -53,81 +53,102 @@ 
export function useCopyPaste() { edges: selectedEdges, }; - storage.set(Key.COPIED_FLOW_DATA, JSON.stringify(copiedData)); + const clipboardText = `${CLIPBOARD_PREFIX}${JSON.stringify(copiedData)}`; + navigator.clipboard + .writeText(clipboardText) + .then(() => { + toast({ + title: "Copied successfully", + description: `${selectedNodes.length} node(s) copied to clipboard`, + }); + }) + .catch((error) => { + console.error("Failed to copy to clipboard:", error); + }); } - // PASTE: Ctrl+V or Cmd+V if (event.key === "v" || event.key === "V") { - const copiedDataString = storage.get(Key.COPIED_FLOW_DATA); - if (copiedDataString) { - const copiedData = JSON.parse(copiedDataString) as CopyableData; - const oldToNewIdMap: Record = {}; + navigator.clipboard + .readText() + .then((clipboardText) => { + if (!clipboardText.startsWith(CLIPBOARD_PREFIX)) { + return; // Not our data, ignore + } - // Get fresh viewport values at paste time to ensure correct positioning - const { x, y, zoom } = getViewport(); - const viewportCenter = { - x: (window.innerWidth / 2 - x) / zoom, - y: (window.innerHeight / 2 - y) / zoom, - }; + const jsonString = clipboardText.slice(CLIPBOARD_PREFIX.length); + const copiedData = JSON.parse(jsonString) as CopyableData; + const oldToNewIdMap: Record = {}; - let minX = Infinity, - minY = Infinity, - maxX = -Infinity, - maxY = -Infinity; - copiedData.nodes.forEach((node) => { - minX = Math.min(minX, node.position.x); - minY = Math.min(minY, node.position.y); - maxX = Math.max(maxX, node.position.x); - maxY = Math.max(maxY, node.position.y); - }); - - const offsetX = viewportCenter.x - (minX + maxX) / 2; - const offsetY = viewportCenter.y - (minY + maxY) / 2; - - // Deselect existing nodes first - useNodeStore.setState((state) => ({ - nodes: state.nodes.map((node) => ({ ...node, selected: false })), - })); - - // Create and add new nodes with UNIQUE IDs using UUID - copiedData.nodes.forEach((node) => { - const newNodeId = uuidv4(); - oldToNewIdMap[node.id] = newNodeId; - - const newNode: CustomNode = { - ...node, - id: newNodeId, - selected: true, - position: { - x: node.position.x + offsetX, - y: node.position.y + offsetY, - }, + const { x, y, zoom } = getViewport(); + const viewportCenter = { + x: (window.innerWidth / 2 - x) / zoom, + y: (window.innerHeight / 2 - y) / zoom, }; - useNodeStore.getState().addNode(newNode); - }); - - // Add edges with updated source/target IDs - const { addEdge } = useEdgeStore.getState(); - copiedData.edges.forEach((edge) => { - const newSourceId = oldToNewIdMap[edge.source] ?? edge.source; - const newTargetId = oldToNewIdMap[edge.target] ?? edge.target; - - addEdge({ - source: newSourceId, - target: newTargetId, - sourceHandle: edge.sourceHandle ?? "", - targetHandle: edge.targetHandle ?? 
"", - data: { - ...edge.data, - }, + let minX = Infinity, + minY = Infinity, + maxX = -Infinity, + maxY = -Infinity; + copiedData.nodes.forEach((node) => { + minX = Math.min(minX, node.position.x); + minY = Math.min(minY, node.position.y); + maxX = Math.max(maxX, node.position.x); + maxY = Math.max(maxY, node.position.y); }); + + const offsetX = viewportCenter.x - (minX + maxX) / 2; + const offsetY = viewportCenter.y - (minY + maxY) / 2; + + // Deselect existing nodes first + useNodeStore.setState((state) => ({ + nodes: state.nodes.map((node) => ({ + ...node, + selected: false, + })), + })); + + // Create and add new nodes with UNIQUE IDs using UUID + copiedData.nodes.forEach((node) => { + const newNodeId = uuidv4(); + oldToNewIdMap[node.id] = newNodeId; + + const newNode: CustomNode = { + ...node, + id: newNodeId, + selected: true, + position: { + x: node.position.x + offsetX, + y: node.position.y + offsetY, + }, + }; + + useNodeStore.getState().addNode(newNode); + }); + + // Add edges with updated source/target IDs + const { addEdge } = useEdgeStore.getState(); + copiedData.edges.forEach((edge) => { + const newSourceId = oldToNewIdMap[edge.source] ?? edge.source; + const newTargetId = oldToNewIdMap[edge.target] ?? edge.target; + + addEdge({ + source: newSourceId, + target: newTargetId, + sourceHandle: edge.sourceHandle ?? "", + targetHandle: edge.targetHandle ?? "", + data: { + ...edge.data, + }, + }); + }); + }) + .catch((error) => { + console.error("Failed to read from clipboard:", error); }); - } } } }, - [getViewport], + [getViewport, toast], ); return handleCopyPaste; From 130532581307df50122be464894be78ab42a8926 Mon Sep 17 00:00:00 2001 From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> Date: Tue, 9 Dec 2025 20:56:22 +0530 Subject: [PATCH 34/58] fix(frontend): preserve button shape in credential select when content is long (#11577) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the content inside the credential select dropdown becomes too long, the adjacent link buttons lose their rounded shape and appear squarish. This happens when the text stretches the container or affects the layout of the buttons. The issue occurs because the button's width can shrink below its intended size when the flex container is stretched by long credential names. By adding an explicit minimum width constraint with `!min-w-8`, we ensure the button maintains its proper dimensions and rounded appearance regardless of the select dropdown's content length. 
### Changes πŸ—οΈ - Added `!min-w-8` to the external link button's className in `SelectCredential` component to enforce a minimum width of 2rem (8 * 0.25rem) - This ensures the button maintains its rounded shape even when the adjacent select dropdown contains long credential names ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Tested credential select with short credential names - button should maintain rounded shape - [x] Tested credential select with very long credential names (e.g., long provider names, usernames, and hosts) - button should maintain rounded shape --- .../input-renderer/fields/CredentialField/SelectCredential.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/SelectCredential.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/SelectCredential.tsx index 602c9b0e2d..55081332aa 100644 --- a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/SelectCredential.tsx +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/SelectCredential.tsx @@ -84,7 +84,7 @@ export const SelectCredential: React.FC = ({ rel="noopener noreferrer" variant="outline" size="icon" - className="h-8 w-8 border-zinc-300 p-0" + className="h-8 w-8 !min-w-8 border-zinc-300 p-0" > From c9681f5d4415d7a82b7b1ec37220a2c4faaa8ebb Mon Sep 17 00:00:00 2001 From: Ubbe Date: Tue, 9 Dec 2025 23:17:44 +0700 Subject: [PATCH 35/58] fix(frontend): library page adjustments (#11587) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes πŸ—οΈ ### Adjust layout and styles on mobile πŸ“± Screenshot 2025-12-09 at 22 53 14 ### Make the sidebar cards have contextual actions Screenshot 2025-12-09 at 22 53 27 Depending on the card type, different type of actions are shown... ### Make buttons in "About agent" card do something Screenshot 2025-12-09 at 22 54 01 ### Other - Hide `Schedule` button for agents with trigger run type - Adjust secondary button background colour... 
- Make drawer content scrollable on mobile ## Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Run locally and test the above Co-authored-by: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> --- .../NewAgentLibraryView.tsx | 2 +- .../modals/RunAgentModal/RunAgentModal.tsx | 4 +- .../components/other/EmptyTasks.tsx | 41 +++- .../selected-views/AnchorLinksWrap.tsx | 14 ++ .../selected-views/SelectedActionsWrap.tsx | 11 ++ .../SelectedRunView/SelectedRunView.tsx | 79 ++++---- .../SelectedRunActions/SelectedRunActions.tsx | 5 +- .../SelectedScheduleView.tsx | 75 +++---- .../components/SelectedScheduleActions.tsx | 5 +- .../components/SelectedTemplateActions.tsx | 2 + .../components/SelectedTriggerActions.tsx | 2 + .../selected-views/SelectedViewLayout.tsx | 2 +- .../SidebarRunsList/SidebarRunsList.tsx | 50 +++-- .../{RunIconWrapper.tsx => IconWrapper.tsx} | 0 .../components/ScheduleActionsDropdown.tsx | 123 ++++++++++++ .../components/ScheduleListItem.tsx | 23 ++- ...RunSidebarCard.tsx => SidebarItemCard.tsx} | 17 +- .../components/TaskActionsDropdown.tsx | 185 ++++++++++++++++++ .../{RunListItem.tsx => TaskListItem.tsx} | 21 +- .../components/TemplateActionsDropdown.tsx | 125 ++++++++++++ .../components/TemplateListItem.tsx | 23 ++- .../components/TriggerActionsDropdown.tsx | 125 ++++++++++++ .../components/TriggerListItem.tsx | 23 ++- .../src/components/__legacy__/ui/skeleton.tsx | 2 +- .../src/components/atoms/Button/helpers.ts | 2 +- .../Dialog/components/DrawerWrap.tsx | 15 +- .../molecules/Dialog/components/styles.ts | 2 +- 27 files changed, 850 insertions(+), 128 deletions(-) create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/AnchorLinksWrap.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedActionsWrap.tsx rename autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/{RunIconWrapper.tsx => IconWrapper.tsx} (100%) create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleActionsDropdown.tsx rename autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/{RunSidebarCard.tsx => SidebarItemCard.tsx} (68%) create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskActionsDropdown.tsx rename autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/{RunListItem.tsx => TaskListItem.tsx} (80%) create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateActionsDropdown.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerActionsDropdown.tsx diff --git 
a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx index b06901f860..f951c09522 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx @@ -72,7 +72,7 @@ export function NewAgentLibraryView() { } return ( -
+
0 || Object.keys(agentCredentialsInputFields || {}).length > 0; + const isTriggerRunType = defaultRunType.includes("trigger"); + function handleInputChange(key: string, value: string) { setInputValues((prev) => ({ ...prev, @@ -153,7 +155,7 @@ export function RunAgentModal({
- {!allRequiredInputsAreSet ? ( + {isTriggerRunType ? null : !allRequiredInputsAreSet ? ( diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx index c0c2c900a1..62a75e4993 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx @@ -1,8 +1,14 @@ +"use client"; + +import { getV1GetGraphVersion } from "@/app/api/__generated__/endpoints/graphs/graphs"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { Button } from "@/components/atoms/Button/Button"; import { Text } from "@/components/atoms/Text/Text"; import { ShowMoreText } from "@/components/molecules/ShowMoreText/ShowMoreText"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { exportAsJSONFile } from "@/lib/utils"; import { formatDate } from "@/lib/utils/time"; +import Link from "next/link"; import { RunAgentModal } from "../modals/RunAgentModal/RunAgentModal"; import { RunDetailCard } from "../selected-views/RunDetailCard/RunDetailCard"; import { EmptyTasksIllustration } from "./EmptyTasksIllustration"; @@ -12,6 +18,30 @@ type Props = { }; export function EmptyTasks({ agent }: Props) { + const { toast } = useToast(); + + async function handleExport() { + try { + const res = await getV1GetGraphVersion( + agent.graph_id, + agent.graph_version, + { for_export: true }, + ); + if (res.status === 200) { + const filename = `${agent.name}_v${agent.graph_version}.json`; + exportAsJSONFile(res.data as any, filename); + toast({ title: "Agent exported" }); + } else { + toast({ title: "Failed to export agent", variant: "destructive" }); + } + } catch (e: any) { + toast({ + title: "Failed to export agent", + description: e?.message, + variant: "destructive", + }); + } + } const isPublished = Boolean(agent.marketplace_listing); const createdAt = formatDate(agent.created_at); const updatedAt = formatDate(agent.updated_at); @@ -93,10 +123,15 @@ export function EmptyTasks({ agent }: Props) { ) : null}
- -
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/AnchorLinksWrap.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/AnchorLinksWrap.tsx new file mode 100644 index 0000000000..6dae969142 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/AnchorLinksWrap.tsx @@ -0,0 +1,14 @@ +import { cn } from "@/lib/utils"; +import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../helpers"; + +type Props = { + children: React.ReactNode; +}; + +export function AnchorLinksWrap({ children }: Props) { + return ( +
+ +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedActionsWrap.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedActionsWrap.tsx new file mode 100644 index 0000000000..da7985e3e2 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedActionsWrap.tsx @@ -0,0 +1,11 @@ +type Props = { + children: React.ReactNode; +}; + +export function SelectedActionsWrap({ children }: Props) { + return ( +
+ {children} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx index 1c7df0f680..97292b85ce 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx @@ -13,10 +13,11 @@ import { import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/PendingReviewsList"; import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews"; +import { isLargeScreen, useBreakpoint } from "@/lib/hooks/useBreakpoint"; import { InfoIcon } from "@phosphor-icons/react"; import { useEffect } from "react"; -import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../../helpers"; import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly"; +import { AnchorLinksWrap } from "../AnchorLinksWrap"; import { LoadingSelectedContent } from "../LoadingSelectedContent"; import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader"; @@ -46,6 +47,9 @@ export function SelectedRunView({ const { run, preset, isLoading, responseError, httpError } = useSelectedRunView(agent.graph_id, runId); + const breakpoint = useBreakpoint(); + const isLgScreenUp = isLargeScreen(breakpoint); + const { pendingReviews, isLoading: reviewsLoading, @@ -89,6 +93,15 @@ export function SelectedRunView({
+ {!isLgScreenUp ? ( + + ) : null} + {preset && agent.trigger_setup_info && preset.webhook_id && @@ -100,38 +113,36 @@ export function SelectedRunView({ )} {/* Navigation Links */} -
- -
+ )} + {/* Summary Section */} {withSummary && ( @@ -216,14 +227,16 @@ export function SelectedRunView({
-
- -
+ {isLgScreenUp ? ( +
+ +
+ ) : null}
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx index 7cbbd2ff09..7533577bf5 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx @@ -12,6 +12,7 @@ import { StopIcon, } from "@phosphor-icons/react"; import { AgentActionsDropdown } from "../../../AgentActionsDropdown"; +import { SelectedActionsWrap } from "../../../SelectedActionsWrap"; import { ShareRunButton } from "../../../ShareRunButton/ShareRunButton"; import { CreateTemplateModal } from "../CreateTemplateModal/CreateTemplateModal"; import { useSelectedRunActions } from "./useSelectedRunActions"; @@ -49,7 +50,7 @@ export function SelectedRunActions(props: Props) { if (!props.run || !props.agent) return null; return ( -
+ {!isRunning ? (
+ ); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx index 841ff04df9..6563e19d5d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx @@ -6,9 +6,10 @@ import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner import { Text } from "@/components/atoms/Text/Text"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; import { humanizeCronExpression } from "@/lib/cron-expression-utils"; +import { isLargeScreen, useBreakpoint } from "@/lib/hooks/useBreakpoint"; import { formatInTimezone, getTimezoneDisplayName } from "@/lib/timezone-utils"; -import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../../helpers"; import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly"; +import { AnchorLinksWrap } from "../AnchorLinksWrap"; import { LoadingSelectedContent } from "../LoadingSelectedContent"; import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader"; @@ -41,6 +42,9 @@ export function SelectedScheduleView({ }, }); + const breakpoint = useBreakpoint(); + const isLgScreenUp = isLargeScreen(breakpoint); + function scrollToSection(id: string) { const element = document.getElementById(id); if (element) { @@ -83,37 +87,42 @@ export function SelectedScheduleView({
-
-
- -
+
+ + {schedule && !isLgScreenUp ? ( +
+ +
+ ) : null}
{/* Navigation Links */} -
- -
+ + + + {/* Schedule Section */}
@@ -172,10 +181,6 @@ export function SelectedScheduleView({
- {/* {// TODO: re-enable edit inputs modal once the API supports it */} - {/* {schedule && Object.keys(schedule.input_data).length > 0 && ( - - )} */}
- {schedule ? ( -
+ {schedule && isLgScreenUp ? ( +
-
+ {openInBuilderHref && (
+ ); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/SelectedTemplateActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/SelectedTemplateActions.tsx index a3a2a9ac62..1d50ec7c85 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/SelectedTemplateActions.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/SelectedTemplateActions.tsx @@ -15,6 +15,7 @@ import { useToast } from "@/components/molecules/Toast/use-toast"; import { FloppyDiskIcon, PlayIcon, TrashIcon } from "@phosphor-icons/react"; import { useQueryClient } from "@tanstack/react-query"; import { useState } from "react"; +import { AgentActionsDropdown } from "../../AgentActionsDropdown"; interface Props { agent: LibraryAgent; @@ -134,6 +135,7 @@ export function SelectedTemplateActions({ )} +
)} +
- - - Tasks {runsCount} - - - Scheduled {schedulesCount} - - {triggersCount > 0 && ( - - Triggers {triggersCount} - - )} - - Templates {templatesCount} - - +
+
+
+ + + Tasks {runsCount} + + + Scheduled{" "} + {schedulesCount} + + {triggersCount > 0 && ( + + Triggers{" "} + {triggersCount} + + )} + + Templates{" "} + {templatesCount} + + +
+
<> (
- onSelectRun && onSelectRun(run.id, "runs")} /> @@ -169,6 +180,7 @@ export function SidebarRunsList({
onSelectRun(s.id, "scheduled")} /> @@ -197,6 +209,7 @@ export function SidebarRunsList({
onSelectRun(trigger.id, "triggers")} /> @@ -225,6 +238,7 @@ export function SidebarRunsList({
onSelectRun(template.id, "templates")} /> diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunIconWrapper.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/IconWrapper.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunIconWrapper.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/IconWrapper.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleActionsDropdown.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleActionsDropdown.tsx new file mode 100644 index 0000000000..d85d3ddfaf --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleActionsDropdown.tsx @@ -0,0 +1,123 @@ +"use client"; + +import { + getGetV1ListExecutionSchedulesForAGraphQueryOptions, + useDeleteV1DeleteExecutionSchedule, +} from "@/app/api/__generated__/endpoints/schedules/schedules"; +import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "@/components/molecules/DropdownMenu/DropdownMenu"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { DotsThreeVertical } from "@phosphor-icons/react"; +import { useQueryClient } from "@tanstack/react-query"; +import { useState } from "react"; + +interface Props { + agent: LibraryAgent; + schedule: GraphExecutionJobInfo; + onDeleted?: () => void; +} + +export function ScheduleActionsDropdown({ agent, schedule, onDeleted }: Props) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + const [showDeleteDialog, setShowDeleteDialog] = useState(false); + + const { mutateAsync: deleteSchedule, isPending: isDeleting } = + useDeleteV1DeleteExecutionSchedule(); + + async function handleDelete() { + try { + await deleteSchedule({ scheduleId: schedule.id }); + + toast({ title: "Schedule deleted" }); + + queryClient.invalidateQueries({ + queryKey: getGetV1ListExecutionSchedulesForAGraphQueryOptions( + agent.graph_id, + ).queryKey, + }); + + setShowDeleteDialog(false); + onDeleted?.(); + } catch (error: unknown) { + toast({ + title: "Failed to delete schedule", + description: + error instanceof Error + ? error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } + } + + return ( + <> + + + + + + { + e.stopPropagation(); + setShowDeleteDialog(true); + }} + className="flex items-center gap-2" + > + Delete schedule + + + + + + +
+ + Are you sure you want to delete this schedule? This action cannot + be undone. + + + + + +
+
+
+ + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx index 2265a92f62..a389fb4fc8 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx @@ -1,24 +1,30 @@ "use client"; import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; +import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { ClockClockwiseIcon } from "@phosphor-icons/react"; import moment from "moment"; -import { IconWrapper } from "./RunIconWrapper"; -import { RunSidebarCard } from "./RunSidebarCard"; +import { IconWrapper } from "./IconWrapper"; +import { ScheduleActionsDropdown } from "./ScheduleActionsDropdown"; +import { SidebarItemCard } from "./SidebarItemCard"; -interface ScheduleListItemProps { +interface Props { schedule: GraphExecutionJobInfo; + agent: LibraryAgent; selected?: boolean; onClick?: () => void; + onDeleted?: () => void; } export function ScheduleListItem({ schedule, + agent, selected, onClick, -}: ScheduleListItemProps) { + onDeleted, +}: Props) { return ( - } + actions={ + + } /> ); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunSidebarCard.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/SidebarItemCard.tsx similarity index 68% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunSidebarCard.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/SidebarItemCard.tsx index eb163f7337..4f4e9962ce 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunSidebarCard.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/SidebarItemCard.tsx @@ -4,25 +4,27 @@ import { Text } from "@/components/atoms/Text/Text"; import { cn } from "@/lib/utils"; import React from "react"; -interface RunListItemProps { +interface Props { title: string; description?: string; icon?: React.ReactNode; selected?: boolean; onClick?: () => void; + actions?: React.ReactNode; } -export function RunSidebarCard({ +export function SidebarItemCard({ title, description, icon, selected, onClick, -}: RunListItemProps) { + actions, +}: Props) { return ( -
+ {actions ? ( +
e.stopPropagation()}>{actions}
+ ) : null}
- +
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskActionsDropdown.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskActionsDropdown.tsx new file mode 100644 index 0000000000..95cc7740f8 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskActionsDropdown.tsx @@ -0,0 +1,185 @@ +"use client"; + +import { + getGetV1ListGraphExecutionsInfiniteQueryOptions, + useDeleteV1DeleteGraphExecution, +} from "@/app/api/__generated__/endpoints/graphs/graphs"; +import { + getGetV2ListPresetsQueryKey, + usePostV2CreateANewPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; +import type { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuSeparator, + DropdownMenuTrigger, +} from "@/components/molecules/DropdownMenu/DropdownMenu"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { DotsThreeVertical } from "@phosphor-icons/react"; +import { useQueryClient } from "@tanstack/react-query"; +import { useState } from "react"; +import { CreateTemplateModal } from "../../../selected-views/SelectedRunView/components/CreateTemplateModal/CreateTemplateModal"; + +interface Props { + agent: LibraryAgent; + run: GraphExecutionMeta; + onDeleted?: () => void; +} + +export function TaskActionsDropdown({ agent, run, onDeleted }: Props) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + const [showDeleteDialog, setShowDeleteDialog] = useState(false); + const [isCreateTemplateModalOpen, setIsCreateTemplateModalOpen] = + useState(false); + + const { mutateAsync: deleteRun, isPending: isDeletingRun } = + useDeleteV1DeleteGraphExecution(); + + const { mutateAsync: createPreset } = usePostV2CreateANewPreset(); + + async function handleDeleteRun() { + try { + await deleteRun({ graphExecId: run.id }); + + toast({ title: "Task deleted" }); + + await queryClient.refetchQueries({ + queryKey: getGetV1ListGraphExecutionsInfiniteQueryOptions( + agent.graph_id, + ).queryKey, + }); + + setShowDeleteDialog(false); + onDeleted?.(); + } catch (error: unknown) { + toast({ + title: "Failed to delete task", + description: + error instanceof Error + ? error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } + } + + async function handleCreateTemplate(name: string, description: string) { + try { + const res = await createPreset({ + data: { + name, + description, + graph_execution_id: run.id, + }, + }); + + if (res.status === 200) { + toast({ + title: "Template created", + }); + + queryClient.invalidateQueries({ + queryKey: getGetV2ListPresetsQueryKey({ + graph_id: agent.graph_id, + }), + }); + + setIsCreateTemplateModalOpen(false); + } + } catch (error: unknown) { + toast({ + title: "Failed to create template", + description: + error instanceof Error + ? 
error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } + } + + return ( + <> + + + + + + { + e.stopPropagation(); + setIsCreateTemplateModalOpen(true); + }} + className="flex items-center gap-2" + > + Save as template + + + { + e.stopPropagation(); + setShowDeleteDialog(true); + }} + className="flex items-center gap-2" + > + Delete task + + + + + + +
+ + Are you sure you want to delete this task? This action cannot be + undone. + + + + + +
+
+
+ + setIsCreateTemplateModalOpen(false)} + onCreate={handleCreateTemplate} + run={run as any} + /> + + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskListItem.tsx similarity index 80% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunListItem.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskListItem.tsx index c038217f72..22adc54e4f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunListItem.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskListItem.tsx @@ -2,6 +2,7 @@ import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; +import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { CheckCircleIcon, ClockIcon, @@ -12,8 +13,9 @@ import { } from "@phosphor-icons/react"; import moment from "moment"; import React from "react"; -import { IconWrapper } from "./RunIconWrapper"; -import { RunSidebarCard } from "./RunSidebarCard"; +import { IconWrapper } from "./IconWrapper"; +import { SidebarItemCard } from "./SidebarItemCard"; +import { TaskActionsDropdown } from "./TaskActionsDropdown"; const statusIconMap: Record = { INCOMPLETE: ( @@ -53,26 +55,33 @@ const statusIconMap: Record = { ), }; -interface RunListItemProps { +interface Props { run: GraphExecutionMeta; title: string; + agent: LibraryAgent; selected?: boolean; onClick?: () => void; + onDeleted?: () => void; } -export function RunListItem({ +export function TaskListItem({ run, title, + agent, selected, onClick, -}: RunListItemProps) { + onDeleted, +}: Props) { return ( - + } /> ); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateActionsDropdown.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateActionsDropdown.tsx new file mode 100644 index 0000000000..b65e0fd44a --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateActionsDropdown.tsx @@ -0,0 +1,125 @@ +"use client"; + +import { + getGetV2ListPresetsQueryKey, + useDeleteV2DeleteAPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import type { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from 
"@/components/molecules/DropdownMenu/DropdownMenu"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { DotsThreeVertical } from "@phosphor-icons/react"; +import { useQueryClient } from "@tanstack/react-query"; +import { useState } from "react"; + +interface Props { + agent: LibraryAgent; + template: LibraryAgentPreset; + onDeleted?: () => void; +} + +export function TemplateActionsDropdown({ agent, template, onDeleted }: Props) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + const [showDeleteDialog, setShowDeleteDialog] = useState(false); + + const { mutateAsync: deletePreset, isPending: isDeleting } = + useDeleteV2DeleteAPreset(); + + async function handleDelete() { + try { + await deletePreset({ presetId: template.id }); + + toast({ + title: "Template deleted", + }); + + queryClient.invalidateQueries({ + queryKey: getGetV2ListPresetsQueryKey({ + graph_id: agent.graph_id, + }), + }); + + setShowDeleteDialog(false); + onDeleted?.(); + } catch (error: unknown) { + toast({ + title: "Failed to delete template", + description: + error instanceof Error + ? error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } + } + + return ( + <> + + + + + + { + e.stopPropagation(); + setShowDeleteDialog(true); + }} + className="flex items-center gap-2" + > + Delete template + + + + + + +
+ + Are you sure you want to delete this template? This action cannot + be undone. + + + + + +
+
+
+ + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateListItem.tsx index ffdd746416..c970cd1522 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateListItem.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateListItem.tsx @@ -1,24 +1,30 @@ "use client"; +import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; import { FileTextIcon } from "@phosphor-icons/react"; import moment from "moment"; -import { IconWrapper } from "./RunIconWrapper"; -import { RunSidebarCard } from "./RunSidebarCard"; +import { IconWrapper } from "./IconWrapper"; +import { SidebarItemCard } from "./SidebarItemCard"; +import { TemplateActionsDropdown } from "./TemplateActionsDropdown"; -interface TemplateListItemProps { +interface Props { template: LibraryAgentPreset; + agent: LibraryAgent; selected?: boolean; onClick?: () => void; + onDeleted?: () => void; } export function TemplateListItem({ template, + agent, selected, onClick, -}: TemplateListItemProps) { + onDeleted, +}: Props) { return ( - @@ -28,6 +34,13 @@ export function TemplateListItem({ description={moment(template.updated_at).fromNow()} onClick={onClick} selected={selected} + actions={ + + } /> ); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerActionsDropdown.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerActionsDropdown.tsx new file mode 100644 index 0000000000..35296948c1 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerActionsDropdown.tsx @@ -0,0 +1,125 @@ +"use client"; + +import { + getGetV2ListPresetsQueryKey, + useDeleteV2DeleteAPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import type { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "@/components/molecules/DropdownMenu/DropdownMenu"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { DotsThreeVertical } from "@phosphor-icons/react"; +import { useQueryClient } from "@tanstack/react-query"; +import { useState } from "react"; + +interface Props { + agent: LibraryAgent; + trigger: LibraryAgentPreset; + onDeleted?: () => void; +} + +export function TriggerActionsDropdown({ agent, trigger, onDeleted }: Props) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + const [showDeleteDialog, setShowDeleteDialog] = 
useState(false); + + const { mutateAsync: deletePreset, isPending: isDeleting } = + useDeleteV2DeleteAPreset(); + + async function handleDelete() { + try { + await deletePreset({ presetId: trigger.id }); + + toast({ + title: "Trigger deleted", + }); + + queryClient.invalidateQueries({ + queryKey: getGetV2ListPresetsQueryKey({ + graph_id: agent.graph_id, + }), + }); + + setShowDeleteDialog(false); + onDeleted?.(); + } catch (error: unknown) { + toast({ + title: "Failed to delete trigger", + description: + error instanceof Error + ? error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } + } + + return ( + <> + + + + + + { + e.stopPropagation(); + setShowDeleteDialog(true); + }} + className="flex items-center gap-2" + > + Delete trigger + + + + + + +
+ + Are you sure you want to delete this trigger? This action cannot + be undone. + + + + + +
+
+
+ + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerListItem.tsx index a5d339ad36..4c399e640a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerListItem.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerListItem.tsx @@ -1,24 +1,30 @@ "use client"; +import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; import { LightningIcon } from "@phosphor-icons/react"; import moment from "moment"; -import { IconWrapper } from "./RunIconWrapper"; -import { RunSidebarCard } from "./RunSidebarCard"; +import { IconWrapper } from "./IconWrapper"; +import { SidebarItemCard } from "./SidebarItemCard"; +import { TriggerActionsDropdown } from "./TriggerActionsDropdown"; -interface TriggerListItemProps { +interface Props { trigger: LibraryAgentPreset; + agent: LibraryAgent; selected?: boolean; onClick?: () => void; + onDeleted?: () => void; } export function TriggerListItem({ trigger, + agent, selected, onClick, -}: TriggerListItemProps) { + onDeleted, +}: Props) { return ( - @@ -28,6 +34,13 @@ export function TriggerListItem({ description={moment(trigger.updated_at).fromNow()} onClick={onClick} selected={selected} + actions={ + + } /> ); } diff --git a/autogpt_platform/frontend/src/components/__legacy__/ui/skeleton.tsx b/autogpt_platform/frontend/src/components/__legacy__/ui/skeleton.tsx index 869263d5f0..805fedddcc 100644 --- a/autogpt_platform/frontend/src/components/__legacy__/ui/skeleton.tsx +++ b/autogpt_platform/frontend/src/components/__legacy__/ui/skeleton.tsx @@ -6,7 +6,7 @@ function Skeleton({ }: React.HTMLAttributes) { return (
); diff --git a/autogpt_platform/frontend/src/components/atoms/Button/helpers.ts b/autogpt_platform/frontend/src/components/atoms/Button/helpers.ts index db7938661c..4d9706838c 100644 --- a/autogpt_platform/frontend/src/components/atoms/Button/helpers.ts +++ b/autogpt_platform/frontend/src/components/atoms/Button/helpers.ts @@ -16,7 +16,7 @@ export const extendedButtonVariants = cva( primary: "bg-zinc-800 border-zinc-800 text-white hover:bg-zinc-900 hover:border-zinc-900 rounded-full disabled:text-white disabled:bg-zinc-200 disabled:border-zinc-200 disabled:opacity-1", secondary: - "bg-zinc-100 border-zinc-100 text-black hover:bg-zinc-300 hover:border-zinc-300 rounded-full disabled:text-zinc-300 disabled:bg-zinc-50 disabled:border-zinc-50 disabled:opacity-1", + "bg-zinc-200 border-zinc-200 text-black hover:bg-zinc-300 hover:border-zinc-300 rounded-full disabled:text-zinc-300 disabled:bg-zinc-50 disabled:border-zinc-50 disabled:opacity-1", destructive: "bg-red-500 border-red-500 text-white hover:bg-red-600 hover:border-red-600 rounded-full disabled:text-white disabled:bg-zinc-200 disabled:border-zinc-200 disabled:opacity-1", outline: diff --git a/autogpt_platform/frontend/src/components/molecules/Dialog/components/DrawerWrap.tsx b/autogpt_platform/frontend/src/components/molecules/Dialog/components/DrawerWrap.tsx index b9d3b2e118..d00817bf59 100644 --- a/autogpt_platform/frontend/src/components/molecules/Dialog/components/DrawerWrap.tsx +++ b/autogpt_platform/frontend/src/components/molecules/Dialog/components/DrawerWrap.tsx @@ -1,4 +1,6 @@ import { Button } from "@/components/__legacy__/ui/button"; +import { scrollbarStyles } from "@/components/styles/scrollbars"; +import { cn } from "@/lib/utils"; import { X } from "@phosphor-icons/react"; import { PropsWithChildren } from "react"; import { Drawer } from "vaul"; @@ -41,7 +43,7 @@ export function DrawerWrap({ onInteractOutside={handleClose} >
@@ -61,7 +63,16 @@ export function DrawerWrap({ ) ) : null}
-
{children}
+
+
+ {children} +
+
); diff --git a/autogpt_platform/frontend/src/components/molecules/Dialog/components/styles.ts b/autogpt_platform/frontend/src/components/molecules/Dialog/components/styles.ts index b04dcdd193..3b7d12e8e9 100644 --- a/autogpt_platform/frontend/src/components/molecules/Dialog/components/styles.ts +++ b/autogpt_platform/frontend/src/components/molecules/Dialog/components/styles.ts @@ -19,5 +19,5 @@ export const modalStyles = { // Drawer specific styles export const drawerStyles = { ...commonStyles, - content: `${commonStyles.content} max-h-[90vh] w-full bottom-0 rounded-br-none rounded-bl-none`, + content: `${commonStyles.content} max-h-[90vh] w-full bottom-0 rounded-br-none rounded-bl-none min-h-0`, }; From 7edf01777e36d1ad1f7f36aee7897711cdefda73 Mon Sep 17 00:00:00 2001 From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> Date: Tue, 9 Dec 2025 20:56:45 +0530 Subject: [PATCH 36/58] fix(frontend): sync flowVersion to URL when loading graph from Library (#11585) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When opening a graph from the Library, the `flowVersion` query parameter was not being set in the URL. This caused issues when the graph data didn't contain an internal `graphVersion`, resulting in the builder failing and graphs breaking when running. The `useGetV1GetSpecificGraph` hook relies on the `flowVersion` query parameter to fetch the correct graph version. Without it being properly set in the URL, the graph loading logic fails when the version information is missing from the graph data itself. ### Changes πŸ—οΈ - Added `setQueryStates` to the `useQueryStates` hook return value in `useFlow.ts` - Added logic to sync `flowVersion` to the URL query parameters when a graph is loaded - When `graph.version` is available, it now updates the `flowVersion` query parameter in the URL (defaults to `1` if version is undefined) This ensures the URL stays in sync with the loaded graph's version, preventing builder failures and execution issues. 
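For clarity, the core of the fix as a minimal sketch (the `useFlowVersionSync` wrapper name and the `graph` parameter shape are illustrative only; the nuqs `useQueryStates` and parser usage mirror the actual diff below):

```ts
import { useEffect } from "react";
import { parseAsInteger, parseAsString, useQueryStates } from "nuqs";

// Minimal sketch – the real logic lives inside useFlow.ts (see diff below).
export function useFlowVersionSync(graph?: { version?: number }) {
  const [{ flowVersion }, setQueryStates] = useQueryStates({
    flowID: parseAsString,
    flowVersion: parseAsInteger,
    flowExecutionID: parseAsString,
  });

  useEffect(() => {
    if (!graph) return;
    // Keep the URL in sync with the loaded graph; fall back to version 1
    // when the graph data carries no version of its own.
    setQueryStates({ flowVersion: graph.version ?? 1 });
  }, [graph, setQueryStates]);

  return flowVersion;
}
```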
### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Open a graph from the Library that doesn't have a version in the URL - [x] Verify that `flowVersion` is automatically added to the URL query parameters - [x] Verify that the graph loads correctly and can be executed - [x] Verify that graphs with existing `flowVersion` in URL continue to work correctly - [x] Verify that graphs opened from Library with version information sync correctly --- .../build/components/FlowEditor/Flow/useFlow.ts | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts index 64f00871d8..bf0ebf0a97 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts @@ -42,11 +42,12 @@ export const useFlow = () => { const setBlockMenuOpen = useControlPanelStore( useShallow((state) => state.setBlockMenuOpen), ); - const [{ flowID, flowVersion, flowExecutionID }] = useQueryStates({ - flowID: parseAsString, - flowVersion: parseAsInteger, - flowExecutionID: parseAsString, - }); + const [{ flowID, flowVersion, flowExecutionID }, setQueryStates] = + useQueryStates({ + flowID: parseAsString, + flowVersion: parseAsInteger, + flowExecutionID: parseAsString, + }); const { data: executionDetails } = useGetV1GetExecutionDetails( flowID || "", @@ -102,6 +103,9 @@ export const useFlow = () => { // load graph schemas useEffect(() => { if (graph) { + setQueryStates({ + flowVersion: graph.version ?? 1, + }); setGraphSchemas( graph.input_schema as Record | null, graph.credentials_input_schema as Record | null, From f8afc6044ed85921909d7de83fff093311c1a547 Mon Sep 17 00:00:00 2001 From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> Date: Tue, 9 Dec 2025 21:24:17 +0530 Subject: [PATCH 37/58] fix(frontend): prevent file upload buttons from triggering form submission (#11576) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the File Widget, the upload button was incorrectly behaving like a submit button. When users clicked it, the rjsf library immediately triggered form validation and displayed validation errors, even though the user was only trying to upload a file. This happened because HTML buttons inside a form default to `type="submit"`, which triggers form submission on click. By explicitly setting `type="button"` on all file-related buttons, we prevent them from submitting the form while still allowing them to trigger the file input dialog. ### Changes πŸ—οΈ - Added `type="button"` attribute to the clear button in the compact variant - Added `type="button"` attribute to the upload button in the compact variant - Added `type="button"` attribute to the "Browse File" button in the default variant This ensures that clicking any of these buttons only triggers the intended file selection/upload action without causing unwanted form validation or submission. 
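To illustrate why the explicit attribute matters, a minimal standalone sketch (the `UploadField` component is made up for illustration; only the `type="button"` attribute reflects the actual change):

```tsx
import { useRef } from "react";

// Inside a <form>, a <button> without an explicit type defaults to
// type="submit", so clicking it triggers validation and submission.
// type="button" limits the click to the intended file-dialog action.
export function UploadField() {
  const inputRef = useRef<HTMLInputElement>(null);

  return (
    <>
      <input ref={inputRef} type="file" hidden />
      <button type="button" onClick={() => inputRef.current?.click()}>
        Browse File
      </button>
    </>
  );
}
```

The same attribute is applied to the clear button and the compact upload button in the diff below.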
### Checklist πŸ“‹

#### For code changes:

- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  - [x] Tested clicking the upload button in a form with File Widget - form should not submit or show validation errors
  - [x] Tested clicking the clear button - should clear the file without triggering form validation
  - [x] Tested clicking the "Browse File" button - should open file dialog without triggering form validation
  - [x] Verified file upload functionality still works correctly after selecting a file
---
 .../frontend/src/components/atoms/FileInput/FileInput.tsx | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx
index 8f855ad47d..d43063b411 100644
--- a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx
+++ b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx
@@ -266,6 +266,7 @@ export function FileInput(props: Props) {
             size="small"
             className="h-7 w-7 min-w-0 flex-shrink-0 border-zinc-300 p-0 text-gray-500 hover:text-red-600 dark:text-gray-400 dark:hover:text-red-500"
             onClick={handleClear}
+            type="button"
           >
@@ -278,6 +279,7 @@ export function FileInput(props: Props) {
             onClick={() => inputRef.current?.click()}
             className="flex-1 border-zinc-300 text-xs"
             disabled={isUploading}
+            type="button"
           >
             {`Upload ${displayName}`}
@@ -367,6 +369,7 @@ export function FileInput(props: Props) {

From 95200b67f8235654c4972cd3e054f3e271482625 Mon Sep 17 00:00:00 2001
From: Nicholas Tindle
Date: Tue, 9 Dec 2025 11:28:22 -0600
Subject: [PATCH 38/58] feat(blocks): add many new spreadsheet blocks (#11574)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We have lots we want to do with Google Sheets, and we don't want a lack of blocks to be a limiter, so I pre-did a lot of blocks!
### Changes πŸ—οΈ Adds 24 new blocks for google sheets (tested and working) ``` |-----|-------------------------------------------|----------------------------------------| | 1 | GoogleSheetsFilterRowsBlock | Filter rows based on column conditions | βœ… | | 2 | GoogleSheetsLookupRowBlock | VLOOKUP-style row lookup | βœ… | | 3 | GoogleSheetsDeleteRowsBlock | Delete rows from a sheet | βœ… | | 4 | GoogleSheetsGetColumnBlock | Get data from a specific column | βœ… | | 5 | GoogleSheetsSortBlock | Sort sheet data | βœ… | | 6 | GoogleSheetsGetUniqueValuesBlock | Get unique values from a column | βœ… | | 7 | GoogleSheetsInsertRowBlock | Insert rows into a sheet | βœ… | | 8 | GoogleSheetsAddColumnBlock | Add a new column | βœ… | | 9 | GoogleSheetsGetRowCountBlock | Get the number of rows | βœ… | | 10 | GoogleSheetsRemoveDuplicatesBlock | Remove duplicate rows | βœ… | | 11 | GoogleSheetsUpdateRowBlock | Update an existing row | βœ… | | 12 | GoogleSheetsGetRowBlock | Get a specific row by index | βœ… | | 13 | GoogleSheetsDeleteColumnBlock | Delete a column | βœ… | | 14 | GoogleSheetsCreateNamedRangeBlock | Create a named range | βœ… | | 15 | GoogleSheetsListNamedRangesBlock | List all named ranges | βœ… | | 16 | GoogleSheetsAddDropdownBlock | Add dropdown validation to cells | βœ… | | 17 | GoogleSheetsCopyToSpreadsheetBlock | Copy sheet to another spreadsheet | βœ… | | 18 | GoogleSheetsProtectRangeBlock | Protect a range from editing | βœ… | | 19 | GoogleSheetsExportCsvBlock | Export sheet as CSV | βœ… | | 20 | GoogleSheetsImportCsvBlock | Import CSV data | βœ… | | 21 | GoogleSheetsAddNoteBlock | Add notes to cells | βœ… | | 22 | GoogleSheetsGetNotesBlock | Get notes from cells | βœ… | | 23 | GoogleSheetsShareSpreadsheetBlock | Share spreadsheet with users | βœ… | | 24 | GoogleSheetsSetPublicAccessBlock | Set public access permissions | βœ… | ``` ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Tested using the attached agent [super test for spreadsheets_v9.json](https://github.com/user-attachments/files/24041582/super.test.for.spreadsheets_v9.json) --- > [!NOTE] > Introduces a large suite of Google Sheets blocks for row/column ops, filtering/sorting/lookup, CSV import/export, notes, named ranges, protections, sheet copy, and sharing/public access, plus refactors append to a simpler single-row append. > > - **Google Sheets blocks (new)**: > - **Data ops**: `GoogleSheetsFilterRowsBlock`, `GoogleSheetsLookupRowBlock`, `GoogleSheetsDeleteRowsBlock`, `GoogleSheetsGetColumnBlock`, `GoogleSheetsSortBlock`, `GoogleSheetsGetUniqueValuesBlock`, `GoogleSheetsInsertRowBlock`, `GoogleSheetsAddColumnBlock`, `GoogleSheetsGetRowCountBlock`, `GoogleSheetsRemoveDuplicatesBlock`, `GoogleSheetsUpdateRowBlock`, `GoogleSheetsGetRowBlock`, `GoogleSheetsDeleteColumnBlock`. > - **Named ranges & validation**: `GoogleSheetsCreateNamedRangeBlock`, `GoogleSheetsListNamedRangesBlock`, `GoogleSheetsAddDropdownBlock`. > - **Sheet/admin**: `GoogleSheetsCopyToSpreadsheetBlock`, `GoogleSheetsProtectRangeBlock`. > - **CSV & notes**: `GoogleSheetsExportCsvBlock`, `GoogleSheetsImportCsvBlock`, `GoogleSheetsAddNoteBlock`, `GoogleSheetsGetNotesBlock`. > - **Sharing**: `GoogleSheetsShareSpreadsheetBlock`, `GoogleSheetsSetPublicAccessBlock`. 
> - **Refactor**: > - Rename and simplify append: `GoogleSheetsAppendRowBlock` (replaces multi-row/dict input with single `row`), fixed insert option to `INSERT_ROWS` and streamlined response. > - **Utilities/Enums**: > - Add helpers (`_column_letter_to_index`, `_index_to_column_letter`, `_apply_filter`) and enums (`FilterOperator`, `SortOrder`, `ShareRole`, `PublicAccessRole`). > - Drive/Sheets service builders and file validation reused across new blocks. > > Written by [Cursor Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit 6e9e2f4024199f63f6b9223d77da90bcf596ac3a. This will update automatically on new commits. Configure [here](https://cursor.com/dashboard?tab=bugbot). --------- Co-authored-by: Claude Co-authored-by: Cursor Agent --- .../backend/backend/blocks/google/sheets.py | 4533 ++++++++++++++++- 1 file changed, 4428 insertions(+), 105 deletions(-) diff --git a/autogpt_platform/backend/backend/blocks/google/sheets.py b/autogpt_platform/backend/backend/blocks/google/sheets.py index fac4e2d1aa..7b9ba2161e 100644 --- a/autogpt_platform/backend/backend/blocks/google/sheets.py +++ b/autogpt_platform/backend/backend/blocks/google/sheets.py @@ -1,6 +1,8 @@ import asyncio +import csv +import io +import re from enum import Enum -from typing import Any from google.oauth2.credentials import Credentials from googleapiclient.discovery import build @@ -131,35 +133,6 @@ def sheet_id_by_name(service, spreadsheet_id: str, sheet_name: str) -> int | Non return None -def _convert_dicts_to_rows( - data: list[dict[str, Any]], headers: list[str] -) -> list[list[str]]: - """Convert list of dictionaries to list of rows using the specified header order. - - Args: - data: List of dictionaries to convert - headers: List of column headers to use for ordering - - Returns: - List of rows where each row is a list of string values in header order - """ - if not data: - return [] - - if not headers: - raise ValueError("Headers are required when using list[dict] format") - - rows = [] - for item in data: - row = [] - for header in headers: - value = item.get(header, "") - row.append(str(value) if value is not None else "") - rows.append(row) - - return rows - - def _build_sheets_service(credentials: GoogleCredentials): """Build Sheets service from platform credentials (with refresh token).""" settings = Settings() @@ -260,6 +233,17 @@ class BatchOperationType(str, Enum): CLEAR = "clear" +class PublicAccessRole(str, Enum): + READER = "reader" + COMMENTER = "commenter" + + +class ShareRole(str, Enum): + READER = "reader" + WRITER = "writer" + COMMENTER = "commenter" + + class BatchOperation(BlockSchemaInput): type: BatchOperationType = SchemaField( description="The type of operation to perform" @@ -531,7 +515,9 @@ class GoogleSheetsWriteBlock(Block): return result -class GoogleSheetsAppendBlock(Block): +class GoogleSheetsAppendRowBlock(Block): + """Append a single row to the end of a Google Sheet.""" + class Input(BlockSchemaInput): spreadsheet: GoogleDriveFile = GoogleDriveFileField( title="Spreadsheet", @@ -540,54 +526,33 @@ class GoogleSheetsAppendBlock(Block): allowed_views=["SPREADSHEETS"], allowed_mime_types=["application/vnd.google-apps.spreadsheet"], ) + row: list[str] = SchemaField( + description="Row values to append (e.g., ['Alice', 'alice@example.com', '25'])", + ) sheet_name: str = SchemaField( - description="Optional sheet to append to (defaults to first sheet)", + description="Sheet to append to (optional, defaults to first sheet)", default="", ) - values: list[list[str]] = SchemaField( - 
description="Rows to append as list of rows (list[list[str]])", - default=[], - ) - dict_values: list[dict[str, Any]] = SchemaField( - description="Rows to append as list of dictionaries (list[dict])", - default=[], - ) - headers: list[str] = SchemaField( - description="Column headers to use for ordering dict values (required when dict_values is provided)", - default=[], - ) - range: str = SchemaField( - description="Range to append to (e.g. 'A:A' for column A only, 'A:C' for columns A-C, or leave empty for unlimited columns). When empty, data will span as many columns as needed.", - default="", - advanced=True, - ) value_input_option: ValueInputOption = SchemaField( - description="How input data should be interpreted", + description="How values are interpreted. USER_ENTERED: parsed like typed input (e.g., '=SUM(A1:A5)' becomes a formula, '1/2/2024' becomes a date). RAW: stored as-is without parsing.", default=ValueInputOption.USER_ENTERED, advanced=True, ) - insert_data_option: InsertDataOption = SchemaField( - description="How new data should be inserted", - default=InsertDataOption.INSERT_ROWS, - advanced=True, - ) class Output(BlockSchemaOutput): result: dict = SchemaField(description="Append API response") spreadsheet: GoogleDriveFile = SchemaField( - description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)", - ) - error: str = SchemaField( - description="Error message if any", + description="The spreadsheet for chaining to other blocks", ) + error: str = SchemaField(description="Error message if any") def __init__(self): super().__init__( id="531d50c0-d6b9-4cf9-a013-7bf783d313c7", - description="Append data to a Google Sheet. Use 'values' for list of rows (list[list[str]]) or 'dict_values' with 'headers' for list of dictionaries (list[dict]). Data is added to the next empty row without overwriting existing content. Leave range empty for unlimited columns, or specify range like 'A:A' to constrain to specific columns.", + description="Append or Add a single row to the end of a Google Sheet. 
The row is added after the last row with data.", categories={BlockCategory.DATA}, - input_schema=GoogleSheetsAppendBlock.Input, - output_schema=GoogleSheetsAppendBlock.Output, + input_schema=GoogleSheetsAppendRowBlock.Input, + output_schema=GoogleSheetsAppendRowBlock.Output, disabled=GOOGLE_SHEETS_DISABLED, test_input={ "spreadsheet": { @@ -595,7 +560,7 @@ class GoogleSheetsAppendBlock(Block): "name": "Test Spreadsheet", "mimeType": "application/vnd.google-apps.spreadsheet", }, - "values": [["Charlie", "95"]], + "row": ["Charlie", "95"], }, test_credentials=TEST_CREDENTIALS, test_output=[ @@ -614,7 +579,7 @@ class GoogleSheetsAppendBlock(Block): ), ], test_mock={ - "_append_sheet": lambda *args, **kwargs: { + "_append_row": lambda *args, **kwargs: { "updatedCells": 2, "updatedColumns": 2, "updatedRows": 1, @@ -629,44 +594,26 @@ class GoogleSheetsAppendBlock(Block): yield "error", "No spreadsheet selected" return - # Check if the selected file is actually a Google Sheets spreadsheet validation_error = _validate_spreadsheet_file(input_data.spreadsheet) if validation_error: yield "error", validation_error return + + if not input_data.row: + yield "error", "Row data is required" + return + try: service = _build_sheets_service(credentials) - - # Determine which values to use and convert if needed - processed_values: list[list[str]] - - # Validate that only one format is provided - if input_data.values and input_data.dict_values: - raise ValueError("Provide either 'values' or 'dict_values', not both") - - if input_data.dict_values: - if not input_data.headers: - raise ValueError("Headers are required when using dict_values") - processed_values = _convert_dicts_to_rows( - input_data.dict_values, input_data.headers - ) - elif input_data.values: - processed_values = input_data.values - else: - raise ValueError("Either 'values' or 'dict_values' must be provided") - result = await asyncio.to_thread( - self._append_sheet, + self._append_row, service, input_data.spreadsheet.id, input_data.sheet_name, - processed_values, - input_data.range, + input_data.row, input_data.value_input_option, - input_data.insert_data_option, ) yield "result", result - # Output the GoogleDriveFile for chaining (preserves credentials_id) yield "spreadsheet", GoogleDriveFile( id=input_data.spreadsheet.id, name=input_data.spreadsheet.name, @@ -677,40 +624,37 @@ class GoogleSheetsAppendBlock(Block): _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: - yield "error", f"Failed to append to Google Sheet: {str(e)}" + yield "error", f"Failed to append row: {str(e)}" - def _append_sheet( + def _append_row( self, service, spreadsheet_id: str, sheet_name: str, - values: list[list[str]], - range: str, + row: list[str], value_input_option: ValueInputOption, - insert_data_option: InsertDataOption, ) -> dict: - target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name) + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) formatted_sheet = format_sheet_name(target_sheet) - # If no range specified, use A1 to let Google Sheets find the next empty row with unlimited columns - # If range specified, use it to constrain columns (e.g., A:A for column A only) - if range: - append_range = f"{formatted_sheet}!{range}" - else: - # Use A1 as starting point for unlimited columns - Google Sheets will find next empty row - append_range = f"{formatted_sheet}!A1" - body = {"values": values} - return ( + append_range = f"{formatted_sheet}!A1" + body = {"values": [row]} # Wrap single row in 
list for API + result = ( service.spreadsheets() .values() .append( spreadsheetId=spreadsheet_id, range=append_range, valueInputOption=value_input_option.value, - insertDataOption=insert_data_option.value, + insertDataOption="INSERT_ROWS", body=body, ) .execute() ) + return { + "updatedCells": result.get("updates", {}).get("updatedCells", 0), + "updatedRows": result.get("updates", {}).get("updatedRows", 0), + "updatedColumns": result.get("updates", {}).get("updatedColumns", 0), + } class GoogleSheetsClearBlock(Block): @@ -2206,3 +2150,4382 @@ class GoogleSheetsUpdateCellBlock(Block): "updatedRows": result.get("updatedRows", 0), "updatedColumns": result.get("updatedColumns", 0), } + + +class FilterOperator(str, Enum): + EQUALS = "equals" + NOT_EQUALS = "not_equals" + CONTAINS = "contains" + NOT_CONTAINS = "not_contains" + GREATER_THAN = "greater_than" + LESS_THAN = "less_than" + GREATER_THAN_OR_EQUAL = "greater_than_or_equal" + LESS_THAN_OR_EQUAL = "less_than_or_equal" + IS_EMPTY = "is_empty" + IS_NOT_EMPTY = "is_not_empty" + + +class SortOrder(str, Enum): + ASCENDING = "ascending" + DESCENDING = "descending" + + +def _column_letter_to_index(letter: str) -> int: + """Convert column letter (A, B, ..., Z, AA, AB, ...) to 0-based index.""" + result = 0 + for char in letter.upper(): + result = result * 26 + (ord(char) - ord("A") + 1) + return result - 1 + + +def _index_to_column_letter(index: int) -> str: + """Convert 0-based column index to column letter (A, B, ..., Z, AA, AB, ...).""" + result = "" + index += 1 # Convert to 1-based + while index > 0: + index, remainder = divmod(index - 1, 26) + result = chr(ord("A") + remainder) + result + return result + + +def _apply_filter( + cell_value: str, + filter_value: str, + operator: FilterOperator, + match_case: bool, +) -> bool: + """Apply a filter condition to a cell value.""" + if operator == FilterOperator.IS_EMPTY: + return cell_value.strip() == "" + if operator == FilterOperator.IS_NOT_EMPTY: + return cell_value.strip() != "" + + # For comparison operators, apply case sensitivity + compare_cell = cell_value if match_case else cell_value.lower() + compare_filter = filter_value if match_case else filter_value.lower() + + if operator == FilterOperator.EQUALS: + return compare_cell == compare_filter + elif operator == FilterOperator.NOT_EQUALS: + return compare_cell != compare_filter + elif operator == FilterOperator.CONTAINS: + return compare_filter in compare_cell + elif operator == FilterOperator.NOT_CONTAINS: + return compare_filter not in compare_cell + elif operator in ( + FilterOperator.GREATER_THAN, + FilterOperator.LESS_THAN, + FilterOperator.GREATER_THAN_OR_EQUAL, + FilterOperator.LESS_THAN_OR_EQUAL, + ): + # Try numeric comparison first + try: + num_cell = float(cell_value) + num_filter = float(filter_value) + if operator == FilterOperator.GREATER_THAN: + return num_cell > num_filter + elif operator == FilterOperator.LESS_THAN: + return num_cell < num_filter + elif operator == FilterOperator.GREATER_THAN_OR_EQUAL: + return num_cell >= num_filter + elif operator == FilterOperator.LESS_THAN_OR_EQUAL: + return num_cell <= num_filter + except ValueError: + # Fall back to string comparison + if operator == FilterOperator.GREATER_THAN: + return compare_cell > compare_filter + elif operator == FilterOperator.LESS_THAN: + return compare_cell < compare_filter + elif operator == FilterOperator.GREATER_THAN_OR_EQUAL: + return compare_cell >= compare_filter + elif operator == FilterOperator.LESS_THAN_OR_EQUAL: + return compare_cell <= 
compare_filter + + return False + + +class GoogleSheetsFilterRowsBlock(Block): + """Filter rows in a Google Sheet based on column conditions.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + filter_column: str = SchemaField( + description="Column to filter on (header name or column letter like 'A', 'B')", + placeholder="Status", + ) + filter_value: str = SchemaField( + description="Value to filter by (not used for is_empty/is_not_empty operators)", + default="", + ) + operator: FilterOperator = SchemaField( + description="Filter comparison operator", + default=FilterOperator.EQUALS, + ) + match_case: bool = SchemaField( + description="Whether to match case in comparisons", + default=False, + ) + include_header: bool = SchemaField( + description="Include header row in output", + default=True, + ) + + class Output(BlockSchemaOutput): + rows: list[list[str]] = SchemaField( + description="Filtered rows (including header if requested)", + ) + row_indices: list[int] = SchemaField( + description="Original 1-based row indices of matching rows (useful for deletion)", + ) + count: int = SchemaField( + description="Number of matching rows (excluding header)", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="582195c2-ccee-4fc2-b646-18f72eb9906c", + description="Filter rows in a Google Sheet based on a column condition. 
Returns matching rows and their indices.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsFilterRowsBlock.Input, + output_schema=GoogleSheetsFilterRowsBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "filter_column": "Status", + "filter_value": "Active", + "operator": FilterOperator.EQUALS, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "rows", + [ + ["Name", "Status", "Score"], + ["Alice", "Active", "85"], + ["Charlie", "Active", "92"], + ], + ), + ("row_indices", [2, 4]), + ("count", 2), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_filter_rows": lambda *args, **kwargs: { + "rows": [ + ["Name", "Status", "Score"], + ["Alice", "Active", "85"], + ["Charlie", "Active", "92"], + ], + "row_indices": [2, 4], + "count": 2, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._filter_rows, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.filter_column, + input_data.filter_value, + input_data.operator, + input_data.match_case, + input_data.include_header, + ) + yield "rows", result["rows"] + yield "row_indices", result["row_indices"] + yield "count", result["count"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to filter rows: {str(e)}" + + def _filter_rows( + self, + service, + spreadsheet_id: str, + sheet_name: str, + filter_column: str, + filter_value: str, + operator: FilterOperator, + match_case: bool, + include_header: bool, + ) -> dict: + # Resolve sheet name + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + + # Read all data from the sheet + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=formatted_sheet) + .execute() + ) + all_rows = result.get("values", []) + + if not all_rows: + return {"rows": [], "row_indices": [], "count": 0} + + header = all_rows[0] + data_rows = all_rows[1:] + + # Determine filter column index + filter_col_idx = -1 + + # First, try to match against header names (handles "ID", "No", "To", etc.) 
+ for idx, col_name in enumerate(header): + if (match_case and col_name == filter_column) or ( + not match_case and col_name.lower() == filter_column.lower() + ): + filter_col_idx = idx + break + + # If no header match and looks like a column letter (A, B, AA, etc.), try that + if filter_col_idx < 0 and filter_column.isalpha() and len(filter_column) <= 2: + filter_col_idx = _column_letter_to_index(filter_column) + # Validate column letter is within data range + if filter_col_idx >= len(header): + raise ValueError( + f"Column '{filter_column}' (index {filter_col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + + if filter_col_idx < 0: + raise ValueError( + f"Column '{filter_column}' not found. Available columns: {header}" + ) + + # Filter rows + filtered_rows = [] + row_indices = [] + + for row_idx, row in enumerate(data_rows): + # Get cell value (handle rows shorter than filter column) + cell_value = row[filter_col_idx] if filter_col_idx < len(row) else "" + + if _apply_filter(str(cell_value), filter_value, operator, match_case): + filtered_rows.append(row) + row_indices.append(row_idx + 2) # +2 for 1-based index and header + + # Prepare output + output_rows = [] + if include_header: + output_rows.append(header) + output_rows.extend(filtered_rows) + + return { + "rows": output_rows, + "row_indices": row_indices, + "count": len(filtered_rows), + } + + +class GoogleSheetsLookupRowBlock(Block): + """Look up a row by matching a value in a column (VLOOKUP-style).""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + lookup_column: str = SchemaField( + description="Column to search in (header name or column letter)", + placeholder="ID", + ) + lookup_value: str = SchemaField( + description="Value to search for", + ) + return_columns: list[str] = SchemaField( + description="Columns to return (header names or letters). Empty = all columns.", + default=[], + ) + match_case: bool = SchemaField( + description="Whether to match case", + default=False, + ) + + class Output(BlockSchemaOutput): + row: list[str] = SchemaField( + description="The matching row (all or selected columns)", + ) + row_dict: dict[str, str] = SchemaField( + description="The matching row as a dictionary (header: value)", + ) + row_index: int = SchemaField( + description="1-based row index of the match", + ) + found: bool = SchemaField( + description="Whether a match was found", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="e58c0bad-6597-400c-9548-d151ec428ffc", + description="Look up a row by finding a value in a specific column. 
Returns the first matching row and optionally specific columns.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsLookupRowBlock.Input, + output_schema=GoogleSheetsLookupRowBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "lookup_column": "ID", + "lookup_value": "123", + "return_columns": ["Name", "Email"], + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("row", ["Alice", "alice@example.com"]), + ("row_dict", {"Name": "Alice", "Email": "alice@example.com"}), + ("row_index", 2), + ("found", True), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_lookup_row": lambda *args, **kwargs: { + "row": ["Alice", "alice@example.com"], + "row_dict": {"Name": "Alice", "Email": "alice@example.com"}, + "row_index": 2, + "found": True, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._lookup_row, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.lookup_column, + input_data.lookup_value, + input_data.return_columns, + input_data.match_case, + ) + yield "row", result["row"] + yield "row_dict", result["row_dict"] + yield "row_index", result["row_index"] + yield "found", result["found"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to lookup row: {str(e)}" + + def _lookup_row( + self, + service, + spreadsheet_id: str, + sheet_name: str, + lookup_column: str, + lookup_value: str, + return_columns: list[str], + match_case: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=formatted_sheet) + .execute() + ) + all_rows = result.get("values", []) + + if not all_rows: + return {"row": [], "row_dict": {}, "row_index": 0, "found": False} + + header = all_rows[0] + data_rows = all_rows[1:] + + # Find lookup column index - first try header name match, then column letter + lookup_col_idx = -1 + for idx, col_name in enumerate(header): + if (match_case and col_name == lookup_column) or ( + not match_case and col_name.lower() == lookup_column.lower() + ): + lookup_col_idx = idx + break + + # If no header match and looks like a column 
letter, try that + if lookup_col_idx < 0 and lookup_column.isalpha() and len(lookup_column) <= 2: + lookup_col_idx = _column_letter_to_index(lookup_column) + # Validate column letter is within data range + if lookup_col_idx >= len(header): + raise ValueError( + f"Column '{lookup_column}' (index {lookup_col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + + if lookup_col_idx < 0: + raise ValueError( + f"Lookup column '{lookup_column}' not found. Available: {header}" + ) + + # Find return column indices - first try header name match, then column letter + return_col_indices = [] + return_col_headers = [] + if return_columns: + for ret_col in return_columns: + found = False + # First try header name match + for idx, col_name in enumerate(header): + if (match_case and col_name == ret_col) or ( + not match_case and col_name.lower() == ret_col.lower() + ): + return_col_indices.append(idx) + return_col_headers.append(col_name) + found = True + break + + # If no header match and looks like a column letter, try that + if not found and ret_col.isalpha() and len(ret_col) <= 2: + idx = _column_letter_to_index(ret_col) + # Validate column letter is within data range + if idx >= len(header): + raise ValueError( + f"Return column '{ret_col}' (index {idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + return_col_indices.append(idx) + return_col_headers.append(header[idx]) + found = True + + if not found: + raise ValueError( + f"Return column '{ret_col}' not found. Available: {header}" + ) + else: + return_col_indices = list(range(len(header))) + return_col_headers = header + + # Search for matching row + compare_value = lookup_value if match_case else lookup_value.lower() + + for row_idx, row in enumerate(data_rows): + cell_value = row[lookup_col_idx] if lookup_col_idx < len(row) else "" + compare_cell = str(cell_value) if match_case else str(cell_value).lower() + + if compare_cell == compare_value: + # Found a match - extract requested columns + result_row = [] + result_dict = {} + for i, col_idx in enumerate(return_col_indices): + value = row[col_idx] if col_idx < len(row) else "" + result_row.append(value) + result_dict[return_col_headers[i]] = value + + return { + "row": result_row, + "row_dict": result_dict, + "row_index": row_idx + 2, + "found": True, + } + + return {"row": [], "row_dict": {}, "row_index": 0, "found": False} + + +class GoogleSheetsDeleteRowsBlock(Block): + """Delete rows from a Google Sheet by row indices.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + row_indices: list[int] = SchemaField( + description="1-based row indices to delete (e.g., [2, 5, 7])", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the delete operation", + ) + deleted_count: int = SchemaField( + description="Number of rows deleted", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + 
id="24bcd490-b02d-44c6-847d-b62a2319f5eb", + description="Delete specific rows from a Google Sheet by their row indices. Works well with FilterRowsBlock output.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsDeleteRowsBlock.Input, + output_schema=GoogleSheetsDeleteRowsBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "row_indices": [2, 5], + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ("deleted_count", 2), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_delete_rows": lambda *args, **kwargs: { + "success": True, + "deleted_count": 2, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._delete_rows, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.row_indices, + ) + yield "result", {"success": True} + yield "deleted_count", result["deleted_count"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to delete rows: {str(e)}" + + def _delete_rows( + self, + service, + spreadsheet_id: str, + sheet_name: str, + row_indices: list[int], + ) -> dict: + if not row_indices: + return {"success": True, "deleted_count": 0} + + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Deduplicate and sort row indices in descending order to delete from bottom to top + # Deduplication prevents deleting wrong rows if same index appears multiple times + sorted_indices = sorted(set(row_indices), reverse=True) + + # Build delete requests + requests = [] + for row_idx in sorted_indices: + # Convert to 0-based index + start_idx = row_idx - 1 + requests.append( + { + "deleteDimension": { + "range": { + "sheetId": sheet_id, + "dimension": "ROWS", + "startIndex": start_idx, + "endIndex": start_idx + 1, + } + } + } + ) + + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": requests} + ).execute() + + return {"success": True, "deleted_count": len(sorted_indices)} + + +class GoogleSheetsGetColumnBlock(Block): + """Get all values from a specific column by header name.""" + + class 
Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + column: str = SchemaField( + description="Column to extract (header name or column letter like 'A', 'B')", + placeholder="Email", + ) + include_header: bool = SchemaField( + description="Include the header in output", + default=False, + ) + skip_empty: bool = SchemaField( + description="Skip empty cells", + default=False, + ) + + class Output(BlockSchemaOutput): + values: list[str] = SchemaField( + description="List of values from the column", + ) + count: int = SchemaField( + description="Number of values (excluding header if not included)", + ) + column_index: int = SchemaField( + description="0-based column index", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="108d911f-e109-47fb-addc-2259792ee850", + description="Extract all values from a specific column. Useful for getting a list of emails, IDs, or any single field.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsGetColumnBlock.Input, + output_schema=GoogleSheetsGetColumnBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "column": "Email", + "include_header": False, + "skip_empty": True, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "values", + ["alice@example.com", "bob@example.com", "charlie@example.com"], + ), + ("count", 3), + ("column_index", 2), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_get_column": lambda *args, **kwargs: { + "values": [ + "alice@example.com", + "bob@example.com", + "charlie@example.com", + ], + "count": 3, + "column_index": 2, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._get_column, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.column, + input_data.include_header, + input_data.skip_empty, + ) + yield "values", result["values"] + yield "count", result["count"] + yield "column_index", result["column_index"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + 
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to get column: {str(e)}" + + def _get_column( + self, + service, + spreadsheet_id: str, + sheet_name: str, + column: str, + include_header: bool, + skip_empty: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=formatted_sheet) + .execute() + ) + all_rows = result.get("values", []) + + if not all_rows: + return {"values": [], "count": 0, "column_index": -1} + + header = all_rows[0] + + # Find column index - first try header name match, then column letter + col_idx = -1 + for idx, col_name in enumerate(header): + if col_name.lower() == column.lower(): + col_idx = idx + break + + # If no header match and looks like a column letter, try that + if col_idx < 0 and column.isalpha() and len(column) <= 2: + col_idx = _column_letter_to_index(column) + # Validate column letter is within data range + if col_idx >= len(header): + raise ValueError( + f"Column '{column}' (index {col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + + if col_idx < 0: + raise ValueError( + f"Column '{column}' not found. Available columns: {header}" + ) + + # Extract column values + values = [] + start_row = 0 if include_header else 1 + + for row in all_rows[start_row:]: + value = row[col_idx] if col_idx < len(row) else "" + if skip_empty and not str(value).strip(): + continue + values.append(str(value)) + + return {"values": values, "count": len(values), "column_index": col_idx} + + +class GoogleSheetsSortBlock(Block): + """Sort a Google Sheet by one or more columns.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + sort_column: str = SchemaField( + description="Primary column to sort by (header name or column letter)", + placeholder="Date", + ) + sort_order: SortOrder = SchemaField( + description="Sort order for primary column", + default=SortOrder.ASCENDING, + ) + secondary_column: str = SchemaField( + description="Secondary column to sort by (optional)", + default="", + ) + secondary_order: SortOrder = SchemaField( + description="Sort order for secondary column", + default=SortOrder.ASCENDING, + ) + has_header: bool = SchemaField( + description="Whether the data has a header row (header won't be sorted)", + default=True, + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the sort operation", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="a265bd84-c93b-459d-bbe0-94e6addaa38f", + description="Sort a Google Sheet by one or two columns. 
The sheet is sorted in-place.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsSortBlock.Input, + output_schema=GoogleSheetsSortBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "sort_column": "Score", + "sort_order": SortOrder.DESCENDING, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_sort_sheet": lambda *args, **kwargs: {"success": True}, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._sort_sheet, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.sort_column, + input_data.sort_order, + input_data.secondary_column, + input_data.secondary_order, + input_data.has_header, + ) + yield "result", result + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to sort sheet: {str(e)}" + + def _sort_sheet( + self, + service, + spreadsheet_id: str, + sheet_name: str, + sort_column: str, + sort_order: SortOrder, + secondary_column: str, + secondary_order: SortOrder, + has_header: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Get sheet metadata to find column indices and grid properties + meta = service.spreadsheets().get(spreadsheetId=spreadsheet_id).execute() + sheet_meta = None + for sheet in meta.get("sheets", []): + if sheet.get("properties", {}).get("sheetId") == sheet_id: + sheet_meta = sheet + break + + if not sheet_meta: + raise ValueError(f"Could not find metadata for sheet '{target_sheet}'") + + grid_props = sheet_meta.get("properties", {}).get("gridProperties", {}) + row_count = grid_props.get("rowCount", 1000) + col_count = grid_props.get("columnCount", 26) + + # Get header to resolve column names + formatted_sheet = format_sheet_name(target_sheet) + header_result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=f"{formatted_sheet}!1:1") + .execute() + ) + header = ( + header_result.get("values", [[]])[0] if header_result.get("values") else [] + ) + + # Find primary sort column index - 
first try header name match, then column letter + sort_col_idx = -1 + for idx, col_name in enumerate(header): + if col_name.lower() == sort_column.lower(): + sort_col_idx = idx + break + + # If no header match and looks like a column letter, try that + if sort_col_idx < 0 and sort_column.isalpha() and len(sort_column) <= 2: + sort_col_idx = _column_letter_to_index(sort_column) + # Validate column letter is within data range + if sort_col_idx >= len(header): + raise ValueError( + f"Sort column '{sort_column}' (index {sort_col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + + if sort_col_idx < 0: + raise ValueError( + f"Sort column '{sort_column}' not found. Available: {header}" + ) + + # Build sort specs + sort_specs = [ + { + "dimensionIndex": sort_col_idx, + "sortOrder": ( + "ASCENDING" if sort_order == SortOrder.ASCENDING else "DESCENDING" + ), + } + ] + + # Add secondary sort if specified + if secondary_column: + sec_col_idx = -1 + # First try header name match + for idx, col_name in enumerate(header): + if col_name.lower() == secondary_column.lower(): + sec_col_idx = idx + break + + # If no header match and looks like a column letter, try that + if ( + sec_col_idx < 0 + and secondary_column.isalpha() + and len(secondary_column) <= 2 + ): + sec_col_idx = _column_letter_to_index(secondary_column) + # Validate column letter is within data range + if sec_col_idx >= len(header): + raise ValueError( + f"Secondary sort column '{secondary_column}' (index {sec_col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + + if sec_col_idx < 0: + raise ValueError( + f"Secondary sort column '{secondary_column}' not found. Available: {header}" + ) + + sort_specs.append( + { + "dimensionIndex": sec_col_idx, + "sortOrder": ( + "ASCENDING" + if secondary_order == SortOrder.ASCENDING + else "DESCENDING" + ), + } + ) + + # Build sort range request + start_row = 1 if has_header else 0 # Skip header if present + + request = { + "sortRange": { + "range": { + "sheetId": sheet_id, + "startRowIndex": start_row, + "endRowIndex": row_count, + "startColumnIndex": 0, + "endColumnIndex": col_count, + }, + "sortSpecs": sort_specs, + } + } + + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": [request]} + ).execute() + + return {"success": True} + + +class GoogleSheetsGetUniqueValuesBlock(Block): + """Get unique values from a column.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + column: str = SchemaField( + description="Column to get unique values from (header name or column letter)", + placeholder="Category", + ) + include_count: bool = SchemaField( + description="Include count of each unique value", + default=False, + ) + sort_by_count: bool = SchemaField( + description="Sort results by count (most frequent first)", + default=False, + ) + + class Output(BlockSchemaOutput): + values: list[str] = SchemaField( + description="List of unique values", + ) + counts: dict[str, int] = SchemaField( + description="Count of each unique value (if include_count is True)", + ) + 
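+        # Note: counts is returned as an empty dict when include_count is False
+        # (see _get_unique_values below).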
total_unique: int = SchemaField( + description="Total number of unique values", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="0f296c0b-6b6e-4280-b96e-ae1459b98dff", + description="Get unique values from a column. Useful for building dropdown options or finding distinct categories.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsGetUniqueValuesBlock.Input, + output_schema=GoogleSheetsGetUniqueValuesBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "column": "Status", + "include_count": True, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("values", ["Active", "Inactive", "Pending"]), + ("counts", {"Active": 5, "Inactive": 3, "Pending": 2}), + ("total_unique", 3), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_get_unique_values": lambda *args, **kwargs: { + "values": ["Active", "Inactive", "Pending"], + "counts": {"Active": 5, "Inactive": 3, "Pending": 2}, + "total_unique": 3, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._get_unique_values, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.column, + input_data.include_count, + input_data.sort_by_count, + ) + yield "values", result["values"] + yield "counts", result["counts"] + yield "total_unique", result["total_unique"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to get unique values: {str(e)}" + + def _get_unique_values( + self, + service, + spreadsheet_id: str, + sheet_name: str, + column: str, + include_count: bool, + sort_by_count: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=formatted_sheet) + .execute() + ) + all_rows = result.get("values", []) + + if not all_rows: + return {"values": [], "counts": {}, "total_unique": 0} + + header = all_rows[0] + + # Find column index - first try header name match, then column letter + col_idx = -1 + for idx, col_name in 
enumerate(header): + if col_name.lower() == column.lower(): + col_idx = idx + break + + # If no header match and looks like a column letter, try that + if col_idx < 0 and column.isalpha() and len(column) <= 2: + col_idx = _column_letter_to_index(column) + # Validate column letter is within data range + if col_idx >= len(header): + raise ValueError( + f"Column '{column}' (index {col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + + if col_idx < 0: + raise ValueError( + f"Column '{column}' not found. Available columns: {header}" + ) + + # Count values + value_counts: dict[str, int] = {} + for row in all_rows[1:]: # Skip header + value = str(row[col_idx]) if col_idx < len(row) else "" + if value.strip(): # Skip empty values + value_counts[value] = value_counts.get(value, 0) + 1 + + # Sort values + if sort_by_count: + sorted_items = sorted(value_counts.items(), key=lambda x: -x[1]) + unique_values = [item[0] for item in sorted_items] + else: + unique_values = sorted(value_counts.keys()) + + return { + "values": unique_values, + "counts": value_counts if include_count else {}, + "total_unique": len(unique_values), + } + + +class GoogleSheetsInsertRowBlock(Block): + """Insert a single row at a specific position in a Google Sheet.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + row: list[str] = SchemaField( + description="Row values to insert (e.g., ['Alice', 'alice@example.com', '25'])", + ) + row_index: int = SchemaField( + description="1-based row index where to insert (existing rows shift down)", + placeholder="2", + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + value_input_option: ValueInputOption = SchemaField( + description="How values are interpreted. USER_ENTERED: parsed like typed input (e.g., '=SUM(A1:A5)' becomes a formula, '1/2/2024' becomes a date). RAW: stored as-is without parsing.", + default=ValueInputOption.USER_ENTERED, + advanced=True, + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField(description="Result of the insert operation") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="03eda5df-8080-4ed1-bfdf-212f543d657e", + description="Insert a single row at a specific position. 
Existing rows shift down.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsInsertRowBlock.Input, + output_schema=GoogleSheetsInsertRowBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "row": ["New", "Row", "Data"], + "row_index": 3, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_insert_row": lambda *args, **kwargs: {"success": True}, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + if not input_data.row: + yield "error", "Row data is required" + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._insert_row, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.row_index, + input_data.row, + input_data.value_input_option, + ) + yield "result", result + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to insert row: {str(e)}" + + def _insert_row( + self, + service, + spreadsheet_id: str, + sheet_name: str, + row_index: int, + row: list[str], + value_input_option: ValueInputOption, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + start_idx = row_index - 1 # Convert to 0-based + + # First, insert an empty row + insert_request = { + "insertDimension": { + "range": { + "sheetId": sheet_id, + "dimension": "ROWS", + "startIndex": start_idx, + "endIndex": start_idx + 1, + }, + "inheritFromBefore": start_idx > 0, + } + } + + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": [insert_request]} + ).execute() + + # Then, write the values + formatted_sheet = format_sheet_name(target_sheet) + write_range = f"{formatted_sheet}!A{row_index}" + + service.spreadsheets().values().update( + spreadsheetId=spreadsheet_id, + range=write_range, + valueInputOption=value_input_option.value, + body={"values": [row]}, # Wrap single row in list for API + ).execute() + + return {"success": True} + + +class GoogleSheetsAddColumnBlock(Block): + """Add a new column with a header to a Google Sheet.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = 
GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + header: str = SchemaField( + description="Header name for the new column", + placeholder="New Column", + ) + position: str = SchemaField( + description="Where to add: 'end' for last column, or column letter (e.g., 'C') to insert before", + default="end", + ) + default_value: str = SchemaField( + description="Default value to fill in all data rows (optional). Requires existing data rows.", + default="", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the operation", + ) + column_letter: str = SchemaField( + description="Letter of the new column (e.g., 'D')", + ) + column_index: int = SchemaField( + description="0-based index of the new column", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="cac51050-fc9e-4e63-987a-66c2ba2a127b", + description="Add a new column with a header. Can add at the end or insert at a specific position.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsAddColumnBlock.Input, + output_schema=GoogleSheetsAddColumnBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "header": "New Status", + "position": "end", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ("column_letter", "D"), + ("column_index", 3), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_add_column": lambda *args, **kwargs: { + "success": True, + "column_letter": "D", + "column_index": 3, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._add_column, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.header, + input_data.position, + input_data.default_value, + ) + yield "result", {"success": True} + yield "column_letter", result["column_letter"] + yield "column_index", result["column_index"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + 
_credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to add column: {str(e)}" + + def _add_column( + self, + service, + spreadsheet_id: str, + sheet_name: str, + header: str, + position: str, + default_value: str, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Get current data to determine column count and row count + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=formatted_sheet) + .execute() + ) + all_rows = result.get("values", []) + current_col_count = max(len(row) for row in all_rows) if all_rows else 0 + row_count = len(all_rows) + + # Determine target column index + if position.lower() == "end": + col_idx = current_col_count + elif position.isalpha() and len(position) <= 2: + col_idx = _column_letter_to_index(position) + # Insert a new column at this position + insert_request = { + "insertDimension": { + "range": { + "sheetId": sheet_id, + "dimension": "COLUMNS", + "startIndex": col_idx, + "endIndex": col_idx + 1, + }, + "inheritFromBefore": col_idx > 0, + } + } + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": [insert_request]} + ).execute() + else: + raise ValueError( + f"Invalid position: '{position}'. Use 'end' or a column letter." + ) + + col_letter = _index_to_column_letter(col_idx) + + # Write header + header_range = f"{formatted_sheet}!{col_letter}1" + service.spreadsheets().values().update( + spreadsheetId=spreadsheet_id, + range=header_range, + valueInputOption="USER_ENTERED", + body={"values": [[header]]}, + ).execute() + + # Fill default value if provided and there are data rows + if default_value and row_count > 1: + values_to_fill = [[default_value]] * (row_count - 1) + data_range = f"{formatted_sheet}!{col_letter}2:{col_letter}{row_count}" + service.spreadsheets().values().update( + spreadsheetId=spreadsheet_id, + range=data_range, + valueInputOption="USER_ENTERED", + body={"values": values_to_fill}, + ).execute() + + return { + "success": True, + "column_letter": col_letter, + "column_index": col_idx, + } + + +class GoogleSheetsGetRowCountBlock(Block): + """Get the number of rows in a Google Sheet.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + include_header: bool = SchemaField( + description="Include header row in count", + default=True, + ) + count_empty: bool = SchemaField( + description="Count rows with only empty cells", + default=False, + ) + + class Output(BlockSchemaOutput): + total_rows: int = SchemaField( + description="Total number of rows", + ) + data_rows: int = SchemaField( + description="Number of data rows (excluding header)", + ) + last_row: int = SchemaField( + description="1-based index of the last row with data", + ) + column_count: int = SchemaField( + description="Number of columns", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + 
error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="080cc84b-a94a-4fb4-90e3-dcc55ee783af", + description="Get row count and dimensions of a Google Sheet. Useful for knowing where data ends.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsGetRowCountBlock.Input, + output_schema=GoogleSheetsGetRowCountBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("total_rows", 101), + ("data_rows", 100), + ("last_row", 101), + ("column_count", 5), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_get_row_count": lambda *args, **kwargs: { + "total_rows": 101, + "data_rows": 100, + "last_row": 101, + "column_count": 5, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._get_row_count, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.include_header, + input_data.count_empty, + ) + yield "total_rows", result["total_rows"] + yield "data_rows", result["data_rows"] + yield "last_row", result["last_row"] + yield "column_count", result["column_count"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to get row count: {str(e)}" + + def _get_row_count( + self, + service, + spreadsheet_id: str, + sheet_name: str, + include_header: bool, + count_empty: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=formatted_sheet) + .execute() + ) + all_rows = result.get("values", []) + + if not all_rows: + return { + "total_rows": 0, + "data_rows": 0, + "last_row": 0, + "column_count": 0, + } + + # Count non-empty rows + if count_empty: + total_rows = len(all_rows) + last_row = total_rows + else: + # Find last row with actual data + last_row = 0 + for idx, row in enumerate(all_rows): + if any(str(cell).strip() for cell in row): + last_row = idx + 1 + total_rows = last_row + + data_rows = total_rows - 1 if total_rows > 0 else 0 + if not include_header: + total_rows = data_rows + + column_count = max(len(row) for row in all_rows) if all_rows 
else 0 + + return { + "total_rows": total_rows, + "data_rows": data_rows, + "last_row": last_row, + "column_count": column_count, + } + + +class GoogleSheetsRemoveDuplicatesBlock(Block): + """Remove duplicate rows from a Google Sheet based on specified columns.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + columns: list[str] = SchemaField( + description="Columns to check for duplicates (header names or letters). Empty = all columns.", + default=[], + ) + keep: str = SchemaField( + description="Which duplicate to keep: 'first' or 'last'", + default="first", + ) + match_case: bool = SchemaField( + description="Whether to match case when comparing", + default=False, + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the operation", + ) + removed_count: int = SchemaField( + description="Number of duplicate rows removed", + ) + remaining_rows: int = SchemaField( + description="Number of rows remaining", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="6eb50ff7-205b-400e-8ecc-1ce8d50075be", + description="Remove duplicate rows based on specified columns. Keeps either the first or last occurrence.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsRemoveDuplicatesBlock.Input, + output_schema=GoogleSheetsRemoveDuplicatesBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "columns": ["Email"], + "keep": "first", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ("removed_count", 5), + ("remaining_rows", 95), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_remove_duplicates": lambda *args, **kwargs: { + "success": True, + "removed_count": 5, + "remaining_rows": 95, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._remove_duplicates, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.columns, + input_data.keep, + input_data.match_case, + ) + yield "result", {"success": True} + yield "removed_count", result["removed_count"] + yield "remaining_rows", result["remaining_rows"] + yield "spreadsheet", 
GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to remove duplicates: {str(e)}" + + def _remove_duplicates( + self, + service, + spreadsheet_id: str, + sheet_name: str, + columns: list[str], + keep: str, + match_case: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Read all data + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=formatted_sheet) + .execute() + ) + all_rows = result.get("values", []) + + if len(all_rows) <= 1: # Only header or empty + return { + "success": True, + "removed_count": 0, + "remaining_rows": len(all_rows), + } + + header = all_rows[0] + data_rows = all_rows[1:] + + # Determine which column indices to use for comparison + # First try header name match, then column letter + if columns: + col_indices = [] + for col in columns: + found = False + # First try header name match + for idx, col_name in enumerate(header): + if col_name.lower() == col.lower(): + col_indices.append(idx) + found = True + break + + # If no header match and looks like a column letter, try that + if not found and col.isalpha() and len(col) <= 2: + col_idx = _column_letter_to_index(col) + # Validate column letter is within data range + if col_idx >= len(header): + raise ValueError( + f"Column '{col}' (index {col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + col_indices.append(col_idx) + found = True + + if not found: + raise ValueError( + f"Column '{col}' not found in sheet. 
" + f"Available columns: {', '.join(header)}" + ) + else: + col_indices = list(range(len(header))) + + # Find duplicates + seen: dict[tuple, int] = {} + rows_to_delete: list[int] = [] + + for row_idx, row in enumerate(data_rows): + # Build key from specified columns + key_parts = [] + for col_idx in col_indices: + value = str(row[col_idx]) if col_idx < len(row) else "" + if not match_case: + value = value.lower() + key_parts.append(value) + key = tuple(key_parts) + + if key in seen: + if keep == "first": + # Delete this row (keep the first one we saw) + rows_to_delete.append(row_idx + 2) # +2 for 1-based and header + else: + # Delete the previous row, then update seen to keep this one + prev_row = seen[key] + rows_to_delete.append(prev_row) + seen[key] = row_idx + 2 + else: + seen[key] = row_idx + 2 + + if not rows_to_delete: + return { + "success": True, + "removed_count": 0, + "remaining_rows": len(all_rows), + } + + # Sort in descending order to delete from bottom to top + rows_to_delete = sorted(set(rows_to_delete), reverse=True) + + # Delete rows + requests = [] + for row_idx in rows_to_delete: + start_idx = row_idx - 1 + requests.append( + { + "deleteDimension": { + "range": { + "sheetId": sheet_id, + "dimension": "ROWS", + "startIndex": start_idx, + "endIndex": start_idx + 1, + } + } + } + ) + + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": requests} + ).execute() + + remaining = len(all_rows) - len(rows_to_delete) + return { + "success": True, + "removed_count": len(rows_to_delete), + "remaining_rows": remaining, + } + + +class GoogleSheetsUpdateRowBlock(Block): + """Update a specific row by index with new values.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + row_index: int = SchemaField( + description="1-based row index to update", + ) + values: list[str] = SchemaField( + description="New values for the row (in column order)", + default=[], + ) + dict_values: dict[str, str] = SchemaField( + description="Values as dict with column headers as keys (alternative to values)", + default={}, + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the update operation", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="b8a934d5-fca0-4be3-9fc2-a99bf63bd385", + description="Update a specific row by its index. 
Can use list or dict format for values.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsUpdateRowBlock.Input, + output_schema=GoogleSheetsUpdateRowBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "row_index": 5, + "dict_values": {"Name": "Updated Name", "Status": "Active"}, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True, "updatedCells": 2}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_update_row": lambda *args, **kwargs: { + "success": True, + "updatedCells": 2, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + if not input_data.values and not input_data.dict_values: + yield "error", "Either values or dict_values must be provided" + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._update_row, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.row_index, + input_data.values, + input_data.dict_values, + ) + yield "result", result + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to update row: {str(e)}" + + def _update_row( + self, + service, + spreadsheet_id: str, + sheet_name: str, + row_index: int, + values: list[str], + dict_values: dict[str, str], + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + + if dict_values: + # Get header to map column names to indices + header_result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=f"{formatted_sheet}!1:1") + .execute() + ) + header = ( + header_result.get("values", [[]])[0] + if header_result.get("values") + else [] + ) + + # Get current row values + row_range = f"{formatted_sheet}!{row_index}:{row_index}" + current_result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=row_range) + .execute() + ) + current_row = ( + current_result.get("values", [[]])[0] + if current_result.get("values") + else [] + ) + + # Extend current row to match header length + while len(current_row) < len(header): + current_row.append("") + + # Update specific columns from dict - validate all column names first + for col_name in dict_values.keys(): + found = False + for h in header: + if h.lower() == 
col_name.lower(): + found = True + break + if not found: + raise ValueError( + f"Column '{col_name}' not found in sheet. " + f"Available columns: {', '.join(header)}" + ) + + # Now apply updates + updated_count = 0 + for col_name, value in dict_values.items(): + for idx, h in enumerate(header): + if h.lower() == col_name.lower(): + current_row[idx] = value + updated_count += 1 + break + + values = current_row + else: + updated_count = len(values) + + # Write the row + write_range = f"{formatted_sheet}!A{row_index}" + service.spreadsheets().values().update( + spreadsheetId=spreadsheet_id, + range=write_range, + valueInputOption="USER_ENTERED", + body={"values": [values]}, + ).execute() + + return {"success": True, "updatedCells": updated_count} + + +class GoogleSheetsGetRowBlock(Block): + """Get a specific row by its index.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + row_index: int = SchemaField( + description="1-based row index to retrieve", + ) + + class Output(BlockSchemaOutput): + row: list[str] = SchemaField( + description="The row values as a list", + ) + row_dict: dict[str, str] = SchemaField( + description="The row as a dictionary (header: value)", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="c4be9390-2431-4682-9769-7025b22a5fa7", + description="Get a specific row by its index. 
Returns both list and dict formats.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsGetRowBlock.Input, + output_schema=GoogleSheetsGetRowBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "row_index": 3, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("row", ["Alice", "Active", "85"]), + ("row_dict", {"Name": "Alice", "Status": "Active", "Score": "85"}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_get_row": lambda *args, **kwargs: { + "row": ["Alice", "Active", "85"], + "row_dict": {"Name": "Alice", "Status": "Active", "Score": "85"}, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._get_row, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.row_index, + ) + yield "row", result["row"] + yield "row_dict", result["row_dict"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to get row: {str(e)}" + + def _get_row( + self, + service, + spreadsheet_id: str, + sheet_name: str, + row_index: int, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + + # Get header + header_result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=f"{formatted_sheet}!1:1") + .execute() + ) + header = ( + header_result.get("values", [[]])[0] if header_result.get("values") else [] + ) + + # Get the row + row_range = f"{formatted_sheet}!{row_index}:{row_index}" + row_result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=row_range) + .execute() + ) + row = row_result.get("values", [[]])[0] if row_result.get("values") else [] + + # Build dictionary + row_dict = {} + for idx, h in enumerate(header): + row_dict[h] = row[idx] if idx < len(row) else "" + + return {"row": row, "row_dict": row_dict} + + +class GoogleSheetsDeleteColumnBlock(Block): + """Delete a column from a Google Sheet.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + 
allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + column: str = SchemaField( + description="Column to delete (header name or column letter like 'A', 'B')", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the delete operation", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="59b266b6-5cce-4661-a1d3-c417e64d68e9", + description="Delete a column by header name or column letter.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsDeleteColumnBlock.Input, + output_schema=GoogleSheetsDeleteColumnBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "column": "Status", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_delete_column": lambda *args, **kwargs: {"success": True}, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._delete_column, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.column, + ) + yield "result", result + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to delete column: {str(e)}" + + def _delete_column( + self, + service, + spreadsheet_id: str, + sheet_name: str, + column: str, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Get header to find column by name or validate column letter + header_result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=f"{formatted_sheet}!1:1") + .execute() + ) + header = ( + header_result.get("values", [[]])[0] if header_result.get("values") else [] + ) + + # Find column index - first try header name match, then column letter + col_idx = -1 + for idx, h in 
enumerate(header): + if h.lower() == column.lower(): + col_idx = idx + break + + # If no header match and looks like a column letter, try that + if col_idx < 0 and column.isalpha() and len(column) <= 2: + col_idx = _column_letter_to_index(column) + # Validate column letter is within data range + if col_idx >= len(header): + raise ValueError( + f"Column '{column}' (index {col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + + if col_idx < 0: + raise ValueError(f"Column '{column}' not found") + + # Delete the column + request = { + "deleteDimension": { + "range": { + "sheetId": sheet_id, + "dimension": "COLUMNS", + "startIndex": col_idx, + "endIndex": col_idx + 1, + } + } + } + + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": [request]} + ).execute() + + return {"success": True} + + +class GoogleSheetsCreateNamedRangeBlock(Block): + """Create a named range in a Google Sheet.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + name: str = SchemaField( + description="Name for the range (e.g., 'SalesData', 'CustomerList')", + placeholder="MyNamedRange", + ) + range: str = SchemaField( + description="Cell range in A1 notation (e.g., 'A1:D10', 'B2:B100')", + placeholder="A1:D10", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the operation", + ) + named_range_id: str = SchemaField( + description="ID of the created named range", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="a2707376-8016-494b-98c4-d0e2752ab9cb", + description="Create a named range to reference cells by name instead of A1 notation.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsCreateNamedRangeBlock.Input, + output_schema=GoogleSheetsCreateNamedRangeBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "name": "SalesData", + "range": "A1:D10", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ("named_range_id", "nr_12345"), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_create_named_range": lambda *args, **kwargs: { + "success": True, + "named_range_id": "nr_12345", + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = 
_validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._create_named_range, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.name, + input_data.range, + ) + yield "result", {"success": True} + yield "named_range_id", result["named_range_id"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to create named range: {str(e)}" + + def _create_named_range( + self, + service, + spreadsheet_id: str, + sheet_name: str, + name: str, + range_str: str, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Parse range to get grid coordinates + # Handle both "A1:D10" and "Sheet1!A1:D10" formats + if "!" in range_str: + range_str = range_str.split("!")[1] + + # Parse start and end cells + match = re.match(r"([A-Z]+)(\d+):([A-Z]+)(\d+)", range_str.upper()) + if not match: + raise ValueError(f"Invalid range format: {range_str}") + + start_col = _column_letter_to_index(match.group(1)) + start_row = int(match.group(2)) - 1 # 0-based + end_col = _column_letter_to_index(match.group(3)) + 1 # exclusive + end_row = int(match.group(4)) # exclusive (already 1-based becomes 0-based + 1) + + request = { + "addNamedRange": { + "namedRange": { + "name": name, + "range": { + "sheetId": sheet_id, + "startRowIndex": start_row, + "endRowIndex": end_row, + "startColumnIndex": start_col, + "endColumnIndex": end_col, + }, + } + } + } + + result = ( + service.spreadsheets() + .batchUpdate(spreadsheetId=spreadsheet_id, body={"requests": [request]}) + .execute() + ) + + # Extract the named range ID from the response + named_range_id = "" + replies = result.get("replies", []) + if replies and "addNamedRange" in replies[0]: + named_range_id = replies[0]["addNamedRange"]["namedRange"]["namedRangeId"] + + return {"success": True, "named_range_id": named_range_id} + + +class GoogleSheetsListNamedRangesBlock(Block): + """List all named ranges in a Google Sheet.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + + class Output(BlockSchemaOutput): + named_ranges: list[dict] = SchemaField( + description="List of named ranges with name, id, and range info", + ) + count: int = SchemaField( + description="Number of named ranges", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="b81a9d27-3997-4860-9303-cc68086db13a", + description="List all named ranges in a spreadsheet.", + categories={BlockCategory.DATA}, + 
input_schema=GoogleSheetsListNamedRangesBlock.Input, + output_schema=GoogleSheetsListNamedRangesBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "named_ranges", + [ + {"name": "SalesData", "id": "nr_1", "range": "Sheet1!A1:D10"}, + { + "name": "CustomerList", + "id": "nr_2", + "range": "Sheet1!E1:F50", + }, + ], + ), + ("count", 2), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_list_named_ranges": lambda *args, **kwargs: { + "named_ranges": [ + {"name": "SalesData", "id": "nr_1", "range": "Sheet1!A1:D10"}, + { + "name": "CustomerList", + "id": "nr_2", + "range": "Sheet1!E1:F50", + }, + ], + "count": 2, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._list_named_ranges, + service, + input_data.spreadsheet.id, + ) + yield "named_ranges", result["named_ranges"] + yield "count", result["count"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to list named ranges: {str(e)}" + + def _list_named_ranges( + self, + service, + spreadsheet_id: str, + ) -> dict: + # Get spreadsheet metadata including named ranges + meta = service.spreadsheets().get(spreadsheetId=spreadsheet_id).execute() + + named_ranges_list = [] + named_ranges = meta.get("namedRanges", []) + + # Get sheet names for reference + sheets = { + sheet["properties"]["sheetId"]: sheet["properties"]["title"] + for sheet in meta.get("sheets", []) + } + + for nr in named_ranges: + range_info = nr.get("range", {}) + sheet_id = range_info.get("sheetId", 0) + sheet_name = sheets.get(sheet_id, "Sheet1") + + # Convert grid range back to A1 notation + start_col = _index_to_column_letter(range_info.get("startColumnIndex", 0)) + end_col = _index_to_column_letter(range_info.get("endColumnIndex", 1) - 1) + start_row = range_info.get("startRowIndex", 0) + 1 + end_row = range_info.get("endRowIndex", 1) + + range_str = f"{sheet_name}!{start_col}{start_row}:{end_col}{end_row}" + + named_ranges_list.append( + { + "name": nr.get("name", ""), + "id": nr.get("namedRangeId", ""), + "range": range_str, + } + ) + + return {"named_ranges": named_ranges_list, "count": len(named_ranges_list)} + + +class GoogleSheetsAddDropdownBlock(Block): + """Add a dropdown 
(data validation) to cells.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + range: str = SchemaField( + description="Cell range to add dropdown to (e.g., 'B2:B100')", + placeholder="B2:B100", + ) + options: list[str] = SchemaField( + description="List of dropdown options", + ) + strict: bool = SchemaField( + description="Reject input not in the list", + default=True, + ) + show_dropdown: bool = SchemaField( + description="Show dropdown arrow in cells", + default=True, + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the operation", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="725431c9-71ba-4fce-b829-5a3e495a8a88", + description="Add a dropdown list (data validation) to cells. Useful for enforcing valid inputs.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsAddDropdownBlock.Input, + output_schema=GoogleSheetsAddDropdownBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "range": "B2:B100", + "options": ["Active", "Inactive", "Pending"], + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_add_dropdown": lambda *args, **kwargs: {"success": True}, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + if not input_data.options: + yield "error", "Options list cannot be empty" + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._add_dropdown, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.range, + input_data.options, + input_data.strict, + input_data.show_dropdown, + ) + yield "result", result + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to add dropdown: {str(e)}" + + def _add_dropdown( + 
self, + service, + spreadsheet_id: str, + sheet_name: str, + range_str: str, + options: list[str], + strict: bool, + show_dropdown: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Parse range + if "!" in range_str: + range_str = range_str.split("!")[1] + + match = re.match(r"([A-Z]+)(\d+):([A-Z]+)(\d+)", range_str.upper()) + if not match: + raise ValueError(f"Invalid range format: {range_str}") + + start_col = _column_letter_to_index(match.group(1)) + start_row = int(match.group(2)) - 1 + end_col = _column_letter_to_index(match.group(3)) + 1 + end_row = int(match.group(4)) + + # Build condition values + condition_values = [{"userEnteredValue": opt} for opt in options] + + request = { + "setDataValidation": { + "range": { + "sheetId": sheet_id, + "startRowIndex": start_row, + "endRowIndex": end_row, + "startColumnIndex": start_col, + "endColumnIndex": end_col, + }, + "rule": { + "condition": { + "type": "ONE_OF_LIST", + "values": condition_values, + }, + "strict": strict, + "showCustomUi": show_dropdown, + }, + } + } + + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": [request]} + ).execute() + + return {"success": True} + + +class GoogleSheetsCopyToSpreadsheetBlock(Block): + """Copy a sheet to another spreadsheet.""" + + class Input(BlockSchemaInput): + source_spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Source Spreadsheet", + description="Select the source spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + source_sheet_name: str = SchemaField( + description="Sheet to copy (optional, defaults to first sheet)", + default="", + ) + destination_spreadsheet_id: str = SchemaField( + description="ID of the destination spreadsheet", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the copy operation", + ) + new_sheet_id: int = SchemaField( + description="ID of the new sheet in the destination", + ) + new_sheet_name: str = SchemaField( + description="Name of the new sheet", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The source spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="740eec3f-2b51-4e95-b87f-22ce2acafdfa", + description="Copy a sheet from one spreadsheet to another.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsCopyToSpreadsheetBlock.Input, + output_schema=GoogleSheetsCopyToSpreadsheetBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "source_spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Source Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "destination_spreadsheet_id": "dest_spreadsheet_id_123", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ("new_sheet_id", 12345), + ("new_sheet_name", "Copy of Sheet1"), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Source Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + 
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_copy_to_spreadsheet": lambda *args, **kwargs: { + "success": True, + "new_sheet_id": 12345, + "new_sheet_name": "Copy of Sheet1", + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.source_spreadsheet: + yield "error", "No source spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.source_spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._copy_to_spreadsheet, + service, + input_data.source_spreadsheet.id, + input_data.source_sheet_name, + input_data.destination_spreadsheet_id, + ) + yield "result", {"success": True} + yield "new_sheet_id", result["new_sheet_id"] + yield "new_sheet_name", result["new_sheet_name"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.source_spreadsheet.id, + name=input_data.source_spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.source_spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.source_spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to copy sheet: {str(e)}" + + def _copy_to_spreadsheet( + self, + service, + source_spreadsheet_id: str, + source_sheet_name: str, + destination_spreadsheet_id: str, + ) -> dict: + target_sheet = resolve_sheet_name( + service, source_spreadsheet_id, source_sheet_name or None + ) + sheet_id = sheet_id_by_name(service, source_spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + result = ( + service.spreadsheets() + .sheets() + .copyTo( + spreadsheetId=source_spreadsheet_id, + sheetId=sheet_id, + body={"destinationSpreadsheetId": destination_spreadsheet_id}, + ) + .execute() + ) + + return { + "success": True, + "new_sheet_id": result.get("sheetId", 0), + "new_sheet_name": result.get("title", ""), + } + + +class GoogleSheetsProtectRangeBlock(Block): + """Protect a range from editing.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + range: str = SchemaField( + description="Cell range to protect (e.g., 'A1:D10'). 
Leave empty to protect entire sheet.", + default="", + ) + description: str = SchemaField( + description="Description for the protected range", + default="Protected by automation", + ) + warning_only: bool = SchemaField( + description="Show warning but allow editing (vs blocking completely)", + default=False, + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the operation", + ) + protection_id: int = SchemaField( + description="ID of the protection", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="d0e4f5d1-76e7-4082-9be8-e656ec1f432d", + description="Protect a cell range or entire sheet from editing.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsProtectRangeBlock.Input, + output_schema=GoogleSheetsProtectRangeBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "range": "A1:D10", + "description": "Header row protection", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ("protection_id", 12345), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_protect_range": lambda *args, **kwargs: { + "success": True, + "protection_id": 12345, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._protect_range, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.range, + input_data.description, + input_data.warning_only, + ) + yield "result", {"success": True} + yield "protection_id", result["protection_id"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to protect range: {str(e)}" + + def _protect_range( + self, + service, + spreadsheet_id: str, + sheet_name: str, + range_str: str, + description: str, + warning_only: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + protected_range: dict = {"sheetId": sheet_id} + + if range_str: + # Parse specific range + if "!" 
in range_str: + range_str = range_str.split("!")[1] + + match = re.match(r"([A-Z]+)(\d+):([A-Z]+)(\d+)", range_str.upper()) + if not match: + raise ValueError(f"Invalid range format: {range_str}") + + protected_range["startRowIndex"] = int(match.group(2)) - 1 + protected_range["endRowIndex"] = int(match.group(4)) + protected_range["startColumnIndex"] = _column_letter_to_index( + match.group(1) + ) + protected_range["endColumnIndex"] = ( + _column_letter_to_index(match.group(3)) + 1 + ) + + request = { + "addProtectedRange": { + "protectedRange": { + "range": protected_range, + "description": description, + "warningOnly": warning_only, + } + } + } + + result = ( + service.spreadsheets() + .batchUpdate(spreadsheetId=spreadsheet_id, body={"requests": [request]}) + .execute() + ) + + protection_id = 0 + replies = result.get("replies", []) + if replies and "addProtectedRange" in replies[0]: + protection_id = replies[0]["addProtectedRange"]["protectedRange"][ + "protectedRangeId" + ] + + return {"success": True, "protection_id": protection_id} + + +class GoogleSheetsExportCsvBlock(Block): + """Export a sheet as CSV data.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="The spreadsheet to export from", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + default="", + description="Name of the sheet to export. Defaults to first sheet.", + ) + include_headers: bool = SchemaField( + default=True, + description="Include the first row (headers) in the CSV output", + ) + + class Output(BlockSchemaOutput): + csv_data: str = SchemaField(description="The sheet data as CSV string") + row_count: int = SchemaField(description="Number of rows exported") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining" + ) + error: str = SchemaField(description="Error message if export failed") + + def __init__(self): + super().__init__( + id="2617e68a-43b3-441f-8b11-66bb041105b8", + description="Export a Google Sheet as CSV data", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsExportCsvBlock.Input, + output_schema=GoogleSheetsExportCsvBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("csv_data", "Name,Email,Status\nJohn,john@test.com,Active\n"), + ("row_count", 2), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_export_csv": lambda *args, **kwargs: { + "csv_data": "Name,Email,Status\nJohn,john@test.com,Active\n", + "row_count": 2, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", 
validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._export_csv, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.include_headers, + ) + yield "csv_data", result["csv_data"] + yield "row_count", result["row_count"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to export CSV: {str(e)}" + + def _export_csv( + self, + service, + spreadsheet_id: str, + sheet_name: str, + include_headers: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + range_name = f"'{target_sheet}'" + + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=range_name) + .execute() + ) + + rows = result.get("values", []) + + # Skip header row if not including headers + if not include_headers and rows: + rows = rows[1:] + + output = io.StringIO() + writer = csv.writer(output) + for row in rows: + writer.writerow(row) + + csv_data = output.getvalue() + return {"csv_data": csv_data, "row_count": len(rows)} + + +class GoogleSheetsImportCsvBlock(Block): + """Import CSV data into a sheet.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="The spreadsheet to import into", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + csv_data: str = SchemaField(description="CSV data to import") + sheet_name: str = SchemaField( + default="", + description="Name of the sheet. 
Defaults to first sheet.", + ) + start_cell: str = SchemaField( + default="A1", + description="Cell to start importing at (e.g., A1, B2)", + ) + clear_existing: bool = SchemaField( + default=False, + description="Clear existing data before importing", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField(description="Import result") + rows_imported: int = SchemaField(description="Number of rows imported") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining" + ) + error: str = SchemaField(description="Error message if import failed") + + def __init__(self): + super().__init__( + id="cb992884-1ff2-450a-8f1b-7650d63e3aa0", + description="Import CSV data into a Google Sheet", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsImportCsvBlock.Input, + output_schema=GoogleSheetsImportCsvBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "csv_data": "Name,Email,Status\nJohn,john@test.com,Active\n", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ("rows_imported", 2), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_import_csv": lambda *args, **kwargs: { + "success": True, + "rows_imported": 2, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._import_csv, + service, + input_data.spreadsheet.id, + input_data.csv_data, + input_data.sheet_name, + input_data.start_cell, + input_data.clear_existing, + ) + yield "result", {"success": True} + yield "rows_imported", result["rows_imported"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to import CSV: {str(e)}" + + def _import_csv( + self, + service, + spreadsheet_id: str, + csv_data: str, + sheet_name: str, + start_cell: str, + clear_existing: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + + # Parse CSV data + reader = csv.reader(io.StringIO(csv_data)) + rows = list(reader) + + if not rows: + return {"success": True, "rows_imported": 0} + + # Clear existing data if requested + if clear_existing: + service.spreadsheets().values().clear( + spreadsheetId=spreadsheet_id, + range=f"'{target_sheet}'", + ).execute() + + # Write data + range_name = 
f"'{target_sheet}'!{start_cell}" + service.spreadsheets().values().update( + spreadsheetId=spreadsheet_id, + range=range_name, + valueInputOption="RAW", + body={"values": rows}, + ).execute() + + return {"success": True, "rows_imported": len(rows)} + + +class GoogleSheetsAddNoteBlock(Block): + """Add a note (comment) to a cell.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="The spreadsheet to add note to", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + cell: str = SchemaField( + description="Cell to add note to (e.g., A1, B2)", + ) + note: str = SchemaField(description="Note text to add") + sheet_name: str = SchemaField( + default="", + description="Name of the sheet. Defaults to first sheet.", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField(description="Result of the operation") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining" + ) + error: str = SchemaField(description="Error message if operation failed") + + def __init__(self): + super().__init__( + id="774ac529-74f9-41da-bbba-6a06a51a5d7e", + description="Add a note to a cell in a Google Sheet", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsAddNoteBlock.Input, + output_schema=GoogleSheetsAddNoteBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "cell": "A1", + "note": "This is a test note", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_add_note": lambda *args, **kwargs: {"success": True}, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + await asyncio.to_thread( + self._add_note, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.cell, + input_data.note, + ) + yield "result", {"success": True} + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to add note: {str(e)}" + + def _add_note( + self, + service, + spreadsheet_id: str, + sheet_name: str, + cell: str, + note: str, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + sheet_id = 
sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Parse cell reference + match = re.match(r"([A-Z]+)(\d+)", cell.upper()) + if not match: + raise ValueError(f"Invalid cell reference: {cell}") + + col_index = _column_letter_to_index(match.group(1)) + row_index = int(match.group(2)) - 1 + + request = { + "updateCells": { + "rows": [{"values": [{"note": note}]}], + "fields": "note", + "start": { + "sheetId": sheet_id, + "rowIndex": row_index, + "columnIndex": col_index, + }, + } + } + + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": [request]} + ).execute() + + return {"success": True} + + +class GoogleSheetsGetNotesBlock(Block): + """Get notes from cells in a range.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="The spreadsheet to get notes from", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + range: str = SchemaField( + default="A1:Z100", + description="Range to get notes from (e.g., A1:B10)", + ) + sheet_name: str = SchemaField( + default="", + description="Name of the sheet. Defaults to first sheet.", + ) + + class Output(BlockSchemaOutput): + notes: list[dict] = SchemaField(description="List of notes with cell and text") + count: int = SchemaField(description="Number of notes found") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining" + ) + error: str = SchemaField(description="Error message if operation failed") + + def __init__(self): + super().__init__( + id="fa16834f-fff4-4d7a-9f7f-531ced90492b", + description="Get notes from cells in a Google Sheet", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsGetNotesBlock.Input, + output_schema=GoogleSheetsGetNotesBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "notes", + [ + {"cell": "A1", "note": "Header note"}, + {"cell": "B2", "note": "Data note"}, + ], + ), + ("count", 2), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_get_notes": lambda *args, **kwargs: { + "notes": [ + {"cell": "A1", "note": "Header note"}, + {"cell": "B2", "note": "Data note"}, + ], + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._get_notes, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.range, + ) + notes = result["notes"] + yield "notes", notes + yield "count", len(notes) + 
yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to get notes: {str(e)}" + + def _get_notes( + self, + service, + spreadsheet_id: str, + sheet_name: str, + range_str: str, + ) -> dict: + + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + full_range = f"'{target_sheet}'!{range_str}" + + # Get spreadsheet data including notes + result = ( + service.spreadsheets() + .get( + spreadsheetId=spreadsheet_id, + ranges=[full_range], + includeGridData=True, + ) + .execute() + ) + + notes = [] + sheets = result.get("sheets", []) + + for sheet in sheets: + data = sheet.get("data", []) + for grid_data in data: + start_row = grid_data.get("startRow", 0) + start_col = grid_data.get("startColumn", 0) + row_data = grid_data.get("rowData", []) + + for row_idx, row in enumerate(row_data): + values = row.get("values", []) + for col_idx, cell in enumerate(values): + note = cell.get("note") + if note: + col_letter = _index_to_column_letter(start_col + col_idx) + cell_ref = f"{col_letter}{start_row + row_idx + 1}" + notes.append({"cell": cell_ref, "note": note}) + + return {"notes": notes} + + +class GoogleSheetsShareSpreadsheetBlock(Block): + """Share a spreadsheet with specific users or make it accessible.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="The spreadsheet to share", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + email: str = SchemaField( + default="", + description="Email address to share with. 
Leave empty for link sharing.", + ) + role: ShareRole = SchemaField( + default=ShareRole.READER, + description="Permission role for the user", + ) + send_notification: bool = SchemaField( + default=True, + description="Send notification email to the user", + ) + message: str = SchemaField( + default="", + description="Optional message to include in notification email", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField(description="Result of the share operation") + share_link: str = SchemaField(description="Link to the spreadsheet") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining" + ) + error: str = SchemaField(description="Error message if share failed") + + def __init__(self): + super().__init__( + id="3e47e8ac-511a-4eb6-89c5-a6bcedc4236f", + description="Share a Google Spreadsheet with users or get shareable link", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsShareSpreadsheetBlock.Input, + output_schema=GoogleSheetsShareSpreadsheetBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "email": "test@example.com", + "role": "reader", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ( + "share_link", + "https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + ), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_share_spreadsheet": lambda *args, **kwargs: { + "success": True, + "share_link": "https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_drive_service(credentials) + result = await asyncio.to_thread( + self._share_spreadsheet, + service, + input_data.spreadsheet.id, + input_data.email, + input_data.role, + input_data.send_notification, + input_data.message, + ) + yield "result", {"success": True} + yield "share_link", result["share_link"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to share spreadsheet: {str(e)}" + + def _share_spreadsheet( + self, + service, + spreadsheet_id: str, + email: str, + role: ShareRole, + send_notification: bool, + message: str, + ) -> dict: + share_link = f"https://docs.google.com/spreadsheets/d/{spreadsheet_id}/edit" + + if email: + # Share with 
specific user + permission = {"type": "user", "role": role.value, "emailAddress": email} + + kwargs: dict = { + "fileId": spreadsheet_id, + "body": permission, + "sendNotificationEmail": send_notification, + } + if message: + kwargs["emailMessage"] = message + + service.permissions().create(**kwargs).execute() + else: + # Get shareable link - use reader or commenter only (writer not allowed for "anyone") + link_role = "reader" if role == ShareRole.WRITER else role.value + permission = {"type": "anyone", "role": link_role} + service.permissions().create( + fileId=spreadsheet_id, body=permission + ).execute() + share_link += "?usp=sharing" + + return {"success": True, "share_link": share_link} + + +class GoogleSheetsSetPublicAccessBlock(Block): + """Make a spreadsheet publicly accessible or private.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="The spreadsheet to modify access for", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + public: bool = SchemaField( + default=True, + description="True to make public, False to make private", + ) + role: PublicAccessRole = SchemaField( + default=PublicAccessRole.READER, + description="Permission role for public access", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField(description="Result of the operation") + share_link: str = SchemaField(description="Link to the spreadsheet") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining" + ) + error: str = SchemaField(description="Error message if operation failed") + + def __init__(self): + super().__init__( + id="d08d46cd-088b-4ba7-a545-45050f33b889", + description="Make a Google Spreadsheet public or private", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsSetPublicAccessBlock.Input, + output_schema=GoogleSheetsSetPublicAccessBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "public": True, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True, "is_public": True}), + ( + "share_link", + "https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit?usp=sharing", + ), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_set_public_access": lambda *args, **kwargs: { + "success": True, + "is_public": True, + "share_link": "https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit?usp=sharing", + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_drive_service(credentials) + result = await asyncio.to_thread( + 
self._set_public_access, + service, + input_data.spreadsheet.id, + input_data.public, + input_data.role, + ) + yield "result", {"success": True, "is_public": result["is_public"]} + yield "share_link", result["share_link"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to set public access: {str(e)}" + + def _set_public_access( + self, + service, + spreadsheet_id: str, + public: bool, + role: PublicAccessRole, + ) -> dict: + share_link = f"https://docs.google.com/spreadsheets/d/{spreadsheet_id}/edit" + + if public: + # Make public + permission = {"type": "anyone", "role": role.value} + service.permissions().create( + fileId=spreadsheet_id, body=permission + ).execute() + share_link += "?usp=sharing" + else: + # Make private - remove 'anyone' permissions + permissions = service.permissions().list(fileId=spreadsheet_id).execute() + for perm in permissions.get("permissions", []): + if perm.get("type") == "anyone": + service.permissions().delete( + fileId=spreadsheet_id, permissionId=perm["id"] + ).execute() + + return {"success": True, "is_public": public, "share_link": share_link} From 979d7c3b74d8fdfc5faf2d3d4147fcd529378a4d Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Tue, 9 Dec 2025 15:25:43 -0600 Subject: [PATCH 39/58] feat(blocks): Add 4 new GitHub webhook trigger blocks (#11588) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I want to be able to automate some actions on social media or our server in response to actions on GitHub. ### Changes πŸ—οΈ Add trigger blocks for common GitHub events to enable OSS automation: - GithubReleaseTriggerBlock: Trigger on release events (published, etc.)
- GithubStarTriggerBlock: Trigger on star events for milestone celebrations - GithubIssuesTriggerBlock: Trigger on issue events for triage/notifications - GithubDiscussionTriggerBlock: Trigger on discussion events for Q&A sync ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Test Stars - [x] Test Discussions - [x] Test Issues - [x] Test Release πŸ€– Generated with [Claude Code](https://claude.com/claude-code) --------- Co-authored-by: Claude Opus 4.5 Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../example_payloads/discussion.created.json | 108 +++++ .../example_payloads/issues.opened.json | 112 +++++ .../example_payloads/release.published.json | 97 +++++ .../github/example_payloads/star.created.json | 53 +++ .../backend/backend/blocks/github/triggers.py | 388 ++++++++++++++++++ .../frontend/src/tests/pages/build.page.ts | 50 ++- 6 files changed, 799 insertions(+), 9 deletions(-) create mode 100644 autogpt_platform/backend/backend/blocks/github/example_payloads/discussion.created.json create mode 100644 autogpt_platform/backend/backend/blocks/github/example_payloads/issues.opened.json create mode 100644 autogpt_platform/backend/backend/blocks/github/example_payloads/release.published.json create mode 100644 autogpt_platform/backend/backend/blocks/github/example_payloads/star.created.json diff --git a/autogpt_platform/backend/backend/blocks/github/example_payloads/discussion.created.json b/autogpt_platform/backend/backend/blocks/github/example_payloads/discussion.created.json new file mode 100644 index 0000000000..6b0d73dda3 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/example_payloads/discussion.created.json @@ -0,0 +1,108 @@ +{ + "action": "created", + "discussion": { + "repository_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT", + "category": { + "id": 12345678, + "node_id": "DIC_kwDOJKSTjM4CXXXX", + "repository_id": 614765452, + "emoji": ":pray:", + "name": "Q&A", + "description": "Ask the community for help", + "created_at": "2023-03-16T09:21:07Z", + "updated_at": "2023-03-16T09:21:07Z", + "slug": "q-a", + "is_answerable": true + }, + "answer_html_url": null, + "answer_chosen_at": null, + "answer_chosen_by": null, + "html_url": "https://github.com/Significant-Gravitas/AutoGPT/discussions/9999", + "id": 5000000001, + "node_id": "D_kwDOJKSTjM4AYYYY", + "number": 9999, + "title": "How do I configure custom blocks?", + "user": { + "login": "curious-user", + "id": 22222222, + "node_id": "MDQ6VXNlcjIyMjIyMjIy", + "avatar_url": "https://avatars.githubusercontent.com/u/22222222?v=4", + "url": "https://api.github.com/users/curious-user", + "html_url": "https://github.com/curious-user", + "type": "User", + "site_admin": false + }, + "state": "open", + "state_reason": null, + "locked": false, + "comments": 0, + "created_at": "2024-12-01T17:00:00Z", + "updated_at": "2024-12-01T17:00:00Z", + "author_association": "NONE", + "active_lock_reason": null, + "body": "## Question\n\nI'm trying to create a custom block for my specific use case. I've read the documentation but I'm not sure how to:\n\n1. Define the input/output schema\n2. Handle authentication\n3. 
Test my block locally\n\nCan someone point me to examples or provide guidance?\n\n## Environment\n\n- AutoGPT Platform version: latest\n- Python: 3.11", + "reactions": { + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/discussions/9999/reactions", + "total_count": 0, + "+1": 0, + "-1": 0, + "laugh": 0, + "hooray": 0, + "confused": 0, + "heart": 0, + "rocket": 0, + "eyes": 0 + }, + "timeline_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/discussions/9999/timeline" + }, + "repository": { + "id": 614765452, + "node_id": "R_kgDOJKSTjA", + "name": "AutoGPT", + "full_name": "Significant-Gravitas/AutoGPT", + "private": false, + "owner": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "url": "https://api.github.com/users/Significant-Gravitas", + "html_url": "https://github.com/Significant-Gravitas", + "type": "Organization", + "site_admin": false + }, + "html_url": "https://github.com/Significant-Gravitas/AutoGPT", + "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.", + "fork": false, + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT", + "created_at": "2023-03-16T09:21:07Z", + "updated_at": "2024-12-01T17:00:00Z", + "pushed_at": "2024-12-01T12:00:00Z", + "stargazers_count": 170000, + "watchers_count": 170000, + "language": "Python", + "has_discussions": true, + "forks_count": 45000, + "visibility": "public", + "default_branch": "master" + }, + "organization": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "url": "https://api.github.com/orgs/Significant-Gravitas", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "description": "" + }, + "sender": { + "login": "curious-user", + "id": 22222222, + "node_id": "MDQ6VXNlcjIyMjIyMjIy", + "avatar_url": "https://avatars.githubusercontent.com/u/22222222?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/curious-user", + "html_url": "https://github.com/curious-user", + "type": "User", + "site_admin": false + } +} diff --git a/autogpt_platform/backend/backend/blocks/github/example_payloads/issues.opened.json b/autogpt_platform/backend/backend/blocks/github/example_payloads/issues.opened.json new file mode 100644 index 0000000000..078d5da0be --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/example_payloads/issues.opened.json @@ -0,0 +1,112 @@ +{ + "action": "opened", + "issue": { + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345", + "repository_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT", + "labels_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/labels{/name}", + "comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/comments", + "events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/events", + "html_url": "https://github.com/Significant-Gravitas/AutoGPT/issues/12345", + "id": 2000000001, + "node_id": "I_kwDOJKSTjM5wXXXX", + "number": 12345, + "title": "Bug: Application crashes when processing large files", + "user": { + "login": "bug-reporter", + "id": 11111111, + "node_id": "MDQ6VXNlcjExMTExMTEx", + "avatar_url": "https://avatars.githubusercontent.com/u/11111111?v=4", + "url": "https://api.github.com/users/bug-reporter", + "html_url": "https://github.com/bug-reporter", + "type": "User", + "site_admin": false + 
}, + "labels": [ + { + "id": 5272676214, + "node_id": "LA_kwDOJKSTjM8AAAABOkandg", + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/bug", + "name": "bug", + "color": "d73a4a", + "default": true, + "description": "Something isn't working" + } + ], + "state": "open", + "locked": false, + "assignee": null, + "assignees": [], + "milestone": null, + "comments": 0, + "created_at": "2024-12-01T16:00:00Z", + "updated_at": "2024-12-01T16:00:00Z", + "closed_at": null, + "author_association": "NONE", + "active_lock_reason": null, + "body": "## Description\n\nWhen I try to process a file larger than 100MB, the application crashes with an out of memory error.\n\n## Steps to Reproduce\n\n1. Open the application\n2. Select a file larger than 100MB\n3. Click 'Process'\n4. Application crashes\n\n## Expected Behavior\n\nThe application should handle large files gracefully.\n\n## Environment\n\n- OS: Ubuntu 22.04\n- Python: 3.11\n- AutoGPT Version: 1.0.0", + "reactions": { + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/reactions", + "total_count": 0, + "+1": 0, + "-1": 0, + "laugh": 0, + "hooray": 0, + "confused": 0, + "heart": 0, + "rocket": 0, + "eyes": 0 + }, + "timeline_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/timeline", + "state_reason": null + }, + "repository": { + "id": 614765452, + "node_id": "R_kgDOJKSTjA", + "name": "AutoGPT", + "full_name": "Significant-Gravitas/AutoGPT", + "private": false, + "owner": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "url": "https://api.github.com/users/Significant-Gravitas", + "html_url": "https://github.com/Significant-Gravitas", + "type": "Organization", + "site_admin": false + }, + "html_url": "https://github.com/Significant-Gravitas/AutoGPT", + "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.", + "fork": false, + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT", + "created_at": "2023-03-16T09:21:07Z", + "updated_at": "2024-12-01T16:00:00Z", + "pushed_at": "2024-12-01T12:00:00Z", + "stargazers_count": 170000, + "watchers_count": 170000, + "language": "Python", + "forks_count": 45000, + "open_issues_count": 190, + "visibility": "public", + "default_branch": "master" + }, + "organization": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "url": "https://api.github.com/orgs/Significant-Gravitas", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "description": "" + }, + "sender": { + "login": "bug-reporter", + "id": 11111111, + "node_id": "MDQ6VXNlcjExMTExMTEx", + "avatar_url": "https://avatars.githubusercontent.com/u/11111111?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/bug-reporter", + "html_url": "https://github.com/bug-reporter", + "type": "User", + "site_admin": false + } +} diff --git a/autogpt_platform/backend/backend/blocks/github/example_payloads/release.published.json b/autogpt_platform/backend/backend/blocks/github/example_payloads/release.published.json new file mode 100644 index 0000000000..eac8461e59 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/example_payloads/release.published.json @@ -0,0 +1,97 @@ +{ + "action": "published", + "release": { + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases/123456789", + "assets_url": 
"https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases/123456789/assets", + "upload_url": "https://uploads.github.com/repos/Significant-Gravitas/AutoGPT/releases/123456789/assets{?name,label}", + "html_url": "https://github.com/Significant-Gravitas/AutoGPT/releases/tag/v1.0.0", + "id": 123456789, + "author": { + "login": "ntindle", + "id": 12345678, + "node_id": "MDQ6VXNlcjEyMzQ1Njc4", + "avatar_url": "https://avatars.githubusercontent.com/u/12345678?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/ntindle", + "html_url": "https://github.com/ntindle", + "type": "User", + "site_admin": false + }, + "node_id": "RE_kwDOJKSTjM4HWwAA", + "tag_name": "v1.0.0", + "target_commitish": "master", + "name": "AutoGPT Platform v1.0.0", + "draft": false, + "prerelease": false, + "created_at": "2024-12-01T10:00:00Z", + "published_at": "2024-12-01T12:00:00Z", + "assets": [ + { + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases/assets/987654321", + "id": 987654321, + "node_id": "RA_kwDOJKSTjM4HWwBB", + "name": "autogpt-v1.0.0.zip", + "label": "Release Package", + "content_type": "application/zip", + "state": "uploaded", + "size": 52428800, + "download_count": 0, + "created_at": "2024-12-01T11:30:00Z", + "updated_at": "2024-12-01T11:35:00Z", + "browser_download_url": "https://github.com/Significant-Gravitas/AutoGPT/releases/download/v1.0.0/autogpt-v1.0.0.zip" + } + ], + "tarball_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/tarball/v1.0.0", + "zipball_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/zipball/v1.0.0", + "body": "## What's New\n\n- Feature 1: Amazing new capability\n- Feature 2: Performance improvements\n- Bug fixes and stability improvements\n\n## Breaking Changes\n\nNone\n\n## Contributors\n\nThanks to all our contributors!" 
+ }, + "repository": { + "id": 614765452, + "node_id": "R_kgDOJKSTjA", + "name": "AutoGPT", + "full_name": "Significant-Gravitas/AutoGPT", + "private": false, + "owner": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "url": "https://api.github.com/users/Significant-Gravitas", + "html_url": "https://github.com/Significant-Gravitas", + "type": "Organization", + "site_admin": false + }, + "html_url": "https://github.com/Significant-Gravitas/AutoGPT", + "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.", + "fork": false, + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT", + "created_at": "2023-03-16T09:21:07Z", + "updated_at": "2024-12-01T12:00:00Z", + "pushed_at": "2024-12-01T12:00:00Z", + "stargazers_count": 170000, + "watchers_count": 170000, + "language": "Python", + "forks_count": 45000, + "visibility": "public", + "default_branch": "master" + }, + "organization": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "url": "https://api.github.com/orgs/Significant-Gravitas", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "description": "" + }, + "sender": { + "login": "ntindle", + "id": 12345678, + "node_id": "MDQ6VXNlcjEyMzQ1Njc4", + "avatar_url": "https://avatars.githubusercontent.com/u/12345678?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/ntindle", + "html_url": "https://github.com/ntindle", + "type": "User", + "site_admin": false + } +} diff --git a/autogpt_platform/backend/backend/blocks/github/example_payloads/star.created.json b/autogpt_platform/backend/backend/blocks/github/example_payloads/star.created.json new file mode 100644 index 0000000000..cb2dfd7522 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/example_payloads/star.created.json @@ -0,0 +1,53 @@ +{ + "action": "created", + "starred_at": "2024-12-01T15:30:00Z", + "repository": { + "id": 614765452, + "node_id": "R_kgDOJKSTjA", + "name": "AutoGPT", + "full_name": "Significant-Gravitas/AutoGPT", + "private": false, + "owner": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "url": "https://api.github.com/users/Significant-Gravitas", + "html_url": "https://github.com/Significant-Gravitas", + "type": "Organization", + "site_admin": false + }, + "html_url": "https://github.com/Significant-Gravitas/AutoGPT", + "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.", + "fork": false, + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT", + "created_at": "2023-03-16T09:21:07Z", + "updated_at": "2024-12-01T15:30:00Z", + "pushed_at": "2024-12-01T12:00:00Z", + "stargazers_count": 170001, + "watchers_count": 170001, + "language": "Python", + "forks_count": 45000, + "visibility": "public", + "default_branch": "master" + }, + "organization": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "url": "https://api.github.com/orgs/Significant-Gravitas", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "description": "" + }, + "sender": { + "login": "awesome-contributor", + "id": 98765432, + "node_id": "MDQ6VXNlcjk4NzY1NDMy", + "avatar_url": "https://avatars.githubusercontent.com/u/98765432?v=4", + "gravatar_id": "", + "url": 
"https://api.github.com/users/awesome-contributor", + "html_url": "https://github.com/awesome-contributor", + "type": "User", + "site_admin": false + } +} diff --git a/autogpt_platform/backend/backend/blocks/github/triggers.py b/autogpt_platform/backend/backend/blocks/github/triggers.py index f7215b8f8e..2fc568a468 100644 --- a/autogpt_platform/backend/backend/blocks/github/triggers.py +++ b/autogpt_platform/backend/backend/blocks/github/triggers.py @@ -159,3 +159,391 @@ class GithubPullRequestTriggerBlock(GitHubTriggerBase, Block): # --8<-- [end:GithubTriggerExample] + + +class GithubStarTriggerBlock(GitHubTriggerBase, Block): + """Trigger block for GitHub star events - useful for milestone celebrations.""" + + EXAMPLE_PAYLOAD_FILE = ( + Path(__file__).parent / "example_payloads" / "star.created.json" + ) + + class Input(GitHubTriggerBase.Input): + class EventsFilter(BaseModel): + """ + https://docs.github.com/en/webhooks/webhook-events-and-payloads#star + """ + + created: bool = False + deleted: bool = False + + events: EventsFilter = SchemaField( + title="Events", description="The star events to subscribe to" + ) + + class Output(GitHubTriggerBase.Output): + event: str = SchemaField( + description="The star event that triggered the webhook ('created' or 'deleted')" + ) + starred_at: str = SchemaField( + description="ISO timestamp when the repo was starred (empty if deleted)" + ) + stargazers_count: int = SchemaField( + description="Current number of stars on the repository" + ) + repository_name: str = SchemaField( + description="Full name of the repository (owner/repo)" + ) + repository_url: str = SchemaField(description="URL to the repository") + + def __init__(self): + from backend.integrations.webhooks.github import GithubWebhookType + + example_payload = json.loads( + self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8") + ) + + super().__init__( + id="551e0a35-100b-49b7-89b8-3031322239b6", + description="This block triggers on GitHub star events. 
" + "Useful for celebrating milestones (e.g., 1k, 10k stars) or tracking engagement.", + categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT}, + input_schema=GithubStarTriggerBlock.Input, + output_schema=GithubStarTriggerBlock.Output, + webhook_config=BlockWebhookConfig( + provider=ProviderName.GITHUB, + webhook_type=GithubWebhookType.REPO, + resource_format="{repo}", + event_filter_input="events", + event_format="star.{event}", + ), + test_input={ + "repo": "Significant-Gravitas/AutoGPT", + "events": {"created": True}, + "credentials": TEST_CREDENTIALS_INPUT, + "payload": example_payload, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("payload", example_payload), + ("triggered_by_user", example_payload["sender"]), + ("event", example_payload["action"]), + ("starred_at", example_payload.get("starred_at", "")), + ("stargazers_count", example_payload["repository"]["stargazers_count"]), + ("repository_name", example_payload["repository"]["full_name"]), + ("repository_url", example_payload["repository"]["html_url"]), + ], + ) + + async def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore + async for name, value in super().run(input_data, **kwargs): + yield name, value + yield "event", input_data.payload["action"] + yield "starred_at", input_data.payload.get("starred_at", "") + yield "stargazers_count", input_data.payload["repository"]["stargazers_count"] + yield "repository_name", input_data.payload["repository"]["full_name"] + yield "repository_url", input_data.payload["repository"]["html_url"] + + +class GithubReleaseTriggerBlock(GitHubTriggerBase, Block): + """Trigger block for GitHub release events - ideal for announcing new versions.""" + + EXAMPLE_PAYLOAD_FILE = ( + Path(__file__).parent / "example_payloads" / "release.published.json" + ) + + class Input(GitHubTriggerBase.Input): + class EventsFilter(BaseModel): + """ + https://docs.github.com/en/webhooks/webhook-events-and-payloads#release + """ + + published: bool = False + unpublished: bool = False + created: bool = False + edited: bool = False + deleted: bool = False + prereleased: bool = False + released: bool = False + + events: EventsFilter = SchemaField( + title="Events", description="The release events to subscribe to" + ) + + class Output(GitHubTriggerBase.Output): + event: str = SchemaField( + description="The release event that triggered the webhook (e.g., 'published')" + ) + release: dict = SchemaField(description="The full release object") + release_url: str = SchemaField(description="URL to the release page") + tag_name: str = SchemaField(description="The release tag name (e.g., 'v1.0.0')") + release_name: str = SchemaField(description="Human-readable release name") + body: str = SchemaField(description="Release notes/description") + prerelease: bool = SchemaField(description="Whether this is a prerelease") + draft: bool = SchemaField(description="Whether this is a draft release") + assets: list = SchemaField(description="List of release assets/files") + + def __init__(self): + from backend.integrations.webhooks.github import GithubWebhookType + + example_payload = json.loads( + self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8") + ) + + super().__init__( + id="2052dd1b-74e1-46ac-9c87-c7a0e057b60b", + description="This block triggers on GitHub release events. 
" + "Perfect for automating announcements to Discord, Twitter, or other platforms.", + categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT}, + input_schema=GithubReleaseTriggerBlock.Input, + output_schema=GithubReleaseTriggerBlock.Output, + webhook_config=BlockWebhookConfig( + provider=ProviderName.GITHUB, + webhook_type=GithubWebhookType.REPO, + resource_format="{repo}", + event_filter_input="events", + event_format="release.{event}", + ), + test_input={ + "repo": "Significant-Gravitas/AutoGPT", + "events": {"published": True}, + "credentials": TEST_CREDENTIALS_INPUT, + "payload": example_payload, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("payload", example_payload), + ("triggered_by_user", example_payload["sender"]), + ("event", example_payload["action"]), + ("release", example_payload["release"]), + ("release_url", example_payload["release"]["html_url"]), + ("tag_name", example_payload["release"]["tag_name"]), + ("release_name", example_payload["release"]["name"]), + ("body", example_payload["release"]["body"]), + ("prerelease", example_payload["release"]["prerelease"]), + ("draft", example_payload["release"]["draft"]), + ("assets", example_payload["release"]["assets"]), + ], + ) + + async def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore + async for name, value in super().run(input_data, **kwargs): + yield name, value + release = input_data.payload["release"] + yield "event", input_data.payload["action"] + yield "release", release + yield "release_url", release["html_url"] + yield "tag_name", release["tag_name"] + yield "release_name", release.get("name", "") + yield "body", release.get("body", "") + yield "prerelease", release["prerelease"] + yield "draft", release["draft"] + yield "assets", release["assets"] + + +class GithubIssuesTriggerBlock(GitHubTriggerBase, Block): + """Trigger block for GitHub issues events - great for triage and notifications.""" + + EXAMPLE_PAYLOAD_FILE = ( + Path(__file__).parent / "example_payloads" / "issues.opened.json" + ) + + class Input(GitHubTriggerBase.Input): + class EventsFilter(BaseModel): + """ + https://docs.github.com/en/webhooks/webhook-events-and-payloads#issues + """ + + opened: bool = False + edited: bool = False + deleted: bool = False + closed: bool = False + reopened: bool = False + assigned: bool = False + unassigned: bool = False + labeled: bool = False + unlabeled: bool = False + locked: bool = False + unlocked: bool = False + transferred: bool = False + milestoned: bool = False + demilestoned: bool = False + pinned: bool = False + unpinned: bool = False + + events: EventsFilter = SchemaField( + title="Events", description="The issue events to subscribe to" + ) + + class Output(GitHubTriggerBase.Output): + event: str = SchemaField( + description="The issue event that triggered the webhook (e.g., 'opened')" + ) + number: int = SchemaField(description="The issue number") + issue: dict = SchemaField(description="The full issue object") + issue_url: str = SchemaField(description="URL to the issue") + issue_title: str = SchemaField(description="The issue title") + issue_body: str = SchemaField(description="The issue body/description") + labels: list = SchemaField(description="List of labels on the issue") + assignees: list = SchemaField(description="List of assignees") + state: str = SchemaField(description="Issue state ('open' or 'closed')") + + def __init__(self): + from backend.integrations.webhooks.github import GithubWebhookType + + example_payload = json.loads( + 
self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8") + ) + + super().__init__( + id="b2605464-e486-4bf4-aad3-d8a213c8a48a", + description="This block triggers on GitHub issues events. " + "Useful for automated triage, notifications, and welcoming first-time contributors.", + categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT}, + input_schema=GithubIssuesTriggerBlock.Input, + output_schema=GithubIssuesTriggerBlock.Output, + webhook_config=BlockWebhookConfig( + provider=ProviderName.GITHUB, + webhook_type=GithubWebhookType.REPO, + resource_format="{repo}", + event_filter_input="events", + event_format="issues.{event}", + ), + test_input={ + "repo": "Significant-Gravitas/AutoGPT", + "events": {"opened": True}, + "credentials": TEST_CREDENTIALS_INPUT, + "payload": example_payload, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("payload", example_payload), + ("triggered_by_user", example_payload["sender"]), + ("event", example_payload["action"]), + ("number", example_payload["issue"]["number"]), + ("issue", example_payload["issue"]), + ("issue_url", example_payload["issue"]["html_url"]), + ("issue_title", example_payload["issue"]["title"]), + ("issue_body", example_payload["issue"]["body"]), + ("labels", example_payload["issue"]["labels"]), + ("assignees", example_payload["issue"]["assignees"]), + ("state", example_payload["issue"]["state"]), + ], + ) + + async def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore + async for name, value in super().run(input_data, **kwargs): + yield name, value + issue = input_data.payload["issue"] + yield "event", input_data.payload["action"] + yield "number", issue["number"] + yield "issue", issue + yield "issue_url", issue["html_url"] + yield "issue_title", issue["title"] + yield "issue_body", issue.get("body") or "" + yield "labels", issue["labels"] + yield "assignees", issue["assignees"] + yield "state", issue["state"] + + +class GithubDiscussionTriggerBlock(GitHubTriggerBase, Block): + """Trigger block for GitHub discussion events - perfect for community Q&A sync.""" + + EXAMPLE_PAYLOAD_FILE = ( + Path(__file__).parent / "example_payloads" / "discussion.created.json" + ) + + class Input(GitHubTriggerBase.Input): + class EventsFilter(BaseModel): + """ + https://docs.github.com/en/webhooks/webhook-events-and-payloads#discussion + """ + + created: bool = False + edited: bool = False + deleted: bool = False + answered: bool = False + unanswered: bool = False + labeled: bool = False + unlabeled: bool = False + locked: bool = False + unlocked: bool = False + category_changed: bool = False + transferred: bool = False + pinned: bool = False + unpinned: bool = False + + events: EventsFilter = SchemaField( + title="Events", description="The discussion events to subscribe to" + ) + + class Output(GitHubTriggerBase.Output): + event: str = SchemaField( + description="The discussion event that triggered the webhook" + ) + number: int = SchemaField(description="The discussion number") + discussion: dict = SchemaField(description="The full discussion object") + discussion_url: str = SchemaField(description="URL to the discussion") + title: str = SchemaField(description="The discussion title") + body: str = SchemaField(description="The discussion body") + category: dict = SchemaField(description="The discussion category object") + category_name: str = SchemaField(description="Name of the category") + state: str = SchemaField(description="Discussion state") + + def __init__(self): + from backend.integrations.webhooks.github 
import GithubWebhookType + + example_payload = json.loads( + self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8") + ) + + super().__init__( + id="87f847b3-d81a-424e-8e89-acadb5c9d52b", + description="This block triggers on GitHub Discussions events. " + "Great for syncing Q&A to Discord or auto-responding to common questions. " + "Note: Discussions must be enabled on the repository.", + categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT}, + input_schema=GithubDiscussionTriggerBlock.Input, + output_schema=GithubDiscussionTriggerBlock.Output, + webhook_config=BlockWebhookConfig( + provider=ProviderName.GITHUB, + webhook_type=GithubWebhookType.REPO, + resource_format="{repo}", + event_filter_input="events", + event_format="discussion.{event}", + ), + test_input={ + "repo": "Significant-Gravitas/AutoGPT", + "events": {"created": True}, + "credentials": TEST_CREDENTIALS_INPUT, + "payload": example_payload, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("payload", example_payload), + ("triggered_by_user", example_payload["sender"]), + ("event", example_payload["action"]), + ("number", example_payload["discussion"]["number"]), + ("discussion", example_payload["discussion"]), + ("discussion_url", example_payload["discussion"]["html_url"]), + ("title", example_payload["discussion"]["title"]), + ("body", example_payload["discussion"]["body"]), + ("category", example_payload["discussion"]["category"]), + ("category_name", example_payload["discussion"]["category"]["name"]), + ("state", example_payload["discussion"]["state"]), + ], + ) + + async def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore + async for name, value in super().run(input_data, **kwargs): + yield name, value + discussion = input_data.payload["discussion"] + yield "event", input_data.payload["action"] + yield "number", discussion["number"] + yield "discussion", discussion + yield "discussion_url", discussion["html_url"] + yield "title", discussion["title"] + yield "body", discussion.get("body") or "" + yield "category", discussion["category"] + yield "category_name", discussion["category"]["name"] + yield "state", discussion["state"] diff --git a/autogpt_platform/frontend/src/tests/pages/build.page.ts b/autogpt_platform/frontend/src/tests/pages/build.page.ts index 709b0ef3ed..8acc9a8f40 100644 --- a/autogpt_platform/frontend/src/tests/pages/build.page.ts +++ b/autogpt_platform/frontend/src/tests/pages/build.page.ts @@ -472,14 +472,44 @@ export class BuildPage extends BasePage { ); } - async getGithubTriggerBlockDetails(): Promise { - return { - id: "6c60ec01-8128-419e-988f-96a063ee2fea", - name: "Github Trigger", - description: - "This block triggers on pull request events and outputs the event type and payload.", - type: "Standard", - }; + async getGithubTriggerBlockDetails(): Promise { + return [ + { + id: "6c60ec01-8128-419e-988f-96a063ee2fea", + name: "Github Trigger", + description: + "This block triggers on pull request events and outputs the event type and payload.", + type: "Standard", + }, + { + id: "551e0a35-100b-49b7-89b8-3031322239b6", + name: "Github Star Trigger", + description: + "This block triggers on star events and outputs the event type and payload.", + type: "Standard", + }, + { + id: "2052dd1b-74e1-46ac-9c87-c7a0e057b60b", + name: "Github Release Trigger", + description: + "This block triggers on release events and outputs the event type and payload.", + type: "Standard", + }, + { + id: "b2605464-e486-4bf4-aad3-d8a213c8a48a", + name: "Github Issue Trigger", + 
description: + "This block triggers on issue events and outputs the event type and payload.", + type: "Standard", + }, + { + id: "87f847b3-d81a-424e-8e89-acadb5c9d52b", + name: "Github Discussion Trigger", + description: + "This block triggers on discussion events and outputs the event type and payload.", + type: "Standard", + }, + ]; } async nextTutorialStep(): Promise { @@ -488,7 +518,9 @@ export class BuildPage extends BasePage { } async getBlocksToSkip(): Promise { - return [(await this.getGithubTriggerBlockDetails()).id]; + return [ + (await this.getGithubTriggerBlockDetails()).map((b) => b.id), + ].flat(); } async createDummyAgent() { From 117bb0543897fa360f5f0511e7d442145198b64a Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Wed, 10 Dec 2025 16:52:02 +0100 Subject: [PATCH 40/58] fix(frontend/library): Fix trigger UX flows in v3 library (#11589) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Resolves #11586 - Follow-up to #11580 ### Changes πŸ—οΈ - Fix logic to include manual triggers as a possibility - Fix input render logic to use trigger setup schema if applicable - Fix rendering payload input for externally triggered runs - Amend `RunAgentModal` to load preset inputs+credentials if selected - Amend `SelectedTemplateView` to use modified input for run (if applicable) - Hide non-applicable buttons in `SelectedRunView` for externally triggered runs - Implement auto-navigation to `SelectedTriggerView` on trigger setup ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Can set up manual triggers - [x] Navigates to trigger view after setup - [x] Can set up automatic triggers - [x] Can create templates from runs - [x] Can run templates - [x] Can run templates with modified input --- .../NewAgentLibraryView.tsx | 32 ++++++-- .../AgentInputsReadOnly.tsx | 35 +++++--- .../modals/AgentInputsReadOnly/helpers.ts | 3 +- .../modals/RunAgentModal/RunAgentModal.tsx | 12 ++- .../ModalRunSection/ModalRunSection.tsx | 3 +- .../components/RunActions/RunActions.tsx | 3 +- .../modals/RunAgentModal/useAgentRunModal.tsx | 43 ++++++---- .../components/other/EmptyTasks.tsx | 6 +- .../SelectedRunView/SelectedRunView.tsx | 4 +- .../SelectedRunActions/SelectedRunActions.tsx | 80 ++++++++++--------- .../useSelectedRunActions.ts | 56 +++++++------ .../SelectedTemplateView.tsx | 7 +- .../useSelectedTemplateView.ts | 14 +++- .../useNewAgentLibraryView.ts | 34 ++++++-- 14 files changed, 216 insertions(+), 116 deletions(-) diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx index f951c09522..8ee76ce37a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx @@ -1,5 +1,6 @@ "use client"; +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; import { Button } from "@/components/atoms/Button/Button"; import { Breadcrumbs } from "@/components/molecules/Breadcrumbs/Breadcrumbs"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; @@ -24,11 +25,13 @@ import { 
useNewAgentLibraryView } from "./useNewAgentLibraryView"; export function NewAgentLibraryView() { const { - agent, - hasAnyItems, - ready, - error, agentId, + agent, + ready, + activeTemplate, + isTemplateLoading, + error, + hasAnyItems, activeItem, sidebarLoading, activeTab, @@ -38,6 +41,12 @@ export function NewAgentLibraryView() { handleClearSelectedRun, } = useNewAgentLibraryView(); + function onTriggerSetup(newTrigger: LibraryAgentPreset) { + if (!agent) return; + + handleSelectRun(newTrigger.id, "triggers"); + } + if (error) { return (
- +
); @@ -82,16 +91,23 @@ export function NewAgentLibraryView() { > + } agent={agent} - agentId={agent.id.toString()} onRunCreated={(execution) => handleSelectRun(execution.id, "runs")} onScheduleCreated={(schedule) => handleSelectRun(schedule.id, "scheduled") } + onTriggerSetup={onTriggerSetup} + initialInputValues={activeTemplate?.inputs} + initialInputCredentials={activeTemplate?.credentials} />
@@ -151,7 +167,7 @@ export function NewAgentLibraryView() { ) : ( - + )}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx index d46a6bfc9e..a11e88f29a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx @@ -1,7 +1,10 @@ "use client"; import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types"; +import type { + BlockIOSubSchema, + CredentialsMetaInput, +} from "@/lib/autogpt-server-api/types"; import { CredentialsInput } from "../CredentialsInputs/CredentialsInputs"; import { getAgentCredentialsFields, @@ -20,13 +23,21 @@ export function AgentInputsReadOnly({ inputs, credentialInputs, }: Props) { - const fields = getAgentInputFields(agent); - const credentialFields = getAgentCredentialsFields(agent); - const inputEntries = Object.entries(fields); - const credentialEntries = Object.entries(credentialFields); + const inputFields = getAgentInputFields(agent); + const credentialFieldEntries = Object.entries( + getAgentCredentialsFields(agent), + ); - const hasInputs = inputs && inputEntries.length > 0; - const hasCredentials = credentialInputs && credentialEntries.length > 0; + // Take actual input entries as leading; augment with schema from input fields. + // TODO: ensure consistent ordering. + const inputEntries = + inputs && + Object.entries(inputs).map<[string, [BlockIOSubSchema | undefined, any]]>( + ([k, v]) => [k, [inputFields[k], v]], + ); + + const hasInputs = inputEntries && inputEntries.length > 0; + const hasCredentials = credentialInputs && credentialFieldEntries.length > 0; if (!hasInputs && !hasCredentials) { return
No input for this run.
; @@ -37,11 +48,13 @@ export function AgentInputsReadOnly({ {/* Regular inputs */} {hasInputs && (
- {inputEntries.map(([key, sub]) => ( + {inputEntries.map(([key, [schema, value]]) => (
- +

- {renderValue((inputs as Record)[key])} + {renderValue(value)}

))} @@ -52,7 +65,7 @@ export function AgentInputsReadOnly({ {hasCredentials && (
{hasInputs &&
} - {credentialEntries.map(([key, inputSubSchema]) => { + {credentialFieldEntries.map(([key, inputSubSchema]) => { const credential = credentialInputs![key]; if (!credential) return null; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/helpers.ts index 5fd8ff4fe7..95069b1d30 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/helpers.ts @@ -13,7 +13,8 @@ export function getCredentialTypeDisplayName(type: string): string { } export function getAgentInputFields(agent: LibraryAgent): Record { - const schema = agent.input_schema as unknown as { + const schema = (agent.trigger_setup_info?.config_schema ?? + agent.input_schema) as unknown as { properties?: Record; } | null; if (!schema || !schema.properties) return {}; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx index 94849b5828..e53f31a349 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx @@ -3,6 +3,7 @@ import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; import { Button } from "@/components/atoms/Button/Button"; import { Tooltip, @@ -22,16 +23,20 @@ import { useAgentRunModal } from "./useAgentRunModal"; interface Props { triggerSlot: React.ReactNode; agent: LibraryAgent; - agentId: string; - agentVersion?: number; + initialInputValues?: Record; + initialInputCredentials?: Record; onRunCreated?: (execution: GraphExecutionMeta) => void; + onTriggerSetup?: (preset: LibraryAgentPreset) => void; onScheduleCreated?: (schedule: GraphExecutionJobInfo) => void; } export function RunAgentModal({ triggerSlot, agent, + initialInputValues, + initialInputCredentials, onRunCreated, + onTriggerSetup, onScheduleCreated, }: Props) { const { @@ -71,6 +76,9 @@ export function RunAgentModal({ handleRun, } = useAgentRunModal(agent, { onRun: onRunCreated, + onSetupTrigger: onTriggerSetup, + initialInputValues, + initialInputCredentials, }); const [isScheduleModalOpen, setIsScheduleModalOpen] = useState(false); diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx index 
c2528ccdc9..d8c4ecb730 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx @@ -26,7 +26,8 @@ export function ModalRunSection() { return (
- {defaultRunType === "automatic-trigger" ? ( + {defaultRunType === "automatic-trigger" || + defaultRunType === "manual-trigger" ? ( - {defaultRunType === "automatic-trigger" + {defaultRunType === "automatic-trigger" || + defaultRunType === "manual-trigger" ? "Set up Trigger" : "Start Task"} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.tsx index 2d3da4bdc7..fcd3a7b87a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.tsx @@ -6,7 +6,6 @@ import { getGetV2ListPresetsQueryKey, usePostV2SetupTrigger, } from "@/app/api/__generated__/endpoints/presets/presets"; -import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; @@ -14,7 +13,7 @@ import { useToast } from "@/components/molecules/Toast/use-toast"; import { isEmpty } from "@/lib/utils"; import { analytics } from "@/services/analytics"; import { useQueryClient } from "@tanstack/react-query"; -import { useCallback, useMemo, useState } from "react"; +import { useCallback, useEffect, useMemo, useState } from "react"; import { showExecutionErrorToast } from "./errorHelpers"; export type RunVariant = @@ -25,8 +24,9 @@ export type RunVariant = interface UseAgentRunModalCallbacks { onRun?: (execution: GraphExecutionMeta) => void; - onCreateSchedule?: (schedule: GraphExecutionJobInfo) => void; onSetupTrigger?: (preset: LibraryAgentPreset) => void; + initialInputValues?: Record; + initialInputCredentials?: Record; } export function useAgentRunModal( @@ -36,18 +36,28 @@ export function useAgentRunModal( const { toast } = useToast(); const queryClient = useQueryClient(); const [isOpen, setIsOpen] = useState(false); - const [inputValues, setInputValues] = useState>({}); + const [inputValues, setInputValues] = useState>( + callbacks?.initialInputValues || {}, + ); const [inputCredentials, setInputCredentials] = useState>( - {}, + callbacks?.initialInputCredentials || {}, ); const [presetName, setPresetName] = useState(""); const [presetDescription, setPresetDescription] = useState(""); // Determine the default run type based on agent capabilities - const defaultRunType: RunVariant = agent.has_external_trigger - ? "automatic-trigger" + const defaultRunType: RunVariant = agent.trigger_setup_info + ? agent.trigger_setup_info.credentials_input_name + ? 
"automatic-trigger" + : "manual-trigger" : "manual"; + // Update input values/credentials if template is selected/unselected + useEffect(() => { + setInputValues(callbacks?.initialInputValues || {}); + setInputCredentials(callbacks?.initialInputCredentials || {}); + }, [callbacks?.initialInputValues, callbacks?.initialInputCredentials]); + // API mutations const executeGraphMutation = usePostV1ExecuteGraphAgent({ mutation: { @@ -105,11 +115,13 @@ export function useAgentRunModal( }, }); - // Input schema validation - const agentInputSchema = useMemo( - () => agent.input_schema || { properties: {}, required: [] }, - [agent.input_schema], - ); + // Input schema validation (use trigger schema for triggered agents) + const agentInputSchema = useMemo(() => { + if (agent.trigger_setup_info?.config_schema) { + return agent.trigger_setup_info.config_schema; + } + return agent.input_schema || { properties: {}, required: [] }; + }, [agent.input_schema, agent.trigger_setup_info]); const agentInputFields = useMemo(() => { if ( @@ -205,7 +217,10 @@ export function useAgentRunModal( return; } - if (defaultRunType === "automatic-trigger") { + if ( + defaultRunType === "automatic-trigger" || + defaultRunType === "manual-trigger" + ) { // Setup trigger if (!presetName.trim()) { toast({ @@ -262,7 +277,7 @@ export function useAgentRunModal( setIsOpen, // Run mode - defaultRunType, + defaultRunType: defaultRunType as RunVariant, // Form: regular inputs inputValues, diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx index 62a75e4993..26bfbde882 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx @@ -2,6 +2,7 @@ import { getV1GetGraphVersion } from "@/app/api/__generated__/endpoints/graphs/graphs"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; import { Button } from "@/components/atoms/Button/Button"; import { Text } from "@/components/atoms/Text/Text"; import { ShowMoreText } from "@/components/molecules/ShowMoreText/ShowMoreText"; @@ -15,9 +16,10 @@ import { EmptyTasksIllustration } from "./EmptyTasksIllustration"; type Props = { agent: LibraryAgent; + onTriggerSetup?: (preset: LibraryAgentPreset) => void; }; -export function EmptyTasks({ agent }: Props) { +export function EmptyTasks({ agent, onTriggerSetup }: Props) { const { toast } = useToast(); async function handleExport() { @@ -75,7 +77,7 @@ export function EmptyTasks({ agent }: Props) { } agent={agent} - agentId={agent.id.toString()} + onTriggerSetup={onTriggerSetup} />
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx index 97292b85ce..cc5872097e 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx @@ -198,8 +198,8 @@ export function SelectedRunView({
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx index 7533577bf5..92db3e0b37 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx @@ -20,13 +20,18 @@ import { useSelectedRunActions } from "./useSelectedRunActions"; type Props = { agent: LibraryAgent; run: GraphExecution | undefined; - scheduleRecurrence?: string; onSelectRun?: (id: string) => void; onClearSelectedRun?: () => void; }; -export function SelectedRunActions(props: Props) { +export function SelectedRunActions({ + agent, + run, + onSelectRun, + onClearSelectedRun, +}: Props) { const { + canRunManually, handleRunAgain, handleStopRun, isRunningAgain, @@ -37,21 +42,20 @@ export function SelectedRunActions(props: Props) { isCreateTemplateModalOpen, setIsCreateTemplateModalOpen, } = useSelectedRunActions({ - agentGraphId: props.agent.graph_id, - run: props.run, - agent: props.agent, - onSelectRun: props.onSelectRun, - onClearSelectedRun: props.onClearSelectedRun, + agentGraphId: agent.graph_id, + run: run, + agent: agent, + onSelectRun: onSelectRun, }); const shareExecutionResultsEnabled = useGetFlag(Flag.SHARE_EXECUTION_RESULTS); - const isRunning = props.run?.status === "RUNNING"; + const isRunning = run?.status === "RUNNING"; - if (!props.run || !props.agent) return null; + if (!run || !agent) return null; return ( - {!isRunning ? ( + {canRunManually && !isRunning ? 
( + + {canRunManually && ( + <> + + setIsCreateTemplateModalOpen(false)} + onCreate={handleCreateTemplate} + run={run} + /> + + )} - setIsCreateTemplateModalOpen(false)} - onCreate={handleCreateTemplate} - run={props.run} + agent={agent} + run={run} + agentGraphId={agent.graph_id} + onClearSelectedRun={onClearSelectedRun} /> ); diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts index 462490b6da..03fc0b4ae8 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts @@ -15,15 +15,19 @@ import { useToast } from "@/components/molecules/Toast/use-toast"; import { useQueryClient } from "@tanstack/react-query"; import { useState } from "react"; -interface Args { +interface Params { agentGraphId: string; run?: GraphExecution; agent?: LibraryAgent; onSelectRun?: (id: string) => void; - onClearSelectedRun?: () => void; } -export function useSelectedRunActions(args: Args) { +export function useSelectedRunActions({ + agentGraphId, + run, + agent, + onSelectRun, +}: Params) { const queryClient = useQueryClient(); const { toast } = useToast(); @@ -31,8 +35,9 @@ export function useSelectedRunActions(args: Args) { const [isCreateTemplateModalOpen, setIsCreateTemplateModalOpen] = useState(false); - const canStop = - args.run?.status === "RUNNING" || args.run?.status === "QUEUED"; + const canStop = run?.status === "RUNNING" || run?.status === "QUEUED"; + + const canRunManually = !agent?.trigger_setup_info; const { mutateAsync: stopRun, isPending: isStopping } = usePostV1StopGraphExecution(); @@ -46,16 +51,16 @@ export function useSelectedRunActions(args: Args) { async function handleStopRun() { try { await stopRun({ - graphId: args.run?.graph_id ?? "", - graphExecId: args.run?.id ?? "", + graphId: run?.graph_id ?? "", + graphExecId: run?.id ?? "", }); toast({ title: "Run stopped" }); await queryClient.invalidateQueries({ - queryKey: getGetV1ListGraphExecutionsInfiniteQueryOptions( - args.agentGraphId, - ).queryKey, + queryKey: + getGetV1ListGraphExecutionsInfiniteQueryOptions(agentGraphId) + .queryKey, }); } catch (error: unknown) { toast({ @@ -70,7 +75,7 @@ export function useSelectedRunActions(args: Args) { } async function handleRunAgain() { - if (!args.run) { + if (!run) { toast({ title: "Run not found", description: "Run not found", @@ -83,11 +88,11 @@ export function useSelectedRunActions(args: Args) { toast({ title: "Run started" }); const res = await executeRun({ - graphId: args.run.graph_id, - graphVersion: args.run.graph_version, + graphId: run.graph_id, + graphVersion: run.graph_version, data: { - inputs: args.run.inputs || {}, - credentials_inputs: args.run.credential_inputs || {}, + inputs: run.inputs || {}, + credentials_inputs: run.credential_inputs || {}, source: "library", }, }); @@ -95,12 +100,12 @@ export function useSelectedRunActions(args: Args) { const newRunId = res?.status === 200 ? (res?.data?.id ?? 
"") : ""; await queryClient.invalidateQueries({ - queryKey: getGetV1ListGraphExecutionsInfiniteQueryOptions( - args.agentGraphId, - ).queryKey, + queryKey: + getGetV1ListGraphExecutionsInfiniteQueryOptions(agentGraphId) + .queryKey, }); - if (newRunId && args.onSelectRun) args.onSelectRun(newRunId); + if (newRunId && onSelectRun) onSelectRun(newRunId); } catch (error: unknown) { toast({ title: "Failed to start run", @@ -118,7 +123,7 @@ export function useSelectedRunActions(args: Args) { } async function handleCreateTemplate(name: string, description: string) { - if (!args.run) { + if (!run) { toast({ title: "Run not found", description: "Cannot create template from missing run", @@ -132,7 +137,7 @@ export function useSelectedRunActions(args: Args) { data: { name, description, - graph_execution_id: args.run.id, + graph_execution_id: run.id, }, }); @@ -141,10 +146,10 @@ export function useSelectedRunActions(args: Args) { title: "Template created", }); - if (args.agent) { + if (agent) { queryClient.invalidateQueries({ queryKey: getGetV2ListPresetsQueryKey({ - graph_id: args.agent.graph_id, + graph_id: agent.graph_id, }), }); } @@ -164,8 +169,8 @@ export function useSelectedRunActions(args: Args) { } // Open in builder URL helper - const openInBuilderHref = args.run - ? `/build?flowID=${args.run.graph_id}&flowVersion=${args.run.graph_version}&flowExecutionID=${args.run.id}` + const openInBuilderHref = run + ? `/build?flowID=${run.graph_id}&flowVersion=${run.graph_version}&flowExecutionID=${run.id}` : undefined; return { @@ -173,6 +178,7 @@ export function useSelectedRunActions(args: Args) { showDeleteDialog, canStop, isStopping, + canRunManually, isRunningAgain, handleShowDeleteDialog, handleStopRun, diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/SelectedTemplateView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/SelectedTemplateView.tsx index b1c89c1945..8f64417f77 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/SelectedTemplateView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/SelectedTemplateView.tsx @@ -95,6 +95,7 @@ export function SelectedTemplateView({ return null; } + const templateOrTrigger = agent.trigger_setup_info ? "Trigger" : "Template"; const hasWebhook = !!template.webhook_id && template.webhook; return ( @@ -111,14 +112,14 @@ export function SelectedTemplateView({ /> )} - +
setName(e.target.value)} - placeholder="Enter template name" + placeholder={`Enter ${templateOrTrigger.toLowerCase()} name`} /> setDescription(e.target.value)} - placeholder="Enter template description" + placeholder={`Enter ${templateOrTrigger.toLowerCase()} description`} />
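The hunks above all branch on whether an agent is trigger-based: the run modal swaps in the trigger's `config_schema`, `SelectedRunActions` hides the manual run/stop controls behind `canRunManually`, and `SelectedTemplateView` relabels the form. A condensed sketch of those derivations, using illustrative stand-ins for the generated `LibraryAgent`/`LibraryAgentPreset` types rather than the real models:

```typescript
type JSONSchema = { properties: Record<string, unknown>; required?: string[] };

interface AgentLike {
  input_schema?: JSONSchema;
  trigger_setup_info?: { config_schema?: JSONSchema } | null;
}

interface PresetLike {
  webhook_id?: string | null;
}

// Triggered agents are configured against the trigger's config schema;
// everything else falls back to the regular input schema.
function resolveInputSchema(agent: AgentLike): JSONSchema {
  return (
    agent.trigger_setup_info?.config_schema ??
    agent.input_schema ?? { properties: {}, required: [] }
  );
}

// "Run again" / "Stop" only make sense for agents the user executes directly;
// triggered agents run when their webhook fires.
function canRunManually(agent?: AgentLike): boolean {
  return !agent?.trigger_setup_info;
}

// Presets backed by a webhook are surfaced as triggers, the rest as templates.
function presetLabel(preset: PresetLike): "Trigger" | "Template" {
  return preset.webhook_id ? "Trigger" : "Template";
}
```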
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/useSelectedTemplateView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/useSelectedTemplateView.ts index 58483fdc74..a0f34f54a2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/useSelectedTemplateView.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/useSelectedTemplateView.ts @@ -138,11 +138,21 @@ export function useSelectedTemplateView({ } function handleStartTask() { + if (!query.data) return; + + const inputsChanged = + JSON.stringify(inputs) !== JSON.stringify(query.data.inputs || {}); + + const credentialsChanged = + JSON.stringify(credentials) !== + JSON.stringify(query.data.credentials || {}); + + // Use changed unpersisted inputs if applicable executeMutation.mutate({ presetId: templateId, data: { - inputs: {}, - credential_inputs: {}, + inputs: inputsChanged ? inputs : undefined, + credential_inputs: credentialsChanged ? credentials : undefined, }, }); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts index b7b6301ad6..c016a27722 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts @@ -1,5 +1,7 @@ import { useGetV2GetLibraryAgent } from "@/app/api/__generated__/endpoints/library/library"; +import { useGetV2GetASpecificPreset } from "@/app/api/__generated__/endpoints/presets/presets"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; import { okData } from "@/app/api/helpers"; import { useParams } from "next/navigation"; import { parseAsString, useQueryStates } from "nuqs"; @@ -24,7 +26,7 @@ export function useNewAgentLibraryView() { const agentId = id as string; const { - data: response, + data: agent, isSuccess, error, } = useGetV2GetLibraryAgent(agentId, { @@ -41,6 +43,24 @@ export function useNewAgentLibraryView() { const activeTab = useMemo(() => parseTab(activeTabRaw), [activeTabRaw]); + const { + data: _template, + isSuccess: isTemplateLoaded, + isLoading: isTemplateLoading, + error: templateError, + } = useGetV2GetASpecificPreset(activeItem ?? "", { + query: { + enabled: Boolean(activeTab === "templates" && activeItem), + select: okData, + }, + }); + const activeTemplate = + isTemplateLoaded && + activeTab === "templates" && + _template?.id === activeItem + ? 
_template + : null; + useEffect(() => { if (!activeTabRaw && !activeItem) { setQueryStates({ @@ -71,10 +91,10 @@ export function useNewAgentLibraryView() { const showSidebarLayout = sidebarLoading || hasAnyItems; useEffect(() => { - if (response) { - document.title = `${response.name} - Library - AutoGPT Platform`; + if (agent) { + document.title = `${agent.name} - Library - AutoGPT Platform`; } - }, [response]); + }, [agent]); useEffect(() => { if ( @@ -135,9 +155,11 @@ export function useNewAgentLibraryView() { return { agentId: id, + agent, ready: isSuccess, - error, - agent: response, + activeTemplate, + isTemplateLoading, + error: error || templateError, hasAnyItems, showSidebarLayout, activeItem, From 7ff282c9088f904a5af59cae87b611386f3d97d9 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Wed, 10 Dec 2025 11:10:52 -0600 Subject: [PATCH 41/58] =?UTF-8?q?fix(frontend):=20Disable=20single=20dolla?= =?UTF-8?q?r=20sign=20LaTeX=20mode=20in=20markdown=20rend=E2=80=A6=20(#115?= =?UTF-8?q?98)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Single dollar signs ($10, $variable) are commonly used in content and were being incorrectly interpreted as inline LaTeX math delimiters. This change disables that behavior while keeping double dollar sign ($$...$$) math blocks working. ## Changes πŸ—οΈ β€’ Configure remarkMath plugin with singleDollarTextMath: false in MarkdownRenderer.tsx β€’ Double dollar sign display math ($$...$$) continues to work as expected β€’ Single dollar signs are no longer interpreted as inline math delimiters ## Checklist πŸ“‹ For code changes: -[x] I have clearly listed my changes in the PR description -[x] I have made a test plan -[x] I have tested my changes according to the test plan: -[x] Verify content with dollar amounts (e.g., β€œ$100”) renders as plain text -[x] Verify double dollar sign math blocks ($$x^2$$) still render as LaTeX -[x] Verify other markdown features (code blocks, tables, links) still work correctly Co-authored-by: Claude --- .../OutputRenderers/renderers/MarkdownRenderer.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/MarkdownRenderer.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/MarkdownRenderer.tsx index 97ff97d46c..d94966c6c8 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/MarkdownRenderer.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/MarkdownRenderer.tsx @@ -166,7 +166,7 @@ function renderMarkdown( className="prose prose-sm dark:prose-invert max-w-none" remarkPlugins={[ remarkGfm, // GitHub Flavored Markdown (tables, task lists, strikethrough) - remarkMath, // Math support for LaTeX + [remarkMath, { singleDollarTextMath: false }], // Math support for LaTeX ]} rehypePlugins={[ rehypeKatex, // Render math with KaTeX From bd37fe946d313a2bca2efdb02f80d93005ff2660 Mon Sep 17 00:00:00 2001 From: Krzysztof Czerwinski <34861343+kcze@users.noreply.github.com> Date: Thu, 11 Dec 2025 02:32:17 +0900 Subject: [PATCH 42/58] feat(platform): Builder search history (#11457) MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit Preserve user searches in the new builder and cache search results for more efficiency. Search is saved, so the user can see their previous searches. ### Changes πŸ—οΈ - Add `BuilderSearch` column&migration to save user search (with all filters) - Builder `db.py` now caches all search results using `@cached` and returns paginated results, so following pages are returned much quicker - Score and sort results - Update models&routes - Update frontend, so it works properly with modified endpoints - Frontend: store `serachId` and use it for subsequent searches, so we don't save partial searches (e.g. "b", "bl", ..., "block"). Search id is reset when user clears the search field. - Add clickable chips to the Suggestions builder tab - Add `HorizontalScroll` component (chips use it) ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Search works and is cached - [x] Search sorts results - [x] Searches are preserved properly --------- Co-authored-by: Reinier van der Leer --- .../backend/backend/server/v2/builder/db.py | 440 +++++++++++++++--- .../backend/server/v2/builder/model.py | 21 +- .../backend/server/v2/builder/routes.py | 103 ++-- .../migration.sql | 15 + autogpt_platform/backend/schema.prisma | 14 + .../BlockMenuSearch/useBlockMenuSearch.ts | 50 +- .../useBlockMenuSearchBar.ts | 26 +- .../NewBlockMenu/HorizontalScroll.tsx | 109 +++++ .../SuggestionContent/SuggestionContent.tsx | 45 +- .../frontend/src/app/api/openapi.json | 60 ++- 10 files changed, 710 insertions(+), 173 deletions(-) create mode 100644 autogpt_platform/backend/migrations/20251209182537_add_builder_search/migration.sql create mode 100644 autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/HorizontalScroll.tsx diff --git a/autogpt_platform/backend/backend/server/v2/builder/db.py b/autogpt_platform/backend/backend/server/v2/builder/db.py index c3f6ac88ab..9856d53c0e 100644 --- a/autogpt_platform/backend/backend/server/v2/builder/db.py +++ b/autogpt_platform/backend/backend/server/v2/builder/db.py @@ -1,9 +1,16 @@ import logging +from dataclasses import dataclass from datetime import datetime, timedelta, timezone +from difflib import SequenceMatcher +from typing import Sequence import prisma import backend.data.block +import backend.server.v2.library.db as library_db +import backend.server.v2.library.model as library_model +import backend.server.v2.store.db as store_db +import backend.server.v2.store.model as store_model from backend.blocks import load_all_blocks from backend.blocks.llm import LlmModel from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema @@ -14,17 +21,36 @@ from backend.server.v2.builder.model import ( BlockResponse, BlockType, CountResponse, + FilterType, Provider, ProviderResponse, - SearchBlocksResponse, + SearchEntry, ) from backend.util.cache import cached from backend.util.models import Pagination logger = logging.getLogger(__name__) llm_models = [name.name.lower().replace("_", " ") for name in LlmModel] -_static_counts_cache: dict | None = None -_suggested_blocks: list[BlockInfo] | None = None + +MAX_LIBRARY_AGENT_RESULTS = 100 +MAX_MARKETPLACE_AGENT_RESULTS = 100 +MIN_SCORE_FOR_FILTERED_RESULTS = 10.0 + +SearchResultItem = BlockInfo | library_model.LibraryAgent | store_model.StoreAgent + + +@dataclass +class _ScoredItem: + item: 
SearchResultItem + filter_type: FilterType + score: float + sort_key: str + + +@dataclass +class _SearchCacheEntry: + items: list[SearchResultItem] + total_items: dict[FilterType, int] def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse]: @@ -130,71 +156,244 @@ def get_block_by_id(block_id: str) -> BlockInfo | None: return None -def search_blocks( - include_blocks: bool = True, - include_integrations: bool = True, - query: str = "", - page: int = 1, - page_size: int = 50, -) -> SearchBlocksResponse: +async def update_search(user_id: str, search: SearchEntry) -> str: """ - Get blocks based on the filter and query. - `providers` only applies for `integrations` filter. + Upsert a search request for the user and return the search ID. """ - blocks: list[AnyBlockSchema] = [] - query = query.lower() + if search.search_id: + # Update existing search + await prisma.models.BuilderSearchHistory.prisma().update( + where={ + "id": search.search_id, + }, + data={ + "searchQuery": search.search_query or "", + "filter": search.filter or [], # type: ignore + "byCreator": search.by_creator or [], + }, + ) + return search.search_id + else: + # Create new search + new_search = await prisma.models.BuilderSearchHistory.prisma().create( + data={ + "userId": user_id, + "searchQuery": search.search_query or "", + "filter": search.filter or [], # type: ignore + "byCreator": search.by_creator or [], + } + ) + return new_search.id - total = 0 - skip = (page - 1) * page_size - take = page_size + +async def get_recent_searches(user_id: str, limit: int = 5) -> list[SearchEntry]: + """ + Get the user's most recent search requests. + """ + searches = await prisma.models.BuilderSearchHistory.prisma().find_many( + where={ + "userId": user_id, + }, + order={ + "updatedAt": "desc", + }, + take=limit, + ) + return [ + SearchEntry( + search_query=s.searchQuery, + filter=s.filter, # type: ignore + by_creator=s.byCreator, + search_id=s.id, + ) + for s in searches + ] + + +async def get_sorted_search_results( + *, + user_id: str, + search_query: str | None, + filters: Sequence[FilterType], + by_creator: Sequence[str] | None = None, +) -> _SearchCacheEntry: + normalized_filters: tuple[FilterType, ...] = tuple(sorted(set(filters or []))) + normalized_creators: tuple[str, ...] 
= tuple(sorted(set(by_creator or []))) + return await _build_cached_search_results( + user_id=user_id, + search_query=search_query or "", + filters=normalized_filters, + by_creator=normalized_creators, + ) + + +@cached(ttl_seconds=300, shared_cache=True) +async def _build_cached_search_results( + user_id: str, + search_query: str, + filters: tuple[FilterType, ...], + by_creator: tuple[str, ...], +) -> _SearchCacheEntry: + normalized_query = (search_query or "").strip().lower() + + include_blocks = "blocks" in filters + include_integrations = "integrations" in filters + include_library_agents = "my_agents" in filters + include_marketplace_agents = "marketplace_agents" in filters + + scored_items: list[_ScoredItem] = [] + total_items: dict[FilterType, int] = { + "blocks": 0, + "integrations": 0, + "marketplace_agents": 0, + "my_agents": 0, + } + + block_results, block_total, integration_total = _collect_block_results( + normalized_query=normalized_query, + include_blocks=include_blocks, + include_integrations=include_integrations, + ) + scored_items.extend(block_results) + total_items["blocks"] = block_total + total_items["integrations"] = integration_total + + if include_library_agents: + library_response = await library_db.list_library_agents( + user_id=user_id, + search_term=search_query or None, + page=1, + page_size=MAX_LIBRARY_AGENT_RESULTS, + ) + total_items["my_agents"] = library_response.pagination.total_items + scored_items.extend( + _build_library_items( + agents=library_response.agents, + normalized_query=normalized_query, + ) + ) + + if include_marketplace_agents: + marketplace_response = await store_db.get_store_agents( + creators=list(by_creator) or None, + search_query=search_query or None, + page=1, + page_size=MAX_MARKETPLACE_AGENT_RESULTS, + ) + total_items["marketplace_agents"] = marketplace_response.pagination.total_items + scored_items.extend( + _build_marketplace_items( + agents=marketplace_response.agents, + normalized_query=normalized_query, + ) + ) + + sorted_items = sorted( + scored_items, + key=lambda entry: (-entry.score, entry.sort_key, entry.filter_type), + ) + + return _SearchCacheEntry( + items=[entry.item for entry in sorted_items], + total_items=total_items, + ) + + +def _collect_block_results( + *, + normalized_query: str, + include_blocks: bool, + include_integrations: bool, +) -> tuple[list[_ScoredItem], int, int]: + results: list[_ScoredItem] = [] block_count = 0 integration_count = 0 + if not include_blocks and not include_integrations: + return results, block_count, integration_count + for block_type in load_all_blocks().values(): block: AnyBlockSchema = block_type() - # Skip disabled blocks if block.disabled: continue - # Skip blocks that don't match the query - if ( - query not in block.name.lower() - and query not in block.description.lower() - and not _matches_llm_model(block.input_schema, query) - ): - continue - keep = False + + block_info = block.get_info() credentials = list(block.input_schema.get_credentials_fields().values()) - if include_integrations and len(credentials) > 0: - keep = True + is_integration = len(credentials) > 0 + + if is_integration and not include_integrations: + continue + if not is_integration and not include_blocks: + continue + + score = _score_block(block, block_info, normalized_query) + if not _should_include_item(score, normalized_query): + continue + + filter_type: FilterType = "integrations" if is_integration else "blocks" + if is_integration: integration_count += 1 - if include_blocks and len(credentials) == 0: 
- keep = True + else: block_count += 1 - if not keep: + results.append( + _ScoredItem( + item=block_info, + filter_type=filter_type, + score=score, + sort_key=_get_item_name(block_info), + ) + ) + + return results, block_count, integration_count + + +def _build_library_items( + *, + agents: list[library_model.LibraryAgent], + normalized_query: str, +) -> list[_ScoredItem]: + results: list[_ScoredItem] = [] + + for agent in agents: + score = _score_library_agent(agent, normalized_query) + if not _should_include_item(score, normalized_query): continue - total += 1 - if skip > 0: - skip -= 1 - continue - if take > 0: - take -= 1 - blocks.append(block) + results.append( + _ScoredItem( + item=agent, + filter_type="my_agents", + score=score, + sort_key=_get_item_name(agent), + ) + ) - return SearchBlocksResponse( - blocks=BlockResponse( - blocks=[b.get_info() for b in blocks], - pagination=Pagination( - total_items=total, - total_pages=(total + page_size - 1) // page_size, - current_page=page, - page_size=page_size, - ), - ), - total_block_count=block_count, - total_integration_count=integration_count, - ) + return results + + +def _build_marketplace_items( + *, + agents: list[store_model.StoreAgent], + normalized_query: str, +) -> list[_ScoredItem]: + results: list[_ScoredItem] = [] + + for agent in agents: + score = _score_store_agent(agent, normalized_query) + if not _should_include_item(score, normalized_query): + continue + + results.append( + _ScoredItem( + item=agent, + filter_type="marketplace_agents", + score=score, + sort_key=_get_item_name(agent), + ) + ) + + return results def get_providers( @@ -251,16 +450,12 @@ async def get_counts(user_id: str) -> CountResponse: ) +@cached(ttl_seconds=3600) async def _get_static_counts(): """ Get counts of blocks, integrations, and marketplace agents. This is cached to avoid unnecessary database queries and calculations. - Can't use functools.cache here because the function is async. 
""" - global _static_counts_cache - if _static_counts_cache is not None: - return _static_counts_cache - all_blocks = 0 input_blocks = 0 action_blocks = 0 @@ -287,7 +482,7 @@ async def _get_static_counts(): marketplace_agents = await prisma.models.StoreAgent.prisma().count() - _static_counts_cache = { + return { "all_blocks": all_blocks, "input_blocks": input_blocks, "action_blocks": action_blocks, @@ -296,8 +491,6 @@ async def _get_static_counts(): "marketplace_agents": marketplace_agents, } - return _static_counts_cache - def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool: for field in schema_cls.model_fields.values(): @@ -308,6 +501,123 @@ def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool: return False +def _score_block( + block: AnyBlockSchema, + block_info: BlockInfo, + normalized_query: str, +) -> float: + if not normalized_query: + return 0.0 + + name = block_info.name.lower() + description = block_info.description.lower() + score = _score_primary_fields(name, description, normalized_query) + + category_text = " ".join( + category.get("category", "").lower() for category in block_info.categories + ) + score += _score_additional_field(category_text, normalized_query, 12, 6) + + credentials_info = block.input_schema.get_credentials_fields_info().values() + provider_names = [ + provider.value.lower() + for info in credentials_info + for provider in info.provider + ] + provider_text = " ".join(provider_names) + score += _score_additional_field(provider_text, normalized_query, 15, 6) + + if _matches_llm_model(block.input_schema, normalized_query): + score += 20 + + return score + + +def _score_library_agent( + agent: library_model.LibraryAgent, + normalized_query: str, +) -> float: + if not normalized_query: + return 0.0 + + name = agent.name.lower() + description = (agent.description or "").lower() + instructions = (agent.instructions or "").lower() + + score = _score_primary_fields(name, description, normalized_query) + score += _score_additional_field(instructions, normalized_query, 15, 6) + score += _score_additional_field( + agent.creator_name.lower(), normalized_query, 10, 5 + ) + + return score + + +def _score_store_agent( + agent: store_model.StoreAgent, + normalized_query: str, +) -> float: + if not normalized_query: + return 0.0 + + name = agent.agent_name.lower() + description = agent.description.lower() + sub_heading = agent.sub_heading.lower() + + score = _score_primary_fields(name, description, normalized_query) + score += _score_additional_field(sub_heading, normalized_query, 12, 6) + score += _score_additional_field(agent.creator.lower(), normalized_query, 10, 5) + + return score + + +def _score_primary_fields(name: str, description: str, query: str) -> float: + score = 0.0 + if name == query: + score += 120 + elif name.startswith(query): + score += 90 + elif query in name: + score += 60 + + score += SequenceMatcher(None, name, query).ratio() * 50 + if description: + if query in description: + score += 30 + score += SequenceMatcher(None, description, query).ratio() * 25 + return score + + +def _score_additional_field( + value: str, + query: str, + contains_weight: float, + similarity_weight: float, +) -> float: + if not value or not query: + return 0.0 + + score = 0.0 + if query in value: + score += contains_weight + score += SequenceMatcher(None, value, query).ratio() * similarity_weight + return score + + +def _should_include_item(score: float, normalized_query: str) -> bool: + if not normalized_query: + return True + 
return score >= MIN_SCORE_FOR_FILTERED_RESULTS + + +def _get_item_name(item: SearchResultItem) -> str: + if isinstance(item, BlockInfo): + return item.name.lower() + if isinstance(item, library_model.LibraryAgent): + return item.name.lower() + return item.agent_name.lower() + + @cached(ttl_seconds=3600) def _get_all_providers() -> dict[ProviderName, Provider]: providers: dict[ProviderName, Provider] = {} @@ -329,13 +639,9 @@ def _get_all_providers() -> dict[ProviderName, Provider]: return providers +@cached(ttl_seconds=3600) async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]: - global _suggested_blocks - - if _suggested_blocks is not None and len(_suggested_blocks) >= count: - return _suggested_blocks[:count] - - _suggested_blocks = [] + suggested_blocks = [] # Sum the number of executions for each block type # Prisma cannot group by nested relations, so we do a raw query # Calculate the cutoff timestamp @@ -376,7 +682,7 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]: # Sort blocks by execution count blocks.sort(key=lambda x: x[1], reverse=True) - _suggested_blocks = [block[0] for block in blocks] + suggested_blocks = [block[0] for block in blocks] # Return the top blocks - return _suggested_blocks[:count] + return suggested_blocks[:count] diff --git a/autogpt_platform/backend/backend/server/v2/builder/model.py b/autogpt_platform/backend/backend/server/v2/builder/model.py index e1a7e744fd..4a1de595d1 100644 --- a/autogpt_platform/backend/backend/server/v2/builder/model.py +++ b/autogpt_platform/backend/backend/server/v2/builder/model.py @@ -18,10 +18,17 @@ FilterType = Literal[ BlockType = Literal["all", "input", "action", "output"] +class SearchEntry(BaseModel): + search_query: str | None = None + filter: list[FilterType] | None = None + by_creator: list[str] | None = None + search_id: str | None = None + + # Suggestions class SuggestionsResponse(BaseModel): otto_suggestions: list[str] - recent_searches: list[str] + recent_searches: list[SearchEntry] providers: list[ProviderName] top_blocks: list[BlockInfo] @@ -32,7 +39,7 @@ class BlockCategoryResponse(BaseModel): total_blocks: int blocks: list[BlockInfo] - model_config = {"use_enum_values": False} # <== use enum names like "AI" + model_config = {"use_enum_values": False} # Use enum names like "AI" # Input/Action/Output and see all for block categories @@ -53,17 +60,11 @@ class ProviderResponse(BaseModel): pagination: Pagination -class SearchBlocksResponse(BaseModel): - blocks: BlockResponse - total_block_count: int - total_integration_count: int - - class SearchResponse(BaseModel): items: list[BlockInfo | library_model.LibraryAgent | store_model.StoreAgent] + search_id: str total_items: dict[FilterType, int] - page: int - more_pages: bool + pagination: Pagination class CountResponse(BaseModel): diff --git a/autogpt_platform/backend/backend/server/v2/builder/routes.py b/autogpt_platform/backend/backend/server/v2/builder/routes.py index ebc9fd5baf..b87bf8ca1a 100644 --- a/autogpt_platform/backend/backend/server/v2/builder/routes.py +++ b/autogpt_platform/backend/backend/server/v2/builder/routes.py @@ -6,10 +6,6 @@ from autogpt_libs.auth.dependencies import get_user_id, requires_user import backend.server.v2.builder.db as builder_db import backend.server.v2.builder.model as builder_model -import backend.server.v2.library.db as library_db -import backend.server.v2.library.model as library_model -import backend.server.v2.store.db as store_db -import backend.server.v2.store.model as store_model from 
backend.integrations.providers import ProviderName from backend.util.models import Pagination @@ -45,7 +41,9 @@ def sanitize_query(query: str | None) -> str | None: summary="Get Builder suggestions", response_model=builder_model.SuggestionsResponse, ) -async def get_suggestions() -> builder_model.SuggestionsResponse: +async def get_suggestions( + user_id: Annotated[str, fastapi.Security(get_user_id)], +) -> builder_model.SuggestionsResponse: """ Get all suggestions for the Blocks Menu. """ @@ -55,11 +53,7 @@ async def get_suggestions() -> builder_model.SuggestionsResponse: "Help me create a list", "Help me feed my data to Google Maps", ], - recent_searches=[ - "image generation", - "deepfake", - "competitor analysis", - ], + recent_searches=await builder_db.get_recent_searches(user_id), providers=[ ProviderName.TWITTER, ProviderName.GITHUB, @@ -147,7 +141,6 @@ async def get_providers( ) -# Not using post method because on frontend, orval doesn't support Infinite Query with POST method. @router.get( "/search", summary="Builder search", @@ -157,7 +150,7 @@ async def get_providers( async def search( user_id: Annotated[str, fastapi.Security(get_user_id)], search_query: Annotated[str | None, fastapi.Query()] = None, - filter: Annotated[list[str] | None, fastapi.Query()] = None, + filter: Annotated[list[builder_model.FilterType] | None, fastapi.Query()] = None, search_id: Annotated[str | None, fastapi.Query()] = None, by_creator: Annotated[list[str] | None, fastapi.Query()] = None, page: Annotated[int, fastapi.Query()] = 1, @@ -176,69 +169,43 @@ async def search( ] search_query = sanitize_query(search_query) - # Blocks&Integrations - blocks = builder_model.SearchBlocksResponse( - blocks=builder_model.BlockResponse( - blocks=[], - pagination=Pagination.empty(), - ), - total_block_count=0, - total_integration_count=0, + # Get all possible results + cached_results = await builder_db.get_sorted_search_results( + user_id=user_id, + search_query=search_query, + filters=filter, + by_creator=by_creator, ) - if "blocks" in filter or "integrations" in filter: - blocks = builder_db.search_blocks( - include_blocks="blocks" in filter, - include_integrations="integrations" in filter, - query=search_query or "", - page=page, - page_size=page_size, - ) - # Library Agents - my_agents = library_model.LibraryAgentResponse( - agents=[], - pagination=Pagination.empty(), + # Paginate results + total_combined_items = len(cached_results.items) + pagination = Pagination( + total_items=total_combined_items, + total_pages=(total_combined_items + page_size - 1) // page_size, + current_page=page, + page_size=page_size, ) - if "my_agents" in filter: - my_agents = await library_db.list_library_agents( - user_id=user_id, - search_term=search_query, - page=page, - page_size=page_size, - ) - # Marketplace Agents - marketplace_agents = store_model.StoreAgentsResponse( - agents=[], - pagination=Pagination.empty(), - ) - if "marketplace_agents" in filter: - marketplace_agents = await store_db.get_store_agents( - creators=by_creator, + start_idx = (page - 1) * page_size + end_idx = start_idx + page_size + paginated_items = cached_results.items[start_idx:end_idx] + + # Update the search entry by id + search_id = await builder_db.update_search( + user_id, + builder_model.SearchEntry( search_query=search_query, - page=page, - page_size=page_size, - ) - - more_pages = False - if ( - blocks.blocks.pagination.current_page < blocks.blocks.pagination.total_pages - or my_agents.pagination.current_page < my_agents.pagination.total_pages - or 
marketplace_agents.pagination.current_page - < marketplace_agents.pagination.total_pages - ): - more_pages = True + filter=filter, + by_creator=by_creator, + search_id=search_id, + ), + ) return builder_model.SearchResponse( - items=blocks.blocks.blocks + my_agents.agents + marketplace_agents.agents, - total_items={ - "blocks": blocks.total_block_count, - "integrations": blocks.total_integration_count, - "marketplace_agents": marketplace_agents.pagination.total_items, - "my_agents": my_agents.pagination.total_items, - }, - page=page, - more_pages=more_pages, + items=paginated_items, + search_id=search_id, + total_items=cached_results.total_items, + pagination=pagination, ) diff --git a/autogpt_platform/backend/migrations/20251209182537_add_builder_search/migration.sql b/autogpt_platform/backend/migrations/20251209182537_add_builder_search/migration.sql new file mode 100644 index 0000000000..8b9786e47c --- /dev/null +++ b/autogpt_platform/backend/migrations/20251209182537_add_builder_search/migration.sql @@ -0,0 +1,15 @@ +-- Create BuilderSearchHistory table +CREATE TABLE "BuilderSearchHistory" ( + "id" TEXT NOT NULL, + "userId" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "searchQuery" TEXT NOT NULL, + "filter" TEXT[] DEFAULT ARRAY[]::TEXT[], + "byCreator" TEXT[] DEFAULT ARRAY[]::TEXT[], + + CONSTRAINT "BuilderSearchHistory_pkey" PRIMARY KEY ("id") +); + +-- Define User foreign relation +ALTER TABLE "BuilderSearchHistory" ADD CONSTRAINT "BuilderSearchHistory_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma index c54b014471..121ccab5fc 100644 --- a/autogpt_platform/backend/schema.prisma +++ b/autogpt_platform/backend/schema.prisma @@ -53,6 +53,7 @@ model User { Profile Profile[] UserOnboarding UserOnboarding? + BuilderSearchHistory BuilderSearchHistory[] StoreListings StoreListing[] StoreListingReviews StoreListingReview[] StoreVersionsReviewed StoreListingVersion[] @@ -114,6 +115,19 @@ model UserOnboarding { User User @relation(fields: [userId], references: [id], onDelete: Cascade) } +model BuilderSearchHistory { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + updatedAt DateTime @default(now()) @updatedAt + + searchQuery String + filter String[] @default([]) + byCreator String[] @default([]) + + userId String + User User @relation(fields: [userId], references: [id], onDelete: Cascade) +} + // This model describes the Agent Graph/Flow (Multi Agent System). 
model AgentGraph { id String @default(uuid()) diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearch/useBlockMenuSearch.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearch/useBlockMenuSearch.ts index bff61f2d85..5e9007e617 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearch/useBlockMenuSearch.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearch/useBlockMenuSearch.ts @@ -1,7 +1,7 @@ import { useBlockMenuStore } from "../../../../stores/blockMenuStore"; import { useGetV2BuilderSearchInfinite } from "@/app/api/__generated__/endpoints/store/store"; import { SearchResponse } from "@/app/api/__generated__/models/searchResponse"; -import { useState } from "react"; +import { useCallback, useEffect, useState } from "react"; import { useAddAgentToBuilder } from "../hooks/useAddAgentToBuilder"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { getV2GetSpecificAgent } from "@/app/api/__generated__/endpoints/store/store"; @@ -9,16 +9,27 @@ import { getGetV2ListLibraryAgentsQueryKey, usePostV2AddMarketplaceAgent, } from "@/app/api/__generated__/endpoints/library/library"; -import { getGetV2GetBuilderItemCountsQueryKey } from "@/app/api/__generated__/endpoints/default/default"; +import { + getGetV2GetBuilderItemCountsQueryKey, + getGetV2GetBuilderSuggestionsQueryKey, +} from "@/app/api/__generated__/endpoints/default/default"; import { getQueryClient } from "@/lib/react-query/queryClient"; import { useToast } from "@/components/molecules/Toast/use-toast"; import * as Sentry from "@sentry/nextjs"; export const useBlockMenuSearch = () => { - const { searchQuery } = useBlockMenuStore(); + const { searchQuery, searchId, setSearchId } = useBlockMenuStore(); const { toast } = useToast(); const { addAgentToBuilder, addLibraryAgentToBuilder } = useAddAgentToBuilder(); + const queryClient = getQueryClient(); + + const resetSearchSession = useCallback(() => { + setSearchId(undefined); + queryClient.invalidateQueries({ + queryKey: getGetV2GetBuilderSuggestionsQueryKey(), + }); + }, [queryClient, setSearchId]); const [addingLibraryAgentId, setAddingLibraryAgentId] = useState< string | null @@ -38,13 +49,19 @@ export const useBlockMenuSearch = () => { page: 1, page_size: 8, search_query: searchQuery, + search_id: searchId, }, { query: { - getNextPageParam: (lastPage, allPages) => { - const pagination = lastPage.data as SearchResponse; - const isMore = pagination.more_pages; - return isMore ? allPages.length + 1 : undefined; + getNextPageParam: (lastPage) => { + const response = lastPage.data as SearchResponse; + const { pagination } = response; + if (!pagination) { + return undefined; + } + + const { current_page, total_pages } = pagination; + return current_page < total_pages ? 
current_page + 1 : undefined; }, }, }, @@ -53,7 +70,6 @@ export const useBlockMenuSearch = () => { const { mutateAsync: addMarketplaceAgent } = usePostV2AddMarketplaceAgent({ mutation: { onSuccess: () => { - const queryClient = getQueryClient(); queryClient.invalidateQueries({ queryKey: getGetV2ListLibraryAgentsQueryKey(), }); @@ -75,6 +91,24 @@ export const useBlockMenuSearch = () => { }, }); + useEffect(() => { + if (!searchData?.pages?.length) { + return; + } + + const latestPage = searchData.pages[searchData.pages.length - 1]; + const response = latestPage?.data as SearchResponse; + if (response?.search_id && response.search_id !== searchId) { + setSearchId(response.search_id); + } + }, [searchData, searchId, setSearchId]); + + useEffect(() => { + if (searchId && !searchQuery) { + resetSearchSession(); + } + }, [resetSearchSession, searchId, searchQuery]); + const allSearchData = searchData?.pages?.flatMap((page) => { const response = page.data as SearchResponse; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearchBar/useBlockMenuSearchBar.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearchBar/useBlockMenuSearchBar.ts index b55a638e08..ab1af16584 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearchBar/useBlockMenuSearchBar.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearchBar/useBlockMenuSearchBar.ts @@ -1,30 +1,32 @@ import { debounce } from "lodash"; import { useCallback, useEffect, useRef, useState } from "react"; import { useBlockMenuStore } from "../../../../stores/blockMenuStore"; +import { getQueryClient } from "@/lib/react-query/queryClient"; +import { getGetV2GetBuilderSuggestionsQueryKey } from "@/app/api/__generated__/endpoints/default/default"; const SEARCH_DEBOUNCE_MS = 300; export const useBlockMenuSearchBar = () => { const inputRef = useRef(null); const [localQuery, setLocalQuery] = useState(""); - const { setSearchQuery, setSearchId, searchId, searchQuery } = - useBlockMenuStore(); + const { setSearchQuery, setSearchId, searchQuery } = useBlockMenuStore(); + const queryClient = getQueryClient(); - const searchIdRef = useRef(searchId); - useEffect(() => { - searchIdRef.current = searchId; - }, [searchId]); + const clearSearchSession = useCallback(() => { + setSearchId(undefined); + queryClient.invalidateQueries({ + queryKey: getGetV2GetBuilderSuggestionsQueryKey(), + }); + }, [queryClient, setSearchId]); const debouncedSetSearchQuery = useCallback( debounce((value: string) => { setSearchQuery(value); if (value.length === 0) { - setSearchId(undefined); - } else if (!searchIdRef.current) { - setSearchId(crypto.randomUUID()); + clearSearchSession(); } }, SEARCH_DEBOUNCE_MS), - [setSearchQuery, setSearchId], + [clearSearchSession, setSearchQuery], ); useEffect(() => { @@ -36,13 +38,13 @@ export const useBlockMenuSearchBar = () => { const handleClear = () => { setLocalQuery(""); setSearchQuery(""); - setSearchId(undefined); + clearSearchSession(); debouncedSetSearchQuery.cancel(); }; useEffect(() => { setLocalQuery(searchQuery); - }, []); + }, [searchQuery]); return { handleClear, diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/HorizontalScroll.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/HorizontalScroll.tsx new 
file mode 100644 index 0000000000..0f953394e6 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/HorizontalScroll.tsx @@ -0,0 +1,109 @@ +import React, { useEffect, useRef, useState } from "react"; +import { ArrowLeftIcon, ArrowRightIcon } from "@phosphor-icons/react"; +import { cn } from "@/lib/utils"; + +interface HorizontalScrollAreaProps { + children: React.ReactNode; + wrapperClassName?: string; + scrollContainerClassName?: string; + scrollAmount?: number; + dependencyList?: React.DependencyList; +} + +const defaultDependencies: React.DependencyList = []; +const baseScrollClasses = + "flex gap-2 overflow-x-auto px-8 [scrollbar-width:none] [-ms-overflow-style:'none'] [&::-webkit-scrollbar]:hidden"; + +export const HorizontalScroll: React.FC = ({ + children, + wrapperClassName, + scrollContainerClassName, + scrollAmount = 300, + dependencyList = defaultDependencies, +}) => { + const scrollRef = useRef(null); + const [canScrollLeft, setCanScrollLeft] = useState(false); + const [canScrollRight, setCanScrollRight] = useState(false); + + const scrollByDelta = (delta: number) => { + if (!scrollRef.current) { + return; + } + scrollRef.current.scrollBy({ left: delta, behavior: "smooth" }); + }; + + const updateScrollState = () => { + const element = scrollRef.current; + if (!element) { + setCanScrollLeft(false); + setCanScrollRight(false); + return; + } + setCanScrollLeft(element.scrollLeft > 0); + setCanScrollRight( + Math.ceil(element.scrollLeft + element.clientWidth) < element.scrollWidth, + ); + }; + + useEffect(() => { + updateScrollState(); + const element = scrollRef.current; + if (!element) { + return; + } + const handleScroll = () => updateScrollState(); + element.addEventListener("scroll", handleScroll); + window.addEventListener("resize", handleScroll); + return () => { + element.removeEventListener("scroll", handleScroll); + window.removeEventListener("resize", handleScroll); + }; + }, dependencyList); + + return ( +
+ {/* … wrapper and scroll-container markup (wrapperClassName, baseScrollClasses, scrollContainerClassName) rendering {children}, with canScrollLeft/canScrollRight-gated edge elements and ArrowLeftIcon/ArrowRightIcon buttons wired to scrollByDelta … */}
+ ); +}; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/SuggestionContent/SuggestionContent.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/SuggestionContent/SuggestionContent.tsx index 94efe063a6..b00714f4ca 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/SuggestionContent/SuggestionContent.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/SuggestionContent/SuggestionContent.tsx @@ -6,10 +6,15 @@ import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; import { blockMenuContainerStyle } from "../style"; import { useBlockMenuStore } from "../../../../stores/blockMenuStore"; import { DefaultStateType } from "../types"; +import { SearchHistoryChip } from "../SearchHistoryChip"; +import { HorizontalScroll } from "../HorizontalScroll"; export const SuggestionContent = () => { - const { setIntegration, setDefaultState } = useBlockMenuStore(); + const { setIntegration, setDefaultState, setSearchQuery, setSearchId } = + useBlockMenuStore(); const { data, isLoading, isError, error, refetch } = useSuggestionContent(); + const suggestions = data?.suggestions; + const hasRecentSearches = (suggestions?.recent_searches?.length ?? 0) > 0; if (isError) { return ( @@ -29,11 +34,45 @@ export const SuggestionContent = () => { ); } - const suggestions = data?.suggestions; - return (
+ {/* Recent searches */} + {hasRecentSearches && ( +
+ {/* … "Recent searches" heading markup … */}
+ + {!isLoading && suggestions + ? suggestions.recent_searches.map((entry, index) => ( + { + setSearchQuery(entry.search_query || ""); + setSearchId(entry.search_id || undefined); + }} + /> + )) + : Array(3) + .fill(0) + .map((_, index) => ( + + ))} + +
+ )} + {/* Integrations */}
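On the frontend side of this commit, a search is treated as a resumable session: clicking a recent-search chip restores both the query text and its `search_id`, and the infinite query now pages off the standard `Pagination` object instead of the removed `more_pages` flag. A minimal sketch of that logic, assuming the response shapes shown in the diff (helper names are illustrative):

```typescript
interface Pagination {
  total_items: number;
  total_pages: number;
  current_page: number;
  page_size: number;
}

interface SearchPage {
  search_id: string;
  pagination?: Pagination;
}

// Infinite-query cursor: request the next page only while pages remain.
function getNextPage(lastPage: SearchPage): number | undefined {
  const p = lastPage.pagination;
  if (!p) return undefined;
  return p.current_page < p.total_pages ? p.current_page + 1 : undefined;
}

interface SavedSearch {
  search_query?: string | null;
  search_id?: string | null;
}

// Restoring a saved entry reuses its search_id, so the backend updates the
// existing BuilderSearchHistory row instead of recording partial queries.
function restoreSearch(
  entry: SavedSearch,
  setSearchQuery: (q: string) => void,
  setSearchId: (id: string | undefined) => void,
): void {
  setSearchQuery(entry.search_query || "");
  setSearchId(entry.search_id || undefined);
}
```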

diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index 9a35a7b465..f8c5563476 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -3662,7 +3662,18 @@ "required": false, "schema": { "anyOf": [ - { "type": "array", "items": { "type": "string" } }, + { + "type": "array", + "items": { + "enum": [ + "blocks", + "integrations", + "marketplace_agents", + "my_agents" + ], + "type": "string" + } + }, { "type": "null" } ], "title": "Filter" @@ -8612,6 +8623,45 @@ "required": ["name", "cron", "inputs"], "title": "ScheduleCreationRequest" }, + "SearchEntry": { + "properties": { + "search_query": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Search Query" + }, + "filter": { + "anyOf": [ + { + "items": { + "type": "string", + "enum": [ + "blocks", + "integrations", + "marketplace_agents", + "my_agents" + ] + }, + "type": "array" + }, + { "type": "null" } + ], + "title": "Filter" + }, + "by_creator": { + "anyOf": [ + { "items": { "type": "string" }, "type": "array" }, + { "type": "null" } + ], + "title": "By Creator" + }, + "search_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Search Id" + } + }, + "type": "object", + "title": "SearchEntry" + }, "SearchResponse": { "properties": { "items": { @@ -8625,6 +8675,7 @@ "type": "array", "title": "Items" }, + "search_id": { "type": "string", "title": "Search Id" }, "total_items": { "additionalProperties": { "type": "integer" }, "propertyNames": { @@ -8638,11 +8689,10 @@ "type": "object", "title": "Total Items" }, - "page": { "type": "integer", "title": "Page" }, - "more_pages": { "type": "boolean", "title": "More Pages" } + "pagination": { "$ref": "#/components/schemas/Pagination" } }, "type": "object", - "required": ["items", "total_items", "page", "more_pages"], + "required": ["items", "search_id", "total_items", "pagination"], "title": "SearchResponse" }, "SessionDetailResponse": { @@ -9199,7 +9249,7 @@ "title": "Otto Suggestions" }, "recent_searches": { - "items": { "type": "string" }, + "items": { "$ref": "#/components/schemas/SearchEntry" }, "type": "array", "title": "Recent Searches" }, From 4d4741d5586b29cbf2a0a97acd7872c1b557d1c1 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Thu, 11 Dec 2025 12:13:53 +0100 Subject: [PATCH 43/58] fix(frontend/library): Transition from empty tasks view on task init (#11600) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Resolves #11599 ### Changes πŸ—οΈ - Manually update item counts when initiating a task from `EmptyTasks` view - Other improvements made while debugging ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] `NewAgentLibraryView` transitions to full layout when a first task is created - [x] `NewAgentLibraryView` transitions to full layout when a first trigger is set up --- .../NewAgentLibraryView.tsx | 30 ++++++++------- .../modals/RunAgentModal/useAgentRunModal.tsx | 16 +++----- .../components/other/EmptyTasks.tsx | 13 ++++++- .../SidebarRunsList/useSidebarRunsList.ts | 34 ++++++++--------- .../useNewAgentLibraryView.ts | 38 +++++++++++++++++++ 5 files changed, 90 insertions(+), 41 deletions(-) diff --git 
a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx index 8ee76ce37a..2831d6cdba 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx @@ -1,6 +1,5 @@ "use client"; -import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; import { Button } from "@/components/atoms/Button/Button"; import { Breadcrumbs } from "@/components/molecules/Breadcrumbs/Breadcrumbs"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; @@ -39,14 +38,11 @@ export function NewAgentLibraryView() { handleSelectRun, handleCountsChange, handleClearSelectedRun, + onRunInitiated, + onTriggerSetup, + onScheduleCreated, } = useNewAgentLibraryView(); - function onTriggerSetup(newTrigger: LibraryAgentPreset) { - if (!agent) return; - - handleSelectRun(newTrigger.id, "triggers"); - } - if (error) { return (

- +
); @@ -101,10 +102,8 @@ export function NewAgentLibraryView() { } agent={agent} - onRunCreated={(execution) => handleSelectRun(execution.id, "runs")} - onScheduleCreated={(schedule) => - handleSelectRun(schedule.id, "scheduled") - } + onRunCreated={onRunInitiated} + onScheduleCreated={onScheduleCreated} onTriggerSetup={onTriggerSetup} initialInputValues={activeTemplate?.inputs} initialInputCredentials={activeTemplate?.credentials} @@ -167,7 +166,12 @@ export function NewAgentLibraryView() { ) : ( - + )}
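The library view switches from the empty state to the sidebar layout as soon as any item exists. A sketch of the count bookkeeping this relies on, assuming the counts shape passed to `onCountsChange`; the actual hook logic lives in `useNewAgentLibraryView` and is not reproduced here:

```typescript
interface SidebarCounts {
  runsCount: number;
  schedulesCount: number;
  templatesCount: number;
  triggersCount: number;
  loading: boolean;
}

// Per the PR description, counts are bumped manually when the first task is
// initiated, so the empty state is replaced without waiting for a refetch.
function bumpCount(
  counts: SidebarCounts,
  kind: "runs" | "scheduled" | "triggers",
): SidebarCounts {
  switch (kind) {
    case "runs":
      return { ...counts, runsCount: counts.runsCount + 1 };
    case "scheduled":
      return { ...counts, schedulesCount: counts.schedulesCount + 1 };
    case "triggers":
      return { ...counts, triggersCount: counts.triggersCount + 1 };
  }
}

function hasAnyItems(c: SidebarCounts): boolean {
  return (
    c.runsCount + c.schedulesCount + c.templatesCount + c.triggersCount > 0
  );
}
```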
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.tsx index fcd3a7b87a..b997a33fcf 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.tsx @@ -1,5 +1,5 @@ import { - getGetV1ListGraphExecutionsInfiniteQueryOptions, + getGetV1ListGraphExecutionsQueryKey, usePostV1ExecuteGraphAgent, } from "@/app/api/__generated__/endpoints/graphs/graphs"; import { @@ -66,13 +66,11 @@ export function useAgentRunModal( toast({ title: "Agent execution started", }); - callbacks?.onRun?.(response.data as unknown as GraphExecutionMeta); // Invalidate runs list for this graph queryClient.invalidateQueries({ - queryKey: getGetV1ListGraphExecutionsInfiniteQueryOptions( - agent.graph_id, - ).queryKey, + queryKey: getGetV1ListGraphExecutionsQueryKey(agent.graph_id), }); + callbacks?.onRun?.(response.data); analytics.sendDatafastEvent("run_agent", { name: agent.name, id: agent.graph_id, @@ -91,17 +89,15 @@ export function useAgentRunModal( const setupTriggerMutation = usePostV2SetupTrigger({ mutation: { - onSuccess: (response: any) => { + onSuccess: (response) => { if (response.status === 200) { toast({ title: "Trigger setup complete", }); - callbacks?.onSetupTrigger?.(response.data); queryClient.invalidateQueries({ - queryKey: getGetV2ListPresetsQueryKey({ - graph_id: agent.graph_id, - }), + queryKey: getGetV2ListPresetsQueryKey({ graph_id: agent.graph_id }), }); + callbacks?.onSetupTrigger?.(response.data); setIsOpen(false); } }, diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx index 26bfbde882..3446611827 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx @@ -1,6 +1,8 @@ "use client"; import { getV1GetGraphVersion } from "@/app/api/__generated__/endpoints/graphs/graphs"; +import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; +import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; import { Button } from "@/components/atoms/Button/Button"; @@ -16,10 +18,17 @@ import { EmptyTasksIllustration } from "./EmptyTasksIllustration"; type Props = { agent: LibraryAgent; + onRun?: (run: GraphExecutionMeta) => void; onTriggerSetup?: (preset: LibraryAgentPreset) => void; + onScheduleCreated?: (schedule: GraphExecutionJobInfo) => void; }; -export function EmptyTasks({ agent, onTriggerSetup }: Props) { +export function EmptyTasks({ + agent, + onRun, + onTriggerSetup, + onScheduleCreated, +}: Props) { const { toast } = useToast(); async function 
handleExport() { @@ -77,7 +86,9 @@ export function EmptyTasks({ agent, onTriggerSetup }: Props) { } agent={agent} + onRunCreated={onRun} onTriggerSetup={onTriggerSetup} + onScheduleCreated={onScheduleCreated} />
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts index 38ac1d79c2..7f7155bbdf 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts @@ -32,7 +32,7 @@ function parseTab( } type Args = { - graphId?: string; + graphId: string; onSelectRun: ( runId: string, tab?: "runs" | "scheduled" | "templates" | "triggers", @@ -60,7 +60,7 @@ export function useSidebarRunsList({ const queryClient = useQueryClient(); const runsQuery = useGetV1ListGraphExecutionsInfinite( - graphId || "", + graphId, { page: 1, page_size: 20 }, { query: { @@ -71,22 +71,19 @@ export function useSidebarRunsList({ }, ); - const schedulesQuery = useGetV1ListExecutionSchedulesForAGraph( - graphId || "", - { - query: { - enabled: !!graphId, - select: (r) => okData(r) ?? [], - }, + const schedulesQuery = useGetV1ListExecutionSchedulesForAGraph(graphId, { + query: { + enabled: !!graphId, + select: (r) => okData(r), }, - ); + }); const presetsQuery = useGetV2ListPresets( - { graph_id: graphId || null, page: 1, page_size: 100 }, + { graph_id: graphId, page: 1, page_size: 100 }, { query: { enabled: !!graphId, - select: (r) => okData(r)?.presets ?? [], + select: (r) => okData(r)?.presets, }, }, ); @@ -99,11 +96,11 @@ export function useSidebarRunsList({ const schedules = schedulesQuery.data || []; const allPresets = presetsQuery.data || []; const triggers = useMemo( - () => allPresets.filter((preset) => preset.webhook_id && preset.webhook), + () => allPresets.filter((preset) => preset.webhook_id), [allPresets], ); const templates = useMemo( - () => allPresets.filter((preset) => !preset.webhook_id || !preset.webhook), + () => allPresets.filter((preset) => !preset.webhook_id), [allPresets], ); @@ -112,9 +109,11 @@ export function useSidebarRunsList({ const templatesCount = templates.length; const triggersCount = triggers.length; const loading = - !schedulesQuery.isSuccess || !runsQuery.isSuccess || + !schedulesQuery.isSuccess || !presetsQuery.isSuccess; + const stale = + runsQuery.isStale || schedulesQuery.isStale || presetsQuery.isStale; // Update query cache when execution events arrive via websocket useExecutionEvents({ @@ -131,7 +130,7 @@ export function useSidebarRunsList({ // Notify parent about counts and loading state useEffect(() => { - if (onCountsChange) { + if (onCountsChange && !stale) { onCountsChange({ runsCount, schedulesCount, @@ -141,12 +140,13 @@ export function useSidebarRunsList({ }); } }, [ + onCountsChange, runsCount, schedulesCount, templatesCount, triggersCount, loading, - onCountsChange, + stale, ]); useEffect(() => { diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts index c016a27722..b280400401 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts +++ 
b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts @@ -1,5 +1,7 @@ import { useGetV2GetLibraryAgent } from "@/app/api/__generated__/endpoints/library/library"; import { useGetV2GetASpecificPreset } from "@/app/api/__generated__/endpoints/presets/presets"; +import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; +import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; import { okData } from "@/app/api/helpers"; @@ -153,6 +155,39 @@ export function useNewAgentLibraryView() { [], ); + function onItemCreated( + createEvent: + | { type: "runs"; item: GraphExecutionMeta } + | { type: "triggers"; item: LibraryAgentPreset } + | { type: "scheduled"; item: GraphExecutionJobInfo }, + ) { + if (!hasAnyItems) { + // Manually increment item count to flip hasAnyItems and showSidebarLayout + const counts = { + runsCount: createEvent.type === "runs" ? 1 : 0, + triggersCount: createEvent.type === "triggers" ? 1 : 0, + schedulesCount: createEvent.type === "scheduled" ? 1 : 0, + templatesCount: 0, + }; + handleCountsChange(counts); + } + } + + function onRunInitiated(newRun: GraphExecutionMeta) { + if (!agent) return; + onItemCreated({ item: newRun, type: "runs" }); + } + + function onTriggerSetup(newTrigger: LibraryAgentPreset) { + if (!agent) return; + onItemCreated({ item: newTrigger, type: "triggers" }); + } + + function onScheduleCreated(newSchedule: GraphExecutionJobInfo) { + if (!agent) return; + onItemCreated({ item: newSchedule, type: "scheduled" }); + } + return { agentId: id, agent, @@ -169,5 +204,8 @@ handleClearSelectedRun, handleCountsChange, handleSelectRun, + onRunInitiated, + onTriggerSetup, + onScheduleCreated, }; }

From 71157bddd77032bd111183ddf79c918d4daa78fe Mon Sep 17 00:00:00 2001
From: Zamil Majdy
Date: Fri, 12 Dec 2025 16:58:06 +0700
Subject: [PATCH 44/58] feat(backend): add agent mode support to SmartDecisionMakerBlock with autonomous tool execution loops (#11547)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Summary

**🚀 Major Feature: Agent Mode Support**

Adds autonomous agent mode to SmartDecisionMakerBlock, enabling it to execute tools directly in loops until tasks are completed, rather than just yielding tool calls for external execution.
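To make that flow concrete, here is a minimal, self-contained sketch of the agent-mode loop. It is an illustration only: `call_llm` and `execute_tool` are hypothetical stand-ins for the block's real helpers (`_attempt_llm_call_with_validation` and `_execute_single_tool_with_manager`), which in the actual implementation go through the database manager and the `ExecutionProcessor` rather than plain functions.

```python
import asyncio
from dataclasses import dataclass, field


@dataclass
class LLMResult:
    response: str | None = None
    tool_calls: list[dict] = field(default_factory=list)


async def call_llm(conversation: list[dict]) -> LLMResult:
    # Stand-in: request one tool call first, then report completion.
    if not any(m.get("role") == "tool" for m in conversation):
        return LLMResult(
            tool_calls=[{"id": "call_1", "name": "search", "args": {"q": "x"}}]
        )
    return LLMResult(response="Task completed")


async def execute_tool(call: dict) -> dict:
    # Stand-in for executing the sink node and reading its outputs back.
    return {"role": "tool", "tool_call_id": call["id"], "content": "ok"}


async def agent_loop(prompt: list[dict], max_iterations: int):
    # 0 never reaches this loop (traditional mode); -1 means no limit; N caps it.
    iteration = 0
    conversation = list(prompt)
    while max_iterations < 0 or iteration < max_iterations:
        iteration += 1
        result = await call_llm(conversation)
        if not result.tool_calls:  # no tool calls -> the task is finished
            return result.response, conversation
        tool_outputs = [await execute_tool(c) for c in result.tool_calls]
        conversation.extend(tool_outputs)  # feed results back to the model
    return f"stopped after {iteration} iterations", conversation


print(asyncio.run(agent_loop([{"role": "user", "content": "do the task"}], 3)))
```

The same termination rules apply in the real block: the loop ends either when the model stops requesting tools or when the iteration limit is reached.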
## ⭐ **Key New Features**

### 🤖 **Agent Mode with Tool Execution Loops**
- **New `agent_mode_max_iterations` parameter** controls execution behavior:
  - `0` = Traditional mode (single LLM call, yield tool calls)
  - `1+` = Agent mode with iteration limit
  - `-1` = Infinite agent mode (loop until finished)

### 🔄 **Autonomous Tool Execution**
- **Direct tool execution** instead of yielding for external handling
- **Multi-iteration loops** with conversation state management
- **Automatic completion detection** when LLM stops making tool calls
- **Iteration limit handling** with graceful completion messages

### 🏗️ **Proper Database Operations**
- **Replace manual execution ID generation** with proper `upsert_execution_input`/`upsert_execution_output`
- **Real NodeExecutionEntry objects** from database results
- **Proper execution status management**: QUEUED → RUNNING → COMPLETED/FAILED

### 🔧 **Enhanced Type Safety**
- **Pydantic models** replace TypedDict: `ToolInfo` and `ExecutionParams`
- **Runtime validation** with better error messages
- **Improved developer experience** with IDE support

## 🔧 **Technical Implementation**

### Agent Mode Flow:
```python
# Agent mode enabled with iterations
if input_data.agent_mode_max_iterations != 0:
    async for result in self._execute_tools_agent_mode(...):
        yield result  # "conversations", "finished"
    return

# Traditional mode (existing behavior)
# Single LLM call + yield tool calls for external execution
```

### Tool Execution with Database Operations:
```python
# Before: Manual execution IDs
tool_exec_id = f"{node_exec_id}_tool_{sink_node_id}_{len(input_data)}"

# After: Proper database operations
node_exec_result, final_input_data = await db_client.upsert_execution_input(
    node_id=sink_node_id,
    graph_exec_id=execution_params.graph_exec_id,
    input_name=input_name,
    input_data=input_value,
)
```

### Type Safety with Pydantic:
```python
# Before: Dict access prone to errors
execution_params["user_id"]

# After: Validated model access
execution_params.user_id  # Runtime validation + IDE support
```

## 🧪 **Comprehensive Test Coverage**
- **Agent mode execution tests** with multi-iteration scenarios
- **Database operation verification**
- **Type safety validation**
- **Backward compatibility** for traditional mode
- **Enhanced dynamic fields tests**

## 📊 **Usage Examples**

### Traditional Mode (Existing Behavior):
```python
SmartDecisionMakerBlock.Input(
    prompt="Search for keywords",
    agent_mode_max_iterations=0  # Default
)
# → Yields tool calls for external execution
```

### Agent Mode (New Feature):
```python
SmartDecisionMakerBlock.Input(
    prompt="Complete this task using available tools",
    agent_mode_max_iterations=5  # Max 5 iterations
)
# → Executes tools directly until task completion or iteration limit
```

### Infinite Agent Mode:
```python
SmartDecisionMakerBlock.Input(
    prompt="Analyze and process this data thoroughly",
    agent_mode_max_iterations=-1  # No limit, run until finished
)
# → Executes tools autonomously until LLM indicates completion
```

## ✅ **Backward Compatibility**
- **Zero breaking changes** to existing functionality
- **Traditional mode remains default** (`agent_mode_max_iterations=0`)
- **All existing tests pass**
- **Same API for tool definitions and execution**

This transforms the SmartDecisionMakerBlock from a simple tool call generator into a powerful autonomous agent capable of complex multi-step task execution!
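Going by the new tests in this PR, the block's observable outputs differ between the two modes roughly as sketched below. `<sink_node_id>` is a placeholder for the connected tool node's ID, and the concrete argument pins depend on that node's schema; this is an illustration, not an exhaustive contract.

```python
# Output names the block yields in each mode, per the tests added in this PR.
# Tool-argument pins follow the existing "tools_^_<node>_~_<arg>" convention
# and only appear in traditional mode.
TRADITIONAL_MODE_OUTPUTS = {
    "tool_functions",                                    # JSON tool signatures (both modes)
    "tools_^_<sink_node_id>_~_query",                    # one pin per tool argument,
    "tools_^_<sink_node_id>_~_max_keyword_difficulty",   # executed externally downstream
    "conversations",                                     # prompt + assistant tool call
}

AGENT_MODE_OUTPUTS = {
    "tool_functions",
    "conversations",  # full multi-iteration conversation, including tool results
    "finished",       # final text once the loop completes or hits its iteration limit
}

# Tool calls never leave the block in agent mode; they are executed in-process
# and only the conversation and the "finished" text are yielded.
print(sorted(TRADITIONAL_MODE_OUTPUTS - AGENT_MODE_OUTPUTS))
```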
🎯 πŸ€– Generated with [Claude Code](https://claude.ai/code) --------- Co-authored-by: Claude --- .../backend/blocks/smart_decision_maker.py | 415 +++++++++++++++++- .../blocks/test/test_smart_decision_maker.py | 375 +++++++++++++++- ...est_smart_decision_maker_dynamic_fields.py | 187 ++++++-- .../backend/backend/data/execution.py | 28 ++ .../backend/backend/executor/database.py | 3 + .../backend/backend/executor/manager.py | 20 +- .../backend/backend/util/prompt.py | 113 ++++- 7 files changed, 1046 insertions(+), 95 deletions(-) diff --git a/autogpt_platform/backend/backend/blocks/smart_decision_maker.py b/autogpt_platform/backend/backend/blocks/smart_decision_maker.py index 5f9d562e60..e2e5cfa3e4 100644 --- a/autogpt_platform/backend/backend/blocks/smart_decision_maker.py +++ b/autogpt_platform/backend/backend/blocks/smart_decision_maker.py @@ -1,8 +1,11 @@ import logging import re from collections import Counter +from concurrent.futures import Future from typing import TYPE_CHECKING, Any +from pydantic import BaseModel + import backend.blocks.llm as llm from backend.blocks.agent import AgentExecutorBlock from backend.data.block import ( @@ -20,16 +23,41 @@ from backend.data.dynamic_fields import ( is_dynamic_field, is_tool_pin, ) +from backend.data.execution import ExecutionContext from backend.data.model import NodeExecutionStats, SchemaField from backend.util import json from backend.util.clients import get_database_manager_async_client +from backend.util.prompt import MAIN_OBJECTIVE_PREFIX if TYPE_CHECKING: from backend.data.graph import Link, Node + from backend.executor.manager import ExecutionProcessor logger = logging.getLogger(__name__) +class ToolInfo(BaseModel): + """Processed tool call information.""" + + tool_call: Any # The original tool call object from LLM response + tool_name: str # The function name + tool_def: dict[str, Any] # The tool definition from tool_functions + input_data: dict[str, Any] # Processed input data ready for tool execution + field_mapping: dict[str, str] # Field name mapping for the tool + + +class ExecutionParams(BaseModel): + """Tool execution parameters.""" + + user_id: str + graph_id: str + node_id: str + graph_version: int + graph_exec_id: str + node_exec_id: str + execution_context: "ExecutionContext" + + def _get_tool_requests(entry: dict[str, Any]) -> list[str]: """ Return a list of tool_call_ids if the entry is a tool request. @@ -105,6 +133,50 @@ def _create_tool_response(call_id: str, output: Any) -> dict[str, Any]: return {"role": "tool", "tool_call_id": call_id, "content": content} +def _combine_tool_responses(tool_outputs: list[dict[str, Any]]) -> list[dict[str, Any]]: + """ + Combine multiple Anthropic tool responses into a single user message. + For non-Anthropic formats, returns the original list unchanged. 
+ """ + if len(tool_outputs) <= 1: + return tool_outputs + + # Anthropic responses have role="user", type="message", and content is a list with tool_result items + anthropic_responses = [ + output + for output in tool_outputs + if ( + output.get("role") == "user" + and output.get("type") == "message" + and isinstance(output.get("content"), list) + and any( + item.get("type") == "tool_result" + for item in output.get("content", []) + if isinstance(item, dict) + ) + ) + ] + + if len(anthropic_responses) > 1: + combined_content = [ + item for response in anthropic_responses for item in response["content"] + ] + + combined_response = { + "role": "user", + "type": "message", + "content": combined_content, + } + + non_anthropic_responses = [ + output for output in tool_outputs if output not in anthropic_responses + ] + + return [combined_response] + non_anthropic_responses + + return tool_outputs + + def _convert_raw_response_to_dict(raw_response: Any) -> dict[str, Any]: """ Safely convert raw_response to dictionary format for conversation history. @@ -204,6 +276,17 @@ class SmartDecisionMakerBlock(Block): default="localhost:11434", description="Ollama host for local models", ) + agent_mode_max_iterations: int = SchemaField( + title="Agent Mode Max Iterations", + description="Maximum iterations for agent mode. 0 = traditional mode (single LLM call, yield tool calls for external execution), -1 = infinite agent mode (loop until finished), 1+ = agent mode with max iterations limit.", + advanced=True, + default=0, + ) + conversation_compaction: bool = SchemaField( + default=True, + title="Context window auto-compaction", + description="Automatically compact the context window once it hits the limit", + ) @classmethod def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]: @@ -506,6 +589,7 @@ class SmartDecisionMakerBlock(Block): Returns the response if successful, raises ValueError if validation fails. """ resp = await llm.llm_call( + compress_prompt_to_fit=input_data.conversation_compaction, credentials=credentials, llm_model=input_data.model, prompt=current_prompt, @@ -593,6 +677,291 @@ class SmartDecisionMakerBlock(Block): return resp + def _process_tool_calls( + self, response, tool_functions: list[dict[str, Any]] + ) -> list[ToolInfo]: + """Process tool calls and extract tool definitions, arguments, and input data. 
+ + Returns a list of tool info dicts with: + - tool_call: The original tool call object + - tool_name: The function name + - tool_def: The tool definition from tool_functions + - input_data: Processed input data dict (includes None values) + - field_mapping: Field name mapping for the tool + """ + if not response.tool_calls: + return [] + + processed_tools = [] + for tool_call in response.tool_calls: + tool_name = tool_call.function.name + tool_args = json.loads(tool_call.function.arguments) + + tool_def = next( + ( + tool + for tool in tool_functions + if tool["function"]["name"] == tool_name + ), + None, + ) + if not tool_def: + if len(tool_functions) == 1: + tool_def = tool_functions[0] + else: + continue + + # Build input data for the tool + input_data = {} + field_mapping = tool_def["function"].get("_field_mapping", {}) + if "function" in tool_def and "parameters" in tool_def["function"]: + expected_args = tool_def["function"]["parameters"].get("properties", {}) + for clean_arg_name in expected_args: + original_field_name = field_mapping.get( + clean_arg_name, clean_arg_name + ) + arg_value = tool_args.get(clean_arg_name) + # Include all expected parameters, even if None (for backward compatibility with tests) + input_data[original_field_name] = arg_value + + processed_tools.append( + ToolInfo( + tool_call=tool_call, + tool_name=tool_name, + tool_def=tool_def, + input_data=input_data, + field_mapping=field_mapping, + ) + ) + + return processed_tools + + def _update_conversation( + self, prompt: list[dict], response, tool_outputs: list | None = None + ): + """Update conversation history with response and tool outputs.""" + # Don't add separate reasoning message with tool calls (breaks Anthropic's tool_use->tool_result pairing) + assistant_message = _convert_raw_response_to_dict(response.raw_response) + has_tool_calls = isinstance(assistant_message.get("content"), list) and any( + item.get("type") == "tool_use" + for item in assistant_message.get("content", []) + ) + + if response.reasoning and not has_tool_calls: + prompt.append( + {"role": "assistant", "content": f"[Reasoning]: {response.reasoning}"} + ) + + prompt.append(assistant_message) + + if tool_outputs: + prompt.extend(tool_outputs) + + async def _execute_single_tool_with_manager( + self, + tool_info: ToolInfo, + execution_params: ExecutionParams, + execution_processor: "ExecutionProcessor", + ) -> dict: + """Execute a single tool using the execution manager for proper integration.""" + # Lazy imports to avoid circular dependencies + from backend.data.execution import NodeExecutionEntry + + tool_call = tool_info.tool_call + tool_def = tool_info.tool_def + raw_input_data = tool_info.input_data + + # Get sink node and field mapping + sink_node_id = tool_def["function"]["_sink_node_id"] + + # Use proper database operations for tool execution + db_client = get_database_manager_async_client() + + # Get target node + target_node = await db_client.get_node(sink_node_id) + if not target_node: + raise ValueError(f"Target node {sink_node_id} not found") + + # Create proper node execution using upsert_execution_input + node_exec_result = None + final_input_data = None + + # Add all inputs to the execution + if not raw_input_data: + raise ValueError(f"Tool call has no input data: {tool_call}") + + for input_name, input_value in raw_input_data.items(): + node_exec_result, final_input_data = await db_client.upsert_execution_input( + node_id=sink_node_id, + graph_exec_id=execution_params.graph_exec_id, + input_name=input_name, + 
input_data=input_value, + ) + + assert node_exec_result is not None, "node_exec_result should not be None" + + # Create NodeExecutionEntry for execution manager + node_exec_entry = NodeExecutionEntry( + user_id=execution_params.user_id, + graph_exec_id=execution_params.graph_exec_id, + graph_id=execution_params.graph_id, + graph_version=execution_params.graph_version, + node_exec_id=node_exec_result.node_exec_id, + node_id=sink_node_id, + block_id=target_node.block_id, + inputs=final_input_data or {}, + execution_context=execution_params.execution_context, + ) + + # Use the execution manager to execute the tool node + try: + # Get NodeExecutionProgress from the execution manager's running nodes + node_exec_progress = execution_processor.running_node_execution[ + sink_node_id + ] + + # Use the execution manager's own graph stats + graph_stats_pair = ( + execution_processor.execution_stats, + execution_processor.execution_stats_lock, + ) + + # Create a completed future for the task tracking system + node_exec_future = Future() + node_exec_progress.add_task( + node_exec_id=node_exec_result.node_exec_id, + task=node_exec_future, + ) + + # Execute the node directly since we're in the SmartDecisionMaker context + node_exec_future.set_result( + await execution_processor.on_node_execution( + node_exec=node_exec_entry, + node_exec_progress=node_exec_progress, + nodes_input_masks=None, + graph_stats_pair=graph_stats_pair, + ) + ) + + # Get outputs from database after execution completes using database manager client + node_outputs = await db_client.get_execution_outputs_by_node_exec_id( + node_exec_result.node_exec_id + ) + + # Create tool response + tool_response_content = ( + json.dumps(node_outputs) + if node_outputs + else "Tool executed successfully" + ) + return _create_tool_response(tool_call.id, tool_response_content) + + except Exception as e: + logger.error(f"Tool execution with manager failed: {e}") + # Return error response + return _create_tool_response( + tool_call.id, f"Tool execution failed: {str(e)}" + ) + + async def _execute_tools_agent_mode( + self, + input_data, + credentials, + tool_functions: list[dict[str, Any]], + prompt: list[dict], + graph_exec_id: str, + node_id: str, + node_exec_id: str, + user_id: str, + graph_id: str, + graph_version: int, + execution_context: ExecutionContext, + execution_processor: "ExecutionProcessor", + ): + """Execute tools in agent mode with a loop until finished.""" + max_iterations = input_data.agent_mode_max_iterations + iteration = 0 + + # Execution parameters for tool execution + execution_params = ExecutionParams( + user_id=user_id, + graph_id=graph_id, + node_id=node_id, + graph_version=graph_version, + graph_exec_id=graph_exec_id, + node_exec_id=node_exec_id, + execution_context=execution_context, + ) + + current_prompt = list(prompt) + + while max_iterations < 0 or iteration < max_iterations: + iteration += 1 + logger.debug(f"Agent mode iteration {iteration}") + + # Prepare prompt for this iteration + iteration_prompt = list(current_prompt) + + # On the last iteration, add a special system message to encourage completion + if max_iterations > 0 and iteration == max_iterations: + last_iteration_message = { + "role": "system", + "content": f"{MAIN_OBJECTIVE_PREFIX}This is your last iteration ({iteration}/{max_iterations}). " + "Try to complete the task with the information you have. If you cannot fully complete it, " + "provide a summary of what you've accomplished and what remains to be done. 
" + "Prefer finishing with a clear response rather than making additional tool calls.", + } + iteration_prompt.append(last_iteration_message) + + # Get LLM response + try: + response = await self._attempt_llm_call_with_validation( + credentials, input_data, iteration_prompt, tool_functions + ) + except Exception as e: + yield "error", f"LLM call failed in agent mode iteration {iteration}: {str(e)}" + return + + # Process tool calls + processed_tools = self._process_tool_calls(response, tool_functions) + + # If no tool calls, we're done + if not processed_tools: + yield "finished", response.response + self._update_conversation(current_prompt, response) + yield "conversations", current_prompt + return + + # Execute tools and collect responses + tool_outputs = [] + for tool_info in processed_tools: + try: + tool_response = await self._execute_single_tool_with_manager( + tool_info, execution_params, execution_processor + ) + tool_outputs.append(tool_response) + except Exception as e: + logger.error(f"Tool execution failed: {e}") + # Create error response for the tool + error_response = _create_tool_response( + tool_info.tool_call.id, f"Error: {str(e)}" + ) + tool_outputs.append(error_response) + + tool_outputs = _combine_tool_responses(tool_outputs) + + self._update_conversation(current_prompt, response, tool_outputs) + + # Yield intermediate conversation state + yield "conversations", current_prompt + + # If we reach max iterations, yield the current state + if max_iterations < 0: + yield "finished", f"Agent mode completed after {iteration} iterations" + else: + yield "finished", f"Agent mode completed after {max_iterations} iterations (limit reached)" + yield "conversations", current_prompt + async def run( self, input_data: Input, @@ -603,8 +972,12 @@ class SmartDecisionMakerBlock(Block): graph_exec_id: str, node_exec_id: str, user_id: str, + graph_version: int, + execution_context: ExecutionContext, + execution_processor: "ExecutionProcessor", **kwargs, ) -> BlockOutput: + tool_functions = await self._create_tool_node_signatures(node_id) yield "tool_functions", json.dumps(tool_functions) @@ -648,24 +1021,52 @@ class SmartDecisionMakerBlock(Block): input_data.prompt = llm.fmt.format_string(input_data.prompt, values) input_data.sys_prompt = llm.fmt.format_string(input_data.sys_prompt, values) - prefix = "[Main Objective Prompt]: " - if input_data.sys_prompt and not any( - p["role"] == "system" and p["content"].startswith(prefix) for p in prompt + p["role"] == "system" and p["content"].startswith(MAIN_OBJECTIVE_PREFIX) + for p in prompt ): - prompt.append({"role": "system", "content": prefix + input_data.sys_prompt}) + prompt.append( + { + "role": "system", + "content": MAIN_OBJECTIVE_PREFIX + input_data.sys_prompt, + } + ) if input_data.prompt and not any( - p["role"] == "user" and p["content"].startswith(prefix) for p in prompt + p["role"] == "user" and p["content"].startswith(MAIN_OBJECTIVE_PREFIX) + for p in prompt ): - prompt.append({"role": "user", "content": prefix + input_data.prompt}) + prompt.append( + {"role": "user", "content": MAIN_OBJECTIVE_PREFIX + input_data.prompt} + ) + # Execute tools based on the selected mode + if input_data.agent_mode_max_iterations != 0: + # In agent mode, execute tools directly in a loop until finished + async for result in self._execute_tools_agent_mode( + input_data=input_data, + credentials=credentials, + tool_functions=tool_functions, + prompt=prompt, + graph_exec_id=graph_exec_id, + node_id=node_id, + node_exec_id=node_exec_id, + user_id=user_id, 
+ graph_id=graph_id, + graph_version=graph_version, + execution_context=execution_context, + execution_processor=execution_processor, + ): + yield result + return + + # One-off mode: single LLM call and yield tool calls for external execution current_prompt = list(prompt) max_attempts = max(1, int(input_data.retry)) response = None last_error = None - for attempt in range(max_attempts): + for _ in range(max_attempts): try: response = await self._attempt_llm_call_with_validation( credentials, input_data, current_prompt, tool_functions diff --git a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py index 9d649e36c5..29f572d0d6 100644 --- a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py +++ b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py @@ -1,7 +1,11 @@ import logging +import threading +from collections import defaultdict +from unittest.mock import AsyncMock, MagicMock, patch import pytest +from backend.data.execution import ExecutionContext from backend.data.model import ProviderName, User from backend.server.model import CreateGraph from backend.server.rest_api import AgentServer @@ -17,10 +21,10 @@ async def create_graph(s: SpinTestServer, g, u: User): async def create_credentials(s: SpinTestServer, u: User): - import backend.blocks.llm as llm + import backend.blocks.llm as llm_module provider = ProviderName.OPENAI - credentials = llm.TEST_CREDENTIALS + credentials = llm_module.TEST_CREDENTIALS return await s.agent_server.test_create_credentials(u.id, provider, credentials) @@ -196,8 +200,6 @@ async def test_smart_decision_maker_function_signature(server: SpinTestServer): @pytest.mark.asyncio async def test_smart_decision_maker_tracks_llm_stats(): """Test that SmartDecisionMakerBlock correctly tracks LLM usage stats.""" - from unittest.mock import MagicMock, patch - import backend.blocks.llm as llm_module from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock @@ -216,7 +218,6 @@ async def test_smart_decision_maker_tracks_llm_stats(): } # Mock the _create_tool_node_signatures method to avoid database calls - from unittest.mock import AsyncMock with patch( "backend.blocks.llm.llm_call", @@ -234,10 +235,19 @@ async def test_smart_decision_maker_tracks_llm_stats(): prompt="Should I continue with this task?", model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=0, ) # Execute the block outputs = {} + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + async for output_name, output_data in block.run( input_data, credentials=llm_module.TEST_CREDENTIALS, @@ -246,6 +256,9 @@ async def test_smart_decision_maker_tracks_llm_stats(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_data @@ -263,8 +276,6 @@ async def test_smart_decision_maker_tracks_llm_stats(): @pytest.mark.asyncio async def test_smart_decision_maker_parameter_validation(): """Test that SmartDecisionMakerBlock correctly validates tool call parameters.""" - from unittest.mock import MagicMock, patch - import backend.blocks.llm as llm_module from backend.blocks.smart_decision_maker import 
SmartDecisionMakerBlock @@ -311,8 +322,6 @@ async def test_smart_decision_maker_parameter_validation(): mock_response_with_typo.reasoning = None mock_response_with_typo.raw_response = {"role": "assistant", "content": None} - from unittest.mock import AsyncMock - with patch( "backend.blocks.llm.llm_call", new_callable=AsyncMock, @@ -329,8 +338,17 @@ async def test_smart_decision_maker_parameter_validation(): model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore retry=2, # Set retry to 2 for testing + agent_mode_max_iterations=0, ) + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + # Should raise ValueError after retries due to typo'd parameter name with pytest.raises(ValueError) as exc_info: outputs = {} @@ -342,6 +360,9 @@ async def test_smart_decision_maker_parameter_validation(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_data @@ -368,8 +389,6 @@ async def test_smart_decision_maker_parameter_validation(): mock_response_missing_required.reasoning = None mock_response_missing_required.raw_response = {"role": "assistant", "content": None} - from unittest.mock import AsyncMock - with patch( "backend.blocks.llm.llm_call", new_callable=AsyncMock, @@ -385,8 +404,17 @@ async def test_smart_decision_maker_parameter_validation(): prompt="Search for keywords", model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=0, ) + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + # Should raise ValueError due to missing required parameter with pytest.raises(ValueError) as exc_info: outputs = {} @@ -398,6 +426,9 @@ async def test_smart_decision_maker_parameter_validation(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_data @@ -418,8 +449,6 @@ async def test_smart_decision_maker_parameter_validation(): mock_response_valid.reasoning = None mock_response_valid.raw_response = {"role": "assistant", "content": None} - from unittest.mock import AsyncMock - with patch( "backend.blocks.llm.llm_call", new_callable=AsyncMock, @@ -435,10 +464,19 @@ async def test_smart_decision_maker_parameter_validation(): prompt="Search for keywords", model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=0, ) # Should succeed - optional parameter missing is OK outputs = {} + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + async for output_name, output_data in block.run( input_data, credentials=llm_module.TEST_CREDENTIALS, @@ -447,6 +485,9 @@ async def test_smart_decision_maker_parameter_validation(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + 
execution_processor=mock_execution_processor, ): outputs[output_name] = output_data @@ -472,8 +513,6 @@ async def test_smart_decision_maker_parameter_validation(): mock_response_all_params.reasoning = None mock_response_all_params.raw_response = {"role": "assistant", "content": None} - from unittest.mock import AsyncMock - with patch( "backend.blocks.llm.llm_call", new_callable=AsyncMock, @@ -489,10 +528,19 @@ async def test_smart_decision_maker_parameter_validation(): prompt="Search for keywords", model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=0, ) # Should succeed with all parameters outputs = {} + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + async for output_name, output_data in block.run( input_data, credentials=llm_module.TEST_CREDENTIALS, @@ -501,6 +549,9 @@ async def test_smart_decision_maker_parameter_validation(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_data @@ -513,8 +564,6 @@ async def test_smart_decision_maker_parameter_validation(): @pytest.mark.asyncio async def test_smart_decision_maker_raw_response_conversion(): """Test that SmartDecisionMaker correctly handles different raw_response types with retry mechanism.""" - from unittest.mock import MagicMock, patch - import backend.blocks.llm as llm_module from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock @@ -584,7 +633,6 @@ async def test_smart_decision_maker_raw_response_conversion(): ) # Mock llm_call to return different responses on different calls - from unittest.mock import AsyncMock with patch( "backend.blocks.llm.llm_call", new_callable=AsyncMock @@ -603,10 +651,19 @@ async def test_smart_decision_maker_raw_response_conversion(): model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore retry=2, + agent_mode_max_iterations=0, ) # Should succeed after retry, demonstrating our helper function works outputs = {} + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + async for output_name, output_data in block.run( input_data, credentials=llm_module.TEST_CREDENTIALS, @@ -615,6 +672,9 @@ async def test_smart_decision_maker_raw_response_conversion(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_data @@ -650,8 +710,6 @@ async def test_smart_decision_maker_raw_response_conversion(): "I'll help you with that." 
# Ollama returns string ) - from unittest.mock import AsyncMock - with patch( "backend.blocks.llm.llm_call", new_callable=AsyncMock, @@ -666,9 +724,18 @@ async def test_smart_decision_maker_raw_response_conversion(): prompt="Simple prompt", model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=0, ) outputs = {} + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + async for output_name, output_data in block.run( input_data, credentials=llm_module.TEST_CREDENTIALS, @@ -677,6 +744,9 @@ async def test_smart_decision_maker_raw_response_conversion(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_data @@ -696,8 +766,6 @@ async def test_smart_decision_maker_raw_response_conversion(): "content": "Test response", } # Dict format - from unittest.mock import AsyncMock - with patch( "backend.blocks.llm.llm_call", new_callable=AsyncMock, @@ -712,6 +780,160 @@ async def test_smart_decision_maker_raw_response_conversion(): prompt="Another test", model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=0, + ) + + outputs = {} + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + + async for output_name, output_data in block.run( + input_data, + credentials=llm_module.TEST_CREDENTIALS, + graph_id="test-graph-id", + node_id="test-node-id", + graph_exec_id="test-exec-id", + node_exec_id="test-node-exec-id", + user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, + ): + outputs[output_name] = output_data + + assert "finished" in outputs + assert outputs["finished"] == "Test response" + + +@pytest.mark.asyncio +async def test_smart_decision_maker_agent_mode(): + """Test that agent mode executes tools directly and loops until finished.""" + import backend.blocks.llm as llm_module + from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock + + block = SmartDecisionMakerBlock() + + # Mock tool call that requires multiple iterations + mock_tool_call_1 = MagicMock() + mock_tool_call_1.id = "call_1" + mock_tool_call_1.function.name = "search_keywords" + mock_tool_call_1.function.arguments = ( + '{"query": "test", "max_keyword_difficulty": 50}' + ) + + mock_response_1 = MagicMock() + mock_response_1.response = None + mock_response_1.tool_calls = [mock_tool_call_1] + mock_response_1.prompt_tokens = 50 + mock_response_1.completion_tokens = 25 + mock_response_1.reasoning = "Using search tool" + mock_response_1.raw_response = { + "role": "assistant", + "content": None, + "tool_calls": [{"id": "call_1", "type": "function"}], + } + + # Final response with no tool calls (finished) + mock_response_2 = MagicMock() + mock_response_2.response = "Task completed successfully" + mock_response_2.tool_calls = [] + mock_response_2.prompt_tokens = 30 + mock_response_2.completion_tokens = 15 + mock_response_2.reasoning = None + mock_response_2.raw_response = { + "role": "assistant", + "content": "Task completed successfully", + } + + # Mock the LLM call to return 
different responses on each iteration + llm_call_mock = AsyncMock() + llm_call_mock.side_effect = [mock_response_1, mock_response_2] + + # Mock tool node signatures + mock_tool_signatures = [ + { + "type": "function", + "function": { + "name": "search_keywords", + "_sink_node_id": "test-sink-node-id", + "_field_mapping": {}, + "parameters": { + "properties": { + "query": {"type": "string"}, + "max_keyword_difficulty": {"type": "integer"}, + }, + "required": ["query", "max_keyword_difficulty"], + }, + }, + } + ] + + # Mock database and execution components + mock_db_client = AsyncMock() + mock_node = MagicMock() + mock_node.block_id = "test-block-id" + mock_db_client.get_node.return_value = mock_node + + # Mock upsert_execution_input to return proper NodeExecutionResult and input data + mock_node_exec_result = MagicMock() + mock_node_exec_result.node_exec_id = "test-tool-exec-id" + mock_input_data = {"query": "test", "max_keyword_difficulty": 50} + mock_db_client.upsert_execution_input.return_value = ( + mock_node_exec_result, + mock_input_data, + ) + + # No longer need mock_execute_node since we use execution_processor.on_node_execution + + with patch("backend.blocks.llm.llm_call", llm_call_mock), patch.object( + block, "_create_tool_node_signatures", return_value=mock_tool_signatures + ), patch( + "backend.blocks.smart_decision_maker.get_database_manager_async_client", + return_value=mock_db_client, + ), patch( + "backend.executor.manager.async_update_node_execution_status", + new_callable=AsyncMock, + ), patch( + "backend.integrations.creds_manager.IntegrationCredentialsManager" + ): + + # Create a mock execution context + + mock_execution_context = ExecutionContext( + safe_mode=False, + ) + + # Create a mock execution processor for agent mode tests + + mock_execution_processor = AsyncMock() + # Configure the execution processor mock with required attributes + mock_execution_processor.running_node_execution = defaultdict(MagicMock) + mock_execution_processor.execution_stats = MagicMock() + mock_execution_processor.execution_stats_lock = threading.Lock() + + # Mock the on_node_execution method to return successful stats + mock_node_stats = MagicMock() + mock_node_stats.error = None # No error + mock_execution_processor.on_node_execution = AsyncMock( + return_value=mock_node_stats + ) + + # Mock the get_execution_outputs_by_node_exec_id method + mock_db_client.get_execution_outputs_by_node_exec_id.return_value = { + "result": {"status": "success", "data": "search completed"} + } + + # Test agent mode with max_iterations = 3 + input_data = SmartDecisionMakerBlock.Input( + prompt="Complete this task using tools", + model=llm_module.LlmModel.GPT4O, + credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=3, # Enable agent mode with 3 max iterations ) outputs = {} @@ -723,8 +945,115 @@ async def test_smart_decision_maker_raw_response_conversion(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_data + # Verify agent mode behavior + assert "tool_functions" in outputs # tool_functions is yielded in both modes assert "finished" in outputs - assert outputs["finished"] == "Test response" + assert outputs["finished"] == "Task completed successfully" + assert "conversations" in outputs + + # Verify the conversation includes tool responses + conversations = outputs["conversations"] + 
assert len(conversations) > 2 # Should have multiple conversation entries + + # Verify LLM was called twice (once for tool call, once for finish) + assert llm_call_mock.call_count == 2 + + # Verify tool was executed via execution processor + assert mock_execution_processor.on_node_execution.call_count == 1 + + +@pytest.mark.asyncio +async def test_smart_decision_maker_traditional_mode_default(): + """Test that default behavior (agent_mode_max_iterations=0) works as traditional mode.""" + import backend.blocks.llm as llm_module + from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock + + block = SmartDecisionMakerBlock() + + # Mock tool call + mock_tool_call = MagicMock() + mock_tool_call.function.name = "search_keywords" + mock_tool_call.function.arguments = ( + '{"query": "test", "max_keyword_difficulty": 50}' + ) + + mock_response = MagicMock() + mock_response.response = None + mock_response.tool_calls = [mock_tool_call] + mock_response.prompt_tokens = 50 + mock_response.completion_tokens = 25 + mock_response.reasoning = None + mock_response.raw_response = {"role": "assistant", "content": None} + + mock_tool_signatures = [ + { + "type": "function", + "function": { + "name": "search_keywords", + "_sink_node_id": "test-sink-node-id", + "_field_mapping": {}, + "parameters": { + "properties": { + "query": {"type": "string"}, + "max_keyword_difficulty": {"type": "integer"}, + }, + "required": ["query", "max_keyword_difficulty"], + }, + }, + } + ] + + with patch( + "backend.blocks.llm.llm_call", + new_callable=AsyncMock, + return_value=mock_response, + ), patch.object( + block, "_create_tool_node_signatures", return_value=mock_tool_signatures + ): + + # Test default behavior (traditional mode) + input_data = SmartDecisionMakerBlock.Input( + prompt="Test prompt", + model=llm_module.LlmModel.GPT4O, + credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=0, # Traditional mode + ) + + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + + outputs = {} + async for output_name, output_data in block.run( + input_data, + credentials=llm_module.TEST_CREDENTIALS, + graph_id="test-graph-id", + node_id="test-node-id", + graph_exec_id="test-exec-id", + node_exec_id="test-node-exec-id", + user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, + ): + outputs[output_name] = output_data + + # Verify traditional mode behavior + assert ( + "tool_functions" in outputs + ) # Should yield tool_functions in traditional mode + assert ( + "tools_^_test-sink-node-id_~_query" in outputs + ) # Should yield individual tool parameters + assert "tools_^_test-sink-node-id_~_max_keyword_difficulty" in outputs + assert "conversations" in outputs diff --git a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py index 55bcf4091e..d6a0c0fe39 100644 --- a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py +++ b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py @@ -1,7 +1,7 @@ """Comprehensive tests for SmartDecisionMakerBlock dynamic field handling.""" import json -from unittest.mock import AsyncMock, Mock, patch +from unittest.mock import AsyncMock, MagicMock, Mock, 
patch import pytest @@ -308,10 +308,47 @@ async def test_output_yielding_with_dynamic_fields(): ) as mock_llm: mock_llm.return_value = mock_response - # Mock the function signature creation - with patch.object( + # Mock the database manager to avoid HTTP calls during tool execution + with patch( + "backend.blocks.smart_decision_maker.get_database_manager_async_client" + ) as mock_db_manager, patch.object( block, "_create_tool_node_signatures", new_callable=AsyncMock ) as mock_sig: + # Set up the mock database manager + mock_db_client = AsyncMock() + mock_db_manager.return_value = mock_db_client + + # Mock the node retrieval + mock_target_node = Mock() + mock_target_node.id = "test-sink-node-id" + mock_target_node.block_id = "CreateDictionaryBlock" + mock_target_node.block = Mock() + mock_target_node.block.name = "Create Dictionary" + mock_db_client.get_node.return_value = mock_target_node + + # Mock the execution result creation + mock_node_exec_result = Mock() + mock_node_exec_result.node_exec_id = "mock-node-exec-id" + mock_final_input_data = { + "values_#_name": "Alice", + "values_#_age": 30, + "values_#_email": "alice@example.com", + } + mock_db_client.upsert_execution_input.return_value = ( + mock_node_exec_result, + mock_final_input_data, + ) + + # Mock the output retrieval + mock_outputs = { + "values_#_name": "Alice", + "values_#_age": 30, + "values_#_email": "alice@example.com", + } + mock_db_client.get_execution_outputs_by_node_exec_id.return_value = ( + mock_outputs + ) + mock_sig.return_value = [ { "type": "function", @@ -337,10 +374,16 @@ async def test_output_yielding_with_dynamic_fields(): prompt="Create a user dictionary", credentials=llm.TEST_CREDENTIALS_INPUT, model=llm.LlmModel.GPT4O, + agent_mode_max_iterations=0, # Use traditional mode to test output yielding ) # Run the block outputs = {} + from backend.data.execution import ExecutionContext + + mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_processor = MagicMock() + async for output_name, output_value in block.run( input_data, credentials=llm.TEST_CREDENTIALS, @@ -349,6 +392,9 @@ async def test_output_yielding_with_dynamic_fields(): graph_exec_id="test_exec", node_exec_id="test_node_exec", user_id="test_user", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_value @@ -511,45 +557,108 @@ async def test_validation_errors_dont_pollute_conversation(): } ] - # Create input data - from backend.blocks import llm + # Mock the database manager to avoid HTTP calls during tool execution + with patch( + "backend.blocks.smart_decision_maker.get_database_manager_async_client" + ) as mock_db_manager: + # Set up the mock database manager for agent mode + mock_db_client = AsyncMock() + mock_db_manager.return_value = mock_db_client - input_data = block.input_schema( - prompt="Test prompt", - credentials=llm.TEST_CREDENTIALS_INPUT, - model=llm.LlmModel.GPT4O, - retry=3, # Allow retries - ) + # Mock the node retrieval + mock_target_node = Mock() + mock_target_node.id = "test-sink-node-id" + mock_target_node.block_id = "TestBlock" + mock_target_node.block = Mock() + mock_target_node.block.name = "Test Block" + mock_db_client.get_node.return_value = mock_target_node - # Run the block - outputs = {} - async for output_name, output_value in block.run( - input_data, - credentials=llm.TEST_CREDENTIALS, - graph_id="test_graph", - node_id="test_node", - graph_exec_id="test_exec", - node_exec_id="test_node_exec", - 
user_id="test_user", - ): - outputs[output_name] = output_value + # Mock the execution result creation + mock_node_exec_result = Mock() + mock_node_exec_result.node_exec_id = "mock-node-exec-id" + mock_final_input_data = {"correct_param": "value"} + mock_db_client.upsert_execution_input.return_value = ( + mock_node_exec_result, + mock_final_input_data, + ) - # Verify we had 2 LLM calls (initial + retry) - assert call_count == 2 + # Mock the output retrieval + mock_outputs = {"correct_param": "value"} + mock_db_client.get_execution_outputs_by_node_exec_id.return_value = ( + mock_outputs + ) - # Check the final conversation output - final_conversation = outputs.get("conversations", []) + # Create input data + from backend.blocks import llm - # The final conversation should NOT contain the validation error message - error_messages = [ - msg - for msg in final_conversation - if msg.get("role") == "user" - and "parameter errors" in msg.get("content", "") - ] - assert ( - len(error_messages) == 0 - ), "Validation error leaked into final conversation" + input_data = block.input_schema( + prompt="Test prompt", + credentials=llm.TEST_CREDENTIALS_INPUT, + model=llm.LlmModel.GPT4O, + retry=3, # Allow retries + agent_mode_max_iterations=1, + ) - # The final conversation should only have the successful response - assert final_conversation[-1]["content"] == "valid" + # Run the block + outputs = {} + from backend.data.execution import ExecutionContext + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a proper mock execution processor for agent mode + from collections import defaultdict + + mock_execution_processor = AsyncMock() + mock_execution_processor.execution_stats = MagicMock() + mock_execution_processor.execution_stats_lock = MagicMock() + + # Create a mock NodeExecutionProgress for the sink node + mock_node_exec_progress = MagicMock() + mock_node_exec_progress.add_task = MagicMock() + mock_node_exec_progress.pop_output = MagicMock( + return_value=None + ) # No outputs to process + + # Set up running_node_execution as a defaultdict that returns our mock for any key + mock_execution_processor.running_node_execution = defaultdict( + lambda: mock_node_exec_progress + ) + + # Mock the on_node_execution method that gets called during tool execution + mock_node_stats = MagicMock() + mock_node_stats.error = None + mock_execution_processor.on_node_execution.return_value = ( + mock_node_stats + ) + + async for output_name, output_value in block.run( + input_data, + credentials=llm.TEST_CREDENTIALS, + graph_id="test_graph", + node_id="test_node", + graph_exec_id="test_exec", + node_exec_id="test_node_exec", + user_id="test_user", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, + ): + outputs[output_name] = output_value + + # Verify we had at least 1 LLM call + assert call_count >= 1 + + # Check the final conversation output + final_conversation = outputs.get("conversations", []) + + # The final conversation should NOT contain validation error messages + # Even if retries don't happen in agent mode, we should not leak errors + error_messages = [ + msg + for msg in final_conversation + if msg.get("role") == "user" + and "parameter errors" in msg.get("content", "") + ] + assert ( + len(error_messages) == 0 + ), "Validation error leaked into final conversation" diff --git a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py index d4b81bb1d3..020a5a1906 100644 --- 
a/autogpt_platform/backend/backend/data/execution.py +++ b/autogpt_platform/backend/backend/data/execution.py @@ -5,6 +5,7 @@ from enum import Enum from multiprocessing import Manager from queue import Empty from typing import ( + TYPE_CHECKING, Annotated, Any, AsyncGenerator, @@ -65,6 +66,9 @@ from .includes import ( ) from .model import CredentialsMetaInput, GraphExecutionStats, NodeExecutionStats +if TYPE_CHECKING: + pass + T = TypeVar("T") logger = logging.getLogger(__name__) @@ -836,6 +840,30 @@ async def upsert_execution_output( await AgentNodeExecutionInputOutput.prisma().create(data=data) +async def get_execution_outputs_by_node_exec_id( + node_exec_id: str, +) -> dict[str, Any]: + """ + Get all execution outputs for a specific node execution ID. + + Args: + node_exec_id: The node execution ID to get outputs for + + Returns: + Dictionary mapping output names to their data values + """ + outputs = await AgentNodeExecutionInputOutput.prisma().find_many( + where={"referencedByOutputExecId": node_exec_id} + ) + + result = {} + for output in outputs: + if output.data is not None: + result[output.name] = type_utils.convert(output.data, JsonValue) + + return result + + async def update_graph_execution_start_time( graph_exec_id: str, ) -> GraphExecution | None: diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py index 3d3ce3b791..9c2b3970c6 100644 --- a/autogpt_platform/backend/backend/executor/database.py +++ b/autogpt_platform/backend/backend/executor/database.py @@ -13,6 +13,7 @@ from backend.data.execution import ( get_block_error_stats, get_child_graph_executions, get_execution_kv_data, + get_execution_outputs_by_node_exec_id, get_frequently_executed_graphs, get_graph_execution_meta, get_graph_executions, @@ -147,6 +148,7 @@ class DatabaseManager(AppService): update_graph_execution_stats = _(update_graph_execution_stats) upsert_execution_input = _(upsert_execution_input) upsert_execution_output = _(upsert_execution_output) + get_execution_outputs_by_node_exec_id = _(get_execution_outputs_by_node_exec_id) get_execution_kv_data = _(get_execution_kv_data) set_execution_kv_data = _(set_execution_kv_data) get_block_error_stats = _(get_block_error_stats) @@ -277,6 +279,7 @@ class DatabaseManagerAsyncClient(AppServiceClient): get_user_integrations = d.get_user_integrations upsert_execution_input = d.upsert_execution_input upsert_execution_output = d.upsert_execution_output + get_execution_outputs_by_node_exec_id = d.get_execution_outputs_by_node_exec_id update_graph_execution_stats = d.update_graph_execution_stats update_node_execution_status = d.update_node_execution_status update_node_execution_status_batch = d.update_node_execution_status_batch diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index ee875bbf55..234f8127c8 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -133,9 +133,8 @@ def execute_graph( cluster_lock: ClusterLock, ): """Execute graph using thread-local ExecutionProcessor instance""" - return _tls.processor.on_graph_execution( - graph_exec_entry, cancel_event, cluster_lock - ) + processor: ExecutionProcessor = _tls.processor + return processor.on_graph_execution(graph_exec_entry, cancel_event, cluster_lock) T = TypeVar("T") @@ -143,8 +142,8 @@ T = TypeVar("T") async def execute_node( node: Node, - creds_manager: IntegrationCredentialsManager, data: 
NodeExecutionEntry, + execution_processor: "ExecutionProcessor", execution_stats: NodeExecutionStats | None = None, nodes_input_masks: Optional[NodesInputMasks] = None, ) -> BlockOutput: @@ -169,6 +168,7 @@ async def execute_node( node_id = data.node_id node_block = node.block execution_context = data.execution_context + creds_manager = execution_processor.creds_manager log_metadata = LogMetadata( logger=_logger, @@ -212,6 +212,7 @@ async def execute_node( "node_exec_id": node_exec_id, "user_id": user_id, "execution_context": execution_context, + "execution_processor": execution_processor, } # Last-minute fetch credentials + acquire a system-wide read-write lock to prevent @@ -608,8 +609,8 @@ class ExecutionProcessor: async for output_name, output_data in execute_node( node=node, - creds_manager=self.creds_manager, data=node_exec, + execution_processor=self, execution_stats=stats, nodes_input_masks=nodes_input_masks, ): @@ -860,12 +861,17 @@ class ExecutionProcessor: execution_stats_lock = threading.Lock() # State holders ---------------------------------------------------- - running_node_execution: dict[str, NodeExecutionProgress] = defaultdict( + self.running_node_execution: dict[str, NodeExecutionProgress] = defaultdict( NodeExecutionProgress ) - running_node_evaluation: dict[str, Future] = {} + self.running_node_evaluation: dict[str, Future] = {} + self.execution_stats = execution_stats + self.execution_stats_lock = execution_stats_lock execution_queue = ExecutionQueue[NodeExecutionEntry]() + running_node_execution = self.running_node_execution + running_node_evaluation = self.running_node_evaluation + try: if db_client.get_credits(graph_exec.user_id) <= 0: raise InsufficientBalanceError( diff --git a/autogpt_platform/backend/backend/util/prompt.py b/autogpt_platform/backend/backend/util/prompt.py index a39f0367dd..775d1c932b 100644 --- a/autogpt_platform/backend/backend/util/prompt.py +++ b/autogpt_platform/backend/backend/util/prompt.py @@ -5,6 +5,13 @@ from tiktoken import encoding_for_model from backend.util import json +# ---------------------------------------------------------------------------# +# CONSTANTS # +# ---------------------------------------------------------------------------# + +# Message prefixes for important system messages that should be protected during compression +MAIN_OBJECTIVE_PREFIX = "[Main Objective Prompt]: " + # ---------------------------------------------------------------------------# # INTERNAL UTILITIES # # ---------------------------------------------------------------------------# @@ -63,6 +70,55 @@ def _msg_tokens(msg: dict, enc) -> int: return WRAPPER + content_tokens + tool_call_tokens +def _is_tool_message(msg: dict) -> bool: + """Check if a message contains tool calls or results that should be protected.""" + content = msg.get("content") + + # Check for Anthropic-style tool messages + if isinstance(content, list) and any( + isinstance(item, dict) and item.get("type") in ("tool_use", "tool_result") + for item in content + ): + return True + + # Check for OpenAI-style tool calls in the message + if "tool_calls" in msg or msg.get("role") == "tool": + return True + + return False + + +def _is_objective_message(msg: dict) -> bool: + """Check if a message contains objective/system prompts that should be absolutely protected.""" + content = msg.get("content", "") + if isinstance(content, str): + # Protect any message with the main objective prefix + return content.startswith(MAIN_OBJECTIVE_PREFIX) + return False + + +def 
_truncate_tool_message_content(msg: dict, enc, max_tokens: int) -> None: + """ + Carefully truncate tool message content while preserving tool structure. + Only truncates tool_result content, leaves tool_use intact. + """ + content = msg.get("content") + if not isinstance(content, list): + return + + for item in content: + # Only process tool_result items, leave tool_use blocks completely intact + if not (isinstance(item, dict) and item.get("type") == "tool_result"): + continue + + result_content = item.get("content", "") + if ( + isinstance(result_content, str) + and _tok_len(result_content, enc) > max_tokens + ): + item["content"] = _truncate_middle_tokens(result_content, enc, max_tokens) + + def _truncate_middle_tokens(text: str, enc, max_tok: int) -> str: """ Return *text* shortened to β‰ˆmax_tok tokens by keeping the head & tail @@ -140,13 +196,21 @@ def compress_prompt( return sum(_msg_tokens(m, enc) for m in msgs) original_token_count = total_tokens() + if original_token_count + reserve <= target_tokens: return msgs # ---- STEP 0 : normalise content -------------------------------------- # Convert non-string payloads to strings so token counting is coherent. - for m in msgs[1:-1]: # keep the first & last intact + for i, m in enumerate(msgs): if not isinstance(m.get("content"), str) and m.get("content") is not None: + if _is_tool_message(m): + continue + + # Keep first and last messages intact (unless they're tool messages) + if i == 0 or i == len(msgs) - 1: + continue + # Reasonable 20k-char ceiling prevents pathological blobs content_str = json.dumps(m["content"], separators=(",", ":")) if len(content_str) > 20_000: @@ -157,34 +221,45 @@ def compress_prompt( cap = start_cap while total_tokens() + reserve > target_tokens and cap >= floor_cap: for m in msgs[1:-1]: # keep first & last intact - if _tok_len(m.get("content") or "", enc) > cap: - m["content"] = _truncate_middle_tokens(m["content"], enc, cap) + if _is_tool_message(m): + # For tool messages, only truncate tool result content, preserve structure + _truncate_tool_message_content(m, enc, cap) + continue + + if _is_objective_message(m): + # Never truncate objective messages - they contain the core task + continue + + content = m.get("content") or "" + if _tok_len(content, enc) > cap: + m["content"] = _truncate_middle_tokens(content, enc, cap) cap //= 2 # tighten the screw # ---- STEP 2 : middle-out deletion ----------------------------------- while total_tokens() + reserve > target_tokens and len(msgs) > 2: + # Identify all deletable messages (not first/last, not tool messages, not objective messages) + deletable_indices = [] + for i in range(1, len(msgs) - 1): # Skip first and last + if not _is_tool_message(msgs[i]) and not _is_objective_message(msgs[i]): + deletable_indices.append(i) + + if not deletable_indices: + break # nothing more we can drop + + # Delete from center outward - find the index closest to center centre = len(msgs) // 2 - # Build a symmetrical centre-out index walk: centre, centre+1, centre-1, ... 
- order = [centre] + [ - i - for pair in zip(range(centre + 1, len(msgs) - 1), range(centre - 1, 0, -1)) - for i in pair - ] - removed = False - for i in order: - msg = msgs[i] - if "tool_calls" in msg or msg.get("role") == "tool": - continue # protect tool shells - del msgs[i] - removed = True - break - if not removed: # nothing more we can drop - break + to_delete = min(deletable_indices, key=lambda i: abs(i - centre)) + del msgs[to_delete] # ---- STEP 3 : final safety-net trim on first & last ------------------ cap = start_cap while total_tokens() + reserve > target_tokens and cap >= floor_cap: for idx in (0, -1): # first and last + if _is_tool_message(msgs[idx]): + # For tool messages at first/last position, truncate tool result content only + _truncate_tool_message_content(msgs[idx], enc, cap) + continue + text = msgs[idx].get("content") or "" if _tok_len(text, enc) > cap: msgs[idx]["content"] = _truncate_middle_tokens(text, enc, cap) From f121a22544455066be83d0c9ba2a1f92e6c29b46 Mon Sep 17 00:00:00 2001 From: Krzysztof Czerwinski <34861343+kcze@users.noreply.github.com> Date: Fri, 12 Dec 2025 21:42:36 +0900 Subject: [PATCH 45/58] hotfix: update next (#11612) Update next to `15.4.10` --- autogpt_platform/frontend/package.json | 2 +- autogpt_platform/frontend/pnpm-lock.yaml | 60 ++++++++++++------------ 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json index 556b733e96..89c367c788 100644 --- a/autogpt_platform/frontend/package.json +++ b/autogpt_platform/frontend/package.json @@ -82,7 +82,7 @@ "lodash": "4.17.21", "lucide-react": "0.552.0", "moment": "2.30.1", - "next": "15.4.8", + "next": "15.4.10", "next-themes": "0.4.6", "nuqs": "2.7.2", "party-js": "2.2.0", diff --git a/autogpt_platform/frontend/pnpm-lock.yaml b/autogpt_platform/frontend/pnpm-lock.yaml index 8e699cd907..de9e029fac 100644 --- a/autogpt_platform/frontend/pnpm-lock.yaml +++ b/autogpt_platform/frontend/pnpm-lock.yaml @@ -16,7 +16,7 @@ importers: version: 5.2.2(react-hook-form@7.66.0(react@18.3.1)) '@next/third-parties': specifier: 15.4.6 - version: 15.4.6(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) + version: 15.4.6(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) '@phosphor-icons/react': specifier: 2.1.10 version: 2.1.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -88,7 +88,7 @@ importers: version: 5.24.13(@rjsf/utils@5.24.13(react@18.3.1)) '@sentry/nextjs': specifier: 10.27.0 - version: 10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9)) + version: 10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9)) '@supabase/ssr': specifier: 0.7.0 version: 0.7.0(@supabase/supabase-js@2.78.0) @@ -106,10 +106,10 @@ 
importers: version: 0.2.4 '@vercel/analytics': specifier: 1.5.0 - version: 1.5.0(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) + version: 1.5.0(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) '@vercel/speed-insights': specifier: 1.2.0 - version: 1.2.0(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) + version: 1.2.0(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) '@xyflow/react': specifier: 12.9.2 version: 12.9.2(@types/react@18.3.17)(immer@10.1.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -148,7 +148,7 @@ importers: version: 12.23.24(@emotion/is-prop-valid@1.2.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) geist: specifier: 1.5.1 - version: 1.5.1(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)) + version: 1.5.1(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)) highlight.js: specifier: 11.11.1 version: 11.11.1 @@ -171,14 +171,14 @@ importers: specifier: 2.30.1 version: 2.30.1 next: - specifier: 15.4.8 - version: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + specifier: 15.4.10 + version: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) next-themes: specifier: 0.4.6 version: 0.4.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1) nuqs: specifier: 2.7.2 - version: 2.7.2(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) + version: 2.7.2(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) party-js: specifier: 2.2.0 version: 2.2.0 @@ -284,7 +284,7 @@ importers: version: 9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)) '@storybook/nextjs': specifier: 9.1.5 - version: 9.1.5(esbuild@0.25.9)(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.101.3(esbuild@0.25.9)) + version: 9.1.5(esbuild@0.25.9)(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.101.3(esbuild@0.25.9)) '@tanstack/eslint-plugin-query': specifier: 5.91.2 version: 5.91.2(eslint@8.57.1)(typescript@5.9.3) @@ -1602,8 +1602,8 @@ packages: '@neoconfetti/react@1.0.0': resolution: {integrity: 
sha512-klcSooChXXOzIm+SE5IISIAn3bYzYfPjbX7D7HoqZL84oAfgREeSg5vSIaSFH+DaGzzvImTyWe1OyrJ67vik4A==} - '@next/env@15.4.8': - resolution: {integrity: sha512-LydLa2MDI1NMrOFSkO54mTc8iIHSttj6R6dthITky9ylXV2gCGi0bHQjVCtLGRshdRPjyh2kXbxJukDtBWQZtQ==} + '@next/env@15.4.10': + resolution: {integrity: sha512-knhmoJ0Vv7VRf6pZEPSnciUG1S4bIhWx+qTYBW/AjxEtlzsiNORPk8sFDCEvqLfmKuey56UB9FL1UdHEV3uBrg==} '@next/eslint-plugin-next@15.5.2': resolution: {integrity: sha512-lkLrRVxcftuOsJNhWatf1P2hNVfh98k/omQHrCEPPriUypR6RcS13IvLdIrEvkm9AH2Nu2YpR5vLqBuy6twH3Q==} @@ -5920,8 +5920,8 @@ packages: react: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc react-dom: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc - next@15.4.8: - resolution: {integrity: sha512-jwOXTz/bo0Pvlf20FSb6VXVeWRssA2vbvq9SdrOPEg9x8E1B27C2rQtvriAn600o9hH61kjrVRexEffv3JybuA==} + next@15.4.10: + resolution: {integrity: sha512-itVlc79QjpKMFMRhP+kbGKaSG/gZM6RCvwhEbwmCNF06CdDiNaoHcbeg0PqkEa2GOcn8KJ0nnc7+yL7EjoYLHQ==} engines: {node: ^18.18.0 || ^19.8.0 || >= 20.0.0} hasBin: true peerDependencies: @@ -9003,7 +9003,7 @@ snapshots: '@neoconfetti/react@1.0.0': {} - '@next/env@15.4.8': {} + '@next/env@15.4.10': {} '@next/eslint-plugin-next@15.5.2': dependencies: @@ -9033,9 +9033,9 @@ snapshots: '@next/swc-win32-x64-msvc@15.4.8': optional: true - '@next/third-parties@15.4.6(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)': + '@next/third-parties@15.4.6(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)': dependencies: - next: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 third-party-capital: 1.0.20 @@ -10267,7 +10267,7 @@ snapshots: '@sentry/core@10.27.0': {} - '@sentry/nextjs@10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9))': + '@sentry/nextjs@10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9))': dependencies: '@opentelemetry/api': 1.9.0 '@opentelemetry/semantic-conventions': 1.37.0 @@ -10280,7 +10280,7 @@ snapshots: '@sentry/react': 10.27.0(react@18.3.1) '@sentry/vercel-edge': 10.27.0 '@sentry/webpack-plugin': 4.3.0(webpack@5.101.3(esbuild@0.25.9)) - next: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) resolve: 1.22.8 rollup: 4.52.2 stacktrace-parser: 0.1.11 @@ -10642,7 +10642,7 @@ snapshots: react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - 
'@storybook/nextjs@9.1.5(esbuild@0.25.9)(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.101.3(esbuild@0.25.9))': + '@storybook/nextjs@9.1.5(esbuild@0.25.9)(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.101.3(esbuild@0.25.9))': dependencies: '@babel/core': 7.28.4 '@babel/plugin-syntax-bigint': 7.8.3(@babel/core@7.28.4) @@ -10666,7 +10666,7 @@ snapshots: css-loader: 6.11.0(webpack@5.101.3(esbuild@0.25.9)) image-size: 2.0.2 loader-utils: 3.3.1 - next: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) node-polyfill-webpack-plugin: 2.0.1(webpack@5.101.3(esbuild@0.25.9)) postcss: 8.5.6 postcss-loader: 8.2.0(postcss@8.5.6)(typescript@5.9.3)(webpack@5.101.3(esbuild@0.25.9)) @@ -11271,14 +11271,14 @@ snapshots: '@unrs/resolver-binding-win32-x64-msvc@1.11.1': optional: true - '@vercel/analytics@1.5.0(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)': + '@vercel/analytics@1.5.0(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)': optionalDependencies: - next: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 - '@vercel/speed-insights@1.2.0(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)': + '@vercel/speed-insights@1.2.0(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)': optionalDependencies: - next: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 '@vitest/expect@3.2.4': @@ -12954,9 +12954,9 @@ snapshots: functions-have-names@1.2.3: {} - geist@1.5.1(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)): + geist@1.5.1(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)): dependencies: - next: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 
15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) gensync@1.0.0-beta.2: {} @@ -14226,9 +14226,9 @@ snapshots: react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: - '@next/env': 15.4.8 + '@next/env': 15.4.10 '@swc/helpers': 0.5.15 caniuse-lite: 1.0.30001741 postcss: 8.4.31 @@ -14321,12 +14321,12 @@ snapshots: dependencies: boolbase: 1.0.0 - nuqs@2.7.2(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1): + nuqs@2.7.2(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1): dependencies: '@standard-schema/spec': 1.0.0 react: 18.3.1 optionalDependencies: - next: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) oas-kit-common@1.0.8: dependencies: From aefac541d9c13b5195745c14d4ed3046b6ed75f2 Mon Sep 17 00:00:00 2001 From: Ubbe Date: Mon, 15 Dec 2025 00:10:36 +0700 Subject: [PATCH 46/58] fix(frontend): force light mode for now (#11619) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes πŸ—οΈ We have the setup for light/dark mode support ( Tailwind + `next-themes` ), but not the capacity yet from contributions to make the app dark-mode ready. 
First, we need to make it look good in light mode πŸ˜† This disables `dark:` mode classes on the code, to prevent the app looking oopsie when the user is seeing it with a browser with dark mode preference: ### Before these changes Screenshot 2025-12-14 at 17 09 25 ### After Screenshot 2025-12-14 at 16 55 46 ## Checklist πŸ“‹ ### For code changes - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Run the app on a browser with dark mode preference - [x] It still looks in light mode without broken styles --- .../frontend/src/app/providers.tsx | 39 +++++++++---------- autogpt_platform/frontend/tailwind.config.ts | 4 +- 2 files changed, 20 insertions(+), 23 deletions(-) diff --git a/autogpt_platform/frontend/src/app/providers.tsx b/autogpt_platform/frontend/src/app/providers.tsx index c798f6e487..8ea199abc8 100644 --- a/autogpt_platform/frontend/src/app/providers.tsx +++ b/autogpt_platform/frontend/src/app/providers.tsx @@ -1,36 +1,33 @@ "use client"; -import { LaunchDarklyProvider } from "@/services/feature-flags/feature-flag-provider"; -import OnboardingProvider from "@/providers/onboarding/onboarding-provider"; +import { TooltipProvider } from "@/components/atoms/Tooltip/BaseTooltip"; +import { SentryUserTracker } from "@/components/monitor/SentryUserTracker"; import { BackendAPIProvider } from "@/lib/autogpt-server-api/context"; import { getQueryClient } from "@/lib/react-query/queryClient"; -import { QueryClientProvider } from "@tanstack/react-query"; -import { - ThemeProvider as NextThemesProvider, - ThemeProviderProps, -} from "next-themes"; -import { NuqsAdapter } from "nuqs/adapters/next/app"; -import { TooltipProvider } from "@/components/atoms/Tooltip/BaseTooltip"; import CredentialsProvider from "@/providers/agent-credentials/credentials-provider"; -import { SentryUserTracker } from "@/components/monitor/SentryUserTracker"; +import OnboardingProvider from "@/providers/onboarding/onboarding-provider"; +import { LaunchDarklyProvider } from "@/services/feature-flags/feature-flag-provider"; +import { QueryClientProvider } from "@tanstack/react-query"; +import { ThemeProvider, ThemeProviderProps } from "next-themes"; +import { NuqsAdapter } from "nuqs/adapters/next/app"; export function Providers({ children, ...props }: ThemeProviderProps) { const queryClient = getQueryClient(); return ( - - - - - - + + + + + + {children} - - - - - + + + + + ); diff --git a/autogpt_platform/frontend/tailwind.config.ts b/autogpt_platform/frontend/tailwind.config.ts index 2a0d039b1a..ab3ea9bc74 100644 --- a/autogpt_platform/frontend/tailwind.config.ts +++ b/autogpt_platform/frontend/tailwind.config.ts @@ -1,10 +1,10 @@ +import scrollbar from "tailwind-scrollbar"; import type { Config } from "tailwindcss"; import tailwindcssAnimate from "tailwindcss-animate"; -import scrollbar from "tailwind-scrollbar"; import { colors } from "./src/components/styles/colors"; const config = { - darkMode: ["class"], + darkMode: ["class", ".dark-mode"], // ignore dark: prefix classes for now until we fully support dark mode content: ["./src/**/*.{ts,tsx}"], prefix: "", theme: { From 7f7ef6a27150b67abf3c49b7f51e210296153566 Mon Sep 17 00:00:00 2001 From: Ubbe Date: Mon, 15 Dec 2025 00:11:27 +0700 Subject: [PATCH 47/58] feat(frontend): imporve agent inputs read-only (#11621) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes πŸ—οΈ The main goal of this PR is to improve how 
we display inputs used for a given task. Agent inputs can be of many types (text, long text, date, select, file, etc.). Until now, we have tried to display them as text, which has not always worked. Given we already have ``, which uses form elements to display the inputs ( _prefilled with data_ ), most of the time it will look better and less buggy than text. ### Before Screenshot 2025-12-14 at 17 45 44 ### After Screenshot 2025-12-14 at 17 45 21 ### Other improvements - πŸ—‘οΈ Removed `` - it is not used given the API does not support editing inputs for a schedule yt - Made `` icon size customisable ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Run the app locally - [x] Check the new view tasks use the form elements instead of text to display inputs --- .../AgentInputsReadOnly.tsx | 46 +++++----- .../modals/RunAgentInputs/RunAgentInputs.tsx | 23 +++-- .../ModalRunSection/ModalRunSection.tsx | 23 ++--- .../SelectedRunView/SelectedRunView.tsx | 43 ++++------ .../EditInputsModal/EditInputsModal.tsx | 84 ------------------- .../EditInputsModal/useEditInputsModal.ts | 78 ----------------- .../SelectedTemplateView.tsx | 25 ++---- .../SelectedTriggerView.tsx | 25 ++---- .../components/agent-run-draft-view.tsx | 36 ++++---- .../InformationTooltip/InformationTooltip.tsx | 8 +- 10 files changed, 94 insertions(+), 297 deletions(-) delete mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditInputsModal/EditInputsModal.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditInputsModal/useEditInputsModal.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx index a11e88f29a..948aae3ad2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx @@ -1,16 +1,10 @@ "use client"; import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import type { - BlockIOSubSchema, - CredentialsMetaInput, -} from "@/lib/autogpt-server-api/types"; +import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types"; import { CredentialsInput } from "../CredentialsInputs/CredentialsInputs"; -import { - getAgentCredentialsFields, - getAgentInputFields, - renderValue, -} from "./helpers"; +import { RunAgentInputs } from "../RunAgentInputs/RunAgentInputs"; +import { getAgentCredentialsFields, getAgentInputFields } from "./helpers"; type Props = { agent: LibraryAgent; @@ -28,13 +22,13 @@ export function AgentInputsReadOnly({ getAgentCredentialsFields(agent), ); - // Take actual input entries as leading; augment with schema from input fields. - // TODO: ensure consistent ordering. 
const inputEntries = inputs && - Object.entries(inputs).map<[string, [BlockIOSubSchema | undefined, any]]>( - ([k, v]) => [k, [inputFields[k], v]], - ); + Object.entries(inputs).map(([key, value]) => ({ + key, + schema: inputFields[key], + value, + })); const hasInputs = inputEntries && inputEntries.length > 0; const hasCredentials = credentialInputs && credentialFieldEntries.length > 0; @@ -48,16 +42,20 @@ export function AgentInputsReadOnly({ {/* Regular inputs */} {hasInputs && (
- {inputEntries.map(([key, [schema, value]]) => ( -
- -

- {renderValue(value)} -

-
- ))} + {inputEntries.map(({ key, schema, value }) => { + if (!schema) return null; + + return ( + {}} + readOnly={true} + /> + ); + })}
)} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs.tsx index d98d3cb10d..ea372193c5 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs.tsx @@ -9,6 +9,7 @@ import { Button } from "@/components/atoms/Button/Button"; import { FileInput } from "@/components/atoms/FileInput/FileInput"; import { Switch } from "@/components/atoms/Switch/Switch"; import { GoogleDrivePickerInput } from "@/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput"; +import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; import { TimePicker } from "@/components/molecules/TimePicker/TimePicker"; import { BlockIOObjectSubSchema, @@ -32,6 +33,7 @@ interface Props { value?: any; placeholder?: string; onChange: (value: any) => void; + readOnly?: boolean; } /** @@ -44,6 +46,7 @@ export function RunAgentInputs({ value, placeholder, onChange, + readOnly = false, ...props }: Props & React.HTMLAttributes) { const { handleUploadFile, uploadProgress } = useRunAgentInputs(); @@ -62,7 +65,6 @@ export function RunAgentInputs({ id={`${baseId}-number`} label={schema.title ?? placeholder ?? "Number"} hideLabel - size="small" type="number" value={value ?? ""} placeholder={placeholder || "Enter number"} @@ -80,7 +82,6 @@ export function RunAgentInputs({ id={`${baseId}-textarea`} label={schema.title ?? placeholder ?? "Text"} hideLabel - size="small" type="textarea" rows={3} value={value ?? ""} @@ -130,7 +131,6 @@ export function RunAgentInputs({ id={`${baseId}-date`} label={schema.title ?? placeholder ?? "Date"} hideLabel - size="small" type="date" value={value ? format(value as Date, "yyyy-MM-dd") : ""} onChange={(e) => { @@ -159,7 +159,6 @@ export function RunAgentInputs({ id={`${baseId}-datetime`} label={schema.title ?? placeholder ?? "Date time"} hideLabel - size="small" type="datetime-local" value={value ?? ""} onChange={(e) => onChange((e.target as HTMLInputElement).value)} @@ -194,7 +193,6 @@ export function RunAgentInputs({ label={schema.title ?? placeholder ?? "Select"} hideLabel value={value ?? ""} - size="small" onValueChange={(val: string) => onChange(val)} placeholder={placeholder || "Select an option"} options={schema.enum @@ -217,7 +215,6 @@ export function RunAgentInputs({ items={allKeys.map((key) => ({ value: key, label: _schema.properties[key]?.title ?? key, - size: "small", }))} selectedValues={selectedValues} onChange={(values: string[]) => @@ -336,7 +333,6 @@ export function RunAgentInputs({ id={`${baseId}-text`} label={schema.title ?? placeholder ?? "Text"} hideLabel - size="small" type="text" value={value ?? ""} onChange={(e) => onChange((e.target as HTMLInputElement).value)} @@ -347,6 +343,17 @@ export function RunAgentInputs({ } return ( -
{innerInputElement}
+
+ +
+ {innerInputElement} +
+
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx index d8c4ecb730..f3b02bfbc9 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx @@ -73,22 +73,15 @@ export function ModalRunSection() { title="Task Inputs" subtitle="Enter the information you want to provide to the agent for this task" > - {/* Regular inputs */} {inputFields.map(([key, inputSubSchema]) => ( -
- - - setInputValue(key, value)} - data-testid={`agent-input-${key}`} - /> -
+ setInputValue(key, value)} + data-testid={`agent-input-${key}`} + /> ))} ) : null} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx index cc5872097e..1fd4b3fb9d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx @@ -4,17 +4,11 @@ import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecut import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; import { Text } from "@/components/atoms/Text/Text"; -import { - Tooltip, - TooltipContent, - TooltipProvider, - TooltipTrigger, -} from "@/components/atoms/Tooltip/BaseTooltip"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/PendingReviewsList"; import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews"; import { isLargeScreen, useBreakpoint } from "@/lib/hooks/useBreakpoint"; -import { InfoIcon } from "@phosphor-icons/react"; import { useEffect } from "react"; import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly"; import { AnchorLinksWrap } from "../AnchorLinksWrap"; @@ -149,25 +143,12 @@ export function SelectedRunView({
+
Summary - - - - - - -

- This AI-generated summary describes how the agent - handled your task. It's an experimental - feature and may occasionally be inaccurate. -

-
-
-
+
} > @@ -195,7 +176,17 @@ export function SelectedRunView({ {/* Input Section */}
- + + Your input + +
+ } + > - - - - -
- Edit inputs -
- {Object.entries(inputFields).map(([key, fieldSchema]) => ( -
- - setValues((prev) => ({ ...prev, [key]: v }))} - /> -
- ))} -
-
- -
- - -
-
-
-
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditInputsModal/useEditInputsModal.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditInputsModal/useEditInputsModal.ts deleted file mode 100644 index 1c061ea7b7..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditInputsModal/useEditInputsModal.ts +++ /dev/null @@ -1,78 +0,0 @@ -"use client"; - -import { useMemo, useState } from "react"; -import { useQueryClient } from "@tanstack/react-query"; -import { getGetV1ListExecutionSchedulesForAGraphQueryKey } from "@/app/api/__generated__/endpoints/schedules/schedules"; -import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; -import { useToast } from "@/components/molecules/Toast/use-toast"; - -function getAgentInputFields(agent: LibraryAgent): Record { - const schema = agent.input_schema as unknown as { - properties?: Record; - } | null; - if (!schema || !schema.properties) return {}; - const properties = schema.properties as Record; - const visibleEntries = Object.entries(properties).filter( - ([, sub]) => !sub?.hidden, - ); - return Object.fromEntries(visibleEntries); -} - -export function useEditInputsModal( - agent: LibraryAgent, - schedule: GraphExecutionJobInfo, -) { - const queryClient = useQueryClient(); - const { toast } = useToast(); - const [isOpen, setIsOpen] = useState(false); - const [isSaving, setIsSaving] = useState(false); - const inputFields = useMemo(() => getAgentInputFields(agent), [agent]); - const [values, setValues] = useState>({ - ...(schedule.input_data as Record), - }); - - async function handleSave() { - setIsSaving(true); - try { - const res = await fetch(`/api/schedules/${schedule.id}`, { - method: "PATCH", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ inputs: values }), - }); - if (!res.ok) { - let message = "Failed to update schedule inputs"; - const data = await res.json(); - message = data?.message || data?.detail || message; - throw new Error(message); - } - - await queryClient.invalidateQueries({ - queryKey: getGetV1ListExecutionSchedulesForAGraphQueryKey( - schedule.graph_id, - ), - }); - toast({ - title: "Schedule inputs updated", - }); - setIsOpen(false); - } catch (error: any) { - toast({ - title: "Failed to update schedule inputs", - description: error?.message || "An unexpected error occurred.", - variant: "destructive", - }); - } - setIsSaving(false); - } - - return { - isOpen, - setIsOpen, - inputFields, - values, - setValues, - handleSave, - isSaving, - } as const; -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/SelectedTemplateView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/SelectedTemplateView.tsx index 8f64417f77..ead985457e 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/SelectedTemplateView.tsx +++ 
b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/SelectedTemplateView.tsx @@ -4,7 +4,6 @@ import type { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExe import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { Input } from "@/components/atoms/Input/Input"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; -import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; import { getAgentCredentialsFields, getAgentInputFields, @@ -138,25 +137,13 @@ export function SelectedTemplateView({
{inputFields.map(([key, inputSubSchema]) => ( -
- - setInputValue(key, value)} - /> -
+ schema={inputSubSchema} + value={inputs[key] ?? inputSubSchema.default} + placeholder={inputSubSchema.description} + onChange={(value) => setInputValue(key, value)} + /> ))}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/SelectedTriggerView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/SelectedTriggerView.tsx index 2021251ad2..64d4430e78 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/SelectedTriggerView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/SelectedTriggerView.tsx @@ -3,7 +3,6 @@ import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { Input } from "@/components/atoms/Input/Input"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; -import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; import { getAgentCredentialsFields, getAgentInputFields, @@ -131,25 +130,13 @@ export function SelectedTriggerView({
{inputFields.map(([key, inputSubSchema]) => ( -
- - setInputValue(key, value)} - /> -
+ schema={inputSubSchema} + value={inputs[key] ?? inputSubSchema.default} + placeholder={inputSubSchema.description} + onChange={(value) => setInputValue(key, value)} + /> ))}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx index 0289bbdb5f..5f57032618 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx @@ -680,28 +680,20 @@ export function AgentRunDraftView({ {/* Regular inputs */} {Object.entries(agentInputFields).map(([key, inputSubSchema]) => ( -
- - - { - setInputValues((obj) => ({ - ...obj, - [key]: value, - })); - setChangedPresetAttributes((prev) => prev.add("inputs")); - }} - data-testid={`agent-input-${key}`} - /> -
+ { + setInputValues((obj) => ({ + ...obj, + [key]: value, + })); + setChangedPresetAttributes((prev) => prev.add("inputs")); + }} + data-testid={`agent-input-${key}`} + /> ))} diff --git a/autogpt_platform/frontend/src/components/molecules/InformationTooltip/InformationTooltip.tsx b/autogpt_platform/frontend/src/components/molecules/InformationTooltip/InformationTooltip.tsx index 4bf9a76b94..36184f08c4 100644 --- a/autogpt_platform/frontend/src/components/molecules/InformationTooltip/InformationTooltip.tsx +++ b/autogpt_platform/frontend/src/components/molecules/InformationTooltip/InformationTooltip.tsx @@ -9,16 +9,20 @@ import ReactMarkdown from "react-markdown"; type Props = { description?: string; + iconSize?: number; }; -export function InformationTooltip({ description }: Props) { +export function InformationTooltip({ description, iconSize = 24 }: Props) { if (!description) return null; return ( - + Date: Mon, 15 Dec 2025 16:57:36 +0700 Subject: [PATCH 48/58] feat(frontend): add nice scrollable tabs on Selected Run view (#11596) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes πŸ—οΈ https://github.com/user-attachments/assets/7e49ed5b-c818-4aa3-b5d6-4fa86fada7ee When the content of Summary + Outputs + Inputs is long enough, it will show in this new `` component, which auto-scrolls the content as you click on a tab. ## Checklist πŸ“‹ ### For code changes - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Run the app locally - [x] Check the new page with scrollable tabs --- .../AgentInputsReadOnly.tsx | 7 +- .../SelectedRunView/SelectedRunView.tsx | 230 +++++---- .../SelectedScheduleView.tsx | 27 -- .../components/SelectedScheduleActions.tsx | 3 +- .../ScrollableTabs/ScrollableTabs.stories.tsx | 437 ++++++++++++++++++ .../ScrollableTabs/ScrollableTabs.tsx | 59 +++ .../components/ScrollableTabsContent.tsx | 48 ++ .../components/ScrollableTabsList.tsx | 52 +++ .../components/ScrollableTabsTrigger.tsx | 53 +++ .../molecules/ScrollableTabs/context.ts | 22 + .../molecules/ScrollableTabs/helpers.ts | 48 ++ .../ScrollableTabs/useScrollableTabs.ts | 60 +++ 12 files changed, 900 insertions(+), 146 deletions(-) create mode 100644 autogpt_platform/frontend/src/components/molecules/ScrollableTabs/ScrollableTabs.stories.tsx create mode 100644 autogpt_platform/frontend/src/components/molecules/ScrollableTabs/ScrollableTabs.tsx create mode 100644 autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsContent.tsx create mode 100644 autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsList.tsx create mode 100644 autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsTrigger.tsx create mode 100644 autogpt_platform/frontend/src/components/molecules/ScrollableTabs/context.ts create mode 100644 autogpt_platform/frontend/src/components/molecules/ScrollableTabs/helpers.ts create mode 100644 autogpt_platform/frontend/src/components/molecules/ScrollableTabs/useScrollableTabs.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx index 948aae3ad2..bc9918c2bb 100644 --- 
a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx @@ -1,6 +1,7 @@ "use client"; import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Text } from "@/components/atoms/Text/Text"; import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types"; import { CredentialsInput } from "../CredentialsInputs/CredentialsInputs"; import { RunAgentInputs } from "../RunAgentInputs/RunAgentInputs"; @@ -34,7 +35,11 @@ export function AgentInputsReadOnly({ const hasCredentials = credentialInputs && credentialFieldEntries.length > 0; if (!hasInputs && !hasCredentials) { - return
No input for this run.
; + return ( + + No input for this run. + + ); } return ( diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx index 1fd4b3fb9d..ff9a4e5809 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx @@ -6,12 +6,17 @@ import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner import { Text } from "@/components/atoms/Text/Text"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; +import { + ScrollableTabs, + ScrollableTabsContent, + ScrollableTabsList, + ScrollableTabsTrigger, +} from "@/components/molecules/ScrollableTabs/ScrollableTabs"; import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/PendingReviewsList"; import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews"; import { isLargeScreen, useBreakpoint } from "@/lib/hooks/useBreakpoint"; import { useEffect } from "react"; import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly"; -import { AnchorLinksWrap } from "../AnchorLinksWrap"; import { LoadingSelectedContent } from "../LoadingSelectedContent"; import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader"; @@ -22,9 +27,6 @@ import { SelectedRunActions } from "./components/SelectedRunActions/SelectedRunA import { WebhookTriggerSection } from "./components/WebhookTriggerSection"; import { useSelectedRunView } from "./useSelectedRunView"; -const anchorStyles = - "border-b-2 border-transparent pb-1 text-sm font-medium text-slate-600 transition-colors hover:text-slate-900 hover:border-slate-900"; - interface Props { agent: LibraryAgent; runId: string; @@ -59,13 +61,6 @@ export function SelectedRunView({ const withSummary = run?.stats?.activity_status; const withReviews = run?.status === AgentExecutionStatus.REVIEW; - function scrollToSection(id: string) { - const element = document.getElementById(id); - if (element) { - element.scrollIntoView({ behavior: "smooth", block: "start" }); - } - } - if (responseError || httpError) { return ( )} - {/* Navigation Links */} - - {withSummary && ( - - )} - - - {withReviews && ( - - )} - - - {/* Summary Section */} - {withSummary && ( -
- - Summary - -
- } - > - -
-
- )} - - {/* Output Section */} -
- - {isLoading ? ( -
- -
- ) : run && "outputs" in run ? ( - - ) : ( - - No output from this run. - + + + {withSummary && ( + + Summary + )} -
-
- - {/* Input Section */} -
- - Your input - -
- } - > - - -
- - {/* Reviews Section */} - {withReviews && ( -
- - {reviewsLoading ? ( -
Loading reviews…
- ) : pendingReviews.length > 0 ? ( - - ) : ( -
- No pending reviews for this execution + + Output + + + Your input + + {withReviews && ( + + Reviews ({pendingReviews.length}) + + )} + +
+ {/* Summary Section */} + {withSummary && ( + +
+ + Summary + +
+ } + > + +
- )} - + + )} + + {/* Output Section */} + +
+ + {isLoading ? ( +
+ +
+ ) : run && "outputs" in run ? ( + + ) : ( + + No output from this run. + + )} +
+
+
+ + {/* Input Section */} + +
+ + Your input + +
+ } + > + + +
+ + + {/* Reviews Section */} + {withReviews && ( + +
+ + {reviewsLoading ? ( + + ) : pendingReviews.length > 0 ? ( + + ) : ( + + No pending reviews for this execution + + )} + +
+
+ )}
- )} +
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx index 6563e19d5d..7f67963aa3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx @@ -9,7 +9,6 @@ import { humanizeCronExpression } from "@/lib/cron-expression-utils"; import { isLargeScreen, useBreakpoint } from "@/lib/hooks/useBreakpoint"; import { formatInTimezone, getTimezoneDisplayName } from "@/lib/timezone-utils"; import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly"; -import { AnchorLinksWrap } from "../AnchorLinksWrap"; import { LoadingSelectedContent } from "../LoadingSelectedContent"; import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader"; @@ -17,9 +16,6 @@ import { SelectedViewLayout } from "../SelectedViewLayout"; import { SelectedScheduleActions } from "./components/SelectedScheduleActions"; import { useSelectedScheduleView } from "./useSelectedScheduleView"; -const anchorStyles = - "border-b-2 border-transparent pb-1 text-sm font-medium text-slate-600 transition-colors hover:text-slate-900 hover:border-slate-900"; - interface Props { agent: LibraryAgent; scheduleId: string; @@ -45,13 +41,6 @@ export function SelectedScheduleView({ const breakpoint = useBreakpoint(); const isLgScreenUp = isLargeScreen(breakpoint); - function scrollToSection(id: string) { - const element = document.getElementById(id); - if (element) { - element.scrollIntoView({ behavior: "smooth", block: "start" }); - } - } - if (error) { return ( - {/* Navigation Links */} - - - - - {/* Schedule Section */}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/SelectedScheduleActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/SelectedScheduleActions.tsx index 420b84c722..0fd34851fd 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/SelectedScheduleActions.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/SelectedScheduleActions.tsx @@ -25,9 +25,10 @@ export function SelectedScheduleActions({ agent, scheduleId }: Props) { diff --git a/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/ScrollableTabs.stories.tsx b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/ScrollableTabs.stories.tsx new file mode 100644 index 0000000000..eebf387891 --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/ScrollableTabs.stories.tsx @@ -0,0 +1,437 @@ +import type { Meta, StoryObj } from "@storybook/nextjs"; +import { + ScrollableTabs, + ScrollableTabsContent, + ScrollableTabsList, + ScrollableTabsTrigger, +} from "./ScrollableTabs"; + +const meta = { + title: "Molecules/ScrollableTabs", + component: ScrollableTabs, + parameters: { + layout: "fullscreen", + }, + tags: ["autodocs"], + argTypes: {}, +} satisfies Meta; + +export default meta; +type Story = StoryObj; + +function ScrollableTabsDemo() { + return ( +
+

ScrollableTabs Examples

+ +
+
+

+ Short Content (Tabs Hidden) +

+
+ + + + Account + + + Password + + + Settings + + + +
+ Make changes to your account here. Click save when you're + done. +
+
+ +
+ Change your password here. After saving, you'll be logged + out. +
+
+ +
+ Update your preferences and settings here. +
+
+
+
+
+ +
+

+ Long Content (Tabs Visible) +

+
+ + + + Account + + + Password + + + Settings + + + +
+

+ Account Settings +

+

+ Make changes to your account here. Click save when + you're done. +

+

+ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed + do eiusmod tempor incididunt ut labore et dolore magna + aliqua. Ut enim ad minim veniam, quis nostrud exercitation + ullamco laboris. +

+

+ Duis aute irure dolor in reprehenderit in voluptate velit + esse cillum dolore eu fugiat nulla pariatur. Excepteur sint + occaecat cupidatat non proident. +

+

+ Sed ut perspiciatis unde omnis iste natus error sit + voluptatem accusantium doloremque laudantium, totam rem + aperiam. +

+
+
+ +
+

+ Password Settings +

+

+ Change your password here. After saving, you'll be + logged out. +

+

+ At vero eos et accusamus et iusto odio dignissimos ducimus + qui blanditiis praesentium voluptatum deleniti atque + corrupti quos dolores et quas molestias excepturi sint + occaecati cupiditate. +

+

+ Et harum quidem rerum facilis est et expedita distinctio. + Nam libero tempore, cum soluta nobis est eligendi optio + cumque nihil impedit quo minus. +

+

+ Temporibus autem quibusdam et aut officiis debitis aut rerum + necessitatibus saepe eveniet ut et voluptates repudiandae + sint. +

+
+
+ +
+

+ General Settings +

+

+ Update your preferences and settings here. +

+

+ Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut + odit aut fugit, sed quia consequuntur magni dolores eos qui + ratione voluptatem sequi nesciunt. +

+

+ Neque porro quisquam est, qui dolorem ipsum quia dolor sit + amet, consectetur, adipisci velit, sed quia non numquam eius + modi tempora incidunt ut labore et dolore magnam aliquam + quaerat voluptatem. +

+

+ Ut enim ad minima veniam, quis nostrum exercitationem ullam + corporis suscipit laboriosam, nisi ut aliquid ex ea commodi + consequatur. +

+
+
+
+
+
+ +
+

Many Tabs

+
+ + + + Overview + + + Analytics + + + Reports + + + Notifications + + + Integrations + + + Billing + + + +
+

+ Dashboard Overview +

+

+ Dashboard overview with key metrics and recent activity. +

+

+ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed + do eiusmod tempor incididunt ut labore et dolore magna + aliqua. +

+

+ Ut enim ad minim veniam, quis nostrud exercitation ullamco + laboris nisi ut aliquip ex ea commodo consequat. +

+
+
+ +
+

Analytics

+

+ Detailed analytics and performance metrics. +

+

+ Duis aute irure dolor in reprehenderit in voluptate velit + esse cillum dolore eu fugiat nulla pariatur. +

+

+ Excepteur sint occaecat cupidatat non proident, sunt in + culpa qui officia deserunt mollit anim id est laborum. +

+
+
+ +
+

Reports

+

+ Generate and view reports for your account. +

+

+ Sed ut perspiciatis unde omnis iste natus error sit + voluptatem accusantium doloremque laudantium. +

+

+ Totam rem aperiam, eaque ipsa quae ab illo inventore + veritatis et quasi architecto beatae vitae dicta sunt + explicabo. +

+
+
+ +
+

Notifications

+

Manage your notification preferences.

+

+ Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut + odit aut fugit. +

+

+ Sed quia consequuntur magni dolores eos qui ratione + voluptatem sequi nesciunt. +

+
+
+ +
+

Integrations

+

+ Connect and manage third-party integrations. +

+

+ Neque porro quisquam est, qui dolorem ipsum quia dolor sit + amet. +

+

+ Consectetur, adipisci velit, sed quia non numquam eius modi + tempora incidunt. +

+
+
+ +
+

Billing

+

+ View and manage your billing information. +

+

+ Ut enim ad minima veniam, quis nostrum exercitationem ullam + corporis suscipit laboriosam. +

+

+ Nisi ut aliquid ex ea commodi consequatur? Quis autem vel + eum iure reprehenderit qui in ea voluptate velit esse. +

+
+
+
+
+
+
+
+ ); +} + +export const Default = { + render: () => , +} satisfies Story; + +export const ShortContent = { + render: () => ( +
+
+ + + + Account + + + Password + + + +
+ Make changes to your account here. Click save when you're + done. +
+
+ +
+ Change your password here. After saving, you'll be logged + out. +
+
+
+
+
+ ), +} satisfies Story; + +export const LongContent = { + render: () => ( +
+
+ + + Account + Password + Settings + + +
+

Account Settings

+

+ Make changes to your account here. Click save when you're + done. +

+

+ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do + eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut + enim ad minim veniam, quis nostrud exercitation ullamco laboris + nisi ut aliquip ex ea commodo consequat. +

+

+ Duis aute irure dolor in reprehenderit in voluptate velit esse + cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat + cupidatat non proident, sunt in culpa qui officia deserunt + mollit anim id est laborum. +

+

+ Sed ut perspiciatis unde omnis iste natus error sit voluptatem + accusantium doloremque laudantium, totam rem aperiam, eaque ipsa + quae ab illo inventore veritatis et quasi architecto beatae + vitae dicta sunt explicabo. +

+

+ Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit + aut fugit, sed quia consequuntur magni dolores eos qui ratione + voluptatem sequi nesciunt. +

+
+
+ +
+

Password Settings

+

+ Change your password here. After saving, you'll be logged + out. +

+

+ At vero eos et accusamus et iusto odio dignissimos ducimus qui + blanditiis praesentium voluptatum deleniti atque corrupti quos + dolores et quas molestias excepturi sint occaecati cupiditate + non provident. +

+

+ Similique sunt in culpa qui officia deserunt mollitia animi, id + est laborum et dolorum fuga. Et harum quidem rerum facilis est + et expedita distinctio. +

+

+ Nam libero tempore, cum soluta nobis est eligendi optio cumque + nihil impedit quo minus id quod maxime placeat facere possimus, + omnis voluptas assumenda est, omnis dolor repellendus. +

+

+ Temporibus autem quibusdam et aut officiis debitis aut rerum + necessitatibus saepe eveniet ut et voluptates repudiandae sint + et molestiae non recusandae. +

+
+
+ +
+

General Settings

+

Update your preferences and settings here.

+

+ Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, + consectetur, adipisci velit, sed quia non numquam eius modi + tempora incidunt ut labore et dolore magnam aliquam quaerat + voluptatem. +

+

+ Ut enim ad minima veniam, quis nostrum exercitationem ullam + corporis suscipit laboriosam, nisi ut aliquid ex ea commodi + consequatur? Quis autem vel eum iure reprehenderit qui in ea + voluptate velit esse quam nihil molestiae consequatur. +

+

+ Vel illum qui dolorem eum fugiat quo voluptas nulla pariatur? At + vero eos et accusamus et iusto odio dignissimos ducimus qui + blanditiis praesentium voluptatum deleniti atque corrupti quos + dolores. +

+

+ Et quas molestias excepturi sint occaecati cupiditate non + provident, similique sunt in culpa qui officia deserunt mollitia + animi, id est laborum et dolorum fuga. +

+
+
+
+
+
+ ), +} satisfies Story; diff --git a/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/ScrollableTabs.tsx b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/ScrollableTabs.tsx new file mode 100644 index 0000000000..bdbfa3cd4f --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/ScrollableTabs.tsx @@ -0,0 +1,59 @@ +"use client"; + +import { cn } from "@/lib/utils"; +import { Children } from "react"; +import { ScrollableTabsContent } from "./components/ScrollableTabsContent"; +import { ScrollableTabsList } from "./components/ScrollableTabsList"; +import { ScrollableTabsTrigger } from "./components/ScrollableTabsTrigger"; +import { ScrollableTabsContext } from "./context"; +import { findContentElements, findListElement } from "./helpers"; +import { useScrollableTabsInternal } from "./useScrollableTabs"; + +interface Props { + children?: React.ReactNode; + className?: string; + defaultValue?: string; +} + +export function ScrollableTabs({ children, className, defaultValue }: Props) { + const { + activeValue, + setActiveValue, + registerContent, + scrollToSection, + scrollContainer, + contentContainerRef, + } = useScrollableTabsInternal({ defaultValue }); + + const childrenArray = Children.toArray(children); + const listElement = findListElement(childrenArray); + const contentElements = findContentElements(childrenArray); + + return ( + +
+ {listElement} +
{ + if (contentContainerRef) { + contentContainerRef.current = node; + } + }} + className="max-h-[64rem] overflow-y-auto scrollbar-thin scrollbar-track-transparent scrollbar-thumb-zinc-300 dark:scrollbar-thumb-zinc-700" + > +
{contentElements}
+
+
+
+ ); +} + +export { ScrollableTabsContent, ScrollableTabsList, ScrollableTabsTrigger }; diff --git a/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsContent.tsx b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsContent.tsx new file mode 100644 index 0000000000..4027e87cfa --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsContent.tsx @@ -0,0 +1,48 @@ +"use client"; + +import { cn } from "@/lib/utils"; +import * as React from "react"; +import { useScrollableTabs } from "../context"; + +interface Props extends React.HTMLAttributes { + value: string; +} + +export const ScrollableTabsContent = React.forwardRef( + function ScrollableTabsContent( + { className, value, children, ...props }, + ref, + ) { + const { registerContent } = useScrollableTabs(); + const contentRef = React.useRef(null); + + React.useEffect(() => { + if (contentRef.current) { + registerContent(value, contentRef.current); + } + return () => { + registerContent(value, null); + }; + }, [value, registerContent]); + + return ( +
{ + if (typeof ref === "function") ref(node); + else if (ref) ref.current = node; + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + contentRef.current = node; + }} + data-scrollable-tab-content + data-value={value} + className={cn("focus-visible:outline-none", className)} + {...props} + > + {children} +
+ ); + }, +); + +ScrollableTabsContent.displayName = "ScrollableTabsContent"; diff --git a/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsList.tsx b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsList.tsx new file mode 100644 index 0000000000..496a91ec5a --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsList.tsx @@ -0,0 +1,52 @@ +"use client"; + +import { cn } from "@/lib/utils"; +import * as React from "react"; +import { useScrollableTabs } from "../context"; + +export const ScrollableTabsList = React.forwardRef< + HTMLDivElement, + React.HTMLAttributes +>(function ScrollableTabsList({ className, children, ...props }, ref) { + const { activeValue } = useScrollableTabs(); + const [activeTabElement, setActiveTabElement] = + React.useState(null); + + React.useEffect(() => { + const activeButton = Array.from( + document.querySelectorAll( + '[data-scrollable-tab-trigger][data-value="' + activeValue + '"]', + ), + )[0]; + + if (activeButton) { + setActiveTabElement(activeButton); + } + }, [activeValue]); + + return ( +
+
+ {children} +
+ {activeTabElement && ( +
+ )} +
+ ); +}); + +ScrollableTabsList.displayName = "ScrollableTabsList"; diff --git a/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsTrigger.tsx b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsTrigger.tsx new file mode 100644 index 0000000000..41367264d8 --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsTrigger.tsx @@ -0,0 +1,53 @@ +"use client"; + +import { cn } from "@/lib/utils"; +import * as React from "react"; +import { useScrollableTabs } from "../context"; + +interface Props extends React.ButtonHTMLAttributes { + value: string; +} + +export const ScrollableTabsTrigger = React.forwardRef( + function ScrollableTabsTrigger( + { className, value, children, ...props }, + ref, + ) { + const { activeValue, scrollToSection } = useScrollableTabs(); + const elementRef = React.useRef(null); + const isActive = activeValue === value; + + function handleClick(e: React.MouseEvent) { + e.preventDefault(); + e.stopPropagation(); + scrollToSection(value); + props.onClick?.(e); + } + + return ( + + ); + }, +); + +ScrollableTabsTrigger.displayName = "ScrollableTabsTrigger"; diff --git a/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/context.ts b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/context.ts new file mode 100644 index 0000000000..080ae3702c --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/context.ts @@ -0,0 +1,22 @@ +import * as React from "react"; +import { createContext, useContext } from "react"; + +interface ScrollableTabsContextValue { + activeValue: string | null; + setActiveValue: React.Dispatch>; + registerContent: (value: string, element: HTMLElement | null) => void; + scrollToSection: (value: string) => void; + scrollContainer: HTMLElement | null; +} + +export const ScrollableTabsContext = createContext< + ScrollableTabsContextValue | undefined +>(undefined); + +export function useScrollableTabs() { + const context = useContext(ScrollableTabsContext); + if (!context) { + throw new Error("useScrollableTabs must be used within a ScrollableTabs"); + } + return context; +} diff --git a/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/helpers.ts b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/helpers.ts new file mode 100644 index 0000000000..651e4c84fd --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/helpers.ts @@ -0,0 +1,48 @@ +import * as React from "react"; + +const HEADER_OFFSET = 100; + +export function calculateScrollPosition( + elementRect: DOMRect, + containerRect: DOMRect, + currentScrollTop: number, +): number { + const elementTopRelativeToContainer = + elementRect.top - containerRect.top + currentScrollTop - HEADER_OFFSET; + + return Math.max(0, elementTopRelativeToContainer); +} + +function hasDisplayName( + type: unknown, + displayName: string, +): type is { displayName: string } { + return ( + typeof type === "object" && + type !== null && + "displayName" in type && + (type as { displayName: unknown }).displayName === displayName + ); +} + +export function findListElement( + children: React.ReactNode[], +): React.ReactElement | undefined { + return children.find( + (child) => + React.isValidElement(child) && + hasDisplayName(child.type, "ScrollableTabsList"), + ) as React.ReactElement | undefined; +} + +export function findContentElements( + children: React.ReactNode[], +): 
React.ReactNode[] { + return children.filter( + (child) => + !( + React.isValidElement(child) && + hasDisplayName(child.type, "ScrollableTabsList") + ), + ); +} diff --git a/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/useScrollableTabs.ts b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/useScrollableTabs.ts new file mode 100644 index 0000000000..5043f1047e --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/useScrollableTabs.ts @@ -0,0 +1,60 @@ +import { useCallback, useRef, useState } from "react"; +import { calculateScrollPosition } from "./helpers"; + +interface Args { + defaultValue?: string; +} + +export function useScrollableTabsInternal({ defaultValue }: Args) { + const [activeValue, setActiveValue] = useState( + defaultValue || null, + ); + const contentRefs = useRef>(new Map()); + const contentContainerRef = useRef(null); + + function registerContent(value: string, element: HTMLElement | null) { + if (element) { + contentRefs.current.set(value, element); + } else { + contentRefs.current.delete(value); + } + } + + function scrollToSection(value: string) { + const element = contentRefs.current.get(value); + const scrollContainer = contentContainerRef.current; + if (!element || !scrollContainer) return; + + setActiveValue(value); + + const containerRect = scrollContainer.getBoundingClientRect(); + const elementRect = element.getBoundingClientRect(); + const currentScrollTop = scrollContainer.scrollTop; + const scrollTop = calculateScrollPosition( + elementRect, + containerRect, + currentScrollTop, + ); + + const maxScrollTop = + scrollContainer.scrollHeight - scrollContainer.clientHeight; + const clampedScrollTop = Math.min(Math.max(0, scrollTop), maxScrollTop); + + scrollContainer.scrollTo({ + top: clampedScrollTop, + behavior: "smooth", + }); + } + + const memoizedRegisterContent = useCallback(registerContent, []); + const memoizedScrollToSection = useCallback(scrollToSection, []); + + return { + activeValue, + setActiveValue, + registerContent: memoizedRegisterContent, + scrollToSection: memoizedScrollToSection, + scrollContainer: contentContainerRef.current, + contentContainerRef, + }; +} From cc9179178f6a820ca83da3b7cf5f148dfeccdd5f Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Tue, 16 Dec 2025 19:14:14 +0700 Subject: [PATCH 49/58] feat(block): Human in The Loop Block restructure (#11627) ## Summary This PR refactors the Human-In-The-Loop (HITL) review system backend to improve data handling and API consistency. ## Changes ### Backend Refactoring #### 1. **Block Output Schema Update** (`human_in_the_loop.py`) - Replaced single `reviewed_data` and `status` fields with separate `approved_data` and `rejected_data` outputs - This allows downstream blocks to handle approved vs rejected data differently without checking status - Simplified test outputs to match new schema #### 2. **Review Data Handling** (`human_review.py`) - Modified `get_or_create_human_review` to always return `review.payload` regardless of approval status - Previously returned `None` for rejected reviews, which could cause data loss - Now preserves reviewer-modified data for both approved and rejected cases #### 3. **API Route Simplification** (`review/routes.py`) - Streamlined review decision processing logic using ternary operator - Unified data handling for both approved and rejected reviews - Maintains backward compatibility while improving code clarity ## Why These Changes? 
- **Better Data Flow**: Separate output pins for approved/rejected data make workflow design more intuitive - **Data Preservation**: Rejected reviews can still pass modified data downstream for logging or alternative processing - **Cleaner API**: Simplified decision processing reduces code complexity and potential bugs ## Testing - All existing tests pass with updated schema - Backward compatibility maintained for existing workflows - Human review functionality verified in both approved and rejected scenarios ## Related This is the backend portion of changes from #11529, applied separately to the `feat/hitl` branch. --- .../backend/blocks/human_in_the_loop.py | 21 ++++++++----------- .../backend/backend/data/human_review.py | 2 +- .../server/v2/executions/review/routes.py | 20 +++++++----------- 3 files changed, 18 insertions(+), 25 deletions(-) diff --git a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py index 42c98b5146..13c9fb31db 100644 --- a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py +++ b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py @@ -1,5 +1,5 @@ import logging -from typing import Any, Literal +from typing import Any from prisma.enums import ReviewStatus @@ -45,11 +45,11 @@ class HumanInTheLoopBlock(Block): ) class Output(BlockSchemaOutput): - reviewed_data: Any = SchemaField( - description="The data after human review (may be modified)" + approved_data: Any = SchemaField( + description="The data when approved (may be modified by reviewer)" ) - status: Literal["approved", "rejected"] = SchemaField( - description="Status of the review: 'approved' or 'rejected'" + rejected_data: Any = SchemaField( + description="The data when rejected (may be modified by reviewer)" ) review_message: str = SchemaField( description="Any message provided by the reviewer", default="" @@ -69,8 +69,7 @@ class HumanInTheLoopBlock(Block): "editable": True, }, test_output=[ - ("status", "approved"), - ("reviewed_data", {"name": "John Doe", "age": 30}), + ("approved_data", {"name": "John Doe", "age": 30}), ], test_mock={ "get_or_create_human_review": lambda *_args, **_kwargs: ReviewResult( @@ -116,8 +115,7 @@ class HumanInTheLoopBlock(Block): logger.info( f"HITL block skipping review for node {node_exec_id} - safe mode disabled" ) - yield "status", "approved" - yield "reviewed_data", input_data.data + yield "approved_data", input_data.data yield "review_message", "Auto-approved (safe mode disabled)" return @@ -158,12 +156,11 @@ class HumanInTheLoopBlock(Block): ) if result.status == ReviewStatus.APPROVED: - yield "status", "approved" - yield "reviewed_data", result.data + yield "approved_data", result.data if result.message: yield "review_message", result.message elif result.status == ReviewStatus.REJECTED: - yield "status", "rejected" + yield "rejected_data", result.data if result.message: yield "review_message", result.message diff --git a/autogpt_platform/backend/backend/data/human_review.py b/autogpt_platform/backend/backend/data/human_review.py index df0b4b21e8..11b87ec1dd 100644 --- a/autogpt_platform/backend/backend/data/human_review.py +++ b/autogpt_platform/backend/backend/data/human_review.py @@ -100,7 +100,7 @@ async def get_or_create_human_review( return None else: return ReviewResult( - data=review.payload if review.status == ReviewStatus.APPROVED else None, + data=review.payload, status=review.status, message=review.reviewMessage or "", processed=review.processed, diff --git 
a/autogpt_platform/backend/backend/server/v2/executions/review/routes.py b/autogpt_platform/backend/backend/server/v2/executions/review/routes.py index febfe40213..14fb435457 100644 --- a/autogpt_platform/backend/backend/server/v2/executions/review/routes.py +++ b/autogpt_platform/backend/backend/server/v2/executions/review/routes.py @@ -134,18 +134,14 @@ async def process_review_action( # Build review decisions map review_decisions = {} for review in request.reviews: - if review.approved: - review_decisions[review.node_exec_id] = ( - ReviewStatus.APPROVED, - review.reviewed_data, - review.message, - ) - else: - review_decisions[review.node_exec_id] = ( - ReviewStatus.REJECTED, - None, - review.message, - ) + review_status = ( + ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED + ) + review_decisions[review.node_exec_id] = ( + review_status, + review.reviewed_data, + review.message, + ) # Process all reviews updated_reviews = await process_all_reviews_for_execution( From e640d36265bf8c73a694663fb27efdbfaaac4f9e Mon Sep 17 00:00:00 2001 From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com> Date: Thu, 18 Dec 2025 09:12:55 +0530 Subject: [PATCH 50/58] feat(frontend): Add special handling for AGENT block type in form and output handlers (#11595) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Agent blocks require different handling compared to standard blocks, particularly for: - Handle ID generation (using direct keys instead of generated IDs) - Form data storage structure (nested under `inputs` key) - Field ID parsing (filtering out schema path prefixes) This PR implements special handling for `BlockUIType.AGENT` throughout the form rendering and output handling components to ensure agents work correctly in the flow editor. 
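For reference, a minimal TypeScript sketch of the agent handle-ID derivation described above (the same filtering this PR adds in `FieldTemplate.tsx` and `AnyOfField.tsx`); the standalone helper name and the sample field ID are illustrative only, not part of the change:

```ts
// Sketch of the agent handle-ID derivation, assuming RJSF field IDs of the
// form "root_properties_<key>". Agent blocks need the raw input key as the
// handle ID, so the schema path segments are stripped; other block types
// keep using generateHandleId(). Helper name and sample ID are illustrative.
function agentHandleId(fieldId: string): string {
  return (
    fieldId
      .split("_")
      .filter((part) => part !== "root" && part !== "properties" && part.length > 0)
      .join("_") || ""
  );
}

// Example: "root_properties_name" resolves to the handle ID "name".
console.log(agentHandleId("root_properties_name"));
```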
### Changes πŸ—οΈ - **CustomNode.tsx**: Pass `uiType` prop to `OutputHandler` component - **FormCreator.tsx**: - Store agent form data in `hardcodedValues.inputs` instead of directly in `hardcodedValues` - Extract initial values from `hardcodedValues.inputs` for agent blocks - **OutputHandler.tsx**: - Accept `uiType` prop - Use direct key as handle ID for agents instead of `generateHandleId(key)` - **useMarketplaceAgentsContent.ts**: - Fetch full agent details using `getV2GetLibraryAgent` before adding to builder - Ensures agent schemas are properly populated (fixes issue where marketplace endpoint returns empty schemas) - **AnyOfField.tsx**: Generate handle IDs for agents by filtering out "root" and "properties" from schema path - **FieldTemplate.tsx**: Apply same handle ID generation logic for agent fields ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Add an agent block from marketplace and verify it renders correctly - [x] Connect inputs/outputs to/from an agent block and verify connections work - [x] Fill in form fields for an agent block and verify data persists correctly - [x] Verify agent blocks work in both new and existing flows - [x] Test that non-agent blocks still work as before (regression test) --- .../nodes/CustomNode/CustomNode.tsx | 6 +++++- .../FlowEditor/nodes/FormCreator.tsx | 19 +++++++++++++++++-- .../FlowEditor/nodes/OutputHandler.tsx | 7 ++++++- .../BlockMenuSearch/useBlockMenuSearch.ts | 8 +++++++- .../useMarketplaceAgentsContent.ts | 11 ++++++++++- .../fields/AnyOfField/AnyOfField.tsx | 10 +++++++++- .../templates/FieldTemplate.tsx | 10 +++++++++- 7 files changed, 63 insertions(+), 8 deletions(-) diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx index 974cbe3754..52068f3acb 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx @@ -106,7 +106,11 @@ export const CustomNode: React.FC> = React.memo( /> {data.uiType != BlockUIType.OUTPUT && ( - + )}
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/FormCreator.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/FormCreator.tsx index 315a52f553..cfee0bf89f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/FormCreator.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/FormCreator.tsx @@ -20,17 +20,32 @@ export const FormCreator = React.memo( className?: string; }) => { const updateNodeData = useNodeStore((state) => state.updateNodeData); + const getHardCodedValues = useNodeStore( (state) => state.getHardCodedValues, ); + const handleChange = ({ formData }: any) => { if ("credentials" in formData && !formData.credentials?.id) { delete formData.credentials; } - updateNodeData(nodeId, { hardcodedValues: formData }); + + const updatedValues = + uiType === BlockUIType.AGENT + ? { + ...getHardCodedValues(nodeId), + inputs: formData, + } + : formData; + + updateNodeData(nodeId, { hardcodedValues: updatedValues }); }; - const initialValues = getHardCodedValues(nodeId); + const hardcodedValues = getHardCodedValues(nodeId); + const initialValues = + uiType === BlockUIType.AGENT + ? (hardcodedValues.inputs ?? {}) + : hardcodedValues; return (
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/OutputHandler.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/OutputHandler.tsx index 9c032ac20f..ab3b648ba9 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/OutputHandler.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/OutputHandler.tsx @@ -14,13 +14,16 @@ import { import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore"; import { getTypeDisplayInfo } from "./helpers"; import { generateHandleId } from "../handlers/helpers"; +import { BlockUIType } from "../../types"; export const OutputHandler = ({ outputSchema, nodeId, + uiType, }: { outputSchema: RJSFSchema; nodeId: string; + uiType: BlockUIType; }) => { const { isOutputConnected } = useEdgeStore(); const properties = outputSchema?.properties || {}; @@ -79,7 +82,9 @@ export const OutputHandler = ({ diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearch/useBlockMenuSearch.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearch/useBlockMenuSearch.ts index 5e9007e617..3eb14d3ca9 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearch/useBlockMenuSearch.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearch/useBlockMenuSearch.ts @@ -7,6 +7,7 @@ import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { getV2GetSpecificAgent } from "@/app/api/__generated__/endpoints/store/store"; import { getGetV2ListLibraryAgentsQueryKey, + getV2GetLibraryAgent, usePostV2AddMarketplaceAgent, } from "@/app/api/__generated__/endpoints/library/library"; import { @@ -151,7 +152,12 @@ export const useBlockMenuSearch = () => { }); const libraryAgent = response.data as LibraryAgent; - addAgentToBuilder(libraryAgent); + + const { data: libraryAgentDetails } = await getV2GetLibraryAgent( + libraryAgent.id, + ); + + addAgentToBuilder(libraryAgentDetails as LibraryAgent); toast({ title: "Agent Added", diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/MarketplaceAgentsContent/useMarketplaceAgentsContent.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/MarketplaceAgentsContent/useMarketplaceAgentsContent.ts index 8ca3fe30f5..ff9b70b79a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/MarketplaceAgentsContent/useMarketplaceAgentsContent.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/MarketplaceAgentsContent/useMarketplaceAgentsContent.ts @@ -1,6 +1,7 @@ import { getGetV2GetBuilderItemCountsQueryKey } from "@/app/api/__generated__/endpoints/default/default"; import { getGetV2ListLibraryAgentsQueryKey, + getV2GetLibraryAgent, usePostV2AddMarketplaceAgent, } from "@/app/api/__generated__/endpoints/library/library"; import { @@ -105,8 +106,16 @@ export const useMarketplaceAgentsContent = () => { }, }); + // Here, libraryAgent has empty input and output schemas. + // Not updating the endpoint because this endpoint is used elsewhere. + // TODO: Create a new endpoint for builder specific to marketplace agents. 
const libraryAgent = response.data as LibraryAgent; - addAgentToBuilder(libraryAgent); + + const { data: libraryAgentDetails } = await getV2GetLibraryAgent( + libraryAgent.id, + ); + + addAgentToBuilder(libraryAgentDetails as LibraryAgent); toast({ title: "Agent Added", diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/AnyOfField/AnyOfField.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/AnyOfField/AnyOfField.tsx index 7fb3d9c938..79fa15304d 100644 --- a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/AnyOfField/AnyOfField.tsx +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/AnyOfField/AnyOfField.tsx @@ -23,6 +23,7 @@ import { TooltipTrigger, } from "@/components/atoms/Tooltip/BaseTooltip"; import { cn } from "@/lib/utils"; +import { BlockUIType } from "@/app/(platform)/build/components/types"; type TypeOption = { type: string; @@ -47,7 +48,14 @@ export const AnyOfField = ({ onBlur, onFocus, }: FieldProps) => { - const handleId = generateHandleId(idSchema.$id ?? ""); + const handleId = + formContext.uiType === BlockUIType.AGENT + ? (idSchema.$id ?? "") + .split("_") + .filter((p) => p !== "root" && p !== "properties" && p.length > 0) + .join("_") || "" + : generateHandleId(idSchema.$id ?? ""); + const updatedFormContexrt = { ...formContext, fromAnyOf: true }; const { nodeId, showHandles = true } = updatedFormContexrt; diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx index a056782939..ebc8a1f038 100644 --- a/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx @@ -58,7 +58,15 @@ const FieldTemplate: React.FC = ({ let handleId = null; if (!isArrayItem) { - handleId = generateHandleId(fieldId); + if (uiType === BlockUIType.AGENT) { + const parts = fieldId.split("_"); + const filtered = parts.filter( + (p) => p !== "root" && p !== "properties" && p.length > 0, + ); + handleId = filtered.join("_") || ""; + } else { + handleId = generateHandleId(fieldId); + } } else { handleId = arrayFieldHandleId; } From 9a1d9406776df8c759a77c4c9b817b8e53f8d4c7 Mon Sep 17 00:00:00 2001 From: Ubbe Date: Thu, 18 Dec 2025 18:08:19 +0100 Subject: [PATCH 51/58] fix(frontend): onboarding run card (#11636) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes πŸ—οΈ ### Before Screenshot 2025-12-18 at 17 16 57 - extra label - overflow ### After Screenshot 2025-12-18 at 17 41 53 ## Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Run locally - [x] Test the above --- .../app/(no-navbar)/onboarding/5-run/page.tsx | 22 ++++++------------- .../CredentialRow/CredentialRow.tsx | 7 ++++-- 2 files changed, 12 insertions(+), 17 deletions(-) diff --git a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx index 4b6abacbff..58960a0cf6 100644 --- a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx +++ b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx @@ -8,7 +8,6 @@ import { CardTitle, } from 
"@/components/__legacy__/ui/card"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; -import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; import { CircleNotchIcon } from "@phosphor-icons/react/dist/ssr"; import { Play } from "lucide-react"; import OnboardingButton from "../components/OnboardingButton"; @@ -79,20 +78,13 @@ export default function Page() { {Object.entries(agent?.input_schema.properties || {}).map( ([key, inputSubSchema]) => ( -
- - handleSetAgentInput(key, value)} - /> -
+ handleSetAgentInput(key, value)} + /> ), )}
- + {getCredentialDisplayName(credential, displayName)} {"*".repeat(MASKED_KEY_LENGTH)} From 0082a72657a5cca8af7ff6c9d08901d5c47ea6dd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 Dec 2025 11:16:22 -0600 Subject: [PATCH 52/58] chore(deps): Bump actions/labeler from 5 to 6 (#10868) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [actions/labeler](https://github.com/actions/labeler) from 5 to 6.
Release notes

Sourced from actions/labeler's releases.

v6.0.0

What's Changed

Breaking Changes

Dependency Upgrades

Documentation changes

New Contributors

... (truncated)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/labeler&package-manager=github_actions&previous-version=5&new-version=6)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

You can trigger a rebase of this PR by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
> **Note** > Automatic rebases have been disabled on this pull request as it has been open for over 30 days. Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/repo-pr-label.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/repo-pr-label.yml b/.github/workflows/repo-pr-label.yml index eef928ef16..97579c2784 100644 --- a/.github/workflows/repo-pr-label.yml +++ b/.github/workflows/repo-pr-label.yml @@ -61,6 +61,6 @@ jobs: pull-requests: write runs-on: ubuntu-latest steps: - - uses: actions/labeler@v5 + - uses: actions/labeler@v6 with: sync-labels: true From 22078671df67c0db82dd10c810b256df40d65ef8 Mon Sep 17 00:00:00 2001 From: Bently Date: Thu, 18 Dec 2025 18:29:20 +0100 Subject: [PATCH 53/58] feat(frontend): increase file upload size limit to 256MB (#11634) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Updated Next.js configuration to set body size limits for server actions and API routes. - Enhanced error handling in the API client to provide user-friendly messages for file size errors. - Added user-friendly error messages for 413 Payload Too Large responses in API error parsing. These changes ensure that file uploads are consistent with backend limits and improve user experience during uploads. ### Checklist πŸ“‹ #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Upload a file bigger than 10MB and it works - [X] Upload a file bigger than 256MB and you see a official error stating the max file size is 256MB --- autogpt_platform/frontend/next.config.mjs | 8 +++++ .../src/app/api/proxy/[...path]/route.ts | 4 +++ .../src/lib/autogpt-server-api/client.ts | 32 ++++++++++++++++++- .../src/lib/autogpt-server-api/helpers.ts | 15 +++++++++ 4 files changed, 58 insertions(+), 1 deletion(-) diff --git a/autogpt_platform/frontend/next.config.mjs b/autogpt_platform/frontend/next.config.mjs index d4595990a2..e4e4cdf544 100644 --- a/autogpt_platform/frontend/next.config.mjs +++ b/autogpt_platform/frontend/next.config.mjs @@ -3,6 +3,14 @@ import { withSentryConfig } from "@sentry/nextjs"; /** @type {import('next').NextConfig} */ const nextConfig = { productionBrowserSourceMaps: true, + experimental: { + serverActions: { + bodySizeLimit: "256mb", + }, + // Increase body size limit for API routes (file uploads) - 256MB to match backend limit + proxyClientMaxBodySize: "256mb", + middlewareClientMaxBodySize: "256mb", + }, images: { domains: [ // We dont need to maintain alphabetical order here diff --git a/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts b/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts index 09235f9c3b..293c406373 100644 --- a/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts +++ b/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts @@ -6,6 +6,10 @@ import { import { environment } from "@/services/environment"; import { NextRequest, NextResponse } from "next/server"; +// Increase body size limit to 256MB to match backend file upload limit +export const maxDuration = 300; // 5 minutes timeout for large uploads +export const dynamic = "force-dynamic"; + function buildBackendUrl(path: string[], queryString: string): string { const backendPath = path.join("/"); return `${environment.getAGPTServerBaseUrl()}/${backendPath}${queryString}`; diff --git 
a/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts index 3b0666bf62..682fc14108 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts @@ -910,7 +910,37 @@ export default class BackendAPI { reject(new Error("Invalid JSON response")); } } else { - reject(new Error(`HTTP ${xhr.status}: ${xhr.statusText}`)); + // Handle file size errors with user-friendly message + if (xhr.status === 413) { + reject(new Error("File is too large β€” max size is 256MB")); + return; + } + + // Try to parse error response for better messages + let errorMessage = `Upload failed (${xhr.status})`; + try { + const errorData = JSON.parse(xhr.responseText); + if (errorData.detail) { + if ( + typeof errorData.detail === "string" && + errorData.detail.includes("exceeds the maximum") + ) { + const match = errorData.detail.match( + /maximum allowed size of (\d+)MB/, + ); + const maxSize = match ? match[1] : "256"; + errorMessage = `File is too large β€” max size is ${maxSize}MB`; + } else if (typeof errorData.detail === "string") { + errorMessage = errorData.detail; + } + } else if (errorData.error) { + errorMessage = errorData.error; + } + } catch { + // Keep default message if parsing fails + } + + reject(new Error(errorMessage)); } }); diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/helpers.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/helpers.ts index 7e20783042..4cb24df77d 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/helpers.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/helpers.ts @@ -184,6 +184,11 @@ export function serializeRequestBody( } export async function parseApiError(response: Response): Promise { + // Handle 413 Payload Too Large with user-friendly message + if (response.status === 413) { + return "File is too large β€” max size is 256MB"; + } + try { const errorData = await response.clone().json(); @@ -205,6 +210,16 @@ export async function parseApiError(response: Response): Promise { return response.statusText; // Fallback to status text if no message } + // Check for file size error from backend + if ( + typeof errorData.detail === "string" && + errorData.detail.includes("exceeds the maximum") + ) { + const match = errorData.detail.match(/maximum allowed size of (\d+)MB/); + const maxSize = match ? match[1] : "256"; + return `File is too large β€” max size is ${maxSize}MB`; + } + return errorData.detail || errorData.error || response.statusText; } catch { return response.statusText; From cab498fa8ce3469ddde8d4c2e4b1f8ab1dd55e02 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 Dec 2025 17:34:04 +0000 Subject: [PATCH 54/58] chore(deps): Bump actions/stale from 9 to 10 (#10871) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [actions/stale](https://github.com/actions/stale) from 9 to 10.
Release notes

Sourced from actions/stale's releases.

v10.0.0

What's Changed

Breaking Changes

Enhancement

Dependency Upgrades

Documentation changes

New Contributors

Full Changelog: https://github.com/actions/stale/compare/v9...v10.0.0

v9.1.0

What's Changed

New Contributors

Full Changelog: https://github.com/actions/stale/compare/v9...v9.1.0

Changelog

Sourced from actions/stale's changelog.

Changelog

[9.1.0]

What's Changed

[9.0.0]

Breaking Changes

  1. Action is now stateful: If the action ends because of operations-per-run then the next run will start from the first unprocessed issue skipping the issues processed during the previous run(s). The state is reset when all the issues are processed. This should be considered for scheduling workflow runs.
  2. Version 9 of this action updated the runtime to Node.js 20. All scripts are now run with Node.js 20 instead of Node.js 16 and are affected by any breaking changes between Node.js 16 and 20.

What Else Changed

  1. Performance optimization that removes unnecessary API calls by @​dsame in #1033; fixes #792
  2. Logs displaying current GitHub API rate limit by @​dsame in #1032; addresses #1029

For more information, please read the action documentation and its section about statefulness

[4.1.1]

In scope of this release we updated actions/core to 1.10.0 for v4 and fixed issues operation count.

[8.0.0]

:warning: This version contains breaking changes :warning:

[7.0.0]

:warning: Breaking change :warning:

... (truncated)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/stale&package-manager=github_actions&previous-version=9&new-version=10)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

You can trigger a rebase of this PR by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
> **Note** > Automatic rebases have been disabled on this pull request as it has been open for over 30 days. --- > [!NOTE] > Update the stale-issues workflow to use `actions/stale@v10` instead of `v9`. > > Written by [Cursor Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit 747d4ea73adf8f56c5b7c91653c7edcb2a32e1ba. This will update automatically on new commits. Configure [here](https://cursor.com/dashboard?tab=bugbot). Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Nicholas Tindle --- .github/workflows/repo-close-stale-issues.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/repo-close-stale-issues.yml b/.github/workflows/repo-close-stale-issues.yml index a9f183d775..d58459daa1 100644 --- a/.github/workflows/repo-close-stale-issues.yml +++ b/.github/workflows/repo-close-stale-issues.yml @@ -11,7 +11,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v9 + - uses: actions/stale@v10 with: # operations-per-run: 5000 stale-issue-message: > From 99e2261254df6aadc7ed97df235f7d696b8b44b5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 Dec 2025 18:55:57 +0100 Subject: [PATCH 55/58] chore(frontend/deps-dev): bump eslint-config-next from 15.5.2 to 15.5.6 in /autogpt_platform/frontend (#11355) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [eslint-config-next](https://github.com/vercel/next.js/tree/HEAD/packages/eslint-config-next) from 15.5.2 to 15.5.6.
Release notes

Sourced from eslint-config-next's releases.

v15.5.6

[!NOTE]
This release is backporting bug fixes. It does not include all pending features/changes on canary.

Core Changes

  • Turbopack: don't define process.cwd() in node_modules #83452

Credits

Huge thanks to @​mischnic for helping!

v15.5.5

[!NOTE]
This release is backporting bug fixes. It does not include all pending features/changes on canary.

Core Changes

  • Split code-frame into separate compiled package (#84238)
  • Add deprecation warning to Runtime config (#84650)
  • fix: unstable_cache should perform blocking revalidation during ISR revalidation (#84716)
  • feat: experimental.middlewareClientMaxBodySize body cloning limit (#84722)
  • fix: missing next/link types with typedRoutes (#84779)

Misc Changes

  • docs: early October improvements and fixes (#84334)

Credits

Huge thanks to @​devjiwonchoi, @​ztanner, and @​icyJoseph for helping!

v15.5.4

[!NOTE]
This release is backporting bug fixes. It does not include all pending features/changes on canary.

Core Changes

  • fix: ensure onRequestError is invoked when otel enabled (#83343)
  • fix: devtools initial position should be from next config (#83571)
  • [devtool] fix overlay styles are missing (#83721)
  • Turbopack: don't match dynamic pattern for node_modules packages (#83176)
  • Turbopack: don't treat metadata routes as RSC (#82911)
  • [turbopack] Improve handling of symlink resolution errors in track_glob and read_glob (#83357)
  • Turbopack: throw large static metadata error earlier (#82939)
  • fix: error overlay not closing when backdrop clicked (#83981)
  • Turbopack: flush Node.js worker IPC on error (#84077)

Misc Changes

  • [CNA] use linter preference (#83194)
  • CI: use KV for test timing data (#83745)

... (truncated)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=eslint-config-next&package-manager=npm_and_yarn&previous-version=15.5.2&new-version=15.5.6)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

You can trigger a rebase of this PR by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
> **Note** > Automatic rebases have been disabled on this pull request as it has been open for over 30 days. Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Lluis Agusti Co-authored-by: Ubbe --- autogpt_platform/frontend/package.json | 2 +- autogpt_platform/frontend/pnpm-lock.yaml | 269 +++++++++++++++++------ 2 files changed, 201 insertions(+), 70 deletions(-) diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json index 89c367c788..ff2175baa1 100644 --- a/autogpt_platform/frontend/package.json +++ b/autogpt_platform/frontend/package.json @@ -137,7 +137,7 @@ "concurrently": "9.2.1", "cross-env": "10.1.0", "eslint": "8.57.1", - "eslint-config-next": "15.5.2", + "eslint-config-next": "15.5.7", "eslint-plugin-storybook": "9.1.5", "import-in-the-middle": "1.14.2", "msw": "2.11.6", diff --git a/autogpt_platform/frontend/pnpm-lock.yaml b/autogpt_platform/frontend/pnpm-lock.yaml index de9e029fac..d1d832549a 100644 --- a/autogpt_platform/frontend/pnpm-lock.yaml +++ b/autogpt_platform/frontend/pnpm-lock.yaml @@ -331,8 +331,8 @@ importers: specifier: 8.57.1 version: 8.57.1 eslint-config-next: - specifier: 15.5.2 - version: 15.5.2(eslint@8.57.1)(typescript@5.9.3) + specifier: 15.5.7 + version: 15.5.7(eslint@8.57.1)(typescript@5.9.3) eslint-plugin-storybook: specifier: 9.1.5 version: 9.1.5(eslint@8.57.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(typescript@5.9.3) @@ -986,12 +986,15 @@ packages: '@date-fns/tz@1.4.1': resolution: {integrity: sha512-P5LUNhtbj6YfI3iJjw5EL9eUAG6OitD0W3fWQcpQjDRc/QIsL0tRNuO1PcDvPccWL1fSTXXdE1ds+l95DV/OFA==} - '@emnapi/core@1.5.0': - resolution: {integrity: sha512-sbP8GzB1WDzacS8fgNPpHlp6C9VZe+SJP3F90W9rLemaQj2PzIuTEl1qDOYQf58YIpyjViI24y9aPWCjEzY2cg==} + '@emnapi/core@1.7.1': + resolution: {integrity: sha512-o1uhUASyo921r2XtHYOHy7gdkGLge8ghBEQHMWmyJFoXlpU58kIrhhN3w26lpQb6dspetweapMn2CSNwQ8I4wg==} '@emnapi/runtime@1.5.0': resolution: {integrity: sha512-97/BJ3iXHww3djw6hYIfErCZFee7qCtrneuLa20UXFCOTCfBM2cvQHjWJ2EG0s0MtdNwInarqCTz35i4wWXHsQ==} + '@emnapi/runtime@1.7.1': + resolution: {integrity: sha512-PVtJr5CmLwYAU9PZDMITZoR5iAOShYREoR45EyyLrbntV50mdePTgUn4AmOw90Ifcj+x2kRjdzr1HP3RrNiHGA==} + '@emnapi/wasi-threads@1.1.0': resolution: {integrity: sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==} @@ -1329,6 +1332,10 @@ packages: resolution: {integrity: sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==} engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + '@eslint-community/regexpp@4.12.2': + resolution: {integrity: sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + '@eslint/eslintrc@2.1.4': resolution: {integrity: sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} @@ -1605,8 +1612,8 @@ packages: '@next/env@15.4.10': resolution: {integrity: sha512-knhmoJ0Vv7VRf6pZEPSnciUG1S4bIhWx+qTYBW/AjxEtlzsiNORPk8sFDCEvqLfmKuey56UB9FL1UdHEV3uBrg==} - '@next/eslint-plugin-next@15.5.2': - resolution: {integrity: sha512-lkLrRVxcftuOsJNhWatf1P2hNVfh98k/omQHrCEPPriUypR6RcS13IvLdIrEvkm9AH2Nu2YpR5vLqBuy6twH3Q==} + '@next/eslint-plugin-next@15.5.7': + resolution: {integrity: 
sha512-DtRU2N7BkGr8r+pExfuWHwMEPX5SD57FeA6pxdgCHODo+b/UgIgjE+rgWKtJAbEbGhVZ2jtHn4g3wNhWFoNBQQ==} '@next/swc-darwin-arm64@15.4.8': resolution: {integrity: sha512-Pf6zXp7yyQEn7sqMxur6+kYcywx5up1J849psyET7/8pG2gQTVMjU3NzgIt8SeEP5to3If/SaWmaA6H6ysBr1A==} @@ -2622,8 +2629,8 @@ packages: '@rtsao/scc@1.1.0': resolution: {integrity: sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==} - '@rushstack/eslint-patch@1.12.0': - resolution: {integrity: sha512-5EwMtOqvJMMa3HbmxLlF74e+3/HhwBTMcvt3nqVJgGCozO6hzIPOBlwm8mGVNR9SN2IJpxSnlxczyDjcn7qIyw==} + '@rushstack/eslint-patch@1.15.0': + resolution: {integrity: sha512-ojSshQPKwVvSMR8yT2L/QtUkV5SXi/IfDiJ4/8d6UbTPjiHVmxZzUAzGD8Tzks1b9+qQkZa0isUOvYObedITaw==} '@scarf/scarf@1.4.0': resolution: {integrity: sha512-xxeapPiUXdZAE3che6f3xogoJPeZgig6omHEy1rIY5WVsB3H2BHNnZH+gHG6x91SCWyQCzWGsuL2Hh3ClO5/qQ==} @@ -3097,8 +3104,8 @@ packages: peerDependencies: '@testing-library/dom': '>=7.21.4' - '@tybys/wasm-util@0.10.0': - resolution: {integrity: sha512-VyyPYFlOMNylG45GoAe0xDoLwWuowvf92F9kySqzYh8vmYm7D2u4iUJKa1tOUpS70Ku13ASrOkS4ScXFsTaCNQ==} + '@tybys/wasm-util@0.10.1': + resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==} '@types/aria-query@5.0.4': resolution: {integrity: sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==} @@ -3288,16 +3295,16 @@ packages: '@types/ws@8.18.1': resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} - '@typescript-eslint/eslint-plugin@8.43.0': - resolution: {integrity: sha512-8tg+gt7ENL7KewsKMKDHXR1vm8tt9eMxjJBYINf6swonlWgkYn5NwyIgXpbbDxTNU5DgpDFfj95prcTq2clIQQ==} + '@typescript-eslint/eslint-plugin@8.48.1': + resolution: {integrity: sha512-X63hI1bxl5ohelzr0LY5coufyl0LJNthld+abwxpCoo6Gq+hSqhKwci7MUWkXo67mzgUK6YFByhmaHmUcuBJmA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: - '@typescript-eslint/parser': ^8.43.0 + '@typescript-eslint/parser': ^8.48.1 eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/parser@8.43.0': - resolution: {integrity: sha512-B7RIQiTsCBBmY+yW4+ILd6mF5h1FUwJsVvpqkrgpszYifetQ2Ke+Z4u6aZh0CblkUGIdR59iYVyXqqZGkZ3aBw==} + '@typescript-eslint/parser@8.48.1': + resolution: {integrity: sha512-PC0PDZfJg8sP7cmKe6L3QIL8GZwU5aRvUFedqSIpw3B+QjRSUZeeITC2M5XKeMXEzL6wccN196iy3JLwKNvDVA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 @@ -3315,6 +3322,12 @@ packages: peerDependencies: typescript: '>=4.8.4 <6.0.0' + '@typescript-eslint/project-service@8.48.1': + resolution: {integrity: sha512-HQWSicah4s9z2/HifRPQ6b6R7G+SBx64JlFQpgSSHWPKdvCZX57XCbszg/bapbRsOEv42q5tayTYcEFpACcX1w==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + '@typescript-eslint/scope-manager@8.43.0': resolution: {integrity: sha512-daSWlQ87ZhsjrbMLvpuuMAt3y4ba57AuvadcR7f3nl8eS3BjRc8L9VLxFLk92RL5xdXOg6IQ+qKjjqNEimGuAg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3323,6 +3336,10 @@ packages: resolution: {integrity: sha512-LF4b/NmGvdWEHD2H4MsHD8ny6JpiVNDzrSZr3CsckEgCbAGZbYM4Cqxvi9L+WqDMT+51Ozy7lt2M+d0JLEuBqA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/scope-manager@8.48.1': + resolution: {integrity: sha512-rj4vWQsytQbLxC5Bf4XwZ0/CKd362DkWMUkviT7DCS057SK64D5lH74sSGzhI6PDD2HCEq02xAP9cX68dYyg1w==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + 
'@typescript-eslint/tsconfig-utils@8.43.0': resolution: {integrity: sha512-ALC2prjZcj2YqqL5X/bwWQmHA2em6/94GcbB/KKu5SX3EBDOsqztmmX1kMkvAJHzxk7TazKzJfFiEIagNV3qEA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3335,8 +3352,14 @@ packages: peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/type-utils@8.43.0': - resolution: {integrity: sha512-qaH1uLBpBuBBuRf8c1mLJ6swOfzCXryhKND04Igr4pckzSEW9JX5Aw9AgW00kwfjWJF0kk0ps9ExKTfvXfw4Qg==} + '@typescript-eslint/tsconfig-utils@8.48.1': + resolution: {integrity: sha512-k0Jhs4CpEffIBm6wPaCXBAD7jxBtrHjrSgtfCjUvPp9AZ78lXKdTR8fxyZO5y4vWNlOvYXRtngSZNSn+H53Jkw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/type-utils@8.48.1': + resolution: {integrity: sha512-1jEop81a3LrJQLTf/1VfPQdhIY4PlGDBc/i67EVWObrtvcziysbLN3oReexHOM6N3jyXgCrkBsZpqwH0hiDOQg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 @@ -3350,6 +3373,10 @@ packages: resolution: {integrity: sha512-lNCWCbq7rpg7qDsQrd3D6NyWYu+gkTENkG5IKYhUIcxSb59SQC/hEQ+MrG4sTgBVghTonNWq42bA/d4yYumldQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/types@8.48.1': + resolution: {integrity: sha512-+fZ3LZNeiELGmimrujsDCT4CRIbq5oXdHe7chLiW8qzqyPMnn1puNstCrMNVAqwcl2FdIxkuJ4tOs/RFDBVc/Q==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/typescript-estree@8.43.0': resolution: {integrity: sha512-7Vv6zlAhPb+cvEpP06WXXy/ZByph9iL6BQRBDj4kmBsW98AqEeQHlj/13X+sZOrKSo9/rNKH4Ul4f6EICREFdw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3362,6 +3389,12 @@ packages: peerDependencies: typescript: '>=4.8.4 <6.0.0' + '@typescript-eslint/typescript-estree@8.48.1': + resolution: {integrity: sha512-/9wQ4PqaefTK6POVTjJaYS0bynCgzh6ClJHGSBj06XEHjkfylzB+A3qvyaXnErEZSaxhIo4YdyBgq6j4RysxDg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + '@typescript-eslint/utils@8.43.0': resolution: {integrity: sha512-S1/tEmkUeeswxd0GGcnwuVQPFWo8NzZTOMxCvw8BX7OMxnNae+i8Tm7REQen/SwUIPoPqfKn7EaZ+YLpiB3k9g==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3376,6 +3409,13 @@ packages: eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <6.0.0' + '@typescript-eslint/utils@8.48.1': + resolution: {integrity: sha512-fAnhLrDjiVfey5wwFRwrweyRlCmdz5ZxXz2G/4cLn0YDLjTapmN4gcCsTBR1N2rWnZSDeWpYtgLDsJt+FpmcwA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <6.0.0' + '@typescript-eslint/visitor-keys@8.43.0': resolution: {integrity: sha512-T+S1KqRD4sg/bHfLwrpF/K3gQLBM1n7Rp7OjjikjTEssI2YJzQpi5WXoynOaQ93ERIuq3O8RBTOUYDKszUCEHw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3384,6 +3424,10 @@ packages: resolution: {integrity: sha512-tUFMXI4gxzzMXt4xpGJEsBsTox0XbNQ1y94EwlD/CuZwFcQP79xfQqMhau9HsRc/J0cAPA/HZt1dZPtGn9V/7w==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/visitor-keys@8.48.1': + resolution: {integrity: sha512-BmxxndzEWhE4TIEEMBs8lP3MBWN3jFPs/p6gPm/wkv02o41hI6cq9AuSmGAaTTHPtA1FTi2jBre4A9rm5ZmX+Q==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@ungap/structured-clone@1.3.0': resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==} @@ -4585,8 +4629,8 @@ packages: resolution: {integrity: sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==} engines: {node: '>=12'} - 
eslint-config-next@15.5.2: - resolution: {integrity: sha512-3hPZghsLupMxxZ2ggjIIrat/bPniM2yRpsVPVM40rp8ZMzKWOJp2CGWn7+EzoV2ddkUr5fxNfHpF+wU1hGt/3g==} + eslint-config-next@15.5.7: + resolution: {integrity: sha512-nU/TRGHHeG81NeLW5DeQT5t6BDUqbpsNQTvef1ld/tqHT+/zTx60/TIhKnmPISTTe++DVo+DLxDmk4rnwHaZVw==} peerDependencies: eslint: ^7.23.0 || ^8.0.0 || ^9.0.0 typescript: '>=3.3.1' @@ -4918,6 +4962,10 @@ packages: peerDependencies: next: '>=13.2.0' + generator-function@2.0.1: + resolution: {integrity: sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==} + engines: {node: '>= 0.4'} + gensync@1.0.0-beta.2: resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} engines: {node: '>=6.9.0'} @@ -4946,8 +4994,8 @@ packages: resolution: {integrity: sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==} engines: {node: '>= 0.4'} - get-tsconfig@4.10.1: - resolution: {integrity: sha512-auHyJ4AgMz7vgS8Hp3N6HXSmlMdUyhSUrfBF16w153rxtLIEOE+HGqaBppczZvnHLqQJfiHotCYpNhl0lUROFQ==} + get-tsconfig@4.13.0: + resolution: {integrity: sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ==} github-slugger@2.0.0: resolution: {integrity: sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==} @@ -5282,6 +5330,10 @@ packages: resolution: {integrity: sha512-nPUB5km40q9e8UfN/Zc24eLlzdSf9OfKByBw9CIdw4H1giPMeA0OIJvbchsCu4npfI2QcMVBsGEBHKZ7wLTWmQ==} engines: {node: '>= 0.4'} + is-generator-function@1.1.2: + resolution: {integrity: sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA==} + engines: {node: '>= 0.4'} + is-glob@4.0.3: resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} engines: {node: '>=0.10.0'} @@ -5903,8 +5955,8 @@ packages: engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} hasBin: true - napi-postinstall@0.3.3: - resolution: {integrity: sha512-uTp172LLXSxuSYHv/kou+f6KW3SMppU9ivthaVTXian9sOt3XM/zHYHpRZiLgQoxeWfYUnslNWQHF1+G71xcow==} + napi-postinstall@0.3.4: + resolution: {integrity: sha512-PHI5f1O0EP5xJ9gQmFGMS6IZcrVvTjpXjz7Na41gTE7eE2hK11lg04CECCYEEjdc17EV4DO+fkGEtt7TpTaTiQ==} engines: {node: ^12.20.0 || ^14.18.0 || >=16.0.0} hasBin: true @@ -6769,6 +6821,11 @@ packages: engines: {node: '>= 0.4'} hasBin: true + resolve@1.22.11: + resolution: {integrity: sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==} + engines: {node: '>= 0.4'} + hasBin: true + resolve@1.22.8: resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==} hasBin: true @@ -7858,7 +7915,7 @@ snapshots: '@babel/helper-plugin-utils': 7.27.1 debug: 4.4.3 lodash.debounce: 4.0.8 - resolve: 1.22.10 + resolve: 1.22.11 transitivePeerDependencies: - supports-color @@ -8550,7 +8607,7 @@ snapshots: '@date-fns/tz@1.4.1': {} - '@emnapi/core@1.5.0': + '@emnapi/core@1.7.1': dependencies: '@emnapi/wasi-threads': 1.1.0 tslib: 2.8.1 @@ -8561,6 +8618,11 @@ snapshots: tslib: 2.8.1 optional: true + '@emnapi/runtime@1.7.1': + dependencies: + tslib: 2.8.1 + optional: true + '@emnapi/wasi-threads@1.1.0': dependencies: tslib: 2.8.1 @@ -8739,6 +8801,8 @@ snapshots: '@eslint-community/regexpp@4.12.1': {} + '@eslint-community/regexpp@4.12.2': {} + '@eslint/eslintrc@2.1.4': dependencies: ajv: 6.12.6 @@ -8996,16 +9060,16 @@ 
snapshots: '@napi-rs/wasm-runtime@0.2.12': dependencies: - '@emnapi/core': 1.5.0 - '@emnapi/runtime': 1.5.0 - '@tybys/wasm-util': 0.10.0 + '@emnapi/core': 1.7.1 + '@emnapi/runtime': 1.7.1 + '@tybys/wasm-util': 0.10.1 optional: true '@neoconfetti/react@1.0.0': {} '@next/env@15.4.10': {} - '@next/eslint-plugin-next@15.5.2': + '@next/eslint-plugin-next@15.5.7': dependencies: fast-glob: 3.3.1 @@ -10115,7 +10179,7 @@ snapshots: '@rtsao/scc@1.1.0': {} - '@rushstack/eslint-patch@1.12.0': {} + '@rushstack/eslint-patch@1.15.0': {} '@scarf/scarf@1.4.0': {} @@ -10867,7 +10931,7 @@ snapshots: dependencies: '@testing-library/dom': 10.4.1 - '@tybys/wasm-util@0.10.0': + '@tybys/wasm-util@0.10.1': dependencies: tslib: 2.8.1 optional: true @@ -11065,14 +11129,14 @@ snapshots: dependencies: '@types/node': 24.10.0 - '@typescript-eslint/eslint-plugin@8.43.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3)': + '@typescript-eslint/eslint-plugin@8.48.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3)': dependencies: - '@eslint-community/regexpp': 4.12.1 - '@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3) - '@typescript-eslint/scope-manager': 8.43.0 - '@typescript-eslint/type-utils': 8.43.0(eslint@8.57.1)(typescript@5.9.3) - '@typescript-eslint/utils': 8.43.0(eslint@8.57.1)(typescript@5.9.3) - '@typescript-eslint/visitor-keys': 8.43.0 + '@eslint-community/regexpp': 4.12.2 + '@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3) + '@typescript-eslint/scope-manager': 8.48.1 + '@typescript-eslint/type-utils': 8.48.1(eslint@8.57.1)(typescript@5.9.3) + '@typescript-eslint/utils': 8.48.1(eslint@8.57.1)(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.48.1 eslint: 8.57.1 graphemer: 1.4.0 ignore: 7.0.5 @@ -11082,12 +11146,12 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3)': + '@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3)': dependencies: - '@typescript-eslint/scope-manager': 8.43.0 - '@typescript-eslint/types': 8.43.0 - '@typescript-eslint/typescript-estree': 8.43.0(typescript@5.9.3) - '@typescript-eslint/visitor-keys': 8.43.0 + '@typescript-eslint/scope-manager': 8.48.1 + '@typescript-eslint/types': 8.48.1 + '@typescript-eslint/typescript-estree': 8.48.1(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.48.1 debug: 4.4.3 eslint: 8.57.1 typescript: 5.9.3 @@ -11097,7 +11161,7 @@ snapshots: '@typescript-eslint/project-service@8.43.0(typescript@5.9.3)': dependencies: '@typescript-eslint/tsconfig-utils': 8.43.0(typescript@5.9.3) - '@typescript-eslint/types': 8.43.0 + '@typescript-eslint/types': 8.48.1 debug: 4.4.3 typescript: 5.9.3 transitivePeerDependencies: @@ -11106,7 +11170,16 @@ snapshots: '@typescript-eslint/project-service@8.46.2(typescript@5.9.3)': dependencies: '@typescript-eslint/tsconfig-utils': 8.46.2(typescript@5.9.3) - '@typescript-eslint/types': 8.46.2 + '@typescript-eslint/types': 8.48.1 + debug: 4.4.3 + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/project-service@8.48.1(typescript@5.9.3)': + dependencies: + '@typescript-eslint/tsconfig-utils': 8.48.1(typescript@5.9.3) + '@typescript-eslint/types': 8.48.1 debug: 4.4.3 typescript: 5.9.3 transitivePeerDependencies: @@ -11122,6 +11195,11 @@ snapshots: '@typescript-eslint/types': 8.46.2 '@typescript-eslint/visitor-keys': 8.46.2 + '@typescript-eslint/scope-manager@8.48.1': 
+ dependencies: + '@typescript-eslint/types': 8.48.1 + '@typescript-eslint/visitor-keys': 8.48.1 + '@typescript-eslint/tsconfig-utils@8.43.0(typescript@5.9.3)': dependencies: typescript: 5.9.3 @@ -11130,11 +11208,15 @@ snapshots: dependencies: typescript: 5.9.3 - '@typescript-eslint/type-utils@8.43.0(eslint@8.57.1)(typescript@5.9.3)': + '@typescript-eslint/tsconfig-utils@8.48.1(typescript@5.9.3)': dependencies: - '@typescript-eslint/types': 8.43.0 - '@typescript-eslint/typescript-estree': 8.43.0(typescript@5.9.3) - '@typescript-eslint/utils': 8.43.0(eslint@8.57.1)(typescript@5.9.3) + typescript: 5.9.3 + + '@typescript-eslint/type-utils@8.48.1(eslint@8.57.1)(typescript@5.9.3)': + dependencies: + '@typescript-eslint/types': 8.48.1 + '@typescript-eslint/typescript-estree': 8.48.1(typescript@5.9.3) + '@typescript-eslint/utils': 8.48.1(eslint@8.57.1)(typescript@5.9.3) debug: 4.4.3 eslint: 8.57.1 ts-api-utils: 2.1.0(typescript@5.9.3) @@ -11146,6 +11228,8 @@ snapshots: '@typescript-eslint/types@8.46.2': {} + '@typescript-eslint/types@8.48.1': {} + '@typescript-eslint/typescript-estree@8.43.0(typescript@5.9.3)': dependencies: '@typescript-eslint/project-service': 8.43.0(typescript@5.9.3) @@ -11156,7 +11240,7 @@ snapshots: fast-glob: 3.3.3 is-glob: 4.0.3 minimatch: 9.0.5 - semver: 7.7.2 + semver: 7.7.3 ts-api-utils: 2.1.0(typescript@5.9.3) typescript: 5.9.3 transitivePeerDependencies: @@ -11178,6 +11262,21 @@ snapshots: transitivePeerDependencies: - supports-color + '@typescript-eslint/typescript-estree@8.48.1(typescript@5.9.3)': + dependencies: + '@typescript-eslint/project-service': 8.48.1(typescript@5.9.3) + '@typescript-eslint/tsconfig-utils': 8.48.1(typescript@5.9.3) + '@typescript-eslint/types': 8.48.1 + '@typescript-eslint/visitor-keys': 8.48.1 + debug: 4.4.3 + minimatch: 9.0.5 + semver: 7.7.3 + tinyglobby: 0.2.15 + ts-api-utils: 2.1.0(typescript@5.9.3) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + '@typescript-eslint/utils@8.43.0(eslint@8.57.1)(typescript@5.9.3)': dependencies: '@eslint-community/eslint-utils': 4.9.0(eslint@8.57.1) @@ -11200,6 +11299,17 @@ snapshots: transitivePeerDependencies: - supports-color + '@typescript-eslint/utils@8.48.1(eslint@8.57.1)(typescript@5.9.3)': + dependencies: + '@eslint-community/eslint-utils': 4.9.0(eslint@8.57.1) + '@typescript-eslint/scope-manager': 8.48.1 + '@typescript-eslint/types': 8.48.1 + '@typescript-eslint/typescript-estree': 8.48.1(typescript@5.9.3) + eslint: 8.57.1 + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + '@typescript-eslint/visitor-keys@8.43.0': dependencies: '@typescript-eslint/types': 8.43.0 @@ -11210,6 +11320,11 @@ snapshots: '@typescript-eslint/types': 8.46.2 eslint-visitor-keys: 4.2.1 + '@typescript-eslint/visitor-keys@8.48.1': + dependencies: + '@typescript-eslint/types': 8.48.1 + eslint-visitor-keys: 4.2.1 + '@ungap/structured-clone@1.3.0': {} '@unrs/resolver-binding-android-arm-eabi@1.11.1': @@ -12532,16 +12647,16 @@ snapshots: escape-string-regexp@5.0.0: {} - eslint-config-next@15.5.2(eslint@8.57.1)(typescript@5.9.3): + eslint-config-next@15.5.7(eslint@8.57.1)(typescript@5.9.3): dependencies: - '@next/eslint-plugin-next': 15.5.2 - '@rushstack/eslint-patch': 1.12.0 - '@typescript-eslint/eslint-plugin': 8.43.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3) - '@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3) + '@next/eslint-plugin-next': 15.5.7 + '@rushstack/eslint-patch': 1.15.0 + 
'@typescript-eslint/eslint-plugin': 8.48.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3) + '@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3) eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@8.57.1) - eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) + eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) eslint-plugin-jsx-a11y: 6.10.2(eslint@8.57.1) eslint-plugin-react: 7.37.5(eslint@8.57.1) eslint-plugin-react-hooks: 5.2.0(eslint@8.57.1) @@ -12556,7 +12671,7 @@ snapshots: dependencies: debug: 3.2.7 is-core-module: 2.16.1 - resolve: 1.22.10 + resolve: 1.22.11 transitivePeerDependencies: - supports-color @@ -12565,28 +12680,28 @@ snapshots: '@nolyfill/is-core-module': 1.0.39 debug: 4.4.3 eslint: 8.57.1 - get-tsconfig: 4.10.1 + get-tsconfig: 4.13.0 is-bun-module: 2.0.0 stable-hash: 0.0.5 tinyglobby: 0.2.15 unrs-resolver: 1.11.1 optionalDependencies: - eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) + eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) transitivePeerDependencies: - supports-color - eslint-module-utils@2.12.1(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1): + eslint-module-utils@2.12.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1): dependencies: debug: 3.2.7 optionalDependencies: - '@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3) + '@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3) eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@8.57.1) transitivePeerDependencies: - supports-color - eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1): + eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1): dependencies: '@rtsao/scc': 1.1.0 array-includes: 3.1.9 @@ -12597,7 +12712,7 @@ snapshots: doctrine: 2.1.0 eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) + eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) hasown: 2.0.2 is-core-module: 2.16.1 is-glob: 4.0.3 @@ -12609,7 +12724,7 @@ snapshots: string.prototype.trimend: 1.0.9 tsconfig-paths: 3.15.0 optionalDependencies: - '@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3) + '@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3) transitivePeerDependencies: - eslint-import-resolver-typescript - 
eslint-import-resolver-webpack @@ -12958,6 +13073,8 @@ snapshots: dependencies: next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + generator-function@2.0.1: {} + gensync@1.0.0-beta.2: {} get-caller-file@2.0.5: {} @@ -12990,7 +13107,7 @@ snapshots: es-errors: 1.3.0 get-intrinsic: 1.3.0 - get-tsconfig@4.10.1: + get-tsconfig@4.13.0: dependencies: resolve-pkg-maps: 1.0.0 @@ -13357,7 +13474,7 @@ snapshots: is-bun-module@2.0.0: dependencies: - semver: 7.7.2 + semver: 7.7.3 is-callable@1.2.7: {} @@ -13395,6 +13512,14 @@ snapshots: has-tostringtag: 1.0.2 safe-regex-test: 1.1.0 + is-generator-function@1.1.2: + dependencies: + call-bound: 1.0.4 + generator-function: 2.0.1 + get-proto: 1.0.1 + has-tostringtag: 1.0.2 + safe-regex-test: 1.1.0 + is-glob@4.0.3: dependencies: is-extglob: 2.1.1 @@ -14215,7 +14340,7 @@ snapshots: nanoid@3.3.11: {} - napi-postinstall@0.3.3: {} + napi-postinstall@0.3.4: {} natural-compare@1.4.0: {} @@ -15185,6 +15310,12 @@ snapshots: path-parse: 1.0.7 supports-preserve-symlinks-flag: 1.0.0 + resolve@1.22.11: + dependencies: + is-core-module: 2.16.1 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + resolve@1.22.8: dependencies: is-core-module: 2.16.1 @@ -15996,7 +16127,7 @@ snapshots: unrs-resolver@1.11.1: dependencies: - napi-postinstall: 0.3.3 + napi-postinstall: 0.3.4 optionalDependencies: '@unrs/resolver-binding-android-arm-eabi': 1.11.1 '@unrs/resolver-binding-android-arm64': 1.11.1 @@ -16224,7 +16355,7 @@ snapshots: is-async-function: 2.1.1 is-date-object: 1.1.0 is-finalizationregistry: 1.1.1 - is-generator-function: 1.1.0 + is-generator-function: 1.1.2 is-regex: 1.2.1 is-weakref: 1.1.1 isarray: 2.0.5 From 4c474417bc942b41f80531bd6e6aef57f254f7ce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 Dec 2025 18:58:27 +0100 Subject: [PATCH 56/58] chore(frontend/deps-dev): bump import-in-the-middle from 1.14.2 to 2.0.0 in /autogpt_platform/frontend (#11357) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [import-in-the-middle](https://github.com/nodejs/import-in-the-middle) from 1.14.2 to 2.0.0.
**Release notes**

_Sourced from import-in-the-middle's releases._

**import-in-the-middle v2.0.0** (2025-10-14)

**⚠ BREAKING CHANGES**

This was only a new major out of an abundance of caution. The hook code has been converted to ESM to work around some loader issues. There should actually be no breaking changes when using import-in-the-middle/hook.mjs or the exported Hook API.
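A minimal sketch of that unaffected path, using only the `Hook` export named above; the module name is a placeholder and the callback shape is an assumption based on the library's usual usage, so verify it against the import-in-the-middle README:

```js
// Sketch only: "some-esm-package" is a placeholder module name, not from this PR.
import { Hook } from "import-in-the-middle";

// Ask import-in-the-middle to call back whenever the placeholder module is imported.
Hook(["some-esm-package"], (exported, name, baseDir) => {
  // `exported` is the intercepted module namespace; this sketch only observes it.
  console.log(`intercepted ${name}`, baseDir);
});
```

Because the 2.0.0 change is limited to how the loader-thread code is shipped, a hook registered this way should behave the same before and after the bump.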

**Features**

- convert all modules running in loader thread to ESM (#210) (da7c7a6)

**import-in-the-middle v1.15.0** (2025-10-09)

**Features**

- Compatibility with specifier imports (#211) (83d662a)

**import-in-the-middle v1.14.4** (2025-09-25)

**Bug Fixes**

**import-in-the-middle v1.14.3** (2025-09-24)

**Bug Fixes**

**Changelog**

_Sourced from import-in-the-middle's changelog._

**2.0.0 (2025-10-14)**

**⚠ BREAKING CHANGES**

Converting all modules running in the loader thread to ESM should not be a breaking change for most users since it primarily affects internal implementation details. However, if you were referencing internal CJS files like hook.js this will no longer work.
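For reference, a hedged sketch of the supported path: instead of importing internal files such as `hook.js`, register the public `import-in-the-middle/hook.mjs` entry point. The file name `register-iitm.mjs` below is made up for illustration, and `register()` is Node's `node:module` API, not something this release adds:

```js
// register-iitm.mjs (hypothetical helper file)
// Registers import-in-the-middle's public ESM hook rather than reaching into
// internal CJS files like hook.js, which v2 no longer exposes.
import { register } from "node:module";

register("import-in-the-middle/hook.mjs", import.meta.url);
```

The app would then be started with something like `node --import ./register-iitm.mjs app.mjs` (Node 20.6+), or with the older `--loader import-in-the-middle/hook.mjs` flag on Node versions that still accept it.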

**Features**

- convert all modules running in loader thread to ESM (#210) (da7c7a6)

**1.15.0 (2025-10-09)**

**Features**

- Compatibility with specifier imports (#211) (83d662a)

**1.14.4 (2025-09-25)**

**Bug Fixes**

**1.14.3 (2025-09-24)**

**Bug Fixes**

**Maintainer changes**

This version was pushed to npm by [GitHub Actions](https://www.npmjs.com/~GitHub Actions), a new releaser for import-in-the-middle since your current version.


[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=import-in-the-middle&package-manager=npm_and_yarn&previous-version=1.14.2&new-version=2.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
**Dependabot commands and options**

You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
> **Note** > Automatic rebases have been disabled on this pull request as it has been open for over 30 days. --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Lluis Agusti Co-authored-by: Ubbe --- autogpt_platform/frontend/package.json | 1 - autogpt_platform/frontend/pnpm-lock.yaml | 13 ------------- 2 files changed, 14 deletions(-) diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json index ff2175baa1..4cbd867cd8 100644 --- a/autogpt_platform/frontend/package.json +++ b/autogpt_platform/frontend/package.json @@ -139,7 +139,6 @@ "eslint": "8.57.1", "eslint-config-next": "15.5.7", "eslint-plugin-storybook": "9.1.5", - "import-in-the-middle": "1.14.2", "msw": "2.11.6", "msw-storybook-addon": "2.0.6", "orval": "7.13.0", diff --git a/autogpt_platform/frontend/pnpm-lock.yaml b/autogpt_platform/frontend/pnpm-lock.yaml index d1d832549a..54843fc589 100644 --- a/autogpt_platform/frontend/pnpm-lock.yaml +++ b/autogpt_platform/frontend/pnpm-lock.yaml @@ -336,9 +336,6 @@ importers: eslint-plugin-storybook: specifier: 9.1.5 version: 9.1.5(eslint@8.57.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(typescript@5.9.3) - import-in-the-middle: - specifier: 1.14.2 - version: 1.14.2 msw: specifier: 2.11.6 version: 2.11.6(@types/node@24.10.0)(typescript@5.9.3) @@ -5216,9 +5213,6 @@ packages: resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==} engines: {node: '>=6'} - import-in-the-middle@1.14.2: - resolution: {integrity: sha512-5tCuY9BV8ujfOpwtAGgsTx9CGUapcFMEEyByLv1B+v2+6DhAcw+Zr0nhQT7uwaZ7DiourxFEscghOR8e1aPLQw==} - import-in-the-middle@2.0.0: resolution: {integrity: sha512-yNZhyQYqXpkT0AKq3F3KLasUSK4fHvebNH5hOsKQw2dhGSALvQ4U0BqUc5suziKvydO5u5hgN2hy1RJaho8U5A==} @@ -13391,13 +13385,6 @@ snapshots: parent-module: 1.0.1 resolve-from: 4.0.0 - import-in-the-middle@1.14.2: - dependencies: - acorn: 8.15.0 - acorn-import-attributes: 1.9.5(acorn@8.15.0) - cjs-module-lexer: 1.4.3 - module-details-from-path: 1.0.4 - import-in-the-middle@2.0.0: dependencies: acorn: 8.15.0 From cd3e35df9ef725b2d717c96bfd94a1591329eb84 Mon Sep 17 00:00:00 2001 From: Ubbe Date: Thu, 18 Dec 2025 19:33:30 +0100 Subject: [PATCH 57/58] fix(frontend): small library/mobile improvements (#11626) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes πŸ—οΈ Adds the following improvements: ### Prevent credential row overflowing on mobile πŸ“± **Before** Screenshot 2025-12-15 at 16 42 05 **After** Screenshot 2025-12-15 at 16 44 22 _Just hide the ****** on mobile..._ ### Make touch targets bigger on πŸ“± on the mobile menu **Before** Screenshot 2025-12-15 at 16 58 28 Touch targets were quite small on mobile, especially for people with big fingers... **After** Screenshot 2025-12-15 at 16 54 02 ### New `` component Screenshot 2025-12-15 at 16 48 20 A component that will render text like ``, but automatically displays `...` and the full text content on a tooltip if it detects there is no space for the full text length. Pretty useful for the type of dashboard we are building, where sometimes titles or user-generated content can be quite long, making the UI look whack. ### Google Drive Picker Only allow the removal of files if it is not in read-only mode. 
## Checklist πŸ“‹ ### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Checkout branch locally - [x] Test the above --- .../CredentialRow/CredentialRow.tsx | 2 +- .../modals/RunAgentInputs/RunAgentInputs.tsx | 2 +- .../OverflowText/OverflowText.stories.tsx | 157 ++++++++++++++++++ .../atoms/OverflowText/OverflowText.tsx | 100 +++++++++++ .../GoogleDrivePickerInput.tsx | 4 +- .../components/MobileNavbarMenuItem.tsx | 2 +- 6 files changed, 262 insertions(+), 5 deletions(-) create mode 100644 autogpt_platform/frontend/src/components/atoms/OverflowText/OverflowText.stories.tsx create mode 100644 autogpt_platform/frontend/src/components/atoms/OverflowText/OverflowText.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialRow/CredentialRow.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialRow/CredentialRow.tsx index 34c066e90d..7d6598d7be 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialRow/CredentialRow.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialRow/CredentialRow.tsx @@ -70,7 +70,7 @@ export function CredentialRow({ {"*".repeat(MASKED_KEY_LENGTH)} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs.tsx index ea372193c5..d3e6fd9669 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs.tsx @@ -103,7 +103,7 @@ export function RunAgentInputs({ value={value} onChange={onChange} className="w-full" - showRemoveButton={false} + showRemoveButton={!readOnly} /> ); break; diff --git a/autogpt_platform/frontend/src/components/atoms/OverflowText/OverflowText.stories.tsx b/autogpt_platform/frontend/src/components/atoms/OverflowText/OverflowText.stories.tsx new file mode 100644 index 0000000000..049948cd1b --- /dev/null +++ b/autogpt_platform/frontend/src/components/atoms/OverflowText/OverflowText.stories.tsx @@ -0,0 +1,157 @@ +import type { Meta, StoryObj } from "@storybook/nextjs"; +import { OverflowText } from "./OverflowText"; + +const meta: Meta = { + title: "Atoms/OverflowText", + component: OverflowText, + tags: ["autodocs"], + parameters: { + layout: "centered", + docs: { + description: { + component: + "Text component that automatically truncates overflowing content with ellipsis and shows a tooltip on hover when truncated. 
Supports both string and ReactNode values.", + }, + }, + }, + argTypes: { + value: { + control: "text", + description: "The text content to display (string or ReactNode)", + }, + className: { + control: "text", + description: "Additional CSS classes to customize styling", + }, + }, + args: { + value: "This is a sample text that may overflow", + className: "", + }, +}; + +export default meta; +type Story = StoryObj; + +export const Default: Story = { + render: function DefaultOverflowText(args) { + return ( +
+ +
+ ); + }, +}; + +export const ShortText: Story = { + args: { + value: "Short text", + }, + render: function ShortTextStory(args) { + return ( +
+ +
+ ); + }, +}; + +export const LongText: Story = { + args: { + value: + "This is a very long text that will definitely overflow and show a tooltip when you hover over it", + }, + render: function LongTextStory(args) { + return ( +
+ +
+ ); + }, +}; + +export const CustomStyling: Story = { + args: { + value: "Text with custom styling", + className: "text-lg font-semibold text-indigo-600", + }, + render: function CustomStylingStory(args) { + return ( +
+ +
+ ); + }, +}; + +export const WithReactNode: Story = { + args: { + value: ( + + Text with bold and italic content + + ), + }, + render: function WithReactNodeStory(args) { + return ( +
+ +
+ ); + }, +}; + +export const DifferentWidths: Story = { + render: function DifferentWidthsStory() { + const longText = + "This text will truncate differently depending on the container width"; + return ( +
+
+ Width: 200px +
+ +
+
+
+ Width: 300px +
+ +
+
+
+ Width: 400px +
+ +
+
+
+ ); + }, +}; + +export const FilePathExample: Story = { + args: { + value: "/very/long/path/to/a/file/that/might/overflow/in/the/ui.tsx", + }, + render: function FilePathExampleStory(args) { + return ( +
+ +
+ ); + }, +}; + +export const URLExample: Story = { + args: { + value: "https://example.com/very/long/url/path/that/might/overflow", + }, + render: function URLExampleStory(args) { + return ( +
+ +
+ ); + }, +}; diff --git a/autogpt_platform/frontend/src/components/atoms/OverflowText/OverflowText.tsx b/autogpt_platform/frontend/src/components/atoms/OverflowText/OverflowText.tsx new file mode 100644 index 0000000000..efc345f79c --- /dev/null +++ b/autogpt_platform/frontend/src/components/atoms/OverflowText/OverflowText.tsx @@ -0,0 +1,100 @@ +import { Text, type TextProps } from "@/components/atoms/Text/Text"; +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from "@/components/atoms/Tooltip/BaseTooltip"; +import { cn } from "@/lib/utils"; +import type { ReactNode } from "react"; +import { useEffect, useRef, useState } from "react"; + +interface Props extends Omit { + value: string | ReactNode; +} + +export function OverflowText(props: Props) { + const elementRef = useRef(null); + const [isTruncated, setIsTruncated] = useState(false); + + function updateTruncation() { + const element = elementRef.current; + + if (!element) { + return; + } + + const hasOverflow = element.scrollWidth > element.clientWidth; + + setIsTruncated(hasOverflow); + } + + function setupResizeListener() { + function handleResize() { + updateTruncation(); + } + + window.addEventListener("resize", handleResize); + + return function cleanupResizeListener() { + window.removeEventListener("resize", handleResize); + }; + } + + function setupObserver() { + const element = elementRef.current; + + if (!element || typeof ResizeObserver === "undefined") { + return undefined; + } + + function handleResizeObserver() { + updateTruncation(); + } + + const observer = new ResizeObserver(handleResizeObserver); + + observer.observe(element); + + return function disconnectObserver() { + observer.disconnect(); + }; + } + + useEffect(() => { + if (typeof props.value === "string") updateTruncation(); + }, [props.value]); + + useEffect(setupResizeListener, []); + useEffect(setupObserver, []); + + const { value, className, variant = "body", ...restProps } = props; + + const content = ( + + + {value} + + + ); + + if (isTruncated) { + return ( + + + {content} + + {typeof value === "string" ?

{value}

: value} +
+
+
+ ); + } + + return content; +} diff --git a/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput.tsx b/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput.tsx index 1db9809de2..2a1ada5012 100644 --- a/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput.tsx +++ b/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput.tsx @@ -5,7 +5,7 @@ import { Cross2Icon } from "@radix-ui/react-icons"; import React, { useCallback } from "react"; import { GoogleDrivePicker } from "./GoogleDrivePicker"; -export interface GoogleDrivePickerInputProps { +export interface Props { config: GoogleDrivePickerConfig; value: any; onChange: (value: any) => void; @@ -21,7 +21,7 @@ export function GoogleDrivePickerInput({ error, className, showRemoveButton = true, -}: GoogleDrivePickerInputProps) { +}: Props) { const [pickerError, setPickerError] = React.useState(null); const isMultiSelect = config.multiselect || false; const hasAutoCredentials = !!config.auto_credentials; diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/MobileNavbar/components/MobileNavbarMenuItem.tsx b/autogpt_platform/frontend/src/components/layout/Navbar/components/MobileNavbar/components/MobileNavbarMenuItem.tsx index fb061f3e0a..fa190b63b7 100644 --- a/autogpt_platform/frontend/src/components/layout/Navbar/components/MobileNavbar/components/MobileNavbarMenuItem.tsx +++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/MobileNavbar/components/MobileNavbarMenuItem.tsx @@ -19,7 +19,7 @@ export function MobileNavbarMenuItem({ onClick, }: Props) { const content = ( -
+
{getAccountMenuOptionIcon(icon)}
Date: Thu, 18 Dec 2025 19:51:24 +0100 Subject: [PATCH 58/58] fix(frontend): modal hidden overflow (#11642) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes πŸ—οΈ ### Before Screenshot 2025-12-18 at 19 07 37 ### After Screenshot 2025-12-18 at 19 02 12 ## Checklist πŸ“‹ ### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Run locally and check crop --- .../src/components/molecules/Dialog/components/styles.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/autogpt_platform/frontend/src/components/molecules/Dialog/components/styles.ts b/autogpt_platform/frontend/src/components/molecules/Dialog/components/styles.ts index 3b7d12e8e9..873c33959b 100644 --- a/autogpt_platform/frontend/src/components/molecules/Dialog/components/styles.ts +++ b/autogpt_platform/frontend/src/components/molecules/Dialog/components/styles.ts @@ -3,8 +3,7 @@ const commonStyles = { title: "font-poppins text-md md:text-lg leading-none", overlay: "fixed inset-0 z-50 bg-stone-500/20 dark:bg-black/50 backdrop-blur-md animate-fade-in", - content: - "overflow-y-hidden bg-white p-6 fixed rounded-2xlarge flex flex-col z-50 w-full", + content: "bg-white p-6 fixed rounded-2xlarge flex flex-col z-50 w-full", }; // Modal specific styles