+
{title}
-
+
{description}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/ScheduleListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx
similarity index 100%
rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/ScheduleListItem.tsx
rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/helpers.ts
similarity index 100%
rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/helpers.ts
rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/helpers.ts
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/useAgentRunsLists.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts
similarity index 71%
rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/useAgentRunsLists.ts
rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts
index e6bd124006..80900e8cc6 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/useAgentRunsLists.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts
@@ -1,12 +1,12 @@
"use client";
-import { useEffect, useMemo, useState } from "react";
+import { useEffect, useMemo } from "react";
import { useGetV1ListGraphExecutionsInfinite } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { useGetV1ListExecutionSchedulesForAGraph } from "@/app/api/__generated__/endpoints/schedules/schedules";
import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
import { okData } from "@/app/api/helpers";
-import { useSearchParams } from "next/navigation";
+import { parseAsString, useQueryStates } from "nuqs";
import {
computeRunsCount,
extractRunsFromPages,
@@ -14,9 +14,16 @@ import {
getRunsPollingInterval,
} from "./helpers";
+function parseTab(value: string | null): "runs" | "scheduled" | "templates" {
+ if (value === "runs" || value === "scheduled" || value === "templates") {
+ return value;
+ }
+ return "runs";
+}
+
type Args = {
graphId?: string;
- onSelectRun: (runId: string) => void;
+ onSelectRun: (runId: string, tab?: "runs" | "scheduled") => void;
onCountsChange?: (info: {
runsCount: number;
schedulesCount: number;
@@ -24,14 +31,17 @@ type Args = {
}) => void;
};
-export function useAgentRunsLists({
+export function useSidebarRunsList({
graphId,
onSelectRun,
onCountsChange,
}: Args) {
- const params = useSearchParams();
- const existingRunId = params.get("executionId") as string | undefined;
- const [tabValue, setTabValue] = useState<"runs" | "scheduled">("runs");
+ const [{ activeItem, activeTab: activeTabRaw }] = useQueryStates({
+ activeItem: parseAsString,
+ activeTab: parseAsString,
+ });
+
+ const tabValue = useMemo(() => parseTab(activeTabRaw), [activeTabRaw]);
const runsQuery = useGetV1ListGraphExecutionsInfinite(
graphId || "",
@@ -77,26 +87,17 @@ export function useAgentRunsLists({
}, [runsCount, schedulesCount, loading, onCountsChange]);
useEffect(() => {
- if (runs.length > 0) {
- if (existingRunId) {
- onSelectRun(existingRunId);
- return;
- }
- onSelectRun(runs[0].id);
+ if (runs.length > 0 && tabValue === "runs" && !activeItem) {
+ onSelectRun(runs[0].id, "runs");
}
- }, [runs, existingRunId]);
-
- useEffect(() => {
- if (existingRunId && existingRunId.startsWith("schedule:"))
- setTabValue("scheduled");
- else setTabValue("runs");
- }, [existingRunId]);
+ }, [runs, activeItem, tabValue, onSelectRun]);
// If there are no runs but there are schedules, and nothing is selected, auto-select the first schedule
useEffect(() => {
- if (!existingRunId && runs.length === 0 && schedules.length > 0)
- onSelectRun(`schedule:${schedules[0].id}`);
- }, [existingRunId, runs.length, schedules, onSelectRun]);
+ if (!activeItem && runs.length === 0 && schedules.length > 0) {
+ onSelectRun(schedules[0].id, "scheduled");
+ }
+ }, [activeItem, runs.length, schedules, onSelectRun]);
return {
runs,
@@ -105,7 +106,6 @@ export function useAgentRunsLists({
loading,
runsQuery,
tabValue,
- setTabValue,
runsCount,
schedulesCount,
fetchMoreRuns: runsQuery.fetchNextPage,
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/helpers.ts
new file mode 100644
index 0000000000..302468d5e8
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/helpers.ts
@@ -0,0 +1 @@
+export const AGENT_LIBRARY_SECTION_PADDING_X = "px-4";
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts
index 427ca81706..a97e64650c 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts
@@ -2,8 +2,15 @@ import { useGetV2GetLibraryAgent } from "@/app/api/__generated__/endpoints/libra
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { okData } from "@/app/api/helpers";
import { useParams } from "next/navigation";
-import { parseAsString, useQueryState } from "nuqs";
-import { useCallback, useMemo, useState } from "react";
+import { parseAsString, useQueryStates } from "nuqs";
+import { useCallback, useEffect, useMemo, useState } from "react";
+
+function parseTab(value: string | null): "runs" | "scheduled" | "templates" {
+ if (value === "runs" || value === "scheduled" || value === "templates") {
+ return value;
+ }
+ return "runs";
+}
export function useNewAgentLibraryView() {
const { id } = useParams();
@@ -18,8 +25,21 @@ export function useNewAgentLibraryView() {
},
});
- const [runParam, setRunParam] = useQueryState("executionId", parseAsString);
- const selectedRun = runParam ?? undefined;
+ const [{ activeItem, activeTab: activeTabRaw }, setQueryStates] =
+ useQueryStates({
+ activeItem: parseAsString,
+ activeTab: parseAsString,
+ });
+
+ const activeTab = useMemo(() => parseTab(activeTabRaw), [activeTabRaw]);
+
+ useEffect(() => {
+ if (!activeTabRaw) {
+ setQueryStates({
+ activeTab: "runs",
+ });
+ }
+ }, [activeTabRaw, setQueryStates]);
const [sidebarCounts, setSidebarCounts] = useState({
runsCount: 0,
@@ -38,12 +58,29 @@ export function useNewAgentLibraryView() {
// Show sidebar layout while loading or when there are items
const showSidebarLayout = sidebarLoading || hasAnyItems;
- function handleSelectRun(id: string) {
- setRunParam(id, { shallow: true });
+ useEffect(() => {
+ if (response) {
+ document.title = `${response.name} - Library - AutoGPT Platform`;
+ }
+ }, [response]);
+
+ function handleSelectRun(id: string, tab?: "runs" | "scheduled") {
+ setQueryStates({
+ activeItem: id,
+ activeTab: tab ?? "runs",
+ });
}
function handleClearSelectedRun() {
- setRunParam(null, { shallow: true });
+ setQueryStates({
+ activeItem: null,
+ });
+ }
+
+ function handleSetActiveTab(tab: "runs" | "scheduled") {
+ setQueryStates({
+ activeTab: tab,
+ });
}
const handleCountsChange = useCallback(
@@ -70,8 +107,10 @@ export function useNewAgentLibraryView() {
agent: response,
hasAnyItems,
showSidebarLayout,
- selectedRun,
+ activeItem,
sidebarLoading,
+ activeTab,
+ setActiveTab: handleSetActiveTab,
handleClearSelectedRun,
handleCountsChange,
handleSelectRun,
diff --git a/autogpt_platform/frontend/src/components/atoms/Text/helpers.ts b/autogpt_platform/frontend/src/components/atoms/Text/helpers.ts
index 0b2eabe38d..cf5d9c1da0 100644
--- a/autogpt_platform/frontend/src/components/atoms/Text/helpers.ts
+++ b/autogpt_platform/frontend/src/components/atoms/Text/helpers.ts
@@ -14,33 +14,33 @@ export type As =
export const variants = {
// Headings
- h1: "font-poppins text-[2.75rem] font-[600] leading-[3.5rem] tracking-[-0.033rem] text-zinc-800",
- h2: "font-poppins text-[2rem] font-[500] leading-[2.5rem] text-zinc-800 tracking-[-0.02rem]",
- h3: "font-poppins text-[1.75rem] font-[500] leading-[2.5rem] text-zinc-800 tracking-[-0.01313rem]",
- h4: "font-poppins text-[1.375rem] font-[500] leading-[1.5rem] text-zinc-800",
- h5: "font-poppins text-[1rem] font-[500] leading-[1.5rem] text-zinc-800",
+ h1: "font-poppins text-[2.75rem] font-[600] leading-[3.5rem] tracking-[-0.033rem] text-black",
+ h2: "font-poppins text-[2rem] font-[500] leading-[2.5rem] text-black tracking-[-0.02rem]",
+ h3: "font-poppins text-[1.75rem] font-[500] leading-[2.5rem] text-black tracking-[-0.01313rem]",
+ h4: "font-poppins text-[1.375rem] font-[500] leading-[1.5rem] text-black",
+ h5: "font-poppins text-[1rem] font-[500] leading-[1.5rem] text-black",
// Body Text
- lead: "font-sans text-[1.25rem] font-[400] leading-[1.75rem] text-zinc-800",
+ lead: "font-sans text-[1.25rem] font-[400] leading-[1.75rem] text-black",
"lead-medium":
- "font-sans text-[1.25rem] font-[500] leading-[1.75rem] text-zinc-800",
+ "font-sans text-[1.25rem] font-[500] leading-[1.75rem] text-black",
"lead-semibold":
- "font-sans text-[1.25rem] font-[600] leading-[1.75rem] text-zinc-800",
- large: "font-sans text-[1rem] font-[400] leading-[1.625rem] text-zinc-800",
+ "font-sans text-[1.25rem] font-[600] leading-[1.75rem] text-black",
+ large: "font-sans text-[1rem] font-[400] leading-[1.625rem] text-black",
"large-medium":
- "font-sans text-[1rem] font-[500] leading-[1.625rem] text-zinc-800",
+ "font-sans text-[1rem] font-[500] leading-[1.625rem] text-black",
"large-semibold":
- "font-sans text-[1rem] font-[600] leading-[1.625rem] text-zinc-800",
- body: "font-sans text-[0.875rem] font-[400] leading-[1.375rem] text-zinc-800",
+ "font-sans text-[1rem] font-[600] leading-[1.625rem] text-black",
+ body: "font-sans text-[0.875rem] font-[400] leading-[1.375rem] text-black",
"body-medium":
- "font-sans text-[0.875rem] font-[500] leading-[1.375rem] text-zinc-800",
- small: "font-sans text-[0.75rem] font-[400] leading-[1.125rem] text-zinc-800",
+ "font-sans text-[0.875rem] font-[500] leading-[1.375rem] text-black",
+ small: "font-sans text-[0.75rem] font-[400] leading-[1.125rem] text-black",
"small-medium":
- "font-sans text-[0.75rem] font-[500] leading-[1.125rem] text-zinc-800",
+ "font-sans text-[0.75rem] font-[500] leading-[1.125rem] text-black",
// Label Text
label:
- "font-sans text-[0.6785rem] font-medium uppercase leading-[1.25rem] tracking-[0.06785rem] text-zinc-800",
+ "font-sans text-[0.6785rem] font-medium uppercase leading-[1.25rem] tracking-[0.06785rem] text-black",
} as const;
export type Variant = keyof typeof variants;
diff --git a/autogpt_platform/frontend/src/components/molecules/Breadcrumbs/Breadcrumbs.tsx b/autogpt_platform/frontend/src/components/molecules/Breadcrumbs/Breadcrumbs.tsx
index 89ac45282c..1fb69763fe 100644
--- a/autogpt_platform/frontend/src/components/molecules/Breadcrumbs/Breadcrumbs.tsx
+++ b/autogpt_platform/frontend/src/components/molecules/Breadcrumbs/Breadcrumbs.tsx
@@ -13,17 +13,17 @@ interface Props {
export function Breadcrumbs({ items }: Props) {
return (
-
+
{items.map((item, index) => (
{item.name}
{index < items.length - 1 && (
-
+
/
)}
diff --git a/autogpt_platform/frontend/src/components/molecules/TabsLine/TabsLine.tsx b/autogpt_platform/frontend/src/components/molecules/TabsLine/TabsLine.tsx
index 2c0f6ac4a5..0eeda6c110 100644
--- a/autogpt_platform/frontend/src/components/molecules/TabsLine/TabsLine.tsx
+++ b/autogpt_platform/frontend/src/components/molecules/TabsLine/TabsLine.tsx
@@ -52,7 +52,7 @@ const TabsLineList = React.forwardRef<
listRef.current = node;
}}
className={cn(
- "inline-flex w-full items-center justify-start border-b border-zinc-200",
+ "inline-flex w-full items-center justify-start border-b border-zinc-100",
className,
)}
{...props}
@@ -109,7 +109,7 @@ const TabsLineTrigger = React.forwardRef<
elementRef.current = node;
}}
className={cn(
- "relative inline-flex items-center justify-center whitespace-nowrap px-3 py-3 font-sans text-[1rem] font-medium leading-[1.5rem] text-zinc-700 transition-all data-[state=active]:text-purple-600 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-neutral-400 focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50",
+ "relative inline-flex items-center justify-center whitespace-nowrap px-3 py-3 font-sans text-[0.875rem] font-medium leading-[1.5rem] text-zinc-700 transition-all data-[state=active]:text-purple-600 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-neutral-400 focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50",
className,
)}
{...props}
diff --git a/autogpt_platform/frontend/src/components/styles/colors.ts b/autogpt_platform/frontend/src/components/styles/colors.ts
index 50129e2093..4e37b9bdd5 100644
--- a/autogpt_platform/frontend/src/components/styles/colors.ts
+++ b/autogpt_platform/frontend/src/components/styles/colors.ts
@@ -19,7 +19,7 @@ export const colors = {
400: "#ADADB3",
500: "#83838C",
600: "#68686F",
- 700: "#8E98A8",
+ 700: "#505057",
800: "#3E3E43",
900: "#2C2C30",
},
From b4a69c49a134c4d0c9a265afe9b3af5c03774198 Mon Sep 17 00:00:00 2001
From: Ubbe
Date: Wed, 3 Dec 2025 16:24:58 +0700
Subject: [PATCH 04/58] feat(frontend): use websockets on new library page +
fixes (#11526)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
## Changes 🏗️
- Create a new `useExecutionEvents()` hook that can be used to subscribe
to agent executions
- now both the **Activity Dropdown** and the new **Library Agent Page**
use that
- so subscribing to executions is centralised on a single place
- Apply a couple of design fixes
- Fix not being able to select the new templates tab
## Checklist 📋
### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- [x] Run the app locally and verify the above
---
.../SelectedRunView/SelectedRunView.tsx | 2 +-
.../SelectedScheduleView.tsx | 5 +-
.../SidebarRunsList/SidebarRunsList.tsx | 10 +-
.../sidebar/SidebarRunsList/helpers.ts | 22 -----
.../SidebarRunsList/useSidebarRunsList.ts | 20 +++-
.../useNewAgentLibraryView.ts | 2 +-
.../useAgentActivityDropdown.ts | 60 +++--------
.../frontend/src/hooks/useExecutionEvents.ts | 99 +++++++++++++++++++
8 files changed, 142 insertions(+), 78 deletions(-)
create mode 100644 autogpt_platform/frontend/src/hooks/useExecutionEvents.ts
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx
index 2a80aafed4..857127164d 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx
@@ -69,7 +69,7 @@ export function SelectedRunView({
if (isLoading && !run) {
return (
-
+
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx
index a431e68021..48d93ec64d 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx
@@ -13,6 +13,7 @@ import {
} from "@/components/molecules/TabsLine/TabsLine";
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
import { formatInTimezone, getTimezoneDisplayName } from "@/lib/timezone-utils";
+import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../../helpers";
import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly";
import { RunDetailCard } from "../RunDetailCard/RunDetailCard";
import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader";
@@ -68,7 +69,7 @@ export function SelectedScheduleView({
if (isLoading && !schedule) {
return (
-
+
@@ -103,7 +104,7 @@ export function SelectedScheduleView({
-
+
Your input
Schedule
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx
index ae4423931a..6d5f2f98a6 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/SidebarRunsList.tsx
@@ -23,7 +23,7 @@ interface Props {
selectedRunId?: string;
onSelectRun: (id: string, tab?: "runs" | "scheduled") => void;
onClearSelectedRun?: () => void;
- onTabChange?: (tab: "runs" | "scheduled") => void;
+ onTabChange?: (tab: "runs" | "scheduled" | "templates") => void;
onCountsChange?: (info: {
runsCount: number;
schedulesCount: number;
@@ -74,7 +74,7 @@ export function SidebarRunsList({
{
- const value = v as "runs" | "scheduled";
+ const value = v as "runs" | "scheduled" | "templates";
onTabChange?.(value);
if (value === "runs") {
if (runs && runs.length) {
@@ -82,12 +82,14 @@ export function SidebarRunsList({
} else {
onClearSelectedRun?.();
}
- } else {
+ } else if (value === "scheduled") {
if (schedules && schedules.length) {
onSelectRun(schedules[0].id, "scheduled");
} else {
onClearSelectedRun?.();
}
+ } else if (value === "templates") {
+ onClearSelectedRun?.();
}
}}
className="flex min-h-0 flex-col overflow-hidden"
@@ -134,7 +136,7 @@ export function SidebarRunsList({
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/helpers.ts
index 4cbe6787d9..096e40239b 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/helpers.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/helpers.ts
@@ -1,8 +1,6 @@
import type { GraphExecutionsPaginated } from "@/app/api/__generated__/models/graphExecutionsPaginated";
import type { InfiniteData } from "@tanstack/react-query";
-const AGENT_RUNNING_POLL_INTERVAL = 1500;
-
function hasValidExecutionsData(
page: unknown,
): page is { data: GraphExecutionsPaginated } {
@@ -16,26 +14,6 @@ function hasValidExecutionsData(
);
}
-export function getRunsPollingInterval(
- pages: Array | undefined,
- isRunsTab: boolean,
-): number | false {
- if (!isRunsTab || !pages?.length) return false;
-
- try {
- const executions = pages.flatMap((page) => {
- if (!hasValidExecutionsData(page)) return [];
- return page.data.executions || [];
- });
- const hasActive = executions.some(
- (e) => e.status === "RUNNING" || e.status === "QUEUED",
- );
- return hasActive ? AGENT_RUNNING_POLL_INTERVAL : false;
- } catch {
- return false;
- }
-}
-
export function computeRunsCount(
infiniteData: InfiniteData | undefined,
runsLength: number,
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts
index 80900e8cc6..eecada463a 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts
@@ -6,12 +6,13 @@ import { useGetV1ListGraphExecutionsInfinite } from "@/app/api/__generated__/end
import { useGetV1ListExecutionSchedulesForAGraph } from "@/app/api/__generated__/endpoints/schedules/schedules";
import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
import { okData } from "@/app/api/helpers";
+import { useExecutionEvents } from "@/hooks/useExecutionEvents";
+import { useQueryClient } from "@tanstack/react-query";
import { parseAsString, useQueryStates } from "nuqs";
import {
computeRunsCount,
extractRunsFromPages,
getNextRunsPageParam,
- getRunsPollingInterval,
} from "./helpers";
function parseTab(value: string | null): "runs" | "scheduled" | "templates" {
@@ -42,6 +43,7 @@ export function useSidebarRunsList({
});
const tabValue = useMemo(() => parseTab(activeTabRaw), [activeTabRaw]);
+ const queryClient = useQueryClient();
const runsQuery = useGetV1ListGraphExecutionsInfinite(
graphId || "",
@@ -49,9 +51,6 @@ export function useSidebarRunsList({
{
query: {
enabled: !!graphId,
- refetchInterval: (q) =>
- getRunsPollingInterval(q.state.data?.pages, tabValue === "runs"),
- refetchIntervalInBackground: true,
refetchOnWindowFocus: false,
getNextPageParam: getNextRunsPageParam,
},
@@ -79,6 +78,19 @@ export function useSidebarRunsList({
const schedulesCount = schedules.length;
const loading = !schedulesQuery.isSuccess || !runsQuery.isSuccess;
+ // Update query cache when execution events arrive via websocket
+ useExecutionEvents({
+ graphId: graphId || undefined,
+ enabled: !!graphId && tabValue === "runs",
+ onExecutionUpdate: (_execution) => {
+ // Invalidate and refetch the query to ensure we have the latest data
+ // This is simpler and more reliable than manually updating the cache
+ // The queryKey is stable and includes the graphId, so this only invalidates
+ // queries for this specific graph's executions
+ queryClient.invalidateQueries({ queryKey: runsQuery.queryKey });
+ },
+ });
+
// Notify parent about counts and loading state
useEffect(() => {
if (onCountsChange) {
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts
index a97e64650c..011956cb40 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts
@@ -77,7 +77,7 @@ export function useNewAgentLibraryView() {
});
}
- function handleSetActiveTab(tab: "runs" | "scheduled") {
+ function handleSetActiveTab(tab: "runs" | "scheduled" | "templates") {
setQueryStates({
activeTab: tab,
});
diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/useAgentActivityDropdown.ts b/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/useAgentActivityDropdown.ts
index 6df18738ca..df8402906b 100644
--- a/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/useAgentActivityDropdown.ts
+++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/useAgentActivityDropdown.ts
@@ -1,19 +1,17 @@
import { useGetV1ListAllExecutions } from "@/app/api/__generated__/endpoints/graphs/graphs";
-import BackendAPI from "@/lib/autogpt-server-api/client";
-import type { GraphExecution, GraphID } from "@/lib/autogpt-server-api/types";
-import { useCallback, useEffect, useState } from "react";
-import * as Sentry from "@sentry/nextjs";
+import { useExecutionEvents } from "@/hooks/useExecutionEvents";
+import { useLibraryAgents } from "@/hooks/useLibraryAgents/useLibraryAgents";
+import type { GraphExecution } from "@/lib/autogpt-server-api/types";
+import { useCallback, useEffect, useMemo, useState } from "react";
import {
NotificationState,
categorizeExecutions,
handleExecutionUpdate,
} from "./helpers";
-import { useLibraryAgents } from "@/hooks/useLibraryAgents/useLibraryAgents";
export function useAgentActivityDropdown() {
const [isOpen, setIsOpen] = useState(false);
- const [api] = useState(() => new BackendAPI());
const { agentInfoMap } = useLibraryAgents();
const [notifications, setNotifications] = useState({
@@ -23,8 +21,6 @@ export function useAgentActivityDropdown() {
totalCount: 0,
});
- const [isConnected, setIsConnected] = useState(false);
-
const {
data: executions,
isSuccess: executionsSuccess,
@@ -33,6 +29,12 @@ export function useAgentActivityDropdown() {
query: { select: (res) => (res.status === 200 ? res.data : null) },
});
+ // Get all graph IDs from agentInfoMap
+ const graphIds = useMemo(
+ () => Array.from(agentInfoMap.keys()),
+ [agentInfoMap],
+ );
+
// Handle real-time execution updates
const handleExecutionEvent = useCallback(
(execution: GraphExecution) => {
@@ -51,45 +53,15 @@ export function useAgentActivityDropdown() {
}
}, [executions, executionsSuccess, agentInfoMap]);
- // Initialize WebSocket connection for real-time updates
- useEffect(() => {
- if (!agentInfoMap.size) return;
-
- const connectHandler = api.onWebSocketConnect(() => {
- setIsConnected(true);
- agentInfoMap.forEach((_, graphId) => {
- api.subscribeToGraphExecutions(graphId as GraphID).catch((error) => {
- Sentry.captureException(error, {
- tags: {
- graphId,
- },
- });
- });
- });
- });
-
- const disconnectHandler = api.onWebSocketDisconnect(() => {
- setIsConnected(false);
- });
-
- const messageHandler = api.onWebSocketMessage(
- "graph_execution_event",
- handleExecutionEvent,
- );
-
- api.connectWebSocket();
-
- return () => {
- connectHandler();
- disconnectHandler();
- messageHandler();
- api.disconnectWebSocket();
- };
- }, [api, handleExecutionEvent, agentInfoMap]);
+ // Subscribe to execution events for all graphs
+ useExecutionEvents({
+ graphIds: graphIds.length > 0 ? graphIds : undefined,
+ enabled: graphIds.length > 0,
+ onExecutionUpdate: handleExecutionEvent,
+ });
return {
...notifications,
- isConnected,
isReady: executionsSuccess,
error: executionsError,
isOpen,
diff --git a/autogpt_platform/frontend/src/hooks/useExecutionEvents.ts b/autogpt_platform/frontend/src/hooks/useExecutionEvents.ts
new file mode 100644
index 0000000000..9af2b8aead
--- /dev/null
+++ b/autogpt_platform/frontend/src/hooks/useExecutionEvents.ts
@@ -0,0 +1,99 @@
+"use client";
+
+import { useBackendAPI } from "@/lib/autogpt-server-api/context";
+import type { GraphExecution, GraphID } from "@/lib/autogpt-server-api/types";
+import * as Sentry from "@sentry/nextjs";
+import { useEffect, useRef } from "react";
+
+type ExecutionEventHandler = (execution: GraphExecution) => void;
+
+interface UseExecutionEventsOptions {
+ graphId?: GraphID | string | null;
+ graphIds?: (GraphID | string)[];
+ enabled?: boolean;
+ onExecutionUpdate?: ExecutionEventHandler;
+}
+
+/**
+ * Generic hook to subscribe to graph execution events via WebSocket.
+ * Automatically handles subscription/unsubscription and reconnection.
+ *
+ * @param options - Configuration options
+ * @param options.graphId - The graph ID to subscribe to (single graph)
+ * @param options.graphIds - Array of graph IDs to subscribe to (multiple graphs)
+ * @param options.enabled - Whether the subscription is enabled (default: true)
+ * @param options.onExecutionUpdate - Callback invoked when an execution is updated
+ */
+export function useExecutionEvents({
+ graphId,
+ graphIds,
+ enabled = true,
+ onExecutionUpdate,
+}: UseExecutionEventsOptions) {
+ const api = useBackendAPI();
+ const onExecutionUpdateRef = useRef(onExecutionUpdate);
+
+ useEffect(() => {
+ onExecutionUpdateRef.current = onExecutionUpdate;
+ }, [onExecutionUpdate]);
+
+ useEffect(() => {
+ if (!enabled) return;
+
+ const idsToSubscribe = graphIds || (graphId ? [graphId] : []);
+ if (idsToSubscribe.length === 0) return;
+
+ // Normalize IDs to strings for consistent comparison
+ const normalizedIds = idsToSubscribe.map((id) => String(id));
+ const subscribedIds = new Set();
+
+ const handleExecutionEvent = (execution: GraphExecution) => {
+ // Filter by graphIds if provided, using normalized string comparison
+ if (normalizedIds.length > 0) {
+ const executionGraphId = String(execution.graph_id);
+ if (!normalizedIds.includes(executionGraphId)) return;
+ }
+
+ onExecutionUpdateRef.current?.(execution);
+ };
+
+ const connectHandler = api.onWebSocketConnect(() => {
+ normalizedIds.forEach((id) => {
+ // Track subscriptions to avoid duplicate subscriptions
+ if (subscribedIds.has(id)) return;
+ subscribedIds.add(id);
+
+ api
+ .subscribeToGraphExecutions(id as GraphID)
+ .then(() => {
+ console.debug(`Subscribed to execution updates for graph ${id}`);
+ })
+ .catch((error) => {
+ console.error(
+ `Failed to subscribe to execution updates for graph ${id}:`,
+ error,
+ );
+ Sentry.captureException(error, {
+ tags: { graphId: id },
+ });
+ subscribedIds.delete(id);
+ });
+ });
+ });
+
+ const messageHandler = api.onWebSocketMessage(
+ "graph_execution_event",
+ handleExecutionEvent,
+ );
+
+ api.connectWebSocket();
+
+ return () => {
+ connectHandler();
+ messageHandler();
+ // Note: Backend automatically cleans up subscriptions on websocket disconnect
+ // If IDs change while connected, old subscriptions remain but are filtered client-side
+ subscribedIds.clear();
+ };
+ }, [api, graphId, graphIds, enabled]);
+}
From 02d9ff8db27ec4a52faa283fbcad00c10ba742c3 Mon Sep 17 00:00:00 2001
From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com>
Date: Wed, 3 Dec 2025 14:49:29 +0530
Subject: [PATCH 05/58] fix(frontend): improve error message extraction in
agent execution error handler (#11527)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
When agent execution fails, the error toast was only showing
`error.message`, which often lacks detail. The API returns more specific
error messages in `error.response.detail.message`, but these weren't
being displayed to users, making debugging harder.
### Changes 🏗️
- Updated error message extraction in `useAgentRunModal` to check
`error.response.detail.message` first, then fall back to
`error.message`, then to the default message
- This ensures users see the most specific error message available from
the API response
### Checklist 📋
#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- [x] Triggered an agent execution error (e.g., invalid inputs) and
verified the toast shows the detailed error message from
`error.response.detail.message`
- [x] Verified fallback to `error.message` when
`error.response.detail.message` is not available
- [x] Verified fallback to default message when neither is available
---
.../components/modals/RunAgentModal/useAgentRunModal.ts | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.ts
index 2e8fc02d97..92f9c2703c 100644
--- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.ts
@@ -87,9 +87,13 @@ export function useAgentRunModal(
}
},
onError: (error: any) => {
+ const errorMessage = error.isGraphValidationError()
+ ? error.response.detail.message
+ : error.message;
+
toast({
title: "❌ Failed to execute agent",
- description: error.message || "An unexpected error occurred.",
+ description: errorMessage || "An unexpected error occurred.",
variant: "destructive",
});
},
From 7b93600973dd29091a2bc161d8785a47106b01d9 Mon Sep 17 00:00:00 2001
From: Swifty
Date: Wed, 3 Dec 2025 11:04:38 +0100
Subject: [PATCH 06/58] fix duplicate prometheus metrics
---
.../backend/backend/monitoring/instrumentation.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/autogpt_platform/backend/backend/monitoring/instrumentation.py b/autogpt_platform/backend/backend/monitoring/instrumentation.py
index 898324deaa..bd384b4ad2 100644
--- a/autogpt_platform/backend/backend/monitoring/instrumentation.py
+++ b/autogpt_platform/backend/backend/monitoring/instrumentation.py
@@ -143,6 +143,9 @@ def instrument_fastapi(
)
# Create instrumentator with default metrics
+ # Use service-specific inprogress_name to avoid duplicate registration
+ # when multiple FastAPI apps are instrumented in the same process
+ service_subsystem = service_name.replace("-", "_")
instrumentator = Instrumentator(
should_group_status_codes=True,
should_ignore_untemplated=True,
@@ -150,7 +153,7 @@ def instrument_fastapi(
should_instrument_requests_inprogress=True,
excluded_handlers=excluded_handlers or ["/health", "/readiness"],
env_var_name="ENABLE_METRICS",
- inprogress_name="autogpt_http_requests_inprogress",
+ inprogress_name=f"autogpt_{service_subsystem}_http_requests_inprogress",
inprogress_labels=True,
)
From bfbd4eee5309ef494357a15af8d40487938ecd50 Mon Sep 17 00:00:00 2001
From: Ubbe
Date: Wed, 3 Dec 2025 18:05:22 +0700
Subject: [PATCH 07/58] ci(frontend): concurrency optimizations (#11525)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
## Changes 🏗️
Added the same concurrency optimisation to the Front-end and Fullstack
CI workflows. It will:
- Cancel in-progress runs when a new workflow starts for the same
branch/PR
- Reduce CI costs by avoiding redundant runs
- Ensure only the latest workflow runs
- Both workflows now use the same concurrency strategy to optimise CI
billing.
## Checklist 📋
### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- [x] We will see as we push commits...
---
.github/workflows/platform-frontend-ci.yml | 4 ++++
.github/workflows/platform-fullstack-ci.yml | 4 ++++
2 files changed, 8 insertions(+)
diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml
index 58d3464a3a..33e12aa900 100644
--- a/.github/workflows/platform-frontend-ci.yml
+++ b/.github/workflows/platform-frontend-ci.yml
@@ -12,6 +12,10 @@ on:
- "autogpt_platform/frontend/**"
merge_group:
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event.pull_request.number || '' }}
+ cancel-in-progress: true
+
defaults:
run:
shell: bash
diff --git a/.github/workflows/platform-fullstack-ci.yml b/.github/workflows/platform-fullstack-ci.yml
index a75c1b9068..0e4b0f8f49 100644
--- a/.github/workflows/platform-fullstack-ci.yml
+++ b/.github/workflows/platform-fullstack-ci.yml
@@ -12,6 +12,10 @@ on:
- "autogpt_platform/**"
merge_group:
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event.pull_request.number || '' }}
+ cancel-in-progress: true
+
defaults:
run:
shell: bash
From 6588110bf2b2907e714f70e108540bfbf4014ada Mon Sep 17 00:00:00 2001
From: Swifty
Date: Wed, 3 Dec 2025 13:39:17 +0100
Subject: [PATCH 08/58] fix(frontend): forward X-API-Key header through proxy
(#11530)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The Next.js API proxy was stripping the X-API-Key header when forwarding
requests to the backend, causing API key authentication to fail in
environments where requests go through the proxy (e.g., dev
environment).
### Changes 🏗️
- Updated `createRequestHeaders()` in
`frontend/src/lib/autogpt-server-api/helpers.ts` to forward the
`X-API-Key` header from the original request to the backend
### Checklist 📋
#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- [x] Verify API key authentication works when requests go through the
Next.js proxy
- [x] Verify existing authentication (Authorization header) still works
- [x] Verify admin impersonation header forwarding still works
#### For configuration changes:
- [x] `.env.default` is updated or already compatible with my changes
- [x] `docker-compose.yml` is updated or already compatible with my
changes
- [x] I have included a list of my configuration changes in the PR
description (under **Changes**)
No configuration changes required.
🤖 Generated with [Claude Code](https://claude.com/claude-code)
---------
Co-authored-by: Claude
---
.../frontend/src/lib/autogpt-server-api/helpers.ts | 11 ++++++++++-
autogpt_platform/frontend/src/lib/constants.ts | 3 +++
2 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/helpers.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/helpers.ts
index 8607b45da0..7e20783042 100644
--- a/autogpt_platform/frontend/src/lib/autogpt-server-api/helpers.ts
+++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/helpers.ts
@@ -1,4 +1,7 @@
-import { IMPERSONATION_HEADER_NAME } from "@/lib/constants";
+import {
+ API_KEY_HEADER_NAME,
+ IMPERSONATION_HEADER_NAME,
+} from "@/lib/constants";
import { getServerSupabase } from "@/lib/supabase/server/getServerSupabase";
import { environment } from "@/services/environment";
import { Key, storage } from "@/services/storage/local-storage";
@@ -154,6 +157,12 @@ export function createRequestHeaders(
if (impersonationHeader) {
headers[IMPERSONATION_HEADER_NAME] = impersonationHeader;
}
+
+ // Forward X-API-Key header if present
+ const apiKeyHeader = originalRequest.headers.get(API_KEY_HEADER_NAME);
+ if (apiKeyHeader) {
+ headers[API_KEY_HEADER_NAME] = apiKeyHeader;
+ }
}
return headers;
diff --git a/autogpt_platform/frontend/src/lib/constants.ts b/autogpt_platform/frontend/src/lib/constants.ts
index f275dbf919..433a37b00e 100644
--- a/autogpt_platform/frontend/src/lib/constants.ts
+++ b/autogpt_platform/frontend/src/lib/constants.ts
@@ -5,3 +5,6 @@
// Admin impersonation
export const IMPERSONATION_HEADER_NAME = "X-Act-As-User-Id";
export const IMPERSONATION_STORAGE_KEY = "admin-impersonate-user-id";
+
+// API key authentication
+export const API_KEY_HEADER_NAME = "X-API-Key";
From f1c6c946364752c2972398a20573bbd07d97bd30 Mon Sep 17 00:00:00 2001
From: Ubbe
Date: Thu, 4 Dec 2025 21:44:06 +0700
Subject: [PATCH 09/58] ci(frontend): fix concurrency groups (#11551)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
## Changes 🏗️
Fix concurrency grouping on Front-end workflows.
## Checklist 📋
### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- [x] We will see once merged
---
.github/workflows/platform-frontend-ci.yml | 4 ++--
.github/workflows/platform-fullstack-ci.yml | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml
index 33e12aa900..2154fe1385 100644
--- a/.github/workflows/platform-frontend-ci.yml
+++ b/.github/workflows/platform-frontend-ci.yml
@@ -13,8 +13,8 @@ on:
merge_group:
concurrency:
- group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event.pull_request.number || '' }}
- cancel-in-progress: true
+ group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || format('{0}-{1}', github.ref, github.event.pull_request.number || github.sha) }}
+ cancel-in-progress: ${{ github.event_name == 'pull_request' }}
defaults:
run:
diff --git a/.github/workflows/platform-fullstack-ci.yml b/.github/workflows/platform-fullstack-ci.yml
index 0e4b0f8f49..c888ace6c5 100644
--- a/.github/workflows/platform-fullstack-ci.yml
+++ b/.github/workflows/platform-fullstack-ci.yml
@@ -13,8 +13,8 @@ on:
merge_group:
concurrency:
- group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event.pull_request.number || '' }}
- cancel-in-progress: true
+ group: ${{ github.workflow }}-${{ github.event_name == 'merge_group' && format('merge-queue-{0}', github.ref) || github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha }}
+ cancel-in-progress: ${{ github.event_name == 'pull_request' }}
defaults:
run:
From 113df689dc587a90fd4a9e8aadcc28d6a1f7f997 Mon Sep 17 00:00:00 2001
From: Nicholas Tindle
Date: Thu, 4 Dec 2025 08:40:30 -0600
Subject: [PATCH 10/58] feat(platform): Improve Google Sheets/Drive integration
with unified credentials (#11520)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Simplifies and improves the Google Sheets/Drive integration by merging
credentials with the file picker and using narrower OAuth scopes.
### Changes 🏗️
- Merge Google credentials and file picker into a single unified input
field for better UX
- Create spreadsheets using Drive API instead of Sheets API for proper
scope support
- Simplify Google Drive OAuth scope to only use `drive.file` (narrowest
permission needed)
- Clean up unused imports (NormalizedPickedFile)
### Checklist 📋
#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- [x] Test creating a new Google Spreadsheet with
GoogleSheetsCreateSpreadsheetBlock
- [x] Test reading from existing spreadsheets with GoogleSheetsReadBlock
- [x] Test writing to spreadsheets with GoogleSheetsWriteBlock
- [x] Verify OAuth flow works with simplified scopes
- [x] Verify file picker works with merged credentials field
#### For configuration changes:
- [x] `.env.default` is updated or already compatible with my changes
- [x] `docker-compose.yml` is updated or already compatible with my
changes
- [x] I have included a list of my configuration changes in the PR
description (under **Changes**)
🤖 Generated with [Claude Code](https://claude.com/claude-code)
---
> [!NOTE]
> Unifies Google Drive picker and credentials with auto-credentials
across backend and frontend, updates all Sheets blocks and execution to
use it, and adds Drive-based spreadsheet creation plus supporting tests
and UI fixes.
>
> - **Backend**:
> - **Google Drive model/field**: Introduce `GoogleDriveFile` (with
`_credentials_id`) and `GoogleDriveFileField()` for unified auth+picker
(`backend/blocks/google/_drive.py`).
> - **Sheets blocks**: Replace `GoogleDrivePickerField` and explicit
credentials with `GoogleDriveFileField` across all Sheets blocks;
preserve and emit credentials for chaining; add Drive service; create
spreadsheets via Drive API then manage via Sheets API.
> - **IO block**: Add `AgentGoogleDriveFileInputBlock` providing a Drive
picker input.
> - **Execution**: Support auto-generated credentials via
`BlockSchema.get_auto_credentials_fields()`; acquire/release multiple
credential locks; pass creds by `credentials_kwarg`
(`executor/manager.py`, `data/block.py`, `util/test.py`).
> - **Tests**: Add validation tests for duplicate/unique
`auto_credentials.kwarg_name` and defaults.
> - **Frontend**:
> - **Picker**: Enhance Google Drive picker to require/use saved
platform credentials, pass `_credentials_id`, validate scopes, and
manage dialog z-index/interaction; expose `requirePlatformCredentials`.
> - **UI**: Update dialogs/CSS to keep Google picker on top and prevent
overlay interactions.
> - **Types**: Extend `GoogleDrivePickerConfig` with `auto_credentials`
and related typings.
>
> Written by [Cursor
Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit
7d25534def938fe63dbc8ac0a409da4f97725615. This will update automatically
on new commits. Configure
[here](https://cursor.com/dashboard?tab=bugbot).
---------
Co-authored-by: Claude
Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com>
---
.../backend/backend/blocks/google/_drive.py | 201 +++++++----------
.../backend/backend/blocks/google/sheets.py | 210 ++++++++++++------
autogpt_platform/backend/backend/blocks/io.py | 116 ++++++++++
.../backend/backend/blocks/test/test_block.py | 150 ++++++++++++-
.../backend/backend/data/block.py | 55 ++++-
.../backend/backend/executor/manager.py | 63 +++++-
autogpt_platform/backend/backend/util/test.py | 14 ++
autogpt_platform/frontend/src/app/globals.css | 13 ++
.../src/components/__legacy__/ui/dialog.tsx | 80 +++++--
.../GoogleDrivePicker/GoogleDrivePicker.tsx | 7 +-
.../GoogleDrivePickerInput.tsx | 33 ++-
.../GoogleDrivePicker/useGoogleDrivePicker.ts | 64 +++++-
.../Dialog/components/DialogWrap.tsx | 42 +++-
.../src/lib/autogpt-server-api/types.ts | 10 +
14 files changed, 819 insertions(+), 239 deletions(-)
diff --git a/autogpt_platform/backend/backend/blocks/google/_drive.py b/autogpt_platform/backend/backend/blocks/google/_drive.py
index 3e3ecb2711..cb2b52821c 100644
--- a/autogpt_platform/backend/backend/blocks/google/_drive.py
+++ b/autogpt_platform/backend/backend/blocks/google/_drive.py
@@ -1,16 +1,8 @@
-import asyncio
-import mimetypes
-import uuid
-from pathlib import Path
from typing import Any, Literal, Optional
from pydantic import BaseModel, ConfigDict, Field
from backend.data.model import SchemaField
-from backend.util.file import get_exec_file_path
-from backend.util.request import Requests
-from backend.util.type import MediaFileType
-from backend.util.virus_scanner import scan_content_safe
AttachmentView = Literal[
"DOCS",
@@ -30,8 +22,8 @@ ATTACHMENT_VIEWS: tuple[AttachmentView, ...] = (
)
-class GoogleDriveFile(BaseModel):
- """Represents a single file/folder picked from Google Drive"""
+class _GoogleDriveFileBase(BaseModel):
+ """Internal base class for Google Drive file representation."""
model_config = ConfigDict(populate_by_name=True)
@@ -49,144 +41,115 @@ class GoogleDriveFile(BaseModel):
)
-def GoogleDrivePickerField(
- multiselect: bool = False,
- allow_folder_selection: bool = False,
- allowed_views: Optional[list[AttachmentView]] = None,
- allowed_mime_types: Optional[list[str]] = None,
- scopes: Optional[list[str]] = None,
- title: Optional[str] = None,
- description: Optional[str] = None,
- placeholder: Optional[str] = None,
- **kwargs,
+class GoogleDriveFile(_GoogleDriveFileBase):
+ """
+ Represents a Google Drive file/folder with optional credentials for chaining.
+
+ Used for both inputs and outputs in Google Drive blocks. The `_credentials_id`
+ field enables chaining between blocks - when one block outputs a file, the
+ next block can use the same credentials to access it.
+
+ When used with GoogleDriveFileField(), the frontend renders a combined
+ auth + file picker UI that automatically populates `_credentials_id`.
+ """
+
+ # Hidden field for credential ID - populated by frontend, preserved in outputs
+ credentials_id: Optional[str] = Field(
+ None,
+ alias="_credentials_id",
+ description="Internal: credential ID for authentication",
+ )
+
+
+def GoogleDriveFileField(
+ *,
+ title: str,
+ description: str | None = None,
+ credentials_kwarg: str = "credentials",
+ credentials_scopes: list[str] | None = None,
+ allowed_views: list[AttachmentView] | None = None,
+ allowed_mime_types: list[str] | None = None,
+ placeholder: str | None = None,
+ **kwargs: Any,
) -> Any:
"""
- Creates a Google Drive Picker input field.
+ Creates a Google Drive file input field with auto-generated credentials.
+
+ This field type produces a single UI element that handles both:
+ 1. Google OAuth authentication
+ 2. File selection via Google Drive Picker
+
+ The system automatically generates a credentials field, and the credentials
+ are passed to the run() method using the specified kwarg name.
Args:
- multiselect: Allow selecting multiple files/folders (default: False)
- allow_folder_selection: Allow selecting folders (default: False)
- allowed_views: List of view types to show in picker (default: ["DOCS"])
- allowed_mime_types: Filter by MIME types (e.g., ["application/pdf"])
title: Field title shown in UI
description: Field description/help text
+ credentials_kwarg: Name of the kwarg that will receive GoogleCredentials
+ in the run() method (default: "credentials")
+ credentials_scopes: OAuth scopes required (default: drive.file)
+ allowed_views: List of view types to show in picker (default: ["DOCS"])
+ allowed_mime_types: Filter by MIME types
placeholder: Placeholder text for the button
- **kwargs: Additional SchemaField arguments (advanced, hidden, etc.)
+ **kwargs: Additional SchemaField arguments
Returns:
- Field definition that produces:
- - Single GoogleDriveFile when multiselect=False
- - list[GoogleDriveFile] when multiselect=True
+ Field definition that produces GoogleDriveFile
Example:
>>> class MyBlock(Block):
- ... class Input(BlockSchema):
- ... document: GoogleDriveFile = GoogleDrivePickerField(
- ... title="Select Document",
- ... allowed_views=["DOCUMENTS"],
+ ... class Input(BlockSchemaInput):
+ ... spreadsheet: GoogleDriveFile = GoogleDriveFileField(
+ ... title="Select Spreadsheet",
+ ... credentials_kwarg="creds",
+ ... allowed_views=["SPREADSHEETS"],
... )
...
- ... files: list[GoogleDriveFile] = GoogleDrivePickerField(
- ... title="Select Multiple Files",
- ... multiselect=True,
- ... allow_folder_selection=True,
- ... )
+ ... async def run(
+ ... self, input_data: Input, *, creds: GoogleCredentials, **kwargs
+ ... ):
+ ... # creds is automatically populated
+ ... file = input_data.spreadsheet
"""
- # Build configuration that will be sent to frontend
+
+ # Determine scopes - drive.file is sufficient for picker-selected files
+ scopes = credentials_scopes or ["https://www.googleapis.com/auth/drive.file"]
+
+ # Build picker configuration with auto_credentials embedded
picker_config = {
- "multiselect": multiselect,
- "allow_folder_selection": allow_folder_selection,
+ "multiselect": False,
+ "allow_folder_selection": False,
"allowed_views": list(allowed_views) if allowed_views else ["DOCS"],
+ "scopes": scopes,
+ # Auto-credentials config tells frontend to include _credentials_id in output
+ "auto_credentials": {
+ "provider": "google",
+ "type": "oauth2",
+ "scopes": scopes,
+ "kwarg_name": credentials_kwarg,
+ },
}
- # Add optional configurations
if allowed_mime_types:
picker_config["allowed_mime_types"] = list(allowed_mime_types)
- # Determine required scopes based on config
- base_scopes = scopes if scopes is not None else []
- picker_scopes: set[str] = set(base_scopes)
- if allow_folder_selection:
- picker_scopes.add("https://www.googleapis.com/auth/drive")
- else:
- # Use drive.file for minimal scope - only access files selected by user in picker
- picker_scopes.add("https://www.googleapis.com/auth/drive.file")
-
- picker_config["scopes"] = sorted(picker_scopes)
-
- # Set appropriate default value
- default_value = [] if multiselect else None
-
- # Use SchemaField to handle format properly
return SchemaField(
- default=default_value,
+ default=None,
title=title,
description=description,
- placeholder=placeholder or "Choose from Google Drive",
+ placeholder=placeholder or "Select from Google Drive",
+ # Use google-drive-picker format so frontend renders existing component
format="google-drive-picker",
advanced=False,
json_schema_extra={
"google_drive_picker_config": picker_config,
+ # Also keep auto_credentials at top level for backend detection
+ "auto_credentials": {
+ "provider": "google",
+ "type": "oauth2",
+ "scopes": scopes,
+ "kwarg_name": credentials_kwarg,
+ },
**kwargs,
},
)
-
-
-DRIVE_API_URL = "https://www.googleapis.com/drive/v3/files"
-_requests = Requests(trusted_origins=["https://www.googleapis.com"])
-
-
-def GoogleDriveAttachmentField(
- *,
- title: str,
- description: str | None = None,
- placeholder: str | None = None,
- multiselect: bool = True,
- allowed_mime_types: list[str] | None = None,
- **extra: Any,
-) -> Any:
- return GoogleDrivePickerField(
- multiselect=multiselect,
- allowed_views=list(ATTACHMENT_VIEWS),
- allowed_mime_types=allowed_mime_types,
- title=title,
- description=description,
- placeholder=placeholder or "Choose files from Google Drive",
- **extra,
- )
-
-
-async def drive_file_to_media_file(
- drive_file: GoogleDriveFile, *, graph_exec_id: str, access_token: str
-) -> MediaFileType:
- if drive_file.is_folder:
- raise ValueError("Google Drive selection must be a file.")
- if not access_token:
- raise ValueError("Google Drive access token is required for file download.")
-
- url = f"{DRIVE_API_URL}/{drive_file.id}?alt=media"
- response = await _requests.get(
- url, headers={"Authorization": f"Bearer {access_token}"}
- )
-
- mime_type = drive_file.mime_type or response.headers.get(
- "content-type", "application/octet-stream"
- )
-
- MAX_FILE_SIZE = 100 * 1024 * 1024
- if len(response.content) > MAX_FILE_SIZE:
- raise ValueError(
- f"File too large: {len(response.content)} bytes > {MAX_FILE_SIZE} bytes"
- )
-
- base_path = Path(get_exec_file_path(graph_exec_id, ""))
- base_path.mkdir(parents=True, exist_ok=True)
-
- extension = mimetypes.guess_extension(mime_type, strict=False) or ".bin"
- filename = f"{uuid.uuid4()}{extension}"
- target_path = base_path / filename
-
- await scan_content_safe(response.content, filename=filename)
- await asyncio.to_thread(target_path.write_bytes, response.content)
-
- return MediaFileType(str(target_path.relative_to(base_path)))
diff --git a/autogpt_platform/backend/backend/blocks/google/sheets.py b/autogpt_platform/backend/backend/blocks/google/sheets.py
index e35749a72e..fac4e2d1aa 100644
--- a/autogpt_platform/backend/backend/blocks/google/sheets.py
+++ b/autogpt_platform/backend/backend/blocks/google/sheets.py
@@ -5,7 +5,7 @@ from typing import Any
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
-from backend.blocks.google._drive import GoogleDriveFile, GoogleDrivePickerField
+from backend.blocks.google._drive import GoogleDriveFile, GoogleDriveFileField
from backend.data.block import (
Block,
BlockCategory,
@@ -182,6 +182,28 @@ def _build_sheets_service(credentials: GoogleCredentials):
return build("sheets", "v4", credentials=creds)
+def _build_drive_service(credentials: GoogleCredentials):
+ """Build Drive service from platform credentials (with refresh token)."""
+ settings = Settings()
+ creds = Credentials(
+ token=(
+ credentials.access_token.get_secret_value()
+ if credentials.access_token
+ else None
+ ),
+ refresh_token=(
+ credentials.refresh_token.get_secret_value()
+ if credentials.refresh_token
+ else None
+ ),
+ token_uri="https://oauth2.googleapis.com/token",
+ client_id=settings.secrets.google_client_id,
+ client_secret=settings.secrets.google_client_secret,
+ scopes=credentials.scopes,
+ )
+ return build("drive", "v3", credentials=creds)
+
+
def _validate_spreadsheet_file(spreadsheet_file: "GoogleDriveFile") -> str | None:
"""Validate that the selected file is a Google Sheets spreadsheet.
@@ -250,10 +272,10 @@ class BatchOperation(BlockSchemaInput):
class GoogleSheetsReadBlock(Block):
class Input(BlockSchemaInput):
- credentials: GoogleCredentialsInput = GoogleCredentialsField([])
- spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ spreadsheet: GoogleDriveFile = GoogleDriveFileField(
title="Spreadsheet",
description="Select a Google Sheets spreadsheet",
+ credentials_kwarg="credentials",
allowed_views=["SPREADSHEETS"],
allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
@@ -282,7 +304,6 @@ class GoogleSheetsReadBlock(Block):
output_schema=GoogleSheetsReadBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "credentials": TEST_CREDENTIALS_INPUT,
"spreadsheet": {
"id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
"name": "Test Spreadsheet",
@@ -308,6 +329,7 @@ class GoogleSheetsReadBlock(Block):
url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=None,
),
),
],
@@ -338,7 +360,7 @@ class GoogleSheetsReadBlock(Block):
self._read_sheet, service, spreadsheet_id, input_data.range
)
yield "result", data
- # Output the GoogleDriveFile for chaining
+ # Output the GoogleDriveFile for chaining (preserves credentials_id)
yield "spreadsheet", GoogleDriveFile(
id=spreadsheet_id,
name=input_data.spreadsheet.name,
@@ -346,6 +368,7 @@ class GoogleSheetsReadBlock(Block):
url=f"https://docs.google.com/spreadsheets/d/{spreadsheet_id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", _handle_sheets_api_error(str(e), "read")
@@ -373,10 +396,10 @@ class GoogleSheetsReadBlock(Block):
class GoogleSheetsWriteBlock(Block):
class Input(BlockSchemaInput):
- credentials: GoogleCredentialsInput = GoogleCredentialsField([])
- spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ spreadsheet: GoogleDriveFile = GoogleDriveFileField(
title="Spreadsheet",
description="Select a Google Sheets spreadsheet",
+ credentials_kwarg="credentials",
allowed_views=["SPREADSHEETS"],
allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
@@ -408,7 +431,6 @@ class GoogleSheetsWriteBlock(Block):
output_schema=GoogleSheetsWriteBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "credentials": TEST_CREDENTIALS_INPUT,
"spreadsheet": {
"id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
"name": "Test Spreadsheet",
@@ -435,6 +457,7 @@ class GoogleSheetsWriteBlock(Block):
url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=None,
),
),
],
@@ -477,7 +500,7 @@ class GoogleSheetsWriteBlock(Block):
input_data.values,
)
yield "result", result
- # Output the GoogleDriveFile for chaining
+ # Output the GoogleDriveFile for chaining (preserves credentials_id)
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
@@ -485,6 +508,7 @@ class GoogleSheetsWriteBlock(Block):
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", _handle_sheets_api_error(str(e), "write")
@@ -509,10 +533,10 @@ class GoogleSheetsWriteBlock(Block):
class GoogleSheetsAppendBlock(Block):
class Input(BlockSchemaInput):
- credentials: GoogleCredentialsInput = GoogleCredentialsField([])
- spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ spreadsheet: GoogleDriveFile = GoogleDriveFileField(
title="Spreadsheet",
description="Select a Google Sheets spreadsheet",
+ credentials_kwarg="credentials",
allowed_views=["SPREADSHEETS"],
allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
@@ -566,7 +590,6 @@ class GoogleSheetsAppendBlock(Block):
output_schema=GoogleSheetsAppendBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "credentials": TEST_CREDENTIALS_INPUT,
"spreadsheet": {
"id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
"name": "Test Spreadsheet",
@@ -586,6 +609,7 @@ class GoogleSheetsAppendBlock(Block):
url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=None,
),
),
],
@@ -642,7 +666,7 @@ class GoogleSheetsAppendBlock(Block):
input_data.insert_data_option,
)
yield "result", result
- # Output the GoogleDriveFile for chaining
+ # Output the GoogleDriveFile for chaining (preserves credentials_id)
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
@@ -650,6 +674,7 @@ class GoogleSheetsAppendBlock(Block):
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to append to Google Sheet: {str(e)}"
@@ -690,10 +715,10 @@ class GoogleSheetsAppendBlock(Block):
class GoogleSheetsClearBlock(Block):
class Input(BlockSchemaInput):
- credentials: GoogleCredentialsInput = GoogleCredentialsField([])
- spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ spreadsheet: GoogleDriveFile = GoogleDriveFileField(
title="Spreadsheet",
description="Select a Google Sheets spreadsheet",
+ credentials_kwarg="credentials",
allowed_views=["SPREADSHEETS"],
allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
@@ -722,7 +747,6 @@ class GoogleSheetsClearBlock(Block):
output_schema=GoogleSheetsClearBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "credentials": TEST_CREDENTIALS_INPUT,
"spreadsheet": {
"id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
"name": "Test Spreadsheet",
@@ -742,6 +766,7 @@ class GoogleSheetsClearBlock(Block):
url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=None,
),
),
],
@@ -774,7 +799,7 @@ class GoogleSheetsClearBlock(Block):
input_data.range,
)
yield "result", result
- # Output the GoogleDriveFile for chaining
+ # Output the GoogleDriveFile for chaining (preserves credentials_id)
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
@@ -782,6 +807,7 @@ class GoogleSheetsClearBlock(Block):
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to clear Google Sheet range: {str(e)}"
@@ -798,10 +824,10 @@ class GoogleSheetsClearBlock(Block):
class GoogleSheetsMetadataBlock(Block):
class Input(BlockSchemaInput):
- credentials: GoogleCredentialsInput = GoogleCredentialsField([])
- spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ spreadsheet: GoogleDriveFile = GoogleDriveFileField(
title="Spreadsheet",
description="Select a Google Sheets spreadsheet",
+ credentials_kwarg="credentials",
allowed_views=["SPREADSHEETS"],
allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
@@ -826,7 +852,6 @@ class GoogleSheetsMetadataBlock(Block):
output_schema=GoogleSheetsMetadataBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "credentials": TEST_CREDENTIALS_INPUT,
"spreadsheet": {
"id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
"name": "Test Spreadsheet",
@@ -851,6 +876,7 @@ class GoogleSheetsMetadataBlock(Block):
url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=None,
),
),
],
@@ -883,7 +909,7 @@ class GoogleSheetsMetadataBlock(Block):
input_data.spreadsheet.id,
)
yield "result", result
- # Output the GoogleDriveFile for chaining
+ # Output the GoogleDriveFile for chaining (preserves credentials_id)
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
@@ -891,6 +917,7 @@ class GoogleSheetsMetadataBlock(Block):
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to get spreadsheet metadata: {str(e)}"
@@ -918,10 +945,10 @@ class GoogleSheetsMetadataBlock(Block):
class GoogleSheetsManageSheetBlock(Block):
class Input(BlockSchemaInput):
- credentials: GoogleCredentialsInput = GoogleCredentialsField([])
- spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ spreadsheet: GoogleDriveFile = GoogleDriveFileField(
title="Spreadsheet",
description="Select a Google Sheets spreadsheet",
+ credentials_kwarg="credentials",
allowed_views=["SPREADSHEETS"],
allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
@@ -955,7 +982,6 @@ class GoogleSheetsManageSheetBlock(Block):
output_schema=GoogleSheetsManageSheetBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "credentials": TEST_CREDENTIALS_INPUT,
"spreadsheet": {
"id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
"name": "Test Spreadsheet",
@@ -976,6 +1002,7 @@ class GoogleSheetsManageSheetBlock(Block):
url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=None,
),
),
],
@@ -1012,7 +1039,7 @@ class GoogleSheetsManageSheetBlock(Block):
input_data.destination_sheet_name,
)
yield "result", result
- # Output the GoogleDriveFile for chaining
+ # Output the GoogleDriveFile for chaining (preserves credentials_id)
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
@@ -1020,6 +1047,7 @@ class GoogleSheetsManageSheetBlock(Block):
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to manage sheet: {str(e)}"
@@ -1073,10 +1101,10 @@ class GoogleSheetsManageSheetBlock(Block):
class GoogleSheetsBatchOperationsBlock(Block):
class Input(BlockSchemaInput):
- credentials: GoogleCredentialsInput = GoogleCredentialsField([])
- spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ spreadsheet: GoogleDriveFile = GoogleDriveFileField(
title="Spreadsheet",
description="Select a Google Sheets spreadsheet",
+ credentials_kwarg="credentials",
allowed_views=["SPREADSHEETS"],
allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
@@ -1104,7 +1132,6 @@ class GoogleSheetsBatchOperationsBlock(Block):
output_schema=GoogleSheetsBatchOperationsBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "credentials": TEST_CREDENTIALS_INPUT,
"spreadsheet": {
"id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
"name": "Test Spreadsheet",
@@ -1135,6 +1162,7 @@ class GoogleSheetsBatchOperationsBlock(Block):
url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=None,
),
),
],
@@ -1168,6 +1196,7 @@ class GoogleSheetsBatchOperationsBlock(Block):
input_data.operations,
)
yield "result", result
+ # Output the GoogleDriveFile for chaining (preserves credentials_id)
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
@@ -1175,6 +1204,7 @@ class GoogleSheetsBatchOperationsBlock(Block):
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to perform batch operations: {str(e)}"
@@ -1228,10 +1258,10 @@ class GoogleSheetsBatchOperationsBlock(Block):
class GoogleSheetsFindReplaceBlock(Block):
class Input(BlockSchemaInput):
- credentials: GoogleCredentialsInput = GoogleCredentialsField([])
- spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ spreadsheet: GoogleDriveFile = GoogleDriveFileField(
title="Spreadsheet",
description="Select a Google Sheets spreadsheet",
+ credentials_kwarg="credentials",
allowed_views=["SPREADSHEETS"],
allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
@@ -1274,7 +1304,6 @@ class GoogleSheetsFindReplaceBlock(Block):
output_schema=GoogleSheetsFindReplaceBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "credentials": TEST_CREDENTIALS_INPUT,
"spreadsheet": {
"id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
"name": "Test Spreadsheet",
@@ -1297,6 +1326,7 @@ class GoogleSheetsFindReplaceBlock(Block):
url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=None,
),
),
],
@@ -1331,6 +1361,7 @@ class GoogleSheetsFindReplaceBlock(Block):
input_data.match_entire_cell,
)
yield "result", result
+ # Output the GoogleDriveFile for chaining (preserves credentials_id)
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
@@ -1338,6 +1369,7 @@ class GoogleSheetsFindReplaceBlock(Block):
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to find/replace in Google Sheet: {str(e)}"
@@ -1376,10 +1408,10 @@ class GoogleSheetsFindReplaceBlock(Block):
class GoogleSheetsFindBlock(Block):
class Input(BlockSchemaInput):
- credentials: GoogleCredentialsInput = GoogleCredentialsField([])
- spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ spreadsheet: GoogleDriveFile = GoogleDriveFileField(
title="Spreadsheet",
description="Select a Google Sheets spreadsheet",
+ credentials_kwarg="credentials",
allowed_views=["SPREADSHEETS"],
allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
@@ -1434,7 +1466,6 @@ class GoogleSheetsFindBlock(Block):
output_schema=GoogleSheetsFindBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "credentials": TEST_CREDENTIALS_INPUT,
"spreadsheet": {
"id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
"name": "Test Spreadsheet",
@@ -1467,6 +1498,7 @@ class GoogleSheetsFindBlock(Block):
url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=None,
),
),
],
@@ -1511,6 +1543,7 @@ class GoogleSheetsFindBlock(Block):
yield "count", result["count"]
yield "locations", result["locations"]
yield "result", {"success": True}
+ # Output the GoogleDriveFile for chaining (preserves credentials_id)
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
@@ -1518,6 +1551,7 @@ class GoogleSheetsFindBlock(Block):
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to find text in Google Sheet: {str(e)}"
@@ -1682,10 +1716,10 @@ class GoogleSheetsFindBlock(Block):
class GoogleSheetsFormatBlock(Block):
class Input(BlockSchemaInput):
- credentials: GoogleCredentialsInput = GoogleCredentialsField([])
- spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ spreadsheet: GoogleDriveFile = GoogleDriveFileField(
title="Spreadsheet",
description="Select a Google Sheets spreadsheet",
+ credentials_kwarg="credentials",
allowed_views=["SPREADSHEETS"],
allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
@@ -1717,7 +1751,6 @@ class GoogleSheetsFormatBlock(Block):
output_schema=GoogleSheetsFormatBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "credentials": TEST_CREDENTIALS_INPUT,
"spreadsheet": {
"id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
"name": "Test Spreadsheet",
@@ -1739,6 +1772,7 @@ class GoogleSheetsFormatBlock(Block):
url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=None,
),
),
],
@@ -1775,6 +1809,7 @@ class GoogleSheetsFormatBlock(Block):
yield "error", result["error"]
else:
yield "result", result
+ # Output the GoogleDriveFile for chaining (preserves credentials_id)
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
@@ -1782,6 +1817,7 @@ class GoogleSheetsFormatBlock(Block):
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", f"Failed to format Google Sheet cells: {str(e)}"
@@ -1855,7 +1891,10 @@ class GoogleSheetsFormatBlock(Block):
class GoogleSheetsCreateSpreadsheetBlock(Block):
class Input(BlockSchemaInput):
- credentials: GoogleCredentialsInput = GoogleCredentialsField([])
+ # Explicit credentials since this block creates a file (no file picker)
+ credentials: GoogleCredentialsInput = GoogleCredentialsField(
+ ["https://www.googleapis.com/auth/drive.file"]
+ )
title: str = SchemaField(
description="The title of the new spreadsheet",
)
@@ -1890,9 +1929,9 @@ class GoogleSheetsCreateSpreadsheetBlock(Block):
output_schema=GoogleSheetsCreateSpreadsheetBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
+ "credentials": TEST_CREDENTIALS_INPUT,
"title": "Test Spreadsheet",
"sheet_names": ["Sheet1", "Data", "Summary"],
- "credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[
@@ -1905,6 +1944,9 @@ class GoogleSheetsCreateSpreadsheetBlock(Block):
url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=TEST_CREDENTIALS_INPUT[
+ "id"
+ ], # Preserves credential ID for chaining
),
),
("spreadsheet_id", "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms"),
@@ -1926,10 +1968,12 @@ class GoogleSheetsCreateSpreadsheetBlock(Block):
async def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
- service = _build_sheets_service(credentials)
+ drive_service = _build_drive_service(credentials)
+ sheets_service = _build_sheets_service(credentials)
result = await asyncio.to_thread(
self._create_spreadsheet,
- service,
+ drive_service,
+ sheets_service,
input_data.title,
input_data.sheet_names,
)
@@ -1939,7 +1983,7 @@ class GoogleSheetsCreateSpreadsheetBlock(Block):
else:
spreadsheet_id = result["spreadsheetId"]
spreadsheet_url = result["spreadsheetUrl"]
- # Output the full GoogleDriveFile object for easy chaining
+ # Output the GoogleDriveFile for chaining (includes credentials_id)
yield "spreadsheet", GoogleDriveFile(
id=spreadsheet_id,
name=result.get("title", input_data.title),
@@ -1947,40 +1991,68 @@ class GoogleSheetsCreateSpreadsheetBlock(Block):
url=spreadsheet_url,
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=input_data.credentials.id, # Preserve credentials for chaining
)
yield "spreadsheet_id", spreadsheet_id
yield "spreadsheet_url", spreadsheet_url
yield "result", {"success": True}
- def _create_spreadsheet(self, service, title: str, sheet_names: list[str]) -> dict:
+ def _create_spreadsheet(
+ self, drive_service, sheets_service, title: str, sheet_names: list[str]
+ ) -> dict:
try:
- # Create the initial spreadsheet
- spreadsheet_body = {
- "properties": {"title": title},
- "sheets": [
- {
- "properties": {
- "title": sheet_names[0] if sheet_names else "Sheet1"
- }
- }
- ],
+ # Create blank spreadsheet using Drive API
+ file_metadata = {
+ "name": title,
+ "mimeType": "application/vnd.google-apps.spreadsheet",
}
+ result = (
+ drive_service.files()
+ .create(body=file_metadata, fields="id, webViewLink")
+ .execute()
+ )
- result = service.spreadsheets().create(body=spreadsheet_body).execute()
- spreadsheet_id = result["spreadsheetId"]
- spreadsheet_url = result["spreadsheetUrl"]
+ spreadsheet_id = result["id"]
+ spreadsheet_url = result.get(
+ "webViewLink",
+ f"https://docs.google.com/spreadsheets/d/{spreadsheet_id}/edit",
+ )
+
+ # Rename first sheet if custom name provided (default is "Sheet1")
+ if sheet_names and sheet_names[0] != "Sheet1":
+ # Get first sheet ID and rename it
+ meta = (
+ sheets_service.spreadsheets()
+ .get(spreadsheetId=spreadsheet_id)
+ .execute()
+ )
+ first_sheet_id = meta["sheets"][0]["properties"]["sheetId"]
+ sheets_service.spreadsheets().batchUpdate(
+ spreadsheetId=spreadsheet_id,
+ body={
+ "requests": [
+ {
+ "updateSheetProperties": {
+ "properties": {
+ "sheetId": first_sheet_id,
+ "title": sheet_names[0],
+ },
+ "fields": "title",
+ }
+ }
+ ]
+ },
+ ).execute()
# Add additional sheets if requested
if len(sheet_names) > 1:
- requests = []
- for sheet_name in sheet_names[1:]:
- requests.append({"addSheet": {"properties": {"title": sheet_name}}})
-
- if requests:
- batch_body = {"requests": requests}
- service.spreadsheets().batchUpdate(
- spreadsheetId=spreadsheet_id, body=batch_body
- ).execute()
+ requests = [
+ {"addSheet": {"properties": {"title": name}}}
+ for name in sheet_names[1:]
+ ]
+ sheets_service.spreadsheets().batchUpdate(
+ spreadsheetId=spreadsheet_id, body={"requests": requests}
+ ).execute()
return {
"spreadsheetId": spreadsheet_id,
@@ -1995,10 +2067,10 @@ class GoogleSheetsUpdateCellBlock(Block):
"""Update a single cell in a Google Sheets spreadsheet."""
class Input(BlockSchemaInput):
- credentials: GoogleCredentialsInput = GoogleCredentialsField([])
- spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ spreadsheet: GoogleDriveFile = GoogleDriveFileField(
title="Spreadsheet",
description="Select a Google Sheets spreadsheet",
+ credentials_kwarg="credentials",
allowed_views=["SPREADSHEETS"],
allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
@@ -2035,7 +2107,6 @@ class GoogleSheetsUpdateCellBlock(Block):
output_schema=GoogleSheetsUpdateCellBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "credentials": TEST_CREDENTIALS_INPUT,
"spreadsheet": {
"id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
"name": "Test Spreadsheet",
@@ -2059,6 +2130,7 @@ class GoogleSheetsUpdateCellBlock(Block):
url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=None,
),
),
],
@@ -2096,6 +2168,7 @@ class GoogleSheetsUpdateCellBlock(Block):
)
yield "result", result
+ # Output the GoogleDriveFile for chaining (preserves credentials_id)
yield "spreadsheet", GoogleDriveFile(
id=input_data.spreadsheet.id,
name=input_data.spreadsheet.name,
@@ -2103,6 +2176,7 @@ class GoogleSheetsUpdateCellBlock(Block):
url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
isFolder=False,
+ _credentials_id=input_data.spreadsheet.credentials_id,
)
except Exception as e:
yield "error", _handle_sheets_api_error(str(e), "update")
diff --git a/autogpt_platform/backend/backend/blocks/io.py b/autogpt_platform/backend/backend/blocks/io.py
index 5a9e59ddb4..07f09eb349 100644
--- a/autogpt_platform/backend/backend/blocks/io.py
+++ b/autogpt_platform/backend/backend/blocks/io.py
@@ -2,6 +2,8 @@ import copy
from datetime import date, time
from typing import Any, Optional
+# Import for Google Drive file input block
+from backend.blocks.google._drive import AttachmentView, GoogleDriveFile
from backend.data.block import (
Block,
BlockCategory,
@@ -646,6 +648,119 @@ class AgentTableInputBlock(AgentInputBlock):
yield "result", input_data.value if input_data.value is not None else []
+class AgentGoogleDriveFileInputBlock(AgentInputBlock):
+ """
+ This block allows users to select a file from Google Drive.
+
+ It provides a Google Drive file picker UI that handles both authentication
+ and file selection. The selected file information (ID, name, URL, etc.)
+ is output for use by other blocks like Google Sheets Read.
+ """
+
+ class Input(AgentInputBlock.Input):
+ value: Optional[GoogleDriveFile] = SchemaField(
+ description="The selected Google Drive file.",
+ default=None,
+ advanced=False,
+ title="Selected File",
+ )
+ allowed_views: list[AttachmentView] = SchemaField(
+ description="Which views to show in the file picker (DOCS, SPREADSHEETS, PRESENTATIONS, etc.).",
+ default_factory=lambda: ["DOCS", "SPREADSHEETS", "PRESENTATIONS"],
+ advanced=False,
+ title="Allowed Views",
+ )
+ allow_folder_selection: bool = SchemaField(
+ description="Whether to allow selecting folders.",
+ default=False,
+ advanced=True,
+ title="Allow Folder Selection",
+ )
+
+ def generate_schema(self):
+ """Generate schema for the value field with Google Drive picker format."""
+ schema = super().generate_schema()
+
+ # Default scopes for drive.file access
+ scopes = ["https://www.googleapis.com/auth/drive.file"]
+
+ # Build picker configuration
+ picker_config = {
+ "multiselect": False, # Single file selection only for now
+ "allow_folder_selection": self.allow_folder_selection,
+ "allowed_views": (
+ list(self.allowed_views) if self.allowed_views else ["DOCS"]
+ ),
+ "scopes": scopes,
+ # Auto-credentials config tells frontend to include _credentials_id in output
+ "auto_credentials": {
+ "provider": "google",
+ "type": "oauth2",
+ "scopes": scopes,
+ "kwarg_name": "credentials",
+ },
+ }
+
+ # Set format and config for frontend to render Google Drive picker
+ schema["format"] = "google-drive-picker"
+ schema["google_drive_picker_config"] = picker_config
+ # Also keep auto_credentials at top level for backend detection
+ schema["auto_credentials"] = {
+ "provider": "google",
+ "type": "oauth2",
+ "scopes": scopes,
+ "kwarg_name": "credentials",
+ }
+
+ if self.value is not None:
+ schema["default"] = self.value.model_dump()
+
+ return schema
+
+ class Output(AgentInputBlock.Output):
+ result: GoogleDriveFile = SchemaField(
+ description="The selected Google Drive file with ID, name, URL, and other metadata."
+ )
+
+ def __init__(self):
+ test_file = GoogleDriveFile.model_validate(
+ {
+ "id": "test-file-id",
+ "name": "Test Spreadsheet",
+ "mimeType": "application/vnd.google-apps.spreadsheet",
+ "url": "https://docs.google.com/spreadsheets/d/test-file-id",
+ }
+ )
+ super().__init__(
+ id="d3b32f15-6fd7-40e3-be52-e083f51b19a2",
+ description="Block for selecting a file from Google Drive.",
+ disabled=not config.enable_agent_input_subtype_blocks,
+ input_schema=AgentGoogleDriveFileInputBlock.Input,
+ output_schema=AgentGoogleDriveFileInputBlock.Output,
+ test_input=[
+ {
+ "name": "spreadsheet_input",
+ "description": "Select a spreadsheet from Google Drive",
+ "allowed_views": ["SPREADSHEETS"],
+ "value": {
+ "id": "test-file-id",
+ "name": "Test Spreadsheet",
+ "mimeType": "application/vnd.google-apps.spreadsheet",
+ "url": "https://docs.google.com/spreadsheets/d/test-file-id",
+ },
+ }
+ ],
+ test_output=[("result", test_file)],
+ )
+
+ async def run(self, input_data: Input, *args, **kwargs) -> BlockOutput:
+ """
+ Yields the selected Google Drive file.
+ """
+ if input_data.value is not None:
+ yield "result", input_data.value
+
+
IO_BLOCK_IDs = [
AgentInputBlock().id,
AgentOutputBlock().id,
@@ -658,4 +773,5 @@ IO_BLOCK_IDs = [
AgentDropdownInputBlock().id,
AgentToggleInputBlock().id,
AgentTableInputBlock().id,
+ AgentGoogleDriveFileInputBlock().id,
]
diff --git a/autogpt_platform/backend/backend/blocks/test/test_block.py b/autogpt_platform/backend/backend/blocks/test/test_block.py
index 2c5313b7ab..7a1fdbcc73 100644
--- a/autogpt_platform/backend/backend/blocks/test/test_block.py
+++ b/autogpt_platform/backend/backend/blocks/test/test_block.py
@@ -1,8 +1,9 @@
-from typing import Type
+from typing import Any, Type
import pytest
-from backend.data.block import Block, get_blocks
+from backend.data.block import Block, BlockSchemaInput, get_blocks
+from backend.data.model import SchemaField
from backend.util.test import execute_block_test
SKIP_BLOCK_TESTS = {
@@ -132,3 +133,148 @@ async def test_block_ids_valid(block: Type[Block]):
), f"Block {block.name} ID is UUID version {parsed_uuid.version}, expected version 4"
except ValueError:
pytest.fail(f"Block {block.name} has invalid UUID format: {block_instance.id}")
+
+
+class TestAutoCredentialsFieldsValidation:
+ """Tests for auto_credentials field validation in BlockSchema."""
+
+ def test_duplicate_auto_credentials_kwarg_name_raises_error(self):
+ """Test that duplicate kwarg_name in auto_credentials raises ValueError."""
+
+ class DuplicateKwargSchema(BlockSchemaInput):
+ """Schema with duplicate auto_credentials kwarg_name."""
+
+ # Both fields explicitly use the same kwarg_name "credentials"
+ file1: dict[str, Any] | None = SchemaField(
+ description="First file input",
+ default=None,
+ json_schema_extra={
+ "auto_credentials": {
+ "provider": "google",
+ "type": "oauth2",
+ "scopes": ["https://www.googleapis.com/auth/drive.file"],
+ "kwarg_name": "credentials",
+ }
+ },
+ )
+ file2: dict[str, Any] | None = SchemaField(
+ description="Second file input",
+ default=None,
+ json_schema_extra={
+ "auto_credentials": {
+ "provider": "google",
+ "type": "oauth2",
+ "scopes": ["https://www.googleapis.com/auth/drive.file"],
+ "kwarg_name": "credentials", # Duplicate kwarg_name!
+ }
+ },
+ )
+
+ with pytest.raises(ValueError) as exc_info:
+ DuplicateKwargSchema.get_auto_credentials_fields()
+
+ error_message = str(exc_info.value)
+ assert "Duplicate auto_credentials kwarg_name 'credentials'" in error_message
+ assert "file1" in error_message
+ assert "file2" in error_message
+
+ def test_unique_auto_credentials_kwarg_names_succeed(self):
+ """Test that unique kwarg_name values work correctly."""
+
+ class UniqueKwargSchema(BlockSchemaInput):
+ """Schema with unique auto_credentials kwarg_name values."""
+
+ file1: dict[str, Any] | None = SchemaField(
+ description="First file input",
+ default=None,
+ json_schema_extra={
+ "auto_credentials": {
+ "provider": "google",
+ "type": "oauth2",
+ "scopes": ["https://www.googleapis.com/auth/drive.file"],
+ "kwarg_name": "file1_credentials",
+ }
+ },
+ )
+ file2: dict[str, Any] | None = SchemaField(
+ description="Second file input",
+ default=None,
+ json_schema_extra={
+ "auto_credentials": {
+ "provider": "google",
+ "type": "oauth2",
+ "scopes": ["https://www.googleapis.com/auth/drive.file"],
+ "kwarg_name": "file2_credentials", # Different kwarg_name
+ }
+ },
+ )
+
+ # Should not raise
+ result = UniqueKwargSchema.get_auto_credentials_fields()
+
+ assert "file1_credentials" in result
+ assert "file2_credentials" in result
+ assert result["file1_credentials"]["field_name"] == "file1"
+ assert result["file2_credentials"]["field_name"] == "file2"
+
+ def test_default_kwarg_name_is_credentials(self):
+ """Test that missing kwarg_name defaults to 'credentials'."""
+
+ class DefaultKwargSchema(BlockSchemaInput):
+ """Schema with auto_credentials missing kwarg_name."""
+
+ file: dict[str, Any] | None = SchemaField(
+ description="File input",
+ default=None,
+ json_schema_extra={
+ "auto_credentials": {
+ "provider": "google",
+ "type": "oauth2",
+ "scopes": ["https://www.googleapis.com/auth/drive.file"],
+ # No kwarg_name specified - should default to "credentials"
+ }
+ },
+ )
+
+ result = DefaultKwargSchema.get_auto_credentials_fields()
+
+ assert "credentials" in result
+ assert result["credentials"]["field_name"] == "file"
+
+ def test_duplicate_default_kwarg_name_raises_error(self):
+ """Test that two fields with default kwarg_name raises ValueError."""
+
+ class DefaultDuplicateSchema(BlockSchemaInput):
+ """Schema where both fields omit kwarg_name, defaulting to 'credentials'."""
+
+ file1: dict[str, Any] | None = SchemaField(
+ description="First file input",
+ default=None,
+ json_schema_extra={
+ "auto_credentials": {
+ "provider": "google",
+ "type": "oauth2",
+ "scopes": ["https://www.googleapis.com/auth/drive.file"],
+ # No kwarg_name - defaults to "credentials"
+ }
+ },
+ )
+ file2: dict[str, Any] | None = SchemaField(
+ description="Second file input",
+ default=None,
+ json_schema_extra={
+ "auto_credentials": {
+ "provider": "google",
+ "type": "oauth2",
+ "scopes": ["https://www.googleapis.com/auth/drive.file"],
+ # No kwarg_name - also defaults to "credentials"
+ }
+ },
+ )
+
+ with pytest.raises(ValueError) as exc_info:
+ DefaultDuplicateSchema.get_auto_credentials_fields()
+
+ assert "Duplicate auto_credentials kwarg_name 'credentials'" in str(
+ exc_info.value
+ )
diff --git a/autogpt_platform/backend/backend/data/block.py b/autogpt_platform/backend/backend/data/block.py
index 762e9b37ef..315d63bd8f 100644
--- a/autogpt_platform/backend/backend/data/block.py
+++ b/autogpt_platform/backend/backend/data/block.py
@@ -266,14 +266,61 @@ class BlockSchema(BaseModel):
)
}
+ @classmethod
+ def get_auto_credentials_fields(cls) -> dict[str, dict[str, Any]]:
+ """
+ Get fields that have auto_credentials metadata (e.g., GoogleDriveFileInput).
+
+ Returns a dict mapping kwarg_name -> {field_name, auto_credentials_config}
+
+ Raises:
+ ValueError: If multiple fields have the same kwarg_name, as this would
+ cause silent overwriting and only the last field would be processed.
+ """
+ result: dict[str, dict[str, Any]] = {}
+ schema = cls.jsonschema()
+ properties = schema.get("properties", {})
+
+ for field_name, field_schema in properties.items():
+ auto_creds = field_schema.get("auto_credentials")
+ if auto_creds:
+ kwarg_name = auto_creds.get("kwarg_name", "credentials")
+ if kwarg_name in result:
+ raise ValueError(
+ f"Duplicate auto_credentials kwarg_name '{kwarg_name}' "
+ f"in fields '{result[kwarg_name]['field_name']}' and "
+ f"'{field_name}' on {cls.__qualname__}"
+ )
+ result[kwarg_name] = {
+ "field_name": field_name,
+ "config": auto_creds,
+ }
+ return result
+
@classmethod
def get_credentials_fields_info(cls) -> dict[str, CredentialsFieldInfo]:
- return {
- field_name: CredentialsFieldInfo.model_validate(
+ result = {}
+
+ # Regular credentials fields
+ for field_name in cls.get_credentials_fields().keys():
+ result[field_name] = CredentialsFieldInfo.model_validate(
cls.get_field_schema(field_name), by_alias=True
)
- for field_name in cls.get_credentials_fields().keys()
- }
+
+ # Auto-generated credentials fields (from GoogleDriveFileInput etc.)
+ for kwarg_name, info in cls.get_auto_credentials_fields().items():
+ config = info["config"]
+ # Build a schema-like dict that CredentialsFieldInfo can parse
+ auto_schema = {
+ "credentials_provider": [config.get("provider", "google")],
+ "credentials_types": [config.get("type", "oauth2")],
+ "credentials_scopes": config.get("scopes"),
+ }
+ result[kwarg_name] = CredentialsFieldInfo.model_validate(
+ auto_schema, by_alias=True
+ )
+
+ return result
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py
index bfec94176d..ee875bbf55 100644
--- a/autogpt_platform/backend/backend/executor/manager.py
+++ b/autogpt_platform/backend/backend/executor/manager.py
@@ -218,15 +218,53 @@ async def execute_node(
# changes during execution. β οΈ This means a set of credentials can only be used by
# one (running) block at a time; simultaneous execution of blocks using same
# credentials is not supported.
- creds_lock = None
+ creds_locks: list[AsyncRedisLock] = []
input_model = cast(type[BlockSchema], node_block.input_schema)
+
+ # Handle regular credentials fields
for field_name, input_type in input_model.get_credentials_fields().items():
credentials_meta = input_type(**input_data[field_name])
- credentials, creds_lock = await creds_manager.acquire(
- user_id, credentials_meta.id
- )
+ credentials, lock = await creds_manager.acquire(user_id, credentials_meta.id)
+ creds_locks.append(lock)
extra_exec_kwargs[field_name] = credentials
+ # Handle auto-generated credentials (e.g., from GoogleDriveFileInput)
+ for kwarg_name, info in input_model.get_auto_credentials_fields().items():
+ field_name = info["field_name"]
+ field_data = input_data.get(field_name)
+ if field_data and isinstance(field_data, dict):
+ # Check if _credentials_id key exists in the field data
+ if "_credentials_id" in field_data:
+ cred_id = field_data["_credentials_id"]
+ if cred_id:
+ # Credential ID provided - acquire credentials
+ provider = info.get("config", {}).get(
+ "provider", "external service"
+ )
+ file_name = field_data.get("name", "selected file")
+ try:
+ credentials, lock = await creds_manager.acquire(
+ user_id, cred_id
+ )
+ creds_locks.append(lock)
+ extra_exec_kwargs[kwarg_name] = credentials
+ except ValueError:
+ # Credential was deleted or doesn't exist
+ raise ValueError(
+ f"Authentication expired for '{file_name}' in field '{field_name}'. "
+ f"The saved {provider.capitalize()} credentials no longer exist. "
+ f"Please re-select the file to re-authenticate."
+ )
+ # else: _credentials_id is explicitly None, skip credentials (for chained data)
+ else:
+ # _credentials_id key missing entirely - this is an error
+ provider = info.get("config", {}).get("provider", "external service")
+ file_name = field_data.get("name", "selected file")
+ raise ValueError(
+ f"Authentication missing for '{file_name}' in field '{field_name}'. "
+ f"Please re-select the file to authenticate with {provider.capitalize()}."
+ )
+
output_size = 0
# sentry tracking nonsense to get user counts for blocks because isolation scopes don't work :(
@@ -260,12 +298,17 @@ async def execute_node(
# Re-raise to maintain normal error flow
raise
finally:
- # Ensure credentials are released even if execution fails
- if creds_lock and (await creds_lock.locked()) and (await creds_lock.owned()):
- try:
- await creds_lock.release()
- except Exception as e:
- log_metadata.error(f"Failed to release credentials lock: {e}")
+ # Ensure all credentials are released even if execution fails
+ for creds_lock in creds_locks:
+ if (
+ creds_lock
+ and (await creds_lock.locked())
+ and (await creds_lock.owned())
+ ):
+ try:
+ await creds_lock.release()
+ except Exception as e:
+ log_metadata.error(f"Failed to release credentials lock: {e}")
# Update execution stats
if execution_stats is not None:
diff --git a/autogpt_platform/backend/backend/util/test.py b/autogpt_platform/backend/backend/util/test.py
index dda62e7f9f..95ea9554ed 100644
--- a/autogpt_platform/backend/backend/util/test.py
+++ b/autogpt_platform/backend/backend/util/test.py
@@ -144,6 +144,8 @@ async def execute_block_test(block: Block):
"execution_context": ExecutionContext(),
}
input_model = cast(type[BlockSchema], block.input_schema)
+
+ # Handle regular credentials fields
credentials_input_fields = input_model.get_credentials_fields()
if len(credentials_input_fields) == 1 and isinstance(
block.test_credentials, _BaseCredentials
@@ -158,6 +160,18 @@ async def execute_block_test(block: Block):
if field_name in block.test_credentials:
extra_exec_kwargs[field_name] = block.test_credentials[field_name]
+ # Handle auto-generated credentials (e.g., from GoogleDriveFileInput)
+ auto_creds_fields = input_model.get_auto_credentials_fields()
+ if auto_creds_fields and block.test_credentials:
+ if isinstance(block.test_credentials, _BaseCredentials):
+ # Single credentials object - use for all auto_credentials kwargs
+ for kwarg_name in auto_creds_fields.keys():
+ extra_exec_kwargs[kwarg_name] = block.test_credentials
+ elif isinstance(block.test_credentials, dict):
+ for kwarg_name in auto_creds_fields.keys():
+ if kwarg_name in block.test_credentials:
+ extra_exec_kwargs[kwarg_name] = block.test_credentials[kwarg_name]
+
for input_data in block.test_input:
log.info(f"{prefix} in: {input_data}")
diff --git a/autogpt_platform/frontend/src/app/globals.css b/autogpt_platform/frontend/src/app/globals.css
index f969c9db79..1f782f753b 100644
--- a/autogpt_platform/frontend/src/app/globals.css
+++ b/autogpt_platform/frontend/src/app/globals.css
@@ -150,3 +150,16 @@ input[type="number"]::-webkit-inner-spin-button {
input[type="number"] {
-moz-appearance: textfield;
}
+
+/* Google Drive Picker: ensure picker appears above dialogs and can receive clicks */
+[class*="picker-dialog"] {
+ z-index: 10000 !important;
+ pointer-events: auto !important;
+}
+
+/* When Google picker is open, lower dialog z-index so picker renders on top */
+body[data-google-picker-open="true"] [data-dialog-overlay],
+body[data-google-picker-open="true"] [data-dialog-content] {
+ z-index: 1 !important;
+ pointer-events: none !important;
+}
diff --git a/autogpt_platform/frontend/src/components/__legacy__/ui/dialog.tsx b/autogpt_platform/frontend/src/components/__legacy__/ui/dialog.tsx
index 2aa089315b..4ce998b6f6 100644
--- a/autogpt_platform/frontend/src/components/__legacy__/ui/dialog.tsx
+++ b/autogpt_platform/frontend/src/components/__legacy__/ui/dialog.tsx
@@ -14,12 +14,20 @@ const DialogPortal = DialogPrimitive.Portal;
const DialogClose = DialogPrimitive.Close;
+/**
+ * Check if an external picker (like Google Drive) is currently open.
+ */
+function isExternalPickerOpen(): boolean {
+ return document.body.hasAttribute("data-google-picker-open");
+}
+
const DialogOverlay = React.forwardRef<
React.ElementRef,
React.ComponentPropsWithoutRef
>(({ className, ...props }, ref) => (
,
React.ComponentPropsWithoutRef
->(({ className, children, ...props }, ref) => (
-
-
-
- {children}
-
-
- Close
-
-
-
-));
+>(
+ (
+ {
+ className,
+ children,
+ onPointerDownOutside,
+ onInteractOutside,
+ onFocusOutside,
+ ...props
+ },
+ ref,
+ ) => (
+
+
+ {
+ if (isExternalPickerOpen()) {
+ e.preventDefault();
+ return;
+ }
+ onPointerDownOutside?.(e);
+ }}
+ onInteractOutside={(e) => {
+ if (isExternalPickerOpen()) {
+ e.preventDefault();
+ return;
+ }
+ onInteractOutside?.(e);
+ }}
+ onFocusOutside={(e) => {
+ if (isExternalPickerOpen()) {
+ e.preventDefault();
+ return;
+ }
+ onFocusOutside?.(e);
+ }}
+ className={cn(
+ "fixed left-[50%] top-[50%] z-50 grid w-full max-w-lg translate-x-[-50%] translate-y-[-50%] gap-4 border border-neutral-200 bg-white p-6 shadow-lg duration-200 data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[state=closed]:slide-out-to-left-1/2 data-[state=closed]:slide-out-to-top-[48%] data-[state=open]:slide-in-from-left-1/2 data-[state=open]:slide-in-from-top-[48%] dark:border-neutral-800 dark:bg-neutral-950 sm:rounded-lg",
+ className,
+ )}
+ {...props}
+ >
+ {children}
+
+
+ Close
+
+
+
+ ),
+);
DialogContent.displayName = DialogPrimitive.Content.displayName;
const DialogHeader = ({
diff --git a/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePicker.tsx b/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePicker.tsx
index 0500d08549..eaa44a9452 100644
--- a/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePicker.tsx
+++ b/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePicker.tsx
@@ -3,7 +3,12 @@
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs";
import { Button } from "@/components/atoms/Button/Button";
import { CircleNotchIcon, FolderOpenIcon } from "@phosphor-icons/react";
-import { Props, useGoogleDrivePicker } from "./useGoogleDrivePicker";
+import {
+ Props as BaseProps,
+ useGoogleDrivePicker,
+} from "./useGoogleDrivePicker";
+
+export type Props = BaseProps;
export function GoogleDrivePicker(props: Props) {
const {
diff --git a/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput.tsx b/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput.tsx
index a1accbada5..1db9809de2 100644
--- a/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput.tsx
+++ b/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput.tsx
@@ -24,6 +24,9 @@ export function GoogleDrivePickerInput({
}: GoogleDrivePickerInputProps) {
const [pickerError, setPickerError] = React.useState(null);
const isMultiSelect = config.multiselect || false;
+ const hasAutoCredentials = !!config.auto_credentials;
+
+ // Strip _credentials_id from value for display purposes
const currentFiles = isMultiSelect
? Array.isArray(value)
? value
@@ -33,25 +36,34 @@ export function GoogleDrivePickerInput({
: [];
const handlePicked = useCallback(
- (files: any[]) => {
+ (files: any[], credentialId?: string) => {
// Clear any previous picker errors
setPickerError(null);
// Convert to GoogleDriveFile format
- const convertedFiles = files.map((f) => ({
- id: f.id,
- name: f.name,
- mimeType: f.mimeType,
- url: f.url,
- iconUrl: f.iconUrl,
- isFolder: f.mimeType === "application/vnd.google-apps.folder",
- }));
+ const convertedFiles = files.map((f) => {
+ const file: any = {
+ id: f.id,
+ name: f.name,
+ mimeType: f.mimeType,
+ url: f.url,
+ iconUrl: f.iconUrl,
+ isFolder: f.mimeType === "application/vnd.google-apps.folder",
+ };
+
+ // Include _credentials_id when auto_credentials is configured
+ if (hasAutoCredentials && credentialId) {
+ file._credentials_id = credentialId;
+ }
+
+ return file;
+ });
// Store based on multiselect mode
const newValue = isMultiSelect ? convertedFiles : convertedFiles[0];
onChange(newValue);
},
- [isMultiSelect, onChange],
+ [isMultiSelect, onChange, hasAutoCredentials],
);
const handleRemoveFile = useCallback(
@@ -79,6 +91,7 @@ export function GoogleDrivePickerInput({
views={config.allowed_views || ["DOCS"]}
scopes={config.scopes || ["https://www.googleapis.com/auth/drive.file"]}
disabled={false}
+ requirePlatformCredentials={hasAutoCredentials}
onPicked={handlePicked}
onCanceled={() => {
// User canceled - no action needed
diff --git a/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/useGoogleDrivePicker.ts b/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/useGoogleDrivePicker.ts
index 323cd6d9d6..66386882c6 100644
--- a/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/useGoogleDrivePicker.ts
+++ b/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/useGoogleDrivePicker.ts
@@ -34,7 +34,9 @@ export type Props = {
disableThumbnails?: boolean;
buttonText?: string;
disabled?: boolean;
- onPicked: (files: NormalizedPickedFile[]) => void;
+ /** When true, requires saved platform credentials (no consent flow fallback) */
+ requirePlatformCredentials?: boolean;
+ onPicked: (files: NormalizedPickedFile[], credentialId?: string) => void;
onCanceled: () => void;
onError: (err: unknown) => void;
};
@@ -65,6 +67,7 @@ export function useGoogleDrivePicker(options: Props) {
const accessTokenRef = useRef(null);
const tokenClientRef = useRef(null);
const pickerReadyRef = useRef(false);
+ const usedCredentialIdRef = useRef(undefined);
const credentials = useCredentials(getCredentialsSchema(requestedScopes));
const queryClient = useQueryClient();
const isReady = pickerReadyRef.current && !!tokenClientRef.current;
@@ -114,6 +117,7 @@ export function useGoogleDrivePicker(options: Props) {
) {
const credentialId =
selectedCredential?.id || credentials.savedCredentials[0].id;
+ usedCredentialIdRef.current = credentialId;
try {
const queryOptions = getGetV1GetSpecificCredentialByIdQueryOptions(
@@ -178,6 +182,20 @@ export function useGoogleDrivePicker(options: Props) {
}
}
+ // If platform credentials are required but none exist, show error
+ if (options?.requirePlatformCredentials) {
+ const error = new Error(
+ "Please connect your Google account in Settings before using this feature.",
+ );
+ toast({
+ title: "Google Account Required",
+ description: error.message,
+ variant: "destructive",
+ });
+ if (onError) onError(error);
+ return;
+ }
+
const token = accessTokenRef.current || (await requestAccessToken());
buildAndShowPicker(token);
} catch (e) {
@@ -242,6 +260,24 @@ export function useGoogleDrivePicker(options: Props) {
}
function buildAndShowPicker(accessToken: string): void {
+ if (!developerKey) {
+ const error = new Error(
+ "Missing Google Drive Picker Configuration: developer key is not set",
+ );
+ console.error("[useGoogleDrivePicker]", error.message);
+ onError(error);
+ return;
+ }
+
+ if (!appId) {
+ const error = new Error(
+ "Missing Google Drive Picker Configuration: app ID is not set",
+ );
+ console.error("[useGoogleDrivePicker]", error.message);
+ onError(error);
+ return;
+ }
+
const gp = window.google!.picker!;
const builder = new gp.PickerBuilder()
@@ -269,19 +305,40 @@ export function useGoogleDrivePicker(options: Props) {
});
const picker = builder.build();
+
+ // Mark picker as open - prevents parent dialogs from closing on outside clicks
+ document.body.setAttribute("data-google-picker-open", "true");
+
picker.setVisible(true);
}
function handlePickerData(data: any): void {
+ // Google Picker fires callback on multiple events: LOADED, PICKED, CANCEL
+ // Only remove the marker and process when picker is actually closed (PICKED or CANCEL)
+ const gp = window.google?.picker;
+ if (!gp || !data) return;
+
+ const action = data[gp.Response.ACTION];
+
+ // Ignore LOADED action - picker is still open
+ // Note: gp.Action.LOADED exists at runtime but not in types
+ if (action === "loaded") {
+ return;
+ }
+
+ // Remove the marker when picker closes (PICKED or CANCEL)
+ document.body.removeAttribute("data-google-picker-open");
+
try {
const files = normalizePickerResponse(data);
if (files.length) {
- onPicked(files);
+ // Pass the credential ID that was used for this picker session
+ onPicked(files, usedCredentialIdRef.current);
} else {
onCanceled();
}
} catch (e) {
- if (onError) onError(e);
+ onError(e);
}
}
@@ -307,5 +364,6 @@ export function useGoogleDrivePicker(options: Props) {
accessToken: accessTokenRef.current,
selectedCredential,
setSelectedCredential,
+ usedCredentialId: usedCredentialIdRef.current,
};
}
diff --git a/autogpt_platform/frontend/src/components/molecules/Dialog/components/DialogWrap.tsx b/autogpt_platform/frontend/src/components/molecules/Dialog/components/DialogWrap.tsx
index 378aaa9f08..ae4dc8e065 100644
--- a/autogpt_platform/frontend/src/components/molecules/Dialog/components/DialogWrap.tsx
+++ b/autogpt_platform/frontend/src/components/molecules/Dialog/components/DialogWrap.tsx
@@ -4,6 +4,7 @@ import * as RXDialog from "@radix-ui/react-dialog";
import {
CSSProperties,
PropsWithChildren,
+ useCallback,
useEffect,
useRef,
useState,
@@ -20,6 +21,14 @@ interface Props extends BaseProps {
withGradient?: boolean;
}
+/**
+ * Check if an external picker (like Google Drive) is currently open.
+ * Used to prevent dialog from closing when user interacts with the picker.
+ */
+function isExternalPickerOpen(): boolean {
+ return document.body.hasAttribute("data-google-picker-open");
+}
+
export function DialogWrap({
children,
title,
@@ -30,6 +39,30 @@ export function DialogWrap({
const scrollRef = useRef(null);
const [hasVerticalScrollbar, setHasVerticalScrollbar] = useState(false);
+ // Prevent dialog from closing when external picker is open
+ const handleInteractOutside = useCallback(
+ (event: Event) => {
+ if (isExternalPickerOpen()) {
+ event.preventDefault();
+ return;
+ }
+ handleClose();
+ },
+ [handleClose],
+ );
+
+ const handlePointerDownOutside = useCallback((event: Event) => {
+ if (isExternalPickerOpen()) {
+ event.preventDefault();
+ }
+ }, []);
+
+ const handleFocusOutside = useCallback((event: Event) => {
+ if (isExternalPickerOpen()) {
+ event.preventDefault();
+ }
+ }, []);
+
useEffect(() => {
function update() {
const el = scrollRef.current;
@@ -48,12 +81,15 @@ export function DialogWrap({
return (
-
+
Date: Thu, 4 Dec 2025 20:16:43 +0530
Subject: [PATCH 11/58] feat(frontend): add automatic collision resolution for
flow editor nodes (#11506)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
When users drag and drop nodes in the new flow editor, nodes can overlap
with each other, making the graph difficult to read and interact with.
This PR adds an automatic collision resolution algorithm that runs when
a node is dropped, ensuring nodes are automatically separated to prevent
overlaps and maintain a clean, readable graph layout.
### Changes 🏗️
- **Added collision resolution algorithm** (`resolve-collision.ts`):
- Implements an iterative collision detection and resolution system
using Flatbush for efficient spatial indexing
- Automatically resolves overlaps by moving nodes apart along the axis
with the smallest overlap
- Configurable options: `maxIterations`, `overlapThreshold`, and
`margin`
- Uses actual node dimensions (`width`, `height`, or `measured` values)
when available
- **Integrated collision resolution into Flow component**:
- Added `onNodeDragStop` callback that triggers collision resolution
after a node is dropped
- Configured with `maxIterations: Infinity`, `overlapThreshold: 0.5`,
and `margin: 15px`
- **Enhanced node dimension handling**:
- Updated `nodeStore.ts` to prioritize actual node dimensions
(`node.width`, `node.measured.width`) over hardcoded defaults when
calculating positions
- Ensures collision detection uses accurate node sizes
- **Added dependency**:
- Added `flatbush@4.5.0` for efficient spatial indexing and collision
detection
### Checklist 📋
#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- [x] Drag a node and drop it on top of another node - verify nodes
automatically separate
- [x] Drag multiple nodes to create overlapping clusters - verify all
overlaps are resolved
- [x] Drag nodes with different sizes (NOTE blocks vs regular blocks) -
verify collision detection uses correct dimensions
- [x] Drag nodes near the edge of the canvas - verify nodes don't get
pushed off-screen
- [x] Test with a graph containing many nodes (20+) - verify performance
is acceptable
- [x] Verify nodes maintain their positions when no collisions occur
- [x] Test with nodes that have custom measured dimensions - verify
accurate collision detection
---
autogpt_platform/frontend/package.json | 1 +
autogpt_platform/frontend/pnpm-lock.yaml | 31 +++-
.../build/components/FlowEditor/Flow/Flow.tsx | 14 +-
.../Flow/helpers/resolve-collision.ts | 160 ++++++++++++++++++
.../app/(platform)/build/stores/nodeStore.ts | 7 +-
5 files changed, 202 insertions(+), 11 deletions(-)
create mode 100644 autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/helpers/resolve-collision.ts
diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json
index 29a28059a6..ff9073e436 100644
--- a/autogpt_platform/frontend/package.json
+++ b/autogpt_platform/frontend/package.json
@@ -72,6 +72,7 @@
"dotenv": "17.2.3",
"elliptic": "6.6.1",
"embla-carousel-react": "8.6.0",
+ "flatbush": "4.5.0",
"framer-motion": "12.23.24",
"geist": "1.5.1",
"highlight.js": "11.11.1",
diff --git a/autogpt_platform/frontend/pnpm-lock.yaml b/autogpt_platform/frontend/pnpm-lock.yaml
index 406fcb212f..68f727470a 100644
--- a/autogpt_platform/frontend/pnpm-lock.yaml
+++ b/autogpt_platform/frontend/pnpm-lock.yaml
@@ -140,6 +140,9 @@ importers:
embla-carousel-react:
specifier: 8.6.0
version: 8.6.0(react@18.3.1)
+ flatbush:
+ specifier: 4.5.0
+ version: 4.5.0
framer-motion:
specifier: 12.23.24
version: 12.23.24(@emotion/is-prop-valid@1.2.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
@@ -4835,6 +4838,12 @@ packages:
resolution: {integrity: sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==}
engines: {node: ^10.12.0 || >=12.0.0}
+ flatbush@4.5.0:
+ resolution: {integrity: sha512-K7JSilGr4lySRLdJqKY45fu0m/dIs6YAAu/ESqdMsnW3pI0m3gpa6oRc6NDXW161Ov9+rIQjsuyOt5ObdIfgwg==}
+
+ flatqueue@3.0.0:
+ resolution: {integrity: sha512-y1deYaVt+lIc/d2uIcWDNd0CrdQTO5xoCjeFdhX0kSXvm2Acm0o+3bAOiYklTEoRyzwio3sv3/IiBZdusbAe2Q==}
+
flatted@3.3.3:
resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==}
@@ -12531,8 +12540,8 @@ snapshots:
'@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
eslint: 8.57.1
eslint-import-resolver-node: 0.3.9
- eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1)
- eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1)
+ eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@8.57.1)
+ eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
eslint-plugin-jsx-a11y: 6.10.2(eslint@8.57.1)
eslint-plugin-react: 7.37.5(eslint@8.57.1)
eslint-plugin-react-hooks: 5.2.0(eslint@8.57.1)
@@ -12551,7 +12560,7 @@ snapshots:
transitivePeerDependencies:
- supports-color
- eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1):
+ eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0)(eslint@8.57.1):
dependencies:
'@nolyfill/is-core-module': 1.0.39
debug: 4.4.3
@@ -12562,22 +12571,22 @@ snapshots:
tinyglobby: 0.2.15
unrs-resolver: 1.11.1
optionalDependencies:
- eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1)
+ eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
transitivePeerDependencies:
- supports-color
- eslint-module-utils@2.12.1(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1):
+ eslint-module-utils@2.12.1(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1):
dependencies:
debug: 3.2.7
optionalDependencies:
'@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3)
eslint: 8.57.1
eslint-import-resolver-node: 0.3.9
- eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1)
+ eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@8.57.1)
transitivePeerDependencies:
- supports-color
- eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1):
+ eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1):
dependencies:
'@rtsao/scc': 1.1.0
array-includes: 3.1.9
@@ -12588,7 +12597,7 @@ snapshots:
doctrine: 2.1.0
eslint: 8.57.1
eslint-import-resolver-node: 0.3.9
- eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1)
+ eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
hasown: 2.0.2
is-core-module: 2.16.1
is-glob: 4.0.3
@@ -12864,6 +12873,12 @@ snapshots:
keyv: 4.5.4
rimraf: 3.0.2
+ flatbush@4.5.0:
+ dependencies:
+ flatqueue: 3.0.0
+
+ flatqueue@3.0.0: {}
+
flatted@3.3.3: {}
for-each@0.3.5:
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/Flow.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/Flow.tsx
index 37417f632d..13268fc816 100644
--- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/Flow.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/Flow.tsx
@@ -4,7 +4,7 @@ import CustomEdge from "../edges/CustomEdge";
import { useFlow } from "./useFlow";
import { useShallow } from "zustand/react/shallow";
import { useNodeStore } from "../../../stores/nodeStore";
-import { useMemo, useEffect } from "react";
+import { useMemo, useEffect, useCallback } from "react";
import { CustomNode } from "../nodes/CustomNode/CustomNode";
import { useCustomEdge } from "../edges/useCustomEdge";
import { useFlowRealtime } from "./useFlowRealtime";
@@ -21,6 +21,7 @@ import { useGetV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/grap
import { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { okData } from "@/app/api/helpers";
import { TriggerAgentBanner } from "./components/TriggerAgentBanner";
+import { resolveCollisions } from "./helpers/resolve-collision";
export const Flow = () => {
const [{ flowID, flowExecutionID }] = useQueryStates({
@@ -40,6 +41,7 @@ export const Flow = () => {
);
const nodes = useNodeStore(useShallow((state) => state.nodes));
+ const setNodes = useNodeStore(useShallow((state) => state.setNodes));
const onNodesChange = useNodeStore(
useShallow((state) => state.onNodesChange),
);
@@ -48,6 +50,15 @@ export const Flow = () => {
);
const nodeTypes = useMemo(() => ({ custom: CustomNode }), []);
const edgeTypes = useMemo(() => ({ custom: CustomEdge }), []);
+ const onNodeDragStop = useCallback(() => {
+ setNodes(
+ resolveCollisions(nodes, {
+ maxIterations: Infinity,
+ overlapThreshold: 0.5,
+ margin: 15,
+ }),
+ );
+ }, [setNodes, nodes]);
const { edges, onConnect, onEdgesChange } = useCustomEdge();
// We use this hook to load the graph and convert them into custom nodes and edges.
@@ -84,6 +95,7 @@ export const Flow = () => {
edges={edges}
onConnect={onConnect}
onEdgesChange={onEdgesChange}
+ onNodeDragStop={onNodeDragStop}
maxZoom={2}
minZoom={0.1}
onDragOver={onDragOver}
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/helpers/resolve-collision.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/helpers/resolve-collision.ts
new file mode 100644
index 0000000000..c05f00b5fb
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/helpers/resolve-collision.ts
@@ -0,0 +1,160 @@
+import { CustomNode } from "../../nodes/CustomNode/CustomNode";
+import Flatbush from "flatbush";
+
+export type CollisionAlgorithmOptions = {
+ maxIterations: number;
+ overlapThreshold: number;
+ margin: number;
+};
+
+export type CollisionAlgorithm = (
+ nodes: CustomNode[],
+ options: CollisionAlgorithmOptions,
+) => CustomNode[];
+
+type Box = {
+ minX: number;
+ minY: number;
+ maxX: number;
+ maxY: number;
+ id: string;
+ moved: boolean;
+ x: number;
+ y: number;
+ width: number;
+ height: number;
+ node: CustomNode;
+};
+
+function rebuildFlatbush(boxes: Box[]) {
+ const index = new Flatbush(boxes.length);
+ for (const box of boxes) {
+ index.add(box.minX, box.minY, box.maxX, box.maxY);
+ }
+ index.finish();
+ return index;
+}
+
+export const resolveCollisions: CollisionAlgorithm = (
+ nodes,
+ { maxIterations = 50, overlapThreshold = 0.5, margin = 0 },
+) => {
+ // Create boxes from nodes
+ const boxes: Box[] = new Array(nodes.length);
+
+ for (let i = 0; i < nodes.length; i++) {
+ const node = nodes[i];
+ // Use measured dimensions if available, otherwise use defaults
+ const width = (node.width ?? node.measured?.width ?? 0) + margin * 2;
+ const height = (node.height ?? node.measured?.height ?? 0) + margin * 2;
+
+ console.log("width", width);
+ console.log("height", height);
+ const x = node.position.x - margin;
+ const y = node.position.y - margin;
+
+ const box: Box = {
+ minX: x,
+ minY: y,
+ maxX: x + width,
+ maxY: y + height,
+ id: node.id,
+ moved: false,
+ x,
+ y,
+ width,
+ height,
+ node,
+ };
+
+ boxes[i] = box;
+ }
+
+ let numIterations = 0;
+ let index = rebuildFlatbush(boxes);
+
+ for (let iter = 0; iter <= maxIterations; iter++) {
+ let moved = false;
+
+ // For each box, find potential collisions using spatial search
+ for (let i = 0; i < boxes.length; i++) {
+ const A = boxes[i];
+ // Search for boxes that might overlap with A
+ const candidateIndices = index.search(A.minX, A.minY, A.maxX, A.maxY);
+
+ for (const j of candidateIndices) {
+ const B = boxes[j];
+ // Skip self
+ if (A.id === B.id) continue;
+
+ // Calculate center positions
+ const centerAX = A.x + A.width * 0.5;
+ const centerAY = A.y + A.height * 0.5;
+ const centerBX = B.x + B.width * 0.5;
+ const centerBY = B.y + B.height * 0.5;
+
+ // Calculate distance between centers
+ const dx = centerAX - centerBX;
+ const dy = centerAY - centerBY;
+
+ // Calculate overlap along each axis
+ const px = (A.width + B.width) * 0.5 - Math.abs(dx);
+ const py = (A.height + B.height) * 0.5 - Math.abs(dy);
+
+ // Check if there's significant overlap
+ if (px > overlapThreshold && py > overlapThreshold) {
+ A.moved = B.moved = moved = true;
+
+ // Resolve along the smallest overlap axis
+ if (px < py) {
+ // Move along x-axis
+ const sx = dx > 0 ? 1 : -1;
+ const moveAmount = (px / 2) * sx;
+
+ A.x += moveAmount;
+ A.minX += moveAmount;
+ A.maxX += moveAmount;
+ B.x -= moveAmount;
+ B.minX -= moveAmount;
+ B.maxX -= moveAmount;
+ } else {
+ // Move along y-axis
+ const sy = dy > 0 ? 1 : -1;
+ const moveAmount = (py / 2) * sy;
+
+ A.y += moveAmount;
+ A.minY += moveAmount;
+ A.maxY += moveAmount;
+ B.y -= moveAmount;
+ B.minY -= moveAmount;
+ B.maxY -= moveAmount;
+ }
+ }
+ }
+ }
+
+ numIterations = numIterations + 1;
+
+ // Early exit if no overlaps were found
+ if (!moved) {
+ break;
+ }
+
+ index = rebuildFlatbush(boxes);
+ }
+
+ const newNodes = boxes.map((box) => {
+ if (box.moved) {
+ return {
+ ...box.node,
+ position: {
+ x: box.x + margin,
+ y: box.y + margin,
+ },
+ };
+ }
+ return box.node;
+ });
+
+ return newNodes;
+};
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts b/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts
index dc9963194a..3beba0c615 100644
--- a/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts
@@ -139,8 +139,11 @@ export const useNodeStore = create((set, get) => ({
get().nodes.map((node) => ({
position: node.position,
measured: {
- width: node.data.uiType === BlockUIType.NOTE ? 300 : 500,
- height: 400,
+ width:
+ node.width ??
+ node.measured?.width ??
+ (node.data.uiType === BlockUIType.NOTE ? 300 : 500),
+ height: node.height ?? node.measured?.height ?? 400,
},
})),
block.uiType === BlockUIType.NOTE ? 300 : 400,
From f6608e99c811fbf10b7f8a1e2ec56ccf6a74029b Mon Sep 17 00:00:00 2001
From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com>
Date: Thu, 4 Dec 2025 20:42:32 +0530
Subject: [PATCH 12/58] feat(frontend): add expandable text input modal for
better editing experience (#11510)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Text inputs in the form builder can be difficult to edit when dealing
with longer content. Users need a way to expand text inputs into a
larger, more comfortable editing interface, especially for multi-line
text, passwords, and longer string values.
https://github.com/user-attachments/assets/443bf4eb-c77c-4bf6-b34c-77091e005c6d
### Changes 🏗️
- **Added `InputExpanderModal` component**: A new modal component that
provides a larger textarea (300px min-height) for editing text inputs
with the following features:
- Copy-to-clipboard functionality with visual feedback (checkmark icon)
- Toast notification on successful copy
- Auto-focus on open for better UX
- Proper state management to reset values when modal opens/closes
- **Enhanced `TextInputWidget`**:
- Added expand button (ArrowsOutIcon) with tooltip for text, password,
and textarea input types
- Button appears inline next to the input field
- Integrated the new `InputExpanderModal` component
- Improved layout with flexbox to accommodate the expand button
- Added padding-right to input when expand button is visible to prevent
text overlap
- **Refactored file structure**:
- Moved `TextInputWidget.tsx` into `TextInputWidget/` directory
- Updated import path in `widgets/index.ts`
- **UX improvements**:
- Expand button only shows for applicable input types (text, password,
textarea)
- Number and integer inputs don't show expand button (not needed)
- Modal preserves schema title, description, and placeholder for context
### Checklist 📋
#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- [x] Test expand button appears for text input fields
- [x] Test expand button appears for password input fields
- [x] Test expand button appears for textarea fields
- [x] Test expand button does NOT appear for number/integer inputs
- [x] Test clicking expand button opens modal with current value
- [x] Test editing text in modal and saving updates the input field
- [x] Test cancel button closes modal without saving changes
- [x] Test copy-to-clipboard button copies text and shows success state
- [x] Test toast notification appears on successful copy
- [x] Test modal resets to original value when reopened
- [x] Test modal auto-focuses textarea on open
- [x] Test expand button tooltip displays correctly
- [x] Test input field layout with expand button (no text overlap)
---
.../TextInputWidget/InputExpanderModal.tsx | 118 ++++++++++++++++++
.../{ => TextInputWidget}/TextInputWidget.tsx | 86 +++++++++++--
.../renderers/input-renderer/widgets/index.ts | 2 +-
3 files changed, 192 insertions(+), 14 deletions(-)
create mode 100644 autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget/InputExpanderModal.tsx
rename autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/{ => TextInputWidget}/TextInputWidget.tsx (52%)
diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget/InputExpanderModal.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget/InputExpanderModal.tsx
new file mode 100644
index 0000000000..5b19874bfb
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget/InputExpanderModal.tsx
@@ -0,0 +1,118 @@
+"use client";
+
+import React, { FC, useEffect, useState } from "react";
+import { Button } from "@/components/atoms/Button/Button";
+import { Text } from "@/components/atoms/Text/Text";
+import { useToast } from "@/components/molecules/Toast/use-toast";
+import { CheckIcon, CopyIcon } from "@phosphor-icons/react";
+import { Dialog } from "@/components/molecules/Dialog/Dialog";
+import { cn } from "@/lib/utils";
+import { Input } from "@/components/atoms/Input/Input";
+
+interface InputExpanderModalProps {
+ isOpen: boolean;
+ onClose: () => void;
+ onSave: (value: string) => void;
+ title?: string;
+ defaultValue: string;
+ description?: string;
+ placeholder?: string;
+}
+
+export const InputExpanderModal: FC = ({
+ isOpen,
+ onClose,
+ onSave,
+ title,
+ defaultValue,
+ description,
+ placeholder,
+}) => {
+ const [tempValue, setTempValue] = useState(defaultValue);
+ const [isCopied, setIsCopied] = useState(false);
+ const { toast } = useToast();
+
+ useEffect(() => {
+ if (isOpen) {
+ setTempValue(defaultValue);
+ setIsCopied(false);
+ }
+ }, [isOpen, defaultValue]);
+
+ const handleSave = () => {
+ onSave(tempValue);
+ onClose();
+ };
+
+ const copyValue = () => {
+ navigator.clipboard.writeText(tempValue).then(() => {
+ setIsCopied(true);
+ toast({
+ title: "Copied to clipboard!",
+ duration: 2000,
+ });
+ setTimeout(() => setIsCopied(false), 2000);
+ });
+ };
+
+ return (
+ {
+ if (!open) onClose();
+ },
+ }}
+ onClose={onClose}
+ styling={{ maxWidth: "600px", minWidth: "600px" }}
+ >
+
+
+
+ {title || "Edit Text"}
+
+
{description}
+
setTempValue(e.target.value)}
+ placeholder={placeholder || "Enter text..."}
+ autoFocus
+ />
+
+
+
+ {isCopied ? (
+
+ ) : (
+
+ )}
+
+
+
+
+
+ Cancel
+
+
+ Save
+
+
+
+
+
+ );
+};
diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget/TextInputWidget.tsx
similarity index 52%
rename from autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget.tsx
rename to autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget/TextInputWidget.tsx
index e10b09bcf9..d9fea28a8d 100644
--- a/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget.tsx
+++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/TextInputWidget/TextInputWidget.tsx
@@ -1,10 +1,21 @@
+"use client";
+
+import { useState } from "react";
import { WidgetProps } from "@rjsf/utils";
import {
InputType,
mapJsonSchemaTypeToInputType,
} from "@/app/(platform)/build/components/FlowEditor/nodes/helpers";
import { Input } from "@/components/atoms/Input/Input";
+import { Button } from "@/components/atoms/Button/Button";
+import {
+ Tooltip,
+ TooltipContent,
+ TooltipTrigger,
+} from "@/components/atoms/Tooltip/BaseTooltip";
import { BlockUIType } from "@/lib/autogpt-server-api/types";
+import { InputExpanderModal } from "./InputExpanderModal";
+import { ArrowsOutIcon } from "@phosphor-icons/react";
export const TextInputWidget = (props: WidgetProps) => {
const { schema, formContext } = props;
@@ -13,6 +24,8 @@ export const TextInputWidget = (props: WidgetProps) => {
size?: string;
};
+ const [isModalOpen, setIsModalOpen] = useState(false);
+
const mapped = mapJsonSchemaTypeToInputType(schema);
type InputConfig = {
@@ -59,9 +72,25 @@ export const TextInputWidget = (props: WidgetProps) => {
return props.onChange(config.handleChange(v));
};
+ const handleModalSave = (value: string) => {
+ props.onChange(config.handleChange(value));
+ setIsModalOpen(false);
+ };
+
+ const handleModalOpen = () => {
+ setIsModalOpen(true);
+ };
+
// Determine input size based on context
const inputSize = size === "large" ? "medium" : "small";
+ // Check if this input type should show the expand button
+ // Show for text and password types, not for number/integer
+ const showExpandButton =
+ config.htmlType === "text" ||
+ config.htmlType === "password" ||
+ config.htmlType === "textarea";
+
if (uiType === BlockUIType.NOTE) {
return (
{
}
return (
-
+ <>
+
+
+ {showExpandButton && (
+
+
+
+
+
+
+ Expand input
+
+ )}
+
+
+ setIsModalOpen(false)}
+ onSave={handleModalSave}
+ title={schema.title || "Edit value"}
+ description={schema.description || ""}
+ defaultValue={props.value ?? ""}
+ placeholder={schema.placeholder || config.placeholder}
+ />
+ >
);
};
diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/index.ts b/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/index.ts
index cdb02c728c..3788e74fbf 100644
--- a/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/index.ts
+++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/index.ts
@@ -1,6 +1,6 @@
import { RegistryWidgetsType } from "@rjsf/utils";
import { SelectWidget } from "./SelectWidget";
-import { TextInputWidget } from "./TextInputWidget";
+import { TextInputWidget } from "./TextInputWidget/TextInputWidget";
import { SwitchWidget } from "./SwitchWidget";
import { FileWidget } from "./FileWidget";
import { DateInputWidget } from "./DateInputWidget";
From 729400dbe1f2e42eec870a6d86bfff62fc09eaca Mon Sep 17 00:00:00 2001
From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com>
Date: Thu, 4 Dec 2025 20:42:51 +0530
Subject: [PATCH 13/58] feat(frontend): display graph validation errors inline
on node fields (#11524)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
When running a graph in the new builder, validation errors were only
displayed in toast notifications, making it difficult for users to
identify which specific fields had errors. Users needed to see
validation errors directly next to the problematic fields within each
node for better UX and faster debugging.
### Changes 🏗️
- **Error handling in graph execution** (`useRunGraph.ts`):
- Added detection for graph validation errors using
`ApiError.isGraphValidationError()`
- Parse and store node-level errors from backend validation response
- Clear all node errors on successful graph execution
- Enhanced toast messages to guide users to fix validation errors on
highlighted nodes
- **Node store error management** (`nodeStore.ts`):
- Added `errors` field to node data structure
- Implemented `updateNodeErrors()` to set errors for a specific node
- Implemented `clearNodeErrors()` to remove errors from a specific node
- Implemented `getNodeErrors()` to retrieve errors for a specific node
- Implemented `setNodeErrorsForBackendId()` to set errors by backend ID
(supports matching by `metadata.backend_id` or node `id`)
- Implemented `clearAllNodeErrors()` to clear all node errors across the
graph
- **Visual error indication** (`CustomNode.tsx`, `NodeContainer.tsx`):
- Added error detection logic to identify both configuration errors and
output errors
- Applied error styling to nodes with validation errors (using `FAILED`
status styling)
- Nodes with errors now display with red border/ring to visually
indicate issues
- **Field-level error display** (`FieldTemplate.tsx`):
- Fetch node errors from store for the current node
- Match field IDs with error keys (handles both underscore and dot
notation)
- Display field-specific error messages below each field in red text
- Added helper function `getFieldErrorKey()` to normalize field IDs for
error matching
- **Utility helpers** (`helpers.ts`):
- Created `getFieldErrorKey()` function to extract field key from field
ID (removes `root_` prefix)
### Checklist 📋
#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- [x] Create a graph with multiple nodes and intentionally leave
required fields empty
- [x] Run the graph and verify that validation errors appear in toast
notification
- [x] Verify that nodes with errors are highlighted with red border/ring
styling
- [x] Verify that field-specific error messages appear below each
problematic field in red text
- [x] Verify that error messages handle both underscore and dot notation
in field keys
- [x] Fix validation errors and run graph again - verify errors are
cleared
- [x] Verify that successful graph execution clears all node errors
- [x] Test with nodes that have `backend_id` in metadata vs nodes
without
- [x] Verify that nodes without errors don't show error styling
- [x] Test with nested fields and array fields to ensure error matching
works correctly
---
.../components/RunGraph/useRunGraph.ts | 51 +++++++++++++++---
.../components/FlowEditor/Flow/useFlow.ts | 2 +-
.../nodes/CustomNode/CustomNode.tsx | 17 +++++-
.../CustomNode/components/NodeContainer.tsx | 4 ++
.../app/(platform)/build/stores/nodeStore.ts | 52 +++++++++++++++++++
.../templates/FieldTemplate.tsx | 20 ++++++-
.../renderers/input-renderer/utils/helpers.ts | 4 ++
7 files changed, 141 insertions(+), 9 deletions(-)
create mode 100644 autogpt_platform/frontend/src/components/renderers/input-renderer/utils/helpers.ts
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts
index 2b37ebe8a9..db3b6660df 100644
--- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts
@@ -9,6 +9,8 @@ import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
import { useShallow } from "zustand/react/shallow";
import { useState } from "react";
import { useSaveGraph } from "@/app/(platform)/build/hooks/useSaveGraph";
+import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
+import { ApiError } from "@/lib/autogpt-server-api/helpers"; // Check if this exists
export const useRunGraph = () => {
const { saveGraph, isSaving } = useSaveGraph({
@@ -24,6 +26,13 @@ export const useRunGraph = () => {
);
const [openRunInputDialog, setOpenRunInputDialog] = useState(false);
+ const setNodeErrorsForBackendId = useNodeStore(
+ useShallow((state) => state.setNodeErrorsForBackendId),
+ );
+ const clearAllNodeErrors = useNodeStore(
+ useShallow((state) => state.clearAllNodeErrors),
+ );
+
const [{ flowID, flowVersion, flowExecutionID }, setQueryStates] =
useQueryStates({
flowID: parseAsString,
@@ -35,19 +44,49 @@ export const useRunGraph = () => {
usePostV1ExecuteGraphAgent({
mutation: {
onSuccess: (response: any) => {
+ clearAllNodeErrors();
const { id } = response.data as GraphExecutionMeta;
setQueryStates({
flowExecutionID: id,
});
},
onError: (error: any) => {
- // Reset running state on error
setIsGraphRunning(false);
- toast({
- title: (error.detail as string) ?? "An unexpected error occurred.",
- description: "An unexpected error occurred.",
- variant: "destructive",
- });
+ if (error instanceof ApiError && error.isGraphValidationError?.()) {
+ const errorData = error.response?.detail;
+
+ if (errorData?.node_errors) {
+ Object.entries(errorData.node_errors).forEach(
+ ([backendId, nodeErrors]) => {
+ setNodeErrorsForBackendId(
+ backendId,
+ nodeErrors as { [key: string]: string },
+ );
+ },
+ );
+
+ useNodeStore.getState().nodes.forEach((node) => {
+ const backendId = node.data.metadata?.backend_id || node.id;
+ if (!errorData.node_errors[backendId as string]) {
+ useNodeStore.getState().updateNodeErrors(node.id, {});
+ }
+ });
+ }
+
+ toast({
+ title: errorData?.message || "Graph validation failed",
+ description:
+ "Please fix the validation errors on the highlighted nodes and try again.",
+ variant: "destructive",
+ });
+ } else {
+ toast({
+ title:
+ (error.detail as string) ?? "An unexpected error occurred.",
+ description: "An unexpected error occurred.",
+ variant: "destructive",
+ });
+ }
},
},
});
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts
index badb9784b8..64f00871d8 100644
--- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts
@@ -81,7 +81,7 @@ export const useFlow = () => {
{
query: {
select: (res) => res.data as BlockInfo[],
- enabled: !!flowID && !!blockIds,
+ enabled: !!flowID && !!blockIds && blockIds.length > 0,
},
},
);
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx
index 52df3edbc4..974cbe3754 100644
--- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx
@@ -37,6 +37,7 @@ export type CustomNodeData = {
costs: BlockCost[];
categories: BlockInfoCategoriesItem[];
metadata?: NodeModelMetadata;
+ errors?: { [key: string]: string };
};
export type CustomNode = XYNode;
@@ -71,10 +72,24 @@ export const CustomNode: React.FC> = React.memo(
? (data.hardcodedValues.output_schema ?? {})
: data.outputSchema;
+ const hasConfigErrors =
+ data.errors &&
+ Object.values(data.errors).some(
+ (value) => value !== null && value !== undefined && value !== "",
+ );
+
+ const outputData = data.nodeExecutionResult?.output_data;
+ const hasOutputError =
+ typeof outputData === "object" &&
+ outputData !== null &&
+ "error" in outputData;
+
+ const hasErrors = hasConfigErrors || hasOutputError;
+
// Currently all blockTypes design are similar - that's why i am using the same component for all of them
// If in future - if we need some drastic change in some blockTypes design - we can create separate components for them
return (
-
+
{isWebhook &&
}
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContainer.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContainer.tsx
index 657f1ca048..f8d5b2e089 100644
--- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContainer.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContainer.tsx
@@ -3,15 +3,18 @@ import { nodeStyleBasedOnStatus } from "../helpers";
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import { useShallow } from "zustand/react/shallow";
+import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus";
export const NodeContainer = ({
children,
nodeId,
selected,
+ hasErrors, // these are configuration errors that occur before executing the graph -- more like validation errors
}: {
children: React.ReactNode;
nodeId: string;
selected: boolean;
+ hasErrors?: boolean;
}) => {
const status = useNodeStore(
useShallow((state) => state.getNodeStatus(nodeId)),
@@ -22,6 +25,7 @@ export const NodeContainer = ({
"z-12 max-w-[370px] rounded-xlarge ring-1 ring-slate-200/60",
selected && "shadow-lg ring-2 ring-slate-200",
status && nodeStyleBasedOnStatus[status],
+ hasErrors ? nodeStyleBasedOnStatus[AgentExecutionStatus.FAILED] : "",
)}
>
{children}
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts b/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts
index 3beba0c615..2f41c3bb46 100644
--- a/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/build/stores/nodeStore.ts
@@ -53,6 +53,15 @@ type NodeStore = {
getNodeExecutionResult: (nodeId: string) => NodeExecutionResult | undefined;
getNodeBlockUIType: (nodeId: string) => BlockUIType;
hasWebhookNodes: () => boolean;
+
+ updateNodeErrors: (nodeId: string, errors: { [key: string]: string }) => void;
+ clearNodeErrors: (nodeId: string) => void;
+ getNodeErrors: (nodeId: string) => { [key: string]: string } | undefined;
+ setNodeErrorsForBackendId: (
+ backendId: string,
+ errors: { [key: string]: string },
+ ) => void;
+ clearAllNodeErrors: () => void; // Add this
};
export const useNodeStore = create
((set, get) => ({
@@ -253,4 +262,47 @@ export const useNodeStore = create((set, get) => ({
[BlockUIType.WEBHOOK, BlockUIType.WEBHOOK_MANUAL].includes(n.data.uiType),
);
},
+
+ updateNodeErrors: (nodeId: string, errors: { [key: string]: string }) => {
+ set((state) => ({
+ nodes: state.nodes.map((n) =>
+ n.id === nodeId ? { ...n, data: { ...n.data, errors } } : n,
+ ),
+ }));
+ },
+
+ clearNodeErrors: (nodeId: string) => {
+ set((state) => ({
+ nodes: state.nodes.map((n) =>
+ n.id === nodeId ? { ...n, data: { ...n.data, errors: undefined } } : n,
+ ),
+ }));
+ },
+
+ getNodeErrors: (nodeId: string) => {
+ return get().nodes.find((n) => n.id === nodeId)?.data?.errors;
+ },
+
+ setNodeErrorsForBackendId: (
+ backendId: string,
+ errors: { [key: string]: string },
+ ) => {
+ set((state) => ({
+ nodes: state.nodes.map((n) => {
+ // Match by backend_id if nodes have it, or by id
+ const matches =
+ n.data.metadata?.backend_id === backendId || n.id === backendId;
+ return matches ? { ...n, data: { ...n.data, errors } } : n;
+ }),
+ }));
+ },
+
+ clearAllNodeErrors: () => {
+ set((state) => ({
+ nodes: state.nodes.map((n) => ({
+ ...n,
+ data: { ...n.data, errors: undefined },
+ })),
+ }));
+ },
}));
diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx
index b4db9d4159..a056782939 100644
--- a/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx
+++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx
@@ -23,6 +23,7 @@ import { cn } from "@/lib/utils";
import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api";
import { BlockUIType } from "@/lib/autogpt-server-api";
import NodeHandle from "@/app/(platform)/build/components/FlowEditor/handlers/NodeHandle";
+import { getFieldErrorKey } from "../utils/helpers";
const FieldTemplate: React.FC = ({
id: fieldId,
@@ -42,6 +43,11 @@ const FieldTemplate: React.FC = ({
(state) => state.nodeAdvancedStates[nodeId] ?? false,
);
+ const nodeErrors = useNodeStore((state) => {
+ const node = state.nodes.find((n) => n.id === nodeId);
+ return node?.data?.errors;
+ });
+
const { isArrayItem, arrayFieldHandleId } = useContext(ArrayEditorContext);
const isAnyOf =
@@ -89,6 +95,13 @@ const FieldTemplate: React.FC = ({
shouldShowHandle = false;
}
+ const fieldErrorKey = getFieldErrorKey(fieldId);
+ const fieldError =
+ nodeErrors?.[fieldErrorKey] ||
+ nodeErrors?.[fieldErrorKey.replace(/_/g, ".")] ||
+ nodeErrors?.[fieldErrorKey.replace(/\./g, "_")] ||
+ null;
+
return (
= ({
{children}
- )}{" "}
+ )}
+ {fieldError && (
+
+ {fieldError}
+
+ )}
);
};
diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/utils/helpers.ts b/autogpt_platform/frontend/src/components/renderers/input-renderer/utils/helpers.ts
new file mode 100644
index 0000000000..51b628d923
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/utils/helpers.ts
@@ -0,0 +1,4 @@
+export const getFieldErrorKey = (fieldId: string): string => {
+ const withoutRoot = fieldId.startsWith("root_") ? fieldId.slice(5) : fieldId;
+ return withoutRoot;
+};
From 4e87f668e3b850de549e1fdf391711eb11820e5d Mon Sep 17 00:00:00 2001
From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com>
Date: Thu, 4 Dec 2025 20:43:01 +0530
Subject: [PATCH 14/58] feat(frontend): add file input widget with variants and
base64 support (#11533)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This PR enhances the FileInput component to support multiple variants
and modes, and integrates it into the form renderer as a file widget.
The changes enable a more flexible file input experience with both
server upload and local base64 conversion capabilities.
### Compact one
## Default one
### Changes 🏗️
#### FileInput Component Enhancements
- **Added variant support**: Introduced `default` and `compact` variants
- `default`: Full-featured UI with drag & drop, progress bar, and
storage note
- `compact`: Minimal inline design suitable for tight spaces like node
inputs
- **Added mode support**: Introduced `upload` and `base64` modes
- `upload`: Uploads files to server (requires `onUploadFile` and
`uploadProgress`)
- `base64`: Converts files to base64 locally without server upload
- **Improved type safety**: Refactored props using discriminated unions
(`UploadModeProps | Base64ModeProps`) to ensure type-safe usage
- **Enhanced file handling**: Added `getFileLabelFromValue` helper to
extract file labels from base64 data URIs or file paths
- **Better UX**: Added `showStorageNote` prop to control visibility of
storage disclaimer
#### FileWidget Integration
- **Replaced legacy Input**: Migrated from legacy `Input` component to
new `FileInput` component
- **Smart variant selection**: Automatically selects `default` or
`compact` variant based on form context size
- **Base64 mode**: Uses base64 mode for form inputs, eliminating need
for server uploads in builder context
- **Improved accessibility**: Better disabled/readonly state handling
with visual feedback
#### Form Renderer Updates
- **Disabled validation**: Added `noValidate={true}` and
`liveValidate={false}` to prevent premature form validation
#### Storybook Updates
- **Expanded stories**: Added comprehensive stories for all variant/mode
combinations:
- `Default`: Default variant with upload mode
- `Compact`: Compact variant with base64 mode
- `CompactWithUpload`: Compact variant with upload mode
- `DefaultWithBase64`: Default variant with base64 mode
- **Improved documentation**: Updated component descriptions to clearly
explain variants and modes
### Checklist 📋
#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- [x] Test FileInput component in Storybook with all variant/mode
combinations
- [x] Test file upload flow in default variant with upload mode
- [x] Test base64 conversion in compact variant with base64 mode
- [x] Test file widget in form renderer (node inputs)
- [x] Test file type validation (accept prop)
- [x] Test file size validation (maxFileSize prop)
- [x] Test error handling for invalid files
- [x] Test disabled and readonly states
- [x] Test file clearing/removal functionality
- [x] Verify compact variant renders correctly in tight spaces
- [x] Verify default variant shows storage note only in upload mode
- [x] Test drag & drop functionality in default variant
---
.../atoms/FileInput/FileInput.stories.tsx | 123 +++++--
.../components/atoms/FileInput/FileInput.tsx | 318 ++++++++++++++----
.../renderers/input-renderer/FormRenderer.tsx | 2 +
.../input-renderer/widgets/FileWidget.tsx | 36 +-
4 files changed, 371 insertions(+), 108 deletions(-)
diff --git a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.stories.tsx b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.stories.tsx
index 303149084b..2df91f4c9e 100644
--- a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.stories.tsx
+++ b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.stories.tsx
@@ -1,8 +1,8 @@
-import type { Meta, StoryObj } from "@storybook/nextjs";
+import type { Meta } from "@storybook/nextjs";
import { useState } from "react";
import { FileInput } from "./FileInput";
-const meta: Meta = {
+const meta: Meta = {
title: "Atoms/FileInput",
component: FileInput,
tags: ["autodocs"],
@@ -11,26 +11,13 @@ const meta: Meta = {
docs: {
description: {
component:
- "File upload input with progress and removable preview.\n\nProps:\n- accept: optional MIME/extensions filter (e.g. ['image/*', '.pdf']).\n- maxFileSize: optional maximum size in bytes; larger files are rejected with an inline error.",
+ "File upload input with two variants and two modes.\n\n**Variants:**\n- `default`: Full-featured with drag & drop, progress bar, and storage note.\n- `compact`: Minimal inline design for tight spaces like node inputs.\n\n**Modes:**\n- `upload`: Uploads file to server (requires `onUploadFile` and `uploadProgress`).\n- `base64`: Converts file to base64 locally (no server upload).\n\n**Props:**\n- `accept`: optional MIME/extensions filter (e.g. ['image/*', '.pdf']).\n- `maxFileSize`: optional maximum size in bytes; larger files are rejected with an inline error.",
},
},
},
- argTypes: {
- onUploadFile: { action: "upload" },
- accept: {
- control: "object",
- description:
- "Optional accept filter. Supports MIME types (image/*) and extensions (.pdf).",
- },
- maxFileSize: {
- control: "number",
- description: "Optional maximum file size in bytes.",
- },
- },
};
export default meta;
-type Story = StoryObj;
function mockUpload(file: File): Promise<{
file_name: string;
@@ -52,16 +39,16 @@ function mockUpload(file: File): Promise<{
);
}
-export const Basic: Story = {
+export const Default = {
parameters: {
docs: {
description: {
story:
- "This example accepts images or PDFs only and limits size to 5MB. Oversized or disallowed file types show an inline error and do not upload.",
+ "Default variant with upload mode. Full-featured with drag & drop dropzone, progress bar, and storage note. Accepts images or PDFs only and limits size to 5MB.",
},
},
},
- render: function BasicStory() {
+ render: function DefaultStory() {
const [value, setValue] = useState("");
const [progress, setProgress] = useState(0);
@@ -79,6 +66,8 @@ export const Basic: Story = {
return (
("");
+
+ return (
+
+
+
+ );
+ },
+};
+
+export const CompactWithUpload = {
+ parameters: {
+ docs: {
+ description: {
+ story:
+ "Compact variant with upload mode. Useful when you need minimal UI but still want server uploads.",
+ },
+ },
+ },
+ render: function CompactUploadStory() {
+ const [value, setValue] = useState("");
+ const [progress, setProgress] = useState(0);
+
+ async function onUploadFile(file: File) {
+ setProgress(0);
+ const interval = setInterval(() => {
+ setProgress((p) => (p >= 100 ? 100 : p + 20));
+ }, 80);
+ const result = await mockUpload(file);
+ clearInterval(interval);
+ setProgress(100);
+ return result;
+ }
+
+ return (
+
+
+
+ );
+ },
+};
+
+export const DefaultWithBase64 = {
+ parameters: {
+ docs: {
+ description: {
+ story:
+ "Default variant with base64 mode. Full-featured UI but converts to base64 locally instead of uploading.",
+ },
+ },
+ },
+ render: function DefaultBase64Story() {
+ const [value, setValue] = useState("");
+
+ return (
+
+
+
+ );
+ },
+};
diff --git a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx
index 5eadf26410..8f855ad47d 100644
--- a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx
+++ b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx
@@ -1,9 +1,11 @@
import { FileTextIcon, TrashIcon, UploadIcon } from "@phosphor-icons/react";
+import { Cross2Icon } from "@radix-ui/react-icons";
import { useRef, useState } from "react";
import { Button } from "../Button/Button";
import { formatFileSize, getFileLabel } from "./helpers";
import { cn } from "@/lib/utils";
import { Progress } from "../Progress/Progress";
+import { Text } from "../Text/Text";
type UploadFileResult = {
file_name: string;
@@ -12,26 +14,51 @@ type UploadFileResult = {
file_uri: string;
};
-interface Props {
- onUploadFile: (file: File) => Promise;
- uploadProgress: number;
- value?: string; // file URI or empty
- placeholder?: string; // e.g. "Resume", "Document", etc.
+type FileInputVariant = "default" | "compact";
+
+interface BaseProps {
+ value?: string;
+ placeholder?: string;
onChange: (value: string) => void;
className?: string;
- maxFileSize?: number; // bytes (optional)
- accept?: string | string[]; // input accept filter (optional)
+ maxFileSize?: number;
+ accept?: string | string[];
+ variant?: FileInputVariant;
+ showStorageNote?: boolean;
}
-export function FileInput({
- onUploadFile,
- uploadProgress,
- value,
- onChange,
- className,
- maxFileSize,
- accept,
-}: Props) {
+interface UploadModeProps extends BaseProps {
+ mode?: "upload";
+ onUploadFile: (file: File) => Promise;
+ uploadProgress: number;
+}
+
+interface Base64ModeProps extends BaseProps {
+ mode: "base64";
+ onUploadFile?: never;
+ uploadProgress?: never;
+}
+
+type Props = UploadModeProps | Base64ModeProps;
+
+export function FileInput(props: Props) {
+ const {
+ value,
+ onChange,
+ className,
+ maxFileSize,
+ accept,
+ placeholder,
+ variant = "default",
+ showStorageNote = true,
+ mode = "upload",
+ } = props;
+
+ const onUploadFile =
+ mode === "upload" ? (props as UploadModeProps).onUploadFile : undefined;
+ const uploadProgress =
+ mode === "upload" ? (props as UploadModeProps).uploadProgress : 0;
+
const [isUploading, setIsUploading] = useState(false);
const [uploadError, setUploadError] = useState(null);
const [fileInfo, setFileInfo] = useState<{
@@ -40,7 +67,96 @@ export function FileInput({
content_type: string;
} | null>(null);
+ const inputRef = useRef(null);
+
+ const storageNote =
+ "Files are stored securely and will be automatically deleted at most 24 hours after upload.";
+
+ function acceptToString(a?: string | string[]) {
+ if (!a) return "*/*";
+ return Array.isArray(a) ? a.join(",") : a;
+ }
+
+ function isAcceptedType(file: File, a?: string | string[]) {
+ if (!a) return true;
+ const list = Array.isArray(a) ? a : a.split(",").map((s) => s.trim());
+ const fileType = file.type;
+ const fileExt = file.name.includes(".")
+ ? `.${file.name.split(".").pop()}`.toLowerCase()
+ : "";
+
+ for (const entry of list) {
+ if (!entry) continue;
+ const e = entry.toLowerCase();
+ if (e.includes("/")) {
+ const [main, sub] = e.split("/");
+ const [fMain, fSub] = fileType.toLowerCase().split("/");
+ if (!fMain || !fSub) continue;
+ if (sub === "*") {
+ if (main === fMain) return true;
+ } else {
+ if (e === fileType.toLowerCase()) return true;
+ }
+ } else if (e.startsWith(".")) {
+ if (fileExt === e) return true;
+ }
+ }
+ return false;
+ }
+
+ const getFileLabelFromValue = (val: string) => {
+ if (val.startsWith("data:")) {
+ const matches = val.match(/^data:([^;]+);/);
+ if (matches?.[1]) {
+ const mimeParts = matches[1].split("/");
+ if (mimeParts.length > 1) {
+ return `${mimeParts[1].toUpperCase()} file`;
+ }
+ return `${matches[1]} file`;
+ }
+ } else {
+ const pathParts = val.split(".");
+ if (pathParts.length > 1) {
+ const ext = pathParts.pop();
+ if (ext) return `${ext.toUpperCase()} file`;
+ }
+ }
+ return "File";
+ };
+
+ const processFileBase64 = (file: File) => {
+ setIsUploading(true);
+ setUploadError(null);
+
+ const reader = new FileReader();
+ reader.onload = (e) => {
+ const base64String = e.target?.result as string;
+ setFileInfo({
+ name: file.name,
+ size: file.size,
+ content_type: file.type || "application/octet-stream",
+ });
+ onChange(base64String);
+ setIsUploading(false);
+ };
+ reader.onerror = () => {
+ setUploadError("Failed to read file");
+ setIsUploading(false);
+ };
+ reader.readAsDataURL(file);
+ };
+
const uploadFile = async (file: File) => {
+ if (mode === "base64") {
+ processFileBase64(file);
+ return;
+ }
+
+ if (!onUploadFile) {
+ setUploadError("Upload handler not provided");
+ return;
+ }
+
setIsUploading(true);
setUploadError(null);
@@ -53,7 +169,6 @@ export function FileInput({
content_type: result.content_type,
});
- // Set the file URI as the value
onChange(result.file_uri);
} catch (error) {
console.error("Upload failed:", error);
@@ -87,43 +202,104 @@ export function FileInput({
if (file) uploadFile(file);
};
- const inputRef = useRef(null);
-
- const storageNote =
- "Files are stored securely and will be automatically deleted at most 24 hours after upload.";
-
- function acceptToString(a?: string | string[]) {
- if (!a) return "*/*";
- return Array.isArray(a) ? a.join(",") : a;
- }
-
- function isAcceptedType(file: File, a?: string | string[]) {
- if (!a) return true;
- const list = Array.isArray(a) ? a : a.split(",").map((s) => s.trim());
- const fileType = file.type; // e.g. image/png
- const fileExt = file.name.includes(".")
- ? `.${file.name.split(".").pop()}`.toLowerCase()
- : "";
-
- for (const entry of list) {
- if (!entry) continue;
- const e = entry.toLowerCase();
- if (e.includes("/")) {
- // MIME type, support wildcards like image/*
- const [main, sub] = e.split("/");
- const [fMain, fSub] = fileType.toLowerCase().split("/");
- if (!fMain || !fSub) continue;
- if (sub === "*") {
- if (main === fMain) return true;
- } else {
- if (e === fileType.toLowerCase()) return true;
- }
- } else if (e.startsWith(".")) {
- // Extension match
- if (fileExt === e) return true;
- }
+ const handleClear = () => {
+ if (inputRef.current) {
+ inputRef.current.value = "";
}
- return false;
+ onChange("");
+ setFileInfo(null);
+ };
+
+ const displayName = placeholder || "File";
+
+ if (variant === "compact") {
+ return (
+
+
+ {isUploading ? (
+
+
+
+
+ {mode === "base64" ? "Processing..." : "Uploading..."}
+
+ {mode === "upload" && (
+
+ {Math.round(uploadProgress)}%
+
+ )}
+
+ {mode === "upload" && (
+
+ )}
+
+ ) : value ? (
+
+
+
+
+
+ {fileInfo
+ ? getFileLabel(fileInfo.name, fileInfo.content_type)
+ : getFileLabelFromValue(value)}
+
+ {fileInfo && (
+
+ {formatFileSize(fileInfo.size)}
+
+ )}
+
+
+
+
+
+ ) : (
+
+ inputRef.current?.click()}
+ className="flex-1 border-zinc-300 text-xs"
+ disabled={isUploading}
+ >
+
+ {`Upload ${displayName}`}
+
+
+ )}
+
+
+ {uploadError && (
+
+ {uploadError}
+
+ )}
+
+ );
}
return (
@@ -134,15 +310,23 @@ export function FileInput({
- Uploading...
-
- {Math.round(uploadProgress)}%
+
+ {mode === "base64" ? "Processing..." : "Uploading..."}
+ {mode === "upload" && (
+
+ {Math.round(uploadProgress)}%
+
+ )}
-
+ {mode === "upload" && (
+
+ )}
- {storageNote}
+ {showStorageNote && mode === "upload" && (
+ {storageNote}
+ )}
) : value ? (
@@ -154,24 +338,20 @@ export function FileInput({
{fileInfo
? getFileLabel(fileInfo.name, fileInfo.content_type)
- : "File"}
+ : getFileLabelFromValue(value)}
{fileInfo ? formatFileSize(fileInfo.size) : ""}
{
- if (inputRef.current) {
- inputRef.current.value = "";
- }
- onChange("");
- setFileInfo(null);
- }}
+ onClick={handleClear}
/>
- {storageNote}
+ {showStorageNote && mode === "upload" && (
+ {storageNote}
+ )}
) : (
@@ -196,7 +376,9 @@ export function FileInput({
Error: {uploadError}
)}
-
{storageNote}
+ {showStorageNote && mode === "upload" && (
+
{storageNote}
+ )}
)}
diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/FormRenderer.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/FormRenderer.tsx
index 935c9e4337..22c5496efb 100644
--- a/autogpt_platform/frontend/src/components/renderers/input-renderer/FormRenderer.tsx
+++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/FormRenderer.tsx
@@ -45,6 +45,8 @@ export const FormRenderer = ({
onChange={handleChange}
uiSchema={uiSchema}
formData={initialValues}
+ noValidate={true}
+ liveValidate={false}
/>
);
diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/FileWidget.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/FileWidget.tsx
index e15d34a9ba..9d670add37 100644
--- a/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/FileWidget.tsx
+++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/widgets/FileWidget.tsx
@@ -1,33 +1,27 @@
import { WidgetProps } from "@rjsf/utils";
-import { Input } from "@/components/__legacy__/ui/input";
+import { FileInput } from "@/components/atoms/FileInput/FileInput";
export const FileWidget = (props: WidgetProps) => {
- const { onChange, multiple = false, disabled, readonly, id } = props;
+ const { onChange, disabled, readonly, value, schema, formContext } = props;
- // TODO: It's temporary solution for file input, will complete it follow up prs
- const handleChange = (event: React.ChangeEvent
) => {
- const files = event.target.files;
- if (!files || files.length === 0) {
- onChange(undefined);
- return;
- }
+ const { size } = formContext || {};
- const file = files[0];
- const reader = new FileReader();
- reader.onload = (e) => {
- onChange(e.target?.result);
- };
- reader.readAsDataURL(file);
+ const displayName = schema?.title || "File";
+
+ const handleChange = (fileUri: string) => {
+ onChange(fileUri);
};
return (
-
);
};
From 2b9816cfa5030eacb52d525f4462afb5a8b5436b Mon Sep 17 00:00:00 2001
From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com>
Date: Thu, 4 Dec 2025 20:43:13 +0530
Subject: [PATCH 15/58] fix(frontend): ensure node selection state is set
before copying in context menu (#11535)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Fixes an issue where copying a node via the context menu dialog fails on
the first attempt in a new session. The problem occurs because the node
selection state update and the copy operation happen in quick
succession, causing a race condition where `copySelectedNodes()` reads
the store before the selection state is properly updated.
### Checklist 📋
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- [x] Start a new browser session (or clear storage)
- [x] Open the flow editor
- [x] Right-click on a node and select "Copy Node" from the context menu
- [x] Verify the node is successfully copied on the first attempt
---
.../FlowEditor/nodes/CustomNode/components/NodeContextMenu.tsx | 1 +
1 file changed, 1 insertion(+)
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContextMenu.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContextMenu.tsx
index 3aefb81d91..6e482122f6 100644
--- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContextMenu.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeContextMenu.tsx
@@ -28,6 +28,7 @@ export const NodeContextMenu = ({
})),
}));
+ useCopyPasteStore.getState().copySelectedNodes();
useCopyPasteStore.getState().pasteNodes();
};
From 3ccc712463ace5c2e76d4b96ef4d59ce923dbd9a Mon Sep 17 00:00:00 2001
From: Abhimanyu Yadav <122007096+Abhi1992002@users.noreply.github.com>
Date: Thu, 4 Dec 2025 20:43:23 +0530
Subject: [PATCH 16/58] feat(frontend): add host-scoped credentials support to
CredentialField (#11546)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
### Changes 🏗️
This PR adds support for `host_scoped` credential type in the new
builder's `CredentialField` component. This enables blocks that require
sensitive headers for custom API endpoints to configure host-scoped
credentials directly from the credential field.
**Key changes:**
- **Added `HostScopedCredentialsModal` component**
(`models/HostScopedCredentialsModal/`)
- Modal dialog for creating host-scoped credentials with host pattern,
optional title, and dynamic header pairs (key-value)
- Auto-populates host from discriminator value (URL field) when
available
- Supports adding/removing multiple header pairs with validation
- **Enhanced credential filtering logic** (`helpers.ts`)
- Updated `filterCredentialsByProvider` to accept `schema` and
`discriminatorValue` parameters
- Added intelligent filtering for:
- Credential types supported by the block
- OAuth credentials with sufficient scopes
- Host-scoped credentials matched by host from discriminator value
- Extracted `getDiscriminatorValue` helper function for reusability
- **Updated `CredentialField` component**
- Added `supportsHostScoped` check in `useCredentialField` hook
- Conditionally renders `HostScopedCredentialsModal` when
`supportsHostScoped && discriminatorValue` is true
- Exports `discriminatorValue` for use in child components
- **Updated `useCredentialField` hook**
- Calculates `discriminatorValue` using new `getDiscriminatorValue`
helper
- Passes `schema` and `discriminatorValue` to enhanced
`filterCredentialsByProvider` function
- Returns `supportsHostScoped` and `discriminatorValue` for component
consumption
**Technical details:**
- Host extraction uses `getHostFromUrl` utility to parse host from
discriminator value (URL)
- Header pairs are managed as state with add/remove functionality
- Form validation uses `react-hook-form` with `zod` schema
- Credential creation integrates with existing API endpoints and query
invalidation
### Checklist 📋
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- [x] Verify `HostScopedCredentialsModal` appears when block supports
`host_scoped` credentials and discriminator value is present
- [x] Test host auto-population from discriminator value (URL field)
- [x] Test manual host entry when discriminator value is not available
- [x] Test adding/removing multiple header pairs
- [x] Test form validation (host required, empty header pairs filtered
out)
- [x] Test credential creation and successful toast notification
- [x] Verify credentials list refreshes after creation
- [x] Test host-scoped credential filtering matches credentials by host
from URL
- [x] Verify existing credential types (api_key, oauth2, user_password)
still work correctly
- [x] Test OAuth scope filtering still works as expected
- [x] Verify modal only shows when `supportsHostScoped &&
discriminatorValue` conditions are met
---
.../CredentialField/CredentialField.tsx | 10 +
.../fields/CredentialField/helpers.ts | 59 +++++-
.../HostScopedCredentialsModal.tsx | 185 ++++++++++++++++++
.../useHostScopedCredentialsModal.ts | 167 ++++++++++++++++
.../CredentialField/useCredentialField.ts | 17 +-
5 files changed, 430 insertions(+), 8 deletions(-)
create mode 100644 autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/models/HostScopedCredentialsModal/HostScopedCredentialsModal.tsx
create mode 100644 autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/models/HostScopedCredentialsModal/useHostScopedCredentialsModal.ts
diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/CredentialField.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/CredentialField.tsx
index 879f9fe78e..a61d2f0b16 100644
--- a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/CredentialField.tsx
+++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/CredentialField.tsx
@@ -7,6 +7,7 @@ import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api";
import { APIKeyCredentialsModal } from "./models/APIKeyCredentialModal/APIKeyCredentialModal";
import { OAuthCredentialModal } from "./models/OAuthCredentialModal/OAuthCredentialModal";
import { PasswordCredentialsModal } from "./models/PasswordCredentialModal/PasswordCredentialModal";
+import { HostScopedCredentialsModal } from "./models/HostScopedCredentialsModal/HostScopedCredentialsModal";
export const CredentialsField = (props: FieldProps) => {
const {
@@ -22,9 +23,11 @@ export const CredentialsField = (props: FieldProps) => {
supportsApiKey,
supportsOAuth2,
supportsUserPassword,
+ supportsHostScoped,
credentialsExists,
credentialProvider,
setCredential,
+ discriminatorValue,
} = useCredentialField({
credentialSchema: schema as BlockIOCredentialsSubSchema,
formData,
@@ -71,6 +74,13 @@ export const CredentialsField = (props: FieldProps) => {
{supportsUserPassword && (
)}
+ {supportsHostScoped && discriminatorValue && (
+
+ )}