From 9538992eafb868473f8d4b79ab54abe8df6d7430 Mon Sep 17 00:00:00 2001 From: Ubbe Date: Thu, 29 Jan 2026 18:13:28 +0700 Subject: [PATCH 1/8] hotfix(frontend): flags copilot redirects (#11878) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes 🏗️ - Refactor homepage redirect logic to always point to `/` - the `/` route handles whether to redirect to `/copilot` or `/library` based on flag - Simplify `useGetFlag` checks - Add `FeatureFlagPage` and `FeatureFlagRedirect` wrapper components - helpers to do 1 thing or the other, depending on chat enabled/disabled - avoids boilerplate code and mistakes when checking flags and redirecting (especially around race conditions with LD init) ## Checklist 📋 ### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Log in / out of AutoGPT with flag disabled/enabled - [x] Sign up to AutoGPT with flag disabled/enabled - [x] Redirects to homepage always work `/` - [x] Can't access Copilot with disabled flag --- .gitignore | 1 + .../src/app/(no-navbar)/onboarding/page.tsx | 13 ++-- .../src/app/(platform)/auth/callback/route.ts | 14 ++--- .../SessionsList/useSessionsPagination.ts | 4 +- .../src/app/(platform)/copilot/layout.tsx | 11 +++- .../src/app/(platform)/copilot/page.tsx | 12 +--- .../app/(platform)/copilot/useCopilotPage.ts | 31 +--------- .../src/app/(platform)/error/page.tsx | 6 +- .../src/app/(platform)/login/actions.ts | 7 +-- .../src/app/(platform)/login/useLoginPage.ts | 10 +--- .../src/app/(platform)/signup/actions.ts | 7 +-- .../app/(platform)/signup/useSignupPage.ts | 10 +--- .../frontend/src/app/api/helpers.ts | 1 - autogpt_platform/frontend/src/app/page.tsx | 29 +++------ .../layout/Navbar/components/NavbarLink.tsx | 7 +-- .../frontend/src/hooks/useAgentGraph.tsx | 2 +- .../frontend/src/lib/constants.ts | 7 --- .../frontend/src/lib/supabase/helpers.ts | 3 +- 
.../frontend/src/lib/supabase/middleware.ts | 3 +- .../onboarding/onboarding-provider.tsx | 10 +--- .../src/services/environment/index.ts | 10 +++- .../feature-flags/FeatureFlagPage.tsx | 59 +++++++++++++++++++ .../feature-flags/FeatureFlagRedirect.tsx | 51 ++++++++++++++++ .../feature-flags/feature-flag-provider.tsx | 10 ++-- .../services/feature-flags/use-get-flag.ts | 33 +++-------- classic/frontend/.gitignore | 1 + 26 files changed, 188 insertions(+), 164 deletions(-) create mode 100644 autogpt_platform/frontend/src/services/feature-flags/FeatureFlagPage.tsx create mode 100644 autogpt_platform/frontend/src/services/feature-flags/FeatureFlagRedirect.tsx diff --git a/.gitignore b/.gitignore index dfce8ba810..8660637ae5 100644 --- a/.gitignore +++ b/.gitignore @@ -179,3 +179,4 @@ autogpt_platform/backend/settings.py .test-contents .claude/settings.local.json /autogpt_platform/backend/logs +.next \ No newline at end of file diff --git a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/page.tsx b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/page.tsx index 70d9783ccd..246fe52826 100644 --- a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/page.tsx +++ b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/page.tsx @@ -1,10 +1,9 @@ "use client"; +import { getV1OnboardingState } from "@/app/api/__generated__/endpoints/onboarding/onboarding"; +import { getOnboardingStatus, resolveResponse } from "@/app/api/helpers"; import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; import { useRouter } from "next/navigation"; import { useEffect } from "react"; -import { resolveResponse, getOnboardingStatus } from "@/app/api/helpers"; -import { getV1OnboardingState } from "@/app/api/__generated__/endpoints/onboarding/onboarding"; -import { getHomepageRoute } from "@/lib/constants"; export default function OnboardingPage() { const router = useRouter(); @@ -13,12 +12,10 @@ export default function OnboardingPage() { async 
function redirectToStep() { try { // Check if onboarding is enabled (also gets chat flag for redirect) - const { shouldShowOnboarding, isChatEnabled } = - await getOnboardingStatus(); - const homepageRoute = getHomepageRoute(isChatEnabled); + const { shouldShowOnboarding } = await getOnboardingStatus(); if (!shouldShowOnboarding) { - router.replace(homepageRoute); + router.replace("/"); return; } @@ -26,7 +23,7 @@ export default function OnboardingPage() { // Handle completed onboarding if (onboarding.completedSteps.includes("GET_RESULTS")) { - router.replace(homepageRoute); + router.replace("/"); return; } diff --git a/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts b/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts index 15be137f63..e7e2997d0d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts +++ b/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts @@ -1,9 +1,8 @@ -import { getServerSupabase } from "@/lib/supabase/server/getServerSupabase"; -import { getHomepageRoute } from "@/lib/constants"; -import BackendAPI from "@/lib/autogpt-server-api"; -import { NextResponse } from "next/server"; -import { revalidatePath } from "next/cache"; import { getOnboardingStatus } from "@/app/api/helpers"; +import BackendAPI from "@/lib/autogpt-server-api"; +import { getServerSupabase } from "@/lib/supabase/server/getServerSupabase"; +import { revalidatePath } from "next/cache"; +import { NextResponse } from "next/server"; // Handle the callback to complete the user session login export async function GET(request: Request) { @@ -27,13 +26,12 @@ export async function GET(request: Request) { await api.createUser(); // Get onboarding status from backend (includes chat flag evaluated for this user) - const { shouldShowOnboarding, isChatEnabled } = - await getOnboardingStatus(); + const { shouldShowOnboarding } = await getOnboardingStatus(); if (shouldShowOnboarding) { next = "/onboarding"; 
revalidatePath("/onboarding", "layout"); } else { - next = getHomepageRoute(isChatEnabled); + next = "/"; revalidatePath(next, "layout"); } } catch (createUserError) { diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts index 11ddd937af..61e3e6f37f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts @@ -73,9 +73,9 @@ export function useSessionsPagination({ enabled }: UseSessionsPaginationArgs) { }; const reset = () => { + // Only reset the offset - keep existing sessions visible during refetch + // The effect will replace sessions when new data arrives at offset 0 setOffset(0); - setAccumulatedSessions([]); - setTotalCount(null); }; return { diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx index 89cf72e2ba..876e5accfb 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx @@ -1,6 +1,13 @@ -import type { ReactNode } from "react"; +"use client"; +import { FeatureFlagPage } from "@/services/feature-flags/FeatureFlagPage"; +import { Flag } from "@/services/feature-flags/use-get-flag"; +import { type ReactNode } from "react"; import { CopilotShell } from "./components/CopilotShell/CopilotShell"; export default function CopilotLayout({ children }: { children: ReactNode }) { - return {children}; + return ( + + {children} + + ); } diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx 
b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx index 104b238895..e9bc018c1b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx @@ -14,14 +14,8 @@ export default function CopilotPage() { const isInterruptModalOpen = useCopilotStore((s) => s.isInterruptModalOpen); const confirmInterrupt = useCopilotStore((s) => s.confirmInterrupt); const cancelInterrupt = useCopilotStore((s) => s.cancelInterrupt); - const { - greetingName, - quickActions, - isLoading, - hasSession, - initialPrompt, - isReady, - } = state; + const { greetingName, quickActions, isLoading, hasSession, initialPrompt } = + state; const { handleQuickAction, startChatWithPrompt, @@ -29,8 +23,6 @@ export default function CopilotPage() { handleStreamingChange, } = handlers; - if (!isReady) return null; - if (hasSession) { return (
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts index e4713cd24a..9d99f8e7bd 100644 --- a/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/useCopilotPage.ts @@ -3,18 +3,11 @@ import { postV2CreateSession, } from "@/app/api/__generated__/endpoints/chat/chat"; import { useToast } from "@/components/molecules/Toast/use-toast"; -import { getHomepageRoute } from "@/lib/constants"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { useOnboarding } from "@/providers/onboarding/onboarding-provider"; -import { - Flag, - type FlagValues, - useGetFlag, -} from "@/services/feature-flags/use-get-flag"; import { SessionKey, sessionStorage } from "@/services/storage/session-storage"; import * as Sentry from "@sentry/nextjs"; import { useQueryClient } from "@tanstack/react-query"; -import { useFlags } from "launchdarkly-react-client-sdk"; import { useRouter } from "next/navigation"; import { useEffect } from "react"; import { useCopilotStore } from "./copilot-page-store"; @@ -33,22 +26,6 @@ export function useCopilotPage() { const isCreating = useCopilotStore((s) => s.isCreatingSession); const setIsCreating = useCopilotStore((s) => s.setIsCreatingSession); - // Complete VISIT_COPILOT onboarding step to grant $5 welcome bonus - useEffect(() => { - if (isLoggedIn) { - completeStep("VISIT_COPILOT"); - } - }, [completeStep, isLoggedIn]); - - const isChatEnabled = useGetFlag(Flag.CHAT); - const flags = useFlags(); - const homepageRoute = getHomepageRoute(isChatEnabled); - const envEnabled = process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "true"; - const clientId = process.env.NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID; - const isLaunchDarklyConfigured = envEnabled && Boolean(clientId); - const isFlagReady = - !isLaunchDarklyConfigured || flags[Flag.CHAT] !== undefined; - const 
greetingName = getGreetingName(user); const quickActions = getQuickActions(); @@ -58,11 +35,8 @@ export function useCopilotPage() { : undefined; useEffect(() => { - if (!isFlagReady) return; - if (isChatEnabled === false) { - router.replace(homepageRoute); - } - }, [homepageRoute, isChatEnabled, isFlagReady, router]); + if (isLoggedIn) completeStep("VISIT_COPILOT"); + }, [completeStep, isLoggedIn]); async function startChatWithPrompt(prompt: string) { if (!prompt?.trim()) return; @@ -116,7 +90,6 @@ export function useCopilotPage() { isLoading: isUserLoading, hasSession, initialPrompt, - isReady: isFlagReady && isChatEnabled !== false && isLoggedIn, }, handlers: { handleQuickAction, diff --git a/autogpt_platform/frontend/src/app/(platform)/error/page.tsx b/autogpt_platform/frontend/src/app/(platform)/error/page.tsx index b26ca4559b..3cf68178ad 100644 --- a/autogpt_platform/frontend/src/app/(platform)/error/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/error/page.tsx @@ -1,8 +1,6 @@ "use client"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; -import { getHomepageRoute } from "@/lib/constants"; -import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; import { useSearchParams } from "next/navigation"; import { Suspense } from "react"; import { getErrorDetails } from "./helpers"; @@ -11,8 +9,6 @@ function ErrorPageContent() { const searchParams = useSearchParams(); const errorMessage = searchParams.get("message"); const errorDetails = getErrorDetails(errorMessage); - const isChatEnabled = useGetFlag(Flag.CHAT); - const homepageRoute = getHomepageRoute(isChatEnabled); function handleRetry() { // Auth-related errors should redirect to login @@ -30,7 +26,7 @@ function ErrorPageContent() { }, 2000); } else { // For server/network errors, go to home - window.location.href = homepageRoute; + window.location.href = "/"; } } diff --git a/autogpt_platform/frontend/src/app/(platform)/login/actions.ts 
b/autogpt_platform/frontend/src/app/(platform)/login/actions.ts index 447a25a41d..c4867dd123 100644 --- a/autogpt_platform/frontend/src/app/(platform)/login/actions.ts +++ b/autogpt_platform/frontend/src/app/(platform)/login/actions.ts @@ -1,6 +1,5 @@ "use server"; -import { getHomepageRoute } from "@/lib/constants"; import BackendAPI from "@/lib/autogpt-server-api"; import { getServerSupabase } from "@/lib/supabase/server/getServerSupabase"; import { loginFormSchema } from "@/types/auth"; @@ -38,10 +37,8 @@ export async function login(email: string, password: string) { await api.createUser(); // Get onboarding status from backend (includes chat flag evaluated for this user) - const { shouldShowOnboarding, isChatEnabled } = await getOnboardingStatus(); - const next = shouldShowOnboarding - ? "/onboarding" - : getHomepageRoute(isChatEnabled); + const { shouldShowOnboarding } = await getOnboardingStatus(); + const next = shouldShowOnboarding ? "/onboarding" : "/"; return { success: true, diff --git a/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts b/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts index e64cc1858d..9b81965c31 100644 --- a/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts @@ -1,8 +1,6 @@ import { useToast } from "@/components/molecules/Toast/use-toast"; -import { getHomepageRoute } from "@/lib/constants"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { environment } from "@/services/environment"; -import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; import { loginFormSchema, LoginProvider } from "@/types/auth"; import { zodResolver } from "@hookform/resolvers/zod"; import { useRouter, useSearchParams } from "next/navigation"; @@ -22,17 +20,15 @@ export function useLoginPage() { const [isGoogleLoading, setIsGoogleLoading] = useState(false); const [showNotAllowedModal, 
setShowNotAllowedModal] = useState(false); const isCloudEnv = environment.isCloud(); - const isChatEnabled = useGetFlag(Flag.CHAT); - const homepageRoute = getHomepageRoute(isChatEnabled); // Get redirect destination from 'next' query parameter const nextUrl = searchParams.get("next"); useEffect(() => { if (isLoggedIn && !isLoggingIn) { - router.push(nextUrl || homepageRoute); + router.push(nextUrl || "/"); } - }, [homepageRoute, isLoggedIn, isLoggingIn, nextUrl, router]); + }, [isLoggedIn, isLoggingIn, nextUrl, router]); const form = useForm>({ resolver: zodResolver(loginFormSchema), @@ -98,7 +94,7 @@ export function useLoginPage() { } // Prefer URL's next parameter, then use backend-determined route - router.replace(nextUrl || result.next || homepageRoute); + router.replace(nextUrl || result.next || "/"); } catch (error) { toast({ title: diff --git a/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts b/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts index 0fbba54b8e..204482dbe9 100644 --- a/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts +++ b/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts @@ -1,6 +1,5 @@ "use server"; -import { getHomepageRoute } from "@/lib/constants"; import { getServerSupabase } from "@/lib/supabase/server/getServerSupabase"; import { signupFormSchema } from "@/types/auth"; import * as Sentry from "@sentry/nextjs"; @@ -59,10 +58,8 @@ export async function signup( } // Get onboarding status from backend (includes chat flag evaluated for this user) - const { shouldShowOnboarding, isChatEnabled } = await getOnboardingStatus(); - const next = shouldShowOnboarding - ? "/onboarding" - : getHomepageRoute(isChatEnabled); + const { shouldShowOnboarding } = await getOnboardingStatus(); + const next = shouldShowOnboarding ? 
"/onboarding" : "/"; return { success: true, next }; } catch (err) { diff --git a/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts b/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts index 5fa8c2c159..fd78b48735 100644 --- a/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts @@ -1,8 +1,6 @@ import { useToast } from "@/components/molecules/Toast/use-toast"; -import { getHomepageRoute } from "@/lib/constants"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { environment } from "@/services/environment"; -import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; import { LoginProvider, signupFormSchema } from "@/types/auth"; import { zodResolver } from "@hookform/resolvers/zod"; import { useRouter, useSearchParams } from "next/navigation"; @@ -22,17 +20,15 @@ export function useSignupPage() { const [isGoogleLoading, setIsGoogleLoading] = useState(false); const [showNotAllowedModal, setShowNotAllowedModal] = useState(false); const isCloudEnv = environment.isCloud(); - const isChatEnabled = useGetFlag(Flag.CHAT); - const homepageRoute = getHomepageRoute(isChatEnabled); // Get redirect destination from 'next' query parameter const nextUrl = searchParams.get("next"); useEffect(() => { if (isLoggedIn && !isSigningUp) { - router.push(nextUrl || homepageRoute); + router.push(nextUrl || "/"); } - }, [homepageRoute, isLoggedIn, isSigningUp, nextUrl, router]); + }, [isLoggedIn, isSigningUp, nextUrl, router]); const form = useForm>({ resolver: zodResolver(signupFormSchema), @@ -133,7 +129,7 @@ export function useSignupPage() { } // Prefer the URL's next parameter, then result.next (for onboarding), then default - const redirectTo = nextUrl || result.next || homepageRoute; + const redirectTo = nextUrl || result.next || "/"; router.replace(redirectTo); } catch (error) { setIsLoading(false); diff --git 
a/autogpt_platform/frontend/src/app/api/helpers.ts b/autogpt_platform/frontend/src/app/api/helpers.ts index c2104d231a..226f5fa786 100644 --- a/autogpt_platform/frontend/src/app/api/helpers.ts +++ b/autogpt_platform/frontend/src/app/api/helpers.ts @@ -181,6 +181,5 @@ export async function getOnboardingStatus() { const isCompleted = onboarding.completedSteps.includes("CONGRATS"); return { shouldShowOnboarding: status.is_onboarding_enabled && !isCompleted, - isChatEnabled: status.is_chat_enabled, }; } diff --git a/autogpt_platform/frontend/src/app/page.tsx b/autogpt_platform/frontend/src/app/page.tsx index dbfab49469..31d1e96e48 100644 --- a/autogpt_platform/frontend/src/app/page.tsx +++ b/autogpt_platform/frontend/src/app/page.tsx @@ -1,27 +1,14 @@ "use client"; -import { getHomepageRoute } from "@/lib/constants"; -import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; -import { useRouter } from "next/navigation"; -import { useEffect } from "react"; +import { FeatureFlagRedirect } from "@/services/feature-flags/FeatureFlagRedirect"; +import { Flag } from "@/services/feature-flags/use-get-flag"; export default function Page() { - const isChatEnabled = useGetFlag(Flag.CHAT); - const router = useRouter(); - const homepageRoute = getHomepageRoute(isChatEnabled); - const envEnabled = process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "true"; - const clientId = process.env.NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID; - const isLaunchDarklyConfigured = envEnabled && Boolean(clientId); - const isFlagReady = - !isLaunchDarklyConfigured || typeof isChatEnabled === "boolean"; - - useEffect( - function redirectToHomepage() { - if (!isFlagReady) return; - router.replace(homepageRoute); - }, - [homepageRoute, isFlagReady, router], + return ( + ); - - return null; } diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/NavbarLink.tsx b/autogpt_platform/frontend/src/components/layout/Navbar/components/NavbarLink.tsx index eab5a7352f..dff1277384 
100644 --- a/autogpt_platform/frontend/src/components/layout/Navbar/components/NavbarLink.tsx +++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/NavbarLink.tsx @@ -1,7 +1,6 @@ "use client"; import { IconLaptop } from "@/components/__legacy__/ui/icons"; -import { getHomepageRoute } from "@/lib/constants"; import { cn } from "@/lib/utils"; import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; import { ListChecksIcon } from "@phosphor-icons/react/dist/ssr"; @@ -24,11 +23,11 @@ interface Props { export function NavbarLink({ name, href }: Props) { const pathname = usePathname(); const isChatEnabled = useGetFlag(Flag.CHAT); - const homepageRoute = getHomepageRoute(isChatEnabled); + const expectedHomeRoute = isChatEnabled ? "/copilot" : "/library"; const isActive = - href === homepageRoute - ? pathname === "/" || pathname.startsWith(homepageRoute) + href === expectedHomeRoute + ? pathname === "/" || pathname.startsWith(expectedHomeRoute) : pathname.includes(href); return ( diff --git a/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx b/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx index 6c097c395e..d422e389dd 100644 --- a/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx +++ b/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx @@ -66,7 +66,7 @@ export default function useAgentGraph( >(null); const [xyNodes, setXYNodes] = useState([]); const [xyEdges, setXYEdges] = useState([]); - const betaBlocks = useGetFlag(Flag.BETA_BLOCKS); + const betaBlocks = useGetFlag(Flag.BETA_BLOCKS) as string[]; // Filter blocks based on beta flags const availableBlocks = useMemo(() => { diff --git a/autogpt_platform/frontend/src/lib/constants.ts b/autogpt_platform/frontend/src/lib/constants.ts index de5aac1670..19365a56ac 100644 --- a/autogpt_platform/frontend/src/lib/constants.ts +++ b/autogpt_platform/frontend/src/lib/constants.ts @@ -11,10 +11,3 @@ export const API_KEY_HEADER_NAME = "X-API-Key"; // Layout export const 
NAVBAR_HEIGHT_PX = 60; - -// Routes -export function getHomepageRoute(isChatEnabled?: boolean | null): string { - if (isChatEnabled === true) return "/copilot"; - if (isChatEnabled === false) return "/library"; - return "/"; -} diff --git a/autogpt_platform/frontend/src/lib/supabase/helpers.ts b/autogpt_platform/frontend/src/lib/supabase/helpers.ts index 3fd0eacb5f..26f7711bde 100644 --- a/autogpt_platform/frontend/src/lib/supabase/helpers.ts +++ b/autogpt_platform/frontend/src/lib/supabase/helpers.ts @@ -1,4 +1,3 @@ -import { getHomepageRoute } from "@/lib/constants"; import { environment } from "@/services/environment"; import { Key, storage } from "@/services/storage/local-storage"; import { type CookieOptions } from "@supabase/ssr"; @@ -71,7 +70,7 @@ export function getRedirectPath( } if (isAdminPage(path) && userRole !== "admin") { - return getHomepageRoute(); + return "/"; } return null; diff --git a/autogpt_platform/frontend/src/lib/supabase/middleware.ts b/autogpt_platform/frontend/src/lib/supabase/middleware.ts index de8b867ef0..cd1f4a240e 100644 --- a/autogpt_platform/frontend/src/lib/supabase/middleware.ts +++ b/autogpt_platform/frontend/src/lib/supabase/middleware.ts @@ -1,4 +1,3 @@ -import { getHomepageRoute } from "@/lib/constants"; import { environment } from "@/services/environment"; import { createServerClient } from "@supabase/ssr"; import { NextResponse, type NextRequest } from "next/server"; @@ -67,7 +66,7 @@ export async function updateSession(request: NextRequest) { // 2. 
Check if user is authenticated but lacks admin role when accessing admin pages if (user && userRole !== "admin" && isAdminPage(pathname)) { - url.pathname = getHomepageRoute(); + url.pathname = "/"; return NextResponse.redirect(url); } diff --git a/autogpt_platform/frontend/src/providers/onboarding/onboarding-provider.tsx b/autogpt_platform/frontend/src/providers/onboarding/onboarding-provider.tsx index 1ee4b2b6db..42cb99f187 100644 --- a/autogpt_platform/frontend/src/providers/onboarding/onboarding-provider.tsx +++ b/autogpt_platform/frontend/src/providers/onboarding/onboarding-provider.tsx @@ -23,9 +23,7 @@ import { WebSocketNotification, } from "@/lib/autogpt-server-api"; import { useBackendAPI } from "@/lib/autogpt-server-api/context"; -import { getHomepageRoute } from "@/lib/constants"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; -import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; import Link from "next/link"; import { usePathname, useRouter } from "next/navigation"; import { @@ -104,8 +102,6 @@ export default function OnboardingProvider({ const pathname = usePathname(); const router = useRouter(); const { isLoggedIn } = useSupabase(); - const isChatEnabled = useGetFlag(Flag.CHAT); - const homepageRoute = getHomepageRoute(isChatEnabled); useOnboardingTimezoneDetection(); @@ -150,7 +146,7 @@ export default function OnboardingProvider({ if (isOnOnboardingRoute) { const enabled = await resolveResponse(getV1IsOnboardingEnabled()); if (!enabled) { - router.push(homepageRoute); + router.push("/"); return; } } @@ -162,7 +158,7 @@ export default function OnboardingProvider({ isOnOnboardingRoute && shouldRedirectFromOnboarding(onboarding.completedSteps, pathname) ) { - router.push(homepageRoute); + router.push("/"); } } catch (error) { console.error("Failed to initialize onboarding:", error); @@ -177,7 +173,7 @@ export default function OnboardingProvider({ } initializeOnboarding(); - }, [api, homepageRoute, 
isOnOnboardingRoute, router, isLoggedIn, pathname]); + }, [api, isOnOnboardingRoute, router, isLoggedIn, pathname]); const handleOnboardingNotification = useCallback( (notification: WebSocketNotification) => { diff --git a/autogpt_platform/frontend/src/services/environment/index.ts b/autogpt_platform/frontend/src/services/environment/index.ts index f19bc417e3..0214dcb3c8 100644 --- a/autogpt_platform/frontend/src/services/environment/index.ts +++ b/autogpt_platform/frontend/src/services/environment/index.ts @@ -83,6 +83,10 @@ function getPostHogCredentials() { }; } +function getLaunchDarklyClientId() { + return process.env.NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID; +} + function isProductionBuild() { return process.env.NODE_ENV === "production"; } @@ -120,7 +124,10 @@ function isVercelPreview() { } function areFeatureFlagsEnabled() { - return process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "enabled"; + return ( + process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "true" && + Boolean(process.env.NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID) + ); } function isPostHogEnabled() { @@ -143,6 +150,7 @@ export const environment = { getSupabaseAnonKey, getPreviewStealingDev, getPostHogCredentials, + getLaunchDarklyClientId, // Assertions isServerSide, isClientSide, diff --git a/autogpt_platform/frontend/src/services/feature-flags/FeatureFlagPage.tsx b/autogpt_platform/frontend/src/services/feature-flags/FeatureFlagPage.tsx new file mode 100644 index 0000000000..eef0691de2 --- /dev/null +++ b/autogpt_platform/frontend/src/services/feature-flags/FeatureFlagPage.tsx @@ -0,0 +1,59 @@ +"use client"; + +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; +import { useLDClient } from "launchdarkly-react-client-sdk"; +import { useRouter } from "next/navigation"; +import { ReactNode, useEffect, useState } from "react"; +import { environment } from "../environment"; +import { Flag, useGetFlag } from "./use-get-flag"; + +interface FeatureFlagRedirectProps { + flag: Flag; 
+ whenDisabled: string; + children: ReactNode; +} + +export function FeatureFlagPage({ + flag, + whenDisabled, + children, +}: FeatureFlagRedirectProps) { + const [isLoading, setIsLoading] = useState(true); + const router = useRouter(); + const flagValue = useGetFlag(flag); + const ldClient = useLDClient(); + const ldEnabled = environment.areFeatureFlagsEnabled(); + const ldReady = Boolean(ldClient); + const flagEnabled = Boolean(flagValue); + + useEffect(() => { + const initialize = async () => { + if (!ldEnabled) { + router.replace(whenDisabled); + setIsLoading(false); + return; + } + + // Wait for LaunchDarkly to initialize when enabled to prevent race conditions + if (ldEnabled && !ldReady) return; + + try { + await ldClient?.waitForInitialization(); + if (!flagEnabled) router.replace(whenDisabled); + } catch (error) { + console.error(error); + router.replace(whenDisabled); + } finally { + setIsLoading(false); + } + }; + + initialize(); + }, [ldReady, flagEnabled]); + + return isLoading || !flagEnabled ? 
( + + ) : ( + <>{children} + ); +} diff --git a/autogpt_platform/frontend/src/services/feature-flags/FeatureFlagRedirect.tsx b/autogpt_platform/frontend/src/services/feature-flags/FeatureFlagRedirect.tsx new file mode 100644 index 0000000000..b843b5567c --- /dev/null +++ b/autogpt_platform/frontend/src/services/feature-flags/FeatureFlagRedirect.tsx @@ -0,0 +1,51 @@ +"use client"; + +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; +import { useLDClient } from "launchdarkly-react-client-sdk"; +import { useRouter } from "next/navigation"; +import { useEffect } from "react"; +import { environment } from "../environment"; +import { Flag, useGetFlag } from "./use-get-flag"; + +interface FeatureFlagRedirectProps { + flag: Flag; + whenEnabled: string; + whenDisabled: string; +} + +export function FeatureFlagRedirect({ + flag, + whenEnabled, + whenDisabled, +}: FeatureFlagRedirectProps) { + const router = useRouter(); + const flagValue = useGetFlag(flag); + const ldEnabled = environment.areFeatureFlagsEnabled(); + const ldClient = useLDClient(); + const ldReady = Boolean(ldClient); + const flagEnabled = Boolean(flagValue); + + useEffect(() => { + const initialize = async () => { + if (!ldEnabled) { + router.replace(whenDisabled); + return; + } + + // Wait for LaunchDarkly to initialize when enabled to prevent race conditions + if (ldEnabled && !ldReady) return; + + try { + await ldClient?.waitForInitialization(); + router.replace(flagEnabled ? 
whenEnabled : whenDisabled); + } catch (error) { + console.error(error); + router.replace(whenDisabled); + } + }; + + initialize(); + }, [ldReady, flagEnabled]); + + return ; +} diff --git a/autogpt_platform/frontend/src/services/feature-flags/feature-flag-provider.tsx b/autogpt_platform/frontend/src/services/feature-flags/feature-flag-provider.tsx index 47e4bd738a..8b78f4c589 100644 --- a/autogpt_platform/frontend/src/services/feature-flags/feature-flag-provider.tsx +++ b/autogpt_platform/frontend/src/services/feature-flags/feature-flag-provider.tsx @@ -7,14 +7,12 @@ import type { ReactNode } from "react"; import { useMemo } from "react"; import { environment } from "../environment"; -const clientId = process.env.NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID; -const envEnabled = process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "true"; const LAUNCHDARKLY_INIT_TIMEOUT_MS = 5000; export function LaunchDarklyProvider({ children }: { children: ReactNode }) { const { user, isUserLoading } = useSupabase(); - const isCloud = environment.isCloud(); - const isLaunchDarklyConfigured = isCloud && envEnabled && clientId; + const envEnabled = environment.areFeatureFlagsEnabled(); + const clientId = environment.getLaunchDarklyClientId(); const context = useMemo(() => { if (isUserLoading || !user) { @@ -36,7 +34,7 @@ export function LaunchDarklyProvider({ children }: { children: ReactNode }) { }; }, [user, isUserLoading]); - if (!isLaunchDarklyConfigured) { + if (!envEnabled) { return <>{children}; } @@ -44,7 +42,7 @@ export function LaunchDarklyProvider({ children }: { children: ReactNode }) { (flag: T): FlagValues[T] | null { +type FlagValues = typeof defaultFlags; + +export function useGetFlag(flag: T): FlagValues[T] { const currentFlags = useFlags(); const flagValue = currentFlags[flag]; + const areFlagsEnabled = environment.areFeatureFlagsEnabled(); - const envEnabled = process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "true"; - const clientId = 
process.env.NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID; - const isLaunchDarklyConfigured = envEnabled && Boolean(clientId); - - if (!isLaunchDarklyConfigured || isPwMockEnabled) { - return mockFlags[flag]; + if (!areFlagsEnabled || isPwMockEnabled) { + return defaultFlags[flag]; } - return flagValue ?? mockFlags[flag]; + return flagValue ?? defaultFlags[flag]; } diff --git a/classic/frontend/.gitignore b/classic/frontend/.gitignore index 036283f834..eb060615c5 100644 --- a/classic/frontend/.gitignore +++ b/classic/frontend/.gitignore @@ -8,6 +8,7 @@ .buildlog/ .history .svn/ +.next/ migrate_working_dir/ # IntelliJ related From e10ff8d37fb5b94a1dd814d36b8789b574d12be9 Mon Sep 17 00:00:00 2001 From: Otto Date: Fri, 30 Jan 2026 08:32:50 +0000 Subject: [PATCH 2/8] fix(frontend): remove double flag check on homepage redirect (#11894) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes 🏗️ Fixes the hard refresh redirect bug (SECRT-1845) by removing the double feature flag check. ### Before (buggy) ``` / → checks flag → /copilot or /library /copilot (layout) → checks flag → /library if OFF ``` On hard refresh, two sequential LD checks created a race condition window. ### After (fixed) ``` / → always redirects to /copilot /copilot (layout) → single flag check via FeatureFlagPage ``` Single check point = no double-check race condition. ## Root Cause As identified by @0ubbe: the root page and copilot layout were both checking the feature flag. On hard refresh with network latency, the second check could fire before LaunchDarkly fully initialized, causing users to be bounced to `/library`. 
## Test Plan - [ ] Hard refresh on `/` → should go to `/copilot` (flag ON) - [ ] Hard refresh on `/copilot` → should stay on `/copilot` (flag ON) - [ ] With flag OFF → should redirect to `/library` - [ ] Normal navigation still works Fixes: SECRT-1845 cc @0ubbe --- autogpt_platform/frontend/src/app/page.tsx | 24 ++++++++++++++-------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/autogpt_platform/frontend/src/app/page.tsx b/autogpt_platform/frontend/src/app/page.tsx index 31d1e96e48..9a55e986bc 100644 --- a/autogpt_platform/frontend/src/app/page.tsx +++ b/autogpt_platform/frontend/src/app/page.tsx @@ -1,14 +1,20 @@ "use client"; -import { FeatureFlagRedirect } from "@/services/feature-flags/FeatureFlagRedirect"; -import { Flag } from "@/services/feature-flags/use-get-flag"; +import { useRouter } from "next/navigation"; +import { useEffect } from "react"; +/** + * Root page always redirects to /copilot. + * The /copilot page handles the feature flag check and redirects to /library if needed. + * This single-check approach avoids race conditions with LaunchDarkly initialization. + * See: SECRT-1845 + */ export default function Page() { - return ( - - ); + const router = useRouter(); + + useEffect(() => { + router.replace("/copilot"); + }, [router]); + + return null; } From e6438b9a764d26c08f368173fd1a1150608b7065 Mon Sep 17 00:00:00 2001 From: Ubbe Date: Fri, 30 Jan 2026 17:20:03 +0700 Subject: [PATCH 3/8] hotfix(frontend): use server redirect (#11900) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes 🏗️ The page used a client-side redirect (`useEffect` + `router.replace`) which only works after JavaScript loads and hydrates. On deployed sites, if there's any delay or failure in JS execution, users see an empty/black page because the component returns null. **Fix:** Converted to a server-side redirect using redirect() from next/navigation. 
This is a server component now, so: ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Tested locally but will see it fully working once deployed --- autogpt_platform/frontend/src/app/page.tsx | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/autogpt_platform/frontend/src/app/page.tsx b/autogpt_platform/frontend/src/app/page.tsx index 9a55e986bc..88e4e21644 100644 --- a/autogpt_platform/frontend/src/app/page.tsx +++ b/autogpt_platform/frontend/src/app/page.tsx @@ -1,7 +1,4 @@ -"use client"; - -import { useRouter } from "next/navigation"; -import { useEffect } from "react"; +import { redirect } from "next/navigation"; /** * Root page always redirects to /copilot. @@ -10,11 +7,5 @@ import { useEffect } from "react"; * See: SECRT-1845 */ export default function Page() { - const router = useRouter(); - - useEffect(() => { - router.replace("/copilot"); - }, [router]); - - return null; + redirect("/copilot"); } From dbbff046163020dc1d035926f037871befb713c7 Mon Sep 17 00:00:00 2001 From: Otto Date: Fri, 30 Jan 2026 12:08:26 +0000 Subject: [PATCH 4/8] hotfix(frontend): LD remount (#11903) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes 🏗️ Removes the `key` prop from `LDProvider` that was causing full remounts when user context changed. ### The Problem The `key={context.key}` prop was forcing React to unmount and remount the entire LDProvider when switching from anonymous → logged in user: ``` 1. Page loads, user loading → key="anonymous" → LD mounts → flags available ✅ 2. User finishes loading → key="user-123" → React sees key changed 3. LDProvider UNMOUNTS → flags become undefined ❌ 4. 
New LDProvider MOUNTS → initializes again → flags available ✅ ``` This caused the flag values to cycle: `undefined → value → undefined → value` ### The Fix Remove the `key` prop. The LDProvider handles context changes internally via the `context` prop, which triggers `identify()` without remounting the provider. ## Checklist 📋 - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [ ] I have tested my changes according to the test plan: - [ ] Flag values don't flicker on page load - [ ] Flag values update correctly when logging in/out - [ ] No redirect race conditions Related: SECRT-1845 --- .../src/services/feature-flags/feature-flag-provider.tsx | 2 -- 1 file changed, 2 deletions(-) diff --git a/autogpt_platform/frontend/src/services/feature-flags/feature-flag-provider.tsx b/autogpt_platform/frontend/src/services/feature-flags/feature-flag-provider.tsx index 8b78f4c589..9e1c812e85 100644 --- a/autogpt_platform/frontend/src/services/feature-flags/feature-flag-provider.tsx +++ b/autogpt_platform/frontend/src/services/feature-flags/feature-flag-provider.tsx @@ -40,8 +40,6 @@ export function LaunchDarklyProvider({ children }: { children: ReactNode }) { return ( Date: Fri, 30 Jan 2026 20:40:46 +0700 Subject: [PATCH 5/8] hotfix(frontend): fix home redirect (3) (#11904) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes 🏗️ Further improvements to LaunchDarkly initialisation and homepage redirect... 
### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Run the app locally with the flag disabled/enabled, and the redirects work --------- Co-authored-by: claude[bot] <41898282+claude[bot]@users.noreply.github.com> Co-authored-by: Ubbe <0ubbe@users.noreply.github.com> --- autogpt_platform/frontend/src/app/page.tsx | 20 +++++++++++-------- .../feature-flags/feature-flag-provider.tsx | 9 ++++++++- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/autogpt_platform/frontend/src/app/page.tsx b/autogpt_platform/frontend/src/app/page.tsx index 88e4e21644..ce67760eda 100644 --- a/autogpt_platform/frontend/src/app/page.tsx +++ b/autogpt_platform/frontend/src/app/page.tsx @@ -1,11 +1,15 @@ -import { redirect } from "next/navigation"; +"use client"; + +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; +import { useRouter } from "next/navigation"; +import { useEffect } from "react"; -/** - * Root page always redirects to /copilot. - * The /copilot page handles the feature flag check and redirects to /library if needed. - * This single-check approach avoids race conditions with LaunchDarkly initialization. 
- * See: SECRT-1845 - */ export default function Page() { - redirect("/copilot"); + const router = useRouter(); + + useEffect(() => { + router.replace("/copilot"); + }, [router]); + + return ; } diff --git a/autogpt_platform/frontend/src/services/feature-flags/feature-flag-provider.tsx b/autogpt_platform/frontend/src/services/feature-flags/feature-flag-provider.tsx index 9e1c812e85..da073816ac 100644 --- a/autogpt_platform/frontend/src/services/feature-flags/feature-flag-provider.tsx +++ b/autogpt_platform/frontend/src/services/feature-flags/feature-flag-provider.tsx @@ -1,5 +1,6 @@ "use client"; +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import * as Sentry from "@sentry/nextjs"; import { LDProvider } from "launchdarkly-react-client-sdk"; @@ -15,7 +16,9 @@ export function LaunchDarklyProvider({ children }: { children: ReactNode }) { const clientId = environment.getLaunchDarklyClientId(); const context = useMemo(() => { - if (isUserLoading || !user) { + if (isUserLoading) return; + + if (!user) { return { kind: "user" as const, key: "anonymous", @@ -38,6 +41,10 @@ export function LaunchDarklyProvider({ children }: { children: ReactNode }) { return <>{children}; } + if (isUserLoading) { + return ; + } + return ( Date: Fri, 30 Jan 2026 21:00:43 -0600 Subject: [PATCH 6/8] fix(backend/chat): Include input schema in discovery and validate unknown fields (#11916) Co-authored-by: Claude Opus 4.5 --- .../api/features/chat/tools/agent_search.py | 56 ++++++++++++++++++- .../backend/api/features/chat/tools/models.py | 20 +++++++ .../api/features/chat/tools/run_agent.py | 17 ++++++ .../api/features/chat/tools/run_agent_test.py | 39 +++++++++++++ 4 files changed, 130 insertions(+), 2 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py index 
5fa74ba04e..3613bb568d 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py @@ -1,10 +1,13 @@ """Shared agent search functionality for find_agent and find_library_agent tools.""" +import asyncio import logging from typing import Literal from backend.api.features.library import db as library_db from backend.api.features.store import db as store_db +from backend.data import graph as graph_db +from backend.data.graph import GraphModel from backend.util.exceptions import DatabaseError, NotFoundError from .models import ( @@ -14,6 +17,7 @@ from .models import ( NoResultsResponse, ToolResponseBase, ) +from .utils import fetch_graph_from_store_slug logger = logging.getLogger(__name__) @@ -54,7 +58,28 @@ async def search_agents( if source == "marketplace": logger.info(f"Searching marketplace for: {query}") results = await store_db.get_store_agents(search_query=query, page_size=5) - for agent in results.agents: + + # Fetch all graphs in parallel for better performance + async def fetch_marketplace_graph( + creator: str, slug: str + ) -> GraphModel | None: + try: + graph, _ = await fetch_graph_from_store_slug(creator, slug) + return graph + except Exception as e: + logger.warning( + f"Failed to fetch input schema for {creator}/{slug}: {e}" + ) + return None + + graphs = await asyncio.gather( + *( + fetch_marketplace_graph(agent.creator, agent.slug) + for agent in results.agents + ) + ) + + for agent, graph in zip(results.agents, graphs): agents.append( AgentInfo( id=f"{agent.creator}/{agent.slug}", @@ -67,6 +92,7 @@ async def search_agents( rating=agent.rating, runs=agent.runs, is_featured=False, + inputs=graph.input_schema if graph else None, ) ) else: # library @@ -76,7 +102,32 @@ async def search_agents( search_term=query, page_size=10, ) - for agent in results.agents: + + # Fetch all graphs in parallel for better performance + # (list_library_agents doesn't 
include nodes for performance) + async def fetch_library_graph( + graph_id: str, graph_version: int + ) -> GraphModel | None: + try: + return await graph_db.get_graph( + graph_id=graph_id, + version=graph_version, + user_id=user_id, + ) + except Exception as e: + logger.warning( + f"Failed to fetch input schema for graph {graph_id}: {e}" + ) + return None + + graphs = await asyncio.gather( + *( + fetch_library_graph(agent.graph_id, agent.graph_version) + for agent in results.agents + ) + ) + + for agent, graph in zip(results.agents, graphs): agents.append( AgentInfo( id=agent.id, @@ -90,6 +141,7 @@ async def search_agents( has_external_trigger=agent.has_external_trigger, new_output=agent.new_output, graph_id=agent.graph_id, + inputs=graph.input_schema if graph else None, ) ) logger.info(f"Found {len(agents)} agents in {source}") diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/models.py b/autogpt_platform/backend/backend/api/features/chat/tools/models.py index 8552681d03..e635578149 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/models.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/models.py @@ -32,6 +32,8 @@ class ResponseType(str, Enum): OPERATION_STARTED = "operation_started" OPERATION_PENDING = "operation_pending" OPERATION_IN_PROGRESS = "operation_in_progress" + # Input validation + INPUT_VALIDATION_ERROR = "input_validation_error" # Base response model @@ -62,6 +64,10 @@ class AgentInfo(BaseModel): has_external_trigger: bool | None = None new_output: bool | None = None graph_id: str | None = None + inputs: dict[str, Any] | None = Field( + default=None, + description="Input schema for the agent, including field names, types, and defaults", + ) class AgentsFoundResponse(ToolResponseBase): @@ -188,6 +194,20 @@ class ErrorResponse(ToolResponseBase): details: dict[str, Any] | None = None +class InputValidationErrorResponse(ToolResponseBase): + """Response when run_agent receives unknown input 
fields.""" + + type: ResponseType = ResponseType.INPUT_VALIDATION_ERROR + unrecognized_fields: list[str] = Field( + description="List of input field names that were not recognized" + ) + inputs: dict[str, Any] = Field( + description="The agent's valid input schema for reference" + ) + graph_id: str | None = None + graph_version: int | None = None + + # Agent output models class ExecutionOutputInfo(BaseModel): """Summary of a single execution's outputs.""" diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py index a7fa65348a..73d4cf81f2 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py @@ -30,6 +30,7 @@ from .models import ( ErrorResponse, ExecutionOptions, ExecutionStartedResponse, + InputValidationErrorResponse, SetupInfo, SetupRequirementsResponse, ToolResponseBase, @@ -273,6 +274,22 @@ class RunAgentTool(BaseTool): input_properties = graph.input_schema.get("properties", {}) required_fields = set(graph.input_schema.get("required", [])) provided_inputs = set(params.inputs.keys()) + valid_fields = set(input_properties.keys()) + + # Check for unknown input fields + unrecognized_fields = provided_inputs - valid_fields + if unrecognized_fields: + return InputValidationErrorResponse( + message=( + f"Unknown input field(s) provided: {', '.join(sorted(unrecognized_fields))}. " + f"Agent was not executed. Please use the correct field names from the schema." 
+ ), + session_id=session_id, + unrecognized_fields=sorted(unrecognized_fields), + inputs=graph.input_schema, + graph_id=graph.id, + graph_version=graph.version, + ) # If agent has inputs but none were provided AND use_defaults is not set, # always show what's available first so user can decide diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent_test.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent_test.py index 404df2adb6..d5da394fa6 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent_test.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent_test.py @@ -402,3 +402,42 @@ async def test_run_agent_schedule_without_name(setup_test_data): # Should return error about missing schedule_name assert result_data.get("type") == "error" assert "schedule_name" in result_data["message"].lower() + + +@pytest.mark.asyncio(loop_scope="session") +async def test_run_agent_rejects_unknown_input_fields(setup_test_data): + """Test that run_agent returns input_validation_error for unknown input fields.""" + user = setup_test_data["user"] + store_submission = setup_test_data["store_submission"] + + tool = RunAgentTool() + agent_marketplace_id = f"{user.email.split('@')[0]}/{store_submission.slug}" + session = make_session(user_id=user.id) + + # Execute with unknown input field names + response = await tool.execute( + user_id=user.id, + session_id=str(uuid.uuid4()), + tool_call_id=str(uuid.uuid4()), + username_agent_slug=agent_marketplace_id, + inputs={ + "unknown_field": "some value", + "another_unknown": "another value", + }, + session=session, + ) + + assert response is not None + assert hasattr(response, "output") + assert isinstance(response.output, str) + result_data = orjson.loads(response.output) + + # Should return input_validation_error type with unrecognized fields + assert result_data.get("type") == "input_validation_error" + assert "unrecognized_fields" in result_data + assert 
set(result_data["unrecognized_fields"]) == { + "another_unknown", + "unknown_field", + } + assert "inputs" in result_data # Contains the valid schema + assert "Agent was not executed" in result_data["message"] From 2abbb7fbc8abb5853993e4a27161b8b5d96fb9f4 Mon Sep 17 00:00:00 2001 From: Otto Date: Sat, 31 Jan 2026 03:50:21 +0000 Subject: [PATCH 7/8] hotfix(backend): use discriminator for credential matching in run_block (#11908) Co-authored-by: claude[bot] <41898282+claude[bot]@users.noreply.github.com> Co-authored-by: Nicholas Tindle Co-authored-by: Nicholas Tindle Co-authored-by: Claude Opus 4.5 --- .../api/features/chat/tools/run_block.py | 43 +++++++++++++++---- 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py index 3f57236564..3a7118eb87 100644 --- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py +++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py @@ -4,6 +4,8 @@ import logging from collections import defaultdict from typing import Any +from pydantic_core import PydanticUndefined + from backend.api.features.chat.model import ChatSession from backend.data.block import get_block from backend.data.execution import ExecutionContext @@ -73,15 +75,22 @@ class RunBlockTool(BaseTool): self, user_id: str, block: Any, + input_data: dict[str, Any] | None = None, ) -> tuple[dict[str, CredentialsMetaInput], list[CredentialsMetaInput]]: """ Check if user has required credentials for a block. 
+ Args: + user_id: User ID + block: Block to check credentials for + input_data: Input data for the block (used to determine provider via discriminator) + Returns: tuple[matched_credentials, missing_credentials] """ matched_credentials: dict[str, CredentialsMetaInput] = {} missing_credentials: list[CredentialsMetaInput] = [] + input_data = input_data or {} # Get credential field info from block's input schema credentials_fields_info = block.input_schema.get_credentials_fields_info() @@ -94,14 +103,33 @@ class RunBlockTool(BaseTool): available_creds = await creds_manager.store.get_all_creds(user_id) for field_name, field_info in credentials_fields_info.items(): - # field_info.provider is a frozenset of acceptable providers - # field_info.supported_types is a frozenset of acceptable types + effective_field_info = field_info + if field_info.discriminator and field_info.discriminator_mapping: + # Get discriminator from input, falling back to schema default + discriminator_value = input_data.get(field_info.discriminator) + if discriminator_value is None: + field = block.input_schema.model_fields.get( + field_info.discriminator + ) + if field and field.default is not PydanticUndefined: + discriminator_value = field.default + + if ( + discriminator_value + and discriminator_value in field_info.discriminator_mapping + ): + effective_field_info = field_info.discriminate(discriminator_value) + logger.debug( + f"Discriminated provider for {field_name}: " + f"{discriminator_value} -> {effective_field_info.provider}" + ) + matching_cred = next( ( cred for cred in available_creds - if cred.provider in field_info.provider - and cred.type in field_info.supported_types + if cred.provider in effective_field_info.provider + and cred.type in effective_field_info.supported_types ), None, ) @@ -115,8 +143,8 @@ class RunBlockTool(BaseTool): ) else: # Create a placeholder for the missing credential - provider = next(iter(field_info.provider), "unknown") - cred_type = 
next(iter(field_info.supported_types), "api_key") + provider = next(iter(effective_field_info.provider), "unknown") + cred_type = next(iter(effective_field_info.supported_types), "api_key") missing_credentials.append( CredentialsMetaInput( id=field_name, @@ -184,10 +212,9 @@ class RunBlockTool(BaseTool): logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}") - # Check credentials creds_manager = IntegrationCredentialsManager() matched_credentials, missing_credentials = await self._check_block_credentials( - user_id, block + user_id, block, input_data ) if missing_credentials: From f7350c797a537703eaa8303770296b1a9c7f54b0 Mon Sep 17 00:00:00 2001 From: Otto Date: Mon, 2 Feb 2026 13:01:05 +0000 Subject: [PATCH 8/8] fix(copilot): use messages_dict in fallback context compaction (#11922) ## Summary Fixes a bug where the fallback path in context compaction passes `recent_messages` (already sliced) instead of `messages_dict` (full conversation) to `_ensure_tool_pairs_intact`. This caused the function to fail to find assistant messages that exist in the original conversation but were outside the sliced window, resulting in orphan tool_results being sent to Anthropic and rejected with: ``` messages.66.content.0: unexpected tool_use_id found in tool_result blocks: toolu_vrtx_019bi1PDvEn7o5ByAxcS3VdA ``` ## Changes - Pass `messages_dict` and `slice_start` (relative to full conversation) instead of `recent_messages` and `reduced_slice_start` (relative to already-sliced list) ## Testing This is a targeted fix for the fallback path. The bug only manifests when: 1. Token count > 120k (triggers compaction) 2. Initial compaction + summary still exceeds limit (triggers fallback) 3. 
A tool_result's corresponding assistant is in `messages_dict` but not in `recent_messages` ## Related - Fixes SECRT-1861 - Related: SECRT-1839 (original fix that missed this code path) --- .../backend/backend/api/features/chat/service.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index 20216162b5..ddc64d64b2 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -1184,11 +1184,14 @@ async def _stream_chat_chunks( else recent_messages ) # Ensure tool pairs stay intact in the reduced slice - reduced_slice_start = max( + # Note: Search in messages_dict (full conversation) not recent_messages + # (already sliced), so we can find assistants outside the current slice. + # Calculate where reduced_recent starts in messages_dict + reduced_start_in_dict = slice_start + max( 0, len(recent_messages) - keep_count ) reduced_recent = _ensure_tool_pairs_intact( - reduced_recent, recent_messages, reduced_slice_start + reduced_recent, messages_dict, reduced_start_in_dict ) if has_system_prompt: messages = [