diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py
index 5fa74ba04e..bf8ea88be1 100644
--- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py
@@ -1,10 +1,11 @@
"""Shared agent search functionality for find_agent and find_library_agent tools."""
import logging
-from typing import Literal
+from typing import Any, Literal
from backend.api.features.library import db as library_db
from backend.api.features.store import db as store_db
+from backend.data.graph import get_graph
from backend.util.exceptions import DatabaseError, NotFoundError
from .models import (
@@ -14,12 +15,39 @@ from .models import (
NoResultsResponse,
ToolResponseBase,
)
+from .utils import fetch_graph_from_store_slug
logger = logging.getLogger(__name__)
SearchSource = Literal["marketplace", "library"]
+async def _fetch_input_schema_for_store_agent(
+ creator: str, slug: str
+) -> dict[str, Any] | None:
+ """Fetch input schema for a marketplace agent. Returns None on error."""
+ try:
+ graph, _ = await fetch_graph_from_store_slug(creator, slug)
+ if graph and graph.input_schema:
+ return graph.input_schema.get("properties", {})
+ except Exception as e:
+ logger.debug(f"Could not fetch input schema for {creator}/{slug}: {e}")
+ return None
+
+
+async def _fetch_input_schema_for_library_agent(
+ graph_id: str, graph_version: int, user_id: str
+) -> dict[str, Any] | None:
+ """Fetch input schema for a library agent. Returns None on error."""
+ try:
+ graph = await get_graph(graph_id, graph_version, user_id=user_id)
+ if graph and graph.input_schema:
+ return graph.input_schema.get("properties", {})
+ except Exception as e:
+ logger.debug(f"Could not fetch input schema for graph {graph_id}: {e}")
+ return None
+
+
async def search_agents(
query: str,
source: SearchSource,
@@ -55,6 +83,10 @@ async def search_agents(
logger.info(f"Searching marketplace for: {query}")
results = await store_db.get_store_agents(search_query=query, page_size=5)
for agent in results.agents:
+ # Fetch input schema for this agent
+ inputs = await _fetch_input_schema_for_store_agent(
+ agent.creator, agent.slug
+ )
agents.append(
AgentInfo(
id=f"{agent.creator}/{agent.slug}",
@@ -67,6 +99,7 @@ async def search_agents(
rating=agent.rating,
runs=agent.runs,
is_featured=False,
+ inputs=inputs,
)
)
else: # library
@@ -77,6 +110,10 @@ async def search_agents(
page_size=10,
)
for agent in results.agents:
+ # Fetch input schema for this agent
+ inputs = await _fetch_input_schema_for_library_agent(
+ agent.graph_id, agent.graph_version, user_id # type: ignore[arg-type]
+ )
agents.append(
AgentInfo(
id=agent.id,
@@ -90,6 +127,7 @@ async def search_agents(
has_external_trigger=agent.has_external_trigger,
new_output=agent.new_output,
graph_id=agent.graph_id,
+ inputs=inputs,
)
)
logger.info(f"Found {len(agents)} agents in {source}")
diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/models.py b/autogpt_platform/backend/backend/api/features/chat/tools/models.py
index 49b233784e..fef885d0ea 100644
--- a/autogpt_platform/backend/backend/api/features/chat/tools/models.py
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/models.py
@@ -68,6 +68,10 @@ class AgentInfo(BaseModel):
has_external_trigger: bool | None = None
new_output: bool | None = None
graph_id: str | None = None
+ inputs: dict[str, Any] | None = Field(
+ default=None,
+ description="Input schema for the agent (properties from input_schema)",
+ )
class AgentsFoundResponse(ToolResponseBase):
diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py
index a7fa65348a..634e7bed99 100644
--- a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py
@@ -273,6 +273,27 @@ class RunAgentTool(BaseTool):
input_properties = graph.input_schema.get("properties", {})
required_fields = set(graph.input_schema.get("required", []))
provided_inputs = set(params.inputs.keys())
+ valid_fields = set(input_properties.keys())
+
+ # Check for unknown fields - reject early with helpful message
+ unknown_fields = provided_inputs - valid_fields
+ if unknown_fields:
+ valid_list = ", ".join(sorted(valid_fields)) if valid_fields else "none"
+ return AgentDetailsResponse(
+ message=(
+ f"Unknown input field(s) provided: {', '.join(sorted(unknown_fields))}. "
+ f"Valid fields for '{graph.name}': {valid_list}. "
+ "Please check the field names and try again."
+ ),
+ session_id=session_id,
+ agent=self._build_agent_details(
+ graph,
+ extract_credentials_from_schema(graph.credentials_input_schema),
+ ),
+ user_authenticated=True,
+ graph_id=graph.id,
+ graph_version=graph.version,
+ )
# If agent has inputs but none were provided AND use_defaults is not set,
# always show what's available first so user can decide
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/FavoritesSection.test.tsx b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/FavoritesSection.test.tsx
new file mode 100644
index 0000000000..cca4335727
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/FavoritesSection.test.tsx
@@ -0,0 +1,185 @@
+import { describe, expect, test, afterEach } from "vitest";
+import { render, screen, waitFor } from "@/tests/integrations/test-utils";
+import { FavoritesSection } from "../FavoritesSection/FavoritesSection";
+import { server } from "@/mocks/mock-server";
+import { http, HttpResponse } from "msw";
+import {
+ mockAuthenticatedUser,
+ resetAuthState,
+} from "@/tests/integrations/helpers/mock-supabase-auth";
+
+const mockFavoriteAgent = {
+ id: "fav-agent-id",
+ graph_id: "fav-graph-id",
+ graph_version: 1,
+ owner_user_id: "test-owner-id",
+ image_url: null,
+ creator_name: "Test Creator",
+ creator_image_url: "https://example.com/avatar.png",
+ status: "READY",
+ created_at: new Date().toISOString(),
+ updated_at: new Date().toISOString(),
+ name: "Favorite Agent Name",
+ description: "Test favorite agent",
+ input_schema: {},
+ output_schema: {},
+ credentials_input_schema: null,
+ has_external_trigger: false,
+ has_human_in_the_loop: false,
+ has_sensitive_action: false,
+ new_output: false,
+ can_access_graph: true,
+ is_latest_version: true,
+ is_favorite: true,
+};
+
+describe("FavoritesSection", () => {
+ afterEach(() => {
+ resetAuthState();
+ });
+
+ test("renders favorites section when there are favorites", async () => {
+ mockAuthenticatedUser();
+
+ server.use(
+ http.get("*/api/library/agents/favorites*", () => {
+ return HttpResponse.json({
+ agents: [mockFavoriteAgent],
+ pagination: {
+ total_items: 1,
+ total_pages: 1,
+ current_page: 1,
+ page_size: 20,
+ },
+ });
+ }),
+ );
+
+    render(<FavoritesSection searchTerm="" />);
+
+ await waitFor(() => {
+ expect(screen.getByText(/favorites/i)).toBeInTheDocument();
+ });
+ });
+
+ test("renders favorite agent cards", async () => {
+ mockAuthenticatedUser();
+
+ server.use(
+ http.get("*/api/library/agents/favorites*", () => {
+ return HttpResponse.json({
+ agents: [mockFavoriteAgent],
+ pagination: {
+ total_items: 1,
+ total_pages: 1,
+ current_page: 1,
+ page_size: 20,
+ },
+ });
+ }),
+ );
+
+    render(<FavoritesSection searchTerm="" />);
+
+ await waitFor(() => {
+ expect(screen.getByText("Favorite Agent Name")).toBeInTheDocument();
+ });
+ });
+
+ test("shows agent count", async () => {
+ mockAuthenticatedUser();
+
+ server.use(
+ http.get("*/api/library/agents/favorites*", () => {
+ return HttpResponse.json({
+ agents: [mockFavoriteAgent],
+ pagination: {
+ total_items: 1,
+ total_pages: 1,
+ current_page: 1,
+ page_size: 20,
+ },
+ });
+ }),
+ );
+
+    render(<FavoritesSection searchTerm="" />);
+
+ await waitFor(() => {
+ expect(screen.getByTestId("agents-count")).toBeInTheDocument();
+ });
+ });
+
+ test("does not render when there are no favorites", async () => {
+ mockAuthenticatedUser();
+
+ server.use(
+ http.get("*/api/library/agents/favorites*", () => {
+ return HttpResponse.json({
+ agents: [],
+ pagination: {
+ total_items: 0,
+ total_pages: 0,
+ current_page: 1,
+ page_size: 20,
+ },
+ });
+ }),
+ );
+
+    const { container } = render(<FavoritesSection searchTerm="" />);
+
+ // Wait for loading to complete
+ await waitFor(() => {
+ // Component should return null when no favorites
+ expect(container.textContent).toBe("");
+ });
+ });
+
+ test("filters favorites based on search term", async () => {
+ mockAuthenticatedUser();
+
+ // Mock that returns different results based on search term
+ server.use(
+ http.get("*/api/library/agents/favorites*", ({ request }) => {
+ const url = new URL(request.url);
+ const searchTerm = url.searchParams.get("search_term");
+
+ if (searchTerm === "nonexistent") {
+ return HttpResponse.json({
+ agents: [],
+ pagination: {
+ total_items: 0,
+ total_pages: 0,
+ current_page: 1,
+ page_size: 20,
+ },
+ });
+ }
+
+ return HttpResponse.json({
+ agents: [mockFavoriteAgent],
+ pagination: {
+ total_items: 1,
+ total_pages: 1,
+ current_page: 1,
+ page_size: 20,
+ },
+ });
+ }),
+ );
+
+    const { rerender } = render(<FavoritesSection searchTerm="" />);
+
+ await waitFor(() => {
+ expect(screen.getByText("Favorite Agent Name")).toBeInTheDocument();
+ });
+
+ // Rerender with search term that yields no results
+    rerender(<FavoritesSection searchTerm="nonexistent" />);
+
+ await waitFor(() => {
+ expect(screen.queryByText("Favorite Agent Name")).not.toBeInTheDocument();
+ });
+ });
+});
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/LibraryAgentCard.test.tsx b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/LibraryAgentCard.test.tsx
new file mode 100644
index 0000000000..5f26cb834a
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/LibraryAgentCard.test.tsx
@@ -0,0 +1,122 @@
+import { describe, expect, test, afterEach } from "vitest";
+import { render, screen } from "@/tests/integrations/test-utils";
+import { LibraryAgentCard } from "../LibraryAgentCard/LibraryAgentCard";
+import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
+import {
+ mockAuthenticatedUser,
+ resetAuthState,
+} from "@/tests/integrations/helpers/mock-supabase-auth";
+
+const mockAgent: LibraryAgent = {
+ id: "test-agent-id",
+ graph_id: "test-graph-id",
+ graph_version: 1,
+ owner_user_id: "test-owner-id",
+ image_url: null,
+ creator_name: "Test Creator",
+ creator_image_url: "https://example.com/avatar.png",
+ status: "READY",
+ created_at: new Date().toISOString(),
+ updated_at: new Date().toISOString(),
+ name: "Test Agent Name",
+ description: "Test agent description",
+ input_schema: {},
+ output_schema: {},
+ credentials_input_schema: null,
+ has_external_trigger: false,
+ has_human_in_the_loop: false,
+ has_sensitive_action: false,
+ new_output: false,
+ can_access_graph: true,
+ is_latest_version: true,
+ is_favorite: false,
+};
+
+describe("LibraryAgentCard", () => {
+ afterEach(() => {
+ resetAuthState();
+ });
+
+ test("renders agent name", () => {
+ mockAuthenticatedUser();
+    render(<LibraryAgentCard agent={mockAgent} />);
+
+ expect(screen.getByText("Test Agent Name")).toBeInTheDocument();
+ });
+
+ test("renders see runs link", () => {
+ mockAuthenticatedUser();
+    render(<LibraryAgentCard agent={mockAgent} />);
+
+ expect(screen.getByText(/see runs/i)).toBeInTheDocument();
+ });
+
+ test("renders open in builder link when can_access_graph is true", () => {
+ mockAuthenticatedUser();
+    render(<LibraryAgentCard agent={mockAgent} />);
+
+ expect(screen.getByText(/open in builder/i)).toBeInTheDocument();
+ });
+
+ test("does not render open in builder link when can_access_graph is false", () => {
+ mockAuthenticatedUser();
+ const agentWithoutAccess = { ...mockAgent, can_access_graph: false };
+    render(<LibraryAgentCard agent={agentWithoutAccess} />);
+
+ expect(screen.queryByText(/open in builder/i)).not.toBeInTheDocument();
+ });
+
+ test("shows 'FROM MARKETPLACE' label for marketplace agents", () => {
+ mockAuthenticatedUser();
+ const marketplaceAgent = {
+ ...mockAgent,
+ marketplace_listing: {
+ id: "listing-id",
+ name: "Marketplace Agent",
+ slug: "marketplace-agent",
+ creator: {
+ id: "creator-id",
+ name: "Creator Name",
+ slug: "creator-slug",
+ },
+ },
+ };
+    render(<LibraryAgentCard agent={marketplaceAgent} />);
+
+ expect(screen.getByText(/from marketplace/i)).toBeInTheDocument();
+ });
+
+ test("shows 'Built by you' label for user's own agents", () => {
+ mockAuthenticatedUser();
+    render(<LibraryAgentCard agent={mockAgent} />);
+
+ expect(screen.getByText(/built by you/i)).toBeInTheDocument();
+ });
+
+ test("renders favorite button", () => {
+ mockAuthenticatedUser();
+    render(<LibraryAgentCard agent={mockAgent} />);
+
+ // The favorite button should be present (as a heart icon button)
+ const card = screen.getByTestId("library-agent-card");
+ expect(card).toBeInTheDocument();
+ });
+
+ test("links to correct agent detail page", () => {
+ mockAuthenticatedUser();
+    render(<LibraryAgentCard agent={mockAgent} />);
+
+ const link = screen.getByTestId("library-agent-card-see-runs-link");
+ expect(link).toHaveAttribute("href", "/library/agents/test-agent-id");
+ });
+
+ test("links to correct builder page", () => {
+ mockAuthenticatedUser();
+    render(<LibraryAgentCard agent={mockAgent} />);
+
+ const builderLink = screen.getByTestId(
+ "library-agent-card-open-in-builder-link",
+ );
+ expect(builderLink).toHaveAttribute("href", "/build?flowID=test-graph-id");
+ });
+});
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/LibrarySearchBar.test.tsx b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/LibrarySearchBar.test.tsx
new file mode 100644
index 0000000000..9a9b574f01
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/LibrarySearchBar.test.tsx
@@ -0,0 +1,53 @@
+import { describe, expect, test, vi } from "vitest";
+import { render, screen, fireEvent, waitFor } from "@/tests/integrations/test-utils";
+import { LibrarySearchBar } from "../LibrarySearchBar/LibrarySearchBar";
+
+describe("LibrarySearchBar", () => {
+ test("renders search input", () => {
+ const setSearchTerm = vi.fn();
+    render(<LibrarySearchBar setSearchTerm={setSearchTerm} />);
+
+ expect(screen.getByPlaceholderText(/search agents/i)).toBeInTheDocument();
+ });
+
+ test("renders search icon", () => {
+ const setSearchTerm = vi.fn();
+ const { container } = render(
+      <LibrarySearchBar setSearchTerm={setSearchTerm} />,
+ );
+
+ // Check for the magnifying glass icon (SVG element)
+ const searchIcon = container.querySelector("svg");
+ expect(searchIcon).toBeInTheDocument();
+ });
+
+ test("calls setSearchTerm on input change", async () => {
+ const setSearchTerm = vi.fn();
+    render(<LibrarySearchBar setSearchTerm={setSearchTerm} />);
+
+ const input = screen.getByPlaceholderText(/search agents/i);
+ fireEvent.change(input, { target: { value: "test query" } });
+
+ // The search bar uses debouncing, so we need to wait
+ await waitFor(
+ () => {
+ expect(setSearchTerm).toHaveBeenCalled();
+ },
+ { timeout: 1000 },
+ );
+ });
+
+ test("has correct test id", () => {
+ const setSearchTerm = vi.fn();
+    render(<LibrarySearchBar setSearchTerm={setSearchTerm} />);
+
+ expect(screen.getByTestId("search-bar")).toBeInTheDocument();
+ });
+
+ test("input has correct test id", () => {
+ const setSearchTerm = vi.fn();
+    render(<LibrarySearchBar setSearchTerm={setSearchTerm} />);
+
+ expect(screen.getByTestId("library-textbox")).toBeInTheDocument();
+ });
+});
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/LibrarySortMenu.test.tsx b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/LibrarySortMenu.test.tsx
new file mode 100644
index 0000000000..c3623fdac1
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/LibrarySortMenu.test.tsx
@@ -0,0 +1,53 @@
+import { describe, expect, test, vi } from "vitest";
+import { render, screen, fireEvent, waitFor } from "@/tests/integrations/test-utils";
+import { LibrarySortMenu } from "../LibrarySortMenu/LibrarySortMenu";
+
+describe("LibrarySortMenu", () => {
+ test("renders sort dropdown", () => {
+ const setLibrarySort = vi.fn();
+    render(<LibrarySortMenu setLibrarySort={setLibrarySort} />);
+
+ expect(screen.getByTestId("sort-by-dropdown")).toBeInTheDocument();
+ });
+
+ test("shows 'sort by' label on larger screens", () => {
+ const setLibrarySort = vi.fn();
+    render(<LibrarySortMenu setLibrarySort={setLibrarySort} />);
+
+ expect(screen.getByText(/sort by/i)).toBeInTheDocument();
+ });
+
+ test("shows default placeholder text", () => {
+ const setLibrarySort = vi.fn();
+    render(<LibrarySortMenu setLibrarySort={setLibrarySort} />);
+
+ expect(screen.getByText(/last modified/i)).toBeInTheDocument();
+ });
+
+ test("opens dropdown when clicked", async () => {
+ const setLibrarySort = vi.fn();
+    render(<LibrarySortMenu setLibrarySort={setLibrarySort} />);
+
+ const trigger = screen.getByRole("combobox");
+ fireEvent.click(trigger);
+
+ await waitFor(() => {
+ expect(screen.getByText(/creation date/i)).toBeInTheDocument();
+ });
+ });
+
+ test("shows both sort options in dropdown", async () => {
+ const setLibrarySort = vi.fn();
+    render(<LibrarySortMenu setLibrarySort={setLibrarySort} />);
+
+ const trigger = screen.getByRole("combobox");
+ fireEvent.click(trigger);
+
+ await waitFor(() => {
+ expect(screen.getByText(/creation date/i)).toBeInTheDocument();
+ expect(
+ screen.getAllByText(/last modified/i).length,
+ ).toBeGreaterThanOrEqual(1);
+ });
+ });
+});
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/LibraryUploadAgentDialog.test.tsx b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/LibraryUploadAgentDialog.test.tsx
new file mode 100644
index 0000000000..ee3986b843
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/LibraryUploadAgentDialog.test.tsx
@@ -0,0 +1,78 @@
+import { describe, expect, test, afterEach } from "vitest";
+import { render, screen, fireEvent, waitFor } from "@/tests/integrations/test-utils";
+import LibraryUploadAgentDialog from "../LibraryUploadAgentDialog/LibraryUploadAgentDialog";
+import {
+ mockAuthenticatedUser,
+ resetAuthState,
+} from "@/tests/integrations/helpers/mock-supabase-auth";
+
+describe("LibraryUploadAgentDialog", () => {
+ afterEach(() => {
+ resetAuthState();
+ });
+
+ test("renders upload button", () => {
+ mockAuthenticatedUser();
+    render(<LibraryUploadAgentDialog />);
+
+ expect(
+ screen.getByRole("button", { name: /upload agent/i }),
+ ).toBeInTheDocument();
+ });
+
+ test("opens dialog when upload button is clicked", async () => {
+ mockAuthenticatedUser();
+    render(<LibraryUploadAgentDialog />);
+
+ const uploadButton = screen.getByRole("button", { name: /upload agent/i });
+ fireEvent.click(uploadButton);
+
+ await waitFor(() => {
+ expect(screen.getByText("Upload Agent")).toBeInTheDocument();
+ });
+ });
+
+ test("dialog contains agent name input", async () => {
+ mockAuthenticatedUser();
+    render(<LibraryUploadAgentDialog />);
+
+ const uploadButton = screen.getByRole("button", { name: /upload agent/i });
+ fireEvent.click(uploadButton);
+
+ await waitFor(() => {
+ expect(screen.getByLabelText(/agent name/i)).toBeInTheDocument();
+ });
+ });
+
+ test("dialog contains agent description input", async () => {
+ mockAuthenticatedUser();
+    render(<LibraryUploadAgentDialog />);
+
+ const uploadButton = screen.getByRole("button", { name: /upload agent/i });
+ fireEvent.click(uploadButton);
+
+ await waitFor(() => {
+ expect(screen.getByLabelText(/agent description/i)).toBeInTheDocument();
+ });
+ });
+
+ test("upload button is disabled when form is incomplete", async () => {
+ mockAuthenticatedUser();
+    render(<LibraryUploadAgentDialog />);
+
+ const triggerButton = screen.getByRole("button", { name: /upload agent/i });
+ fireEvent.click(triggerButton);
+
+ await waitFor(() => {
+ const submitButton = screen.getByRole("button", { name: /^upload$/i });
+ expect(submitButton).toBeDisabled();
+ });
+ });
+
+ test("has correct test id on trigger button", () => {
+ mockAuthenticatedUser();
+    render(<LibraryUploadAgentDialog />);
+
+ expect(screen.getByTestId("upload-agent-button")).toBeInTheDocument();
+ });
+});
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/auth-state.test.tsx b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/auth-state.test.tsx
new file mode 100644
index 0000000000..1d15fef18d
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/auth-state.test.tsx
@@ -0,0 +1,40 @@
+import { describe, expect, test, afterEach } from "vitest";
+import { render, screen } from "@/tests/integrations/test-utils";
+import LibraryPage from "../../page";
+import {
+ mockAuthenticatedUser,
+ mockUnauthenticatedUser,
+ resetAuthState,
+} from "@/tests/integrations/helpers/mock-supabase-auth";
+
+describe("LibraryPage - Auth State", () => {
+ afterEach(() => {
+ resetAuthState();
+ });
+
+ test("renders page correctly when logged in", async () => {
+ mockAuthenticatedUser();
+    render(<LibraryPage />);
+
+ // Wait for upload button text to appear (indicates page is rendered)
+ expect(
+ await screen.findByText("Upload agent", { exact: false }),
+ ).toBeInTheDocument();
+
+ // Search bar should be visible
+ expect(screen.getByTestId("search-bar")).toBeInTheDocument();
+ });
+
+ test("renders page correctly when logged out", async () => {
+ mockUnauthenticatedUser();
+ render();
+
+ // Wait for upload button text to appear (indicates page is rendered)
+ expect(
+ await screen.findByText("Upload agent", { exact: false }),
+ ).toBeInTheDocument();
+
+ // Search bar should still be visible
+ expect(screen.getByTestId("search-bar")).toBeInTheDocument();
+ });
+});
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/empty-state.test.tsx b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/empty-state.test.tsx
new file mode 100644
index 0000000000..89ea09f058
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/empty-state.test.tsx
@@ -0,0 +1,82 @@
+import { describe, expect, test, afterEach } from "vitest";
+import { render, screen, waitFor } from "@/tests/integrations/test-utils";
+import LibraryPage from "../../page";
+import { server } from "@/mocks/mock-server";
+import { http, HttpResponse } from "msw";
+import {
+ mockAuthenticatedUser,
+ resetAuthState,
+} from "@/tests/integrations/helpers/mock-supabase-auth";
+
+describe("LibraryPage - Empty State", () => {
+ afterEach(() => {
+ resetAuthState();
+ });
+
+ test("handles empty agents list gracefully", async () => {
+ mockAuthenticatedUser();
+
+ server.use(
+ http.get("*/api/library/agents*", () => {
+ return HttpResponse.json({
+ agents: [],
+ pagination: {
+ total_items: 0,
+ total_pages: 0,
+ current_page: 1,
+ page_size: 20,
+ },
+ });
+ }),
+ http.get("*/api/library/agents/favorites*", () => {
+ return HttpResponse.json({
+ agents: [],
+ pagination: {
+ total_items: 0,
+ total_pages: 0,
+ current_page: 1,
+ page_size: 20,
+ },
+ });
+ }),
+ );
+
+    render(<LibraryPage />);
+
+ // Page should still render without crashing
+ // Search bar should be visible even with no agents
+ expect(
+ await screen.findByPlaceholderText(/search agents/i),
+ ).toBeInTheDocument();
+
+ // Upload button should be visible
+ expect(
+ screen.getByRole("button", { name: /upload agent/i }),
+ ).toBeInTheDocument();
+ });
+
+ test("handles empty favorites gracefully", async () => {
+ mockAuthenticatedUser();
+
+ server.use(
+ http.get("*/api/library/agents/favorites*", () => {
+ return HttpResponse.json({
+ agents: [],
+ pagination: {
+ total_items: 0,
+ total_pages: 0,
+ current_page: 1,
+ page_size: 20,
+ },
+ });
+ }),
+ );
+
+    render(<LibraryPage />);
+
+ // Page should still render without crashing
+ expect(
+ await screen.findByPlaceholderText(/search agents/i),
+ ).toBeInTheDocument();
+ });
+});
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/error-handling.test.tsx b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/error-handling.test.tsx
new file mode 100644
index 0000000000..d1a61c861d
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/error-handling.test.tsx
@@ -0,0 +1,59 @@
+import { describe, expect, test, afterEach } from "vitest";
+import { render, screen, waitFor } from "@/tests/integrations/test-utils";
+import LibraryPage from "../../page";
+import { server } from "@/mocks/mock-server";
+import {
+ mockAuthenticatedUser,
+ resetAuthState,
+} from "@/tests/integrations/helpers/mock-supabase-auth";
+import { create500Handler } from "@/tests/integrations/helpers/create-500-handler";
+import {
+ getGetV2ListLibraryAgentsMockHandler422,
+ getGetV2ListFavoriteLibraryAgentsMockHandler422,
+} from "@/app/api/__generated__/endpoints/library/library.msw";
+
+describe("LibraryPage - Error Handling", () => {
+ afterEach(() => {
+ resetAuthState();
+ });
+
+ test("handles API 422 error gracefully", async () => {
+ mockAuthenticatedUser();
+
+ server.use(getGetV2ListLibraryAgentsMockHandler422());
+
+    render(<LibraryPage />);
+
+ // Page should still render without crashing
+ // Search bar should be visible even with error
+ await waitFor(() => {
+ expect(screen.getByPlaceholderText(/search agents/i)).toBeInTheDocument();
+ });
+ });
+
+ test("handles favorites API 422 error gracefully", async () => {
+ mockAuthenticatedUser();
+
+ server.use(getGetV2ListFavoriteLibraryAgentsMockHandler422());
+
+    render(<LibraryPage />);
+
+ // Page should still render without crashing
+ await waitFor(() => {
+ expect(screen.getByPlaceholderText(/search agents/i)).toBeInTheDocument();
+ });
+ });
+
+ test("handles API 500 error gracefully", async () => {
+ mockAuthenticatedUser();
+
+ server.use(create500Handler("get", "*/api/library/agents*"));
+
+    render(<LibraryPage />);
+
+ // Page should still render without crashing
+ await waitFor(() => {
+ expect(screen.getByPlaceholderText(/search agents/i)).toBeInTheDocument();
+ });
+ });
+});
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/loading-state.test.tsx b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/loading-state.test.tsx
new file mode 100644
index 0000000000..a544d15fd8
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/loading-state.test.tsx
@@ -0,0 +1,55 @@
+import { describe, expect, test, afterEach } from "vitest";
+import { render, screen } from "@/tests/integrations/test-utils";
+import LibraryPage from "../../page";
+import { server } from "@/mocks/mock-server";
+import { http, HttpResponse, delay } from "msw";
+import {
+ mockAuthenticatedUser,
+ resetAuthState,
+} from "@/tests/integrations/helpers/mock-supabase-auth";
+
+describe("LibraryPage - Loading State", () => {
+ afterEach(() => {
+ resetAuthState();
+ });
+
+ test("shows loading spinner while agents are being fetched", async () => {
+ mockAuthenticatedUser();
+
+ // Override handlers to add delay to simulate loading
+ server.use(
+ http.get("*/api/library/agents*", async () => {
+ await delay(500);
+ return HttpResponse.json({
+ agents: [],
+ pagination: {
+ total_items: 0,
+ total_pages: 0,
+ current_page: 1,
+ page_size: 20,
+ },
+ });
+ }),
+ http.get("*/api/library/agents/favorites*", async () => {
+ await delay(500);
+ return HttpResponse.json({
+ agents: [],
+ pagination: {
+ total_items: 0,
+ total_pages: 0,
+ current_page: 1,
+ page_size: 20,
+ },
+ });
+ }),
+ );
+
+    const { container } = render(<LibraryPage />);
+
+ // Check for loading spinner (LoadingSpinner component)
+ const loadingElements = container.querySelectorAll(
+ '[class*="animate-spin"]',
+ );
+ expect(loadingElements.length).toBeGreaterThan(0);
+ });
+});
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/rendering.test.tsx b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/rendering.test.tsx
new file mode 100644
index 0000000000..6f51c0323f
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/(platform)/library/components/__tests__/rendering.test.tsx
@@ -0,0 +1,65 @@
+import { describe, expect, test, afterEach } from "vitest";
+import { render, screen, waitFor } from "@/tests/integrations/test-utils";
+import LibraryPage from "../../page";
+import {
+ mockAuthenticatedUser,
+ resetAuthState,
+} from "@/tests/integrations/helpers/mock-supabase-auth";
+
+describe("LibraryPage - Rendering", () => {
+ afterEach(() => {
+ resetAuthState();
+ });
+
+ test("renders search bar", async () => {
+ mockAuthenticatedUser();
+    render(<LibraryPage />);
+
+ expect(
+ await screen.findByPlaceholderText(/search agents/i),
+ ).toBeInTheDocument();
+ });
+
+ test("renders upload agent button", async () => {
+ mockAuthenticatedUser();
+    render(<LibraryPage />);
+
+ expect(
+ await screen.findByRole("button", { name: /upload agent/i }),
+ ).toBeInTheDocument();
+ });
+
+ test("renders agent cards when data is loaded", async () => {
+ mockAuthenticatedUser();
+    render(<LibraryPage />);
+
+ // Wait for agent cards to appear (from mock data)
+ await waitFor(() => {
+ const agentCards = screen.getAllByTestId("library-agent-card");
+ expect(agentCards.length).toBeGreaterThan(0);
+ });
+ });
+
+ test("agent cards display agent name", async () => {
+ mockAuthenticatedUser();
+    render(<LibraryPage />);
+
+ // Wait for agent cards and check they have names
+ await waitFor(() => {
+ const agentNames = screen.getAllByTestId("library-agent-card-name");
+ expect(agentNames.length).toBeGreaterThan(0);
+ });
+ });
+
+ test("agent cards have see runs link", async () => {
+ mockAuthenticatedUser();
+    render(<LibraryPage />);
+
+ await waitFor(() => {
+ const seeRunsLinks = screen.getAllByTestId(
+ "library-agent-card-see-runs-link",
+ );
+ expect(seeRunsLinks.length).toBeGreaterThan(0);
+ });
+ });
+});
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainerAiSdk.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainerAiSdk.ts
new file mode 100644
index 0000000000..1b6bba9c4d
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainerAiSdk.ts
@@ -0,0 +1,246 @@
+/**
+ * useChatContainerAiSdk - ChatContainer hook using Vercel AI SDK
+ *
+ * This is a drop-in replacement for useChatContainer that uses @ai-sdk/react
+ * instead of the custom streaming implementation. The API surface is identical
+ * to enable easy A/B testing and gradual migration.
+ */
+
+import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse";
+import { useEffect, useMemo, useRef } from "react";
+import type { UIMessage } from "ai";
+import { useAiSdkChat } from "../../useAiSdkChat";
+import { usePageContext } from "../../usePageContext";
+import type { ChatMessageData } from "../ChatMessage/useChatMessage";
+import {
+ filterAuthMessages,
+ hasSentInitialPrompt,
+ markInitialPromptSent,
+ processInitialMessages,
+} from "./helpers";
+
+// Helper to convert backend messages to AI SDK UIMessage format.
+// Text content becomes "text" parts; assistant tool_calls and their matching
+// "tool" role responses become "tool-<name>" parts.
+function convertToUIMessages(
+  messages: SessionDetailResponse["messages"],
+): UIMessage[] {
+  const result: UIMessage[] = [];
+  // tool_call_id -> tool name, collected from assistant tool_calls so that
+  // later "tool" role responses can be attributed to the correct tool
+  // (previously this was hardcoded to "unknown").
+  const toolNamesById = new Map<string, string>();
+
+  for (const msg of messages) {
+    // Skip messages with no role, and messages that carry neither text
+    // content nor tool calls (nothing to render). Assistant tool-call
+    // messages often have empty content, so they must not be dropped here
+    // (the previous `!msg.content` guard discarded them).
+    if (!msg.role) continue;
+    if (!msg.content && !msg.tool_calls) continue;
+
+    // Create parts based on message type
+    const parts: UIMessage["parts"] = [];
+
+    if (msg.role === "user" || msg.role === "assistant") {
+      // Only non-empty string content becomes a text part.
+      if (typeof msg.content === "string" && msg.content) {
+        parts.push({ type: "text", text: msg.content });
+      }
+    }
+
+    // Handle tool calls in assistant messages
+    if (msg.role === "assistant" && msg.tool_calls) {
+      for (const toolCall of msg.tool_calls as Array<{
+        id: string;
+        type: string;
+        function: { name: string; arguments: string };
+      }>) {
+        if (toolCall.type === "function") {
+          // Remember the name so the later tool response can reference it.
+          toolNamesById.set(toolCall.id, toolCall.function.name);
+          let args = {};
+          try {
+            args = JSON.parse(toolCall.function.arguments);
+          } catch {
+            // Keep empty args
+          }
+          parts.push({
+            type: `tool-${toolCall.function.name}` as `tool-${string}`,
+            toolCallId: toolCall.id,
+            toolName: toolCall.function.name,
+            state: "input-available",
+            input: args,
+          } as UIMessage["parts"][number]);
+        }
+      }
+    }
+
+    // Handle tool responses
+    if (msg.role === "tool" && msg.tool_call_id) {
+      // Look up the name recorded from the originating tool call; fall back
+      // to "unknown" for orphaned responses.
+      const toolName =
+        toolNamesById.get(msg.tool_call_id as string) ?? "unknown";
+      let output: unknown = msg.content;
+      try {
+        output =
+          typeof msg.content === "string"
+            ? JSON.parse(msg.content)
+            : msg.content;
+      } catch {
+        // Keep as string
+      }
+
+      parts.push({
+        type: `tool-${toolName}` as `tool-${string}`,
+        toolCallId: msg.tool_call_id as string,
+        toolName,
+        state: "output-available",
+        output,
+      } as UIMessage["parts"][number]);
+    }
+
+    if (parts.length > 0) {
+      result.push({
+        id: msg.id || `msg-${Date.now()}-${Math.random()}`,
+        role: msg.role === "tool" ? "assistant" : (msg.role as "user" | "assistant"),
+        parts,
+        createdAt: msg.created_at ? new Date(msg.created_at as string) : new Date(),
+      });
+    }
+  }
+
+  return result;
+}
+
+// Arguments accepted by useChatContainerAiSdk.
+interface Args {
+  // Active chat session id; null when no session exists yet.
+  sessionId: string | null;
+  // Messages already persisted for this session (backend shape).
+  initialMessages: SessionDetailResponse["messages"];
+  // Optional prompt auto-sent once when a fresh session has no messages.
+  initialPrompt?: string;
+  // Forwarded to useAiSdkChat; called when a tool reports operation_started.
+  onOperationStarted?: () => void;
+}
+
+export function useChatContainerAiSdk({
+  sessionId,
+  initialMessages,
+  initialPrompt,
+  onOperationStarted,
+}: Args) {
+  const { capturePageContext } = usePageContext();
+  // Ref to the latest sendMessage so the initial-prompt effect does not need
+  // sendMessage in its dependency list. Fixed: the return type was a bare
+  // `Promise` (invalid TypeScript — missing its type argument).
+  const sendMessageRef = useRef<
+    (
+      content: string,
+      isUserMessage?: boolean,
+      context?: { url: string; content: string },
+    ) => Promise<void>
+  >();
+
+  // Convert initial messages to AI SDK format
+  const uiMessages = useMemo(
+    () => convertToUIMessages(initialMessages),
+    [initialMessages],
+  );
+
+  const {
+    messages: aiSdkMessages,
+    streamingChunks,
+    isStreaming,
+    error,
+    isRegionBlockedModalOpen,
+    setIsRegionBlockedModalOpen,
+    sendMessage,
+    stopStreaming,
+  } = useAiSdkChat({
+    sessionId,
+    initialMessages: uiMessages,
+    onOperationStarted,
+  });
+
+  // Keep ref updated for initial prompt handling
+  sendMessageRef.current = sendMessage;
+
+  // Merge AI SDK messages with processed initial messages so both history
+  // and newly streamed messages render, deduplicated by a stable key.
+  const allMessages = useMemo(() => {
+    const processedInitial = processInitialMessages(initialMessages);
+
+    const seenKeys = new Set<string>();
+    const result: ChatMessageData[] = [];
+
+    // Append a message only if its dedup key has not been seen yet.
+    const pushUnique = (msg: ChatMessageData) => {
+      const key = getMessageKey(msg);
+      if (seenKeys.has(key)) return;
+      seenKeys.add(key);
+      result.push(msg);
+    };
+
+    // Historical messages first, then streamed ones that aren't duplicates.
+    processedInitial.forEach(pushUnique);
+    aiSdkMessages.forEach(pushUnique);
+
+    return result;
+  }, [initialMessages, aiSdkMessages]);
+
+  // Auto-send the initial prompt exactly once per fresh (empty) session.
+  useEffect(
+    function handleInitialPrompt() {
+      if (!initialPrompt || !sessionId) return;
+      if (initialMessages.length > 0) return;
+      if (hasSentInitialPrompt(sessionId)) return;
+
+      markInitialPromptSent(sessionId);
+      const context = capturePageContext();
+      sendMessageRef.current?.(initialPrompt, true, context);
+    },
+    [initialPrompt, sessionId, initialMessages.length, capturePageContext],
+  );
+
+  // Send a message with the current page context attached.
+  async function sendMessageWithContext(
+    content: string,
+    isUserMessage: boolean = true,
+  ) {
+    const context = capturePageContext();
+    await sendMessage(content, isUserMessage, context);
+  }
+
+  function handleRegionModalOpenChange(open: boolean) {
+    setIsRegionBlockedModalOpen(open);
+  }
+
+  function handleRegionModalClose() {
+    setIsRegionBlockedModalOpen(false);
+  }
+
+  return {
+    messages: filterAuthMessages(allMessages),
+    streamingChunks,
+    isStreaming,
+    error,
+    isRegionBlockedModalOpen,
+    setIsRegionBlockedModalOpen,
+    sendMessageWithContext,
+    handleRegionModalOpenChange,
+    handleRegionModalClose,
+    sendMessage,
+    stopStreaming,
+  };
+}
+
+// Helper to generate deduplication key for a message
+function getMessageKey(msg: ChatMessageData): string {
+ if (msg.type === "message") {
+ return `msg:${msg.role}:${msg.content}`;
+ } else if (msg.type === "tool_call") {
+ return `toolcall:${msg.toolId}`;
+ } else if (msg.type === "tool_response") {
+ return `toolresponse:${(msg as { toolId?: string }).toolId}`;
+ } else if (
+ msg.type === "operation_started" ||
+ msg.type === "operation_pending" ||
+ msg.type === "operation_in_progress"
+ ) {
+ const typedMsg = msg as {
+ toolId?: string;
+ operationId?: string;
+ toolCallId?: string;
+ toolName?: string;
+ };
+ return `op:${typedMsg.toolId || typedMsg.operationId || typedMsg.toolCallId || ""}:${typedMsg.toolName || ""}`;
+ } else {
+ return `${msg.type}:${JSON.stringify(msg).slice(0, 100)}`;
+ }
+}
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/useAiSdkChat.ts b/autogpt_platform/frontend/src/components/contextual/Chat/useAiSdkChat.ts
new file mode 100644
index 0000000000..ef581a25a3
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/useAiSdkChat.ts
@@ -0,0 +1,421 @@
+"use client";
+
+/**
+ * useAiSdkChat - Vercel AI SDK integration for CoPilot Chat
+ *
+ * This hook wraps @ai-sdk/react's useChat to provide:
+ * - Streaming chat with the existing Python backend (already AI SDK protocol compatible)
+ * - Integration with existing session management
+ * - Custom tool response parsing for AutoGPT-specific types
+ * - Page context injection
+ *
+ * The Python backend already implements the AI SDK Data Stream Protocol (v1),
+ * so this hook can communicate directly without any backend changes.
+ */
+
+import { useChat as useAiSdkChatBase } from "@ai-sdk/react";
+import { DefaultChatTransport, type UIMessage } from "ai";
+import { useCallback, useEffect, useMemo, useRef, useState } from "react";
+import { toast } from "sonner";
+import type { ChatMessageData } from "./components/ChatMessage/useChatMessage";
+
+// Tool response types from the backend
+// Operation-status payload types that share one message shape.
+type OperationType =
+  | "operation_started"
+  | "operation_pending"
+  | "operation_in_progress";
+
+// Minimal shape of a decoded tool output payload: a discriminating `type`
+// plus arbitrary extra fields.
+interface ToolOutputBase {
+  type: string;
+  [key: string]: unknown;
+}
+
+// Options for useAiSdkChat.
+interface UseAiSdkChatOptions {
+  // Active chat session id; null disables the transport.
+  sessionId: string | null;
+  // Previously persisted messages, already in AI SDK UIMessage form.
+  initialMessages?: UIMessage[];
+  // Called once per tool call when an operation_started payload is seen.
+  onOperationStarted?: () => void;
+  // Called whenever the streaming/submitted status flips.
+  onStreamingChange?: (isStreaming: boolean) => void;
+}
+
+/**
+ * Parse tool output from AI SDK message parts into ChatMessageData format
+ */
+function parseToolOutput(
+ toolCallId: string,
+ toolName: string,
+ output: unknown,
+): ChatMessageData | null {
+ if (!output) return null;
+
+ let parsed: ToolOutputBase;
+ try {
+ parsed =
+ typeof output === "string"
+ ? JSON.parse(output)
+ : (output as ToolOutputBase);
+ } catch {
+ return null;
+ }
+
+ const type = parsed.type;
+
+ // Handle operation status types
+ if (
+ type === "operation_started" ||
+ type === "operation_pending" ||
+ type === "operation_in_progress"
+ ) {
+ return {
+ type: type as OperationType,
+ toolId: toolCallId,
+ toolName: toolName,
+ operationId: (parsed.operation_id as string) || undefined,
+ message: (parsed.message as string) || undefined,
+ timestamp: new Date(),
+ } as ChatMessageData;
+ }
+
+ // Handle agent carousel
+ if (type === "agent_carousel" && Array.isArray(parsed.agents)) {
+ return {
+ type: "agent_carousel",
+ toolId: toolCallId,
+ toolName: toolName,
+ agents: parsed.agents,
+ timestamp: new Date(),
+ } as ChatMessageData;
+ }
+
+ // Handle execution started
+ if (type === "execution_started") {
+ return {
+ type: "execution_started",
+ toolId: toolCallId,
+ toolName: toolName,
+ graphId: parsed.graph_id as string,
+ graphVersion: parsed.graph_version as number,
+ graphExecId: parsed.graph_exec_id as string,
+ nodeExecIds: parsed.node_exec_ids as string[],
+ timestamp: new Date(),
+ } as ChatMessageData;
+ }
+
+ // Handle error responses
+ if (type === "error") {
+ return {
+ type: "tool_response",
+ toolId: toolCallId,
+ toolName: toolName,
+ result: parsed,
+ success: false,
+ timestamp: new Date(),
+ } as ChatMessageData;
+ }
+
+ // Handle clarification questions
+ if (type === "clarification_questions" && Array.isArray(parsed.questions)) {
+ return {
+ type: "clarification_questions",
+ toolId: toolCallId,
+ toolName: toolName,
+ questions: parsed.questions,
+ timestamp: new Date(),
+ } as ChatMessageData;
+ }
+
+ // Handle credentials needed
+ if (type === "credentials_needed" || type === "setup_requirements") {
+ const credentials = parsed.credentials as
+ | Array<{
+ provider: string;
+ provider_name: string;
+ credential_type: string;
+ scopes?: string[];
+ }>
+ | undefined;
+ if (credentials && credentials.length > 0) {
+ return {
+ type: "credentials_needed",
+ toolId: toolCallId,
+ toolName: toolName,
+ credentials: credentials,
+ timestamp: new Date(),
+ } as ChatMessageData;
+ }
+ }
+
+ // Default: generic tool response
+ return {
+ type: "tool_response",
+ toolId: toolCallId,
+ toolName: toolName,
+ result: parsed,
+ success: true,
+ timestamp: new Date(),
+ } as ChatMessageData;
+}
+
+/**
+ * Convert AI SDK UIMessage parts to ChatMessageData array.
+ *
+ * Text parts become plain chat messages; "tool-*" parts expand into
+ * tool_call entries (while input streams in) plus parsed tool responses
+ * (once output is available or errored).
+ */
+function convertMessageToChatData(message: UIMessage): ChatMessageData[] {
+  const result: ChatMessageData[] = [];
+
+  for (const part of message.parts) {
+    switch (part.type) {
+      case "text":
+        // Skip whitespace-only text parts.
+        if (part.text.trim()) {
+          result.push({
+            type: "message",
+            role: message.role as "user" | "assistant",
+            content: part.text,
+            // NOTE(review): confirm `createdAt` exists on UIMessage in the
+            // installed AI SDK version; falls back to "now" when absent.
+            timestamp: new Date(message.createdAt || Date.now()),
+          });
+        }
+        break;
+
+      default:
+        // Handle tool parts (tool-*)
+        if (part.type.startsWith("tool-")) {
+          const toolPart = part as {
+            type: string;
+            toolCallId: string;
+            toolName: string;
+            state: string;
+            // Fixed: bare `Record` is invalid TypeScript (missing type args).
+            input?: Record<string, unknown>;
+            output?: unknown;
+          };
+
+          // Show tool call in progress
+          if (
+            toolPart.state === "input-streaming" ||
+            toolPart.state === "input-available"
+          ) {
+            result.push({
+              type: "tool_call",
+              toolId: toolPart.toolCallId,
+              toolName: toolPart.toolName,
+              arguments: toolPart.input || {},
+              timestamp: new Date(),
+            });
+          }
+
+          // Parse tool output when available
+          if (
+            toolPart.state === "output-available" &&
+            toolPart.output !== undefined
+          ) {
+            const parsed = parseToolOutput(
+              toolPart.toolCallId,
+              toolPart.toolName,
+              toolPart.output,
+            );
+            if (parsed) {
+              result.push(parsed);
+            }
+          }
+
+          // Handle tool errors
+          if (toolPart.state === "output-error") {
+            result.push({
+              type: "tool_response",
+              toolId: toolPart.toolCallId,
+              toolName: toolPart.toolName,
+              response: {
+                type: "error",
+                message: (toolPart as { errorText?: string }).errorText,
+              },
+              success: false,
+              timestamp: new Date(),
+            } as ChatMessageData);
+          }
+        }
+        break;
+    }
+  }
+
+  return result;
+}
+
+export function useAiSdkChat({
+  sessionId,
+  initialMessages = [],
+  onOperationStarted,
+  onStreamingChange,
+}: UseAiSdkChatOptions) {
+  const [isRegionBlockedModalOpen, setIsRegionBlockedModalOpen] =
+    useState(false);
+  // Fixed: both refs had missing/mangled type arguments. The session ref is
+  // later assigned a string, and the notified-set is a Set<string>.
+  const previousSessionIdRef = useRef<string | null>(null);
+  const hasNotifiedOperationRef = useRef<Set<string>>(new Set());
+
+  // Create transport with session-specific endpoint
+  const transport = useMemo(() => {
+    if (!sessionId) return undefined;
+    return new DefaultChatTransport({
+      api: `/api/chat/sessions/${sessionId}/stream`,
+      headers: {
+        "Content-Type": "application/json",
+      },
+    });
+  }, [sessionId]);
+
+  const {
+    messages: aiMessages,
+    status,
+    error,
+    stop,
+    setMessages,
+    sendMessage: aiSendMessage,
+  } = useAiSdkChatBase({
+    transport,
+    // NOTE(review): newer @ai-sdk/react versions renamed this option to
+    // `messages` — verify against the installed SDK version.
+    initialMessages,
+    onError: (err) => {
+      console.error("[useAiSdkChat] Error:", err);
+
+      // Check for region blocking
+      if (
+        err.message?.toLowerCase().includes("not available in your region") ||
+        (err as { code?: string }).code === "MODEL_NOT_AVAILABLE_REGION"
+      ) {
+        setIsRegionBlockedModalOpen(true);
+        return;
+      }
+
+      toast.error("Chat Error", {
+        description: err.message || "An error occurred",
+      });
+    },
+    onFinish: ({ message }) => {
+      console.info("[useAiSdkChat] Message finished:", {
+        id: message.id,
+        partsCount: message.parts.length,
+      });
+    },
+  });
+
+  // Track streaming status ("submitted" counts: request sent, awaiting stream)
+  const isStreaming = status === "streaming" || status === "submitted";
+
+  // Notify parent of streaming changes
+  useEffect(() => {
+    onStreamingChange?.(isStreaming);
+  }, [isStreaming, onStreamingChange]);
+
+  // Handle session changes - reset state
+  useEffect(() => {
+    if (sessionId === previousSessionIdRef.current) return;
+
+    // Abort any in-flight stream belonging to the previous session.
+    if (previousSessionIdRef.current && status === "streaming") {
+      stop();
+    }
+
+    previousSessionIdRef.current = sessionId;
+    hasNotifiedOperationRef.current = new Set();
+
+    if (sessionId) {
+      setMessages(initialMessages);
+    }
+  }, [sessionId, status, stop, setMessages, initialMessages]);
+
+  // Convert AI SDK messages to ChatMessageData format
+  const messages = useMemo(() => {
+    const result: ChatMessageData[] = [];
+
+    for (const message of aiMessages) {
+      const converted = convertMessageToChatData(message);
+      result.push(...converted);
+
+      // Notify once per tool call when an operation starts.
+      // NOTE(review): calling onOperationStarted inside useMemo is a render
+      // side effect; consider moving this notification into a useEffect.
+      for (const msg of converted) {
+        if (
+          msg.type === "operation_started" &&
+          !hasNotifiedOperationRef.current.has(
+            (msg as { toolId?: string }).toolId || "",
+          )
+        ) {
+          hasNotifiedOperationRef.current.add(
+            (msg as { toolId?: string }).toolId || "",
+          );
+          onOperationStarted?.();
+        }
+      }
+    }
+
+    return result;
+  }, [aiMessages, onOperationStarted]);
+
+  // Get streaming text chunks from the last assistant message
+  const streamingChunks = useMemo(() => {
+    if (!isStreaming) return [];
+
+    const lastMessage = aiMessages[aiMessages.length - 1];
+    if (!lastMessage || lastMessage.role !== "assistant") return [];
+
+    const chunks: string[] = [];
+    for (const part of lastMessage.parts) {
+      if (part.type === "text" && part.text) {
+        chunks.push(part.text);
+      }
+    }
+
+    return chunks;
+  }, [aiMessages, isStreaming]);
+
+  // Send message with optional page context (forwarded in the request body)
+  const sendMessage = useCallback(
+    async (
+      content: string,
+      isUserMessage: boolean = true,
+      context?: { url: string; content: string },
+    ) => {
+      if (!sessionId || !transport) {
+        console.error("[useAiSdkChat] Cannot send message: no session");
+        return;
+      }
+
+      setIsRegionBlockedModalOpen(false);
+
+      try {
+        await aiSendMessage(
+          { text: content },
+          {
+            body: {
+              is_user_message: isUserMessage,
+              context: context || null,
+            },
+          },
+        );
+      } catch (err) {
+        console.error("[useAiSdkChat] Failed to send message:", err);
+
+        // User-initiated aborts are not errors.
+        if (err instanceof Error && err.name === "AbortError") return;
+
+        toast.error("Failed to send message", {
+          description:
+            err instanceof Error ? err.message : "Failed to send message",
+        });
+      }
+    },
+    [sessionId, transport, aiSendMessage],
+  );
+
+  // Stop streaming
+  const stopStreaming = useCallback(() => {
+    stop();
+  }, [stop]);
+
+  return {
+    messages,
+    streamingChunks,
+    isStreaming,
+    error,
+    status,
+    isRegionBlockedModalOpen,
+    setIsRegionBlockedModalOpen,
+    sendMessage,
+    stopStreaming,
+    // Expose raw AI SDK state for advanced use cases
+    aiMessages,
+    setAiMessages: setMessages,
+  };
+}