Compare commits


1 Commit

Author SHA1 Message Date
Zamil Majdy
12690ad0a9 fix(backend): Use explicit {schema}.vector for pgvector types
The unqualified ::vector type fails in ORDER BY contexts when queries run through PgBouncer.
Use explicit schema qualification ({schema}.vector), which resolves to
platform.vector, where the pgvector extension is installed.

Changes:
- Add {schema} placeholder to db.py for raw schema name
- Use {schema}.vector instead of unqualified ::vector in embeddings.py
- Use {{schema}}.vector instead of unqualified ::vector in hybrid_search.py

Tested on dev: explicit platform.vector works in all contexts.

Fixes: AUTOGPT-SERVER-76B
2026-01-21 10:19:08 -05:00
31 changed files with 223 additions and 1888 deletions
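
For illustration only (this note is not part of the commit), a minimal sketch of the qualification pattern described above, assuming a `platform` schema and the `{schema}`/`{schema_prefix}` substitution that db.py performs; the query text is trimmed from the diffs below:

```python
# Trimmed illustration of the fix; the real query and helper live in embeddings.py and db.py.
template = (
    'INSERT INTO {schema_prefix}"UnifiedContentEmbedding" ("embedding") '
    "VALUES ($1::{schema}.vector)"  # was: $1::vector, resolved via search_path
)

schema = "platform"  # assumed here; in db.py this comes from get_database_schema()
schema_prefix = f'"{schema}".' if schema != "public" else ""

formatted = template.format(schema_prefix=schema_prefix, schema=schema)
print(formatted)
# INSERT INTO "platform"."UnifiedContentEmbedding" ("embedding") VALUES ($1::platform.vector)
```

Spelling out the schema sidesteps search_path resolution, which the commit message identifies as unreliable in ORDER BY contexts under PgBouncer.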

View File

@@ -128,7 +128,7 @@ jobs:
token: ${{ secrets.GITHUB_TOKEN }}
exitOnceUploaded: true
e2e_test:
test:
runs-on: big-boi
needs: setup
strategy:
@@ -258,39 +258,3 @@ jobs:
- name: Print Final Docker Compose logs
if: always()
run: docker compose -f ../docker-compose.yml logs
integration_test:
runs-on: ubuntu-latest
needs: setup
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "22.18.0"
- name: Enable corepack
run: corepack enable
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Generate API client
run: pnpm generate:api
- name: Run Integration Tests
run: pnpm test:unit

View File

@@ -154,16 +154,15 @@ async def store_content_embedding(
# Upsert the embedding
# WHERE clause in DO UPDATE prevents PostgreSQL 15 bug with NULLS NOT DISTINCT
# Use unqualified ::vector - pgvector is in search_path on all environments
await execute_raw_with_schema(
"""
INSERT INTO {schema_prefix}"UnifiedContentEmbedding" (
"id", "contentType", "contentId", "userId", "embedding", "searchableText", "metadata", "createdAt", "updatedAt"
)
VALUES (gen_random_uuid()::text, $1::{schema_prefix}"ContentType", $2, $3, $4::vector, $5, $6::jsonb, NOW(), NOW())
VALUES (gen_random_uuid()::text, $1::{schema_prefix}"ContentType", $2, $3, $4::{schema}.vector, $5, $6::jsonb, NOW(), NOW())
ON CONFLICT ("contentType", "contentId", "userId")
DO UPDATE SET
"embedding" = $4::vector,
"embedding" = $4::{schema}.vector,
"searchableText" = $5,
"metadata" = $6::jsonb,
"updatedAt" = NOW()
@@ -879,7 +878,6 @@ async def semantic_search(
min_similarity_idx = len(params) + 1
params.append(min_similarity)
# Use unqualified ::vector and <=> operator - pgvector is in search_path on all environments
sql = (
"""
SELECT
@@ -889,7 +887,7 @@ async def semantic_search(
metadata,
1 - (embedding <=> '"""
+ embedding_str
+ """'::vector) as similarity
+ """'::{schema}.vector) as similarity
FROM {schema_prefix}"UnifiedContentEmbedding"
WHERE "contentType" IN ("""
+ content_type_placeholders
@@ -899,7 +897,7 @@ async def semantic_search(
+ """
AND 1 - (embedding <=> '"""
+ embedding_str
+ """'::vector) >= $"""
+ """'::{schema}.vector) >= $"""
+ str(min_similarity_idx)
+ """
ORDER BY similarity DESC

View File

@@ -295,7 +295,7 @@ async def unified_hybrid_search(
FROM {{schema_prefix}}"UnifiedContentEmbedding" uce
WHERE uce."contentType" = ANY({content_types_param}::{{schema_prefix}}"ContentType"[])
{user_filter}
ORDER BY uce.embedding <=> {embedding_param}::vector
ORDER BY uce.embedding <=> {embedding_param}::{{schema}}.vector
LIMIT 200
)
),
@@ -307,7 +307,7 @@ async def unified_hybrid_search(
uce.metadata,
uce."updatedAt" as updated_at,
-- Semantic score: cosine similarity (1 - distance)
COALESCE(1 - (uce.embedding <=> {embedding_param}::vector), 0) as semantic_score,
COALESCE(1 - (uce.embedding <=> {embedding_param}::{{schema}}.vector), 0) as semantic_score,
-- Lexical score: ts_rank_cd
COALESCE(ts_rank_cd(uce.search, plainto_tsquery('english', {query_param})), 0) as lexical_raw,
-- Category match from metadata
@@ -583,7 +583,7 @@ async def hybrid_search(
WHERE uce."contentType" = 'STORE_AGENT'::{{schema_prefix}}"ContentType"
AND uce."userId" IS NULL
AND {where_clause}
ORDER BY uce.embedding <=> {embedding_param}::vector
ORDER BY uce.embedding <=> {embedding_param}::{{schema}}.vector
LIMIT 200
) uce
),
@@ -605,7 +605,7 @@ async def hybrid_search(
-- Searchable text for BM25 reranking
COALESCE(sa.agent_name, '') || ' ' || COALESCE(sa.sub_heading, '') || ' ' || COALESCE(sa.description, '') as searchable_text,
-- Semantic score
COALESCE(1 - (uce.embedding <=> {embedding_param}::vector), 0) as semantic_score,
COALESCE(1 - (uce.embedding <=> {embedding_param}::{{schema}}.vector), 0) as semantic_score,
-- Lexical score (raw, will normalize)
COALESCE(ts_rank_cd(uce.search, plainto_tsquery('english', {query_param})), 0) as lexical_raw,
-- Category match

View File

@@ -120,12 +120,7 @@ async def _raw_with_schema(
Supports placeholders:
- {schema_prefix}: Table/type prefix (e.g., "platform".)
- {schema}: Raw schema name for application tables (e.g., platform)
Note on pgvector types:
Use unqualified ::vector and <=> operator in queries. PostgreSQL resolves
these via search_path, which includes the schema where pgvector is installed
on all environments (local, CI, dev).
- {schema}: Raw schema name (e.g., platform) for pgvector types
Args:
query_template: SQL query with {schema_prefix} and/or {schema} placeholders
@@ -137,19 +132,16 @@ async def _raw_with_schema(
- list[dict] if execute=False (query results)
- int if execute=True (number of affected rows)
Example with vector type:
Example:
await execute_raw_with_schema(
'INSERT INTO {schema_prefix}"Embedding" (vec) VALUES ($1::vector)',
'INSERT INTO {schema_prefix}"Embedding" (vec) VALUES ($1::{schema}.vector)',
embedding_data
)
"""
schema = get_database_schema()
schema_prefix = f'"{schema}".' if schema != "public" else ""
formatted_query = query_template.format(
schema_prefix=schema_prefix,
schema=schema,
)
formatted_query = query_template.format(schema_prefix=schema_prefix, schema=schema)
import prisma as prisma_module
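
A side note on the two placeholder spellings in the commit message ({schema} in embeddings.py, {{schema}} in hybrid_search.py): query fragments built with Python f-strings must double the braces so the placeholder survives until the helper's .format() call shown above. An illustrative sketch, not code from the repo; the $1 parameter is hypothetical:

```python
# Illustrative only: why hybrid_search.py writes {{schema}} while embeddings.py writes {schema}.
embedding_param = "$1"  # hypothetical positional parameter

# The f-string substitutes {embedding_param} now; {{schema}} collapses to {schema} for later.
fragment = f"ORDER BY uce.embedding <=> {embedding_param}::{{schema}}.vector"
assert fragment == "ORDER BY uce.embedding <=> $1::{schema}.vector"

# The shared helper then fills in the schema, as in formatted_query above.
print(fragment.format(schema="platform"))
# ORDER BY uce.embedding <=> $1::platform.vector
```

Either spelling ends up as the same platform.vector cast once the helper formats the query.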

View File

@@ -103,18 +103,8 @@ class RedisEventBus(BaseRedisEventBus[M], ABC):
return redis.get_redis()
def publish_event(self, event: M, channel_key: str):
"""
Publish an event to Redis. Gracefully handles connection failures
by logging the error instead of raising exceptions.
"""
try:
message, full_channel_name = self._serialize_message(event, channel_key)
self.connection.publish(full_channel_name, message)
except Exception:
logger.exception(
f"Failed to publish event to Redis channel {channel_key}. "
"Event bus operation will continue without Redis connectivity."
)
message, full_channel_name = self._serialize_message(event, channel_key)
self.connection.publish(full_channel_name, message)
def listen_events(self, channel_key: str) -> Generator[M, None, None]:
pubsub, full_channel_name = self._get_pubsub_channel(
@@ -138,19 +128,9 @@ class AsyncRedisEventBus(BaseRedisEventBus[M], ABC):
return await redis.get_redis_async()
async def publish_event(self, event: M, channel_key: str):
"""
Publish an event to Redis. Gracefully handles connection failures
by logging the error instead of raising exceptions.
"""
try:
message, full_channel_name = self._serialize_message(event, channel_key)
connection = await self.connection
await connection.publish(full_channel_name, message)
except Exception:
logger.exception(
f"Failed to publish event to Redis channel {channel_key}. "
"Event bus operation will continue without Redis connectivity."
)
message, full_channel_name = self._serialize_message(event, channel_key)
connection = await self.connection
await connection.publish(full_channel_name, message)
async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
pubsub, full_channel_name = self._get_pubsub_channel(

View File

@@ -1,56 +0,0 @@
"""
Tests for event_bus graceful degradation when Redis is unavailable.
"""
from unittest.mock import AsyncMock, patch
import pytest
from pydantic import BaseModel
from backend.data.event_bus import AsyncRedisEventBus
class TestEvent(BaseModel):
"""Test event model."""
message: str
class TestNotificationBus(AsyncRedisEventBus[TestEvent]):
"""Test implementation of AsyncRedisEventBus."""
Model = TestEvent
@property
def event_bus_name(self) -> str:
return "test_event_bus"
@pytest.mark.asyncio
async def test_publish_event_handles_connection_failure_gracefully():
"""Test that publish_event logs exception instead of raising when Redis is unavailable."""
bus = TestNotificationBus()
event = TestEvent(message="test message")
# Mock get_redis_async to raise connection error
with patch(
"backend.data.event_bus.redis.get_redis_async",
side_effect=ConnectionError("Authentication required."),
):
# Should not raise exception
await bus.publish_event(event, "test_channel")
@pytest.mark.asyncio
async def test_publish_event_works_with_redis_available():
"""Test that publish_event works normally when Redis is available."""
bus = TestNotificationBus()
event = TestEvent(message="test message")
# Mock successful Redis connection
mock_redis = AsyncMock()
mock_redis.publish = AsyncMock()
with patch("backend.data.event_bus.redis.get_redis_async", return_value=mock_redis):
await bus.publish_event(event, "test_channel")
mock_redis.publish.assert_called_once()

View File

@@ -81,8 +81,6 @@ class ExecutionContext(BaseModel):
This includes information needed by blocks, sub-graphs, and execution management.
"""
model_config = {"extra": "ignore"}
human_in_the_loop_safe_mode: bool = True
sensitive_action_safe_mode: bool = False
user_timezone: str = "UTC"

View File

@@ -64,8 +64,6 @@ logger = logging.getLogger(__name__)
class GraphSettings(BaseModel):
# Use Annotated with BeforeValidator to coerce None to default values.
# This handles cases where the database has null values for these fields.
model_config = {"extra": "ignore"}
human_in_the_loop_safe_mode: Annotated[
bool, BeforeValidator(lambda v: v if v is not None else True)
] = True

View File

@@ -1,10 +1,9 @@
-- CreateExtension
-- Supabase: pgvector must be enabled via Dashboard → Database → Extensions first
-- Creates extension in current schema (determined by search_path from DATABASE_URL ?schema= param)
-- This ensures vector type is in the same schema as tables, making ::vector work without explicit qualification
-- Create in public schema so vector type is available across all schemas
DO $$
BEGIN
CREATE EXTENSION IF NOT EXISTS "vector";
CREATE EXTENSION IF NOT EXISTS "vector" WITH SCHEMA "public";
EXCEPTION WHEN OTHERS THEN
RAISE NOTICE 'vector extension not available or already exists, skipping';
END $$;
@@ -20,7 +19,7 @@ CREATE TABLE "UnifiedContentEmbedding" (
"contentType" "ContentType" NOT NULL,
"contentId" TEXT NOT NULL,
"userId" TEXT,
"embedding" vector(1536) NOT NULL,
"embedding" public.vector(1536) NOT NULL,
"searchableText" TEXT NOT NULL,
"metadata" JSONB NOT NULL DEFAULT '{}',
@@ -46,4 +45,4 @@ CREATE UNIQUE INDEX "UnifiedContentEmbedding_contentType_contentId_userId_key" O
-- Uses cosine distance operator (<=>), which matches the query in hybrid_search.py
-- Note: Drop first in case Prisma created a btree index (Prisma doesn't support HNSW)
DROP INDEX IF EXISTS "UnifiedContentEmbedding_embedding_idx";
CREATE INDEX "UnifiedContentEmbedding_embedding_idx" ON "UnifiedContentEmbedding" USING hnsw ("embedding" vector_cosine_ops);
CREATE INDEX "UnifiedContentEmbedding_embedding_idx" ON "UnifiedContentEmbedding" USING hnsw ("embedding" public.vector_cosine_ops);

View File

@@ -366,12 +366,12 @@ def generate_block_markdown(
lines.append("")
# What it is (full description)
lines.append("### What it is")
lines.append(f"### What it is")
lines.append(block.description or "No description available.")
lines.append("")
# How it works (manual section)
lines.append("### How it works")
lines.append(f"### How it works")
how_it_works = manual_content.get(
"how_it_works", "_Add technical explanation here._"
)
@@ -383,7 +383,7 @@ def generate_block_markdown(
# Inputs table (auto-generated)
visible_inputs = [f for f in block.inputs if not f.hidden]
if visible_inputs:
lines.append("### Inputs")
lines.append(f"### Inputs")
lines.append("")
lines.append("| Input | Description | Type | Required |")
lines.append("|-------|-------------|------|----------|")
@@ -400,7 +400,7 @@ def generate_block_markdown(
# Outputs table (auto-generated)
visible_outputs = [f for f in block.outputs if not f.hidden]
if visible_outputs:
lines.append("### Outputs")
lines.append(f"### Outputs")
lines.append("")
lines.append("| Output | Description | Type |")
lines.append("|--------|-------------|------|")
@@ -414,7 +414,7 @@ def generate_block_markdown(
lines.append("")
# Possible use case (manual section)
lines.append("### Possible use case")
lines.append(f"### Possible use case")
use_case = manual_content.get("use_case", "_Add practical use case examples here._")
lines.append("<!-- MANUAL: use_case -->")
lines.append(use_case)

View File

@@ -29,4 +29,4 @@ NEXT_PUBLIC_CLOUDFLARE_TURNSTILE_SITE_KEY=
NEXT_PUBLIC_TURNSTILE=disabled
# PR previews
NEXT_PUBLIC_PREVIEW_STEALING_DEV=
NEXT_PUBLIC_PREVIEW_STEALING_DEV=

View File

@@ -16,12 +16,6 @@ export default defineConfig({
client: "react-query",
httpClient: "fetch",
indexFiles: false,
mock: {
type: "msw",
baseUrl: "http://localhost:3000/api/proxy",
generateEachHttpStatus: true,
delay: 0,
},
override: {
mutator: {
path: "./mutators/custom-mutator.ts",

View File

@@ -15,8 +15,6 @@
"types": "tsc --noEmit",
"test": "NEXT_PUBLIC_PW_TEST=true next build --turbo && playwright test",
"test-ui": "NEXT_PUBLIC_PW_TEST=true next build --turbo && playwright test --ui",
"test:unit": "vitest run",
"test:unit:watch": "vitest",
"test:no-build": "playwright test",
"gentests": "playwright codegen http://localhost:3000",
"storybook": "storybook dev -p 6006",
@@ -120,7 +118,6 @@
},
"devDependencies": {
"@chromatic-com/storybook": "4.1.2",
"happy-dom": "20.3.4",
"@opentelemetry/instrumentation": "0.209.0",
"@playwright/test": "1.56.1",
"@storybook/addon-a11y": "9.1.5",
@@ -130,8 +127,6 @@
"@storybook/nextjs": "9.1.5",
"@tanstack/eslint-plugin-query": "5.91.2",
"@tanstack/react-query-devtools": "5.90.2",
"@testing-library/dom": "10.4.1",
"@testing-library/react": "16.3.2",
"@types/canvas-confetti": "1.9.0",
"@types/lodash": "4.17.20",
"@types/negotiator": "0.6.4",
@@ -140,7 +135,6 @@
"@types/react-dom": "18.3.5",
"@types/react-modal": "3.16.3",
"@types/react-window": "1.8.8",
"@vitejs/plugin-react": "5.1.2",
"axe-playwright": "2.2.2",
"chromatic": "13.3.3",
"concurrently": "9.2.1",
@@ -159,9 +153,7 @@
"require-in-the-middle": "8.0.1",
"storybook": "9.1.5",
"tailwindcss": "3.4.17",
"typescript": "5.9.3",
"vite-tsconfig-paths": "6.0.4",
"vitest": "4.0.17"
"typescript": "5.9.3"
},
"msw": {
"workerDirectory": [

File diff suppressed because it is too large.

View File

@@ -1,15 +1,8 @@
"use client";
import React, {
useCallback,
useContext,
useEffect,
useMemo,
useState,
} from "react";
import React, { useCallback, useEffect, useMemo, useState } from "react";
import {
CredentialsMetaInput,
CredentialsType,
GraphExecutionID,
GraphMeta,
LibraryAgentPreset,
@@ -36,11 +29,7 @@ import {
} from "@/components/__legacy__/ui/icons";
import { Input } from "@/components/__legacy__/ui/input";
import { Button } from "@/components/atoms/Button/Button";
import { CredentialsGroupedView } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView";
import {
findSavedCredentialByProviderAndType,
findSavedUserCredentialByProviderAndType,
} from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers";
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
import {
useToast,
@@ -48,7 +37,6 @@ import {
} from "@/components/molecules/Toast/use-toast";
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
import { cn, isEmpty } from "@/lib/utils";
import { CredentialsProvidersContext } from "@/providers/agent-credentials/credentials-provider";
import { ClockIcon, CopyIcon, InfoIcon } from "@phosphor-icons/react";
import { CalendarClockIcon, Trash2Icon } from "lucide-react";
@@ -102,7 +90,6 @@ export function AgentRunDraftView({
const api = useBackendAPI();
const { toast } = useToast();
const toastOnFail = useToastOnFail();
const allProviders = useContext(CredentialsProvidersContext);
const [inputValues, setInputValues] = useState<Record<string, any>>({});
const [inputCredentials, setInputCredentials] = useState<
@@ -141,77 +128,6 @@ export function AgentRunDraftView({
() => graph.credentials_input_schema.properties,
[graph],
);
const credentialFields = useMemo(
function getCredentialFields() {
return Object.entries(agentCredentialsInputFields);
},
[agentCredentialsInputFields],
);
const requiredCredentials = useMemo(
function getRequiredCredentials() {
return new Set(
(graph.credentials_input_schema?.required as string[]) || [],
);
},
[graph.credentials_input_schema?.required],
);
useEffect(
function initializeDefaultCredentials() {
if (!allProviders) return;
if (!graph.credentials_input_schema?.properties) return;
if (requiredCredentials.size === 0) return;
setInputCredentials(function updateCredentials(currentCreds) {
const next = { ...currentCreds };
let didAdd = false;
for (const key of requiredCredentials) {
if (next[key]) continue;
const schema = graph.credentials_input_schema.properties[key];
if (!schema) continue;
const providerNames = schema.credentials_provider || [];
const credentialTypes = schema.credentials_types || [];
const requiredScopes = schema.credentials_scopes;
const userCredential = findSavedUserCredentialByProviderAndType(
providerNames,
credentialTypes,
requiredScopes,
allProviders,
);
const savedCredential =
userCredential ||
findSavedCredentialByProviderAndType(
providerNames,
credentialTypes,
requiredScopes,
allProviders,
);
if (!savedCredential) continue;
next[key] = {
id: savedCredential.id,
provider: savedCredential.provider,
type: savedCredential.type as CredentialsType,
title: savedCredential.title,
};
didAdd = true;
}
if (!didAdd) return currentCreds;
return next;
});
},
[
allProviders,
graph.credentials_input_schema?.properties,
requiredCredentials,
],
);
const [allRequiredInputsAreSet, missingInputs] = useMemo(() => {
const nonEmptyInputs = new Set(
@@ -229,35 +145,18 @@ export function AgentRunDraftView({
);
return [isSuperset, difference];
}, [agentInputSchema.required, inputValues]);
const [allCredentialsAreSet, missingCredentials] = useMemo(
function getCredentialStatus() {
const missing = Array.from(requiredCredentials).filter((key) => {
const cred = inputCredentials[key];
return !cred || !cred.id;
});
return [missing.length === 0, missing];
},
[requiredCredentials, inputCredentials],
);
function addChangedCredentials(prev: Set<keyof LibraryAgentPresetUpdatable>) {
const next = new Set(prev);
next.add("credentials");
return next;
}
function handleCredentialChange(key: string, value?: CredentialsMetaInput) {
setInputCredentials(function updateInputCredentials(currentCreds) {
const next = { ...currentCreds };
if (value === undefined) {
delete next[key];
return next;
}
next[key] = value;
return next;
});
setChangedPresetAttributes(addChangedCredentials);
}
const [allCredentialsAreSet, missingCredentials] = useMemo(() => {
const availableCredentials = new Set(Object.keys(inputCredentials));
const allCredentials = new Set(Object.keys(agentCredentialsInputFields));
// Backwards-compatible implementation of isSupersetOf and difference
const isSuperset = Array.from(allCredentials).every((item) =>
availableCredentials.has(item),
);
const difference = Array.from(allCredentials).filter(
(item) => !availableCredentials.has(item),
);
return [isSuperset, difference];
}, [agentCredentialsInputFields, inputCredentials]);
const notifyMissingInputs = useCallback(
(needPresetName: boolean = true) => {
const allMissingFields = (
@@ -750,6 +649,35 @@ export function AgentRunDraftView({
</>
)}
{/* Credentials inputs */}
{Object.entries(agentCredentialsInputFields).map(
([key, inputSubSchema]) => (
<CredentialsInput
key={key}
schema={{ ...inputSubSchema, discriminator: undefined }}
selectedCredentials={
inputCredentials[key] ?? inputSubSchema.default
}
onSelectCredentials={(value) => {
setInputCredentials((obj) => {
const newObj = { ...obj };
if (value === undefined) {
delete newObj[key];
return newObj;
}
return {
...obj,
[key]: value,
};
});
setChangedPresetAttributes((prev) =>
prev.add("credentials"),
);
}}
/>
),
)}
{/* Regular inputs */}
{Object.entries(agentInputFields).map(([key, inputSubSchema]) => (
<RunAgentInputs
@@ -767,17 +695,6 @@ export function AgentRunDraftView({
data-testid={`agent-input-${key}`}
/>
))}
{/* Credentials inputs */}
{credentialFields.length > 0 && (
<CredentialsGroupedView
credentialFields={credentialFields}
requiredCredentials={requiredCredentials}
inputCredentials={inputCredentials}
inputValues={inputValues}
onCredentialChange={handleCredentialChange}
/>
)}
</CardContent>
</Card>
</div>

View File

@@ -1,15 +0,0 @@
import { expect, test } from "vitest";
import { render, screen } from "@/tests/integrations/test-utils";
import { MainMarkeplacePage } from "../MainMarketplacePage";
import { server } from "@/mocks/mock-server";
import { getDeleteV2DeleteStoreSubmissionMockHandler422 } from "@/app/api/__generated__/endpoints/store/store.msw";
// Only for CI testing purpose, will remove it in future PR
test("MainMarketplacePage", async () => {
server.use(getDeleteV2DeleteStoreSubmissionMockHandler422());
render(<MainMarkeplacePage />);
expect(
await screen.findByText("Featured agents", { exact: false }),
).toBeDefined();
});

View File

@@ -0,0 +1,81 @@
// import { render, screen } from "@testing-library/react";
// import { describe, expect, it } from "vitest";
// import { Badge } from "./Badge";
// describe("Badge Component", () => {
// it("renders badge with content", () => {
// render(<Badge variant="success">Success</Badge>);
// expect(screen.getByText("Success")).toBeInTheDocument();
// });
// it("applies correct variant styles", () => {
// const { rerender } = render(<Badge variant="success">Success</Badge>);
// let badge = screen.getByText("Success");
// expect(badge).toHaveClass("bg-green-100", "text-green-800");
// rerender(<Badge variant="error">Error</Badge>);
// badge = screen.getByText("Error");
// expect(badge).toHaveClass("bg-red-100", "text-red-800");
// rerender(<Badge variant="info">Info</Badge>);
// badge = screen.getByText("Info");
// expect(badge).toHaveClass("bg-slate-100", "text-slate-800");
// });
// it("applies custom className", () => {
// render(
// <Badge variant="success" className="custom-class">
// Success
// </Badge>,
// );
// const badge = screen.getByText("Success");
// expect(badge).toHaveClass("custom-class");
// });
// it("renders as span element", () => {
// render(<Badge variant="success">Success</Badge>);
// const badge = screen.getByText("Success");
// expect(badge.tagName).toBe("SPAN");
// });
// it("renders children correctly", () => {
// render(
// <Badge variant="success">
// <span>Custom</span> Content
// </Badge>,
// );
// expect(screen.getByText("Custom")).toBeInTheDocument();
// expect(screen.getByText("Content")).toBeInTheDocument();
// });
// it("supports all badge variants", () => {
// const variants = ["success", "error", "info"] as const;
// variants.forEach((variant) => {
// const { unmount } = render(
// <Badge variant={variant} data-testid={`badge-${variant}`}>
// {variant}
// </Badge>,
// );
// expect(screen.getByTestId(`badge-${variant}`)).toBeInTheDocument();
// unmount();
// });
// });
// it("handles long text content", () => {
// render(
// <Badge variant="info">
// Very long text that should be handled properly by the component
// </Badge>,
// );
// const badge = screen.getByText(/Very long text/);
// expect(badge).toBeInTheDocument();
// expect(badge).toHaveClass("overflow-hidden", "text-ellipsis");
// });
// });

View File

@@ -1,5 +1,5 @@
import { CredentialsProvidersContextType } from "@/providers/agent-credentials/credentials-provider";
import { filterSystemCredentials, getSystemCredentials } from "../../helpers";
import { getSystemCredentials } from "../../helpers";
export type CredentialField = [string, any];
@@ -208,42 +208,3 @@ export function findSavedCredentialByProviderAndType(
return undefined;
}
export function findSavedUserCredentialByProviderAndType(
providerNames: string[],
credentialTypes: string[],
requiredScopes: string[] | undefined,
allProviders: CredentialsProvidersContextType | null,
): SavedCredential | undefined {
for (const providerName of providerNames) {
const providerData = allProviders?.[providerName];
if (!providerData) continue;
const userCredentials = filterSystemCredentials(
providerData.savedCredentials ?? [],
);
const matchingCredentials: SavedCredential[] = [];
for (const credential of userCredentials) {
const typeMatches =
credentialTypes.length === 0 ||
credentialTypes.includes(credential.type);
const scopesMatch = hasRequiredScopes(credential, requiredScopes);
if (!typeMatches) continue;
if (!scopesMatch) continue;
matchingCredentials.push(credential as SavedCredential);
}
if (matchingCredentials.length === 1) {
return matchingCredentials[0];
}
if (matchingCredentials.length > 1) {
return undefined;
}
}
return undefined;
}

View File

@@ -98,20 +98,24 @@ export function useCredentialsInput({
// Auto-select the first available credential on initial mount
// Once a user has made a selection, we don't override it
useEffect(
function autoSelectCredential() {
if (readOnly) return;
if (!credentials || !("savedCredentials" in credentials)) return;
if (selectedCredential?.id) return;
useEffect(() => {
if (readOnly) return;
if (!credentials || !("savedCredentials" in credentials)) return;
const savedCreds = credentials.savedCredentials;
if (savedCreds.length === 0) return;
// If already selected, don't auto-select
if (selectedCredential?.id) return;
if (hasAttemptedAutoSelect.current) return;
hasAttemptedAutoSelect.current = true;
// Only attempt auto-selection once
if (hasAttemptedAutoSelect.current) return;
hasAttemptedAutoSelect.current = true;
if (isOptional) return;
// If optional, don't auto-select (user can choose "None")
if (isOptional) return;
const savedCreds = credentials.savedCredentials;
// Auto-select the first credential if any are available
if (savedCreds.length > 0) {
const cred = savedCreds[0];
onSelectCredential({
id: cred.id,
@@ -119,15 +123,14 @@ export function useCredentialsInput({
provider: credentials.provider,
title: (cred as any).title,
});
},
[
credentials,
selectedCredential?.id,
readOnly,
isOptional,
onSelectCredential,
],
);
}
}, [
credentials,
selectedCredential?.id,
readOnly,
isOptional,
onSelectCredential,
]);
if (
!credentials ||

View File

@@ -106,14 +106,9 @@ export function getTimezoneDisplayName(timezone: string): string {
const parts = timezone.split("/");
const city = parts[parts.length - 1].replace(/_/g, " ");
const abbr = getTimezoneAbbreviation(timezone);
if (abbr && abbr !== timezone) {
return `${city} (${abbr})`;
}
// If abbreviation is same as timezone or not found, show timezone with underscores replaced
const timezoneDisplay = timezone.replace(/_/g, " ");
return `${city} (${timezoneDisplay})`;
return abbr ? `${city} (${abbr})` : city;
} catch {
return timezone.replace(/_/g, " ");
return timezone;
}
}

View File

@@ -1,13 +0,0 @@
// We are not using this for tests because Vitest runs our tests in a Node.js environment.
// However, we can use it for development purposes to test our UI in the browser with fake data.
export async function initMocks() {
if (typeof window === "undefined") {
const { server } = await import("./mock-server");
server.listen({ onUnhandledRequest: "bypass" });
console.log("[MSW] Server mock initialized");
} else {
const { worker } = await import("./mock-browser");
await worker.start({ onUnhandledRequest: "bypass" });
console.log("[MSW] Browser mock initialized");
}
}

View File

@@ -1,4 +0,0 @@
import { setupWorker } from "msw/browser";
import { mockHandlers } from "./mock-handlers";
export const worker = setupWorker(...mockHandlers);

View File

@@ -1,48 +0,0 @@
import { getAdminMock } from "@/app/api/__generated__/endpoints/admin/admin.msw";
import { getAnalyticsMock } from "@/app/api/__generated__/endpoints/analytics/analytics.msw";
import { getApiKeysMock } from "@/app/api/__generated__/endpoints/api-keys/api-keys.msw";
import { getAuthMock } from "@/app/api/__generated__/endpoints/auth/auth.msw";
import { getBlocksMock } from "@/app/api/__generated__/endpoints/blocks/blocks.msw";
import { getChatMock } from "@/app/api/__generated__/endpoints/chat/chat.msw";
import { getCreditsMock } from "@/app/api/__generated__/endpoints/credits/credits.msw";
import { getDefaultMock } from "@/app/api/__generated__/endpoints/default/default.msw";
import { getEmailMock } from "@/app/api/__generated__/endpoints/email/email.msw";
import { getExecutionsMock } from "@/app/api/__generated__/endpoints/executions/executions.msw";
import { getFilesMock } from "@/app/api/__generated__/endpoints/files/files.msw";
import { getGraphsMock } from "@/app/api/__generated__/endpoints/graphs/graphs.msw";
import { getHealthMock } from "@/app/api/__generated__/endpoints/health/health.msw";
import { getIntegrationsMock } from "@/app/api/__generated__/endpoints/integrations/integrations.msw";
import { getLibraryMock } from "@/app/api/__generated__/endpoints/library/library.msw";
import { getMetricsMock } from "@/app/api/__generated__/endpoints/metrics/metrics.msw";
import { getOauthMock } from "@/app/api/__generated__/endpoints/oauth/oauth.msw";
import { getOnboardingMock } from "@/app/api/__generated__/endpoints/onboarding/onboarding.msw";
import { getOttoMock } from "@/app/api/__generated__/endpoints/otto/otto.msw";
import { getPresetsMock } from "@/app/api/__generated__/endpoints/presets/presets.msw";
import { getSchedulesMock } from "@/app/api/__generated__/endpoints/schedules/schedules.msw";
import { getStoreMock } from "@/app/api/__generated__/endpoints/store/store.msw";
// Pass hard-coded data to individual handler functions to override faker-generated data.
export const mockHandlers = [
...getAdminMock(),
...getAnalyticsMock(),
...getApiKeysMock(),
...getAuthMock(),
...getBlocksMock(),
...getChatMock(),
...getCreditsMock(),
...getDefaultMock(),
...getEmailMock(),
...getExecutionsMock(),
...getFilesMock(),
...getGraphsMock(),
...getHealthMock(),
...getIntegrationsMock(),
...getLibraryMock(),
...getMetricsMock(),
...getOauthMock(),
...getOnboardingMock(),
...getOttoMock(),
...getPresetsMock(),
...getSchedulesMock(),
...getStoreMock(),
];

View File

@@ -1,4 +0,0 @@
import { setupServer } from "msw/node";
import { mockHandlers } from "./mock-handlers";
export const server = setupServer(...mockHandlers);

View File

@@ -1,220 +0,0 @@
# Frontend Testing Rules 🧪
## Testing Types Overview
| Type | Tool | Speed | Purpose |
| --------------- | --------------------- | --------------- | -------------------------------- |
| **E2E** | Playwright | Slow (~5s/test) | Real browser, full user journeys |
| **Integration** | Vitest + RTL | Fast (~100ms) | Component + mocked API |
| **Unit** | Vitest + RTL | Fastest (~10ms) | Individual functions/components |
| **Visual** | Storybook + Chromatic | N/A | UI appearance, design system |
---
## When to Use Each
### ✅ E2E Tests (Playwright)
**Use for:** Critical user journeys that MUST work in a real browser.
- Authentication flows (login, signup, logout)
- Payment or sensitive transactions
- Flows requiring real browser APIs (clipboard, downloads)
- Cross-page navigation that must work end-to-end
**Location:** `src/tests/*.spec.ts` (centralized, as there will be fewer of them)
### ✅ Integration Tests (Vitest + RTL)
**Use for:** Testing components with their dependencies (API calls, state).
- Page-level behavior with mocked API responses
- Components that fetch data
- User interactions that trigger API calls
- Feature flows within a single page
**Location:** Place tests in a `__tests__` folder next to the component:
```
ComponentName/
__tests__/
main.test.tsx
some-flow.test.tsx
ComponentName.tsx
useComponentName.ts
```
**Start at page level:** Initially write integration tests at the "page" level. No need to write them for every small component.
```
/library/
__tests__/
main.test.tsx
searching-agents.test.tsx
agents-pagination.test.tsx
page.tsx
useLibraryPage.ts
```
Start with a `main.test.tsx` file and split into smaller files as it grows.
**What integration tests should do:**
1. Render a page or complex modal (e.g., `AgentPublishModal`)
2. Mock API requests via MSW
3. Assert UI scenarios via Testing Library
```tsx
// Example: Test page renders data from API
import { server } from "@/mocks/mock-server";
import { getDeleteV2DeleteStoreSubmissionMockHandler422 } from "@/app/api/__generated__/endpoints/store/store.msw";
test("shows error when submission fails", async () => {
// Override default handler to return error status
server.use(getDeleteV2DeleteStoreSubmissionMockHandler422());
render(<MarketplacePage />);
await screen.findByText("Featured Agents");
// ... assert error UI
});
```
**Tip:** Use `findBy...` methods most of the time—they wait for elements to appear, so async code won't cause flaky tests. The regular `getBy...` methods don't wait and error immediately.
### ✅ Unit Tests (Vitest + RTL)
**Use for:** Testing isolated components and utility functions.
- Pure utility functions (`lib/utils.ts`)
- Component rendering with various props
- Component state changes
- Custom hooks
**Location:** Co-located with the file: `Component.test.tsx` next to `Component.tsx`
```tsx
// Example: Test component renders correctly
render(<AgentCard title="My Agent" />);
expect(screen.getByText("My Agent")).toBeInTheDocument();
```
### ✅ Storybook Tests (Visual)
**Use for:** Design system, visual appearance, component documentation.
- Atoms (Button, Input, Badge)
- Molecules (Dialog, Card)
- Visual states (hover, disabled, loading)
- Responsive layouts
**Location:** Co-located: `Component.stories.tsx` next to `Component.tsx`
---
## Decision Flowchart
```
Does it need a REAL browser/backend?
├─ YES → E2E (Playwright)
└─ NO
└─ Does it involve API calls or complex state?
├─ YES → Integration (Vitest + RTL)
└─ NO
└─ Is it about visual appearance?
├─ YES → Storybook
└─ NO → Unit (Vitest + RTL)
```
---
## What NOT to Test
❌ Third-party library internals (Radix UI, React Query)
❌ CSS styling details (use Storybook)
❌ Simple prop-passing components with no logic
❌ TypeScript types
---
## File Organization
```
src/
├── components/
│ └── atoms/
│ └── Button/
│ ├── Button.tsx
│ ├── Button.test.tsx # Unit test
│ └── Button.stories.tsx # Visual test
├── app/
│ └── (platform)/
│ └── marketplace/
│ └── components/
│ └── MainMarketplacePage/
│ ├── __tests__/
│ │ ├── main.test.tsx # Integration test
│ │ └── search-agents.test.tsx # Integration test
│ ├── MainMarketplacePage.tsx
│ └── useMainMarketplacePage.ts
├── lib/
│ ├── utils.ts
│ └── utils.test.ts # Unit test
├── mocks/
│ ├── mock-handlers.ts # MSW handlers (auto-generated via Orval)
│ └── mock-server.ts # MSW server setup
└── tests/
├── integrations/
│ ├── test-utils.tsx # Testing utilities
│ └── vitest.setup.tsx # Integration test setup
└── *.spec.ts # E2E tests (Playwright) - centralized
```
---
## Priority Matrix
| Component Type | Test Priority | Recommended Test |
| ------------------- | ------------- | ---------------- |
| Pages/Features | **Highest** | Integration |
| Custom Hooks | High | Unit |
| Utility Functions | High | Unit |
| Organisms (complex) | High | Integration |
| Molecules | Medium | Unit + Storybook |
| Atoms | Medium | Storybook only\* |
\*Atoms are typically simple enough that Storybook visual tests suffice.
---
## MSW Mocking
API mocking is handled via MSW (Mock Service Worker). Handlers are auto-generated by Orval from the OpenAPI schema.
**Default behavior:** All client-side requests are intercepted and return 200 status with faker-generated data.
**Override for specific tests:** Use generated error handlers to test non-OK status scenarios:
```tsx
import { server } from "@/mocks/mock-server";
import { getDeleteV2DeleteStoreSubmissionMockHandler422 } from "@/app/api/__generated__/endpoints/store/store.msw";
test("shows error when deletion fails", async () => {
server.use(getDeleteV2DeleteStoreSubmissionMockHandler422());
render(<MyComponent />);
// ... assert error UI
});
```
**Generated handlers location:** `src/app/api/__generated__/endpoints/*/` - each endpoint has handlers for different status codes.
---
## Golden Rules
1. **Test behavior, not implementation** - Query by role/text, not class names
2. **One assertion per concept** - Tests should be focused
3. **Mock at boundaries** - Mock API calls, not internal functions
4. **Co-locate integration tests** - Keep `__tests__/` folder next to the component
5. **E2E is expensive** - Only for critical happy paths; prefer integration tests
6. **AI agents are good at writing integration tests** - Start with these when adding test coverage

View File

@@ -1,25 +0,0 @@
import { vi } from "vitest";
const mockSupabaseClient = {
auth: {
getUser: vi.fn().mockResolvedValue({
data: { user: null },
error: null,
}),
getSession: vi.fn().mockResolvedValue({
data: { session: null },
error: null,
}),
signOut: vi.fn().mockResolvedValue({ error: null }),
refreshSession: vi.fn().mockResolvedValue({
data: { session: null, user: null },
error: null,
}),
},
};
export const mockSupabaseRequest = () => {
vi.mock("@/lib/supabase/server/getServerSupabase", () => ({
getServerSupabase: vi.fn().mockResolvedValue(mockSupabaseClient),
}));
};

View File

@@ -1,63 +0,0 @@
import { vi } from "vitest";
export const mockNextjsModules = () => {
vi.mock("next/image", () => ({
__esModule: true,
default: ({
fill: _fill,
priority: _priority,
quality: _quality,
placeholder: _placeholder,
blurDataURL: _blurDataURL,
loader: _loader,
...props
}: any) => {
// eslint-disable-next-line jsx-a11y/alt-text, @next/next/no-img-element
return <img {...props} />;
},
}));
vi.mock("next/headers", () => ({
cookies: vi.fn(() => ({
get: vi.fn(() => undefined),
getAll: vi.fn(() => []),
set: vi.fn(),
delete: vi.fn(),
has: vi.fn(() => false),
})),
headers: vi.fn(() => new Headers()),
}));
vi.mock("next/dist/server/request/cookies", () => ({
cookies: vi.fn(() => ({
get: vi.fn(() => undefined),
getAll: vi.fn(() => []),
set: vi.fn(),
delete: vi.fn(),
has: vi.fn(() => false),
})),
}));
vi.mock("next/navigation", () => ({
useRouter: () => ({
push: vi.fn(),
replace: vi.fn(),
prefetch: vi.fn(),
back: vi.fn(),
forward: vi.fn(),
refresh: vi.fn(),
}),
usePathname: () => "/marketplace",
useSearchParams: () => new URLSearchParams(),
useParams: () => ({}),
}));
vi.mock("next/link", () => ({
__esModule: true,
default: ({ children, href, ...props }: any) => (
<a href={href} {...props}>
{children}
</a>
),
}));
};

View File

@@ -1,36 +0,0 @@
import { BackendAPIProvider } from "@/lib/autogpt-server-api/context";
import OnboardingProvider from "@/providers/onboarding/onboarding-provider";
import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
import { render, RenderOptions } from "@testing-library/react";
import { ReactElement, ReactNode } from "react";
function createTestQueryClient() {
return new QueryClient({
defaultOptions: {
queries: {
retry: false,
},
},
});
}
function TestProviders({ children }: { children: ReactNode }) {
const queryClient = createTestQueryClient();
return (
<QueryClientProvider client={queryClient}>
<BackendAPIProvider>
<OnboardingProvider>{children}</OnboardingProvider>
</BackendAPIProvider>
</QueryClientProvider>
);
}
function customRender(
ui: ReactElement,
options?: Omit<RenderOptions, "wrapper">,
) {
return render(ui, { wrapper: TestProviders, ...options });
}
export * from "@testing-library/react";
export { customRender as render };

View File

@@ -1,12 +0,0 @@
import { beforeAll, afterAll, afterEach } from "vitest";
import { server } from "@/mocks/mock-server";
import { mockNextjsModules } from "./setup-nextjs-mocks";
import { mockSupabaseRequest } from "./mock-supabase-request";
beforeAll(() => {
mockNextjsModules();
mockSupabaseRequest(); // If you need user's data - please mock supabase actions in your specific test - it sends null user [It's only to avoid cookies() call]
return server.listen({ onUnhandledRequest: "error" });
});
afterEach(() => server.resetHandlers());
afterAll(() => server.close());

View File

@@ -4,7 +4,6 @@ import { LoginPage } from "./pages/login.page";
import { MarketplacePage } from "./pages/marketplace.page";
import { hasMinCount, hasUrl, isVisible, matchesUrl } from "./utils/assertion";
// Marketplace tests for store agent search functionality
test.describe("Marketplace Basic Functionality", () => {
test("User can access marketplace page when logged out", async ({ page }) => {
const marketplacePage = new MarketplacePage(page);

View File

@@ -1,12 +0,0 @@
import { defineConfig } from "vitest/config";
import react from "@vitejs/plugin-react";
import tsconfigPaths from "vite-tsconfig-paths";
export default defineConfig({
plugins: [tsconfigPaths(), react()],
test: {
environment: "happy-dom",
include: ["src/**/*.test.tsx"],
setupFiles: ["./src/tests/integrations/vitest.setup.tsx"],
},
});