Compare commits


2 Commits

Author SHA1 Message Date
Zamil Majdy
5d0cd88d98 fix(backend): Use unqualified vector type for pgvector queries (#11818)
## Summary
- Remove explicit schema qualification (`{schema}.vector` and
`OPERATOR({schema}.<=>)`) from pgvector queries in `embeddings.py` and
`hybrid_search.py`
- Use the unqualified `::vector` type cast and `<=>` operator, which work
because pgvector's schema is on the `search_path` in all environments

## Problem
The previous approach tried to explicitly qualify the vector type with
schema names, but this failed because:
- **CI environment**: pgvector is in `public` schema → `platform.vector`
doesn't exist
- **Dev (Supabase)**: pgvector is in `platform` schema → `public.vector`
doesn't exist

## Solution
Use the unqualified `::vector` cast and `<=>` operator. PostgreSQL resolves
these via `search_path`, which includes the schema where pgvector is
installed in every environment.

Tested on both local and dev environments with a test script that
verified:
- Unqualified `::vector` type cast
- Unqualified `<=>` operator in ORDER BY
- Unqualified `<=>` in SELECT (similarity calculation)
- Combined query patterns matching actual usage
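The sketch below shows the kind of check involved (illustrative only, not the
actual script; it assumes `asyncpg` is available and a Postgres database with
the pgvector extension enabled):

```python
# Sketch only: verify that unqualified pgvector syntax resolves via search_path.
# Assumes asyncpg is installed and the DSN points at a database with the
# pgvector extension enabled; not part of the repository.
import asyncio

import asyncpg


async def check_unqualified_vector(dsn: str) -> None:
    conn = await asyncpg.connect(dsn)
    try:
        # search_path determines which schema the bare "vector" type and the
        # <=> operator are looked up in.
        print("search_path:", await conn.fetchval("SHOW search_path"))

        # Same pattern as the fixed queries: unqualified ::vector cast and <=>.
        similarity = await conn.fetchval(
            "SELECT 1 - ('[1,0,0]'::vector <=> '[0,1,0]'::vector)"
        )
        print("cosine similarity:", similarity)  # 0.0 for orthogonal vectors
    finally:
        await conn.close()


if __name__ == "__main__":
    asyncio.run(check_unqualified_vector("postgresql://localhost:5432/postgres"))
```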

## Test plan
- [ ] CI tests pass
- [ ] Marketplace approval works on dev after deployment

Fixes: AUTOGPT-SERVER-763, AUTOGPT-SERVER-764, AUTOGPT-SERVER-76B
2026-01-21 18:11:58 +00:00
Zamil Majdy
033f58c075 fix(backend): Make Redis event bus gracefully handle connection failures (#11817)
## Summary
Adds graceful error handling to `AsyncRedisEventBus` and `RedisEventBus` so
that connection failures are logged with a full traceback instead of
propagating. This allows DatabaseManager to operate without Redis
connectivity.

## Problem
DatabaseManager was failing with "Authentication required" when trying
to publish notifications via AsyncRedisNotificationEventBus. The service
has no Redis credentials configured, causing `increment_onboarding_runs`
to fail.

## Root Cause
When `increment_onboarding_runs` publishes a notification:
1. Calls `AsyncRedisNotificationEventBus().publish()`
2. Attempts to connect to Redis via `get_redis_async()`
3. Connection fails due to missing credentials
4. Exception propagates, failing the entire DB operation

The previous fix (#11775) made the cache module lazy, but didn't address the
notification bus, which also requires Redis.

## Solution
Wrap Redis operations in try-except blocks:
- `publish_event`: Logs exception with traceback, continues without
publishing
- `listen_events`: Logs exception with traceback, returns empty
generator
- `wait_for_event`: Returns None on connection failure

Using `logger.exception()` instead of `logger.warning()` ensures full
stack traces are captured for debugging while keeping operations
non-breaking.

This allows services to operate without Redis when only using the event bus
for non-critical notifications.
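
For illustration, a caller-side sketch (the `OnboardingEvent`,
`OnboardingEventBus`, and `increment_runs_and_notify` names below are
hypothetical, not part of this PR): a database-layer operation can fire a
best-effort notification without wrapping the publish call itself, because the
bus now absorbs connection failures:

```python
# Hypothetical caller-side sketch: the bus subclass and function below are
# illustrative, not from this PR. The point is that publish_event no longer
# raises on Redis connection failures, so the DB result is still returned.
from pydantic import BaseModel

from backend.data.event_bus import AsyncRedisEventBus


class OnboardingEvent(BaseModel):  # illustrative event model
    user_id: str
    runs: int


class OnboardingEventBus(AsyncRedisEventBus[OnboardingEvent]):  # illustrative bus
    Model = OnboardingEvent

    @property
    def event_bus_name(self) -> str:
        return "onboarding_events"


async def increment_runs_and_notify(user_id: str, runs: int) -> int:
    # ... perform the real database update here ...
    # Best-effort notification: if Redis is unreachable, the event bus logs
    # the exception and execution continues.
    await OnboardingEventBus().publish_event(
        OnboardingEvent(user_id=user_id, runs=runs), user_id
    )
    return runs
```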

## Changes
- Modified `backend/data/event_bus.py`:
  - Added graceful error handling to `RedisEventBus` and `AsyncRedisEventBus`
  - All Redis operations now catch exceptions and log with `logger.exception()`
- Added `backend/data/event_bus_test.py`:
  - Tests verify graceful degradation when Redis is unavailable
  - Tests verify normal operation when Redis is available

## Test Plan
- [x] New tests verify graceful degradation when Redis unavailable
- [x] Existing notification tests still pass
- [x] DatabaseManager can increment onboarding runs without Redis

## Related Issues
Fixes https://significant-gravitas.sentry.io/issues/7205834440/
(AUTOGPT-SERVER-76D)
2026-01-21 15:51:26 +00:00
25 changed files with 243 additions and 1560 deletions

View File

@@ -154,16 +154,16 @@ async def store_content_embedding(
# Upsert the embedding
# WHERE clause in DO UPDATE prevents PostgreSQL 15 bug with NULLS NOT DISTINCT
# Use {pgvector_schema}.vector for explicit pgvector type qualification
# Use unqualified ::vector - pgvector is in search_path on all environments
await execute_raw_with_schema(
"""
INSERT INTO {schema_prefix}"UnifiedContentEmbedding" (
"id", "contentType", "contentId", "userId", "embedding", "searchableText", "metadata", "createdAt", "updatedAt"
)
VALUES (gen_random_uuid()::text, $1::{schema_prefix}"ContentType", $2, $3, $4::{pgvector_schema}.vector, $5, $6::jsonb, NOW(), NOW())
VALUES (gen_random_uuid()::text, $1::{schema_prefix}"ContentType", $2, $3, $4::vector, $5, $6::jsonb, NOW(), NOW())
ON CONFLICT ("contentType", "contentId", "userId")
DO UPDATE SET
"embedding" = $4::{pgvector_schema}.vector,
"embedding" = $4::vector,
"searchableText" = $5,
"metadata" = $6::jsonb,
"updatedAt" = NOW()
@@ -879,8 +879,7 @@ async def semantic_search(
min_similarity_idx = len(params) + 1
params.append(min_similarity)
# Use regular string (not f-string) for template to preserve {schema_prefix} and {schema} placeholders
# Use OPERATOR({pgvector_schema}.<=>) for explicit operator schema qualification
# Use unqualified ::vector and <=> operator - pgvector is in search_path on all environments
sql = (
"""
SELECT
@@ -888,9 +887,9 @@ async def semantic_search(
"contentType" as content_type,
"searchableText" as searchable_text,
metadata,
1 - (embedding OPERATOR({pgvector_schema}.<=>) '"""
1 - (embedding <=> '"""
+ embedding_str
+ """'::{pgvector_schema}.vector) as similarity
+ """'::vector) as similarity
FROM {schema_prefix}"UnifiedContentEmbedding"
WHERE "contentType" IN ("""
+ content_type_placeholders
@@ -898,9 +897,9 @@ async def semantic_search(
"""
+ user_filter
+ """
AND 1 - (embedding OPERATOR({pgvector_schema}.<=>) '"""
AND 1 - (embedding <=> '"""
+ embedding_str
+ """'::{pgvector_schema}.vector) >= $"""
+ """'::vector) >= $"""
+ str(min_similarity_idx)
+ """
ORDER BY similarity DESC

View File

@@ -295,7 +295,7 @@ async def unified_hybrid_search(
FROM {{schema_prefix}}"UnifiedContentEmbedding" uce
WHERE uce."contentType" = ANY({content_types_param}::{{schema_prefix}}"ContentType"[])
{user_filter}
ORDER BY uce.embedding OPERATOR({{pgvector_schema}}.<=>) {embedding_param}::{{pgvector_schema}}.vector
ORDER BY uce.embedding <=> {embedding_param}::vector
LIMIT 200
)
),
@@ -307,7 +307,7 @@ async def unified_hybrid_search(
uce.metadata,
uce."updatedAt" as updated_at,
-- Semantic score: cosine similarity (1 - distance)
COALESCE(1 - (uce.embedding OPERATOR({{pgvector_schema}}.<=>) {embedding_param}::{{pgvector_schema}}.vector), 0) as semantic_score,
COALESCE(1 - (uce.embedding <=> {embedding_param}::vector), 0) as semantic_score,
-- Lexical score: ts_rank_cd
COALESCE(ts_rank_cd(uce.search, plainto_tsquery('english', {query_param})), 0) as lexical_raw,
-- Category match from metadata
@@ -583,7 +583,7 @@ async def hybrid_search(
WHERE uce."contentType" = 'STORE_AGENT'::{{schema_prefix}}"ContentType"
AND uce."userId" IS NULL
AND {where_clause}
ORDER BY uce.embedding OPERATOR({{pgvector_schema}}.<=>) {embedding_param}::{{pgvector_schema}}.vector
ORDER BY uce.embedding <=> {embedding_param}::vector
LIMIT 200
) uce
),
@@ -605,7 +605,7 @@ async def hybrid_search(
-- Searchable text for BM25 reranking
COALESCE(sa.agent_name, '') || ' ' || COALESCE(sa.sub_heading, '') || ' ' || COALESCE(sa.description, '') as searchable_text,
-- Semantic score
COALESCE(1 - (uce.embedding OPERATOR({{pgvector_schema}}.<=>) {embedding_param}::{{pgvector_schema}}.vector), 0) as semantic_score,
COALESCE(1 - (uce.embedding <=> {embedding_param}::vector), 0) as semantic_score,
-- Lexical score (raw, will normalize)
COALESCE(ts_rank_cd(uce.search, plainto_tsquery('english', {query_param})), 0) as lexical_raw,
-- Category match

View File

@@ -121,10 +121,14 @@ async def _raw_with_schema(
Supports placeholders:
- {schema_prefix}: Table/type prefix (e.g., "platform".)
- {schema}: Raw schema name for application tables (e.g., platform)
- {pgvector_schema}: Schema where pgvector is installed (defaults to "public")
Note on pgvector types:
Use unqualified ::vector and <=> operator in queries. PostgreSQL resolves
these via search_path, which includes the schema where pgvector is installed
on all environments (local, CI, dev).
Args:
query_template: SQL query with {schema_prefix}, {schema}, and/or {pgvector_schema} placeholders
query_template: SQL query with {schema_prefix} and/or {schema} placeholders
*args: Query parameters
execute: If False, executes SELECT query. If True, executes INSERT/UPDATE/DELETE.
client: Optional Prisma client for transactions (only used when execute=True).
@@ -135,20 +139,16 @@ async def _raw_with_schema(
Example with vector type:
await execute_raw_with_schema(
'INSERT INTO {schema_prefix}"Embedding" (vec) VALUES ($1::{pgvector_schema}.vector)',
'INSERT INTO {schema_prefix}"Embedding" (vec) VALUES ($1::vector)',
embedding_data
)
"""
schema = get_database_schema()
schema_prefix = f'"{schema}".' if schema != "public" else ""
# pgvector extension is typically installed in "public" schema
# On Supabase it may be in "extensions" but "public" is the common default
pgvector_schema = "public"
formatted_query = query_template.format(
schema_prefix=schema_prefix,
schema=schema,
pgvector_schema=pgvector_schema,
)
import prisma as prisma_module

View File

@@ -103,8 +103,18 @@ class RedisEventBus(BaseRedisEventBus[M], ABC):
return redis.get_redis()
def publish_event(self, event: M, channel_key: str):
message, full_channel_name = self._serialize_message(event, channel_key)
self.connection.publish(full_channel_name, message)
"""
Publish an event to Redis. Gracefully handles connection failures
by logging the error instead of raising exceptions.
"""
try:
message, full_channel_name = self._serialize_message(event, channel_key)
self.connection.publish(full_channel_name, message)
except Exception:
logger.exception(
f"Failed to publish event to Redis channel {channel_key}. "
"Event bus operation will continue without Redis connectivity."
)
def listen_events(self, channel_key: str) -> Generator[M, None, None]:
pubsub, full_channel_name = self._get_pubsub_channel(
@@ -128,9 +138,19 @@ class AsyncRedisEventBus(BaseRedisEventBus[M], ABC):
return await redis.get_redis_async()
async def publish_event(self, event: M, channel_key: str):
message, full_channel_name = self._serialize_message(event, channel_key)
connection = await self.connection
await connection.publish(full_channel_name, message)
"""
Publish an event to Redis. Gracefully handles connection failures
by logging the error instead of raising exceptions.
"""
try:
message, full_channel_name = self._serialize_message(event, channel_key)
connection = await self.connection
await connection.publish(full_channel_name, message)
except Exception:
logger.exception(
f"Failed to publish event to Redis channel {channel_key}. "
"Event bus operation will continue without Redis connectivity."
)
async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
pubsub, full_channel_name = self._get_pubsub_channel(

View File

@@ -0,0 +1,56 @@
"""
Tests for event_bus graceful degradation when Redis is unavailable.
"""
from unittest.mock import AsyncMock, patch
import pytest
from pydantic import BaseModel
from backend.data.event_bus import AsyncRedisEventBus
class TestEvent(BaseModel):
"""Test event model."""
message: str
class TestNotificationBus(AsyncRedisEventBus[TestEvent]):
"""Test implementation of AsyncRedisEventBus."""
Model = TestEvent
@property
def event_bus_name(self) -> str:
return "test_event_bus"
@pytest.mark.asyncio
async def test_publish_event_handles_connection_failure_gracefully():
"""Test that publish_event logs exception instead of raising when Redis is unavailable."""
bus = TestNotificationBus()
event = TestEvent(message="test message")
# Mock get_redis_async to raise connection error
with patch(
"backend.data.event_bus.redis.get_redis_async",
side_effect=ConnectionError("Authentication required."),
):
# Should not raise exception
await bus.publish_event(event, "test_channel")
@pytest.mark.asyncio
async def test_publish_event_works_with_redis_available():
"""Test that publish_event works normally when Redis is available."""
bus = TestNotificationBus()
event = TestEvent(message="test message")
# Mock successful Redis connection
mock_redis = AsyncMock()
mock_redis.publish = AsyncMock()
with patch("backend.data.event_bus.redis.get_redis_async", return_value=mock_redis):
await bus.publish_event(event, "test_channel")
mock_redis.publish.assert_called_once()

View File

@@ -81,6 +81,8 @@ class ExecutionContext(BaseModel):
This includes information needed by blocks, sub-graphs, and execution management.
"""
model_config = {"extra": "ignore"}
human_in_the_loop_safe_mode: bool = True
sensitive_action_safe_mode: bool = False
user_timezone: str = "UTC"

View File

@@ -64,6 +64,8 @@ logger = logging.getLogger(__name__)
class GraphSettings(BaseModel):
# Use Annotated with BeforeValidator to coerce None to default values.
# This handles cases where the database has null values for these fields.
model_config = {"extra": "ignore"}
human_in_the_loop_safe_mode: Annotated[
bool, BeforeValidator(lambda v: v if v is not None else True)
] = True

View File

@@ -1,9 +1,10 @@
-- CreateExtension
-- Supabase: pgvector must be enabled via Dashboard → Database → Extensions first
-- Create in public schema so vector type is available across all schemas
-- Creates extension in current schema (determined by search_path from DATABASE_URL ?schema= param)
-- This ensures vector type is in the same schema as tables, making ::vector work without explicit qualification
DO $$
BEGIN
CREATE EXTENSION IF NOT EXISTS "vector" WITH SCHEMA "public";
CREATE EXTENSION IF NOT EXISTS "vector";
EXCEPTION WHEN OTHERS THEN
RAISE NOTICE 'vector extension not available or already exists, skipping';
END $$;
@@ -19,7 +20,7 @@ CREATE TABLE "UnifiedContentEmbedding" (
"contentType" "ContentType" NOT NULL,
"contentId" TEXT NOT NULL,
"userId" TEXT,
"embedding" public.vector(1536) NOT NULL,
"embedding" vector(1536) NOT NULL,
"searchableText" TEXT NOT NULL,
"metadata" JSONB NOT NULL DEFAULT '{}',
@@ -45,4 +46,4 @@ CREATE UNIQUE INDEX "UnifiedContentEmbedding_contentType_contentId_userId_key" O
-- Uses cosine distance operator (<=>), which matches the query in hybrid_search.py
-- Note: Drop first in case Prisma created a btree index (Prisma doesn't support HNSW)
DROP INDEX IF EXISTS "UnifiedContentEmbedding_embedding_idx";
CREATE INDEX "UnifiedContentEmbedding_embedding_idx" ON "UnifiedContentEmbedding" USING hnsw ("embedding" public.vector_cosine_ops);
CREATE INDEX "UnifiedContentEmbedding_embedding_idx" ON "UnifiedContentEmbedding" USING hnsw ("embedding" vector_cosine_ops);

View File

@@ -366,12 +366,12 @@ def generate_block_markdown(
lines.append("")
# What it is (full description)
lines.append(f"### What it is")
lines.append("### What it is")
lines.append(block.description or "No description available.")
lines.append("")
# How it works (manual section)
lines.append(f"### How it works")
lines.append("### How it works")
how_it_works = manual_content.get(
"how_it_works", "_Add technical explanation here._"
)
@@ -383,7 +383,7 @@ def generate_block_markdown(
# Inputs table (auto-generated)
visible_inputs = [f for f in block.inputs if not f.hidden]
if visible_inputs:
lines.append(f"### Inputs")
lines.append("### Inputs")
lines.append("")
lines.append("| Input | Description | Type | Required |")
lines.append("|-------|-------------|------|----------|")
@@ -400,7 +400,7 @@ def generate_block_markdown(
# Outputs table (auto-generated)
visible_outputs = [f for f in block.outputs if not f.hidden]
if visible_outputs:
lines.append(f"### Outputs")
lines.append("### Outputs")
lines.append("")
lines.append("| Output | Description | Type |")
lines.append("|--------|-------------|------|")
@@ -414,7 +414,7 @@ def generate_block_markdown(
lines.append("")
# Possible use case (manual section)
lines.append(f"### Possible use case")
lines.append("### Possible use case")
use_case = manual_content.get("use_case", "_Add practical use case examples here._")
lines.append("<!-- MANUAL: use_case -->")
lines.append(use_case)

View File

@@ -29,4 +29,4 @@ NEXT_PUBLIC_CLOUDFLARE_TURNSTILE_SITE_KEY=
NEXT_PUBLIC_TURNSTILE=disabled
# PR previews
NEXT_PUBLIC_PREVIEW_STEALING_DEV=
NEXT_PUBLIC_PREVIEW_STEALING_DEV=

View File

@@ -16,12 +16,6 @@ export default defineConfig({
client: "react-query",
httpClient: "fetch",
indexFiles: false,
mock: {
type: "msw",
baseUrl: "http://localhost:3000/api/proxy",
generateEachHttpStatus: true,
delay: 0,
},
override: {
mutator: {
path: "./mutators/custom-mutator.ts",

View File

@@ -15,8 +15,6 @@
"types": "tsc --noEmit",
"test": "NEXT_PUBLIC_PW_TEST=true next build --turbo && playwright test",
"test-ui": "NEXT_PUBLIC_PW_TEST=true next build --turbo && playwright test --ui",
"test:unit": "vitest run",
"test:unit:watch": "vitest",
"test:no-build": "playwright test",
"gentests": "playwright codegen http://localhost:3000",
"storybook": "storybook dev -p 6006",
@@ -120,7 +118,6 @@
},
"devDependencies": {
"@chromatic-com/storybook": "4.1.2",
"happy-dom": "20.3.4",
"@opentelemetry/instrumentation": "0.209.0",
"@playwright/test": "1.56.1",
"@storybook/addon-a11y": "9.1.5",
@@ -130,8 +127,6 @@
"@storybook/nextjs": "9.1.5",
"@tanstack/eslint-plugin-query": "5.91.2",
"@tanstack/react-query-devtools": "5.90.2",
"@testing-library/dom": "10.4.1",
"@testing-library/react": "16.3.2",
"@types/canvas-confetti": "1.9.0",
"@types/lodash": "4.17.20",
"@types/negotiator": "0.6.4",
@@ -140,7 +135,6 @@
"@types/react-dom": "18.3.5",
"@types/react-modal": "3.16.3",
"@types/react-window": "1.8.8",
"@vitejs/plugin-react": "5.1.2",
"axe-playwright": "2.2.2",
"chromatic": "13.3.3",
"concurrently": "9.2.1",
@@ -159,9 +153,7 @@
"require-in-the-middle": "8.0.1",
"storybook": "9.1.5",
"tailwindcss": "3.4.17",
"typescript": "5.9.3",
"vite-tsconfig-paths": "6.0.4",
"vitest": "4.0.17"
"typescript": "5.9.3"
},
"msw": {
"workerDirectory": [

File diff suppressed because it is too large.

View File

@@ -0,0 +1,81 @@
// import { render, screen } from "@testing-library/react";
// import { describe, expect, it } from "vitest";
// import { Badge } from "./Badge";
// describe("Badge Component", () => {
// it("renders badge with content", () => {
// render(<Badge variant="success">Success</Badge>);
// expect(screen.getByText("Success")).toBeInTheDocument();
// });
// it("applies correct variant styles", () => {
// const { rerender } = render(<Badge variant="success">Success</Badge>);
// let badge = screen.getByText("Success");
// expect(badge).toHaveClass("bg-green-100", "text-green-800");
// rerender(<Badge variant="error">Error</Badge>);
// badge = screen.getByText("Error");
// expect(badge).toHaveClass("bg-red-100", "text-red-800");
// rerender(<Badge variant="info">Info</Badge>);
// badge = screen.getByText("Info");
// expect(badge).toHaveClass("bg-slate-100", "text-slate-800");
// });
// it("applies custom className", () => {
// render(
// <Badge variant="success" className="custom-class">
// Success
// </Badge>,
// );
// const badge = screen.getByText("Success");
// expect(badge).toHaveClass("custom-class");
// });
// it("renders as span element", () => {
// render(<Badge variant="success">Success</Badge>);
// const badge = screen.getByText("Success");
// expect(badge.tagName).toBe("SPAN");
// });
// it("renders children correctly", () => {
// render(
// <Badge variant="success">
// <span>Custom</span> Content
// </Badge>,
// );
// expect(screen.getByText("Custom")).toBeInTheDocument();
// expect(screen.getByText("Content")).toBeInTheDocument();
// });
// it("supports all badge variants", () => {
// const variants = ["success", "error", "info"] as const;
// variants.forEach((variant) => {
// const { unmount } = render(
// <Badge variant={variant} data-testid={`badge-${variant}`}>
// {variant}
// </Badge>,
// );
// expect(screen.getByTestId(`badge-${variant}`)).toBeInTheDocument();
// unmount();
// });
// });
// it("handles long text content", () => {
// render(
// <Badge variant="info">
// Very long text that should be handled properly by the component
// </Badge>,
// );
// const badge = screen.getByText(/Very long text/);
// expect(badge).toBeInTheDocument();
// expect(badge).toHaveClass("overflow-hidden", "text-ellipsis");
// });
// });

View File

@@ -1,13 +0,0 @@
// We are not using this for tests because Vitest runs our tests in a Node.js environment.
// However, we can use it for development purposes to test our UI in the browser with fake data.
export async function initMocks() {
if (typeof window === "undefined") {
const { server } = await import("./mock-server");
server.listen({ onUnhandledRequest: "bypass" });
console.log("[MSW] Server mock initialized");
} else {
const { worker } = await import("./mock-browser");
await worker.start({ onUnhandledRequest: "bypass" });
console.log("[MSW] Browser mock initialized");
}
}

View File

@@ -1,4 +0,0 @@
import { setupWorker } from "msw/browser";
import { mockHandlers } from "./mock-handlers";
export const worker = setupWorker(...mockHandlers);

View File

@@ -1,48 +0,0 @@
import { getAdminMock } from "@/app/api/__generated__/endpoints/admin/admin.msw";
import { getAnalyticsMock } from "@/app/api/__generated__/endpoints/analytics/analytics.msw";
import { getApiKeysMock } from "@/app/api/__generated__/endpoints/api-keys/api-keys.msw";
import { getAuthMock } from "@/app/api/__generated__/endpoints/auth/auth.msw";
import { getBlocksMock } from "@/app/api/__generated__/endpoints/blocks/blocks.msw";
import { getChatMock } from "@/app/api/__generated__/endpoints/chat/chat.msw";
import { getCreditsMock } from "@/app/api/__generated__/endpoints/credits/credits.msw";
import { getDefaultMock } from "@/app/api/__generated__/endpoints/default/default.msw";
import { getEmailMock } from "@/app/api/__generated__/endpoints/email/email.msw";
import { getExecutionsMock } from "@/app/api/__generated__/endpoints/executions/executions.msw";
import { getFilesMock } from "@/app/api/__generated__/endpoints/files/files.msw";
import { getGraphsMock } from "@/app/api/__generated__/endpoints/graphs/graphs.msw";
import { getHealthMock } from "@/app/api/__generated__/endpoints/health/health.msw";
import { getIntegrationsMock } from "@/app/api/__generated__/endpoints/integrations/integrations.msw";
import { getLibraryMock } from "@/app/api/__generated__/endpoints/library/library.msw";
import { getMetricsMock } from "@/app/api/__generated__/endpoints/metrics/metrics.msw";
import { getOauthMock } from "@/app/api/__generated__/endpoints/oauth/oauth.msw";
import { getOnboardingMock } from "@/app/api/__generated__/endpoints/onboarding/onboarding.msw";
import { getOttoMock } from "@/app/api/__generated__/endpoints/otto/otto.msw";
import { getPresetsMock } from "@/app/api/__generated__/endpoints/presets/presets.msw";
import { getSchedulesMock } from "@/app/api/__generated__/endpoints/schedules/schedules.msw";
import { getStoreMock } from "@/app/api/__generated__/endpoints/store/store.msw";
// Pass hard-coded data to individual handler functions to override faker-generated data.
export const mockHandlers = [
...getAdminMock(),
...getAnalyticsMock(),
...getApiKeysMock(),
...getAuthMock(),
...getBlocksMock(),
...getChatMock(),
...getCreditsMock(),
...getDefaultMock(),
...getEmailMock(),
...getExecutionsMock(),
...getFilesMock(),
...getGraphsMock(),
...getHealthMock(),
...getIntegrationsMock(),
...getLibraryMock(),
...getMetricsMock(),
...getOauthMock(),
...getOnboardingMock(),
...getOttoMock(),
...getPresetsMock(),
...getSchedulesMock(),
...getStoreMock(),
];

View File

@@ -1,4 +0,0 @@
import { setupServer } from "msw/node";
import { mockHandlers } from "./mock-handlers";
export const server = setupServer(...mockHandlers);

View File

@@ -1,220 +0,0 @@
# Frontend Testing Rules 🧪
## Testing Types Overview
| Type | Tool | Speed | Purpose |
| --------------- | --------------------- | --------------- | -------------------------------- |
| **E2E** | Playwright | Slow (~5s/test) | Real browser, full user journeys |
| **Integration** | Vitest + RTL | Fast (~100ms) | Component + mocked API |
| **Unit** | Vitest + RTL | Fastest (~10ms) | Individual functions/components |
| **Visual** | Storybook + Chromatic | N/A | UI appearance, design system |
---
## When to Use Each
### ✅ E2E Tests (Playwright)
**Use for:** Critical user journeys that MUST work in a real browser.
- Authentication flows (login, signup, logout)
- Payment or sensitive transactions
- Flows requiring real browser APIs (clipboard, downloads)
- Cross-page navigation that must work end-to-end
**Location:** `src/tests/*.spec.ts` (centralized, as there will be fewer of them)
### ✅ Integration Tests (Vitest + RTL)
**Use for:** Testing components with their dependencies (API calls, state).
- Page-level behavior with mocked API responses
- Components that fetch data
- User interactions that trigger API calls
- Feature flows within a single page
**Location:** Place tests in a `__tests__` folder next to the component:
```
ComponentName/
__tests__/
main.test.tsx
some-flow.test.tsx
ComponentName.tsx
useComponentName.ts
```
**Start at page level:** Initially write integration tests at the "page" level. No need to write them for every small component.
```
/library/
__tests__/
main.test.tsx
searching-agents.test.tsx
agents-pagination.test.tsx
page.tsx
useLibraryPage.ts
```
Start with a `main.test.tsx` file and split into smaller files as it grows.
**What integration tests should do:**
1. Render a page or complex modal (e.g., `AgentPublishModal`)
2. Mock API requests via MSW
3. Assert UI scenarios via Testing Library
```tsx
// Example: Test page renders data from API
import { server } from "@/mocks/mock-server";
import { getDeleteV2DeleteStoreSubmissionMockHandler422 } from "@/app/api/__generated__/endpoints/store/store.msw";
test("shows error when submission fails", async () => {
// Override default handler to return error status
server.use(getDeleteV2DeleteStoreSubmissionMockHandler422());
render(<MarketplacePage />);
await screen.findByText("Featured Agents");
// ... assert error UI
});
```
**Tip:** Use `findBy...` methods most of the time—they wait for elements to appear, so async code won't cause flaky tests. The regular `getBy...` methods don't wait and error immediately.
### ✅ Unit Tests (Vitest + RTL)
**Use for:** Testing isolated components and utility functions.
- Pure utility functions (`lib/utils.ts`)
- Component rendering with various props
- Component state changes
- Custom hooks
**Location:** Co-located with the file: `Component.test.tsx` next to `Component.tsx`
```tsx
// Example: Test component renders correctly
render(<AgentCard title="My Agent" />);
expect(screen.getByText("My Agent")).toBeInTheDocument();
```
### ✅ Storybook Tests (Visual)
**Use for:** Design system, visual appearance, component documentation.
- Atoms (Button, Input, Badge)
- Molecules (Dialog, Card)
- Visual states (hover, disabled, loading)
- Responsive layouts
**Location:** Co-located: `Component.stories.tsx` next to `Component.tsx`
---
## Decision Flowchart
```
Does it need a REAL browser/backend?
├─ YES → E2E (Playwright)
└─ NO
└─ Does it involve API calls or complex state?
├─ YES → Integration (Vitest + RTL)
└─ NO
└─ Is it about visual appearance?
├─ YES → Storybook
└─ NO → Unit (Vitest + RTL)
```
---
## What NOT to Test
❌ Third-party library internals (Radix UI, React Query)
❌ CSS styling details (use Storybook)
❌ Simple prop-passing components with no logic
❌ TypeScript types
---
## File Organization
```
src/
├── components/
│ └── atoms/
│ └── Button/
│ ├── Button.tsx
│ ├── Button.test.tsx # Unit test
│ └── Button.stories.tsx # Visual test
├── app/
│ └── (platform)/
│ └── marketplace/
│ └── components/
│ └── MainMarketplacePage/
│ ├── __tests__/
│ │ ├── main.test.tsx # Integration test
│ │ └── search-agents.test.tsx # Integration test
│ ├── MainMarketplacePage.tsx
│ └── useMainMarketplacePage.ts
├── lib/
│ ├── utils.ts
│ └── utils.test.ts # Unit test
├── mocks/
│ ├── mock-handlers.ts # MSW handlers (auto-generated via Orval)
│ └── mock-server.ts # MSW server setup
└── tests/
├── integrations/
│ ├── test-utils.tsx # Testing utilities
│ └── vitest.setup.tsx # Integration test setup
└── *.spec.ts # E2E tests (Playwright) - centralized
```
---
## Priority Matrix
| Component Type | Test Priority | Recommended Test |
| ------------------- | ------------- | ---------------- |
| Pages/Features | **Highest** | Integration |
| Custom Hooks | High | Unit |
| Utility Functions | High | Unit |
| Organisms (complex) | High | Integration |
| Molecules | Medium | Unit + Storybook |
| Atoms | Medium | Storybook only\* |
\*Atoms are typically simple enough that Storybook visual tests suffice.
---
## MSW Mocking
API mocking is handled via MSW (Mock Service Worker). Handlers are auto-generated by Orval from the OpenAPI schema.
**Default behavior:** All client-side requests are intercepted and return 200 status with faker-generated data.
**Override for specific tests:** Use generated error handlers to test non-OK status scenarios:
```tsx
import { server } from "@/mocks/mock-server";
import { getDeleteV2DeleteStoreSubmissionMockHandler422 } from "@/app/api/__generated__/endpoints/store/store.msw";
test("shows error when deletion fails", async () => {
server.use(getDeleteV2DeleteStoreSubmissionMockHandler422());
render(<MyComponent />);
// ... assert error UI
});
```
**Generated handlers location:** `src/app/api/__generated__/endpoints/*/` - each endpoint has handlers for different status codes.
---
## Golden Rules
1. **Test behavior, not implementation** - Query by role/text, not class names
2. **One assertion per concept** - Tests should be focused
3. **Mock at boundaries** - Mock API calls, not internal functions
4. **Co-locate integration tests** - Keep `__tests__/` folder next to the component
5. **E2E is expensive** - Only for critical happy paths; prefer integration tests
6. **AI agents are good at writing integration tests** - Start with these when adding test coverage

View File

@@ -1,25 +0,0 @@
import { vi } from "vitest";
const mockSupabaseClient = {
auth: {
getUser: vi.fn().mockResolvedValue({
data: { user: null },
error: null,
}),
getSession: vi.fn().mockResolvedValue({
data: { session: null },
error: null,
}),
signOut: vi.fn().mockResolvedValue({ error: null }),
refreshSession: vi.fn().mockResolvedValue({
data: { session: null, user: null },
error: null,
}),
},
};
export const mockSupabaseRequest = () => {
vi.mock("@/lib/supabase/server/getServerSupabase", () => ({
getServerSupabase: vi.fn().mockResolvedValue(mockSupabaseClient),
}));
};

View File

@@ -1,63 +0,0 @@
import { vi } from "vitest";
export const mockNextjsModules = () => {
vi.mock("next/image", () => ({
__esModule: true,
default: ({
fill: _fill,
priority: _priority,
quality: _quality,
placeholder: _placeholder,
blurDataURL: _blurDataURL,
loader: _loader,
...props
}: any) => {
// eslint-disable-next-line jsx-a11y/alt-text, @next/next/no-img-element
return <img {...props} />;
},
}));
vi.mock("next/headers", () => ({
cookies: vi.fn(() => ({
get: vi.fn(() => undefined),
getAll: vi.fn(() => []),
set: vi.fn(),
delete: vi.fn(),
has: vi.fn(() => false),
})),
headers: vi.fn(() => new Headers()),
}));
vi.mock("next/dist/server/request/cookies", () => ({
cookies: vi.fn(() => ({
get: vi.fn(() => undefined),
getAll: vi.fn(() => []),
set: vi.fn(),
delete: vi.fn(),
has: vi.fn(() => false),
})),
}));
vi.mock("next/navigation", () => ({
useRouter: () => ({
push: vi.fn(),
replace: vi.fn(),
prefetch: vi.fn(),
back: vi.fn(),
forward: vi.fn(),
refresh: vi.fn(),
}),
usePathname: () => "/marketplace",
useSearchParams: () => new URLSearchParams(),
useParams: () => ({}),
}));
vi.mock("next/link", () => ({
__esModule: true,
default: ({ children, href, ...props }: any) => (
<a href={href} {...props}>
{children}
</a>
),
}));
};

View File

@@ -1,36 +0,0 @@
import { BackendAPIProvider } from "@/lib/autogpt-server-api/context";
import OnboardingProvider from "@/providers/onboarding/onboarding-provider";
import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
import { render, RenderOptions } from "@testing-library/react";
import { ReactElement, ReactNode } from "react";
function createTestQueryClient() {
return new QueryClient({
defaultOptions: {
queries: {
retry: false,
},
},
});
}
function TestProviders({ children }: { children: ReactNode }) {
const queryClient = createTestQueryClient();
return (
<QueryClientProvider client={queryClient}>
<BackendAPIProvider>
<OnboardingProvider>{children}</OnboardingProvider>
</BackendAPIProvider>
</QueryClientProvider>
);
}
function customRender(
ui: ReactElement,
options?: Omit<RenderOptions, "wrapper">,
) {
return render(ui, { wrapper: TestProviders, ...options });
}
export * from "@testing-library/react";
export { customRender as render };

View File

@@ -1,12 +0,0 @@
import { beforeAll, afterAll, afterEach } from "vitest";
import { server } from "@/mocks/mock-server";
import { mockNextjsModules } from "./setup-nextjs-mocks";
import { mockSupabaseRequest } from "./mock-supabase-request";
beforeAll(() => {
mockNextjsModules();
mockSupabaseRequest(); // If you need user's data - please mock supabase actions in your specific test - it sends null user [It's only to avoid cookies() call]
return server.listen({ onUnhandledRequest: "error" });
});
afterEach(() => server.resetHandlers());
afterAll(() => server.close());

View File

@@ -4,6 +4,7 @@ import { LoginPage } from "./pages/login.page";
import { MarketplacePage } from "./pages/marketplace.page";
import { hasMinCount, hasUrl, isVisible, matchesUrl } from "./utils/assertion";
// Marketplace tests for store agent search functionality
test.describe("Marketplace Basic Functionality", () => {
test("User can access marketplace page when logged out", async ({ page }) => {
const marketplacePage = new MarketplacePage(page);

View File

@@ -1,12 +0,0 @@
import { defineConfig } from "vitest/config";
import react from "@vitejs/plugin-react";
import tsconfigPaths from "vite-tsconfig-paths";
export default defineConfig({
plugins: [tsconfigPaths(), react()],
test: {
environment: "happy-dom",
include: ["src/**/*.test.tsx"],
setupFiles: ["./src/tests/integrations/vitest.setup.tsx"],
},
});