Compare commits


3 Commits

Author SHA1 Message Date
Bentlybro
1ed748a356 refactor(backend): revert selective COPY, keep cleanup approach
Address review feedback: keep COPY --from=builder /app /app to avoid
maintenance burden of selective copies. The builder cleanup step still
removes __pycache__, test dirs, pip/poetry caches for size reduction.

Added clarifying comment about --only main referencing the development
docs (dev deps are installed locally, not in production images).
2026-01-31 19:56:21 +00:00
Bentlybro
9c28639c32 fix: address review feedback — keep setuptools, remove redundant mkdir, add comments
- Keep setuptools in cleanup (it's a direct dependency, used by aioclamd
  via pkg_resources at runtime)
- Remove redundant mkdir -p commands (COPY already creates dirs)
- Add clarifying comments for the autogpt_libs double-copy pattern
- Use || true instead of trailing ; true for cleaner error handling
2026-01-31 18:44:15 +00:00
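
The setuptools note above is worth spelling out: pkg_resources is shipped as part of the setuptools distribution, so stripping setuptools from site-packages breaks any runtime lookup that goes through it. A minimal Python sketch of the failure mode, assuming aioclamd (or any dependency) resolves metadata via pkg_resources (the exact call it makes is an assumption here):

# Sketch: why setuptools must survive the image cleanup.
# pkg_resources lives inside the setuptools distribution, so removing
# setuptools from the venv makes this import fail at container runtime.
try:
    import pkg_resources  # provided by setuptools
except ImportError:
    raise SystemExit("setuptools was stripped; pkg_resources is unavailable")

# A typical runtime metadata lookup of the kind the commit describes
# (aioclamd's actual usage is assumed, not verified against its source):
version = pkg_resources.get_distribution("setuptools").version
print(f"setuptools {version}")
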
Bentlybro
4f37a12743 docker: optimize backend image size — reduce ~862MB COPY layer
- Install only main dependencies (skip dev deps like pytest, black, ruff)
- Clean up build artifacts, caches, and unnecessary packages
- Replace wholesale COPY with selective copying of required files
- Add --no-cache-dir to pip install

This reduces the bloated 862MB layer from COPY --from=builder /app /app
by only copying what's actually needed at runtime: virtualenv, libs,
schema, and Prisma-generated types. All 7 backend services benefit.
2026-01-31 18:29:09 +00:00
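
One way to check a layer-size claim like the ~862MB reduction is to list per-layer sizes before and after the change. A small helper sketch, assuming a local docker CLI; the image tag is illustrative, not the project's real tag:

# Hypothetical helper: prints the size and creating instruction of each
# image layer, so the COPY --from=builder layer can be compared across builds.
import subprocess

def layer_sizes(image: str) -> list[str]:
    result = subprocess.run(
        ["docker", "history", "--format", "{{.Size}}\t{{.CreatedBy}}", image],
        capture_output=True, text=True, check=True,
    )
    return result.stdout.splitlines()

for line in layer_sizes("backend:latest"):  # assumed tag
    print(line)
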
19 changed files with 73 additions and 189 deletions

View File

@@ -37,13 +37,15 @@ ENV POETRY_VIRTUALENVS_CREATE=true
 ENV POETRY_VIRTUALENVS_IN_PROJECT=true
 ENV PATH=/opt/poetry/bin:$PATH
 
-RUN pip3 install poetry --break-system-packages
+RUN pip3 install --no-cache-dir poetry --break-system-packages
 
 # Copy and install dependencies
 COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
 COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/
 WORKDIR /app/autogpt_platform/backend
-RUN poetry install --no-ansi --no-root
+# Production image only needs runtime deps; dev deps (pytest, black, ruff, etc.)
+# are installed locally via `poetry install --with dev` per the development docs
+RUN poetry install --no-ansi --no-root --only main
 
 # Generate Prisma client
 COPY autogpt_platform/backend/schema.prisma ./
@@ -51,6 +53,15 @@ COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/parti
 COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
 RUN poetry run prisma generate && poetry run gen-prisma-stub
 
+# Clean up build artifacts and caches to reduce layer size
+# Note: setuptools is kept as it's a direct dependency (used by aioclamd via pkg_resources)
+RUN find /app -type d -name __pycache__ -exec rm -rf {} + 2>/dev/null || true; \
+    find /app -type d -name tests -exec rm -rf {} + 2>/dev/null || true; \
+    find /app -type d -name test -exec rm -rf {} + 2>/dev/null || true; \
+    rm -rf /app/autogpt_platform/backend/.venv/lib/python*/site-packages/pip* \
+        /root/.cache/pip \
+        /root/.cache/pypoetry
+
 FROM debian:13-slim AS server_dependencies
 WORKDIR /app
@@ -68,7 +79,7 @@ RUN apt-get update && apt-get install -y \
     python3-pip \
     && rm -rf /var/lib/apt/lists/*
 
-# Copy only necessary files from builder
+# Copy built artifacts from builder (cleaned of caches, __pycache__, and test dirs)
 COPY --from=builder /app /app
 COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3*
 COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry
@@ -81,9 +92,7 @@ COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-pyth
 ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"
 
-RUN mkdir -p /app/autogpt_platform/autogpt_libs
-RUN mkdir -p /app/autogpt_platform/backend
+# Copy fresh source from context (overwrites builder's copy with latest source)
 COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
 COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/

View File

@@ -14,7 +14,6 @@ from backend.data.graph import (
     create_graph,
     get_graph,
     get_graph_all_versions,
-    get_store_listed_graphs,
 )
 from backend.util.exceptions import DatabaseError, NotFoundError
@@ -267,18 +266,18 @@ async def get_library_agents_for_generation(
 async def search_marketplace_agents_for_generation(
     search_query: str,
     max_results: int = 10,
-) -> list[LibraryAgentSummary]:
+) -> list[MarketplaceAgentSummary]:
     """Search marketplace agents formatted for Agent Generator.
 
-    Fetches marketplace agents and their full schemas so they can be used
-    as sub-agents in generated workflows.
+    Note: This returns basic agent info. Full input/output schemas would require
+    additional graph fetches and is a potential future enhancement.
 
     Args:
         search_query: Search term to find relevant public agents
         max_results: Maximum number of agents to return (default 10)
 
     Returns:
-        List of LibraryAgentSummary with full input/output schemas
+        List of MarketplaceAgentSummary (without detailed schemas for now)
     """
     try:
         response = await store_db.get_store_agents(
@@ -287,31 +286,17 @@ async def search_marketplace_agents_for_generation(
             page_size=max_results,
         )
 
-        agents_with_graphs = [
-            agent for agent in response.agents if agent.agent_graph_id
-        ]
-
-        if not agents_with_graphs:
-            return []
-
-        graph_ids = [agent.agent_graph_id for agent in agents_with_graphs]
-        graphs = await get_store_listed_graphs(*graph_ids)
-
-        results: list[LibraryAgentSummary] = []
-        for agent in agents_with_graphs:
-            graph_id = agent.agent_graph_id
-            if graph_id and graph_id in graphs:
-                graph = graphs[graph_id]
-                results.append(
-                    LibraryAgentSummary(
-                        graph_id=graph.id,
-                        graph_version=graph.version,
-                        name=agent.agent_name,
-                        description=agent.description,
-                        input_schema=graph.input_schema,
-                        output_schema=graph.output_schema,
-                    )
-                )
+        results: list[MarketplaceAgentSummary] = []
+        for agent in response.agents:
+            results.append(
+                MarketplaceAgentSummary(
+                    name=agent.agent_name,
+                    description=agent.description,
+                    sub_heading=agent.sub_heading,
+                    creator=agent.creator,
+                    is_marketplace_agent=True,
+                )
+            )
 
         return results
     except Exception as e:
         logger.warning(f"Failed to search marketplace agents: {e}")
@@ -342,7 +327,8 @@ async def get_all_relevant_agents_for_generation(
         max_marketplace_results: Max marketplace agents to return (default 10)
 
     Returns:
-        List of AgentSummary with full schemas (both library and marketplace agents)
+        List of AgentSummary, library agents first (with full schemas),
+        then marketplace agents (basic info only)
     """
     agents: list[AgentSummary] = []
     seen_graph_ids: set[str] = set()
@@ -379,11 +365,16 @@ async def get_all_relevant_agents_for_generation(
             search_query=search_query,
             max_results=max_marketplace_results,
         )
+        library_names: set[str] = set()
+        for a in agents:
+            name = a.get("name")
+            if name and isinstance(name, str):
+                library_names.add(name.lower())
         for agent in marketplace_agents:
-            graph_id = agent.get("graph_id")
-            if graph_id and graph_id not in seen_graph_ids:
-                agents.append(agent)
-                seen_graph_ids.add(graph_id)
+            agent_name = agent.get("name")
+            if agent_name and isinstance(agent_name, str):
+                if agent_name.lower() not in library_names:
+                    agents.append(agent)
 
     return agents
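
Since marketplace results no longer carry graph IDs, the hunk above switches deduplication to case-insensitive names. A self-contained sketch of that merge logic, with plain dicts standing in for the real summary types (shapes mirror the test fixtures, not the production models):

# Standalone sketch of the name-based dedup shown in the diff above.
def merge_agents(library: list[dict], marketplace: list[dict]) -> list[dict]:
    merged = list(library)
    # Collect library agent names once, lowercased for case-insensitive matching
    library_names = {
        a["name"].lower() for a in library if isinstance(a.get("name"), str)
    }
    for agent in marketplace:
        name = agent.get("name")
        if isinstance(name, str) and name.lower() not in library_names:
            merged.append(agent)
    return merged

library = [{"name": "Shared Agent", "graph_id": "lib-123"}]
marketplace = [{"name": "Shared Agent"}, {"name": "Unique Agent"}]
assert [a["name"] for a in merge_agents(library, marketplace)] == [
    "Shared Agent",
    "Unique Agent",
]
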

View File

@@ -112,7 +112,6 @@ async def get_store_agents(
                 description=agent["description"],
                 runs=agent["runs"],
                 rating=agent["rating"],
-                agent_graph_id=agent.get("agentGraphId", ""),
             )
             store_agents.append(store_agent)
         except Exception as e:
@@ -171,7 +170,6 @@ async def get_store_agents(
                 description=agent.description,
                 runs=agent.runs,
                 rating=agent.rating,
-                agent_graph_id=agent.agentGraphId,
             )
             # Add to the list only if creation was successful
             store_agents.append(store_agent)

View File

@@ -600,7 +600,6 @@ async def hybrid_search(
                 sa.featured,
                 sa.is_available,
                 sa.updated_at,
-                sa."agentGraphId",
                 -- Searchable text for BM25 reranking
                 COALESCE(sa.agent_name, '') || ' ' || COALESCE(sa.sub_heading, '') || ' ' || COALESCE(sa.description, '') as searchable_text,
                 -- Semantic score
@@ -660,7 +659,6 @@ async def hybrid_search(
                 featured,
                 is_available,
                 updated_at,
-                "agentGraphId",
                 searchable_text,
                 semantic_score,
                 lexical_score,

View File

@@ -38,7 +38,6 @@ class StoreAgent(pydantic.BaseModel):
     description: str
     runs: int
     rating: float
-    agent_graph_id: str
 
 
 class StoreAgentsResponse(pydantic.BaseModel):

View File

@@ -26,13 +26,11 @@ def test_store_agent():
         description="Test description",
         runs=50,
         rating=4.5,
-        agent_graph_id="test-graph-id",
     )
 
     assert agent.slug == "test-agent"
     assert agent.agent_name == "Test Agent"
     assert agent.runs == 50
     assert agent.rating == 4.5
-    assert agent.agent_graph_id == "test-graph-id"
 
 
 def test_store_agents_response():
@@ -48,7 +46,6 @@ def test_store_agents_response():
                 description="Test description",
                 runs=50,
                 rating=4.5,
-                agent_graph_id="test-graph-id",
             )
         ],
         pagination=store_model.Pagination(

View File

@@ -82,7 +82,6 @@ def test_get_agents_featured(
                 description="Featured agent description",
                 runs=100,
                 rating=4.5,
-                agent_graph_id="test-graph-1",
             )
         ],
         pagination=store_model.Pagination(
@@ -128,7 +127,6 @@ def test_get_agents_by_creator(
                 description="Creator agent description",
                 runs=50,
                 rating=4.0,
-                agent_graph_id="test-graph-2",
             )
         ],
         pagination=store_model.Pagination(
@@ -174,7 +172,6 @@ def test_get_agents_sorted(
                 description="Top agent description",
                 runs=1000,
                 rating=5.0,
-                agent_graph_id="test-graph-3",
             )
         ],
         pagination=store_model.Pagination(
@@ -220,7 +217,6 @@ def test_get_agents_search(
                 description="Specific search term description",
                 runs=75,
                 rating=4.2,
-                agent_graph_id="test-graph-search",
             )
         ],
         pagination=store_model.Pagination(
@@ -266,7 +262,6 @@ def test_get_agents_category(
                 description="Category agent description",
                 runs=60,
                 rating=4.1,
-                agent_graph_id="test-graph-category",
             )
         ],
         pagination=store_model.Pagination(
@@ -311,7 +306,6 @@ def test_get_agents_pagination(
                 description=f"Agent {i} description",
                 runs=i * 10,
                 rating=4.0,
-                agent_graph_id="test-graph-2",
             )
             for i in range(5)
         ],

View File

@@ -33,7 +33,6 @@ class TestCacheDeletion:
                     description="Test description",
                     runs=100,
                     rating=4.5,
-                    agent_graph_id="test-graph-id",
                 )
             ],
             pagination=Pagination(

View File

@@ -1028,39 +1028,6 @@ async def get_graph(
     return GraphModel.from_db(graph, for_export)
 
 
-async def get_store_listed_graphs(*graph_ids: str) -> dict[str, GraphModel]:
-    """Batch-fetch multiple store-listed graphs by their IDs.
-
-    Only returns graphs that have approved store listings (publicly available).
-    Does not require permission checks since store-listed graphs are public.
-
-    Args:
-        *graph_ids: Variable number of graph IDs to fetch
-
-    Returns:
-        Dict mapping graph_id to GraphModel for graphs with approved store listings
-    """
-    if not graph_ids:
-        return {}
-
-    store_listings = await StoreListingVersion.prisma().find_many(
-        where={
-            "agentGraphId": {"in": list(graph_ids)},
-            "submissionStatus": SubmissionStatus.APPROVED,
-            "isDeleted": False,
-        },
-        include={"AgentGraph": {"include": AGENT_GRAPH_INCLUDE}},
-        distinct=["agentGraphId"],
-        order={"agentGraphVersion": "desc"},
-    )
-
-    return {
-        listing.agentGraphId: GraphModel.from_db(listing.AgentGraph)
-        for listing in store_listings
-        if listing.AgentGraph
-    }
-
-
 async def get_graph_as_admin(
     graph_id: str,
     version: int | None = None,

View File

@@ -1,39 +0,0 @@
-from urllib.parse import urlparse
-
-import fastapi
-from fastapi.routing import APIRoute
-
-from backend.api.features.integrations.router import router as integrations_router
-from backend.integrations.providers import ProviderName
-from backend.integrations.webhooks import utils as webhooks_utils
-
-
-def test_webhook_ingress_url_matches_route(monkeypatch) -> None:
-    app = fastapi.FastAPI()
-    app.include_router(integrations_router, prefix="/api/integrations")
-
-    provider = ProviderName.GITHUB
-    webhook_id = "webhook_123"
-    base_url = "https://example.com"
-    monkeypatch.setattr(webhooks_utils.app_config, "platform_base_url", base_url)
-
-    route = next(
-        route
-        for route in integrations_router.routes
-        if isinstance(route, APIRoute)
-        and route.path == "/{provider}/webhooks/{webhook_id}/ingress"
-        and "POST" in route.methods
-    )
-    expected_path = f"/api/integrations{route.path}".format(
-        provider=provider.value,
-        webhook_id=webhook_id,
-    )
-
-    actual_url = urlparse(webhooks_utils.webhook_ingress_url(provider, webhook_id))
-    expected_base = urlparse(base_url)
-
-    assert (actual_url.scheme, actual_url.netloc) == (
-        expected_base.scheme,
-        expected_base.netloc,
-    )
-    assert actual_url.path == expected_path

View File

@@ -9,8 +9,7 @@
       "sub_heading": "Creator agent subheading",
       "description": "Creator agent description",
       "runs": 50,
-      "rating": 4.0,
-      "agent_graph_id": "test-graph-2"
+      "rating": 4.0
     }
   ],
   "pagination": {

View File

@@ -9,8 +9,7 @@
      "sub_heading": "Category agent subheading",
      "description": "Category agent description",
      "runs": 60,
-      "rating": 4.1,
-      "agent_graph_id": "test-graph-category"
+      "rating": 4.1
    }
  ],
  "pagination": {

View File

@@ -9,8 +9,7 @@
       "sub_heading": "Agent 0 subheading",
       "description": "Agent 0 description",
       "runs": 0,
-      "rating": 4.0,
-      "agent_graph_id": "test-graph-2"
+      "rating": 4.0
     },
     {
       "slug": "agent-1",
@@ -21,8 +20,7 @@
       "sub_heading": "Agent 1 subheading",
       "description": "Agent 1 description",
       "runs": 10,
-      "rating": 4.0,
-      "agent_graph_id": "test-graph-2"
+      "rating": 4.0
     },
     {
       "slug": "agent-2",
@@ -33,8 +31,7 @@
       "sub_heading": "Agent 2 subheading",
       "description": "Agent 2 description",
       "runs": 20,
-      "rating": 4.0,
-      "agent_graph_id": "test-graph-2"
+      "rating": 4.0
     },
     {
       "slug": "agent-3",
@@ -45,8 +42,7 @@
       "sub_heading": "Agent 3 subheading",
       "description": "Agent 3 description",
       "runs": 30,
-      "rating": 4.0,
-      "agent_graph_id": "test-graph-2"
+      "rating": 4.0
     },
     {
       "slug": "agent-4",
@@ -57,8 +53,7 @@
       "sub_heading": "Agent 4 subheading",
       "description": "Agent 4 description",
       "runs": 40,
-      "rating": 4.0,
-      "agent_graph_id": "test-graph-2"
+      "rating": 4.0
     }
   ],
   "pagination": {

View File

@@ -9,8 +9,7 @@
      "sub_heading": "Search agent subheading",
      "description": "Specific search term description",
      "runs": 75,
-      "rating": 4.2,
-      "agent_graph_id": "test-graph-search"
+      "rating": 4.2
    }
  ],
  "pagination": {

View File

@@ -9,8 +9,7 @@
      "sub_heading": "Top agent subheading",
      "description": "Top agent description",
      "runs": 1000,
-      "rating": 5.0,
-      "agent_graph_id": "test-graph-3"
+      "rating": 5.0
    }
  ],
  "pagination": {

View File

@@ -9,8 +9,7 @@
      "sub_heading": "Featured agent subheading",
      "description": "Featured agent description",
      "runs": 100,
-      "rating": 4.5,
-      "agent_graph_id": "test-graph-1"
+      "rating": 4.5
    }
  ],
  "pagination": {

View File

@@ -134,28 +134,15 @@ class TestSearchMarketplaceAgentsForGeneration:
                 description="A public agent",
                 sub_heading="Does something useful",
                 creator="creator-1",
-                agent_graph_id="graph-123",
             )
         ]
 
-        mock_graph = MagicMock()
-        mock_graph.id = "graph-123"
-        mock_graph.version = 1
-        mock_graph.input_schema = {"type": "object"}
-        mock_graph.output_schema = {"type": "object"}
-
-        with (
-            patch(
-                "backend.api.features.store.db.get_store_agents",
-                new_callable=AsyncMock,
-                return_value=mock_response,
-            ) as mock_search,
-            patch(
-                "backend.api.features.chat.tools.agent_generator.core.get_store_listed_graphs",
-                new_callable=AsyncMock,
-                return_value={"graph-123": mock_graph},
-            ),
-        ):
+        # The store_db is dynamically imported, so patch the import path
+        with patch(
+            "backend.api.features.store.db.get_store_agents",
+            new_callable=AsyncMock,
+            return_value=mock_response,
+        ) as mock_search:
             result = await core.search_marketplace_agents_for_generation(
                 search_query="automation",
                 max_results=10,
@@ -169,7 +156,7 @@ class TestSearchMarketplaceAgentsForGeneration:
         assert len(result) == 1
         assert result[0]["name"] == "Public Agent"
-        assert result[0]["graph_id"] == "graph-123"
+        assert result[0]["is_marketplace_agent"] is True
 
     @pytest.mark.asyncio
     async def test_handles_marketplace_error_gracefully(self):
@@ -206,12 +193,11 @@ class TestGetAllRelevantAgentsForGeneration:
         marketplace_agents = [
             {
-                "graph_id": "market-456",
-                "graph_version": 1,
                 "name": "Market Agent",
                 "description": "From marketplace",
-                "input_schema": {},
-                "output_schema": {},
+                "sub_heading": "Sub heading",
+                "creator": "creator-1",
+                "is_marketplace_agent": True,
             }
         ]
@@ -239,11 +225,11 @@ class TestGetAllRelevantAgentsForGeneration:
         assert result[1]["name"] == "Market Agent"
 
     @pytest.mark.asyncio
-    async def test_deduplicates_by_graph_id(self):
-        """Test that marketplace agents with same graph_id as library are excluded."""
+    async def test_deduplicates_by_name(self):
+        """Test that marketplace agents with same name as library are excluded."""
         library_agents = [
             {
-                "graph_id": "shared-123",
+                "graph_id": "lib-123",
                 "graph_version": 1,
                 "name": "Shared Agent",
                 "description": "From library",
@@ -254,20 +240,18 @@ class TestGetAllRelevantAgentsForGeneration:
         marketplace_agents = [
             {
-                "graph_id": "shared-123",  # Same graph_id, should be deduplicated
-                "graph_version": 1,
-                "name": "Shared Agent",
+                "name": "Shared Agent",  # Same name, should be deduplicated
                 "description": "From marketplace",
-                "input_schema": {},
-                "output_schema": {},
+                "sub_heading": "Sub heading",
+                "creator": "creator-1",
+                "is_marketplace_agent": True,
             },
             {
-                "graph_id": "unique-456",
-                "graph_version": 1,
                 "name": "Unique Agent",
                 "description": "Only in marketplace",
-                "input_schema": {},
-                "output_schema": {},
+                "sub_heading": "Sub heading",
+                "creator": "creator-2",
+                "is_marketplace_agent": True,
            },
         ]
@@ -289,7 +273,7 @@ class TestGetAllRelevantAgentsForGeneration:
             include_marketplace=True,
         )
 
-        # Shared Agent from marketplace should be excluded by graph_id
+        # Shared Agent from marketplace should be excluded
        assert len(result) == 2
        names = [a["name"] for a in result]
        assert "Shared Agent" in names
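
The "store_db is dynamically imported" comment in the updated test reflects a general unittest.mock rule: patch the name in the module where it is looked up at call time, not where it was originally defined. A runnable sketch of the pattern, using a stand-in module registered in sys.modules so it works outside the repo (the real target would be backend.api.features.store.db):

# Sketch of patching a lazily imported async function at its lookup path.
import asyncio
import sys
import types
from unittest.mock import AsyncMock, patch

# Stand-in for the dynamically imported module
store_db = types.ModuleType("store_db")

async def _real_get_store_agents(search_query: str, page_size: int):
    raise RuntimeError("would hit the real database")

store_db.get_store_agents = _real_get_store_agents
sys.modules["store_db"] = store_db

async def search(query: str):
    import store_db  # dynamic import, like the code under test
    return await store_db.get_store_agents(search_query=query, page_size=10)

async def demo():
    with patch(
        "store_db.get_store_agents", new_callable=AsyncMock, return_value=[]
    ) as mock_search:
        assert await search("automation") == []
        mock_search.assert_awaited_once()

asyncio.run(demo())
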

View File

@@ -9833,8 +9833,7 @@
          "sub_heading": { "type": "string", "title": "Sub Heading" },
          "description": { "type": "string", "title": "Description" },
          "runs": { "type": "integer", "title": "Runs" },
-          "rating": { "type": "number", "title": "Rating" },
-          "agent_graph_id": { "type": "string", "title": "Agent Graph Id" }
+          "rating": { "type": "number", "title": "Rating" }
        },
        "type": "object",
        "required": [
@@ -9846,8 +9845,7 @@
          "sub_heading",
          "description",
          "runs",
-          "rating",
-          "agent_graph_id"
+          "rating"
        ],
        "title": "StoreAgent"
      },

View File

@@ -124,4 +124,3 @@ test("user can signup with existing email handling", async ({
     console.error("❌ Duplicate email handling test failed:", error);
   }
 });
-