Compare commits

...

3 Commits

Author SHA1 Message Date
Bentlybro
73651d7f38 fix(backend): Handle stale RabbitMQ channels on connection drop
When the RabbitMQ connection drops, connect_robust reconnects the
underlying connection but the cached channel object becomes invalid.
The is_ready check passes (channel doesn't report as closed) but
operations fail with ChannelInvalidStateError ('No active transport
in channel'), generating ~39K Sentry errors (AUTOGPT-SERVER-1TN).

Fix:
- Add _ensure_channel() helper that resets stale channels before
  reconnecting, so connect() creates a fresh channel
- Catch ChannelInvalidStateError in publish_message and do a full
  reconnect (reset both channel and connection) before retrying
- Simplify get_channel() to use the same resilient helper

The existing @func_retry decorator provides additional retry
resilience on top of the explicit reconnect handling.
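
A condensed sketch of the pattern this commit describes, for orientation only; the real implementation (including `@func_retry`, exchange lookup, and configuration wiring) is in the diff further down, and the connection/`is_ready` details here are simplified assumptions:

```python
import aio_pika


class StalenessAwarePublisher:
    """Illustrative only: mirrors the stale-channel handling described above."""

    def __init__(self, url: str):
        self._url = url
        self._connection: aio_pika.abc.AbstractRobustConnection | None = None
        self._channel: aio_pika.abc.AbstractChannel | None = None

    @property
    def is_ready(self) -> bool:
        # Note: this can report True even when the channel's transport is gone,
        # which is exactly the failure mode the commit addresses.
        return bool(
            self._connection and not self._connection.is_closed
            and self._channel and not self._channel.is_closed
        )

    async def connect(self) -> None:
        if self._connection is None or self._connection.is_closed:
            self._connection = await aio_pika.connect_robust(self._url)
        if self._channel is None:
            self._channel = await self._connection.channel()

    async def _ensure_channel(self) -> aio_pika.abc.AbstractChannel:
        if not self.is_ready:
            self._channel = None  # drop the stale channel so connect() recreates it
            await self.connect()
        assert self._channel is not None
        return self._channel

    async def publish(self, body: bytes, routing_key: str) -> None:
        channel = await self._ensure_channel()
        try:
            await channel.default_exchange.publish(
                aio_pika.Message(body=body), routing_key=routing_key
            )
        except aio_pika.exceptions.ChannelInvalidStateError:
            # Channel object outlived its transport: rebuild connection and channel.
            self._channel = None
            self._connection = None
            channel = await self._ensure_channel()
            await channel.default_exchange.publish(
                aio_pika.Message(body=body), routing_key=routing_key
            )
```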
2026-02-02 15:10:25 +00:00
Guofang.Tang
1081590384 feat(backend): cover webhook ingress URL route (#11747)
### Changes 🏗️

- Add a unit test to verify webhook ingress URL generation matches the FastAPI route.

### Checklist 📋

#### For code changes:

- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  - [x] `poetry run pytest backend/integrations/webhooks/utils_test.py --confcutdir=backend/integrations/webhooks`

#### For configuration changes:

- [x] .env.default is updated or already compatible with my changes
- [x] docker-compose.yml is updated or already compatible with my changes
- [x] I have included a list of my configuration changes in the PR description (under Changes)



## Summary by CodeRabbit

* **Tests**
  * Added a unit test that validates webhook ingress URL generation matches the application's resolved route (scheme, host, and path) for provider-specific webhook endpoints, improving confidence in routing behavior and helping prevent regressions.


---------

Co-authored-by: Reinier van der Leer <pwuts@agpt.co>
2026-02-01 20:29:15 +00:00
Otto
7e37de8e30 fix: Include graph schemas for marketplace agents in Agent Generator (#11920)
## Problem

When marketplace agents were included in the `library_agents` payload sent to the Agent Generator service, they were missing the required fields (`graph_id`, `graph_version`, `input_schema`, `output_schema`), so Pydantic validation failed with HTTP 422 Unprocessable Entity (illustrated below).

**Root cause:** The `MarketplaceAgentSummary` TypedDict had a different
shape than `LibraryAgentInfo` expected by the Agent Generator:
- Agent Generator expects: `graph_id`, `graph_version`, `name`,
`description`, `input_schema`, `output_schema`
- MarketplaceAgentSummary had: `name`, `description`, `sub_heading`,
`creator`, `is_marketplace_agent`
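
To make the failure concrete, here is a minimal sketch with a simplified stand-in model (field names taken from the list above; the real `LibraryAgentInfo` lives in the Agent Generator service and is not reproduced here):

```python
from pydantic import BaseModel, ValidationError


class LibraryAgentInfo(BaseModel):  # simplified stand-in, not the real class
    graph_id: str
    graph_version: int
    name: str
    description: str
    input_schema: dict
    output_schema: dict


# Shape that MarketplaceAgentSummary used to produce
marketplace_shaped = {
    "name": "Public Agent",
    "description": "A public agent",
    "sub_heading": "Does something useful",
    "creator": "creator-1",
    "is_marketplace_agent": True,
}

try:
    LibraryAgentInfo(**marketplace_shaped)
except ValidationError as exc:
    # graph_id, graph_version, input_schema and output_schema are reported as
    # missing; this is what surfaces as HTTP 422 at the service boundary.
    print(len(exc.errors()), "validation errors")
```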

## Solution

1. **Add `agent_graph_id` to `StoreAgent` model** - The field was
already in the database view but not exposed
2. **Include `agentGraphId` in hybrid search SQL query** - Carry the
field through the search CTEs
3. **Update `search_marketplace_agents_for_generation()`** - Now fetches full graph schemas via `get_store_listed_graphs()` and returns `LibraryAgentSummary` (same type as library agents)
4. **Update deduplication logic** - Use `graph_id` instead of name for more accurate deduplication (see the sketch after this list)
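
The deduplication rule in step 4 boils down to keying on `graph_id`, with library agents taking precedence. A minimal sketch with plain dicts (the real code operates on the generated summary types, not raw dicts):

```python
def merge_agents(library: list[dict], marketplace: list[dict]) -> list[dict]:
    """Keep every library agent, then add marketplace agents with an unseen graph_id."""
    merged: list[dict] = []
    seen_graph_ids: set[str] = set()
    for agent in library + marketplace:
        graph_id = agent.get("graph_id")
        if graph_id and graph_id not in seen_graph_ids:
            merged.append(agent)
            seen_graph_ids.add(graph_id)
    return merged


library = [{"graph_id": "shared-123", "name": "Shared Agent"}]
marketplace = [
    {"graph_id": "shared-123", "name": "Shared Agent"},  # duplicate, dropped
    {"graph_id": "unique-456", "name": "Unique Agent"},
]
assert [a["graph_id"] for a in merge_agents(library, marketplace)] == [
    "shared-123",
    "unique-456",
]
```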

## Changes

- `backend/api/features/store/model.py`: Add optional `agent_graph_id`
field to `StoreAgent`
- `backend/api/features/store/hybrid_search.py`: Include `agentGraphId`
in SQL query columns
- `backend/api/features/store/db.py`: Map `agentGraphId` when creating
`StoreAgent` objects
- `backend/api/features/chat/tools/agent_generator/core.py`: Update
`search_marketplace_agents_for_generation()` to fetch and include full
graph schemas

## Testing

- [ ] Agent creation on dev with marketplace agents in context
- [ ] Verify no 422 errors from Agent Generator
- [ ] Verify marketplace agents can be used as sub-agents

Fixes: SECRT-1817

---------

Co-authored-by: majdyz <majdyz@users.noreply.github.com>
Co-authored-by: Zamil Majdy <zamil.majdy@agpt.co>
2026-01-31 19:17:36 +00:00
18 changed files with 242 additions and 80 deletions

View File

@@ -14,6 +14,7 @@ from backend.data.graph import (
create_graph,
get_graph,
get_graph_all_versions,
get_store_listed_graphs,
)
from backend.util.exceptions import DatabaseError, NotFoundError
@@ -266,18 +267,18 @@ async def get_library_agents_for_generation(
async def search_marketplace_agents_for_generation(
search_query: str,
max_results: int = 10,
) -> list[MarketplaceAgentSummary]:
) -> list[LibraryAgentSummary]:
"""Search marketplace agents formatted for Agent Generator.
Note: This returns basic agent info. Full input/output schemas would require
additional graph fetches and is a potential future enhancement.
Fetches marketplace agents and their full schemas so they can be used
as sub-agents in generated workflows.
Args:
search_query: Search term to find relevant public agents
max_results: Maximum number of agents to return (default 10)
Returns:
List of MarketplaceAgentSummary (without detailed schemas for now)
List of LibraryAgentSummary with full input/output schemas
"""
try:
response = await store_db.get_store_agents(
@@ -286,17 +287,31 @@ async def search_marketplace_agents_for_generation(
            page_size=max_results,
        )
        results: list[MarketplaceAgentSummary] = []
        for agent in response.agents:
            results.append(
                MarketplaceAgentSummary(
                    name=agent.agent_name,
                    description=agent.description,
                    sub_heading=agent.sub_heading,
                    creator=agent.creator,
                    is_marketplace_agent=True,
                )
            )
        agents_with_graphs = [
            agent for agent in response.agents if agent.agent_graph_id
        ]
        if not agents_with_graphs:
            return []
        graph_ids = [agent.agent_graph_id for agent in agents_with_graphs]
        graphs = await get_store_listed_graphs(*graph_ids)
        results: list[LibraryAgentSummary] = []
        for agent in agents_with_graphs:
            graph_id = agent.agent_graph_id
            if graph_id and graph_id in graphs:
                graph = graphs[graph_id]
                results.append(
                    LibraryAgentSummary(
                        graph_id=graph.id,
                        graph_version=graph.version,
                        name=agent.agent_name,
                        description=agent.description,
                        input_schema=graph.input_schema,
                        output_schema=graph.output_schema,
                    )
                )
        return results
    except Exception as e:
        logger.warning(f"Failed to search marketplace agents: {e}")
@@ -327,8 +342,7 @@ async def get_all_relevant_agents_for_generation(
max_marketplace_results: Max marketplace agents to return (default 10)
Returns:
List of AgentSummary, library agents first (with full schemas),
then marketplace agents (basic info only)
List of AgentSummary with full schemas (both library and marketplace agents)
"""
agents: list[AgentSummary] = []
seen_graph_ids: set[str] = set()
@@ -365,16 +379,11 @@ async def get_all_relevant_agents_for_generation(
search_query=search_query,
max_results=max_marketplace_results,
)
library_names: set[str] = set()
for a in agents:
name = a.get("name")
if name and isinstance(name, str):
library_names.add(name.lower())
for agent in marketplace_agents:
agent_name = agent.get("name")
if agent_name and isinstance(agent_name, str):
if agent_name.lower() not in library_names:
agents.append(agent)
graph_id = agent.get("graph_id")
if graph_id and graph_id not in seen_graph_ids:
agents.append(agent)
seen_graph_ids.add(graph_id)
return agents

View File

@@ -112,6 +112,7 @@ async def get_store_agents(
description=agent["description"],
runs=agent["runs"],
rating=agent["rating"],
agent_graph_id=agent.get("agentGraphId", ""),
)
store_agents.append(store_agent)
except Exception as e:
@@ -170,6 +171,7 @@ async def get_store_agents(
description=agent.description,
runs=agent.runs,
rating=agent.rating,
agent_graph_id=agent.agentGraphId,
)
# Add to the list only if creation was successful
store_agents.append(store_agent)

View File

@@ -600,6 +600,7 @@ async def hybrid_search(
sa.featured,
sa.is_available,
sa.updated_at,
sa."agentGraphId",
-- Searchable text for BM25 reranking
COALESCE(sa.agent_name, '') || ' ' || COALESCE(sa.sub_heading, '') || ' ' || COALESCE(sa.description, '') as searchable_text,
-- Semantic score
@@ -659,6 +660,7 @@ async def hybrid_search(
featured,
is_available,
updated_at,
"agentGraphId",
searchable_text,
semantic_score,
lexical_score,

View File

@@ -38,6 +38,7 @@ class StoreAgent(pydantic.BaseModel):
description: str
runs: int
rating: float
agent_graph_id: str
class StoreAgentsResponse(pydantic.BaseModel):

View File

@@ -26,11 +26,13 @@ def test_store_agent():
description="Test description",
runs=50,
rating=4.5,
agent_graph_id="test-graph-id",
)
assert agent.slug == "test-agent"
assert agent.agent_name == "Test Agent"
assert agent.runs == 50
assert agent.rating == 4.5
assert agent.agent_graph_id == "test-graph-id"
def test_store_agents_response():
@@ -46,6 +48,7 @@ def test_store_agents_response():
description="Test description",
runs=50,
rating=4.5,
agent_graph_id="test-graph-id",
)
],
pagination=store_model.Pagination(

View File

@@ -82,6 +82,7 @@ def test_get_agents_featured(
description="Featured agent description",
runs=100,
rating=4.5,
agent_graph_id="test-graph-1",
)
],
pagination=store_model.Pagination(
@@ -127,6 +128,7 @@ def test_get_agents_by_creator(
description="Creator agent description",
runs=50,
rating=4.0,
agent_graph_id="test-graph-2",
)
],
pagination=store_model.Pagination(
@@ -172,6 +174,7 @@ def test_get_agents_sorted(
description="Top agent description",
runs=1000,
rating=5.0,
agent_graph_id="test-graph-3",
)
],
pagination=store_model.Pagination(
@@ -217,6 +220,7 @@ def test_get_agents_search(
description="Specific search term description",
runs=75,
rating=4.2,
agent_graph_id="test-graph-search",
)
],
pagination=store_model.Pagination(
@@ -262,6 +266,7 @@ def test_get_agents_category(
description="Category agent description",
runs=60,
rating=4.1,
agent_graph_id="test-graph-category",
)
],
pagination=store_model.Pagination(
@@ -306,6 +311,7 @@ def test_get_agents_pagination(
description=f"Agent {i} description",
runs=i * 10,
rating=4.0,
agent_graph_id="test-graph-2",
)
for i in range(5)
],

View File

@@ -33,6 +33,7 @@ class TestCacheDeletion:
description="Test description",
runs=100,
rating=4.5,
agent_graph_id="test-graph-id",
)
],
pagination=Pagination(

View File

@@ -1028,6 +1028,39 @@ async def get_graph(
return GraphModel.from_db(graph, for_export)
async def get_store_listed_graphs(*graph_ids: str) -> dict[str, GraphModel]:
"""Batch-fetch multiple store-listed graphs by their IDs.
Only returns graphs that have approved store listings (publicly available).
Does not require permission checks since store-listed graphs are public.
Args:
*graph_ids: Variable number of graph IDs to fetch
Returns:
Dict mapping graph_id to GraphModel for graphs with approved store listings
"""
if not graph_ids:
return {}
store_listings = await StoreListingVersion.prisma().find_many(
where={
"agentGraphId": {"in": list(graph_ids)},
"submissionStatus": SubmissionStatus.APPROVED,
"isDeleted": False,
},
include={"AgentGraph": {"include": AGENT_GRAPH_INCLUDE}},
distinct=["agentGraphId"],
order={"agentGraphVersion": "desc"},
)
return {
listing.agentGraphId: GraphModel.from_db(listing.AgentGraph)
for listing in store_listings
if listing.AgentGraph
}
async def get_graph_as_admin(
graph_id: str,
version: int | None = None,
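
For orientation, a hypothetical call site for the new helper above (assumes an async context; only graphs with an approved, non-deleted store listing come back, and missing IDs are simply absent from the result):

```python
async def load_public_schemas(graph_ids: list[str]) -> dict[str, dict]:
    # Illustrative helper name; get_store_listed_graphs is the function from the diff.
    graphs = await get_store_listed_graphs(*graph_ids)
    return {graph_id: graph.input_schema for graph_id, graph in graphs.items()}
```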

View File

@@ -291,6 +291,18 @@ class AsyncRabbitMQ(RabbitMQBase):
exchange, routing_key=queue.routing_key or queue.name
)
    async def _ensure_channel(self) -> aio_pika.abc.AbstractChannel:
        """Get a valid channel, reconnecting if the current one is stale."""
        if not self.is_ready:
            # Reset stale channel so connect() creates a fresh one
            self._channel = None
            await self.connect()
        if self._channel is None:
            raise RuntimeError("Channel should be established after connect")
        return self._channel

    @func_retry
    async def publish_message(
        self,
@@ -299,32 +311,58 @@ class AsyncRabbitMQ(RabbitMQBase):
        exchange: Optional[Exchange] = None,
        persistent: bool = True,
    ) -> None:
        if not self.is_ready:
            await self.connect()
        if self._channel is None:
            raise RuntimeError("Channel should be established after connect")
        if exchange:
            exchange_obj = await self._channel.get_exchange(exchange.name)
        else:
            exchange_obj = self._channel.default_exchange
        await exchange_obj.publish(
            aio_pika.Message(
                body=message.encode(),
                delivery_mode=(
                    aio_pika.DeliveryMode.PERSISTENT
                    if persistent
                    else aio_pika.DeliveryMode.NOT_PERSISTENT
                ),
            ),
            routing_key=routing_key,
        )
        try:
            channel = await self._ensure_channel()
        except Exception:
            # Force full reconnect on channel acquisition failure
            self._channel = None
            self._connection = None
            channel = await self._ensure_channel()
        try:
            if exchange:
                exchange_obj = await channel.get_exchange(exchange.name)
            else:
                exchange_obj = channel.default_exchange
            await exchange_obj.publish(
                aio_pika.Message(
                    body=message.encode(),
                    delivery_mode=(
                        aio_pika.DeliveryMode.PERSISTENT
                        if persistent
                        else aio_pika.DeliveryMode.NOT_PERSISTENT
                    ),
                ),
                routing_key=routing_key,
            )
        except aio_pika.exceptions.ChannelInvalidStateError:
            logger.warning(
                "RabbitMQ channel invalid, reconnecting and retrying publish"
            )
            self._channel = None
            self._connection = None
            await self.connect()
            if self._channel is None:
                raise RuntimeError("Channel should be established after reconnect")
            if exchange:
                exchange_obj = await self._channel.get_exchange(exchange.name)
            else:
                exchange_obj = self._channel.default_exchange
            await exchange_obj.publish(
                aio_pika.Message(
                    body=message.encode(),
                    delivery_mode=(
                        aio_pika.DeliveryMode.PERSISTENT
                        if persistent
                        else aio_pika.DeliveryMode.NOT_PERSISTENT
                    ),
                ),
                routing_key=routing_key,
            )

    async def get_channel(self) -> aio_pika.abc.AbstractChannel:
        if not self.is_ready:
            await self.connect()
        if self._channel is None:
            raise RuntimeError("Channel should be established after connect")
        return self._channel
        return await self._ensure_channel()

View File

@@ -0,0 +1,39 @@
from urllib.parse import urlparse

import fastapi
from fastapi.routing import APIRoute

from backend.api.features.integrations.router import router as integrations_router
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks import utils as webhooks_utils


def test_webhook_ingress_url_matches_route(monkeypatch) -> None:
    app = fastapi.FastAPI()
    app.include_router(integrations_router, prefix="/api/integrations")

    provider = ProviderName.GITHUB
    webhook_id = "webhook_123"
    base_url = "https://example.com"
    monkeypatch.setattr(webhooks_utils.app_config, "platform_base_url", base_url)

    route = next(
        route
        for route in integrations_router.routes
        if isinstance(route, APIRoute)
        and route.path == "/{provider}/webhooks/{webhook_id}/ingress"
        and "POST" in route.methods
    )
    expected_path = f"/api/integrations{route.path}".format(
        provider=provider.value,
        webhook_id=webhook_id,
    )

    actual_url = urlparse(webhooks_utils.webhook_ingress_url(provider, webhook_id))
    expected_base = urlparse(base_url)

    assert (actual_url.scheme, actual_url.netloc) == (
        expected_base.scheme,
        expected_base.netloc,
    )
    assert actual_url.path == expected_path

View File

@@ -9,7 +9,8 @@
"sub_heading": "Creator agent subheading",
"description": "Creator agent description",
"runs": 50,
"rating": 4.0
"rating": 4.0,
"agent_graph_id": "test-graph-2"
}
],
"pagination": {

View File

@@ -9,7 +9,8 @@
"sub_heading": "Category agent subheading",
"description": "Category agent description",
"runs": 60,
"rating": 4.1
"rating": 4.1,
"agent_graph_id": "test-graph-category"
}
],
"pagination": {

View File

@@ -9,7 +9,8 @@
"sub_heading": "Agent 0 subheading",
"description": "Agent 0 description",
"runs": 0,
"rating": 4.0
"rating": 4.0,
"agent_graph_id": "test-graph-2"
},
{
"slug": "agent-1",
@@ -20,7 +21,8 @@
"sub_heading": "Agent 1 subheading",
"description": "Agent 1 description",
"runs": 10,
"rating": 4.0
"rating": 4.0,
"agent_graph_id": "test-graph-2"
},
{
"slug": "agent-2",
@@ -31,7 +33,8 @@
"sub_heading": "Agent 2 subheading",
"description": "Agent 2 description",
"runs": 20,
"rating": 4.0
"rating": 4.0,
"agent_graph_id": "test-graph-2"
},
{
"slug": "agent-3",
@@ -42,7 +45,8 @@
"sub_heading": "Agent 3 subheading",
"description": "Agent 3 description",
"runs": 30,
"rating": 4.0
"rating": 4.0,
"agent_graph_id": "test-graph-2"
},
{
"slug": "agent-4",
@@ -53,7 +57,8 @@
"sub_heading": "Agent 4 subheading",
"description": "Agent 4 description",
"runs": 40,
"rating": 4.0
"rating": 4.0,
"agent_graph_id": "test-graph-2"
}
],
"pagination": {

View File

@@ -9,7 +9,8 @@
"sub_heading": "Search agent subheading",
"description": "Specific search term description",
"runs": 75,
"rating": 4.2
"rating": 4.2,
"agent_graph_id": "test-graph-search"
}
],
"pagination": {

View File

@@ -9,7 +9,8 @@
"sub_heading": "Top agent subheading",
"description": "Top agent description",
"runs": 1000,
"rating": 5.0
"rating": 5.0,
"agent_graph_id": "test-graph-3"
}
],
"pagination": {

View File

@@ -9,7 +9,8 @@
"sub_heading": "Featured agent subheading",
"description": "Featured agent description",
"runs": 100,
"rating": 4.5
"rating": 4.5,
"agent_graph_id": "test-graph-1"
}
],
"pagination": {

View File

@@ -134,15 +134,28 @@ class TestSearchMarketplaceAgentsForGeneration:
description="A public agent",
sub_heading="Does something useful",
creator="creator-1",
agent_graph_id="graph-123",
)
]
# The store_db is dynamically imported, so patch the import path
with patch(
"backend.api.features.store.db.get_store_agents",
new_callable=AsyncMock,
return_value=mock_response,
) as mock_search:
mock_graph = MagicMock()
mock_graph.id = "graph-123"
mock_graph.version = 1
mock_graph.input_schema = {"type": "object"}
mock_graph.output_schema = {"type": "object"}
with (
patch(
"backend.api.features.store.db.get_store_agents",
new_callable=AsyncMock,
return_value=mock_response,
) as mock_search,
patch(
"backend.api.features.chat.tools.agent_generator.core.get_store_listed_graphs",
new_callable=AsyncMock,
return_value={"graph-123": mock_graph},
),
):
result = await core.search_marketplace_agents_for_generation(
search_query="automation",
max_results=10,
@@ -156,7 +169,7 @@ class TestSearchMarketplaceAgentsForGeneration:
assert len(result) == 1
assert result[0]["name"] == "Public Agent"
assert result[0]["is_marketplace_agent"] is True
assert result[0]["graph_id"] == "graph-123"
@pytest.mark.asyncio
async def test_handles_marketplace_error_gracefully(self):
@@ -193,11 +206,12 @@ class TestGetAllRelevantAgentsForGeneration:
marketplace_agents = [
{
"graph_id": "market-456",
"graph_version": 1,
"name": "Market Agent",
"description": "From marketplace",
"sub_heading": "Sub heading",
"creator": "creator-1",
"is_marketplace_agent": True,
"input_schema": {},
"output_schema": {},
}
]
@@ -225,11 +239,11 @@ class TestGetAllRelevantAgentsForGeneration:
assert result[1]["name"] == "Market Agent"
@pytest.mark.asyncio
async def test_deduplicates_by_name(self):
"""Test that marketplace agents with same name as library are excluded."""
async def test_deduplicates_by_graph_id(self):
"""Test that marketplace agents with same graph_id as library are excluded."""
library_agents = [
{
"graph_id": "lib-123",
"graph_id": "shared-123",
"graph_version": 1,
"name": "Shared Agent",
"description": "From library",
@@ -240,18 +254,20 @@ class TestGetAllRelevantAgentsForGeneration:
marketplace_agents = [
{
"name": "Shared Agent", # Same name, should be deduplicated
"graph_id": "shared-123", # Same graph_id, should be deduplicated
"graph_version": 1,
"name": "Shared Agent",
"description": "From marketplace",
"sub_heading": "Sub heading",
"creator": "creator-1",
"is_marketplace_agent": True,
"input_schema": {},
"output_schema": {},
},
{
"graph_id": "unique-456",
"graph_version": 1,
"name": "Unique Agent",
"description": "Only in marketplace",
"sub_heading": "Sub heading",
"creator": "creator-2",
"is_marketplace_agent": True,
"input_schema": {},
"output_schema": {},
},
]
@@ -273,7 +289,7 @@ class TestGetAllRelevantAgentsForGeneration:
include_marketplace=True,
)
# Shared Agent from marketplace should be excluded
# Shared Agent from marketplace should be excluded by graph_id
assert len(result) == 2
names = [a["name"] for a in result]
assert "Shared Agent" in names

View File

@@ -9833,7 +9833,8 @@
"sub_heading": { "type": "string", "title": "Sub Heading" },
"description": { "type": "string", "title": "Description" },
"runs": { "type": "integer", "title": "Runs" },
"rating": { "type": "number", "title": "Rating" }
"rating": { "type": "number", "title": "Rating" },
"agent_graph_id": { "type": "string", "title": "Agent Graph Id" }
},
"type": "object",
"required": [
@@ -9845,7 +9846,8 @@
"sub_heading",
"description",
"runs",
"rating"
"rating",
"agent_graph_id"
],
"title": "StoreAgent"
},