Compare commits


4 Commits

Author SHA1 Message Date
Otto
9b20f4cd13 refactor: simplify ExecutionQueue docstrings and move test file
- Trim verbose BUG FIX docstring to concise 3-line note
- Remove redundant method docstrings (add, get, empty)
- Move test file to backend/data/ with proper pytest conventions
- Add note about ProcessPoolExecutor migration for future devs

Co-authored-by: Zamil Majdy <majdyz@users.noreply.github.com>
2026-02-08 16:11:35 +00:00
Nikhil Bhagat
a3d0f9cbd2 fix(backend): format test_execution_queue.py and remove unused variable 2025-12-14 19:37:29 +05:45
Nikhil Bhagat
02ddb51446 Added test_execution_queue.py and test the execution part and the test got passed 2025-12-14 19:05:14 +05:45
Nikhil Bhagat
750e096f15 fix(backend): replace multiprocessing.Manager().Queue() with queue.Queue()
ExecutionQueue was unnecessarily using multiprocessing.Manager().Queue() which
spawns a subprocess for IPC. Since ExecutionQueue is only accessed from threads
within the same process, queue.Queue() is sufficient and more efficient.

- Eliminates unnecessary subprocess spawning per graph execution
- Removes IPC overhead for queue operations
- Prevents potential resource leaks from Manager processes
- Improves scalability for concurrent graph executions
2025-12-14 19:04:14 +05:45
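
For reference, a minimal sketch of the pattern this commit describes — a plain thread-safe queue.Queue behind the ExecutionQueue interface (the add/get/empty method names come from the docstring-cleanup commit above; the element type is an assumption):

import queue

class ExecutionQueue:
    # queue.Queue is already thread-safe, so no Manager subprocess or IPC
    # proxy is needed when every producer and consumer runs in-process.
    def __init__(self):
        self._queue: queue.Queue = queue.Queue()

    def add(self, execution):
        self._queue.put(execution)
        return execution

    def get(self):
        return self._queue.get()

    def empty(self) -> bool:
        return self._queue.empty()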
502 changed files with 13529 additions and 29386 deletions

View File

@@ -11,7 +11,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v10
+      - uses: actions/stale@v9
         with:
           # operations-per-run: 5000
           stale-issue-message: >

View File

@@ -61,6 +61,6 @@ jobs:
     pull-requests: write
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/labeler@v6
+      - uses: actions/labeler@v5
        with:
          sync-labels: true

View File

@@ -57,9 +57,6 @@ class APIKeySmith:
     def hash_key(self, raw_key: str) -> tuple[str, str]:
         """Migrate a legacy hash to secure hash format."""
-        if not raw_key.startswith(self.PREFIX):
-            raise ValueError("Key without 'agpt_' prefix would fail validation")
         salt = self._generate_salt()
         hash = self._hash_key_with_salt(raw_key, salt)
         return hash, salt.hex()

View File

@@ -1,25 +1,29 @@
 from fastapi import FastAPI
+from fastapi.openapi.utils import get_openapi
 from .jwt_utils import bearer_jwt_auth
 def add_auth_responses_to_openapi(app: FastAPI) -> None:
     """
-    Patch a FastAPI instance's `openapi()` method to add 401 responses
+    Set up custom OpenAPI schema generation that adds 401 responses
     to all authenticated endpoints.
     This is needed when using HTTPBearer with auto_error=False to get proper
     401 responses instead of 403, but FastAPI only automatically adds security
     responses when auto_error=True.
     """
-    # Wrap current method to allow stacking OpenAPI schema modifiers like this
-    wrapped_openapi = app.openapi
     def custom_openapi():
         if app.openapi_schema:
             return app.openapi_schema
-        openapi_schema = wrapped_openapi()
+        openapi_schema = get_openapi(
+            title=app.title,
+            version=app.version,
+            description=app.description,
+            routes=app.routes,
+        )
         # Add 401 response to all endpoints that have security requirements
         for path, methods in openapi_schema["paths"].items():
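
The removed comment on the left-hand side refers to a wrapping pattern that lets several schema modifiers stack; a minimal sketch of that idea (add_schema_modifier and modify are illustrative names, not from the codebase):

from typing import Callable
from fastapi import FastAPI

def add_schema_modifier(app: FastAPI, modify: Callable[[dict], None]) -> None:
    # Wrap whatever openapi() is currently installed: each wrapper calls the
    # previous one and edits its result, so modifiers compose instead of
    # rebuilding the schema from scratch with get_openapi().
    wrapped_openapi = app.openapi

    def custom_openapi():
        if app.openapi_schema:
            return app.openapi_schema
        schema = wrapped_openapi()
        modify(schema)
        app.openapi_schema = schema
        return schema

    app.openapi = custom_openapi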

View File

@@ -108,7 +108,7 @@ import fastapi.testclient
 import pytest
 from pytest_snapshot.plugin import Snapshot
-from backend.api.features.myroute import router
+from backend.server.v2.myroute import router
 app = fastapi.FastAPI()
 app.include_router(router)
@@ -149,7 +149,7 @@ These provide the easiest way to set up authentication mocking in test modules:
 import fastapi
 import fastapi.testclient
 import pytest
-from backend.api.features.myroute import router
+from backend.server.v2.myroute import router
 app = fastapi.FastAPI()
 app.include_router(router)

View File

@@ -1,25 +0,0 @@
from fastapi import FastAPI

from backend.api.middleware.security import SecurityHeadersMiddleware
from backend.monitoring.instrumentation import instrument_fastapi
from .v1.routes import v1_router

external_api = FastAPI(
    title="AutoGPT External API",
    description="External API for AutoGPT integrations",
    docs_url="/docs",
    version="1.0",
)
external_api.add_middleware(SecurityHeadersMiddleware)
external_api.include_router(v1_router, prefix="/v1")

# Add Prometheus instrumentation
instrument_fastapi(
    external_api,
    service_name="external-api",
    expose_endpoint=True,
    endpoint="/metrics",
    include_in_schema=True,
)

View File

@@ -1,107 +0,0 @@
from fastapi import HTTPException, Security, status
from fastapi.security import APIKeyHeader, HTTPAuthorizationCredentials, HTTPBearer
from prisma.enums import APIKeyPermission

from backend.data.auth.api_key import APIKeyInfo, validate_api_key
from backend.data.auth.base import APIAuthorizationInfo
from backend.data.auth.oauth import (
    InvalidClientError,
    InvalidTokenError,
    OAuthAccessTokenInfo,
    validate_access_token,
)

api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)
bearer_auth = HTTPBearer(auto_error=False)

async def require_api_key(api_key: str | None = Security(api_key_header)) -> APIKeyInfo:
    """Middleware for API key authentication only"""
    if api_key is None:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail="Missing API key"
        )
    api_key_obj = await validate_api_key(api_key)
    if not api_key_obj:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API key"
        )
    return api_key_obj

async def require_access_token(
    bearer: HTTPAuthorizationCredentials | None = Security(bearer_auth),
) -> OAuthAccessTokenInfo:
    """Middleware for OAuth access token authentication only"""
    if bearer is None:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Missing Authorization header",
        )
    try:
        token_info, _ = await validate_access_token(bearer.credentials)
    except (InvalidClientError, InvalidTokenError) as e:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=str(e))
    return token_info

async def require_auth(
    api_key: str | None = Security(api_key_header),
    bearer: HTTPAuthorizationCredentials | None = Security(bearer_auth),
) -> APIAuthorizationInfo:
    """
    Unified authentication middleware supporting both API keys and OAuth tokens.
    Supports two authentication methods, which are checked in order:
    1. X-API-Key header (existing API key authentication)
    2. Authorization: Bearer <token> header (OAuth access token)
    Returns:
        APIAuthorizationInfo: base class of both APIKeyInfo and OAuthAccessTokenInfo.
    """
    # Try API key first
    if api_key is not None:
        api_key_info = await validate_api_key(api_key)
        if api_key_info:
            return api_key_info
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API key"
        )
    # Try OAuth bearer token
    if bearer is not None:
        try:
            token_info, _ = await validate_access_token(bearer.credentials)
            return token_info
        except (InvalidClientError, InvalidTokenError) as e:
            raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=str(e))
    # No credentials provided
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Missing authentication. Provide API key or access token.",
    )

def require_permission(permission: APIKeyPermission):
    """
    Dependency function for checking specific permissions
    (works with API keys and OAuth tokens)
    """
    async def check_permission(
        auth: APIAuthorizationInfo = Security(require_auth),
    ) -> APIAuthorizationInfo:
        if permission not in auth.scopes:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail=f"Missing required permission: {permission.value}",
            )
        return auth
    return check_permission
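
As a usage sketch, the deleted require_permission factory would attach to a route roughly like this (the route path is a hypothetical example; EXECUTE_GRAPH is one of the scope names cited elsewhere in this diff):

from fastapi import APIRouter, Security
from prisma.enums import APIKeyPermission

router = APIRouter()

@router.post("/graphs/{graph_id}/execute")  # hypothetical endpoint for illustration
async def execute_graph(
    graph_id: str,
    # Responds 401 without valid credentials and 403 when the scope is missing.
    auth: APIAuthorizationInfo = Security(
        require_permission(APIKeyPermission.EXECUTE_GRAPH)
    ),
):
    return {"graph_id": graph_id}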

View File

@@ -1,343 +0,0 @@
"""
OAuth Application Admin Routes
Provides admin-only endpoints for managing OAuth applications:
- List all OAuth applications
- Create new OAuth applications
- Update OAuth applications
- Delete OAuth applications
- Regenerate client secrets
"""
import logging
from typing import Optional
from autogpt_libs.auth import get_user_id, requires_admin_user
from fastapi import APIRouter, Body, HTTPException, Query, Security, status
from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field
from backend.data.auth.oauth import (
OAuthApplicationCreationResult,
OAuthApplicationInfo,
admin_update_oauth_application,
create_oauth_application,
delete_oauth_application,
get_oauth_application_by_id,
list_all_oauth_applications,
regenerate_client_secret,
)
logger = logging.getLogger(__name__)
router = APIRouter(
prefix="/admin",
tags=["oauth", "admin"],
dependencies=[Security(requires_admin_user)],
)
# ============================================================================
# Request/Response Models
# ============================================================================
class CreateOAuthAppRequest(BaseModel):
"""Request to create a new OAuth application"""
name: str = Field(description="Application name")
description: Optional[str] = Field(None, description="Application description")
redirect_uris: list[str] = Field(description="Allowed redirect URIs")
scopes: list[str] = Field(
description="List of scopes (e.g., EXECUTE_GRAPH, READ_GRAPH)"
)
grant_types: Optional[list[str]] = Field(
None,
description="Grant types (default: authorization_code, refresh_token)",
)
owner_id: str = Field(description="User ID who will own this application")
class UpdateOAuthAppRequest(BaseModel):
"""Request to update an OAuth application"""
name: Optional[str] = Field(None, description="Application name")
description: Optional[str] = Field(None, description="Application description")
redirect_uris: Optional[list[str]] = Field(None, description="Allowed redirect URIs")
scopes: Optional[list[str]] = Field(None, description="List of scopes")
is_active: Optional[bool] = Field(None, description="Whether the app is active")
class OAuthAppsListResponse(BaseModel):
"""Response for listing OAuth applications"""
applications: list[OAuthApplicationInfo]
total: int
page: int
page_size: int
total_pages: int
class RegenerateSecretResponse(BaseModel):
"""Response when regenerating a client secret"""
client_secret: str = Field(
description="New plaintext client secret - shown only once"
)
# ============================================================================
# Admin Endpoints
# ============================================================================
@router.get(
"/apps",
response_model=OAuthAppsListResponse,
summary="List All OAuth Applications",
)
async def list_oauth_apps(
admin_user_id: str = Security(get_user_id),
page: int = Query(1, ge=1, description="Page number"),
page_size: int = Query(20, ge=1, le=100, description="Items per page"),
search: Optional[str] = Query(None, description="Search by name, client ID, or description"),
):
"""
List all OAuth applications in the system.
Admin-only endpoint. Returns paginated list of all OAuth applications
with their details (excluding client secrets).
"""
logger.info(f"Admin user {admin_user_id} is listing OAuth applications")
applications, total = await list_all_oauth_applications(
page=page,
page_size=page_size,
search=search,
)
total_pages = (total + page_size - 1) // page_size
return OAuthAppsListResponse(
applications=applications,
total=total,
page=page,
page_size=page_size,
total_pages=total_pages,
)
@router.get(
"/apps/{app_id}",
response_model=OAuthApplicationInfo,
summary="Get OAuth Application Details",
)
async def get_oauth_app(
app_id: str,
admin_user_id: str = Security(get_user_id),
):
"""
Get details of a specific OAuth application.
Admin-only endpoint. Returns application details (excluding client secret).
"""
logger.info(f"Admin user {admin_user_id} is getting OAuth app {app_id}")
app = await get_oauth_application_by_id(app_id)
if not app:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="OAuth application not found",
)
return app
@router.post(
"/apps",
response_model=OAuthApplicationCreationResult,
summary="Create OAuth Application",
status_code=status.HTTP_201_CREATED,
)
async def create_oauth_app(
request: CreateOAuthAppRequest = Body(),
admin_user_id: str = Security(get_user_id),
):
"""
Create a new OAuth application.
Admin-only endpoint. Returns the created application including the
plaintext client secret (which is only shown once).
The client secret is hashed before storage and cannot be retrieved later.
If lost, a new secret must be generated using the regenerate endpoint.
"""
logger.info(
f"Admin user {admin_user_id} is creating OAuth app '{request.name}' "
f"for user {request.owner_id}"
)
# Validate scopes
try:
validated_scopes = [APIKeyPermission(s.strip()) for s in request.scopes if s.strip()]
except ValueError as e:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"Invalid scope: {e}",
)
if not validated_scopes:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="At least one scope is required",
)
# Validate redirect URIs
if not request.redirect_uris:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="At least one redirect URI is required",
)
result = await create_oauth_application(
name=request.name,
description=request.description,
redirect_uris=request.redirect_uris,
scopes=validated_scopes,
owner_id=request.owner_id,
grant_types=request.grant_types,
)
logger.info(
f"Created OAuth app '{result.application.name}' "
f"(client_id: {result.application.client_id})"
)
return result
@router.patch(
"/apps/{app_id}",
response_model=OAuthApplicationInfo,
summary="Update OAuth Application",
)
async def update_oauth_app(
app_id: str,
request: UpdateOAuthAppRequest = Body(),
admin_user_id: str = Security(get_user_id),
):
"""
Update an OAuth application.
Admin-only endpoint. Can update name, description, redirect URIs,
scopes, and active status.
"""
logger.info(f"Admin user {admin_user_id} is updating OAuth app {app_id}")
# Validate scopes if provided
validated_scopes = None
if request.scopes is not None:
try:
validated_scopes = [
APIKeyPermission(s.strip()) for s in request.scopes if s.strip()
]
except ValueError as e:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"Invalid scope: {e}",
)
if not validated_scopes:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="At least one scope is required",
)
updated_app = await admin_update_oauth_application(
app_id=app_id,
name=request.name,
description=request.description,
redirect_uris=request.redirect_uris,
scopes=validated_scopes,
is_active=request.is_active,
)
if not updated_app:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="OAuth application not found",
)
action = "updated"
if request.is_active is not None:
action = "enabled" if request.is_active else "disabled"
logger.info(f"OAuth app {updated_app.name} (#{app_id}) {action}")
return updated_app
@router.delete(
"/apps/{app_id}",
summary="Delete OAuth Application",
status_code=status.HTTP_204_NO_CONTENT,
)
async def delete_oauth_app(
app_id: str,
admin_user_id: str = Security(get_user_id),
):
"""
Delete an OAuth application.
Admin-only endpoint. This will also delete all associated authorization
codes, access tokens, and refresh tokens.
This action is irreversible.
"""
logger.info(f"Admin user {admin_user_id} is deleting OAuth app {app_id}")
deleted = await delete_oauth_application(app_id)
if not deleted:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="OAuth application not found",
)
logger.info(f"Deleted OAuth app {app_id}")
return None
@router.post(
"/apps/{app_id}/regenerate-secret",
response_model=RegenerateSecretResponse,
summary="Regenerate Client Secret",
)
async def regenerate_oauth_secret(
app_id: str,
admin_user_id: str = Security(get_user_id),
):
"""
Regenerate the client secret for an OAuth application.
Admin-only endpoint. The old secret will be invalidated immediately.
Returns the new plaintext client secret (shown only once).
All existing tokens will continue to work, but new token requests
must use the new client secret.
"""
logger.info(
f"Admin user {admin_user_id} is regenerating secret for OAuth app {app_id}"
)
new_secret = await regenerate_client_secret(app_id)
if not new_secret:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="OAuth application not found",
)
logger.info(f"Regenerated client secret for OAuth app {app_id}")
return RegenerateSecretResponse(client_secret=new_secret)
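
The create/regenerate docstrings above describe a hash-at-rest scheme for client secrets; a minimal sketch of that general pattern (the helper names and the SHA-256 choice are assumptions, not taken from the deleted module):

import hashlib
import secrets

def generate_client_secret() -> tuple[str, str]:
    # The plaintext is returned to the caller exactly once; only the digest
    # is stored, so a lost secret can only be replaced, never recovered.
    plaintext = secrets.token_urlsafe(32)
    digest = hashlib.sha256(plaintext.encode()).hexdigest()
    return plaintext, digest

def verify_client_secret(candidate: str, stored_digest: str) -> bool:
    candidate_digest = hashlib.sha256(candidate.encode()).hexdigest()
    # Constant-time comparison avoids leaking digest prefixes via timing.
    return secrets.compare_digest(candidate_digest, stored_digest)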

View File

@@ -1,340 +0,0 @@
"""Tests for analytics API endpoints."""
import json
from unittest.mock import AsyncMock, Mock
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
from pytest_snapshot.plugin import Snapshot
from .analytics import router as analytics_router
app = fastapi.FastAPI()
app.include_router(analytics_router)
client = fastapi.testclient.TestClient(app)
@pytest.fixture(autouse=True)
def setup_app_auth(mock_jwt_user):
"""Setup auth overrides for all tests in this module."""
from autogpt_libs.auth.jwt_utils import get_jwt_payload
app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"]
yield
app.dependency_overrides.clear()
# =============================================================================
# /log_raw_metric endpoint tests
# =============================================================================
def test_log_raw_metric_success(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
test_user_id: str,
) -> None:
"""Test successful raw metric logging."""
mock_result = Mock(id="metric-123-uuid")
mock_log_metric = mocker.patch(
"backend.data.analytics.log_raw_metric",
new_callable=AsyncMock,
return_value=mock_result,
)
request_data = {
"metric_name": "page_load_time",
"metric_value": 2.5,
"data_string": "/dashboard",
}
response = client.post("/log_raw_metric", json=request_data)
assert response.status_code == 200, f"Unexpected response: {response.text}"
assert response.json() == "metric-123-uuid"
mock_log_metric.assert_called_once_with(
user_id=test_user_id,
metric_name="page_load_time",
metric_value=2.5,
data_string="/dashboard",
)
configured_snapshot.assert_match(
json.dumps({"metric_id": response.json()}, indent=2, sort_keys=True),
"analytics_log_metric_success",
)
@pytest.mark.parametrize(
"metric_value,metric_name,data_string,test_id",
[
(100, "api_calls_count", "external_api", "integer_value"),
(0, "error_count", "no_errors", "zero_value"),
(-5.2, "temperature_delta", "cooling", "negative_value"),
(1.23456789, "precision_test", "float_precision", "float_precision"),
(999999999, "large_number", "max_value", "large_number"),
(0.0000001, "tiny_number", "min_value", "tiny_number"),
],
)
def test_log_raw_metric_various_values(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
metric_value: float,
metric_name: str,
data_string: str,
test_id: str,
) -> None:
"""Test raw metric logging with various metric values."""
mock_result = Mock(id=f"metric-{test_id}-uuid")
mocker.patch(
"backend.data.analytics.log_raw_metric",
new_callable=AsyncMock,
return_value=mock_result,
)
request_data = {
"metric_name": metric_name,
"metric_value": metric_value,
"data_string": data_string,
}
response = client.post("/log_raw_metric", json=request_data)
assert response.status_code == 200, f"Failed for {test_id}: {response.text}"
configured_snapshot.assert_match(
json.dumps(
{"metric_id": response.json(), "test_case": test_id},
indent=2,
sort_keys=True,
),
f"analytics_metric_{test_id}",
)
@pytest.mark.parametrize(
"invalid_data,expected_error",
[
({}, "Field required"),
({"metric_name": "test"}, "Field required"),
(
{"metric_name": "test", "metric_value": "not_a_number", "data_string": "x"},
"Input should be a valid number",
),
(
{"metric_name": "", "metric_value": 1.0, "data_string": "test"},
"String should have at least 1 character",
),
(
{"metric_name": "test", "metric_value": 1.0, "data_string": ""},
"String should have at least 1 character",
),
],
ids=[
"empty_request",
"missing_metric_value_and_data_string",
"invalid_metric_value_type",
"empty_metric_name",
"empty_data_string",
],
)
def test_log_raw_metric_validation_errors(
invalid_data: dict,
expected_error: str,
) -> None:
"""Test validation errors for invalid metric requests."""
response = client.post("/log_raw_metric", json=invalid_data)
assert response.status_code == 422
error_detail = response.json()
assert "detail" in error_detail, f"Missing 'detail' in error: {error_detail}"
error_text = json.dumps(error_detail)
assert (
expected_error in error_text
), f"Expected '{expected_error}' in error response: {error_text}"
def test_log_raw_metric_service_error(
mocker: pytest_mock.MockFixture,
test_user_id: str,
) -> None:
"""Test error handling when analytics service fails."""
mocker.patch(
"backend.data.analytics.log_raw_metric",
new_callable=AsyncMock,
side_effect=Exception("Database connection failed"),
)
request_data = {
"metric_name": "test_metric",
"metric_value": 1.0,
"data_string": "test",
}
response = client.post("/log_raw_metric", json=request_data)
assert response.status_code == 500
error_detail = response.json()["detail"]
assert "Database connection failed" in error_detail["message"]
assert "hint" in error_detail
# =============================================================================
# /log_raw_analytics endpoint tests
# =============================================================================
def test_log_raw_analytics_success(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
test_user_id: str,
) -> None:
"""Test successful raw analytics logging."""
mock_result = Mock(id="analytics-789-uuid")
mock_log_analytics = mocker.patch(
"backend.data.analytics.log_raw_analytics",
new_callable=AsyncMock,
return_value=mock_result,
)
request_data = {
"type": "user_action",
"data": {
"action": "button_click",
"button_id": "submit_form",
"timestamp": "2023-01-01T00:00:00Z",
"metadata": {"form_type": "registration", "fields_filled": 5},
},
"data_index": "button_click_submit_form",
}
response = client.post("/log_raw_analytics", json=request_data)
assert response.status_code == 200, f"Unexpected response: {response.text}"
assert response.json() == "analytics-789-uuid"
mock_log_analytics.assert_called_once_with(
test_user_id,
"user_action",
request_data["data"],
"button_click_submit_form",
)
configured_snapshot.assert_match(
json.dumps({"analytics_id": response.json()}, indent=2, sort_keys=True),
"analytics_log_analytics_success",
)
def test_log_raw_analytics_complex_data(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
) -> None:
"""Test raw analytics logging with complex nested data structures."""
mock_result = Mock(id="analytics-complex-uuid")
mocker.patch(
"backend.data.analytics.log_raw_analytics",
new_callable=AsyncMock,
return_value=mock_result,
)
request_data = {
"type": "agent_execution",
"data": {
"agent_id": "agent_123",
"execution_id": "exec_456",
"status": "completed",
"duration_ms": 3500,
"nodes_executed": 15,
"blocks_used": [
{"block_id": "llm_block", "count": 3},
{"block_id": "http_block", "count": 5},
{"block_id": "code_block", "count": 2},
],
"errors": [],
"metadata": {
"trigger": "manual",
"user_tier": "premium",
"environment": "production",
},
},
"data_index": "agent_123_exec_456",
}
response = client.post("/log_raw_analytics", json=request_data)
assert response.status_code == 200
configured_snapshot.assert_match(
json.dumps(
{"analytics_id": response.json(), "logged_data": request_data["data"]},
indent=2,
sort_keys=True,
),
"analytics_log_analytics_complex_data",
)
@pytest.mark.parametrize(
"invalid_data,expected_error",
[
({}, "Field required"),
({"type": "test"}, "Field required"),
(
{"type": "test", "data": "not_a_dict", "data_index": "test"},
"Input should be a valid dictionary",
),
({"type": "test", "data": {"key": "value"}}, "Field required"),
],
ids=[
"empty_request",
"missing_data_and_data_index",
"invalid_data_type",
"missing_data_index",
],
)
def test_log_raw_analytics_validation_errors(
invalid_data: dict,
expected_error: str,
) -> None:
"""Test validation errors for invalid analytics requests."""
response = client.post("/log_raw_analytics", json=invalid_data)
assert response.status_code == 422
error_detail = response.json()
assert "detail" in error_detail, f"Missing 'detail' in error: {error_detail}"
error_text = json.dumps(error_detail)
assert (
expected_error in error_text
), f"Expected '{expected_error}' in error response: {error_text}"
def test_log_raw_analytics_service_error(
mocker: pytest_mock.MockFixture,
test_user_id: str,
) -> None:
"""Test error handling when analytics service fails."""
mocker.patch(
"backend.data.analytics.log_raw_analytics",
new_callable=AsyncMock,
side_effect=Exception("Analytics DB unreachable"),
)
request_data = {
"type": "test_event",
"data": {"key": "value"},
"data_index": "test_index",
}
response = client.post("/log_raw_analytics", json=request_data)
assert response.status_code == 500
error_detail = response.json()["detail"]
assert "Analytics DB unreachable" in error_detail["message"]
assert "hint" in error_detail

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,41 +0,0 @@
from fastapi import FastAPI

def sort_openapi(app: FastAPI) -> None:
    """
    Patch a FastAPI instance's `openapi()` method to sort the endpoints,
    schemas, and responses.
    """
    wrapped_openapi = app.openapi

    def custom_openapi():
        if app.openapi_schema:
            return app.openapi_schema
        openapi_schema = wrapped_openapi()
        # Sort endpoints
        openapi_schema["paths"] = dict(sorted(openapi_schema["paths"].items()))
        # Sort endpoints -> methods
        for p in openapi_schema["paths"].keys():
            openapi_schema["paths"][p] = dict(
                sorted(openapi_schema["paths"][p].items())
            )
            # Sort endpoints -> methods -> responses
            for m in openapi_schema["paths"][p].keys():
                openapi_schema["paths"][p][m]["responses"] = dict(
                    sorted(openapi_schema["paths"][p][m]["responses"].items())
                )
        # Sort schemas and responses as well
        for k in openapi_schema["components"].keys():
            openapi_schema["components"][k] = dict(
                sorted(openapi_schema["components"][k].items())
            )
        app.openapi_schema = openapi_schema
        return openapi_schema

    app.openapi = custom_openapi
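
Because this helper and the pre-change add_auth_responses_to_openapi both wrap the previously installed openapi() method, they could be stacked; a hedged usage sketch (the call order is chosen for illustration):

from fastapi import FastAPI

app = FastAPI()
# Each helper wraps the openapi() installed by the one before it, so the
# 401 responses are added first and the combined schema is then sorted.
add_auth_responses_to_openapi(app)
sort_openapi(app)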

View File

@@ -36,10 +36,10 @@ def main(**kwargs):
     Run all the processes required for the AutoGPT-server (REST and WebSocket APIs).
     """
-    from backend.api.rest_api import AgentServer
-    from backend.api.ws_api import WebsocketServer
     from backend.executor import DatabaseManager, ExecutionManager, Scheduler
     from backend.notifications import NotificationManager
+    from backend.server.rest_api import AgentServer
+    from backend.server.ws_api import WebsocketServer
     run_processes(
         DatabaseManager().set_log_level("warning"),

View File

@@ -1,7 +1,6 @@
 from typing import Any
 from backend.blocks.llm import (
-    DEFAULT_LLM_MODEL,
     TEST_CREDENTIALS,
     TEST_CREDENTIALS_INPUT,
     AIBlockBase,
@@ -50,7 +49,7 @@ class AIConditionBlock(AIBlockBase):
     )
     model: LlmModel = SchemaField(
         title="LLM Model",
-        default=DEFAULT_LLM_MODEL,
+        default=LlmModel.GPT4O,
         description="The language model to use for evaluating the condition.",
         advanced=False,
     )
@@ -82,7 +81,7 @@ class AIConditionBlock(AIBlockBase):
         "condition": "the input is an email address",
         "yes_value": "Valid email",
         "no_value": "Not an email",
-        "model": DEFAULT_LLM_MODEL,
+        "model": LlmModel.GPT4O,
         "credentials": TEST_CREDENTIALS_INPUT,
     },
     test_credentials=TEST_CREDENTIALS,

View File

@@ -20,7 +20,6 @@ from backend.data.model import (
     SchemaField,
 )
 from backend.integrations.providers import ProviderName
-from backend.util.exceptions import BlockExecutionError
 from backend.util.request import Requests
 TEST_CREDENTIALS = APIKeyCredentials(
@@ -247,11 +246,7 @@ class AIShortformVideoCreatorBlock(Block):
             await asyncio.sleep(10)
         logger.error("Video creation timed out")
-        raise BlockExecutionError(
-            message="Video creation timed out",
-            block_name=self.name,
-            block_id=self.id,
-        )
+        raise TimeoutError("Video creation timed out")
     def __init__(self):
         super().__init__(
@@ -427,11 +422,7 @@ class AIAdMakerVideoCreatorBlock(Block):
             await asyncio.sleep(10)
         logger.error("Video creation timed out")
-        raise BlockExecutionError(
-            message="Video creation timed out",
-            block_name=self.name,
-            block_id=self.id,
-        )
+        raise TimeoutError("Video creation timed out")
     def __init__(self):
         super().__init__(
@@ -608,11 +599,7 @@ class AIScreenshotToVideoAdBlock(Block):
             await asyncio.sleep(10)
         logger.error("Video creation timed out")
-        raise BlockExecutionError(
-            message="Video creation timed out",
-            block_name=self.name,
-            block_id=self.id,
-        )
+        raise TimeoutError("Video creation timed out")
     def __init__(self):
         super().__init__(

View File

@@ -106,10 +106,7 @@ class ConditionBlock(Block):
             ComparisonOperator.LESS_THAN_OR_EQUAL: lambda a, b: a <= b,
         }
-        try:
-            result = comparison_funcs[operator](value1, value2)
-        except Exception as e:
-            raise ValueError(f"Comparison failed: {e}") from e
+        result = comparison_funcs[operator](value1, value2)
         yield "result", result

View File

@@ -182,10 +182,13 @@ class DataForSeoRelatedKeywordsBlock(Block):
         if results and len(results) > 0:
             # results is a list, get the first element
             first_result = results[0] if isinstance(results, list) else results
-            # Handle missing key, null value, or valid list value
-            if isinstance(first_result, dict):
-                items = first_result.get("items") or []
-            else:
-                items = []
+            items = (
+                first_result.get("items", [])
+                if isinstance(first_result, dict)
+                else []
+            )
+            # Ensure items is never None
+            if items is None:
+                items = []
         for item in items:
             # Extract keyword_data from the item
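
The two sides differ in how a present-but-null "items" key is handled; a quick illustration of why the right-hand side needs the extra None check:

first_result = {"items": None}

# .get("items", []) only falls back when the key is absent, so a
# present-but-null value passes through as None and must be normalized
# in a separate step.
items = first_result.get("items", [])
assert items is None

# `or []` coerces every falsy value (None, missing key) to [] in one step.
items = first_result.get("items") or []
assert items == []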

View File

@@ -15,7 +15,6 @@ from backend.sdk import (
     SchemaField,
     cost,
 )
-from backend.util.exceptions import BlockExecutionError
 from ._config import firecrawl
@@ -60,18 +59,11 @@ class FirecrawlExtractBlock(Block):
     ) -> BlockOutput:
         app = FirecrawlApp(api_key=credentials.api_key.get_secret_value())
-        try:
-            extract_result = app.extract(
-                urls=input_data.urls,
-                prompt=input_data.prompt,
-                schema=input_data.output_schema,
-                enable_web_search=input_data.enable_web_search,
-            )
-        except Exception as e:
-            raise BlockExecutionError(
-                message=f"Extract failed: {e}",
-                block_name=self.name,
-                block_id=self.id,
-            ) from e
+        extract_result = app.extract(
+            urls=input_data.urls,
+            prompt=input_data.prompt,
+            schema=input_data.output_schema,
+            enable_web_search=input_data.enable_web_search,
+        )
         yield "data", extract_result.data

View File

@@ -19,7 +19,6 @@ from backend.data.model import (
     SchemaField,
 )
 from backend.integrations.providers import ProviderName
-from backend.util.exceptions import ModerationError
 from backend.util.file import MediaFileType, store_media_file
 TEST_CREDENTIALS = APIKeyCredentials(
@@ -154,8 +153,6 @@ class AIImageEditorBlock(Block):
             ),
             aspect_ratio=input_data.aspect_ratio.value,
             seed=input_data.seed,
-            user_id=user_id,
-            graph_exec_id=graph_exec_id,
         )
         yield "output_image", result
@@ -167,8 +164,6 @@ class AIImageEditorBlock(Block):
         input_image_b64: Optional[str],
         aspect_ratio: str,
         seed: Optional[int],
-        user_id: str,
-        graph_exec_id: str,
     ) -> MediaFileType:
         client = ReplicateClient(api_token=api_key.get_secret_value())
         input_params = {
@@ -178,21 +173,11 @@ class AIImageEditorBlock(Block):
             **({"seed": seed} if seed is not None else {}),
         }
-        try:
-            output: FileOutput | list[FileOutput] = await client.async_run(  # type: ignore
-                model_name,
-                input=input_params,
-                wait=False,
-            )
-        except Exception as e:
-            if "flagged as sensitive" in str(e).lower():
-                raise ModerationError(
-                    message="Content was flagged as sensitive by the model provider",
-                    user_id=user_id,
-                    graph_exec_id=graph_exec_id,
-                    moderation_type="model_provider",
-                )
-            raise ValueError(f"Model execution failed: {e}") from e
+        output: FileOutput | list[FileOutput] = await client.async_run(  # type: ignore
+            model_name,
+            input=input_params,
+            wait=False,
+        )
         if isinstance(output, list) and output:
             output = output[0]

File diff suppressed because it is too large

View File

@@ -1,5 +1,5 @@
 import logging
-from typing import Any
+from typing import Any, Literal
 from prisma.enums import ReviewStatus
@@ -45,11 +45,11 @@ class HumanInTheLoopBlock(Block):
     )
     class Output(BlockSchemaOutput):
-        approved_data: Any = SchemaField(
-            description="The data when approved (may be modified by reviewer)"
-        )
-        rejected_data: Any = SchemaField(
-            description="The data when rejected (may be modified by reviewer)"
-        )
+        reviewed_data: Any = SchemaField(
+            description="The data after human review (may be modified)"
+        )
+        status: Literal["approved", "rejected"] = SchemaField(
+            description="Status of the review: 'approved' or 'rejected'"
+        )
         review_message: str = SchemaField(
             description="Any message provided by the reviewer", default=""
@@ -69,7 +69,8 @@ class HumanInTheLoopBlock(Block):
             "editable": True,
         },
         test_output=[
-            ("approved_data", {"name": "John Doe", "age": 30}),
+            ("status", "approved"),
+            ("reviewed_data", {"name": "John Doe", "age": 30}),
         ],
         test_mock={
             "get_or_create_human_review": lambda *_args, **_kwargs: ReviewResult(
@@ -115,7 +116,8 @@ class HumanInTheLoopBlock(Block):
             logger.info(
                 f"HITL block skipping review for node {node_exec_id} - safe mode disabled"
             )
-            yield "approved_data", input_data.data
+            yield "status", "approved"
+            yield "reviewed_data", input_data.data
             yield "review_message", "Auto-approved (safe mode disabled)"
             return
@@ -156,11 +158,12 @@ class HumanInTheLoopBlock(Block):
         )
         if result.status == ReviewStatus.APPROVED:
-            yield "approved_data", result.data
+            yield "status", "approved"
+            yield "reviewed_data", result.data
             if result.message:
                 yield "review_message", result.message
         elif result.status == ReviewStatus.REJECTED:
-            yield "rejected_data", result.data
+            yield "status", "rejected"
             if result.message:
                 yield "review_message", result.message

View File

@@ -2,6 +2,7 @@ from enum import Enum
 from typing import Any, Dict, Literal, Optional
 from pydantic import SecretStr
+from requests.exceptions import RequestException
 from backend.data.block import (
     Block,
@@ -331,8 +332,8 @@ class IdeogramModelBlock(Block):
         try:
             response = await Requests().post(url, headers=headers, json=data)
             return response.json()["data"][0]["url"]
-        except Exception as e:
-            raise ValueError(f"Failed to fetch image with V3 endpoint: {e}") from e
+        except RequestException as e:
+            raise Exception(f"Failed to fetch image with V3 endpoint: {str(e)}")
     async def _run_model_legacy(
         self,
@@ -384,8 +385,8 @@
         try:
             response = await Requests().post(url, headers=headers, json=data)
             return response.json()["data"][0]["url"]
-        except Exception as e:
-            raise ValueError(f"Failed to fetch image with legacy endpoint: {e}") from e
+        except RequestException as e:
+            raise Exception(f"Failed to fetch image with legacy endpoint: {str(e)}")
     async def upscale_image(self, api_key: SecretStr, image_url: str):
         url = "https://api.ideogram.ai/upscale"
@@ -412,5 +413,5 @@ class IdeogramModelBlock(Block):
             return (response.json())["data"][0]["url"]
-        except Exception as e:
-            raise ValueError(f"Failed to upscale image: {e}") from e
+        except RequestException as e:
+            raise Exception(f"Failed to upscale image: {str(e)}")

View File

@@ -16,7 +16,6 @@ from backend.data.block import (
     BlockSchemaOutput,
 )
 from backend.data.model import SchemaField
-from backend.util.exceptions import BlockExecutionError
 class SearchTheWebBlock(Block, GetRequest):
@@ -57,17 +56,7 @@ class SearchTheWebBlock(Block, GetRequest):
         # Prepend the Jina Search URL to the encoded query
         jina_search_url = f"https://s.jina.ai/{encoded_query}"
-        try:
-            results = await self.get_request(
-                jina_search_url, headers=headers, json=False
-            )
-        except Exception as e:
-            raise BlockExecutionError(
-                message=f"Search failed: {e}",
-                block_name=self.name,
-                block_id=self.id,
-            ) from e
+        results = await self.get_request(jina_search_url, headers=headers, json=False)
         # Output the search results
         yield "results", results

View File

@@ -92,9 +92,8 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
     O1 = "o1"
     O1_MINI = "o1-mini"
     # GPT-5 models
-    GPT5_2 = "gpt-5.2-2025-12-11"
-    GPT5_1 = "gpt-5.1-2025-11-13"
     GPT5 = "gpt-5-2025-08-07"
+    GPT5_1 = "gpt-5.1-2025-11-13"
     GPT5_MINI = "gpt-5-mini-2025-08-07"
     GPT5_NANO = "gpt-5-nano-2025-08-07"
     GPT5_CHAT = "gpt-5-chat-latest"
@@ -195,9 +194,8 @@ MODEL_METADATA = {
     LlmModel.O1: ModelMetadata("openai", 200000, 100000),  # o1-2024-12-17
     LlmModel.O1_MINI: ModelMetadata("openai", 128000, 65536),  # o1-mini-2024-09-12
     # GPT-5 models
-    LlmModel.GPT5_2: ModelMetadata("openai", 400000, 128000),
-    LlmModel.GPT5_1: ModelMetadata("openai", 400000, 128000),
     LlmModel.GPT5: ModelMetadata("openai", 400000, 128000),
+    LlmModel.GPT5_1: ModelMetadata("openai", 400000, 128000),
     LlmModel.GPT5_MINI: ModelMetadata("openai", 400000, 128000),
     LlmModel.GPT5_NANO: ModelMetadata("openai", 400000, 128000),
     LlmModel.GPT5_CHAT: ModelMetadata("openai", 400000, 16384),
@@ -305,8 +303,6 @@ MODEL_METADATA = {
     LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000),
 }
-DEFAULT_LLM_MODEL = LlmModel.GPT5_2
 for model in LlmModel:
     if model not in MODEL_METADATA:
         raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}")
@@ -794,7 +790,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
     )
     model: LlmModel = SchemaField(
         title="LLM Model",
-        default=DEFAULT_LLM_MODEL,
+        default=LlmModel.GPT4O,
         description="The language model to use for answering the prompt.",
         advanced=False,
     )
@@ -859,7 +855,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
         input_schema=AIStructuredResponseGeneratorBlock.Input,
         output_schema=AIStructuredResponseGeneratorBlock.Output,
         test_input={
-            "model": DEFAULT_LLM_MODEL,
+            "model": LlmModel.GPT4O,
             "credentials": TEST_CREDENTIALS_INPUT,
             "expected_format": {
                 "key1": "value1",
@@ -1225,7 +1221,7 @@ class AITextGeneratorBlock(AIBlockBase):
     )
     model: LlmModel = SchemaField(
         title="LLM Model",
-        default=DEFAULT_LLM_MODEL,
+        default=LlmModel.GPT4O,
         description="The language model to use for answering the prompt.",
         advanced=False,
     )
@@ -1321,7 +1317,7 @@ class AITextSummarizerBlock(AIBlockBase):
     )
     model: LlmModel = SchemaField(
         title="LLM Model",
-        default=DEFAULT_LLM_MODEL,
+        default=LlmModel.GPT4O,
         description="The language model to use for summarizing the text.",
     )
     focus: str = SchemaField(
@@ -1538,7 +1534,7 @@ class AIConversationBlock(AIBlockBase):
     )
     model: LlmModel = SchemaField(
         title="LLM Model",
-        default=DEFAULT_LLM_MODEL,
+        default=LlmModel.GPT4O,
         description="The language model to use for the conversation.",
     )
     credentials: AICredentials = AICredentialsField()
@@ -1576,7 +1572,7 @@ class AIConversationBlock(AIBlockBase):
             },
             {"role": "user", "content": "Where was it played?"},
         ],
-        "model": DEFAULT_LLM_MODEL,
+        "model": LlmModel.GPT4O,
         "credentials": TEST_CREDENTIALS_INPUT,
     },
     test_credentials=TEST_CREDENTIALS,
@@ -1639,7 +1635,7 @@ class AIListGeneratorBlock(AIBlockBase):
     )
     model: LlmModel = SchemaField(
         title="LLM Model",
-        default=DEFAULT_LLM_MODEL,
+        default=LlmModel.GPT4O,
         description="The language model to use for generating the list.",
         advanced=True,
     )
@@ -1696,7 +1692,7 @@ class AIListGeneratorBlock(AIBlockBase):
             "drawing explorers to uncover its mysteries. Each planet showcases the limitless possibilities of "
             "fictional worlds."
         ),
-        "model": DEFAULT_LLM_MODEL,
+        "model": LlmModel.GPT4O,
         "credentials": TEST_CREDENTIALS_INPUT,
         "max_retries": 3,
         "force_json_output": False,

View File

@@ -18,7 +18,6 @@ from backend.data.block import (
     BlockSchemaOutput,
 )
 from backend.data.model import APIKeyCredentials, CredentialsField, SchemaField
-from backend.util.exceptions import BlockExecutionError, BlockInputError
 logger = logging.getLogger(__name__)
@@ -112,27 +111,9 @@ class ReplicateModelBlock(Block):
             yield "status", "succeeded"
             yield "model_name", input_data.model_name
         except Exception as e:
-            error_msg = str(e)
-            logger.error(f"Error running Replicate model: {error_msg}")
-            # Input validation errors (422, 400) → BlockInputError
-            if (
-                "422" in error_msg
-                or "Input validation failed" in error_msg
-                or "400" in error_msg
-            ):
-                raise BlockInputError(
-                    message=f"Invalid model inputs: {error_msg}",
-                    block_name=self.name,
-                    block_id=self.id,
-                ) from e
-            # Everything else → BlockExecutionError
-            else:
-                raise BlockExecutionError(
-                    message=f"Replicate model error: {error_msg}",
-                    block_name=self.name,
-                    block_id=self.id,
-                ) from e
+            error_msg = f"Unexpected error running Replicate model: {str(e)}"
+            logger.error(error_msg)
+            raise RuntimeError(error_msg)
     async def run_model(self, model_ref: str, model_inputs: dict, api_key: SecretStr):
         """

View File

@@ -45,16 +45,10 @@ class GetWikipediaSummaryBlock(Block, GetRequest):
     async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         topic = input_data.topic
         url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic}"
-        # Note: User-Agent is now automatically set by the request library
-        # to comply with Wikimedia's robot policy (https://w.wiki/4wJS)
-        try:
-            response = await self.get_request(url, json=True)
-            if "extract" not in response:
-                raise ValueError(f"Unable to parse Wikipedia response: {response}")
-            yield "summary", response["extract"]
-        except Exception as e:
-            raise ValueError(f"Failed to fetch Wikipedia summary: {e}") from e
+        response = await self.get_request(url, json=True)
+        if "extract" not in response:
+            raise RuntimeError(f"Unable to parse Wikipedia response: {response}")
+        yield "summary", response["extract"]
 TEST_CREDENTIALS = APIKeyCredentials(

View File

@@ -226,7 +226,7 @@ class SmartDecisionMakerBlock(Block):
     )
     model: llm.LlmModel = SchemaField(
         title="LLM Model",
-        default=llm.DEFAULT_LLM_MODEL,
+        default=llm.LlmModel.GPT4O,
         description="The language model to use for answering the prompt.",
         advanced=False,
     )

View File

@@ -196,15 +196,6 @@ class TestXMLParserBlockSecurity:
         async for _ in block.run(XMLParserBlock.Input(input_xml=large_xml)):
             pass
-    async def test_rejects_text_outside_root(self):
-        """Ensure parser surfaces readable errors for invalid root text."""
-        block = XMLParserBlock()
-        invalid_xml = "<root><child>value</child></root> trailing"
-        with pytest.raises(ValueError, match="text outside the root element"):
-            async for _ in block.run(XMLParserBlock.Input(input_xml=invalid_xml)):
-                pass
 class TestStoreMediaFileSecurity:
     """Test file storage security limits."""

View File

@@ -28,7 +28,7 @@ class TestLLMStatsTracking:
         response = await llm.llm_call(
             credentials=llm.TEST_CREDENTIALS,
-            llm_model=llm.DEFAULT_LLM_MODEL,
+            llm_model=llm.LlmModel.GPT4O,
             prompt=[{"role": "user", "content": "Hello"}],
             max_tokens=100,
         )
@@ -65,7 +65,7 @@ class TestLLMStatsTracking:
         input_data = llm.AIStructuredResponseGeneratorBlock.Input(
             prompt="Test prompt",
             expected_format={"key1": "desc1", "key2": "desc2"},
-            model=llm.DEFAULT_LLM_MODEL,
+            model=llm.LlmModel.GPT4O,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore # type: ignore
         )
@@ -109,7 +109,7 @@ class TestLLMStatsTracking:
         # Run the block
         input_data = llm.AITextGeneratorBlock.Input(
             prompt="Generate text",
-            model=llm.DEFAULT_LLM_MODEL,
+            model=llm.LlmModel.GPT4O,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )
@@ -170,7 +170,7 @@ class TestLLMStatsTracking:
         input_data = llm.AIStructuredResponseGeneratorBlock.Input(
             prompt="Test prompt",
             expected_format={"key1": "desc1", "key2": "desc2"},
-            model=llm.DEFAULT_LLM_MODEL,
+            model=llm.LlmModel.GPT4O,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             retry=2,
         )
@@ -228,7 +228,7 @@ class TestLLMStatsTracking:
         input_data = llm.AITextSummarizerBlock.Input(
             text=long_text,
-            model=llm.DEFAULT_LLM_MODEL,
+            model=llm.LlmModel.GPT4O,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             max_tokens=100,  # Small chunks
             chunk_overlap=10,
@@ -299,7 +299,7 @@ class TestLLMStatsTracking:
         # Test with very short text (should only need 1 chunk + 1 final summary)
         input_data = llm.AITextSummarizerBlock.Input(
             text="This is a short text.",
-            model=llm.DEFAULT_LLM_MODEL,
+            model=llm.LlmModel.GPT4O,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             max_tokens=1000,  # Large enough to avoid chunking
         )
@@ -346,7 +346,7 @@ class TestLLMStatsTracking:
                 {"role": "assistant", "content": "Hi there!"},
                 {"role": "user", "content": "How are you?"},
             ],
-            model=llm.DEFAULT_LLM_MODEL,
+            model=llm.LlmModel.GPT4O,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )
@@ -387,7 +387,7 @@ class TestLLMStatsTracking:
         # Run the block
         input_data = llm.AIListGeneratorBlock.Input(
             focus="test items",
-            model=llm.DEFAULT_LLM_MODEL,
+            model=llm.LlmModel.GPT4O,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             max_retries=3,
         )
@@ -469,7 +469,7 @@ class TestLLMStatsTracking:
         input_data = llm.AIStructuredResponseGeneratorBlock.Input(
             prompt="Test",
             expected_format={"result": "desc"},
-            model=llm.DEFAULT_LLM_MODEL,
+            model=llm.LlmModel.GPT4O,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )
@@ -513,7 +513,7 @@ class TestAITextSummarizerValidation:
         # Create input data
         input_data = llm.AITextSummarizerBlock.Input(
             text="Some text to summarize",
-            model=llm.DEFAULT_LLM_MODEL,
+            model=llm.LlmModel.GPT4O,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             style=llm.SummaryStyle.BULLET_POINTS,
         )
@@ -558,7 +558,7 @@ class TestAITextSummarizerValidation:
         # Create input data
         input_data = llm.AITextSummarizerBlock.Input(
             text="Some text to summarize",
-            model=llm.DEFAULT_LLM_MODEL,
+            model=llm.LlmModel.GPT4O,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             style=llm.SummaryStyle.BULLET_POINTS,
             max_tokens=1000,
@@ -593,7 +593,7 @@ class TestAITextSummarizerValidation:
         # Create input data
         input_data = llm.AITextSummarizerBlock.Input(
             text="Some text to summarize",
-            model=llm.DEFAULT_LLM_MODEL,
+            model=llm.LlmModel.GPT4O,
            credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )
@@ -623,7 +623,7 @@ class TestAITextSummarizerValidation:
         # Create input data
         input_data = llm.AITextSummarizerBlock.Input(
             text="Some text to summarize",
-            model=llm.DEFAULT_LLM_MODEL,
+            model=llm.LlmModel.GPT4O,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             max_tokens=1000,
         )
@@ -654,7 +654,7 @@ class TestAITextSummarizerValidation:
         # Create input data
         input_data = llm.AITextSummarizerBlock.Input(
             text="Some text to summarize",
-            model=llm.DEFAULT_LLM_MODEL,
+            model=llm.LlmModel.GPT4O,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )

View File

@@ -5,10 +5,10 @@ from unittest.mock import AsyncMock, MagicMock, patch
 import pytest
-from backend.api.model import CreateGraph
-from backend.api.rest_api import AgentServer
 from backend.data.execution import ExecutionContext
 from backend.data.model import ProviderName, User
+from backend.server.model import CreateGraph
+from backend.server.rest_api import AgentServer
 from backend.usecases.sample import create_test_graph, create_test_user
 from backend.util.test import SpinTestServer, wait_execution
@@ -233,7 +233,7 @@ async def test_smart_decision_maker_tracks_llm_stats():
     # Create test input
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Should I continue with this task?",
-        model=llm_module.DEFAULT_LLM_MODEL,
+        model=llm_module.LlmModel.GPT4O,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=0,
     )
@@ -335,7 +335,7 @@ async def test_smart_decision_maker_parameter_validation():
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Search for keywords",
-        model=llm_module.DEFAULT_LLM_MODEL,
+        model=llm_module.LlmModel.GPT4O,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         retry=2,  # Set retry to 2 for testing
         agent_mode_max_iterations=0,
@@ -402,7 +402,7 @@ async def test_smart_decision_maker_parameter_validation():
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Search for keywords",
-        model=llm_module.DEFAULT_LLM_MODEL,
+        model=llm_module.LlmModel.GPT4O,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=0,
     )
@@ -462,7 +462,7 @@ async def test_smart_decision_maker_parameter_validation():
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Search for keywords",
-        model=llm_module.DEFAULT_LLM_MODEL,
+        model=llm_module.LlmModel.GPT4O,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=0,
     )
@@ -526,7 +526,7 @@ async def test_smart_decision_maker_parameter_validation():
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Search for keywords",
-        model=llm_module.DEFAULT_LLM_MODEL,
+        model=llm_module.LlmModel.GPT4O,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=0,
     )
@@ -648,7 +648,7 @@ async def test_smart_decision_maker_raw_response_conversion():
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Test prompt",
-        model=llm_module.DEFAULT_LLM_MODEL,
+        model=llm_module.LlmModel.GPT4O,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         retry=2,
         agent_mode_max_iterations=0,
@@ -722,7 +722,7 @@ async def test_smart_decision_maker_raw_response_conversion():
     ):
         input_data = SmartDecisionMakerBlock.Input(
             prompt="Simple prompt",
-            model=llm_module.DEFAULT_LLM_MODEL,
+            model=llm_module.LlmModel.GPT4O,
             credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
             agent_mode_max_iterations=0,
         )
@@ -778,7 +778,7 @@ async def test_smart_decision_maker_raw_response_conversion():
     ):
         input_data = SmartDecisionMakerBlock.Input(
             prompt="Another test",
-            model=llm_module.DEFAULT_LLM_MODEL,
+            model=llm_module.LlmModel.GPT4O,
            credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
             agent_mode_max_iterations=0,
         )
@@ -931,7 +931,7 @@ async def test_smart_decision_maker_agent_mode():
# Test agent mode with max_iterations = 3 # Test agent mode with max_iterations = 3
input_data = SmartDecisionMakerBlock.Input( input_data = SmartDecisionMakerBlock.Input(
prompt="Complete this task using tools", prompt="Complete this task using tools",
model=llm_module.DEFAULT_LLM_MODEL, model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=3, # Enable agent mode with 3 max iterations agent_mode_max_iterations=3, # Enable agent mode with 3 max iterations
) )
@@ -1020,7 +1020,7 @@ async def test_smart_decision_maker_traditional_mode_default():
# Test default behavior (traditional mode) # Test default behavior (traditional mode)
input_data = SmartDecisionMakerBlock.Input( input_data = SmartDecisionMakerBlock.Input(
prompt="Test prompt", prompt="Test prompt",
model=llm_module.DEFAULT_LLM_MODEL, model=llm_module.LlmModel.GPT4O,
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
agent_mode_max_iterations=0, # Traditional mode agent_mode_max_iterations=0, # Traditional mode
) )

View File

@@ -373,7 +373,7 @@ async def test_output_yielding_with_dynamic_fields():
input_data = block.input_schema( input_data = block.input_schema(
prompt="Create a user dictionary", prompt="Create a user dictionary",
credentials=llm.TEST_CREDENTIALS_INPUT, credentials=llm.TEST_CREDENTIALS_INPUT,
model=llm.DEFAULT_LLM_MODEL, model=llm.LlmModel.GPT4O,
agent_mode_max_iterations=0, # Use traditional mode to test output yielding agent_mode_max_iterations=0, # Use traditional mode to test output yielding
) )
@@ -594,7 +594,7 @@ async def test_validation_errors_dont_pollute_conversation():
input_data = block.input_schema( input_data = block.input_schema(
prompt="Test prompt", prompt="Test prompt",
credentials=llm.TEST_CREDENTIALS_INPUT, credentials=llm.TEST_CREDENTIALS_INPUT,
model=llm.DEFAULT_LLM_MODEL, model=llm.LlmModel.GPT4O,
retry=3, # Allow retries retry=3, # Allow retries
agent_mode_max_iterations=1, agent_mode_max_iterations=1,
) )

View File

@@ -1,5 +1,5 @@
from gravitasml.parser import Parser from gravitasml.parser import Parser
from gravitasml.token import Token, tokenize from gravitasml.token import tokenize
from backend.data.block import Block, BlockOutput, BlockSchemaInput, BlockSchemaOutput from backend.data.block import Block, BlockOutput, BlockSchemaInput, BlockSchemaOutput
from backend.data.model import SchemaField from backend.data.model import SchemaField
@@ -25,38 +25,6 @@ class XMLParserBlock(Block):
], ],
) )
@staticmethod
def _validate_tokens(tokens: list[Token]) -> None:
"""Ensure the XML has a single root element and no stray text."""
if not tokens:
raise ValueError("XML input is empty.")
depth = 0
root_seen = False
for token in tokens:
if token.type == "TAG_OPEN":
if depth == 0 and root_seen:
raise ValueError("XML must have a single root element.")
depth += 1
if depth == 1:
root_seen = True
elif token.type == "TAG_CLOSE":
depth -= 1
if depth < 0:
raise SyntaxError("Unexpected closing tag in XML input.")
elif token.type in {"TEXT", "ESCAPE"}:
if depth == 0 and token.value:
raise ValueError(
"XML contains text outside the root element; "
"wrap content in a single root tag."
)
if depth != 0:
raise SyntaxError("Unclosed tag detected in XML input.")
if not root_seen:
raise ValueError("XML must include a root element.")
async def run(self, input_data: Input, **kwargs) -> BlockOutput: async def run(self, input_data: Input, **kwargs) -> BlockOutput:
# Security fix: Add size limits to prevent XML bomb attacks # Security fix: Add size limits to prevent XML bomb attacks
MAX_XML_SIZE = 10 * 1024 * 1024 # 10MB limit for XML input MAX_XML_SIZE = 10 * 1024 * 1024 # 10MB limit for XML input
@@ -67,9 +35,7 @@ class XMLParserBlock(Block):
) )
try: try:
tokens = list(tokenize(input_data.input_xml)) tokens = tokenize(input_data.input_xml)
self._validate_tokens(tokens)
parser = Parser(tokens) parser = Parser(tokens)
parsed_result = parser.parse() parsed_result = parser.parse()
yield "parsed_xml", parsed_result yield "parsed_xml", parsed_result

View File

@@ -111,8 +111,6 @@ class TranscribeYoutubeVideoBlock(Block):
return parsed_url.path.split("/")[2] return parsed_url.path.split("/")[2]
if parsed_url.path[:3] == "/v/": if parsed_url.path[:3] == "/v/":
return parsed_url.path.split("/")[2] return parsed_url.path.split("/")[2]
if parsed_url.path.startswith("/shorts/"):
return parsed_url.path.split("/")[2]
raise ValueError(f"Invalid YouTube URL: {url}") raise ValueError(f"Invalid YouTube URL: {url}")
def get_transcript( def get_transcript(
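(Note: a quick standalone check of the `/shorts/` branch shown above, using only stdlib `urllib.parse`; the video ID is hypothetical.)

    from urllib.parse import urlparse

    path = urlparse("https://www.youtube.com/shorts/abc123XYZ").path
    assert path.startswith("/shorts/")
    assert path.split("/")[2] == "abc123XYZ"  # ["", "shorts", "abc123XYZ"][2]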

View File

@@ -244,7 +244,11 @@ def websocket(server_address: str, graph_exec_id: str):
import websockets.asyncio.client import websockets.asyncio.client
from backend.api.ws_api import WSMessage, WSMethod, WSSubscribeGraphExecutionRequest from backend.server.ws_api import (
WSMessage,
WSMethod,
WSSubscribeGraphExecutionRequest,
)
async def send_message(server_address: str): async def send_message(server_address: str):
uri = f"ws://{server_address}" uri = f"ws://{server_address}"

View File

@@ -1 +0,0 @@
"""CLI utilities for backend development & administration"""

View File

@@ -1,57 +0,0 @@
#!/usr/bin/env python3
"""
Script to generate OpenAPI JSON specification for the FastAPI app.
This script imports the FastAPI app from backend.api.rest_api and outputs
the OpenAPI specification as JSON to stdout or a specified file.
Usage:
`poetry run python generate_openapi_json.py`
`poetry run python generate_openapi_json.py --output openapi.json`
`poetry run python generate_openapi_json.py --indent 4 --output openapi.json`
"""
import json
import os
from pathlib import Path
import click
@click.command()
@click.option(
"--output",
type=click.Path(dir_okay=False, path_type=Path),
help="Output file path (default: stdout)",
)
@click.option(
"--pretty",
type=click.BOOL,
default=False,
help="Pretty-print JSON output (indented 2 spaces)",
)
def main(output: Path, pretty: bool):
"""Generate and output the OpenAPI JSON specification."""
openapi_schema = get_openapi_schema()
json_output = json.dumps(openapi_schema, indent=2 if pretty else None)
if output:
output.write_text(json_output)
click.echo(f"✅ OpenAPI specification written to {output}\n\nPreview:")
click.echo(f"\n{json_output[:500]} ...")
else:
print(json_output)
def get_openapi_schema():
"""Get the OpenAPI schema from the FastAPI app"""
from backend.api.rest_api import app
return app.openapi()
if __name__ == "__main__":
os.environ["LOG_LEVEL"] = "ERROR" # disable stdout log output
main()

File diff suppressed because it is too large

View File

@@ -1,4 +1,4 @@
from backend.api.features.library.model import LibraryAgentPreset from backend.server.v2.library.model import LibraryAgentPreset
from .graph import NodeModel from .graph import NodeModel
from .integrations import Webhook # noqa: F401 from .integrations import Webhook # noqa: F401

View File

@@ -1,24 +1,22 @@
import logging import logging
import uuid import uuid
from datetime import datetime, timezone from datetime import datetime, timezone
from typing import Literal, Optional from typing import Optional
from autogpt_libs.api_key.keysmith import APIKeySmith from autogpt_libs.api_key.keysmith import APIKeySmith
from prisma.enums import APIKeyPermission, APIKeyStatus from prisma.enums import APIKeyPermission, APIKeyStatus
from prisma.models import APIKey as PrismaAPIKey from prisma.models import APIKey as PrismaAPIKey
from prisma.types import APIKeyWhereUniqueInput from prisma.types import APIKeyWhereUniqueInput
from pydantic import Field from pydantic import BaseModel, Field
from backend.data.includes import MAX_USER_API_KEYS_FETCH from backend.data.includes import MAX_USER_API_KEYS_FETCH
from backend.util.exceptions import NotAuthorizedError, NotFoundError from backend.util.exceptions import NotAuthorizedError, NotFoundError
from .base import APIAuthorizationInfo
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
keysmith = APIKeySmith() keysmith = APIKeySmith()
class APIKeyInfo(APIAuthorizationInfo): class APIKeyInfo(BaseModel):
id: str id: str
name: str name: str
head: str = Field( head: str = Field(
@@ -28,9 +26,12 @@ class APIKeyInfo(APIAuthorizationInfo):
description=f"The last {APIKeySmith.TAIL_LENGTH} characters of the key" description=f"The last {APIKeySmith.TAIL_LENGTH} characters of the key"
) )
status: APIKeyStatus status: APIKeyStatus
permissions: list[APIKeyPermission]
created_at: datetime
last_used_at: Optional[datetime] = None
revoked_at: Optional[datetime] = None
description: Optional[str] = None description: Optional[str] = None
user_id: str
type: Literal["api_key"] = "api_key" # type: ignore
@staticmethod @staticmethod
def from_db(api_key: PrismaAPIKey): def from_db(api_key: PrismaAPIKey):
@@ -40,7 +41,7 @@ class APIKeyInfo(APIAuthorizationInfo):
head=api_key.head, head=api_key.head,
tail=api_key.tail, tail=api_key.tail,
status=APIKeyStatus(api_key.status), status=APIKeyStatus(api_key.status),
scopes=[APIKeyPermission(p) for p in api_key.permissions], permissions=[APIKeyPermission(p) for p in api_key.permissions],
created_at=api_key.createdAt, created_at=api_key.createdAt,
last_used_at=api_key.lastUsedAt, last_used_at=api_key.lastUsedAt,
revoked_at=api_key.revokedAt, revoked_at=api_key.revokedAt,
@@ -210,7 +211,7 @@ async def suspend_api_key(key_id: str, user_id: str) -> APIKeyInfo:
def has_permission(api_key: APIKeyInfo, required_permission: APIKeyPermission) -> bool: def has_permission(api_key: APIKeyInfo, required_permission: APIKeyPermission) -> bool:
return required_permission in api_key.scopes return required_permission in api_key.permissions
async def get_api_key_by_id(key_id: str, user_id: str) -> Optional[APIKeyInfo]: async def get_api_key_by_id(key_id: str, user_id: str) -> Optional[APIKeyInfo]:

View File

@@ -1,15 +0,0 @@
from datetime import datetime
from typing import Literal, Optional
from prisma.enums import APIKeyPermission
from pydantic import BaseModel
class APIAuthorizationInfo(BaseModel):
user_id: str
scopes: list[APIKeyPermission]
type: Literal["oauth", "api_key"]
created_at: datetime
expires_at: Optional[datetime] = None
last_used_at: Optional[datetime] = None
revoked_at: Optional[datetime] = None

File diff suppressed because it is too large


View File

@@ -59,13 +59,12 @@ from backend.integrations.credentials_store import (
MODEL_COST: dict[LlmModel, int] = { MODEL_COST: dict[LlmModel, int] = {
LlmModel.O3: 4, LlmModel.O3: 4,
LlmModel.O3_MINI: 2, LlmModel.O3_MINI: 2, # $1.10 / $4.40
LlmModel.O1: 16, LlmModel.O1: 16, # $15 / $60
LlmModel.O1_MINI: 4, LlmModel.O1_MINI: 4,
# GPT-5 models # GPT-5 models
LlmModel.GPT5_2: 6,
LlmModel.GPT5_1: 5,
LlmModel.GPT5: 2, LlmModel.GPT5: 2,
LlmModel.GPT5_1: 5,
LlmModel.GPT5_MINI: 1, LlmModel.GPT5_MINI: 1,
LlmModel.GPT5_NANO: 1, LlmModel.GPT5_NANO: 1,
LlmModel.GPT5_CHAT: 5, LlmModel.GPT5_CHAT: 5,
@@ -88,7 +87,7 @@ MODEL_COST: dict[LlmModel, int] = {
LlmModel.AIML_API_LLAMA3_3_70B: 1, LlmModel.AIML_API_LLAMA3_3_70B: 1,
LlmModel.AIML_API_META_LLAMA_3_1_70B: 1, LlmModel.AIML_API_META_LLAMA_3_1_70B: 1,
LlmModel.AIML_API_LLAMA_3_2_3B: 1, LlmModel.AIML_API_LLAMA_3_2_3B: 1,
LlmModel.LLAMA3_3_70B: 1, LlmModel.LLAMA3_3_70B: 1, # $0.59 / $0.79
LlmModel.LLAMA3_1_8B: 1, LlmModel.LLAMA3_1_8B: 1,
LlmModel.OLLAMA_LLAMA3_3: 1, LlmModel.OLLAMA_LLAMA3_3: 1,
LlmModel.OLLAMA_LLAMA3_2: 1, LlmModel.OLLAMA_LLAMA3_2: 1,

View File

@@ -16,7 +16,6 @@ from prisma.models import CreditRefundRequest, CreditTransaction, User, UserBala
from prisma.types import CreditRefundRequestCreateInput, CreditTransactionWhereInput from prisma.types import CreditRefundRequestCreateInput, CreditTransactionWhereInput
from pydantic import BaseModel from pydantic import BaseModel
from backend.api.features.admin.model import UserHistoryResponse
from backend.data.block_cost_config import BLOCK_COSTS from backend.data.block_cost_config import BLOCK_COSTS
from backend.data.db import query_raw_with_schema from backend.data.db import query_raw_with_schema
from backend.data.includes import MAX_CREDIT_REFUND_REQUESTS_FETCH from backend.data.includes import MAX_CREDIT_REFUND_REQUESTS_FETCH
@@ -30,6 +29,7 @@ from backend.data.model import (
from backend.data.notifications import NotificationEventModel, RefundRequestData from backend.data.notifications import NotificationEventModel, RefundRequestData
from backend.data.user import get_user_by_id, get_user_email_by_id from backend.data.user import get_user_by_id, get_user_email_by_id
from backend.notifications.notifications import queue_notification_async from backend.notifications.notifications import queue_notification_async
from backend.server.v2.admin.model import UserHistoryResponse
from backend.util.exceptions import InsufficientBalanceError from backend.util.exceptions import InsufficientBalanceError
from backend.util.feature_flag import Flag, is_feature_enabled from backend.util.feature_flag import Flag, is_feature_enabled
from backend.util.json import SafeJson, dumps from backend.util.json import SafeJson, dumps
@@ -341,19 +341,6 @@ class UserCreditBase(ABC):
if result: if result:
# UserBalance is already updated by the CTE # UserBalance is already updated by the CTE
# Clear insufficient funds notification flags when credits are added
# so user can receive alerts again if they run out in the future.
if transaction.amount > 0 and transaction.type in [
CreditTransactionType.GRANT,
CreditTransactionType.TOP_UP,
]:
from backend.executor.manager import (
clear_insufficient_funds_notifications,
)
await clear_insufficient_funds_notifications(user_id)
return result[0]["balance"] return result[0]["balance"]
async def _add_transaction( async def _add_transaction(
@@ -543,22 +530,6 @@ class UserCreditBase(ABC):
if result: if result:
new_balance, tx_key = result[0]["balance"], result[0]["transactionKey"] new_balance, tx_key = result[0]["balance"], result[0]["transactionKey"]
# UserBalance is already updated by the CTE # UserBalance is already updated by the CTE
# Clear insufficient funds notification flags when credits are added
# so user can receive alerts again if they run out in the future.
if (
amount > 0
and is_active
and transaction_type
in [CreditTransactionType.GRANT, CreditTransactionType.TOP_UP]
):
# Lazy import to avoid circular dependency with executor.manager
from backend.executor.manager import (
clear_insufficient_funds_notifications,
)
await clear_insufficient_funds_notifications(user_id)
return new_balance, tx_key return new_balance, tx_key
# If no result, either user doesn't exist or insufficient balance # If no result, either user doesn't exist or insufficient balance
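(Note: a minimal sketch of the flag-clearing helper whose call is removed above, assuming an async redis-py client `r`; the function name is hypothetical, the key prefix matches the one defined in executor/manager.py.)

    # Assumes redis.asyncio.Redis `r`; scan_iter matches all per-agent flags
    # for the user, delete drops them so future alerts can fire again.
    async def clear_flags(r, user_id: str) -> int:
        pattern = f"insufficient_funds_discord_notified:{user_id}:*"
        keys = [key async for key in r.scan_iter(match=pattern)]
        return await r.delete(*keys) if keys else 0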

View File

@@ -111,7 +111,7 @@ def get_database_schema() -> str:
async def query_raw_with_schema(query_template: str, *args) -> list[dict]: async def query_raw_with_schema(query_template: str, *args) -> list[dict]:
"""Execute raw SQL query with proper schema handling.""" """Execute raw SQL query with proper schema handling."""
schema = get_database_schema() schema = get_database_schema()
schema_prefix = f'"{schema}".' if schema != "public" else "" schema_prefix = f"{schema}." if schema != "public" else ""
formatted_query = query_template.format(schema_prefix=schema_prefix) formatted_query = query_template.format(schema_prefix=schema_prefix)
import prisma as prisma_module import prisma as prisma_module
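(Note: the two variants above differ only in identifier quoting. In Postgres, unquoted identifiers fold to lowercase, so the quoted form is needed for mixed-case schema names; a standalone sketch of the templating, with an assumed `quote` toggle for comparison.)

    def build_query(template: str, schema: str, quote: bool = True) -> str:
        prefix = (f'"{schema}".' if quote else f"{schema}.") if schema != "public" else ""
        return template.format(schema_prefix=prefix)

    assert build_query('SELECT 1 FROM {schema_prefix}"User"', "public") == 'SELECT 1 FROM "User"'
    assert build_query('SELECT 1 FROM {schema_prefix}"User"', "Platform") == 'SELECT 1 FROM "Platform"."User"'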

View File

@@ -1,9 +1,8 @@
import logging import logging
import queue
from collections import defaultdict from collections import defaultdict
from datetime import datetime, timedelta, timezone from datetime import datetime, timedelta, timezone
from enum import Enum from enum import Enum
from multiprocessing import Manager
from queue import Empty
from typing import ( from typing import (
TYPE_CHECKING, TYPE_CHECKING,
Annotated, Annotated,
@@ -1164,12 +1163,16 @@ class NodeExecutionEntry(BaseModel):
class ExecutionQueue(Generic[T]): class ExecutionQueue(Generic[T]):
""" """
Queue for managing the execution of agents. Thread-safe queue for managing node execution within a single graph execution.
This will be shared between different processes
Note: Uses queue.Queue (not multiprocessing.Queue) since all access is from
threads within the same process. If migrating back to ProcessPoolExecutor,
replace with multiprocessing.Manager().Queue() for cross-process safety.
""" """
def __init__(self): def __init__(self):
self.queue = Manager().Queue() # Thread-safe queue (not multiprocessing) — see class docstring
self.queue: queue.Queue[T] = queue.Queue()
def add(self, execution: T) -> T: def add(self, execution: T) -> T:
self.queue.put(execution) self.queue.put(execution)
@@ -1184,7 +1187,7 @@ class ExecutionQueue(Generic[T]):
def get_or_none(self) -> T | None: def get_or_none(self) -> T | None:
try: try:
return self.queue.get_nowait() return self.queue.get_nowait()
except Empty: except queue.Empty:
return None return None
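(Note: a self-contained sketch contrasting the two queue types the new docstring refers to; `queue.Queue` is in-process and thread-safe, while `multiprocessing.Manager().Queue()` proxies every operation through a separate manager subprocess.)

    import multiprocessing
    import queue
    import threading

    def demo() -> None:
        # In-process, thread-safe: sufficient when producers/consumers are threads.
        local_q: queue.Queue[str] = queue.Queue()
        t = threading.Thread(target=lambda: local_q.put("done"))
        t.start()
        t.join()
        assert local_q.get() == "done"

        # Manager-backed: spawns a manager subprocess and pickles each operation.
        # Only needed when the queue must cross process boundaries.
        with multiprocessing.Manager() as manager:
            shared_q = manager.Queue()
            shared_q.put("done")
            assert shared_q.get() == "done"

    if __name__ == "__main__":
        demo()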

View File

@@ -0,0 +1,60 @@
"""Tests for ExecutionQueue thread-safety."""
import queue
import threading
import pytest
from backend.data.execution import ExecutionQueue
def test_execution_queue_uses_stdlib_queue():
"""Verify ExecutionQueue uses queue.Queue (not multiprocessing)."""
q = ExecutionQueue()
assert isinstance(q.queue, queue.Queue)
def test_basic_operations():
"""Test add, get, empty, and get_or_none."""
q = ExecutionQueue()
assert q.empty() is True
assert q.get_or_none() is None
result = q.add("item1")
assert result == "item1"
assert q.empty() is False
item = q.get()
assert item == "item1"
assert q.empty() is True
def test_thread_safety():
"""Test concurrent access from multiple threads."""
q = ExecutionQueue()
results = []
num_items = 100
def producer():
for i in range(num_items):
q.add(f"item_{i}")
def consumer():
count = 0
while count < num_items:
item = q.get_or_none()
if item is not None:
results.append(item)
count += 1
producer_thread = threading.Thread(target=producer)
consumer_thread = threading.Thread(target=consumer)
producer_thread.start()
consumer_thread.start()
producer_thread.join(timeout=5)
consumer_thread.join(timeout=5)
assert len(results) == num_items

View File

@@ -6,14 +6,14 @@ import fastapi.exceptions
import pytest import pytest
from pytest_snapshot.plugin import Snapshot from pytest_snapshot.plugin import Snapshot
import backend.api.features.store.model as store import backend.server.v2.store.model as store
from backend.api.model import CreateGraph
from backend.blocks.basic import StoreValueBlock from backend.blocks.basic import StoreValueBlock
from backend.blocks.io import AgentInputBlock, AgentOutputBlock from backend.blocks.io import AgentInputBlock, AgentOutputBlock
from backend.data.block import BlockSchema, BlockSchemaInput from backend.data.block import BlockSchema, BlockSchemaInput
from backend.data.graph import Graph, Link, Node from backend.data.graph import Graph, Link, Node
from backend.data.model import SchemaField from backend.data.model import SchemaField
from backend.data.user import DEFAULT_USER_ID from backend.data.user import DEFAULT_USER_ID
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_user from backend.usecases.sample import create_test_user
from backend.util.test import SpinTestServer from backend.util.test import SpinTestServer

View File

@@ -13,7 +13,7 @@ from prisma.models import PendingHumanReview
from prisma.types import PendingHumanReviewUpdateInput from prisma.types import PendingHumanReviewUpdateInput
from pydantic import BaseModel from pydantic import BaseModel
from backend.api.features.executions.review.model import ( from backend.server.v2.executions.review.model import (
PendingHumanReviewModel, PendingHumanReviewModel,
SafeJsonData, SafeJsonData,
) )
@@ -100,7 +100,7 @@ async def get_or_create_human_review(
return None return None
else: else:
return ReviewResult( return ReviewResult(
data=review.payload, data=review.payload if review.status == ReviewStatus.APPROVED else None,
status=review.status, status=review.status,
message=review.reviewMessage or "", message=review.reviewMessage or "",
processed=review.processed, processed=review.processed,

View File

@@ -23,7 +23,7 @@ from backend.util.exceptions import NotFoundError
from backend.util.json import SafeJson from backend.util.json import SafeJson
if TYPE_CHECKING: if TYPE_CHECKING:
from backend.api.features.library.model import LibraryAgentPreset from backend.server.v2.library.model import LibraryAgentPreset
from .db import BaseDbModel from .db import BaseDbModel
from .graph import NodeModel from .graph import NodeModel
@@ -79,7 +79,7 @@ class WebhookWithRelations(Webhook):
# integrations.py → library/model.py → integrations.py (for Webhook) # integrations.py → library/model.py → integrations.py (for Webhook)
# Runtime import is used in WebhookWithRelations.from_db() method instead # Runtime import is used in WebhookWithRelations.from_db() method instead
# Import at runtime to avoid circular dependency # Import at runtime to avoid circular dependency
from backend.api.features.library.model import LibraryAgentPreset from backend.server.v2.library.model import LibraryAgentPreset
return WebhookWithRelations( return WebhookWithRelations(
**Webhook.from_db(webhook).model_dump(), **Webhook.from_db(webhook).model_dump(),
@@ -285,8 +285,8 @@ async def unlink_webhook_from_graph(
user_id: The ID of the user (for authorization) user_id: The ID of the user (for authorization)
""" """
# Avoid circular imports # Avoid circular imports
from backend.api.features.library.db import set_preset_webhook
from backend.data.graph import set_node_webhook from backend.data.graph import set_node_webhook
from backend.server.v2.library.db import set_preset_webhook
# Find all nodes in this graph that use this webhook # Find all nodes in this graph that use this webhook
nodes = await AgentNode.prisma().find_many( nodes = await AgentNode.prisma().find_many(

View File

@@ -4,8 +4,8 @@ from typing import AsyncGenerator
from pydantic import BaseModel, field_serializer from pydantic import BaseModel, field_serializer
from backend.api.model import NotificationPayload
from backend.data.event_bus import AsyncRedisEventBus from backend.data.event_bus import AsyncRedisEventBus
from backend.server.model import NotificationPayload
from backend.util.settings import Settings from backend.util.settings import Settings

View File

@@ -9,8 +9,6 @@ from prisma.enums import OnboardingStep
from prisma.models import UserOnboarding from prisma.models import UserOnboarding
from prisma.types import UserOnboardingCreateInput, UserOnboardingUpdateInput from prisma.types import UserOnboardingCreateInput, UserOnboardingUpdateInput
from backend.api.features.store.model import StoreAgentDetails
from backend.api.model import OnboardingNotificationPayload
from backend.data import execution as execution_db from backend.data import execution as execution_db
from backend.data.credit import get_user_credit_model from backend.data.credit import get_user_credit_model
from backend.data.notification_bus import ( from backend.data.notification_bus import (
@@ -18,6 +16,8 @@ from backend.data.notification_bus import (
NotificationEvent, NotificationEvent,
) )
from backend.data.user import get_user_by_id from backend.data.user import get_user_by_id
from backend.server.model import OnboardingNotificationPayload
from backend.server.v2.store.model import StoreAgentDetails
from backend.util.cache import cached from backend.util.cache import cached
from backend.util.json import SafeJson from backend.util.json import SafeJson
from backend.util.timezone_utils import get_user_timezone_or_utc from backend.util.timezone_utils import get_user_timezone_or_utc
@@ -442,8 +442,6 @@ async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:
runs=agent.runs, runs=agent.runs,
rating=agent.rating, rating=agent.rating,
versions=agent.versions, versions=agent.versions,
agentGraphVersions=agent.agentGraphVersions,
agentGraphId=agent.agentGraphId,
last_updated=agent.updated_at, last_updated=agent.updated_at,
) )
for agent in recommended_agents for agent in recommended_agents

View File

@@ -2,11 +2,6 @@ import logging
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
from typing import TYPE_CHECKING, Callable, Concatenate, ParamSpec, TypeVar, cast from typing import TYPE_CHECKING, Callable, Concatenate, ParamSpec, TypeVar, cast
from backend.api.features.library.db import (
add_store_agent_to_library,
list_library_agents,
)
from backend.api.features.store.db import get_store_agent_details, get_store_agents
from backend.data import db from backend.data import db
from backend.data.analytics import ( from backend.data.analytics import (
get_accuracy_trends_and_alerts, get_accuracy_trends_and_alerts,
@@ -66,6 +61,8 @@ from backend.data.user import (
get_user_notification_preference, get_user_notification_preference,
update_user_integrations, update_user_integrations,
) )
from backend.server.v2.library.db import add_store_agent_to_library, list_library_agents
from backend.server.v2.store.db import get_store_agent_details, get_store_agents
from backend.util.service import ( from backend.util.service import (
AppService, AppService,
AppServiceClient, AppServiceClient,

View File

@@ -48,8 +48,27 @@ from backend.data.notifications import (
ZeroBalanceData, ZeroBalanceData,
) )
from backend.data.rabbitmq import SyncRabbitMQ from backend.data.rabbitmq import SyncRabbitMQ
from backend.executor.activity_status_generator import (
generate_activity_status_for_execution,
)
from backend.executor.utils import (
GRACEFUL_SHUTDOWN_TIMEOUT_SECONDS,
GRAPH_EXECUTION_CANCEL_QUEUE_NAME,
GRAPH_EXECUTION_EXCHANGE,
GRAPH_EXECUTION_QUEUE_NAME,
GRAPH_EXECUTION_ROUTING_KEY,
CancelExecutionEvent,
ExecutionOutputEntry,
LogMetadata,
NodeExecutionProgress,
block_usage_cost,
create_execution_queue_config,
execution_usage_cost,
validate_exec,
)
from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.notifications.notifications import queue_notification from backend.notifications.notifications import queue_notification
from backend.server.v2.AutoMod.manager import automod_manager
from backend.util import json from backend.util import json
from backend.util.clients import ( from backend.util.clients import (
get_async_execution_event_bus, get_async_execution_event_bus,
@@ -76,24 +95,7 @@ from backend.util.retry import (
) )
from backend.util.settings import Settings from backend.util.settings import Settings
from .activity_status_generator import generate_activity_status_for_execution
from .automod.manager import automod_manager
from .cluster_lock import ClusterLock from .cluster_lock import ClusterLock
from .utils import (
GRACEFUL_SHUTDOWN_TIMEOUT_SECONDS,
GRAPH_EXECUTION_CANCEL_QUEUE_NAME,
GRAPH_EXECUTION_EXCHANGE,
GRAPH_EXECUTION_QUEUE_NAME,
GRAPH_EXECUTION_ROUTING_KEY,
CancelExecutionEvent,
ExecutionOutputEntry,
LogMetadata,
NodeExecutionProgress,
block_usage_cost,
create_execution_queue_config,
execution_usage_cost,
validate_exec,
)
if TYPE_CHECKING: if TYPE_CHECKING:
from backend.executor import DatabaseManagerAsyncClient, DatabaseManagerClient from backend.executor import DatabaseManagerAsyncClient, DatabaseManagerClient
@@ -114,40 +116,6 @@ utilization_gauge = Gauge(
"Ratio of active graph runs to max graph workers", "Ratio of active graph runs to max graph workers",
) )
# Redis key prefix for tracking insufficient funds Discord notifications.
# We only send one notification per user per agent until they top up credits.
INSUFFICIENT_FUNDS_NOTIFIED_PREFIX = "insufficient_funds_discord_notified"
# TTL for the notification flag (30 days) - acts as a fallback cleanup
INSUFFICIENT_FUNDS_NOTIFIED_TTL_SECONDS = 30 * 24 * 60 * 60
async def clear_insufficient_funds_notifications(user_id: str) -> int:
"""
Clear all insufficient funds notification flags for a user.
This should be called when a user tops up their credits, allowing
Discord notifications to be sent again if they run out of funds.
Args:
user_id: The user ID to clear notifications for.
Returns:
The number of keys that were deleted.
"""
try:
redis_client = await redis.get_redis_async()
pattern = f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:*"
keys = [key async for key in redis_client.scan_iter(match=pattern)]
if keys:
return await redis_client.delete(*keys)
return 0
except Exception as e:
logger.warning(
f"Failed to clear insufficient funds notification flags for user "
f"{user_id}: {e}"
)
return 0
# Thread-local storage for ExecutionProcessor instances # Thread-local storage for ExecutionProcessor instances
_tls = threading.local() _tls = threading.local()
@@ -1295,40 +1263,12 @@ class ExecutionProcessor:
graph_id: str, graph_id: str,
e: InsufficientBalanceError, e: InsufficientBalanceError,
): ):
# Check if we've already sent a notification for this user+agent combo.
# We only send one notification per user per agent until they top up credits.
redis_key = f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:{graph_id}"
try:
redis_client = redis.get_redis()
# SET NX returns True only if the key was newly set (didn't exist)
is_new_notification = redis_client.set(
redis_key,
"1",
nx=True,
ex=INSUFFICIENT_FUNDS_NOTIFIED_TTL_SECONDS,
)
if not is_new_notification:
# Already notified for this user+agent, skip all notifications
logger.debug(
f"Skipping duplicate insufficient funds notification for "
f"user={user_id}, graph={graph_id}"
)
return
except Exception as redis_error:
# If Redis fails, log and continue to send the notification
# (better to occasionally duplicate than to never notify)
logger.warning(
f"Failed to check/set insufficient funds notification flag in Redis: "
f"{redis_error}"
)
shortfall = abs(e.amount) - e.balance shortfall = abs(e.amount) - e.balance
metadata = db_client.get_graph_metadata(graph_id) metadata = db_client.get_graph_metadata(graph_id)
base_url = ( base_url = (
settings.config.frontend_base_url or settings.config.platform_base_url settings.config.frontend_base_url or settings.config.platform_base_url
) )
# Queue user email notification
queue_notification( queue_notification(
NotificationEventModel( NotificationEventModel(
user_id=user_id, user_id=user_id,
@@ -1342,7 +1282,6 @@ class ExecutionProcessor:
) )
) )
# Send Discord system alert
try: try:
user_email = db_client.get_user_email_by_id(user_id) user_email = db_client.get_user_email_by_id(user_id)
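(Note: the deduplication removed above boils down to Redis `SET ... NX EX`; a minimal sketch assuming a synchronous redis-py client `r`, with a hypothetical helper name.)

    # set(..., nx=True, ex=ttl) returns True only if the key was newly created,
    # so the first caller notifies and later callers skip until TTL expiry or top-up.
    def should_notify(r, user_id: str, graph_id: str, ttl: int = 30 * 24 * 60 * 60) -> bool:
        key = f"insufficient_funds_discord_notified:{user_id}:{graph_id}"
        return bool(r.set(key, "1", nx=True, ex=ttl))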

View File

@@ -1,560 +0,0 @@
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from prisma.enums import NotificationType
from backend.data.notifications import ZeroBalanceData
from backend.executor.manager import (
INSUFFICIENT_FUNDS_NOTIFIED_PREFIX,
ExecutionProcessor,
clear_insufficient_funds_notifications,
)
from backend.util.exceptions import InsufficientBalanceError
from backend.util.test import SpinTestServer
async def async_iter(items):
"""Helper to create an async iterator from a list."""
for item in items:
yield item
@pytest.mark.asyncio(loop_scope="session")
async def test_handle_insufficient_funds_sends_discord_alert_first_time(
server: SpinTestServer,
):
"""Test that the first insufficient funds notification sends a Discord alert."""
execution_processor = ExecutionProcessor()
user_id = "test-user-123"
graph_id = "test-graph-456"
error = InsufficientBalanceError(
message="Insufficient balance",
user_id=user_id,
balance=72, # $0.72
amount=-714, # Attempting to spend $7.14
)
with patch(
"backend.executor.manager.queue_notification"
) as mock_queue_notif, patch(
"backend.executor.manager.get_notification_manager_client"
) as mock_get_client, patch(
"backend.executor.manager.settings"
) as mock_settings, patch(
"backend.executor.manager.redis"
) as mock_redis_module:
# Setup mocks
mock_client = MagicMock()
mock_get_client.return_value = mock_client
mock_settings.config.frontend_base_url = "https://test.com"
# Mock Redis to simulate first-time notification (set returns True)
mock_redis_client = MagicMock()
mock_redis_module.get_redis.return_value = mock_redis_client
mock_redis_client.set.return_value = True # Key was newly set
# Create mock database client
mock_db_client = MagicMock()
mock_graph_metadata = MagicMock()
mock_graph_metadata.name = "Test Agent"
mock_db_client.get_graph_metadata.return_value = mock_graph_metadata
mock_db_client.get_user_email_by_id.return_value = "test@example.com"
# Test the insufficient funds handler
execution_processor._handle_insufficient_funds_notif(
db_client=mock_db_client,
user_id=user_id,
graph_id=graph_id,
e=error,
)
# Verify notification was queued
mock_queue_notif.assert_called_once()
notification_call = mock_queue_notif.call_args[0][0]
assert notification_call.type == NotificationType.ZERO_BALANCE
assert notification_call.user_id == user_id
assert isinstance(notification_call.data, ZeroBalanceData)
assert notification_call.data.current_balance == 72
# Verify Redis was checked with correct key pattern
expected_key = f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:{graph_id}"
mock_redis_client.set.assert_called_once()
call_args = mock_redis_client.set.call_args
assert call_args[0][0] == expected_key
assert call_args[1]["nx"] is True
# Verify Discord alert was sent
mock_client.discord_system_alert.assert_called_once()
discord_message = mock_client.discord_system_alert.call_args[0][0]
assert "Insufficient Funds Alert" in discord_message
assert "test@example.com" in discord_message
assert "Test Agent" in discord_message
@pytest.mark.asyncio(loop_scope="session")
async def test_handle_insufficient_funds_skips_duplicate_notifications(
server: SpinTestServer,
):
"""Test that duplicate insufficient funds notifications skip both email and Discord."""
execution_processor = ExecutionProcessor()
user_id = "test-user-123"
graph_id = "test-graph-456"
error = InsufficientBalanceError(
message="Insufficient balance",
user_id=user_id,
balance=72,
amount=-714,
)
with patch(
"backend.executor.manager.queue_notification"
) as mock_queue_notif, patch(
"backend.executor.manager.get_notification_manager_client"
) as mock_get_client, patch(
"backend.executor.manager.settings"
) as mock_settings, patch(
"backend.executor.manager.redis"
) as mock_redis_module:
# Setup mocks
mock_client = MagicMock()
mock_get_client.return_value = mock_client
mock_settings.config.frontend_base_url = "https://test.com"
# Mock Redis to simulate duplicate notification (set returns False/None)
mock_redis_client = MagicMock()
mock_redis_module.get_redis.return_value = mock_redis_client
mock_redis_client.set.return_value = None # Key already existed
# Create mock database client
mock_db_client = MagicMock()
mock_db_client.get_graph_metadata.return_value = MagicMock(name="Test Agent")
# Test the insufficient funds handler
execution_processor._handle_insufficient_funds_notif(
db_client=mock_db_client,
user_id=user_id,
graph_id=graph_id,
e=error,
)
# Verify email notification was NOT queued (deduplication worked)
mock_queue_notif.assert_not_called()
# Verify Discord alert was NOT sent (deduplication worked)
mock_client.discord_system_alert.assert_not_called()
@pytest.mark.asyncio(loop_scope="session")
async def test_handle_insufficient_funds_different_agents_get_separate_alerts(
server: SpinTestServer,
):
"""Test that different agents for the same user get separate Discord alerts."""
execution_processor = ExecutionProcessor()
user_id = "test-user-123"
graph_id_1 = "test-graph-111"
graph_id_2 = "test-graph-222"
error = InsufficientBalanceError(
message="Insufficient balance",
user_id=user_id,
balance=72,
amount=-714,
)
with patch("backend.executor.manager.queue_notification"), patch(
"backend.executor.manager.get_notification_manager_client"
) as mock_get_client, patch(
"backend.executor.manager.settings"
) as mock_settings, patch(
"backend.executor.manager.redis"
) as mock_redis_module:
mock_client = MagicMock()
mock_get_client.return_value = mock_client
mock_settings.config.frontend_base_url = "https://test.com"
mock_redis_client = MagicMock()
mock_redis_module.get_redis.return_value = mock_redis_client
# Both calls return True (first time for each agent)
mock_redis_client.set.return_value = True
mock_db_client = MagicMock()
mock_graph_metadata = MagicMock()
mock_graph_metadata.name = "Test Agent"
mock_db_client.get_graph_metadata.return_value = mock_graph_metadata
mock_db_client.get_user_email_by_id.return_value = "test@example.com"
# First agent notification
execution_processor._handle_insufficient_funds_notif(
db_client=mock_db_client,
user_id=user_id,
graph_id=graph_id_1,
e=error,
)
# Second agent notification
execution_processor._handle_insufficient_funds_notif(
db_client=mock_db_client,
user_id=user_id,
graph_id=graph_id_2,
e=error,
)
# Verify Discord alerts were sent for both agents
assert mock_client.discord_system_alert.call_count == 2
# Verify Redis was called with different keys
assert mock_redis_client.set.call_count == 2
calls = mock_redis_client.set.call_args_list
assert (
calls[0][0][0]
== f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:{graph_id_1}"
)
assert (
calls[1][0][0]
== f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:{graph_id_2}"
)
@pytest.mark.asyncio(loop_scope="session")
async def test_clear_insufficient_funds_notifications(server: SpinTestServer):
"""Test that clearing notifications removes all keys for a user."""
user_id = "test-user-123"
with patch("backend.executor.manager.redis") as mock_redis_module:
mock_redis_client = MagicMock()
# get_redis_async is an async function, so we need AsyncMock for it
mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
# Mock scan_iter to return some keys as an async iterator
mock_keys = [
f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:graph-1",
f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:graph-2",
f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:graph-3",
]
mock_redis_client.scan_iter.return_value = async_iter(mock_keys)
# delete is awaited, so use AsyncMock
mock_redis_client.delete = AsyncMock(return_value=3)
# Clear notifications
result = await clear_insufficient_funds_notifications(user_id)
# Verify correct pattern was used
expected_pattern = f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:*"
mock_redis_client.scan_iter.assert_called_once_with(match=expected_pattern)
# Verify delete was called with all keys
mock_redis_client.delete.assert_called_once_with(*mock_keys)
# Verify return value
assert result == 3
@pytest.mark.asyncio(loop_scope="session")
async def test_clear_insufficient_funds_notifications_no_keys(server: SpinTestServer):
"""Test clearing notifications when there are no keys to clear."""
user_id = "test-user-no-notifications"
with patch("backend.executor.manager.redis") as mock_redis_module:
mock_redis_client = MagicMock()
# get_redis_async is an async function, so we need AsyncMock for it
mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
# Mock scan_iter to return no keys as an async iterator
mock_redis_client.scan_iter.return_value = async_iter([])
# Clear notifications
result = await clear_insufficient_funds_notifications(user_id)
# Verify delete was not called
mock_redis_client.delete.assert_not_called()
# Verify return value
assert result == 0
@pytest.mark.asyncio(loop_scope="session")
async def test_clear_insufficient_funds_notifications_handles_redis_error(
server: SpinTestServer,
):
"""Test that clearing notifications handles Redis errors gracefully."""
user_id = "test-user-redis-error"
with patch("backend.executor.manager.redis") as mock_redis_module:
# Mock get_redis_async to raise an error
mock_redis_module.get_redis_async = AsyncMock(
side_effect=Exception("Redis connection failed")
)
# Clear notifications should not raise, just return 0
result = await clear_insufficient_funds_notifications(user_id)
# Verify it returned 0 (graceful failure)
assert result == 0
@pytest.mark.asyncio(loop_scope="session")
async def test_handle_insufficient_funds_continues_on_redis_error(
server: SpinTestServer,
):
"""Test that both email and Discord notifications are still sent when Redis fails."""
execution_processor = ExecutionProcessor()
user_id = "test-user-123"
graph_id = "test-graph-456"
error = InsufficientBalanceError(
message="Insufficient balance",
user_id=user_id,
balance=72,
amount=-714,
)
with patch(
"backend.executor.manager.queue_notification"
) as mock_queue_notif, patch(
"backend.executor.manager.get_notification_manager_client"
) as mock_get_client, patch(
"backend.executor.manager.settings"
) as mock_settings, patch(
"backend.executor.manager.redis"
) as mock_redis_module:
mock_client = MagicMock()
mock_get_client.return_value = mock_client
mock_settings.config.frontend_base_url = "https://test.com"
# Mock Redis to raise an error
mock_redis_client = MagicMock()
mock_redis_module.get_redis.return_value = mock_redis_client
mock_redis_client.set.side_effect = Exception("Redis connection error")
mock_db_client = MagicMock()
mock_graph_metadata = MagicMock()
mock_graph_metadata.name = "Test Agent"
mock_db_client.get_graph_metadata.return_value = mock_graph_metadata
mock_db_client.get_user_email_by_id.return_value = "test@example.com"
# Test the insufficient funds handler
execution_processor._handle_insufficient_funds_notif(
db_client=mock_db_client,
user_id=user_id,
graph_id=graph_id,
e=error,
)
# Verify email notification was still queued despite Redis error
mock_queue_notif.assert_called_once()
# Verify Discord alert was still sent despite Redis error
mock_client.discord_system_alert.assert_called_once()
@pytest.mark.asyncio(loop_scope="session")
async def test_add_transaction_clears_notifications_on_grant(server: SpinTestServer):
"""Test that _add_transaction clears notification flags when adding GRANT credits."""
from prisma.enums import CreditTransactionType
from backend.data.credit import UserCredit
user_id = "test-user-grant-clear"
with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch(
"backend.executor.manager.redis"
) as mock_redis_module:
# Mock the query to return a successful transaction
mock_query.return_value = [{"balance": 1000, "transactionKey": "test-tx-key"}]
# Mock async Redis for notification clearing
mock_redis_client = MagicMock()
mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
mock_redis_client.scan_iter.return_value = async_iter(
[f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:graph-1"]
)
mock_redis_client.delete = AsyncMock(return_value=1)
# Create a concrete instance
credit_model = UserCredit()
# Call _add_transaction with GRANT type (should clear notifications)
await credit_model._add_transaction(
user_id=user_id,
amount=500, # Positive amount
transaction_type=CreditTransactionType.GRANT,
is_active=True, # Active transaction
)
# Verify notification clearing was called
mock_redis_module.get_redis_async.assert_called_once()
mock_redis_client.scan_iter.assert_called_once_with(
match=f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:*"
)
@pytest.mark.asyncio(loop_scope="session")
async def test_add_transaction_clears_notifications_on_top_up(server: SpinTestServer):
"""Test that _add_transaction clears notification flags when adding TOP_UP credits."""
from prisma.enums import CreditTransactionType
from backend.data.credit import UserCredit
user_id = "test-user-topup-clear"
with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch(
"backend.executor.manager.redis"
) as mock_redis_module:
# Mock the query to return a successful transaction
mock_query.return_value = [{"balance": 2000, "transactionKey": "test-tx-key-2"}]
# Mock async Redis for notification clearing
mock_redis_client = MagicMock()
mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
mock_redis_client.scan_iter.return_value = async_iter([])
mock_redis_client.delete = AsyncMock(return_value=0)
credit_model = UserCredit()
# Call _add_transaction with TOP_UP type (should clear notifications)
await credit_model._add_transaction(
user_id=user_id,
amount=1000, # Positive amount
transaction_type=CreditTransactionType.TOP_UP,
is_active=True,
)
# Verify notification clearing was attempted
mock_redis_module.get_redis_async.assert_called_once()
@pytest.mark.asyncio(loop_scope="session")
async def test_add_transaction_skips_clearing_for_inactive_transaction(
server: SpinTestServer,
):
"""Test that _add_transaction does NOT clear notifications for inactive transactions."""
from prisma.enums import CreditTransactionType
from backend.data.credit import UserCredit
user_id = "test-user-inactive"
with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch(
"backend.executor.manager.redis"
) as mock_redis_module:
# Mock the query to return a successful transaction
mock_query.return_value = [{"balance": 500, "transactionKey": "test-tx-key-3"}]
# Mock async Redis
mock_redis_client = MagicMock()
mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
credit_model = UserCredit()
# Call _add_transaction with is_active=False (should NOT clear notifications)
await credit_model._add_transaction(
user_id=user_id,
amount=500,
transaction_type=CreditTransactionType.TOP_UP,
is_active=False, # Inactive - pending Stripe payment
)
# Verify notification clearing was NOT called
mock_redis_module.get_redis_async.assert_not_called()
@pytest.mark.asyncio(loop_scope="session")
async def test_add_transaction_skips_clearing_for_usage_transaction(
server: SpinTestServer,
):
"""Test that _add_transaction does NOT clear notifications for USAGE transactions."""
from prisma.enums import CreditTransactionType
from backend.data.credit import UserCredit
user_id = "test-user-usage"
with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch(
"backend.executor.manager.redis"
) as mock_redis_module:
# Mock the query to return a successful transaction
mock_query.return_value = [{"balance": 400, "transactionKey": "test-tx-key-4"}]
# Mock async Redis
mock_redis_client = MagicMock()
mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
credit_model = UserCredit()
# Call _add_transaction with USAGE type (spending, should NOT clear)
await credit_model._add_transaction(
user_id=user_id,
amount=-100, # Negative - spending credits
transaction_type=CreditTransactionType.USAGE,
is_active=True,
)
# Verify notification clearing was NOT called
mock_redis_module.get_redis_async.assert_not_called()
@pytest.mark.asyncio(loop_scope="session")
async def test_enable_transaction_clears_notifications(server: SpinTestServer):
"""Test that _enable_transaction clears notification flags when enabling a TOP_UP."""
from prisma.enums import CreditTransactionType
from backend.data.credit import UserCredit
user_id = "test-user-enable"
with patch("backend.data.credit.CreditTransaction") as mock_credit_tx, patch(
"backend.data.credit.query_raw_with_schema"
) as mock_query, patch("backend.executor.manager.redis") as mock_redis_module:
# Mock finding the pending transaction
mock_transaction = MagicMock()
mock_transaction.amount = 1000
mock_transaction.type = CreditTransactionType.TOP_UP
mock_credit_tx.prisma.return_value.find_first = AsyncMock(
return_value=mock_transaction
)
# Mock the query to return updated balance
mock_query.return_value = [{"balance": 1500}]
# Mock async Redis for notification clearing
mock_redis_client = MagicMock()
mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
mock_redis_client.scan_iter.return_value = async_iter(
[f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:graph-1"]
)
mock_redis_client.delete = AsyncMock(return_value=1)
credit_model = UserCredit()
# Call _enable_transaction (simulates Stripe checkout completion)
from backend.util.json import SafeJson
await credit_model._enable_transaction(
transaction_key="cs_test_123",
user_id=user_id,
metadata=SafeJson({"payment": "completed"}),
)
# Verify notification clearing was called
mock_redis_module.get_redis_async.assert_called_once()
mock_redis_client.scan_iter.assert_called_once_with(
match=f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:*"
)

View File

@@ -3,16 +3,16 @@ import logging
import fastapi.responses import fastapi.responses
import pytest import pytest
import backend.api.features.library.model import backend.server.v2.library.model
import backend.api.features.store.model import backend.server.v2.store.model
from backend.api.model import CreateGraph
from backend.api.rest_api import AgentServer
from backend.blocks.basic import StoreValueBlock from backend.blocks.basic import StoreValueBlock
from backend.blocks.data_manipulation import FindInDictionaryBlock from backend.blocks.data_manipulation import FindInDictionaryBlock
from backend.blocks.io import AgentInputBlock from backend.blocks.io import AgentInputBlock
from backend.blocks.maths import CalculatorBlock, Operation from backend.blocks.maths import CalculatorBlock, Operation
from backend.data import execution, graph from backend.data import execution, graph
from backend.data.model import User from backend.data.model import User
from backend.server.model import CreateGraph
from backend.server.rest_api import AgentServer
from backend.usecases.sample import create_test_graph, create_test_user from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.test import SpinTestServer, wait_execution from backend.util.test import SpinTestServer, wait_execution
@@ -356,7 +356,7 @@ async def test_execute_preset(server: SpinTestServer):
test_graph = await create_graph(server, test_graph, test_user) test_graph = await create_graph(server, test_graph, test_user)
# Create preset with initial values # Create preset with initial values
preset = backend.api.features.library.model.LibraryAgentPresetCreatable( preset = backend.server.v2.library.model.LibraryAgentPresetCreatable(
name="Test Preset With Clash", name="Test Preset With Clash",
description="Test preset with clashing input values", description="Test preset with clashing input values",
graph_id=test_graph.id, graph_id=test_graph.id,
@@ -444,7 +444,7 @@ async def test_execute_preset_with_clash(server: SpinTestServer):
test_graph = await create_graph(server, test_graph, test_user) test_graph = await create_graph(server, test_graph, test_user)
# Create preset with initial values # Create preset with initial values
preset = backend.api.features.library.model.LibraryAgentPresetCreatable( preset = backend.server.v2.library.model.LibraryAgentPresetCreatable(
name="Test Preset With Clash", name="Test Preset With Clash",
description="Test preset with clashing input values", description="Test preset with clashing input values",
graph_id=test_graph.id, graph_id=test_graph.id,
@@ -485,7 +485,7 @@ async def test_store_listing_graph(server: SpinTestServer):
test_user = await create_test_user() test_user = await create_test_user()
test_graph = await create_graph(server, create_test_graph(), test_user) test_graph = await create_graph(server, create_test_graph(), test_user)
store_submission_request = backend.api.features.store.model.StoreSubmissionRequest( store_submission_request = backend.server.v2.store.model.StoreSubmissionRequest(
agent_id=test_graph.id, agent_id=test_graph.id,
agent_version=test_graph.version, agent_version=test_graph.version,
slug=test_graph.id, slug=test_graph.id,
@@ -514,7 +514,7 @@ async def test_store_listing_graph(server: SpinTestServer):
admin_user = await create_test_user(alt_user=True) admin_user = await create_test_user(alt_user=True)
await server.agent_server.test_review_store_listing( await server.agent_server.test_review_store_listing(
backend.api.features.store.model.ReviewSubmissionRequest( backend.server.v2.store.model.ReviewSubmissionRequest(
store_listing_version_id=slv_id, store_listing_version_id=slv_id,
is_approved=True, is_approved=True,
comments="Test comments", comments="Test comments",
@@ -523,7 +523,7 @@ async def test_store_listing_graph(server: SpinTestServer):
) )
# Add the approved store listing to the admin user's library so they can execute it # Add the approved store listing to the admin user's library so they can execute it
from backend.api.features.library.db import add_store_agent_to_library from backend.server.v2.library.db import add_store_agent_to_library
await add_store_agent_to_library( await add_store_agent_to_library(
store_listing_version_id=slv_id, user_id=admin_user.id store_listing_version_id=slv_id, user_id=admin_user.id

View File

@@ -23,7 +23,6 @@ from dotenv import load_dotenv
from pydantic import BaseModel, Field, ValidationError from pydantic import BaseModel, Field, ValidationError
from sqlalchemy import MetaData, create_engine from sqlalchemy import MetaData, create_engine
from backend.data.auth.oauth import cleanup_expired_oauth_tokens
from backend.data.block import BlockInput from backend.data.block import BlockInput
from backend.data.execution import GraphExecutionWithNodes from backend.data.execution import GraphExecutionWithNodes
from backend.data.model import CredentialsMetaInput from backend.data.model import CredentialsMetaInput
@@ -243,12 +242,6 @@ def cleanup_expired_files():
run_async(cleanup_expired_files_async()) run_async(cleanup_expired_files_async())
def cleanup_oauth_tokens():
"""Clean up expired OAuth tokens from the database."""
# Wait for completion
run_async(cleanup_expired_oauth_tokens())
def execution_accuracy_alerts(): def execution_accuracy_alerts():
"""Check execution accuracy and send alerts if drops are detected.""" """Check execution accuracy and send alerts if drops are detected."""
return report_execution_accuracy_alerts() return report_execution_accuracy_alerts()
@@ -453,17 +446,6 @@ class Scheduler(AppService):
jobstore=Jobstores.EXECUTION.value, jobstore=Jobstores.EXECUTION.value,
) )
# OAuth Token Cleanup - configurable interval
self.scheduler.add_job(
cleanup_oauth_tokens,
id="cleanup_oauth_tokens",
trigger="interval",
replace_existing=True,
seconds=config.oauth_token_cleanup_interval_hours
* 3600, # Convert hours to seconds
jobstore=Jobstores.EXECUTION.value,
)
# Execution Accuracy Monitoring - configurable interval # Execution Accuracy Monitoring - configurable interval
self.scheduler.add_job( self.scheduler.add_job(
execution_accuracy_alerts, execution_accuracy_alerts,
@@ -622,11 +604,6 @@ class Scheduler(AppService):
"""Manually trigger cleanup of expired cloud storage files.""" """Manually trigger cleanup of expired cloud storage files."""
return cleanup_expired_files() return cleanup_expired_files()
@expose
def execute_cleanup_oauth_tokens(self):
"""Manually trigger cleanup of expired OAuth tokens."""
return cleanup_oauth_tokens()
@expose @expose
def execute_report_execution_accuracy_alerts(self): def execute_report_execution_accuracy_alerts(self):
"""Manually trigger execution accuracy alert checking.""" """Manually trigger execution accuracy alert checking."""

View File

@@ -1,7 +1,7 @@
import pytest import pytest
from backend.api.model import CreateGraph
from backend.data import db from backend.data import db
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.clients import get_scheduler_client from backend.util.clients import get_scheduler_client
from backend.util.test import SpinTestServer from backend.util.test import SpinTestServer

View File

@@ -149,10 +149,10 @@ async def setup_webhook_for_block(
async def migrate_legacy_triggered_graphs(): async def migrate_legacy_triggered_graphs():
from prisma.models import AgentGraph from prisma.models import AgentGraph
from backend.api.features.library.db import create_preset
from backend.api.features.library.model import LibraryAgentPresetCreatable
from backend.data.graph import AGENT_GRAPH_INCLUDE, GraphModel, set_node_webhook from backend.data.graph import AGENT_GRAPH_INCLUDE, GraphModel, set_node_webhook
from backend.data.model import is_credentials_field_name from backend.data.model import is_credentials_field_name
from backend.server.v2.library.db import create_preset
from backend.server.v2.library.model import LibraryAgentPresetCreatable
triggered_graphs = [ triggered_graphs = [
GraphModel.from_db(_graph) GraphModel.from_db(_graph)

View File

@@ -1,5 +1,5 @@
from backend.api.rest_api import AgentServer
from backend.app import run_processes from backend.app import run_processes
from backend.server.rest_api import AgentServer
def main(): def main():

View File

@@ -3,12 +3,12 @@ from typing import Dict, Set
from fastapi import WebSocket from fastapi import WebSocket
from backend.api.model import NotificationPayload, WSMessage, WSMethod
from backend.data.execution import ( from backend.data.execution import (
ExecutionEventType, ExecutionEventType,
GraphExecutionEvent, GraphExecutionEvent,
NodeExecutionEvent, NodeExecutionEvent,
) )
from backend.server.model import NotificationPayload, WSMessage, WSMethod
_EVENT_TYPE_TO_METHOD_MAP: dict[ExecutionEventType, WSMethod] = { _EVENT_TYPE_TO_METHOD_MAP: dict[ExecutionEventType, WSMethod] = {
ExecutionEventType.GRAPH_EXEC_UPDATE: WSMethod.GRAPH_EXECUTION_EVENT, ExecutionEventType.GRAPH_EXEC_UPDATE: WSMethod.GRAPH_EXECUTION_EVENT,

View File

@@ -4,13 +4,13 @@ from unittest.mock import AsyncMock
import pytest import pytest
from fastapi import WebSocket from fastapi import WebSocket
from backend.api.conn_manager import ConnectionManager
from backend.api.model import NotificationPayload, WSMessage, WSMethod
from backend.data.execution import ( from backend.data.execution import (
ExecutionStatus, ExecutionStatus,
GraphExecutionEvent, GraphExecutionEvent,
NodeExecutionEvent, NodeExecutionEvent,
) )
from backend.server.conn_manager import ConnectionManager
from backend.server.model import NotificationPayload, WSMessage, WSMethod
@pytest.fixture @pytest.fixture

View File

@@ -0,0 +1,29 @@
from fastapi import FastAPI
from backend.monitoring.instrumentation import instrument_fastapi
from backend.server.middleware.security import SecurityHeadersMiddleware
from .routes.integrations import integrations_router
from .routes.tools import tools_router
from .routes.v1 import v1_router
external_app = FastAPI(
title="AutoGPT External API",
description="External API for AutoGPT integrations",
docs_url="/docs",
version="1.0",
)
external_app.add_middleware(SecurityHeadersMiddleware)
external_app.include_router(v1_router, prefix="/v1")
external_app.include_router(tools_router, prefix="/v1")
external_app.include_router(integrations_router, prefix="/v1")
# Add Prometheus instrumentation
instrument_fastapi(
external_app,
service_name="external-api",
expose_endpoint=True,
endpoint="/metrics",
include_in_schema=True,
)
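
Since this file wires instrumentation at import time, a quick sanity check is a TestClient smoke test. A minimal sketch, assuming the module path backend.server.external.api used on the right-hand side of this diff; the assertion is illustrative only:

from fastapi.testclient import TestClient

from backend.server.external.api import external_app

client = TestClient(external_app)

# /metrics is exposed with include_in_schema=True, so it should serve the
# Prometheus exposition format alongside the /docs UI.
response = client.get("/metrics")
assert response.status_code == 200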

View File

@@ -0,0 +1,36 @@
from fastapi import HTTPException, Security
from fastapi.security import APIKeyHeader
from prisma.enums import APIKeyPermission
from backend.data.api_key import APIKeyInfo, has_permission, validate_api_key
api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)
async def require_api_key(api_key: str | None = Security(api_key_header)) -> APIKeyInfo:
"""Base middleware for API key authentication"""
if api_key is None:
raise HTTPException(status_code=401, detail="Missing API key")
api_key_obj = await validate_api_key(api_key)
if not api_key_obj:
raise HTTPException(status_code=401, detail="Invalid API key")
return api_key_obj
def require_permission(permission: APIKeyPermission):
"""Dependency function for checking specific permissions"""
async def check_permission(
api_key: APIKeyInfo = Security(require_api_key),
) -> APIKeyInfo:
if not has_permission(api_key, permission):
raise HTTPException(
status_code=403,
detail=f"API key lacks the required permission '{permission}'",
)
return api_key
return check_permission
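
For reference, a route consumes this dependency via Security(); a minimal sketch, where the router, path, and endpoint are invented for illustration and are not part of this diff:

from fastapi import APIRouter, Security
from prisma.enums import APIKeyPermission

from backend.data.api_key import APIKeyInfo
from backend.server.external.middleware import require_permission  # the dependency defined above

router = APIRouter()

@router.get("/whoami")
async def whoami(
    api_key: APIKeyInfo = Security(require_permission(APIKeyPermission.READ_INTEGRATIONS)),
) -> dict[str, str]:
    # require_api_key has already validated the key; require_permission then
    # checked READ_INTEGRATIONS before the handler runs.
    return {"user_id": api_key.user_id}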

View File

@@ -16,9 +16,7 @@ from fastapi import APIRouter, Body, HTTPException, Path, Security, status
from prisma.enums import APIKeyPermission from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field, SecretStr from pydantic import BaseModel, Field, SecretStr
from backend.api.external.middleware import require_permission from backend.data.api_key import APIKeyInfo
from backend.api.features.integrations.models import get_all_provider_names
from backend.data.auth.base import APIAuthorizationInfo
from backend.data.model import ( from backend.data.model import (
APIKeyCredentials, APIKeyCredentials,
Credentials, Credentials,
@@ -30,6 +28,8 @@ from backend.data.model import (
from backend.integrations.creds_manager import IntegrationCredentialsManager from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.oauth import CREDENTIALS_BY_PROVIDER, HANDLERS_BY_NAME from backend.integrations.oauth import CREDENTIALS_BY_PROVIDER, HANDLERS_BY_NAME
from backend.integrations.providers import ProviderName from backend.integrations.providers import ProviderName
from backend.server.external.middleware import require_permission
from backend.server.integrations.models import get_all_provider_names
from backend.util.settings import Settings from backend.util.settings import Settings
if TYPE_CHECKING: if TYPE_CHECKING:
@@ -255,7 +255,7 @@ def _get_oauth_handler_for_external(
@integrations_router.get("/providers", response_model=list[ProviderInfo]) @integrations_router.get("/providers", response_model=list[ProviderInfo])
async def list_providers( async def list_providers(
auth: APIAuthorizationInfo = Security( api_key: APIKeyInfo = Security(
require_permission(APIKeyPermission.READ_INTEGRATIONS) require_permission(APIKeyPermission.READ_INTEGRATIONS)
), ),
) -> list[ProviderInfo]: ) -> list[ProviderInfo]:
@@ -319,7 +319,7 @@ async def list_providers(
async def initiate_oauth( async def initiate_oauth(
provider: Annotated[str, Path(title="The OAuth provider")], provider: Annotated[str, Path(title="The OAuth provider")],
request: OAuthInitiateRequest, request: OAuthInitiateRequest,
auth: APIAuthorizationInfo = Security( api_key: APIKeyInfo = Security(
require_permission(APIKeyPermission.MANAGE_INTEGRATIONS) require_permission(APIKeyPermission.MANAGE_INTEGRATIONS)
), ),
) -> OAuthInitiateResponse: ) -> OAuthInitiateResponse:
@@ -337,10 +337,7 @@ async def initiate_oauth(
if not validate_callback_url(request.callback_url): if not validate_callback_url(request.callback_url):
raise HTTPException( raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST, status_code=status.HTTP_400_BAD_REQUEST,
-                detail=(
-                    f"Callback URL origin is not allowed. "
-                    f"Allowed origins: {settings.config.external_oauth_callback_origins}"
-                ),
+                detail=f"Callback URL origin is not allowed. Allowed origins: {settings.config.external_oauth_callback_origins}",
) )
# Validate provider # Validate provider
@@ -362,15 +359,13 @@ async def initiate_oauth(
) )
# Store state token with external flow metadata # Store state token with external flow metadata
# Note: initiated_by_api_key_id is only available for API key auth, not OAuth
api_key_id = getattr(auth, "id", None) if auth.type == "api_key" else None
state_token, code_challenge = await creds_manager.store.store_state_token( state_token, code_challenge = await creds_manager.store.store_state_token(
user_id=auth.user_id, user_id=api_key.user_id,
provider=provider if isinstance(provider_name, str) else provider_name.value, provider=provider if isinstance(provider_name, str) else provider_name.value,
scopes=request.scopes, scopes=request.scopes,
callback_url=request.callback_url, callback_url=request.callback_url,
state_metadata=request.state_metadata, state_metadata=request.state_metadata,
initiated_by_api_key_id=api_key_id, initiated_by_api_key_id=api_key.id,
) )
# Build login URL # Build login URL
@@ -398,7 +393,7 @@ async def initiate_oauth(
async def complete_oauth( async def complete_oauth(
provider: Annotated[str, Path(title="The OAuth provider")], provider: Annotated[str, Path(title="The OAuth provider")],
request: OAuthCompleteRequest, request: OAuthCompleteRequest,
auth: APIAuthorizationInfo = Security( api_key: APIKeyInfo = Security(
require_permission(APIKeyPermission.MANAGE_INTEGRATIONS) require_permission(APIKeyPermission.MANAGE_INTEGRATIONS)
), ),
) -> OAuthCompleteResponse: ) -> OAuthCompleteResponse:
@@ -411,7 +406,7 @@ async def complete_oauth(
""" """
# Verify state token # Verify state token
valid_state = await creds_manager.store.verify_state_token( valid_state = await creds_manager.store.verify_state_token(
auth.user_id, request.state_token, provider api_key.user_id, request.state_token, provider
) )
if not valid_state: if not valid_state:
@@ -458,7 +453,7 @@ async def complete_oauth(
) )
# Store credentials # Store credentials
await creds_manager.create(auth.user_id, credentials) await creds_manager.create(api_key.user_id, credentials)
logger.info(f"Successfully completed external OAuth for provider {provider}") logger.info(f"Successfully completed external OAuth for provider {provider}")
@@ -475,7 +470,7 @@ async def complete_oauth(
@integrations_router.get("/credentials", response_model=list[CredentialSummary]) @integrations_router.get("/credentials", response_model=list[CredentialSummary])
async def list_credentials( async def list_credentials(
auth: APIAuthorizationInfo = Security( api_key: APIKeyInfo = Security(
require_permission(APIKeyPermission.READ_INTEGRATIONS) require_permission(APIKeyPermission.READ_INTEGRATIONS)
), ),
) -> list[CredentialSummary]: ) -> list[CredentialSummary]:
@@ -484,7 +479,7 @@ async def list_credentials(
Returns metadata about each credential without exposing sensitive tokens. Returns metadata about each credential without exposing sensitive tokens.
""" """
credentials = await creds_manager.store.get_all_creds(auth.user_id) credentials = await creds_manager.store.get_all_creds(api_key.user_id)
return [ return [
CredentialSummary( CredentialSummary(
id=cred.id, id=cred.id,
@@ -504,7 +499,7 @@ async def list_credentials(
) )
async def list_credentials_by_provider( async def list_credentials_by_provider(
provider: Annotated[str, Path(title="The provider to list credentials for")], provider: Annotated[str, Path(title="The provider to list credentials for")],
auth: APIAuthorizationInfo = Security( api_key: APIKeyInfo = Security(
require_permission(APIKeyPermission.READ_INTEGRATIONS) require_permission(APIKeyPermission.READ_INTEGRATIONS)
), ),
) -> list[CredentialSummary]: ) -> list[CredentialSummary]:
@@ -512,7 +507,7 @@ async def list_credentials_by_provider(
List credentials for a specific provider. List credentials for a specific provider.
""" """
credentials = await creds_manager.store.get_creds_by_provider( credentials = await creds_manager.store.get_creds_by_provider(
auth.user_id, provider api_key.user_id, provider
) )
return [ return [
CredentialSummary( CredentialSummary(
@@ -541,7 +536,7 @@ async def create_credential(
CreateUserPasswordCredentialRequest, CreateUserPasswordCredentialRequest,
CreateHostScopedCredentialRequest, CreateHostScopedCredentialRequest,
] = Body(..., discriminator="type"), ] = Body(..., discriminator="type"),
auth: APIAuthorizationInfo = Security( api_key: APIKeyInfo = Security(
require_permission(APIKeyPermission.MANAGE_INTEGRATIONS) require_permission(APIKeyPermission.MANAGE_INTEGRATIONS)
), ),
) -> CreateCredentialResponse: ) -> CreateCredentialResponse:
@@ -596,7 +591,7 @@ async def create_credential(
# Store credentials # Store credentials
try: try:
await creds_manager.create(auth.user_id, credentials) await creds_manager.create(api_key.user_id, credentials)
except Exception as e: except Exception as e:
logger.error(f"Failed to store credentials: {e}") logger.error(f"Failed to store credentials: {e}")
raise HTTPException( raise HTTPException(
@@ -628,7 +623,7 @@ class DeleteCredentialResponse(BaseModel):
async def delete_credential( async def delete_credential(
provider: Annotated[str, Path(title="The provider")], provider: Annotated[str, Path(title="The provider")],
cred_id: Annotated[str, Path(title="The credential ID to delete")], cred_id: Annotated[str, Path(title="The credential ID to delete")],
auth: APIAuthorizationInfo = Security( api_key: APIKeyInfo = Security(
require_permission(APIKeyPermission.DELETE_INTEGRATIONS) require_permission(APIKeyPermission.DELETE_INTEGRATIONS)
), ),
) -> DeleteCredentialResponse: ) -> DeleteCredentialResponse:
@@ -639,7 +634,7 @@ async def delete_credential(
use the main API's delete endpoint which handles webhook cleanup and use the main API's delete endpoint which handles webhook cleanup and
token revocation. token revocation.
""" """
creds = await creds_manager.store.get_creds_by_id(auth.user_id, cred_id) creds = await creds_manager.store.get_creds_by_id(api_key.user_id, cred_id)
if not creds: if not creds:
raise HTTPException( raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail="Credentials not found" status_code=status.HTTP_404_NOT_FOUND, detail="Credentials not found"
@@ -650,6 +645,6 @@ async def delete_credential(
detail="Credentials do not match the specified provider", detail="Credentials do not match the specified provider",
) )
await creds_manager.delete(auth.user_id, cred_id) await creds_manager.delete(api_key.user_id, cred_id)
return DeleteCredentialResponse(deleted=True, credentials_id=cred_id) return DeleteCredentialResponse(deleted=True, credentials_id=cred_id)

View File

@@ -14,19 +14,19 @@ from fastapi import APIRouter, Security
from prisma.enums import APIKeyPermission from prisma.enums import APIKeyPermission
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
-from backend.api.external.middleware import require_permission
-from backend.api.features.chat.model import ChatSession
-from backend.api.features.chat.tools import find_agent_tool, run_agent_tool
-from backend.api.features.chat.tools.models import ToolResponseBase
-from backend.data.auth.base import APIAuthorizationInfo
+from backend.data.api_key import APIKeyInfo
+from backend.server.external.middleware import require_permission
+from backend.server.v2.chat.model import ChatSession
+from backend.server.v2.chat.tools import find_agent_tool, run_agent_tool
+from backend.server.v2.chat.tools.models import ToolResponseBase
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
tools_router = APIRouter(prefix="/tools", tags=["tools"]) tools_router = APIRouter(prefix="/tools", tags=["tools"])
# Note: We use Security() as a function parameter dependency (auth: APIAuthorizationInfo = Security(...)) # Note: We use Security() as a function parameter dependency (api_key: APIKeyInfo = Security(...))
# rather than in the decorator's dependencies= list. This avoids duplicate permission checks # rather than in the decorator's dependencies= list. This avoids duplicate permission checks
# while still enforcing auth AND giving us access to auth for extracting user_id. # while still enforcing auth AND giving us access to the api_key for extracting user_id.
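
To make the note above concrete, a hedged sketch of the two wirings it contrasts; the route paths are invented for illustration:

# (a) Decorator-level check: auth is enforced, but the handler never sees the key,
#     and repeating it as a parameter would run the permission check twice.
@tools_router.post(
    "/example_a",
    dependencies=[Security(require_permission(APIKeyPermission.USE_TOOLS))],
)
async def example_a() -> dict[str, bool]:
    return {"ok": True}  # no api_key.user_id available here

# (b) Parameter-level check (the pattern used in this file): one check, and the
#     resolved APIKeyInfo is available for extracting user_id.
@tools_router.post("/example_b")
async def example_b(
    api_key: APIKeyInfo = Security(require_permission(APIKeyPermission.USE_TOOLS)),
) -> dict[str, str]:
    return {"user_id": api_key.user_id}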
# Request models # Request models
@@ -80,9 +80,7 @@ def _create_ephemeral_session(user_id: str | None) -> ChatSession:
) )
async def find_agent( async def find_agent(
request: FindAgentRequest, request: FindAgentRequest,
-    auth: APIAuthorizationInfo = Security(
-        require_permission(APIKeyPermission.USE_TOOLS)
-    ),
+    api_key: APIKeyInfo = Security(require_permission(APIKeyPermission.USE_TOOLS)),
) -> dict[str, Any]: ) -> dict[str, Any]:
""" """
Search for agents in the marketplace based on capabilities and user needs. Search for agents in the marketplace based on capabilities and user needs.
@@ -93,9 +91,9 @@ async def find_agent(
Returns: Returns:
List of matching agents or no results response List of matching agents or no results response
""" """
session = _create_ephemeral_session(auth.user_id) session = _create_ephemeral_session(api_key.user_id)
result = await find_agent_tool._execute( result = await find_agent_tool._execute(
user_id=auth.user_id, user_id=api_key.user_id,
session=session, session=session,
query=request.query, query=request.query,
) )
@@ -107,9 +105,7 @@ async def find_agent(
) )
async def run_agent( async def run_agent(
request: RunAgentRequest, request: RunAgentRequest,
-    auth: APIAuthorizationInfo = Security(
-        require_permission(APIKeyPermission.USE_TOOLS)
-    ),
+    api_key: APIKeyInfo = Security(require_permission(APIKeyPermission.USE_TOOLS)),
) -> dict[str, Any]: ) -> dict[str, Any]:
""" """
Run or schedule an agent from the marketplace. Run or schedule an agent from the marketplace.
@@ -133,9 +129,9 @@ async def run_agent(
- execution_started: If agent was run or scheduled successfully - execution_started: If agent was run or scheduled successfully
- error: If something went wrong - error: If something went wrong
""" """
session = _create_ephemeral_session(auth.user_id) session = _create_ephemeral_session(api_key.user_id)
result = await run_agent_tool._execute( result = await run_agent_tool._execute(
user_id=auth.user_id, user_id=api_key.user_id,
session=session, session=session,
username_agent_slug=request.username_agent_slug, username_agent_slug=request.username_agent_slug,
inputs=request.inputs, inputs=request.inputs,

View File

@@ -5,60 +5,46 @@ from typing import Annotated, Any, Literal, Optional, Sequence
from fastapi import APIRouter, Body, HTTPException, Security
from prisma.enums import AgentExecutionStatus, APIKeyPermission
-from pydantic import BaseModel, Field
from typing_extensions import TypedDict

-import backend.api.features.store.cache as store_cache
-import backend.api.features.store.model as store_model
import backend.data.block
-from backend.api.external.middleware import require_permission
+import backend.server.v2.store.cache as store_cache
+import backend.server.v2.store.model as store_model
from backend.data import execution as execution_db
from backend.data import graph as graph_db
-from backend.data import user as user_db
-from backend.data.auth.base import APIAuthorizationInfo
+from backend.data.api_key import APIKeyInfo
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.executor.utils import add_graph_execution
+from backend.server.external.middleware import require_permission
from backend.util.settings import Settings
-
-from .integrations import integrations_router
-from .tools import tools_router

settings = Settings()
logger = logging.getLogger(__name__)

v1_router = APIRouter()
-v1_router.include_router(integrations_router)
-v1_router.include_router(tools_router)

-class UserInfoResponse(BaseModel):
-    id: str
-    name: Optional[str]
-    email: str
-    timezone: str = Field(
-        description="The user's last known timezone (e.g. 'Europe/Amsterdam'), "
-        "or 'not-set' if not set"
-    )
-
-@v1_router.get(
-    path="/me",
-    tags=["user", "meta"],
-)
-async def get_user_info(
-    auth: APIAuthorizationInfo = Security(
-        require_permission(APIKeyPermission.IDENTITY)
-    ),
-) -> UserInfoResponse:
-    user = await user_db.get_user_by_id(auth.user_id)
-    return UserInfoResponse(
-        id=user.id,
-        name=user.name,
-        email=user.email,
-        timezone=user.timezone,
-    )
+class NodeOutput(TypedDict):
+    key: str
+    value: Any
+
+class ExecutionNode(TypedDict):
+    node_id: str
+    input: Any
+    output: dict[str, Any]
+
+class ExecutionNodeOutput(TypedDict):
+    node_id: str
+    outputs: list[NodeOutput]
+
+class GraphExecutionResult(TypedDict):
+    execution_id: str
+    status: str
+    nodes: list[ExecutionNode]
+    output: Optional[list[dict[str, str]]]
@v1_router.get( @v1_router.get(
@@ -79,9 +65,7 @@ async def get_graph_blocks() -> Sequence[dict[Any, Any]]:
async def execute_graph_block( async def execute_graph_block(
block_id: str, block_id: str,
data: BlockInput, data: BlockInput,
-    auth: APIAuthorizationInfo = Security(
-        require_permission(APIKeyPermission.EXECUTE_BLOCK)
-    ),
+    api_key: APIKeyInfo = Security(require_permission(APIKeyPermission.EXECUTE_BLOCK)),
) -> CompletedBlockOutput: ) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id) obj = backend.data.block.get_block(block_id)
if not obj: if not obj:
@@ -101,14 +85,12 @@ async def execute_graph(
graph_id: str, graph_id: str,
graph_version: int, graph_version: int,
node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)], node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
-    auth: APIAuthorizationInfo = Security(
-        require_permission(APIKeyPermission.EXECUTE_GRAPH)
-    ),
+    api_key: APIKeyInfo = Security(require_permission(APIKeyPermission.EXECUTE_GRAPH)),
) -> dict[str, Any]: ) -> dict[str, Any]:
try: try:
graph_exec = await add_graph_execution( graph_exec = await add_graph_execution(
graph_id=graph_id, graph_id=graph_id,
user_id=auth.user_id, user_id=api_key.user_id,
inputs=node_input, inputs=node_input,
graph_version=graph_version, graph_version=graph_version,
) )
@@ -118,19 +100,6 @@ async def execute_graph(
raise HTTPException(status_code=400, detail=msg) raise HTTPException(status_code=400, detail=msg)
class ExecutionNode(TypedDict):
node_id: str
input: Any
output: dict[str, Any]
class GraphExecutionResult(TypedDict):
execution_id: str
status: str
nodes: list[ExecutionNode]
output: Optional[list[dict[str, str]]]
@v1_router.get( @v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results", path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"], tags=["graphs"],
@@ -138,12 +107,10 @@ class GraphExecutionResult(TypedDict):
async def get_graph_execution_results( async def get_graph_execution_results(
graph_id: str, graph_id: str,
graph_exec_id: str, graph_exec_id: str,
-    auth: APIAuthorizationInfo = Security(
-        require_permission(APIKeyPermission.READ_GRAPH)
-    ),
+    api_key: APIKeyInfo = Security(require_permission(APIKeyPermission.READ_GRAPH)),
) -> GraphExecutionResult: ) -> GraphExecutionResult:
graph_exec = await execution_db.get_graph_execution( graph_exec = await execution_db.get_graph_execution(
user_id=auth.user_id, user_id=api_key.user_id,
execution_id=graph_exec_id, execution_id=graph_exec_id,
include_node_executions=True, include_node_executions=True,
) )
@@ -155,7 +122,7 @@ async def get_graph_execution_results(
if not await graph_db.get_graph( if not await graph_db.get_graph(
graph_id=graph_exec.graph_id, graph_id=graph_exec.graph_id,
version=graph_exec.graph_version, version=graph_exec.graph_version,
user_id=auth.user_id, user_id=api_key.user_id,
): ):
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.") raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
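
A hedged consumer-side sketch of the results endpoint above; the terminal status names follow AgentExecutionStatus by assumption, and the client setup is illustrative:

import time

import httpx

def wait_for_result(client: httpx.Client, graph_id: str, exec_id: str) -> dict:
    """Poll the results route above until the execution reaches a terminal state."""
    while True:
        result = client.get(f"/graphs/{graph_id}/executions/{exec_id}/results").json()
        if result["status"] in ("COMPLETED", "FAILED", "TERMINATED"):  # assumed terminal states
            return result
        time.sleep(2)  # simple fixed poll interval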

View File

@@ -17,8 +17,6 @@ from fastapi import (
from pydantic import BaseModel, Field, SecretStr from pydantic import BaseModel, Field, SecretStr
from starlette.status import HTTP_500_INTERNAL_SERVER_ERROR, HTTP_502_BAD_GATEWAY from starlette.status import HTTP_500_INTERNAL_SERVER_ERROR, HTTP_502_BAD_GATEWAY
from backend.api.features.library.db import set_preset_webhook, update_preset
from backend.api.features.library.model import LibraryAgentPreset
from backend.data.graph import NodeModel, get_graph, set_node_webhook from backend.data.graph import NodeModel, get_graph, set_node_webhook
from backend.data.integrations import ( from backend.data.integrations import (
WebhookEvent, WebhookEvent,
@@ -47,6 +45,13 @@ from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.oauth import CREDENTIALS_BY_PROVIDER, HANDLERS_BY_NAME from backend.integrations.oauth import CREDENTIALS_BY_PROVIDER, HANDLERS_BY_NAME
from backend.integrations.providers import ProviderName from backend.integrations.providers import ProviderName
from backend.integrations.webhooks import get_webhook_manager from backend.integrations.webhooks import get_webhook_manager
from backend.server.integrations.models import (
ProviderConstants,
ProviderNamesResponse,
get_all_provider_names,
)
from backend.server.v2.library.db import set_preset_webhook, update_preset
from backend.server.v2.library.model import LibraryAgentPreset
from backend.util.exceptions import ( from backend.util.exceptions import (
GraphNotInLibraryError, GraphNotInLibraryError,
MissingConfigError, MissingConfigError,
@@ -55,8 +60,6 @@ from backend.util.exceptions import (
) )
from backend.util.settings import Settings from backend.util.settings import Settings
from .models import ProviderConstants, ProviderNamesResponse, get_all_provider_names
if TYPE_CHECKING: if TYPE_CHECKING:
from backend.integrations.oauth import BaseOAuthHandler from backend.integrations.oauth import BaseOAuthHandler

View File

@@ -3,7 +3,7 @@ from fastapi import FastAPI
from fastapi.testclient import TestClient from fastapi.testclient import TestClient
from starlette.applications import Starlette from starlette.applications import Starlette
from backend.api.middleware.security import SecurityHeadersMiddleware from backend.server.middleware.security import SecurityHeadersMiddleware
@pytest.fixture @pytest.fixture

View File

@@ -4,7 +4,7 @@ from typing import Any, Literal, Optional
import pydantic import pydantic
from prisma.enums import OnboardingStep from prisma.enums import OnboardingStep
from backend.data.auth.api_key import APIKeyInfo, APIKeyPermission from backend.data.api_key import APIKeyInfo, APIKeyPermission
from backend.data.graph import Graph from backend.data.graph import Graph
from backend.util.timezone_name import TimeZoneName from backend.util.timezone_name import TimeZoneName

View File

@@ -16,34 +16,35 @@ from fastapi.middleware.gzip import GZipMiddleware
from fastapi.routing import APIRoute from fastapi.routing import APIRoute
from prisma.errors import PrismaError from prisma.errors import PrismaError
import backend.api.features.admin.credit_admin_routes
import backend.api.features.admin.execution_analytics_routes
import backend.api.features.admin.oauth_admin_routes
import backend.api.features.admin.store_admin_routes
import backend.api.features.builder
import backend.api.features.builder.routes
import backend.api.features.chat.routes as chat_routes
import backend.api.features.executions.review.routes
import backend.api.features.library.db
import backend.api.features.library.model
import backend.api.features.library.routes
import backend.api.features.oauth
import backend.api.features.otto.routes
import backend.api.features.postmark.postmark
import backend.api.features.store.model
import backend.api.features.store.routes
import backend.api.features.v1
import backend.data.block import backend.data.block
import backend.data.db import backend.data.db
import backend.data.graph import backend.data.graph
import backend.data.user import backend.data.user
import backend.integrations.webhooks.utils import backend.integrations.webhooks.utils
import backend.server.routers.postmark.postmark
import backend.server.routers.v1
import backend.server.v2.admin.credit_admin_routes
import backend.server.v2.admin.execution_analytics_routes
import backend.server.v2.admin.store_admin_routes
import backend.server.v2.builder
import backend.server.v2.builder.routes
import backend.server.v2.chat.routes as chat_routes
import backend.server.v2.executions.review.routes
import backend.server.v2.library.db
import backend.server.v2.library.model
import backend.server.v2.library.routes
import backend.server.v2.otto.routes
import backend.server.v2.store.model
import backend.server.v2.store.routes
import backend.util.service import backend.util.service
import backend.util.settings import backend.util.settings
from backend.blocks.llm import DEFAULT_LLM_MODEL from backend.blocks.llm import LlmModel
from backend.data.model import Credentials from backend.data.model import Credentials
from backend.integrations.providers import ProviderName from backend.integrations.providers import ProviderName
from backend.monitoring.instrumentation import instrument_fastapi from backend.monitoring.instrumentation import instrument_fastapi
from backend.server.external.api import external_app
from backend.server.middleware.security import SecurityHeadersMiddleware
from backend.server.utils.cors import build_cors_params
from backend.util import json from backend.util import json
from backend.util.cloud_storage import shutdown_cloud_storage_handler from backend.util.cloud_storage import shutdown_cloud_storage_handler
from backend.util.exceptions import ( from backend.util.exceptions import (
@@ -54,13 +55,6 @@ from backend.util.exceptions import (
from backend.util.feature_flag import initialize_launchdarkly, shutdown_launchdarkly from backend.util.feature_flag import initialize_launchdarkly, shutdown_launchdarkly
from backend.util.service import UnhealthyServiceError from backend.util.service import UnhealthyServiceError
from .external.fastapi_app import external_api
from .features.analytics import router as analytics_router
from .features.integrations.router import router as integrations_router
from .middleware.security import SecurityHeadersMiddleware
from .utils.cors import build_cors_params
from .utils.openapi import sort_openapi
settings = backend.util.settings.Settings() settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -114,7 +108,7 @@ async def lifespan_context(app: fastapi.FastAPI):
await backend.data.user.migrate_and_encrypt_user_integrations() await backend.data.user.migrate_and_encrypt_user_integrations()
await backend.data.graph.fix_llm_provider_credentials() await backend.data.graph.fix_llm_provider_credentials()
await backend.data.graph.migrate_llm_models(DEFAULT_LLM_MODEL) await backend.data.graph.migrate_llm_models(LlmModel.GPT4O)
await backend.integrations.webhooks.utils.migrate_legacy_triggered_graphs() await backend.integrations.webhooks.utils.migrate_legacy_triggered_graphs()
with launch_darkly_context(): with launch_darkly_context():
@@ -181,9 +175,6 @@ app.add_middleware(GZipMiddleware, minimum_size=50_000) # 50KB threshold
# Add 401 responses to authenticated endpoints in OpenAPI spec # Add 401 responses to authenticated endpoints in OpenAPI spec
add_auth_responses_to_openapi(app) add_auth_responses_to_openapi(app)
# Sort OpenAPI schema to eliminate diff on refactors
sort_openapi(app)
# Add Prometheus instrumentation # Add Prometheus instrumentation
instrument_fastapi( instrument_fastapi(
app, app,
@@ -262,52 +253,42 @@ app.add_exception_handler(MissingConfigError, handle_internal_http_error(503))
app.add_exception_handler(ValueError, handle_internal_http_error(400)) app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(Exception, handle_internal_http_error(500)) app.add_exception_handler(Exception, handle_internal_http_error(500))
-app.include_router(backend.api.features.v1.v1_router, tags=["v1"], prefix="/api")
-app.include_router(
-    integrations_router,
-    prefix="/api/integrations",
-    tags=["v1", "integrations"],
-)
-app.include_router(
-    analytics_router,
-    prefix="/api/analytics",
-    tags=["analytics"],
-)
-app.include_router(
-    backend.api.features.store.routes.router, tags=["v2"], prefix="/api/store"
-)
-app.include_router(
-    backend.api.features.builder.routes.router, tags=["v2"], prefix="/api/builder"
-)
-app.include_router(
-    backend.api.features.admin.store_admin_routes.router,
-    tags=["v2", "admin"],
-    prefix="/api/store",
-)
-app.include_router(
-    backend.api.features.admin.credit_admin_routes.router,
-    tags=["v2", "admin"],
-    prefix="/api/credits",
-)
-app.include_router(
-    backend.api.features.admin.execution_analytics_routes.router,
-    tags=["v2", "admin"],
-    prefix="/api/executions",
-)
-app.include_router(
-    backend.api.features.executions.review.routes.router,
-    tags=["v2", "executions", "review"],
-    prefix="/api/review",
-)
-app.include_router(
-    backend.api.features.library.routes.router, tags=["v2"], prefix="/api/library"
-)
-app.include_router(
-    backend.api.features.otto.routes.router, tags=["v2", "otto"], prefix="/api/otto"
-)
-app.include_router(
-    backend.api.features.postmark.postmark.router,
-    tags=["v1", "email"],
-    prefix="/api/email",
-)
+app.include_router(backend.server.routers.v1.v1_router, tags=["v1"], prefix="/api")
+app.include_router(
+    backend.server.v2.store.routes.router, tags=["v2"], prefix="/api/store"
+)
+app.include_router(
+    backend.server.v2.builder.routes.router, tags=["v2"], prefix="/api/builder"
+)
+app.include_router(
+    backend.server.v2.admin.store_admin_routes.router,
+    tags=["v2", "admin"],
+    prefix="/api/store",
+)
+app.include_router(
+    backend.server.v2.admin.credit_admin_routes.router,
+    tags=["v2", "admin"],
+    prefix="/api/credits",
+)
+app.include_router(
+    backend.server.v2.admin.execution_analytics_routes.router,
+    tags=["v2", "admin"],
+    prefix="/api/executions",
+)
+app.include_router(
+    backend.server.v2.executions.review.routes.router,
+    tags=["v2", "executions", "review"],
+    prefix="/api/review",
+)
+app.include_router(
+    backend.server.v2.library.routes.router, tags=["v2"], prefix="/api/library"
+)
+app.include_router(
+    backend.server.v2.otto.routes.router, tags=["v2", "otto"], prefix="/api/otto"
+)
+app.include_router(
+    backend.server.routers.postmark.postmark.router,
+    tags=["v1", "email"],
+    prefix="/api/email",
+)
@@ -316,18 +297,8 @@ app.include_router(
tags=["v2", "chat"], tags=["v2", "chat"],
prefix="/api/chat", prefix="/api/chat",
) )
app.include_router(
backend.api.features.oauth.router,
tags=["oauth"],
prefix="/api/oauth",
)
app.include_router(
backend.api.features.admin.oauth_admin_routes.router,
tags=["v2", "admin", "oauth"],
prefix="/api/oauth",
)
app.mount("/external-api", external_api) app.mount("/external-api", external_app)
@app.get(path="/health", tags=["health"], dependencies=[]) @app.get(path="/health", tags=["health"], dependencies=[])
@@ -380,7 +351,7 @@ class AgentServer(backend.util.service.AppProcess):
graph_version: Optional[int] = None, graph_version: Optional[int] = None,
node_input: Optional[dict[str, Any]] = None, node_input: Optional[dict[str, Any]] = None,
): ):
return await backend.api.features.v1.execute_graph( return await backend.server.routers.v1.execute_graph(
user_id=user_id, user_id=user_id,
graph_id=graph_id, graph_id=graph_id,
graph_version=graph_version, graph_version=graph_version,
@@ -395,16 +366,16 @@ class AgentServer(backend.util.service.AppProcess):
user_id: str, user_id: str,
for_export: bool = False, for_export: bool = False,
): ):
return await backend.api.features.v1.get_graph( return await backend.server.routers.v1.get_graph(
graph_id, user_id, graph_version, for_export graph_id, user_id, graph_version, for_export
) )
@staticmethod @staticmethod
async def test_create_graph( async def test_create_graph(
create_graph: backend.api.features.v1.CreateGraph, create_graph: backend.server.routers.v1.CreateGraph,
user_id: str, user_id: str,
): ):
return await backend.api.features.v1.create_new_graph(create_graph, user_id) return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod @staticmethod
async def test_get_graph_run_status(graph_exec_id: str, user_id: str): async def test_get_graph_run_status(graph_exec_id: str, user_id: str):
@@ -420,45 +391,45 @@ class AgentServer(backend.util.service.AppProcess):
@staticmethod @staticmethod
async def test_delete_graph(graph_id: str, user_id: str): async def test_delete_graph(graph_id: str, user_id: str):
"""Used for clean-up after a test run""" """Used for clean-up after a test run"""
await backend.api.features.library.db.delete_library_agent_by_graph_id( await backend.server.v2.library.db.delete_library_agent_by_graph_id(
graph_id=graph_id, user_id=user_id graph_id=graph_id, user_id=user_id
) )
return await backend.api.features.v1.delete_graph(graph_id, user_id) return await backend.server.routers.v1.delete_graph(graph_id, user_id)
@staticmethod @staticmethod
async def test_get_presets(user_id: str, page: int = 1, page_size: int = 10): async def test_get_presets(user_id: str, page: int = 1, page_size: int = 10):
return await backend.api.features.library.routes.presets.list_presets( return await backend.server.v2.library.routes.presets.list_presets(
user_id=user_id, page=page, page_size=page_size user_id=user_id, page=page, page_size=page_size
) )
@staticmethod @staticmethod
async def test_get_preset(preset_id: str, user_id: str): async def test_get_preset(preset_id: str, user_id: str):
return await backend.api.features.library.routes.presets.get_preset( return await backend.server.v2.library.routes.presets.get_preset(
preset_id=preset_id, user_id=user_id preset_id=preset_id, user_id=user_id
) )
@staticmethod @staticmethod
async def test_create_preset( async def test_create_preset(
preset: backend.api.features.library.model.LibraryAgentPresetCreatable, preset: backend.server.v2.library.model.LibraryAgentPresetCreatable,
user_id: str, user_id: str,
): ):
return await backend.api.features.library.routes.presets.create_preset( return await backend.server.v2.library.routes.presets.create_preset(
preset=preset, user_id=user_id preset=preset, user_id=user_id
) )
@staticmethod @staticmethod
async def test_update_preset( async def test_update_preset(
preset_id: str, preset_id: str,
preset: backend.api.features.library.model.LibraryAgentPresetUpdatable, preset: backend.server.v2.library.model.LibraryAgentPresetUpdatable,
user_id: str, user_id: str,
): ):
return await backend.api.features.library.routes.presets.update_preset( return await backend.server.v2.library.routes.presets.update_preset(
preset_id=preset_id, preset=preset, user_id=user_id preset_id=preset_id, preset=preset, user_id=user_id
) )
@staticmethod @staticmethod
async def test_delete_preset(preset_id: str, user_id: str): async def test_delete_preset(preset_id: str, user_id: str):
return await backend.api.features.library.routes.presets.delete_preset( return await backend.server.v2.library.routes.presets.delete_preset(
preset_id=preset_id, user_id=user_id preset_id=preset_id, user_id=user_id
) )
@@ -468,7 +439,7 @@ class AgentServer(backend.util.service.AppProcess):
user_id: str, user_id: str,
inputs: Optional[dict[str, Any]] = None, inputs: Optional[dict[str, Any]] = None,
): ):
return await backend.api.features.library.routes.presets.execute_preset( return await backend.server.v2.library.routes.presets.execute_preset(
preset_id=preset_id, preset_id=preset_id,
user_id=user_id, user_id=user_id,
inputs=inputs or {}, inputs=inputs or {},
@@ -477,20 +448,18 @@ class AgentServer(backend.util.service.AppProcess):
@staticmethod @staticmethod
async def test_create_store_listing( async def test_create_store_listing(
request: backend.api.features.store.model.StoreSubmissionRequest, user_id: str request: backend.server.v2.store.model.StoreSubmissionRequest, user_id: str
): ):
-        return await backend.api.features.store.routes.create_submission(
-            request, user_id
-        )
+        return await backend.server.v2.store.routes.create_submission(request, user_id)
### ADMIN ### ### ADMIN ###
@staticmethod @staticmethod
async def test_review_store_listing( async def test_review_store_listing(
request: backend.api.features.store.model.ReviewSubmissionRequest, request: backend.server.v2.store.model.ReviewSubmissionRequest,
user_id: str, user_id: str,
): ):
return await backend.api.features.admin.store_admin_routes.review_submission( return await backend.server.v2.admin.store_admin_routes.review_submission(
request.store_listing_version_id, request, user_id request.store_listing_version_id, request, user_id
) )
@@ -500,7 +469,10 @@ class AgentServer(backend.util.service.AppProcess):
provider: ProviderName, provider: ProviderName,
credentials: Credentials, credentials: Credentials,
) -> Credentials: ) -> Credentials:
-        from .features.integrations.router import create_credentials, get_credential
+        from backend.server.integrations.router import (
+            create_credentials,
+            get_credential,
+        )
try: try:
return await create_credentials( return await create_credentials(

View File

@@ -6,11 +6,10 @@ from typing import Annotated
import fastapi import fastapi
import pydantic import pydantic
from autogpt_libs.auth import get_user_id from autogpt_libs.auth import get_user_id
from autogpt_libs.auth.dependencies import requires_user
import backend.data.analytics import backend.data.analytics
router = fastapi.APIRouter(dependencies=[fastapi.Security(requires_user)]) router = fastapi.APIRouter()
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
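
The left-hand side attaches authentication once at router construction; a minimal sketch of what that buys, assuming requires_user rejects anonymous requests with a 401:

import fastapi
from autogpt_libs.auth import get_user_id
from autogpt_libs.auth.dependencies import requires_user

# Every route registered on this router inherits the auth check, so individual
# endpoints only declare what they actually need (here, the user id).
router = fastapi.APIRouter(dependencies=[fastapi.Security(requires_user)])

@router.post("/log_raw_metric")
async def log_raw_metric(user_id: str = fastapi.Depends(get_user_id)) -> str:
    ...  # reaching this point implies an authenticated user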

View File

@@ -0,0 +1,150 @@
"""Example of analytics tests with improved error handling and assertions."""
import json
from unittest.mock import AsyncMock, Mock
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.routers.analytics as analytics_routes
from backend.server.test_helpers import (
assert_error_response_structure,
assert_mock_called_with_partial,
assert_response_status,
safe_parse_json,
)
app = fastapi.FastAPI()
app.include_router(analytics_routes.router)
client = fastapi.testclient.TestClient(app)
@pytest.fixture(autouse=True)
def setup_app_auth(mock_jwt_user):
"""Setup auth overrides for all tests in this module"""
from autogpt_libs.auth.jwt_utils import get_jwt_payload
app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"]
yield
app.dependency_overrides.clear()
def test_log_raw_metric_success_improved(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
test_user_id: str,
) -> None:
"""Test successful raw metric logging with improved assertions."""
# Mock the analytics function
mock_result = Mock(id="metric-123-uuid")
mock_log_metric = mocker.patch(
"backend.data.analytics.log_raw_metric",
new_callable=AsyncMock,
return_value=mock_result,
)
request_data = {
"metric_name": "page_load_time",
"metric_value": 2.5,
"data_string": "/dashboard",
}
response = client.post("/log_raw_metric", json=request_data)
# Improved assertions with better error messages
assert_response_status(response, 200, "Metric logging should succeed")
response_data = safe_parse_json(response, "Metric response parsing")
assert response_data == "metric-123-uuid", f"Unexpected response: {response_data}"
# Verify the function was called with correct parameters
assert_mock_called_with_partial(
mock_log_metric,
user_id=test_user_id,
metric_name="page_load_time",
metric_value=2.5,
data_string="/dashboard",
)
# Snapshot test the response
configured_snapshot.assert_match(
json.dumps({"metric_id": response_data}, indent=2, sort_keys=True),
"analytics_log_metric_success_improved",
)
def test_log_raw_metric_invalid_request_improved() -> None:
"""Test invalid metric request with improved error assertions."""
# Test missing required fields
response = client.post("/log_raw_metric", json={})
error_data = assert_error_response_structure(
response, expected_status=422, expected_error_fields=["loc", "msg", "type"]
)
# Verify specific error details
detail = error_data["detail"]
assert isinstance(detail, list), "Error detail should be a list"
assert len(detail) > 0, "Should have at least one error"
# Check that required fields are mentioned in errors
error_fields = [error["loc"][-1] for error in detail if "loc" in error]
assert "metric_name" in error_fields, "Should report missing metric_name"
assert "metric_value" in error_fields, "Should report missing metric_value"
assert "data_string" in error_fields, "Should report missing data_string"
def test_log_raw_metric_type_validation_improved(
mocker: pytest_mock.MockFixture,
) -> None:
"""Test metric type validation with improved assertions."""
# Mock the analytics function to avoid event loop issues
mocker.patch(
"backend.data.analytics.log_raw_metric",
new_callable=AsyncMock,
return_value=Mock(id="test-id"),
)
invalid_requests = [
{
"data": {
"metric_name": "test",
"metric_value": "not_a_number", # Invalid type
"data_string": "test",
},
"expected_error": "Input should be a valid number",
},
{
"data": {
"metric_name": "", # Empty string
"metric_value": 1.0,
"data_string": "test",
},
"expected_error": "String should have at least 1 character",
},
{
"data": {
"metric_name": "test",
"metric_value": 123, # Valid number
"data_string": "", # Empty data_string
},
"expected_error": "String should have at least 1 character",
},
]
for test_case in invalid_requests:
response = client.post("/log_raw_metric", json=test_case["data"])
error_data = assert_error_response_structure(response, expected_status=422)
# Check that expected error is in the response
error_text = json.dumps(error_data)
assert (
test_case["expected_error"] in error_text
or test_case["expected_error"].lower() in error_text.lower()
), f"Expected error '{test_case['expected_error']}' not found in: {error_text}"

View File

@@ -0,0 +1,115 @@
"""Example of parametrized tests for analytics endpoints."""
import json
from unittest.mock import AsyncMock, Mock
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.routers.analytics as analytics_routes
app = fastapi.FastAPI()
app.include_router(analytics_routes.router)
client = fastapi.testclient.TestClient(app)
@pytest.fixture(autouse=True)
def setup_app_auth(mock_jwt_user):
"""Setup auth overrides for all tests in this module"""
from autogpt_libs.auth.jwt_utils import get_jwt_payload
app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"]
yield
app.dependency_overrides.clear()
@pytest.mark.parametrize(
"metric_value,metric_name,data_string,test_id",
[
(100, "api_calls_count", "external_api", "integer_value"),
(0, "error_count", "no_errors", "zero_value"),
(-5.2, "temperature_delta", "cooling", "negative_value"),
(1.23456789, "precision_test", "float_precision", "float_precision"),
(999999999, "large_number", "max_value", "large_number"),
(0.0000001, "tiny_number", "min_value", "tiny_number"),
],
)
def test_log_raw_metric_values_parametrized(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
metric_value: float,
metric_name: str,
data_string: str,
test_id: str,
) -> None:
"""Test raw metric logging with various metric values using parametrize."""
# Mock the analytics function
mock_result = Mock(id=f"metric-{test_id}-uuid")
mocker.patch(
"backend.data.analytics.log_raw_metric",
new_callable=AsyncMock,
return_value=mock_result,
)
request_data = {
"metric_name": metric_name,
"metric_value": metric_value,
"data_string": data_string,
}
response = client.post("/log_raw_metric", json=request_data)
# Better error handling
assert response.status_code == 200, f"Failed for {test_id}: {response.text}"
response_data = response.json()
# Snapshot test the response
configured_snapshot.assert_match(
json.dumps(
{"metric_id": response_data, "test_case": test_id}, indent=2, sort_keys=True
),
f"analytics_metric_{test_id}",
)
@pytest.mark.parametrize(
"invalid_data,expected_error",
[
({}, "Field required"), # Missing all fields
({"metric_name": "test"}, "Field required"), # Missing metric_value
(
{"metric_name": "test", "metric_value": "not_a_number"},
"Input should be a valid number",
), # Invalid type
(
{"metric_name": "", "metric_value": 1.0, "data_string": "test"},
"String should have at least 1 character",
), # Empty name
],
)
def test_log_raw_metric_invalid_requests_parametrized(
mocker: pytest_mock.MockFixture,
invalid_data: dict,
expected_error: str,
) -> None:
"""Test invalid metric requests with parametrize."""
# Mock the analytics function to avoid event loop issues
mocker.patch(
"backend.data.analytics.log_raw_metric",
new_callable=AsyncMock,
return_value=Mock(id="test-id"),
)
response = client.post("/log_raw_metric", json=invalid_data)
assert response.status_code == 422
error_detail = response.json()
assert "detail" in error_detail
# Verify error message contains expected error
error_text = json.dumps(error_detail)
assert expected_error in error_text or expected_error.lower() in error_text.lower()
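
As an aside, the explicit test_id column above can also be expressed with pytest.param ids, which surface directly in test names; a hedged alternative sketch:

import pytest

@pytest.mark.parametrize(
    "metric_value,metric_name,data_string",
    [
        pytest.param(100, "api_calls_count", "external_api", id="integer_value"),
        pytest.param(0, "error_count", "no_errors", id="zero_value"),
        pytest.param(-5.2, "temperature_delta", "cooling", id="negative_value"),
    ],
)
def test_metric_values(metric_value, metric_name, data_string):
    ...  # body as in the tests above; failures read as test_metric_values[zero_value]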

View File

@@ -0,0 +1,284 @@
import json
from unittest.mock import AsyncMock, Mock
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.routers.analytics as analytics_routes
app = fastapi.FastAPI()
app.include_router(analytics_routes.router)
client = fastapi.testclient.TestClient(app)
@pytest.fixture(autouse=True)
def setup_app_auth(mock_jwt_user):
"""Setup auth overrides for all tests in this module"""
from autogpt_libs.auth.jwt_utils import get_jwt_payload
app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"]
yield
app.dependency_overrides.clear()
def test_log_raw_metric_success(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
test_user_id: str,
) -> None:
"""Test successful raw metric logging"""
# Mock the analytics function
mock_result = Mock(id="metric-123-uuid")
mock_log_metric = mocker.patch(
"backend.data.analytics.log_raw_metric",
new_callable=AsyncMock,
return_value=mock_result,
)
request_data = {
"metric_name": "page_load_time",
"metric_value": 2.5,
"data_string": "/dashboard",
}
response = client.post("/log_raw_metric", json=request_data)
assert response.status_code == 200
response_data = response.json()
assert response_data == "metric-123-uuid"
# Verify the function was called with correct parameters
mock_log_metric.assert_called_once_with(
user_id=test_user_id,
metric_name="page_load_time",
metric_value=2.5,
data_string="/dashboard",
)
# Snapshot test the response
configured_snapshot.assert_match(
json.dumps({"metric_id": response.json()}, indent=2, sort_keys=True),
"analytics_log_metric_success",
)
def test_log_raw_metric_various_values(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
) -> None:
"""Test raw metric logging with various metric values"""
# Mock the analytics function
mock_result = Mock(id="metric-456-uuid")
mocker.patch(
"backend.data.analytics.log_raw_metric",
new_callable=AsyncMock,
return_value=mock_result,
)
# Test with integer value
request_data = {
"metric_name": "api_calls_count",
"metric_value": 100,
"data_string": "external_api",
}
response = client.post("/log_raw_metric", json=request_data)
assert response.status_code == 200
# Test with zero value
request_data = {
"metric_name": "error_count",
"metric_value": 0,
"data_string": "no_errors",
}
response = client.post("/log_raw_metric", json=request_data)
assert response.status_code == 200
# Test with negative value
    request_data = {
        "metric_name": "temperature_delta",
        "metric_value": -5.2,
        "data_string": "cooling",
    }
    response = client.post("/log_raw_metric", json=request_data)
    assert response.status_code == 200

    # Snapshot the last response
    configured_snapshot.assert_match(
        json.dumps({"metric_id": response.json()}, indent=2, sort_keys=True),
        "analytics_log_metric_various_values",
    )


def test_log_raw_analytics_success(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
    test_user_id: str,
) -> None:
    """Test successful raw analytics logging"""
    # Mock the analytics function
    mock_result = Mock(id="analytics-789-uuid")
    mock_log_analytics = mocker.patch(
        "backend.data.analytics.log_raw_analytics",
        new_callable=AsyncMock,
        return_value=mock_result,
    )

    request_data = {
        "type": "user_action",
        "data": {
            "action": "button_click",
            "button_id": "submit_form",
            "timestamp": "2023-01-01T00:00:00Z",
            "metadata": {
                "form_type": "registration",
                "fields_filled": 5,
            },
        },
        "data_index": "button_click_submit_form",
    }
    response = client.post("/log_raw_analytics", json=request_data)
    assert response.status_code == 200
    response_data = response.json()
    assert response_data == "analytics-789-uuid"

    # Verify the function was called with correct parameters
    mock_log_analytics.assert_called_once_with(
        test_user_id,
        "user_action",
        request_data["data"],
        "button_click_submit_form",
    )

    # Snapshot test the response
    configured_snapshot.assert_match(
        json.dumps({"analytics_id": response_data}, indent=2, sort_keys=True),
        "analytics_log_analytics_success",
    )


def test_log_raw_analytics_complex_data(
    mocker: pytest_mock.MockFixture,
    configured_snapshot: Snapshot,
) -> None:
    """Test raw analytics logging with complex nested data"""
    # Mock the analytics function
    mock_result = Mock(id="analytics-complex-uuid")
    mocker.patch(
        "backend.data.analytics.log_raw_analytics",
        new_callable=AsyncMock,
        return_value=mock_result,
    )

    request_data = {
        "type": "agent_execution",
        "data": {
            "agent_id": "agent_123",
            "execution_id": "exec_456",
            "status": "completed",
            "duration_ms": 3500,
            "nodes_executed": 15,
            "blocks_used": [
                {"block_id": "llm_block", "count": 3},
                {"block_id": "http_block", "count": 5},
                {"block_id": "code_block", "count": 2},
            ],
            "errors": [],
            "metadata": {
                "trigger": "manual",
                "user_tier": "premium",
                "environment": "production",
            },
        },
        "data_index": "agent_123_exec_456",
    }
    response = client.post("/log_raw_analytics", json=request_data)
    assert response.status_code == 200
    response_data = response.json()

    # Snapshot test the complex data structure
    configured_snapshot.assert_match(
        json.dumps(
            {
                "analytics_id": response_data,
                "logged_data": request_data["data"],
            },
            indent=2,
            sort_keys=True,
        ),
        "analytics_log_analytics_complex_data",
    )


def test_log_raw_metric_invalid_request() -> None:
    """Test raw metric logging with invalid request data"""
    # Missing required fields
    response = client.post("/log_raw_metric", json={})
    assert response.status_code == 422

    # Invalid metric_value type
    response = client.post(
        "/log_raw_metric",
        json={
            "metric_name": "test",
            "metric_value": "not_a_number",
            "data_string": "test",
        },
    )
    assert response.status_code == 422

    # Missing data_string
    response = client.post(
        "/log_raw_metric",
        json={
            "metric_name": "test",
            "metric_value": 1.0,
        },
    )
    assert response.status_code == 422


def test_log_raw_analytics_invalid_request() -> None:
    """Test raw analytics logging with invalid request data"""
    # Missing required fields
    response = client.post("/log_raw_analytics", json={})
    assert response.status_code == 422

    # Invalid data type (should be dict)
    response = client.post(
        "/log_raw_analytics",
        json={
            "type": "test",
            "data": "not_a_dict",
            "data_index": "test",
        },
    )
    assert response.status_code == 422

    # Missing data_index
    response = client.post(
        "/log_raw_analytics",
        json={
            "type": "test",
            "data": {"key": "value"},
        },
    )
    assert response.status_code == 422
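
Note: these tests rely on a `configured_snapshot` fixture that is defined elsewhere in the suite (typically a conftest.py), not in this file. As a minimal sketch of what such a fixture looks like with pytest-snapshot — the "snapshots" directory name here is an assumed example, not necessarily this repository's layout:

# Sketch only, not part of this diff: wraps pytest-snapshot's built-in
# `snapshot` fixture so all tests share one snapshot directory.
import pytest
from pytest_snapshot.plugin import Snapshot


@pytest.fixture
def configured_snapshot(snapshot: Snapshot) -> Snapshot:
    # assert_match() reads/writes files under this directory; run
    # `pytest --snapshot-update` once to (re)generate the snapshot files.
    snapshot.snapshot_dir = "snapshots"  # assumed path
    return snapshot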


@@ -4,15 +4,12 @@ from typing import Annotated
 from fastapi import APIRouter, Body, HTTPException, Query, Security
 from fastapi.responses import JSONResponse
 
-from backend.api.utils.api_key_auth import APIKeyAuthenticator
 from backend.data.user import (
     get_user_by_email,
     set_user_email_verification,
     unsubscribe_user_by_token,
 )
-from backend.util.settings import Settings
-
-from .models import (
+from backend.server.routers.postmark.models import (
     PostmarkBounceEnum,
     PostmarkBounceWebhook,
     PostmarkClickWebhook,
@@ -22,6 +19,8 @@ from .models import (
     PostmarkSubscriptionChangeWebhook,
     PostmarkWebhook,
 )
+from backend.server.utils.api_key_auth import APIKeyAuthenticator
+from backend.util.settings import Settings
 
 logger = logging.getLogger(__name__)
 settings = Settings()


@@ -28,21 +28,12 @@ from pydantic import BaseModel
 from starlette.status import HTTP_204_NO_CONTENT, HTTP_404_NOT_FOUND
 from typing_extensions import Optional, TypedDict
 
-from backend.api.model import (
-    CreateAPIKeyRequest,
-    CreateAPIKeyResponse,
-    CreateGraph,
-    GraphExecutionSource,
-    RequestTopUp,
-    SetGraphActiveVersion,
-    TimezoneResponse,
-    UpdatePermissionsRequest,
-    UpdateTimezoneRequest,
-    UploadFileResponse,
-)
+import backend.server.integrations.router
+import backend.server.routers.analytics
+import backend.server.v2.library.db as library_db
+from backend.data import api_key as api_key_db
 from backend.data import execution as execution_db
 from backend.data import graph as graph_db
-from backend.data.auth import api_key as api_key_db
 from backend.data.block import BlockInput, CompletedBlockOutput, get_block, get_blocks
 from backend.data.credit import (
@@ -88,6 +79,19 @@ from backend.monitoring.instrumentation import (
     record_graph_execution,
     record_graph_operation,
 )
+from backend.server.model import (
+    CreateAPIKeyRequest,
+    CreateAPIKeyResponse,
+    CreateGraph,
+    GraphExecutionSource,
+    RequestTopUp,
+    SetGraphActiveVersion,
+    TimezoneResponse,
+    UpdatePermissionsRequest,
+    UpdateTimezoneRequest,
+    UploadFileResponse,
+)
+from backend.server.v2.store.model import StoreAgentDetails
 from backend.util.cache import cached
 from backend.util.clients import get_scheduler_client
 from backend.util.cloud_storage import get_cloud_storage_handler
@@ -101,10 +105,6 @@ from backend.util.timezone_utils import (
 )
 from backend.util.virus_scanner import scan_content_safe
 
-from .library import db as library_db
-from .library import model as library_model
-from .store.model import StoreAgentDetails
-
 
 def _create_file_size_error(size_bytes: int, max_size_mb: int) -> HTTPException:
     """Create standardized file size error response."""
@@ -118,9 +118,76 @@ settings = Settings()
 logger = logging.getLogger(__name__)
 
 
+async def hide_activity_summaries_if_disabled(
+    executions: list[execution_db.GraphExecutionMeta], user_id: str
+) -> list[execution_db.GraphExecutionMeta]:
+    """Hide activity summaries and scores if AI_ACTIVITY_STATUS feature is disabled."""
+    if await is_feature_enabled(Flag.AI_ACTIVITY_STATUS, user_id):
+        return executions  # Return as-is if feature is enabled
+
+    # Filter out activity features if disabled
+    filtered_executions = []
+    for execution in executions:
+        if execution.stats:
+            filtered_stats = execution.stats.without_activity_features()
+            execution = execution.model_copy(update={"stats": filtered_stats})
+        filtered_executions.append(execution)
+    return filtered_executions
+
+
+async def hide_activity_summary_if_disabled(
+    execution: execution_db.GraphExecution | execution_db.GraphExecutionWithNodes,
+    user_id: str,
+) -> execution_db.GraphExecution | execution_db.GraphExecutionWithNodes:
+    """Hide activity summary and score for a single execution if AI_ACTIVITY_STATUS feature is disabled."""
+    if await is_feature_enabled(Flag.AI_ACTIVITY_STATUS, user_id):
+        return execution  # Return as-is if feature is enabled
+
+    # Filter out activity features if disabled
+    if execution.stats:
+        filtered_stats = execution.stats.without_activity_features()
+        return execution.model_copy(update={"stats": filtered_stats})
+    return execution
+
+
+async def _update_library_agent_version_and_settings(
+    user_id: str, agent_graph: graph_db.GraphModel
+) -> library_db.library_model.LibraryAgent:
+    # Keep the library agent up to date with the new active version
+    library = await library_db.update_agent_version_in_library(
+        user_id, agent_graph.id, agent_graph.version
+    )
+    # If the graph has HITL node, initialize the setting if it's not already set.
+    if (
+        agent_graph.has_human_in_the_loop
+        and library.settings.human_in_the_loop_safe_mode is None
+    ):
+        await library_db.update_library_agent_settings(
+            user_id=user_id,
+            agent_id=library.id,
+            settings=library.settings.model_copy(
+                update={"human_in_the_loop_safe_mode": True}
+            ),
+        )
+    return library
+
+
 # Define the API routes
 v1_router = APIRouter()
 
+v1_router.include_router(
+    backend.server.integrations.router.router,
+    prefix="/integrations",
+    tags=["integrations"],
+)
+v1_router.include_router(
+    backend.server.routers.analytics.router,
+    prefix="/analytics",
+    tags=["analytics"],
+    dependencies=[Security(requires_user)],
+)
+
 ########################################################
 ##################### Auth #############################
@@ -886,28 +953,6 @@ async def set_graph_active_version(
         await on_graph_deactivate(current_active_graph, user_id=user_id)
 
 
-async def _update_library_agent_version_and_settings(
-    user_id: str, agent_graph: graph_db.GraphModel
-) -> library_model.LibraryAgent:
-    # Keep the library agent up to date with the new active version
-    library = await library_db.update_agent_version_in_library(
-        user_id, agent_graph.id, agent_graph.version
-    )
-    # If the graph has HITL node, initialize the setting if it's not already set.
-    if (
-        agent_graph.has_human_in_the_loop
-        and library.settings.human_in_the_loop_safe_mode is None
-    ):
-        await library_db.update_library_agent_settings(
-            user_id=user_id,
-            agent_id=library.id,
-            settings=library.settings.model_copy(
-                update={"human_in_the_loop_safe_mode": True}
-            ),
-        )
-    return library
-
-
 @v1_router.patch(
     path="/graphs/{graph_id}/settings",
     summary="Update graph settings",
@@ -1110,23 +1155,6 @@ async def list_graph_executions(
     )
 
 
-async def hide_activity_summaries_if_disabled(
-    executions: list[execution_db.GraphExecutionMeta], user_id: str
-) -> list[execution_db.GraphExecutionMeta]:
-    """Hide activity summaries and scores if AI_ACTIVITY_STATUS feature is disabled."""
-    if await is_feature_enabled(Flag.AI_ACTIVITY_STATUS, user_id):
-        return executions  # Return as-is if feature is enabled
-
-    # Filter out activity features if disabled
-    filtered_executions = []
-    for execution in executions:
-        if execution.stats:
-            filtered_stats = execution.stats.without_activity_features()
-            execution = execution.model_copy(update={"stats": filtered_stats})
-        filtered_executions.append(execution)
-    return filtered_executions
-
-
 @v1_router.get(
     path="/graphs/{graph_id}/executions/{graph_exec_id}",
     summary="Get execution details",
@@ -1169,21 +1197,6 @@ async def get_graph_execution(
     return result
 
 
-async def hide_activity_summary_if_disabled(
-    execution: execution_db.GraphExecution | execution_db.GraphExecutionWithNodes,
-    user_id: str,
-) -> execution_db.GraphExecution | execution_db.GraphExecutionWithNodes:
-    """Hide activity summary and score for a single execution if AI_ACTIVITY_STATUS feature is disabled."""
-    if await is_feature_enabled(Flag.AI_ACTIVITY_STATUS, user_id):
-        return execution  # Return as-is if feature is enabled
-
-    # Filter out activity features if disabled
-    if execution.stats:
-        filtered_stats = execution.stats.without_activity_features()
-        return execution.model_copy(update={"stats": filtered_stats})
-    return execution
-
-
 @v1_router.delete(
     path="/executions/{graph_exec_id}",
     summary="Delete graph execution",
@@ -1244,7 +1257,7 @@ async def enable_execution_sharing(
     )
 
     # Return the share URL
-    frontend_url = settings.config.frontend_base_url or "http://localhost:3000"
+    frontend_url = Settings().config.frontend_base_url or "http://localhost:3000"
     share_url = f"{frontend_url}/share/{share_token}"
     return ShareResponse(share_url=share_url, share_token=share_token)


@@ -11,13 +11,13 @@ import starlette.datastructures
 from fastapi import HTTPException, UploadFile
 from pytest_snapshot.plugin import Snapshot
 
+import backend.server.routers.v1 as v1_routes
 from backend.data.credit import AutoTopUpConfig
 from backend.data.graph import GraphModel
-
-from .v1 import upload_file, v1_router
+from backend.server.routers.v1 import upload_file
 
 app = fastapi.FastAPI()
-app.include_router(v1_router)
+app.include_router(v1_routes.v1_router)
 client = fastapi.testclient.TestClient(app)
@@ -50,7 +50,7 @@ def test_get_or_create_user_route(
     }
 
     mocker.patch(
-        "backend.api.features.v1.get_or_create_user",
+        "backend.server.routers.v1.get_or_create_user",
         return_value=mock_user,
     )
@@ -71,7 +71,7 @@ def test_update_user_email_route(
 ) -> None:
     """Test update user email endpoint"""
     mocker.patch(
-        "backend.api.features.v1.update_user_email",
+        "backend.server.routers.v1.update_user_email",
        return_value=None,
     )
@@ -107,7 +107,7 @@ def test_get_graph_blocks(
     # Mock get_blocks
     mocker.patch(
-        "backend.api.features.v1.get_blocks",
+        "backend.server.routers.v1.get_blocks",
         return_value={"test-block": lambda: mock_block},
     )
@@ -146,7 +146,7 @@ def test_execute_graph_block(
     mock_block.execute = mock_execute
     mocker.patch(
-        "backend.api.features.v1.get_block",
+        "backend.server.routers.v1.get_block",
         return_value=mock_block,
     )
@@ -155,7 +155,7 @@ def test_execute_graph_block(
     mock_user.timezone = "UTC"
     mocker.patch(
-        "backend.api.features.v1.get_user_by_id",
+        "backend.server.routers.v1.get_user_by_id",
         return_value=mock_user,
     )
@@ -181,7 +181,7 @@ def test_execute_graph_block_not_found(
 ) -> None:
     """Test execute block with non-existent block"""
     mocker.patch(
-        "backend.api.features.v1.get_block",
+        "backend.server.routers.v1.get_block",
         return_value=None,
     )
@@ -200,7 +200,7 @@ def test_get_user_credits(
     mock_credit_model = Mock()
     mock_credit_model.get_credits = AsyncMock(return_value=1000)
     mocker.patch(
-        "backend.api.features.v1.get_user_credit_model",
+        "backend.server.routers.v1.get_user_credit_model",
         return_value=mock_credit_model,
     )
@@ -227,7 +227,7 @@ def test_request_top_up(
         return_value="https://checkout.example.com/session123"
     )
     mocker.patch(
-        "backend.api.features.v1.get_user_credit_model",
+        "backend.server.routers.v1.get_user_credit_model",
         return_value=mock_credit_model,
     )
@@ -254,7 +254,7 @@ def test_get_auto_top_up(
     mock_config = AutoTopUpConfig(threshold=100, amount=500)
     mocker.patch(
-        "backend.api.features.v1.get_auto_top_up",
+        "backend.server.routers.v1.get_auto_top_up",
         return_value=mock_config,
     )
@@ -279,7 +279,7 @@ def test_configure_auto_top_up(
     """Test configure auto top-up endpoint - this test would have caught the enum casting bug"""
     # Mock the set_auto_top_up function to avoid database operations
    mocker.patch(
-        "backend.api.features.v1.set_auto_top_up",
+        "backend.server.routers.v1.set_auto_top_up",
         return_value=None,
     )
@@ -289,7 +289,7 @@ def test_configure_auto_top_up(
     mock_credit_model.top_up_credits.return_value = None
     mocker.patch(
-        "backend.api.features.v1.get_user_credit_model",
+        "backend.server.routers.v1.get_user_credit_model",
         return_value=mock_credit_model,
     )
@@ -311,7 +311,7 @@ def test_configure_auto_top_up_validation_errors(
 ) -> None:
     """Test configure auto top-up endpoint validation"""
     # Mock set_auto_top_up to avoid database operations for successful case
-    mocker.patch("backend.api.features.v1.set_auto_top_up")
+    mocker.patch("backend.server.routers.v1.set_auto_top_up")
 
     # Mock credit model to avoid Stripe API calls for the successful case
     mock_credit_model = mocker.AsyncMock()
@@ -319,7 +319,7 @@ def test_configure_auto_top_up_validation_errors(
     mock_credit_model.top_up_credits.return_value = None
     mocker.patch(
-        "backend.api.features.v1.get_user_credit_model",
+        "backend.server.routers.v1.get_user_credit_model",
         return_value=mock_credit_model,
     )
@@ -393,7 +393,7 @@ def test_get_graph(
     )
 
     mocker.patch(
-        "backend.api.features.v1.graph_db.get_graph",
+        "backend.server.routers.v1.graph_db.get_graph",
         return_value=mock_graph,
     )
@@ -415,7 +415,7 @@ def test_get_graph_not_found(
 ) -> None:
     """Test get graph with non-existent ID"""
     mocker.patch(
-        "backend.api.features.v1.graph_db.get_graph",
+        "backend.server.routers.v1.graph_db.get_graph",
         return_value=None,
     )
@@ -443,15 +443,15 @@ def test_delete_graph(
     )
 
     mocker.patch(
-        "backend.api.features.v1.graph_db.get_graph",
+        "backend.server.routers.v1.graph_db.get_graph",
         return_value=mock_graph,
     )
     mocker.patch(
-        "backend.api.features.v1.on_graph_deactivate",
+        "backend.server.routers.v1.on_graph_deactivate",
         return_value=None,
     )
     mocker.patch(
-        "backend.api.features.v1.graph_db.delete_graph",
+        "backend.server.routers.v1.graph_db.delete_graph",
         return_value=3,  # Number of versions deleted
     )
@@ -498,8 +498,8 @@ async def test_upload_file_success(test_user_id: str):
     )
 
     # Mock dependencies
-    with patch("backend.api.features.v1.scan_content_safe") as mock_scan, patch(
-        "backend.api.features.v1.get_cloud_storage_handler"
+    with patch("backend.server.routers.v1.scan_content_safe") as mock_scan, patch(
+        "backend.server.routers.v1.get_cloud_storage_handler"
     ) as mock_handler_getter:
         mock_scan.return_value = None
@@ -550,8 +550,8 @@ async def test_upload_file_no_filename(test_user_id: str):
         ),
     )
 
-    with patch("backend.api.features.v1.scan_content_safe") as mock_scan, patch(
-        "backend.api.features.v1.get_cloud_storage_handler"
+    with patch("backend.server.routers.v1.scan_content_safe") as mock_scan, patch(
+        "backend.server.routers.v1.get_cloud_storage_handler"
     ) as mock_handler_getter:
         mock_scan.return_value = None
@@ -610,7 +610,7 @@ async def test_upload_file_virus_scan_failure(test_user_id: str):
         headers=starlette.datastructures.Headers({"content-type": "text/plain"}),
     )
 
-    with patch("backend.api.features.v1.scan_content_safe") as mock_scan:
+    with patch("backend.server.routers.v1.scan_content_safe") as mock_scan:
         # Mock virus scan to raise exception
         mock_scan.side_effect = RuntimeError("Virus detected!")
@@ -631,8 +631,8 @@ async def test_upload_file_cloud_storage_failure(test_user_id: str):
         headers=starlette.datastructures.Headers({"content-type": "text/plain"}),
    )
 
-    with patch("backend.api.features.v1.scan_content_safe") as mock_scan, patch(
-        "backend.api.features.v1.get_cloud_storage_handler"
+    with patch("backend.server.routers.v1.scan_content_safe") as mock_scan, patch(
+        "backend.server.routers.v1.get_cloud_storage_handler"
     ) as mock_handler_getter:
         mock_scan.return_value = None
@@ -678,8 +678,8 @@ async def test_upload_file_gcs_not_configured_fallback(test_user_id: str):
         headers=starlette.datastructures.Headers({"content-type": "text/plain"}),
     )
 
-    with patch("backend.api.features.v1.scan_content_safe") as mock_scan, patch(
-        "backend.api.features.v1.get_cloud_storage_handler"
+    with patch("backend.server.routers.v1.scan_content_safe") as mock_scan, patch(
+        "backend.server.routers.v1.get_cloud_storage_handler"
     ) as mock_handler_getter:
         mock_scan.return_value = None


@@ -8,7 +8,7 @@ import pytest
 from fastapi import HTTPException, Request
 from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN
 
-from backend.api.utils.api_key_auth import APIKeyAuthenticator
+from backend.server.utils.api_key_auth import APIKeyAuthenticator
 from backend.util.exceptions import MissingConfigError


@@ -1,6 +1,6 @@
 import pytest
 
-from backend.api.utils.cors import build_cors_params
+from backend.server.utils.cors import build_cors_params
 from backend.util.settings import AppEnvironment


@@ -9,13 +9,16 @@ if TYPE_CHECKING:
 from pydantic import ValidationError
 
 from backend.data.execution import ExecutionStatus
+from backend.server.v2.AutoMod.models import (
+    AutoModRequest,
+    AutoModResponse,
+    ModerationConfig,
+)
 from backend.util.exceptions import ModerationError
 from backend.util.feature_flag import Flag, is_feature_enabled
 from backend.util.request import Requests
 from backend.util.settings import Settings
 
-from .models import AutoModRequest, AutoModResponse, ModerationConfig
-
 logger = logging.getLogger(__name__)

Some files were not shown because too many files have changed in this diff.