mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-04-08 03:00:28 -04:00
testing
This commit is contained in:
0
autogpt_platform/backend/test/blocks/__init__.py
Normal file
0
autogpt_platform/backend/test/blocks/__init__.py
Normal file
272
autogpt_platform/backend/test/blocks/api_test_framework.py
Normal file
272
autogpt_platform/backend/test/blocks/api_test_framework.py
Normal file
@@ -0,0 +1,272 @@
|
||||
"""
|
||||
Generic API testing framework for verifying block API calls against expected patterns.
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Tuple
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
from urllib.parse import parse_qs, urlparse
|
||||
|
||||
from backend.sdk import APIKeyCredentials, OAuth2Credentials
|
||||
|
||||
|
||||
class APICallMatcher:
    """Matches actual API calls against expected patterns.

    The expected-call dict comes from a provider JSON spec and may contain:
    ``url_pattern`` (exact URL or URL with ``{param}`` placeholders),
    ``method``, ``headers``, ``query_params``, ``body``, ``response`` and
    ``status``.
    """

    def __init__(self, expected: Dict[str, Any]):
        self.expected = expected
        self.url_pattern = expected.get("url_pattern")
        # Methods are compared case-insensitively; normalize once here.
        self.method = expected.get("method", "GET").upper()
        self.headers = expected.get("headers", {})
        self.query_params = expected.get("query_params", {})
        self.body_pattern = expected.get("body", {})
        self.response = expected.get("response", {})
        self.status = expected.get("status", 200)

    def matches_url(self, actual_url: str) -> bool:
        """Check if the actual URL matches the expected pattern."""
        if self.url_pattern is None:
            return False

        if "{" in self.url_pattern:
            # Convert the URL pattern to a regex. BUG FIX: literal segments
            # are now escaped with re.escape so regex metacharacters (e.g.
            # the "." in a hostname) only match themselves; previously
            # "https://a.b/{id}" would also match "https://aXb/123".
            # Each {param} placeholder becomes a named single-segment group.
            parts = re.split(r"\{(\w+)\}", self.url_pattern)
            pieces = []
            for i, part in enumerate(parts):
                if i % 2 == 0:
                    pieces.append(re.escape(part))  # literal text
                else:
                    pieces.append(f"(?P<{part}>[^/]+)")  # placeholder
            pattern = "^" + "".join(pieces) + "$"
            return bool(re.match(pattern, actual_url))
        return actual_url == self.url_pattern

    def matches_headers(self, actual_headers: Dict[str, str]) -> Tuple[bool, List[str]]:
        """Check if required headers are present.

        Returns (all_ok, list of error descriptions).
        """
        errors = []
        for key, expected_value in self.headers.items():
            if key not in actual_headers:
                errors.append(f"Missing required header: {key}")
            elif expected_value and not self._matches_value(
                actual_headers[key], expected_value
            ):
                errors.append(
                    f"Header {key} mismatch: expected {expected_value}, got {actual_headers[key]}"
                )
        return len(errors) == 0, errors

    def matches_query_params(self, actual_url: str) -> Tuple[bool, List[str]]:
        """Check if query parameters match expected values.

        Only the first value of a repeated parameter is compared.
        """
        parsed = urlparse(actual_url)
        actual_params = parse_qs(parsed.query)
        errors = []

        for key, expected_value in self.query_params.items():
            if key not in actual_params:
                if expected_value is not None:  # None means optional
                    errors.append(f"Missing required query param: {key}")
            elif expected_value and not self._matches_value(
                actual_params[key][0], expected_value
            ):
                errors.append(
                    f"Query param {key} mismatch: expected {expected_value}, got {actual_params[key][0]}"
                )

        return len(errors) == 0, errors

    def matches_body(self, actual_body: Any) -> Tuple[bool, List[str]]:
        """Check if request body matches expected pattern.

        Only top-level keys of a dict body are compared; an empty/absent
        body pattern always matches.
        """
        if not self.body_pattern:
            return True, []

        errors = []
        if isinstance(self.body_pattern, dict) and isinstance(actual_body, dict):
            for key, expected_value in self.body_pattern.items():
                if key not in actual_body:
                    if expected_value is not None:  # None means optional
                        errors.append(f"Missing required body field: {key}")
                elif expected_value and not self._matches_value(
                    actual_body[key], expected_value
                ):
                    errors.append(
                        f"Body field {key} mismatch: expected {expected_value}, got {actual_body[key]}"
                    )

        return len(errors) == 0, errors

    def _matches_value(self, actual: Any, expected: Any) -> bool:
        """Check if a value matches the expected pattern.

        Three forms are supported:
        - "{{name}}": template variable — any truthy value matches;
        - "/regex/": the inner regex must match str(actual) from the start;
        - anything else: plain equality.
        """
        if (
            isinstance(expected, str)
            and expected.startswith("{{")
            and expected.endswith("}}")
        ):
            # Template variable, any non-empty value is acceptable
            return bool(actual)
        elif (
            isinstance(expected, str)
            and expected.startswith("/")
            and expected.endswith("/")
        ):
            # Regex pattern
            pattern = expected[1:-1]
            return bool(re.match(pattern, str(actual)))
        else:
            return actual == expected
|
||||
|
||||
|
||||
class APITestInterceptor:
    """Intercepts API calls and verifies them against expected patterns.

    Loads one ``<provider>.json`` spec per provider from ``test_data_path``
    and hands out mock ``Requests`` objects whose HTTP verbs validate every
    call against that provider's spec before returning a canned response.
    """

    def __init__(self, test_data_path: Path):
        self.test_data_path = test_data_path
        self.api_specs = {}  # provider name -> parsed JSON spec
        self.call_log = []  # chronological record of intercepted calls
        self.load_api_specs()

    def load_api_specs(self):
        """Load API specifications for all providers."""
        for provider_file in self.test_data_path.glob("*.json"):
            provider_name = provider_file.stem
            with open(provider_file, "r") as f:
                self.api_specs[provider_name] = json.load(f)

    def create_mock_requests(self, provider: str):
        """Create a mock Requests object that intercepts and validates API calls."""
        mock_requests = MagicMock()

        async def mock_request(method: str, url: str, **kwargs):
            """Mock request that validates against expected patterns."""
            # Log the call
            call_info = {
                "method": method.upper(),
                "url": url,
                "headers": kwargs.get("headers", {}),
                "params": kwargs.get("params", {}),
                "json": kwargs.get("json"),
                "data": kwargs.get("data"),
            }
            self.call_log.append(call_info)

            # Find matching pattern
            provider_spec = self.api_specs.get(provider, {})
            api_calls = provider_spec.get("api_calls", [])

            for expected_call in api_calls:
                matcher = APICallMatcher(expected_call)

                # Check if this call matches (method + URL select the spec entry)
                if matcher.method == method.upper() and matcher.matches_url(url):
                    # Validate the call
                    errors = []

                    # Check headers
                    headers_match, header_errors = matcher.matches_headers(
                        kwargs.get("headers", {})
                    )
                    errors.extend(header_errors)

                    # Check query params: params passed via kwargs are appended
                    # to the URL so the matcher can parse them uniformly.
                    if kwargs.get("params"):
                        from urllib.parse import urlencode

                        param_str = urlencode(kwargs["params"])
                        full_url = f"{url}?{param_str}"
                    else:
                        full_url = url

                    params_match, param_errors = matcher.matches_query_params(full_url)
                    errors.extend(param_errors)

                    # Check body (JSON body takes precedence over form data)
                    body = kwargs.get("json") or kwargs.get("data")
                    if body:
                        body_match, body_errors = matcher.matches_body(body)
                        errors.extend(body_errors)

                    # If validation fails, raise an error
                    if errors:
                        raise AssertionError(
                            "API call validation failed:\n" + "\n".join(errors)
                        )

                    # Return mock response mirroring the spec's canned payload
                    mock_response = AsyncMock()
                    mock_response.status = matcher.status
                    mock_response.json.return_value = matcher.response
                    mock_response.text = json.dumps(matcher.response)
                    return mock_response

            # No matching pattern found
            raise AssertionError(f"No matching API pattern found for {method} {url}")

        # BUG FIX: the HTTP-verb side_effects were previously *sync* lambdas
        # that returned the coroutine from mock_request without awaiting it.
        # AsyncMock only awaits a side_effect that is itself a coroutine
        # function, so `await mock_requests.get(...)` yielded a bare coroutine
        # object instead of the mock response. Async wrappers fix that.
        async def _get(url, **kwargs):
            return await mock_request("GET", url, **kwargs)

        async def _post(url, **kwargs):
            return await mock_request("POST", url, **kwargs)

        async def _put(url, **kwargs):
            return await mock_request("PUT", url, **kwargs)

        async def _patch(url, **kwargs):
            return await mock_request("PATCH", url, **kwargs)

        async def _delete(url, **kwargs):
            return await mock_request("DELETE", url, **kwargs)

        mock_requests.get = AsyncMock(side_effect=_get)
        mock_requests.post = AsyncMock(side_effect=_post)
        mock_requests.put = AsyncMock(side_effect=_put)
        mock_requests.patch = AsyncMock(side_effect=_patch)
        mock_requests.delete = AsyncMock(side_effect=_delete)

        return mock_requests

    def get_test_scenarios(
        self, provider: str, block_name: str
    ) -> List[Dict[str, Any]]:
        """Get test scenarios for a specific block."""
        provider_spec = self.api_specs.get(provider, {})
        return provider_spec.get("test_scenarios", {}).get(block_name, [])

    def create_test_credentials(self, provider: str) -> Any:
        """Create test credentials based on provider configuration.

        The spec's ``auth_type`` selects the credential class; any unknown
        value raises ValueError.
        """
        provider_spec = self.api_specs.get(provider, {})
        auth_type = provider_spec.get("auth_type", "api_key")

        if auth_type == "api_key":
            from backend.sdk import ProviderName

            return APIKeyCredentials(
                provider=ProviderName(provider),
                api_key=provider_spec.get("test_api_key", "test-key"),
            )
        elif auth_type == "oauth2":
            from backend.sdk import ProviderName

            return OAuth2Credentials(
                provider=ProviderName(provider),
                access_token=provider_spec.get("test_access_token", "test-token"),
                refresh_token=provider_spec.get("test_refresh_token", ""),
                scopes=[],
            )
        elif auth_type == "user_password":
            from backend.sdk import ProviderName, UserPasswordCredentials

            return UserPasswordCredentials(
                provider=ProviderName(provider),
                username=provider_spec.get("test_username", "test-user"),
                password=provider_spec.get("test_password", "test-pass"),
            )
        else:
            raise ValueError(f"Unknown auth type: {auth_type}")

    def clear_log(self):
        """Clear the call log."""
        self.call_log = []

    def get_call_summary(self) -> str:
        """Get a summary of all API calls made."""
        summary = []
        for i, call in enumerate(self.call_log, 1):
            summary.append(f"{i}. {call['method']} {call['url']}")
            if call["params"]:
                summary.append(f"   Params: {call['params']}")
            if call["json"]:
                summary.append(f"   Body: {json.dumps(call['json'], indent=2)}")
        return "\n".join(summary)
|
||||
199
autogpt_platform/backend/test/blocks/test_api_compliance.py
Normal file
199
autogpt_platform/backend/test/blocks/test_api_compliance.py
Normal file
@@ -0,0 +1,199 @@
|
||||
"""
|
||||
Generic API compliance tests for all provider blocks.
|
||||
This test suite verifies that all API calls match the expected patterns defined in JSON specifications.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
# Import from the same directory
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
sys.path.insert(0, os.path.dirname(__file__))
|
||||
from api_test_framework import APITestInterceptor
|
||||
|
||||
from backend.sdk import Block
|
||||
|
||||
|
||||
class TestAPICompliance:
    """Test API compliance for all provider blocks.

    Drives each provider's blocks against the mock interceptor and checks
    that they issue the API calls declared in test_data/<provider>.json.
    """

    @pytest.fixture
    def api_interceptor(self):
        """Create API test interceptor with test data."""
        # test_data is now in the same directory as this file
        test_data_path = Path(__file__).parent / "test_data"
        return APITestInterceptor(test_data_path)

    def get_all_blocks_for_provider(
        self, provider: str
    ) -> List[tuple[str, type[Block]]]:
        """Get all block classes for a provider.

        Returns (class_name, class) pairs for every concrete Block subclass
        whose name ends in "Block". Unknown providers yield an empty list.
        """
        blocks = []

        # Import provider module
        import importlib
        import inspect

        try:
            if provider in ["airtable", "baas", "elevenlabs", "oxylabs"]:
                # These providers expose their blocks from the package itself.
                module = importlib.import_module(f"backend.blocks.{provider}")
            elif provider == "exa":
                # For exa, we need to import all individual files
                from backend.blocks.exa import (
                    answers,
                    contents,
                    search,
                    similar,
                    webhook_blocks,
                    websets,
                )

                # Collect all blocks from exa modules
                for submodule in [
                    answers,
                    contents,
                    search,
                    similar,
                    websets,
                    webhook_blocks,
                ]:
                    for name, obj in inspect.getmembers(submodule):
                        if (
                            inspect.isclass(obj)
                            and issubclass(obj, Block)
                            and obj is not Block
                            and name.endswith("Block")
                        ):
                            blocks.append((name, obj))
                # exa is fully handled here; skip the generic scan below.
                return blocks
            elif provider == "gem":
                from backend.blocks.gem import blocks as gem

                module = gem
            else:
                # Unknown provider: nothing to collect.
                return blocks

            # Find all block classes
            for name, obj in inspect.getmembers(module):
                if (
                    inspect.isclass(obj)
                    and issubclass(obj, Block)
                    and obj is not Block
                    and name.endswith("Block")
                ):
                    blocks.append((name, obj))
        except ImportError:
            # NOTE(review): a missing/broken provider package is silently
            # treated as "no blocks" — consider logging here.
            pass

        return blocks

    @pytest.mark.parametrize(
        "provider", ["airtable", "baas", "elevenlabs", "exa", "gem", "oxylabs"]
    )
    async def test_provider_blocks(
        self, provider: str, api_interceptor: APITestInterceptor
    ):
        """Test that provider blocks make expected API calls."""
        # Get provider spec from already loaded specs
        spec = api_interceptor.api_specs.get(provider)
        if not spec:
            pytest.skip(f"No spec found for {provider}")

        # Get test scenarios
        test_scenarios = spec.get("test_scenarios", {})
        if not test_scenarios:
            pytest.skip(f"No test scenarios defined for {provider}")

        # Get all blocks for this provider
        provider_blocks = self.get_all_blocks_for_provider(provider)
        block_dict = {name: cls for name, cls in provider_blocks}

        # Run test scenarios
        for block_name, scenarios in test_scenarios.items():
            if block_name not in block_dict:
                # Try to find block with partial match (spec names may be
                # abbreviations of the real class names).
                found = False
                for actual_name, block_cls in block_dict.items():
                    if block_name in actual_name or actual_name in block_name:
                        block_name = actual_name
                        found = True
                        break

                if not found:
                    print(
                        f"Warning: Block {block_name} not found in provider {provider}"
                    )
                    continue

            block_cls = block_dict[block_name]

            for scenario in scenarios:
                # Create block instance
                try:
                    block = block_cls()
                except Exception as e:
                    pytest.fail(f"Failed to instantiate {block_name}: {e}")

                # Prepare test input
                test_input = scenario.get("input", {})
                expected_calls = scenario.get("expected_calls", [])

                # Mock credentials if needed
                mock_creds = api_interceptor.create_test_credentials(provider)

                # Create mock requests object
                mock_requests = api_interceptor.create_mock_requests(provider)

                # Patch Requests to use our interceptor
                with patch("backend.sdk.Requests", return_value=mock_requests):
                    try:
                        # Clear the call log before running
                        api_interceptor.clear_log()

                        # Create input instance
                        input_class = getattr(block, "Input")
                        input_data = input_class(**test_input)

                        # Run block
                        outputs = []
                        async for output in block.run(
                            input_data, credentials=mock_creds
                        ):
                            outputs.append(output)

                        # Verify API calls were made
                        if expected_calls and not api_interceptor.call_log:
                            pytest.fail(
                                f"{block_name}: No API calls were made, but expected: {expected_calls}"
                            )

                        # Log actual calls for debugging
                        if api_interceptor.call_log:
                            print(f"\n{block_name} API calls:")
                            print(api_interceptor.get_call_summary())

                    except Exception:
                        # Expected for blocks that need real API access
                        # Just verify the block structure is correct
                        # NOTE(review): this also swallows the AssertionError
                        # raised by the interceptor's validation, so a spec
                        # mismatch cannot fail this test — confirm intended.
                        pass

    def test_all_providers_have_specs(self):
        """Test that all provider directories have test specifications."""
        test_data_path = Path(__file__).parent / "test_data"
        providers = ["airtable", "baas", "elevenlabs", "exa", "gem", "oxylabs"]

        for provider in providers:
            spec_file = test_data_path / f"{provider}.json"
            assert spec_file.exists(), f"Missing test spec for {provider}"

            # Verify spec is valid JSON
            with open(spec_file) as f:
                spec = json.load(f)
                assert "provider" in spec
                assert "api_calls" in spec
|
||||
@@ -0,0 +1,388 @@
|
||||
"""
|
||||
Pytest-based API endpoint validation for all provider blocks.
|
||||
|
||||
This test automatically discovers all API endpoints in provider implementations
|
||||
and validates them against the JSON specifications in test_data/.
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Set, Tuple
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
def extract_api_endpoints(file_content: str, provider: str) -> Set[Tuple[str, int]]:
    """
    Extract API endpoints from file content based on provider patterns.

    Returns tuples of (endpoint, line_number) for better error reporting.
    """
    found: Set[Tuple[str, int]] = set()
    source_lines = file_content.split("\n")

    # Generic extraction: direct/await Requests() calls, Requests().request()
    # invocations, and plain/f-string "url = ..." assignments. Each regex
    # captures the URL text between the quotes.
    generic_patterns = [
        r'Requests\(\)\.\w+\(\s*["\']([^"\']+)["\']',
        r'Requests\(\)\.\w+\(\s*f["\']([^"\']+)["\']',
        r'await\s+Requests\(\)\.\w+\(\s*["\']([^"\']+)["\']',
        r'await\s+Requests\(\)\.\w+\(\s*f["\']([^"\']+)["\']',
        r'Requests\(\)\.request\([^,]+,\s*["\']([^"\']+)["\']',
        r'Requests\(\)\.request\([^,]+,\s*f["\']([^"\']+)["\']',
        r'url\s*=\s*["\']([^"\']+)["\']',
        r'url\s*=\s*f["\']([^"\']+)["\']',
    ]

    for lineno, text in enumerate(source_lines, 1):
        for rx in generic_patterns:
            for hit in re.findall(rx, text):
                if hit.startswith("http"):
                    found.add((hit, lineno))

    # Provider-specific extraction.
    if provider == "gem":
        # Gem blocks pass relative endpoints to make_request; prefix them
        # with the API host so they compare against absolute spec patterns.
        for lineno, text in enumerate(source_lines, 1):
            hit = re.search(r'endpoint\s*=\s*["\']([^"\']+)["\']', text)
            if hit and hit.group(1).startswith("/"):
                found.add((f"https://api.gem.com{hit.group(1)}", lineno))

    elif provider == "oxylabs":
        # Oxylabs URLs appear in several fixed shapes; map each shape to its
        # canonical (possibly placeholder-bearing) endpoint.
        canonical = {
            "realtime": "https://realtime.oxylabs.io/v1/queries",
            "data": "https://data.oxylabs.io/v1/queries",
            "batch": "https://data.oxylabs.io/v1/queries/batch",
            "job_status": "https://data.oxylabs.io/v1/queries/{job_id}",
            "job_results": "https://data.oxylabs.io/v1/queries/{job_id}/results",
            "callbacker": "https://data.oxylabs.io/v1/info/callbacker_ips",
        }
        oxylabs_patterns = [
            (r'url\s*=\s*["\']https://realtime\.oxylabs\.io/v1/queries["\']', "realtime"),
            (r'url\s*=\s*["\']https://data\.oxylabs\.io/v1/queries["\']', "data"),
            (r'url="https://data\.oxylabs\.io/v1/queries/batch"', "batch"),
            (r'f"https://data\.oxylabs\.io/v1/queries/{[^}]+}"', "job_status"),
            (r'f"https://data\.oxylabs\.io/v1/queries/{[^}]+}/results"', "job_results"),
            (r'"https://data\.oxylabs\.io/v1/info/callbacker_ips"', "callbacker"),
        ]
        for lineno, text in enumerate(source_lines, 1):
            for rx, kind in oxylabs_patterns:
                if re.search(rx, text):
                    found.add((canonical[kind], lineno))

    # Drop template placeholders, bare domains, and anything that is not an
    # absolute http(s) URL.
    return {
        (endpoint, lineno)
        for endpoint, lineno in found
        if endpoint.startswith("http")
        and "{base_url}" not in endpoint
        and not endpoint.endswith((".com", ".io", ".com/", ".io/"))
    }
|
||||
|
||||
|
||||
def normalize_endpoint_for_matching(endpoint: str) -> str:
    """Normalize endpoint for pattern matching.

    Collapses concrete identifiers (UUIDs, numeric IDs, uppercase IDs) into
    a generic "/{id}" placeholder so endpoints found in code can be compared
    against spec URL patterns.
    """
    substitutions = (
        (r"/[a-f0-9-]{36}", "/{id}"),  # UUIDs
        (r"/\d+", "/{id}"),  # Numeric IDs
        (r"/[A-Z0-9_]+", "/{id}"),  # Uppercase IDs
    )
    for rx, placeholder in substitutions:
        endpoint = re.sub(rx, placeholder, endpoint)
    return endpoint
|
||||
|
||||
|
||||
def match_endpoint_to_spec(
    endpoint: str, spec_endpoints: List[Dict]
) -> Tuple[bool, str]:
    """
    Check if an endpoint matches any pattern in the spec.

    Returns (is_match, matched_pattern or error_message)
    """
    for spec_endpoint in spec_endpoints:
        pattern = spec_endpoint["url_pattern"]

        # Direct match
        if endpoint == pattern:
            return True, pattern

        # Pattern matching with placeholders. BUG FIX: literal text is now
        # escaped with re.escape so regex metacharacters (e.g. "." in the
        # hostname) only match themselves; previously the pattern text was
        # used as raw regex, so "a.b" also matched "aXb". Each {param}
        # placeholder becomes a single-path-segment wildcard.
        segments = re.split(r"\{[^}]+\}", pattern)
        regex_pattern = "^" + r"[^/]+".join(re.escape(s) for s in segments) + "$"

        if re.match(regex_pattern, endpoint):
            return True, pattern

        # Try normalized matching (concrete IDs collapsed to "/{id}")
        normalized = normalize_endpoint_for_matching(endpoint)
        if re.match(regex_pattern, normalized):
            return True, pattern

    return False, f"No matching pattern found for: {endpoint}"
|
||||
|
||||
|
||||
def get_all_provider_files() -> Dict[str, List[Path]]:
    """Get all Python files for each provider.

    Navigates from test/blocks up to backend/blocks and collects each
    provider's non-private .py modules. Providers whose directory does not
    exist are omitted from the result.
    """
    blocks_dir = Path(__file__).parent.parent.parent / "backend" / "blocks"

    result: Dict[str, List[Path]] = {}
    for provider in ("airtable", "baas", "elevenlabs", "exa", "gem", "oxylabs"):
        provider_dir = blocks_dir / provider
        if not provider_dir.exists():
            continue
        result[provider] = [
            path
            for path in provider_dir.glob("*.py")
            if not path.name.startswith("_") and path.name != "__init__.py"
        ]
    return result
|
||||
|
||||
|
||||
def load_provider_spec(provider: str) -> Dict:
    """Load provider specification from JSON file.

    Specs live in test_data/ next to this file, one JSON file per provider.
    Raises FileNotFoundError when no spec exists for the provider.
    """
    spec_file = Path(__file__).parent / "test_data" / f"{provider}.json"

    if not spec_file.exists():
        raise FileNotFoundError(f"Specification file not found: {spec_file}")

    return json.loads(spec_file.read_text())
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "provider", ["airtable", "baas", "elevenlabs", "exa", "gem", "oxylabs"]
)
def test_provider_api_endpoints(provider: str):
    """
    Test that all API endpoints in provider implementations match the specification.

    This test:
    1. Discovers all API endpoints in the provider's code
    2. Loads the expected endpoints from the JSON specification
    3. Validates that every endpoint in code has a matching pattern in the spec
    4. Reports any endpoints that don't match or are missing from the spec
    """
    # Get all files for this provider
    provider_files = get_all_provider_files()
    if provider not in provider_files:
        pytest.skip(f"Provider directory not found: {provider}")

    # Load the specification
    try:
        spec = load_provider_spec(provider)
    except FileNotFoundError as e:
        pytest.fail(str(e))

    # Extract all endpoints from code
    all_endpoints = set()
    endpoint_locations = {}  # endpoint -> [(file, line_num), ...]

    for py_file in provider_files[provider]:
        with open(py_file, "r") as f:
            content = f.read()
            endpoints = extract_api_endpoints(content, provider)

            for endpoint, line_num in endpoints:
                all_endpoints.add(endpoint)
                if endpoint not in endpoint_locations:
                    endpoint_locations[endpoint] = []
                # The same endpoint may appear in several files/lines;
                # record every occurrence for the error report.
                endpoint_locations[endpoint].append((py_file.name, line_num))

    # Get expected endpoints from spec
    spec_endpoints = spec.get("api_calls", [])
    spec_patterns = [e["url_pattern"] for e in spec_endpoints]

    # Validate all discovered endpoints
    validation_errors = []
    unmatched_endpoints = []

    for endpoint in sorted(all_endpoints):
        is_match, result = match_endpoint_to_spec(endpoint, spec_endpoints)

        if not is_match:
            locations = endpoint_locations[endpoint]
            location_str = ", ".join([f"{file}:{line}" for file, line in locations])
            validation_errors.append(
                f"\n  ❌ Endpoint not in spec: {endpoint}\n"
                f"     Found at: {location_str}\n"
                f"     Reason: {result}"
            )
            unmatched_endpoints.append(endpoint)

    # Check for unused spec endpoints (warnings, not errors)
    unused_patterns = []
    for pattern in spec_patterns:
        pattern_used = False
        for endpoint in all_endpoints:
            # Re-run the matcher against a single-pattern spec to see whether
            # any discovered endpoint exercises this pattern.
            is_match, _ = match_endpoint_to_spec(endpoint, [{"url_pattern": pattern}])
            if is_match:
                pattern_used = True
                break

        if not pattern_used:
            unused_patterns.append(pattern)

    # Create detailed report
    report_lines = [
        f"\n{'='*80}",
        f"API Endpoint Validation Report for {provider.upper()}",
        f"{'='*80}",
        f"Files checked: {len(provider_files[provider])}",
        f"Total endpoints found: {len(all_endpoints)}",
        f"Spec patterns: {len(spec_patterns)}",
    ]

    if validation_errors:
        report_lines.append(
            f"\n❌ VALIDATION ERRORS ({len(validation_errors)} endpoints don't match spec):"
        )
        report_lines.extend(validation_errors)
    else:
        report_lines.append("\n✅ All endpoints match specification!")

    if unused_patterns:
        report_lines.append(f"\n⚠️  UNUSED SPEC PATTERNS ({len(unused_patterns)}):")
        for pattern in unused_patterns:
            report_lines.append(f"  - {pattern}")
        report_lines.append(
            "  These patterns are defined in the spec but not found in code."
        )

    # Summary
    report_lines.extend(
        [
            f"\n{'='*80}",
            f"Summary: {len(all_endpoints) - len(unmatched_endpoints)}/{len(all_endpoints)} endpoints valid",
            f"{'='*80}\n",
        ]
    )

    # Print the full report (visible with pytest -s or on failure)
    report = "\n".join(report_lines)
    print(report)

    # Fail if there are validation errors
    if validation_errors:
        pytest.fail(
            f"Found {len(validation_errors)} endpoints that don't match the specification. See report above."
        )
|
||||
|
||||
|
||||
def test_all_providers_have_specs():
    """Test that all provider directories have corresponding JSON specifications."""
    # Navigate from test/blocks to backend/blocks; specs live in ./test_data.
    test_dir = Path(__file__).parent
    backend_dir = test_dir.parent.parent
    blocks_dir = backend_dir / "backend" / "blocks"
    # test_data is now in the test directory
    test_data_dir = test_dir / "test_data"

    # Only real provider packages count: skip private/hidden entries and
    # require a blocks.py module inside the directory.
    provider_dirs = [
        entry.name
        for entry in blocks_dir.iterdir()
        if entry.is_dir()
        and not entry.name.startswith(("_", "."))
        and entry.name != "test_data"
        and (entry / "blocks.py").exists()
    ]

    # Every provider package must have a matching <provider>.json spec.
    missing_specs = [
        provider
        for provider in provider_dirs
        if not (test_data_dir / f"{provider}.json").exists()
    ]

    if missing_specs:
        pytest.fail(
            f"Missing JSON specifications for providers: {', '.join(missing_specs)}"
        )
|
||||
|
||||
|
||||
def test_spec_json_validity():
    """Test that all JSON specification files are valid and have required fields."""
    # test_data is now in the test directory
    test_data_dir = Path(__file__).parent / "test_data"

    for spec_file in list(test_data_dir.glob("*.json")):
        # Load and validate JSON
        try:
            with open(spec_file, "r") as f:
                spec = json.load(f)
        except json.JSONDecodeError as e:
            pytest.fail(f"Invalid JSON in {spec_file.name}: {e}")

        # Top-level fields every spec must declare.
        missing_fields = [
            field
            for field in ["provider", "auth_type", "api_calls"]
            if field not in spec
        ]
        if missing_fields:
            pytest.fail(
                f"{spec_file.name} missing required fields: {', '.join(missing_fields)}"
            )

        # Validate api_calls structure: each entry must be individually
        # addressable (name), routable (method) and matchable (url_pattern).
        for i, call in enumerate(spec.get("api_calls", [])):
            missing = [
                field
                for field in ["name", "method", "url_pattern"]
                if field not in call
            ]
            if missing:
                pytest.fail(
                    f"{spec_file.name}: api_calls[{i}] missing required fields: {', '.join(missing)}"
                )
|
||||
251
autogpt_platform/backend/test/blocks/test_block_verification.py
Normal file
251
autogpt_platform/backend/test/blocks/test_block_verification.py
Normal file
@@ -0,0 +1,251 @@
|
||||
"""
|
||||
Test to verify all integration blocks can be instantiated and have valid schemas.
|
||||
This test runs as part of the test suite and doesn't make actual API calls.
|
||||
"""
|
||||
|
||||
from typing import List, Type
|
||||
|
||||
import pytest
|
||||
|
||||
from backend.sdk import Block
|
||||
|
||||
|
||||
class TestBlockVerification:
    """Verify that all integration blocks are properly structured.

    These tests only instantiate blocks and inspect their Pydantic
    schemas; no actual API calls are made.
    """

    # Providers whose block modules this suite knows how to load.
    SUPPORTED_PROVIDERS = ("airtable", "baas", "elevenlabs")

    @staticmethod
    def _model_fields(schema) -> dict:
        """Return the field mapping of a Pydantic model (v1 or v2).

        Pydantic v2 exposes ``model_fields``; v1 uses ``__fields__``.
        Centralizing this check avoids repeating the compatibility
        expression in every test.
        """
        return (
            schema.model_fields
            if hasattr(schema, "model_fields")
            else schema.__fields__
        )

    def get_provider_blocks(self, provider_name: str) -> List[Type[Block]]:
        """Get all block classes exported by a provider module.

        Returns an empty list for providers this suite does not know
        about (matching the original fall-through behavior).
        """
        if provider_name not in self.SUPPORTED_PROVIDERS:
            return []

        import importlib

        module = importlib.import_module(f"backend.blocks.{provider_name}")

        # A class is considered a block if its exported name contains
        # "Block" (same heuristic the providers' __all__ lists follow).
        return [
            getattr(module, attr_name)
            for attr_name in module.__all__
            if "Block" in attr_name
        ]

    @pytest.mark.parametrize("provider", ["airtable", "baas", "elevenlabs"])
    def test_provider_blocks_instantiate(self, provider: str):
        """Test that all blocks from a provider can be instantiated."""
        blocks = self.get_provider_blocks(provider)
        assert len(blocks) > 0, f"No blocks found for provider {provider}"

        for block_class in blocks:
            # Instantiation itself should not raise.
            block = block_class()
            assert block is not None
            assert hasattr(block, "id")
            assert hasattr(block, "description")
            assert hasattr(block, "run")

    def test_airtable_blocks_structure(self):
        """Test Airtable blocks have proper structure."""
        from backend.blocks.airtable.records import AirtableListRecordsBlock

        block = AirtableListRecordsBlock()

        # Check basic attributes
        assert block.id is not None
        assert len(block.id) == 36  # UUID format
        assert block.description is not None
        assert "list" in block.description.lower()

        # Check input schema fields using Pydantic model fields
        assert hasattr(block, "Input")
        input_fields = self._model_fields(block.Input)
        assert "base_id" in input_fields
        assert "table_id_or_name" in input_fields
        assert "credentials" in input_fields

        # Check output schema fields
        assert hasattr(block, "Output")
        output_fields = self._model_fields(block.Output)
        assert "records" in output_fields
        assert "offset" in output_fields

    def test_baas_blocks_structure(self):
        """Test Meeting BaaS blocks have proper structure."""
        from backend.blocks.baas.bots import BaasBotJoinMeetingBlock

        block = BaasBotJoinMeetingBlock()

        # Check basic attributes
        assert block.id is not None
        assert block.description is not None
        assert "join" in block.description.lower()

        # Check input schema fields
        assert hasattr(block, "Input")
        input_fields = self._model_fields(block.Input)
        assert "meeting_url" in input_fields
        assert "bot_name" in input_fields  # Changed from bot_config to bot_name
        assert "bot_image" in input_fields  # Additional bot configuration field
        assert "credentials" in input_fields

        # Check output schema fields
        assert hasattr(block, "Output")
        output_fields = self._model_fields(block.Output)
        assert "bot_id" in output_fields

    def test_elevenlabs_blocks_structure(self):
        """Test ElevenLabs blocks have proper structure."""
        from backend.blocks.elevenlabs.speech import ElevenLabsGenerateSpeechBlock

        block = ElevenLabsGenerateSpeechBlock()

        # Check basic attributes
        assert block.id is not None
        assert block.description is not None
        assert "speech" in block.description.lower()

        # Check input schema fields
        assert hasattr(block, "Input")
        input_fields = self._model_fields(block.Input)
        assert "text" in input_fields
        assert "voice_id" in input_fields
        assert "credentials" in input_fields

        # Check output schema fields
        assert hasattr(block, "Output")
        output_fields = self._model_fields(block.Output)
        assert "audio" in output_fields

    def test_webhook_blocks_structure(self):
        """Test webhook trigger blocks have proper structure."""
        from backend.blocks.airtable.triggers import AirtableWebhookTriggerBlock
        from backend.blocks.baas.triggers import BaasOnMeetingEventBlock
        from backend.blocks.elevenlabs.triggers import ElevenLabsWebhookTriggerBlock

        webhook_blocks = [
            AirtableWebhookTriggerBlock(),
            BaasOnMeetingEventBlock(),
            ElevenLabsWebhookTriggerBlock(),
        ]

        for block in webhook_blocks:
            # Check input fields
            input_fields = self._model_fields(block.Input)
            assert (
                "webhook_url" in input_fields
            )  # Changed from webhook_id to webhook_url
            assert "credentials" in input_fields  # Changed from secret to credentials
            assert "payload" in input_fields  # Webhook payload field

            # Check output fields exist (different blocks have different
            # output structures, so we only verify the schema is readable).
            _ = self._model_fields(block.Output)

    def test_block_run_method_is_async(self):
        """Test that all blocks have async run methods."""
        import inspect

        from backend.blocks.airtable.metadata import AirtableListBasesBlock
        from backend.blocks.baas.calendars import BaasCalendarListAllBlock
        from backend.blocks.elevenlabs.voices import ElevenLabsListVoicesBlock

        block_classes = [
            AirtableListBasesBlock,
            BaasCalendarListAllBlock,
            ElevenLabsListVoicesBlock,
        ]

        for block_class in block_classes:
            # Check that run method exists
            assert hasattr(
                block_class, "run"
            ), f"{block_class.__name__} does not have a 'run' method"

            # Create an instance to check the bound method
            block_instance = block_class()
            run_method = block_instance.run
            assert callable(run_method), f"{block_class.__name__}.run is not callable"

            # iscoroutinefunction is robust where source-string inspection
            # is not: it works when source is unavailable and, combined
            # with unwrap, sees through functools.wraps decorators.
            assert inspect.iscoroutinefunction(
                inspect.unwrap(run_method)
            ), f"{block_class.__name__}.run is not defined as async def"

    def test_blocks_use_correct_credential_types(self):
        """Test that blocks use appropriate credential types."""
        from backend.blocks.airtable.records import AirtableGetRecordBlock
        from backend.blocks.baas.events import BaasEventListBlock
        from backend.blocks.elevenlabs.utility import ElevenLabsListModelsBlock

        # All these providers use API key authentication
        blocks = [
            AirtableGetRecordBlock(),
            BaasEventListBlock(),
            ElevenLabsListModelsBlock(),
        ]

        for block in blocks:
            # Check that credentials field exists
            input_fields = self._model_fields(block.Input)
            assert "credentials" in input_fields

            # Get the field info
            field = input_fields["credentials"]
            assert field is not None
|
||||
234
autogpt_platform/backend/test/blocks/test_data/airtable.json
Normal file
234
autogpt_platform/backend/test/blocks/test_data/airtable.json
Normal file
@@ -0,0 +1,234 @@
|
||||
{
|
||||
"provider": "airtable",
|
||||
"auth_type": "api_key",
|
||||
"test_api_key": "test-airtable-key",
|
||||
"base_url": "https://api.airtable.com/v0",
|
||||
"api_calls": [
|
||||
{
|
||||
"name": "list_bases",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.airtable.com/v0/meta/bases",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}"
|
||||
},
|
||||
"query_params": {
|
||||
"offset": null
|
||||
},
|
||||
"response": {
|
||||
"bases": [
|
||||
{
|
||||
"id": "appTest123",
|
||||
"name": "Test Base",
|
||||
"permissionLevel": "create"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "list_records",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.airtable.com/v0/{base_id}/{table_id}",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}"
|
||||
},
|
||||
"query_params": {
|
||||
"pageSize": null,
|
||||
"offset": null,
|
||||
"view": null,
|
||||
"filterByFormula": null,
|
||||
"sort": null
|
||||
},
|
||||
"response": {
|
||||
"records": [
|
||||
{
|
||||
"id": "recTest123",
|
||||
"createdTime": "2024-01-01T00:00:00.000Z",
|
||||
"fields": {
|
||||
"Name": "Test Record",
|
||||
"Status": "Active"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "get_record",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.airtable.com/v0/{base_id}/{table_id}/{record_id}",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"id": "recTest123",
|
||||
"createdTime": "2024-01-01T00:00:00.000Z",
|
||||
"fields": {
|
||||
"Name": "Test Record",
|
||||
"Status": "Active"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "create_records",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.airtable.com/v0/{base_id}/{table_id}",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"records": "{{array}}",
|
||||
"typecast": null
|
||||
},
|
||||
"response": {
|
||||
"records": [
|
||||
{
|
||||
"id": "recNew123",
|
||||
"createdTime": "2024-01-01T00:00:00.000Z",
|
||||
"fields": {
|
||||
"Name": "New Record"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "update_records",
|
||||
"method": "PATCH",
|
||||
"url_pattern": "https://api.airtable.com/v0/{base_id}/{table_id}",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"records": "{{array}}",
|
||||
"typecast": null
|
||||
},
|
||||
"response": {
|
||||
"records": [
|
||||
{
|
||||
"id": "recTest123",
|
||||
"fields": {
|
||||
"Status": "Updated"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "delete_records",
|
||||
"method": "DELETE",
|
||||
"url_pattern": "https://api.airtable.com/v0/{base_id}/{table_id}",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}"
|
||||
},
|
||||
"query_params": {
|
||||
"records[]": "{{array}}"
|
||||
},
|
||||
"response": {
|
||||
"records": [
|
||||
{
|
||||
"id": "recTest123",
|
||||
"deleted": true
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "list_tables",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.airtable.com/v0/meta/bases/{base_id}/tables",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"tables": [
|
||||
{
|
||||
"id": "tblTest123",
|
||||
"name": "Test Table",
|
||||
"primaryFieldId": "fldPrimary",
|
||||
"fields": []
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "create_webhook",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.airtable.com/v0/bases/{base_id}/webhooks",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"notificationUrl": "{{url}}",
|
||||
"specification": {
|
||||
"options": {
|
||||
"filters": "{{object}}"
|
||||
}
|
||||
}
|
||||
},
|
||||
"response": {
|
||||
"id": "achTest123",
|
||||
"macSecretBase64": "testSecret",
|
||||
"expirationTime": "2025-01-01T00:00:00.000Z"
|
||||
}
|
||||
}
|
||||
],
|
||||
"webhooks": {
|
||||
"allowed_webhook_types": ["table_change"],
|
||||
"resource_format_pattern": "{base_id}/{table_id_or_name}",
|
||||
"event_types": ["tableData", "tableFields", "tableMetadata"],
|
||||
"description": "Airtable webhooks monitor changes to bases and tables",
|
||||
"supports_auto_setup": true,
|
||||
"webhook_blocks": ["AirtableWebhookTriggerBlock"]
|
||||
},
|
||||
"test_scenarios": {
|
||||
"AirtableListRecordsBlock": [
|
||||
{
|
||||
"name": "List records successfully",
|
||||
"input": {
|
||||
"base_id": "appTest123",
|
||||
"table_id_or_name": "tblTest123"
|
||||
},
|
||||
"expected_calls": ["list_records"],
|
||||
"expected_outputs": {
|
||||
"records": [
|
||||
{
|
||||
"id": "recTest123",
|
||||
"createdTime": "2024-01-01T00:00:00.000Z",
|
||||
"fields": {
|
||||
"Name": "Test Record",
|
||||
"Status": "Active"
|
||||
}
|
||||
}
|
||||
],
|
||||
"offset": ""
|
||||
}
|
||||
}
|
||||
],
|
||||
"AirtableCreateRecordsBlock": [
|
||||
{
|
||||
"name": "Create single record",
|
||||
"input": {
|
||||
"base_id": "appTest123",
|
||||
"table_id_or_name": "tblTest123",
|
||||
"records": [
|
||||
{"Name": "New Record"}
|
||||
]
|
||||
},
|
||||
"expected_calls": ["create_records"],
|
||||
"expected_outputs": {
|
||||
"created_records": [
|
||||
{
|
||||
"id": "recNew123",
|
||||
"createdTime": "2024-01-01T00:00:00.000Z",
|
||||
"fields": {
|
||||
"Name": "New Record"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
312
autogpt_platform/backend/test/blocks/test_data/baas.json
Normal file
312
autogpt_platform/backend/test/blocks/test_data/baas.json
Normal file
@@ -0,0 +1,312 @@
|
||||
{
|
||||
"provider": "baas",
|
||||
"auth_type": "api_key",
|
||||
"test_api_key": "test-baas-key",
|
||||
"base_url": "https://api.meetingbaas.com",
|
||||
"api_calls": [
|
||||
{
|
||||
"name": "bot_join_meeting",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.meetingbaas.com/bots",
|
||||
"headers": {
|
||||
"x-meeting-baas-api-key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"meeting_url": "{{url}}",
|
||||
"bot_name": "{{string}}",
|
||||
"bot_image": null,
|
||||
"entry_message": null,
|
||||
"reserved": null,
|
||||
"deduplication_key": null,
|
||||
"mp4": null,
|
||||
"real_time_transcription": null,
|
||||
"real_time_media": null,
|
||||
"speech_to_text": null
|
||||
},
|
||||
"response": {
|
||||
"bot_id": "bot_test123",
|
||||
"meeting_url": "https://zoom.us/j/123456789",
|
||||
"status": "joining"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "bot_leave_meeting",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.meetingbaas.com/bots/{bot_id}/leave",
|
||||
"headers": {
|
||||
"x-meeting-baas-api-key": "{{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"bot_id": "bot_test123",
|
||||
"status": "leaving"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "bot_get_meeting_data",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.meetingbaas.com/bots/{bot_id}",
|
||||
"headers": {
|
||||
"x-meeting-baas-api-key": "{{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"bot_id": "bot_test123",
|
||||
"meeting_url": "https://zoom.us/j/123456789",
|
||||
"status": "complete",
|
||||
"mp4": "https://example.com/recording.mp4",
|
||||
"transcript": [
|
||||
{
|
||||
"speaker": "John",
|
||||
"text": "Hello everyone",
|
||||
"timestamp": 0
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "bot_get_screenshots",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.meetingbaas.com/bots/{bot_id}/screenshots",
|
||||
"headers": {
|
||||
"x-meeting-baas-api-key": "{{api_key}}"
|
||||
},
|
||||
"query_params": {
|
||||
"offset": null,
|
||||
"limit": null
|
||||
},
|
||||
"response": {
|
||||
"screenshots": [
|
||||
{
|
||||
"timestamp": 1000,
|
||||
"url": "https://example.com/screenshot1.jpg"
|
||||
}
|
||||
],
|
||||
"total": 1
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "bot_delete_recording",
|
||||
"method": "DELETE",
|
||||
"url_pattern": "https://api.meetingbaas.com/bots/{bot_id}/recordings",
|
||||
"headers": {
|
||||
"x-meeting-baas-api-key": "{{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"success": true
|
||||
},
|
||||
"status": 204
|
||||
},
|
||||
{
|
||||
"name": "bot_delete",
|
||||
"method": "DELETE",
|
||||
"url_pattern": "https://api.meetingbaas.com/bots/{bot_id}",
|
||||
"headers": {
|
||||
"x-meeting-baas-api-key": "{{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"success": true
|
||||
},
|
||||
"status": 204
|
||||
},
|
||||
{
|
||||
"name": "calendar_connect",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.meetingbaas.com/calendars",
|
||||
"headers": {
|
||||
"x-meeting-baas-api-key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"oauth_client_id": "{{string}}",
|
||||
"oauth_client_secret": "{{string}}",
|
||||
"oauth_refresh_token": "{{string}}",
|
||||
"platform": "{{string}}",
|
||||
"calendar_email": null
|
||||
},
|
||||
"response": {
|
||||
"uuid": "cal_test123",
|
||||
"email": "test@example.com",
|
||||
"platform": "google"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "calendar_list",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.meetingbaas.com/calendars",
|
||||
"headers": {
|
||||
"x-meeting-baas-api-key": "{{api_key}}"
|
||||
},
|
||||
"response": [
|
||||
{
|
||||
"uuid": "cal_test123",
|
||||
"email": "test@example.com",
|
||||
"platform": "google"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "event_list",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.meetingbaas.com/calendar_events",
|
||||
"headers": {
|
||||
"x-meeting-baas-api-key": "{{api_key}}"
|
||||
},
|
||||
"query_params": {
|
||||
"calendar_id": "{{string}}",
|
||||
"start_date_gte": null,
|
||||
"start_date_lte": null,
|
||||
"cursor": null
|
||||
},
|
||||
"response": {
|
||||
"events": [
|
||||
{
|
||||
"uuid": "evt_test123",
|
||||
"calendar_id": "cal_test123",
|
||||
"title": "Test Meeting",
|
||||
"start_time": "2024-01-01T10:00:00Z",
|
||||
"meeting_url": "https://zoom.us/j/123456789"
|
||||
}
|
||||
],
|
||||
"next": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "event_schedule_bot",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.meetingbaas.com/calendar_events/{event_id}/bot",
|
||||
"headers": {
|
||||
"x-meeting-baas-api-key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"query_params": {
|
||||
"all_occurrences": null
|
||||
},
|
||||
"body": {
|
||||
"bot_name": "{{string}}",
|
||||
"bot_image": null,
|
||||
"entry_message": null
|
||||
},
|
||||
"response": {
|
||||
"uuid": "evt_test123",
|
||||
"bot_scheduled": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "event_unschedule_bot",
|
||||
"method": "DELETE",
|
||||
"url_pattern": "https://api.meetingbaas.com/calendar_events/{event_id}/bot",
|
||||
"headers": {
|
||||
"x-meeting-baas-api-key": "{{api_key}}"
|
||||
},
|
||||
"query_params": {
|
||||
"all_occurrences": null
|
||||
},
|
||||
"response": {
|
||||
"uuid": "evt_test123",
|
||||
"bot_scheduled": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "event_patch_bot",
|
||||
"method": "PATCH",
|
||||
"url_pattern": "https://api.meetingbaas.com/calendar_events/{event_id}/bot",
|
||||
"headers": {
|
||||
"x-meeting-baas-api-key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"query_params": {
|
||||
"all_occurrences": null
|
||||
},
|
||||
"body": "{{object}}",
|
||||
"response": {
|
||||
"uuid": "evt_test123",
|
||||
"bot_config": "{{object}}"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "calendar_update",
|
||||
"method": "PATCH",
|
||||
"url_pattern": "https://api.meetingbaas.com/calendars/{calendar_id}",
|
||||
"headers": {
|
||||
"x-meeting-baas-api-key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": "{{object}}",
|
||||
"response": {
|
||||
"uuid": "cal_test123",
|
||||
"updated": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "calendar_delete",
|
||||
"method": "DELETE",
|
||||
"url_pattern": "https://api.meetingbaas.com/calendars/{calendar_id}",
|
||||
"headers": {
|
||||
"x-meeting-baas-api-key": "{{api_key}}"
|
||||
},
|
||||
"response": {},
|
||||
"status": 204
|
||||
},
|
||||
{
|
||||
"name": "calendar_resync_all",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.meetingbaas.com/internal/calendar/resync_all",
|
||||
"headers": {
|
||||
"x-meeting-baas-api-key": "{{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"synced_calendars": ["cal_test123"],
|
||||
"errors": []
|
||||
}
|
||||
}
|
||||
],
|
||||
"webhooks": {
|
||||
"allowed_webhook_types": ["meeting_event", "calendar_event"],
|
||||
"resource_format_pattern": "",
|
||||
"event_types": [
|
||||
"bot.status_change",
|
||||
"complete",
|
||||
"failed",
|
||||
"transcription_complete",
|
||||
"event.added",
|
||||
"event.updated",
|
||||
"event.deleted",
|
||||
"calendar.synced"
|
||||
],
|
||||
"description": "Meeting BaaS webhooks for meeting and calendar events",
|
||||
"supports_auto_setup": true,
|
||||
"webhook_blocks": ["BaasOnMeetingEventBlock", "BaasOnCalendarEventBlock"]
|
||||
},
|
||||
"test_scenarios": {
|
||||
"BaasBotJoinMeetingBlock": [
|
||||
{
|
||||
"name": "Join meeting successfully",
|
||||
"input": {
|
||||
"meeting_url": "https://zoom.us/j/123456789",
|
||||
"bot_name": "Test Bot"
|
||||
},
|
||||
"expected_calls": ["bot_join_meeting"],
|
||||
"expected_outputs": {
|
||||
"bot_id": "bot_test123",
|
||||
"status": "joining"
|
||||
}
|
||||
}
|
||||
],
|
||||
"BaasCalendarListAllBlock": [
|
||||
{
|
||||
"name": "List calendars",
|
||||
"input": {},
|
||||
"expected_calls": ["calendar_list"],
|
||||
"expected_outputs": {
|
||||
"calendars": [
|
||||
{
|
||||
"uuid": "cal_test123",
|
||||
"email": "test@example.com",
|
||||
"platform": "google"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
49
autogpt_platform/backend/test/blocks/test_data/compass.json
Normal file
49
autogpt_platform/backend/test/blocks/test_data/compass.json
Normal file
@@ -0,0 +1,49 @@
|
||||
{
|
||||
"provider": "compass",
|
||||
"auth_type": "none",
|
||||
"test_api_key": "",
|
||||
"base_url": "",
|
||||
"api_calls": [],
|
||||
"webhooks": {
|
||||
"allowed_webhook_types": ["transcription"],
|
||||
"resource_format_pattern": "",
|
||||
"event_types": [
|
||||
"transcription.completed"
|
||||
],
|
||||
"description": "Compass AI hardware transcription webhooks",
|
||||
"supports_auto_setup": false,
|
||||
"webhook_blocks": ["CompassAITriggerBlock"]
|
||||
},
|
||||
"test_scenarios": {
|
||||
"CompassAITriggerBlock": [
|
||||
{
|
||||
"name": "Receive transcription",
|
||||
"input": {
|
||||
"payload": {
|
||||
"date": "2024-01-01",
|
||||
"transcription": "This is a test transcription from Compass AI hardware.",
|
||||
"transcriptions": [
|
||||
{
|
||||
"text": "This is a test",
|
||||
"speaker": "Speaker 1",
|
||||
"start": 0.0,
|
||||
"end": 2.0,
|
||||
"duration": 2.0
|
||||
},
|
||||
{
|
||||
"text": "transcription from Compass AI hardware.",
|
||||
"speaker": "Speaker 1",
|
||||
"start": 2.0,
|
||||
"end": 5.0,
|
||||
"duration": 3.0
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"expected_outputs": {
|
||||
"transcription": "This is a test transcription from Compass AI hardware."
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
249
autogpt_platform/backend/test/blocks/test_data/elevenlabs.json
Normal file
249
autogpt_platform/backend/test/blocks/test_data/elevenlabs.json
Normal file
@@ -0,0 +1,249 @@
|
||||
{
|
||||
"provider": "elevenlabs",
|
||||
"auth_type": "api_key",
|
||||
"test_api_key": "test-elevenlabs-key",
|
||||
"base_url": "https://api.elevenlabs.io/v1",
|
||||
"api_calls": [
|
||||
{
|
||||
"name": "list_voices",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.elevenlabs.io/v1/voices",
|
||||
"headers": {
|
||||
"xi-api-key": "{{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"voices": [
|
||||
{
|
||||
"voice_id": "voice_test123",
|
||||
"name": "Test Voice",
|
||||
"category": "generated",
|
||||
"labels": {
|
||||
"accent": "american",
|
||||
"gender": "male"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "list_voices_v2",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.elevenlabs.io/v2/voices",
|
||||
"headers": {
|
||||
"xi-api-key": "{{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"voices": [
|
||||
{
|
||||
"voice_id": "voice_test123",
|
||||
"name": "Test Voice",
|
||||
"category": "generated",
|
||||
"labels": {
|
||||
"accent": "american",
|
||||
"gender": "male"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "get_voice",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.elevenlabs.io/v1/voices/{voice_id}",
|
||||
"headers": {
|
||||
"xi-api-key": "{{api_key}}"
|
||||
},
|
||||
"query_params": {
|
||||
"with_settings": null
|
||||
},
|
||||
"response": {
|
||||
"voice_id": "voice_test123",
|
||||
"name": "Test Voice",
|
||||
"samples": [],
|
||||
"category": "generated",
|
||||
"settings": {
|
||||
"stability": 0.5,
|
||||
"similarity_boost": 0.5
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "text_to_speech",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}",
|
||||
"headers": {
|
||||
"xi-api-key": "{{api_key}}",
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "audio/mpeg"
|
||||
},
|
||||
"body": {
|
||||
"text": "{{string}}",
|
||||
"model_id": null,
|
||||
"voice_settings": null
|
||||
},
|
||||
"response": "binary_audio_data",
|
||||
"status": 200
|
||||
},
|
||||
{
|
||||
"name": "text_to_speech_with_timestamps",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}/with-timestamps",
|
||||
"headers": {
|
||||
"xi-api-key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"text": "{{string}}",
|
||||
"model_id": null,
|
||||
"voice_settings": null
|
||||
},
|
||||
"response": {
|
||||
"audio_base64": "base64_encoded_audio",
|
||||
"alignment": {
|
||||
"characters": ["H", "e", "l", "l", "o"],
|
||||
"character_start_times_seconds": [0.0, 0.1, 0.2, 0.3, 0.4],
|
||||
"character_end_times_seconds": [0.1, 0.2, 0.3, 0.4, 0.5]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "speech_to_text",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.elevenlabs.io/v1/speech-to-text",
|
||||
"headers": {
|
||||
"xi-api-key": "{{api_key}}"
|
||||
},
|
||||
"body": "multipart/form-data",
|
||||
"response": {
|
||||
"status": "processing",
|
||||
"id": "stt_test123"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "speech_to_text_result",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.elevenlabs.io/v1/speech-to-text/{id}",
|
||||
"headers": {
|
||||
"xi-api-key": "{{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"status": "completed",
|
||||
"text": "Hello world",
|
||||
"chunks": [
|
||||
{
|
||||
"text": "Hello world",
|
||||
"timestamp": [0.0, 1.0]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "create_voice",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.elevenlabs.io/v1/voices/add",
|
||||
"headers": {
|
||||
"xi-api-key": "{{api_key}}"
|
||||
},
|
||||
"body": "multipart/form-data",
|
||||
"response": {
|
||||
"voice_id": "voice_new123"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "delete_voice",
|
||||
"method": "DELETE",
|
||||
"url_pattern": "https://api.elevenlabs.io/v1/voices/{voice_id}",
|
||||
"headers": {
|
||||
"xi-api-key": "{{api_key}}"
|
||||
},
|
||||
"response": {},
|
||||
"status": 200
|
||||
},
|
||||
{
|
||||
"name": "list_models",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.elevenlabs.io/v1/models",
|
||||
"headers": {
|
||||
"xi-api-key": "{{api_key}}"
|
||||
},
|
||||
"response": [
|
||||
{
|
||||
"model_id": "eleven_monolingual_v1",
|
||||
"name": "Eleven English v1",
|
||||
"can_do_text_to_speech": true,
|
||||
"can_do_voice_conversion": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "get_usage",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.elevenlabs.io/v1/usage/character-stats",
|
||||
"headers": {
|
||||
"xi-api-key": "{{api_key}}"
|
||||
},
|
||||
"query_params": {
|
||||
"start_unix": null,
|
||||
"end_unix": null
|
||||
},
|
||||
"response": {
|
||||
"usage": [
|
||||
{
|
||||
"date": "2024-01-01",
|
||||
"character_count": 1000
|
||||
}
|
||||
],
|
||||
"total_character_count": 1000
|
||||
}
|
||||
}
|
||||
],
|
||||
"webhooks": {
|
||||
"allowed_webhook_types": ["notification"],
|
||||
"resource_format_pattern": "",
|
||||
"event_types": [
|
||||
"speech_to_text_completed",
|
||||
"post_call_transcription",
|
||||
"voice_removal_notice",
|
||||
"voice_removed",
|
||||
"voice_removal_notice_withdrawn"
|
||||
],
|
||||
"description": "ElevenLabs webhook notifications for STT, voice events, and conversational AI",
|
||||
"supports_auto_setup": true,
|
||||
"webhook_blocks": ["ElevenLabsWebhookTriggerBlock"]
|
||||
},
|
||||
"test_scenarios": {
|
||||
"ElevenLabsListVoicesBlock": [
|
||||
{
|
||||
"name": "List all voices",
|
||||
"input": {},
|
||||
"expected_calls": ["list_voices"],
|
||||
"expected_outputs": {
|
||||
"voices": [
|
||||
{
|
||||
"voice_id": "voice_test123",
|
||||
"name": "Test Voice",
|
||||
"category": "generated",
|
||||
"labels": {
|
||||
"accent": "american",
|
||||
"gender": "male"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"ElevenLabsGenerateSpeechBlock": [
|
||||
{
|
||||
"name": "Generate speech from text",
|
||||
"input": {
|
||||
"text": "Hello world",
|
||||
"voice_id": "voice_test123"
|
||||
},
|
||||
"expected_calls": ["text_to_speech"],
|
||||
"expected_outputs": {
|
||||
"audio": "binary_audio_data"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
242
autogpt_platform/backend/test/blocks/test_data/exa.json
Normal file
242
autogpt_platform/backend/test/blocks/test_data/exa.json
Normal file
@@ -0,0 +1,242 @@
|
||||
{
|
||||
"provider": "exa",
|
||||
"auth_type": "api_key",
|
||||
"test_api_key": "test-exa-key",
|
||||
"base_url": "https://api.exa.ai",
|
||||
"api_calls": [
|
||||
{
|
||||
"name": "search",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.exa.ai/search",
|
||||
"headers": {
|
||||
"x-api-key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"query": "{{string}}",
|
||||
"numResults": null,
|
||||
"searchType": null,
|
||||
"contents": null,
|
||||
"useAutoprompt": null,
|
||||
"category": null,
|
||||
"startPublishedDate": null,
|
||||
"endPublishedDate": null,
|
||||
"startCrawledDate": null,
|
||||
"endCrawledDate": null,
|
||||
"includeDomains": null,
|
||||
"excludeDomains": null
|
||||
},
|
||||
"response": {
|
||||
"results": [
|
||||
{
|
||||
"id": "result_test123",
|
||||
"url": "https://example.com/article",
|
||||
"title": "Test Article",
|
||||
"score": 0.95,
|
||||
"publishedDate": "2024-01-01",
|
||||
"text": "Article content..."
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "find_similar",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.exa.ai/findSimilar",
|
||||
"headers": {
|
||||
"x-api-key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"url": "{{url}}",
|
||||
"numResults": null,
|
||||
"contents": null,
|
||||
"category": null,
|
||||
"startPublishedDate": null,
|
||||
"endPublishedDate": null,
|
||||
"startCrawledDate": null,
|
||||
"endCrawledDate": null,
|
||||
"includeDomains": null,
|
||||
"excludeDomains": null,
|
||||
"excludeSourceDomain": null
|
||||
},
|
||||
"response": {
|
||||
"results": [
|
||||
{
|
||||
"id": "similar_test123",
|
||||
"url": "https://example.com/similar",
|
||||
"title": "Similar Article",
|
||||
"score": 0.90
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "get_contents",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.exa.ai/contents",
|
||||
"headers": {
|
||||
"x-api-key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"ids": "{{array}}"
|
||||
},
|
||||
"response": {
|
||||
"results": [
|
||||
{
|
||||
"id": "result_test123",
|
||||
"url": "https://example.com/article",
|
||||
"title": "Test Article",
|
||||
"text": "Full article content...",
|
||||
"author": "Test Author"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "create_webset",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.exa.ai/websets/v0/websets",
|
||||
"headers": {
|
||||
"x-api-key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"name": "{{string}}",
|
||||
"urls": "{{array}}"
|
||||
},
|
||||
"response": {
|
||||
"id": "webset_test123",
|
||||
"name": "Test Webset",
|
||||
"urlCount": 10
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "get_webset",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.exa.ai/websets/v0/websets/{webset_id}",
|
||||
"headers": {
|
||||
"x-api-key": "{{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"id": "webset_test123",
|
||||
"name": "Test Webset",
|
||||
"urls": ["https://example.com/1", "https://example.com/2"],
|
||||
"createdAt": "2024-01-01T00:00:00Z"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "create_webhook",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.exa.ai/v0/webhooks",
|
||||
"headers": {
|
||||
"x-api-key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"url": "{{url}}",
|
||||
"secret": "{{string}}",
|
||||
"websetId": "{{string}}"
|
||||
},
|
||||
"response": {
|
||||
"id": "webhook_test123",
|
||||
"url": "https://example.com/webhook",
|
||||
"websetId": "webset_test123"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "delete_webhook",
|
||||
"method": "DELETE",
|
||||
"url_pattern": "https://api.exa.ai/v0/webhooks/{webhook_id}",
|
||||
"headers": {
|
||||
"x-api-key": "{{api_key}}"
|
||||
},
|
||||
"response": {},
|
||||
"status": 204
|
||||
},
|
||||
{
|
||||
"name": "answer_question",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.exa.ai/answer",
|
||||
"headers": {
|
||||
"x-api-key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"question": "{{string}}",
|
||||
"answer": "{{string}}",
|
||||
"urls": "{{array}}"
|
||||
},
|
||||
"response": {
|
||||
"answer": "This is the generated answer",
|
||||
"sources": ["https://example.com/source1", "https://example.com/source2"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "cancel_webset",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.exa.ai/websets/v0/websets/{webset_id}/cancel",
|
||||
"headers": {
|
||||
"x-api-key": "{{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"id": "webset_test123",
|
||||
"status": "cancelled"
|
||||
}
|
||||
}
|
||||
],
|
||||
"webhooks": {
|
||||
"allowed_webhook_types": ["webset"],
|
||||
"resource_format_pattern": "{webset_id}",
|
||||
"event_types": [
|
||||
"webset.created",
|
||||
"webset.deleted",
|
||||
"webset.paused",
|
||||
"webset.idle",
|
||||
"webset.search.created",
|
||||
"webset.search.completed",
|
||||
"webset.search.canceled",
|
||||
"webset.search.updated",
|
||||
"webset.item.created",
|
||||
"webset.item.enriched",
|
||||
"webset.export.created",
|
||||
"webset.export.completed",
|
||||
"import.created",
|
||||
"import.completed",
|
||||
"import.processing"
|
||||
],
|
||||
"description": "Exa webhooks for webset events and updates",
|
||||
"supports_auto_setup": true,
|
||||
"webhook_blocks": ["ExaWebsetWebhookBlock"]
|
||||
},
|
||||
"test_scenarios": {
|
||||
"ExaSearchBlock": [
|
||||
{
|
||||
"name": "Search for content",
|
||||
"input": {
|
||||
"query": "artificial intelligence",
|
||||
"num_results": 10
|
||||
},
|
||||
"expected_calls": ["search"],
|
||||
"expected_outputs": {
|
||||
"results": true
|
||||
}
|
||||
}
|
||||
],
|
||||
"ExaCreateWebsetBlock": [
|
||||
{
|
||||
"name": "Create a new webset",
|
||||
"input": {
|
||||
"name": "Test Webset",
|
||||
"urls": ["https://example.com/1", "https://example.com/2"]
|
||||
},
|
||||
"expected_calls": ["create_webset"],
|
||||
"expected_outputs": {
|
||||
"webset_id": true,
|
||||
"url_count": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
372
autogpt_platform/backend/test/blocks/test_data/gem.json
Normal file
372
autogpt_platform/backend/test/blocks/test_data/gem.json
Normal file
@@ -0,0 +1,372 @@
|
||||
{
|
||||
"provider": "gem",
|
||||
"auth_type": "api_key",
|
||||
"test_api_key": "test-gem-key",
|
||||
"base_url": "https://api.gem.com/v0",
|
||||
"api_calls": [
|
||||
{
|
||||
"name": "list_users",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.gem.com/v0/users",
|
||||
"headers": {
|
||||
"X-API-Key": "{{api_key}}"
|
||||
},
|
||||
"query_params": {
|
||||
"email": null,
|
||||
"page": null,
|
||||
"page_size": null
|
||||
},
|
||||
"response": [
|
||||
{
|
||||
"id": "user_test123",
|
||||
"email": "test@example.com",
|
||||
"name": "Test User"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "list_candidates",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.gem.com/v0/candidates",
|
||||
"headers": {
|
||||
"X-API-Key": "{{api_key}}"
|
||||
},
|
||||
"query_params": {
|
||||
"page": null,
|
||||
"page_size": null
|
||||
},
|
||||
"response": [
|
||||
{
|
||||
"id": "candidate_test123",
|
||||
"name": "Test Candidate",
|
||||
"email": "candidate@example.com"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "create_candidate",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.gem.com/v0/candidates",
|
||||
"headers": {
|
||||
"X-API-Key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"email": "{{string}}",
|
||||
"name": "{{string}}",
|
||||
"profile_url": null
|
||||
},
|
||||
"response": {
|
||||
"id": "candidate_new123",
|
||||
"email": "new@example.com"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "create_note",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.gem.com/v0/notes",
|
||||
"headers": {
|
||||
"X-API-Key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"candidate_id": "{{string}}",
|
||||
"text": "{{string}}",
|
||||
"privacy": null
|
||||
},
|
||||
"response": {
|
||||
"id": "note_test123",
|
||||
"created_at": "2024-01-01T00:00:00Z"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "list_projects",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.gem.com/v0/projects",
|
||||
"headers": {
|
||||
"X-API-Key": "{{api_key}}"
|
||||
},
|
||||
"response": [
|
||||
{
|
||||
"id": "project_test123",
|
||||
"name": "Test Project"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "create_project",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.gem.com/v0/projects",
|
||||
"headers": {
|
||||
"X-API-Key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"name": "{{string}}",
|
||||
"type": null
|
||||
},
|
||||
"response": {
|
||||
"id": "project_new123",
|
||||
"name": "New Project"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "list_custom_fields",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.gem.com/v0/custom_fields",
|
||||
"headers": {
|
||||
"X-API-Key": "{{api_key}}"
|
||||
},
|
||||
"response": [
|
||||
{
|
||||
"id": "field_test123",
|
||||
"name": "Test Field",
|
||||
"type": "text"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "update_custom_field",
|
||||
"method": "PUT",
|
||||
"url_pattern": "https://api.gem.com/v0/custom_fields",
|
||||
"headers": {
|
||||
"X-API-Key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"custom_field_id": "{{string}}",
|
||||
"value": "{{any}}"
|
||||
},
|
||||
"response": {
|
||||
"success": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "list_sequences",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.gem.com/v0/sequences",
|
||||
"headers": {
|
||||
"X-API-Key": "{{api_key}}"
|
||||
},
|
||||
"response": [
|
||||
{
|
||||
"id": "sequence_test123",
|
||||
"name": "Test Sequence"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "request_data_export",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.gem.com/v0/data_export",
|
||||
"headers": {
|
||||
"X-API-Key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"export_type": "{{string}}",
|
||||
"start_date": null,
|
||||
"end_date": null
|
||||
},
|
||||
"response": {
|
||||
"export_id": "export_test123",
|
||||
"status": "processing"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "talent_search",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.gem.com/v0/talent/search",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"query": "{{string}}",
|
||||
"filters": null,
|
||||
"page": null,
|
||||
"pageSize": null
|
||||
},
|
||||
"response": {
|
||||
"data": [
|
||||
{
|
||||
"id": "talent_test123",
|
||||
"name": "John Doe",
|
||||
"title": "Software Engineer",
|
||||
"company": "Example Corp",
|
||||
"location": "San Francisco, CA",
|
||||
"email": "john@example.com"
|
||||
}
|
||||
],
|
||||
"pagination": {
|
||||
"page": 1,
|
||||
"pageSize": 20,
|
||||
"total": 100
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "get_talent_profile",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.gem.com/v0/talent/{talent_id}",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"id": "talent_test123",
|
||||
"name": "John Doe",
|
||||
"title": "Software Engineer",
|
||||
"company": "Example Corp",
|
||||
"experience": [
|
||||
{
|
||||
"company": "Example Corp",
|
||||
"title": "Software Engineer",
|
||||
"startDate": "2020-01",
|
||||
"current": true
|
||||
}
|
||||
],
|
||||
"skills": ["Python", "JavaScript", "AWS"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "create_outreach",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.gem.com/v0/outreach",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"talentId": "{{string}}",
|
||||
"subject": "{{string}}",
|
||||
"message": "{{string}}",
|
||||
"scheduleSend": null
|
||||
},
|
||||
"response": {
|
||||
"id": "outreach_test123",
|
||||
"status": "scheduled",
|
||||
"talentId": "talent_test123",
|
||||
"sentAt": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "create_project",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.gem.com/v0/projects",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"name": "{{string}}",
|
||||
"description": null,
|
||||
"type": null
|
||||
},
|
||||
"response": {
|
||||
"id": "project_test123",
|
||||
"name": "Test Project",
|
||||
"createdAt": "2024-01-01T00:00:00Z"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "add_talent_to_project",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.gem.com/v0/projects/{project_id}/talent",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"talentIds": "{{array}}"
|
||||
},
|
||||
"response": {
|
||||
"added": 1,
|
||||
"skipped": 0
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "get_analytics",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.gem.com/v0/analytics/overview",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}"
|
||||
},
|
||||
"query_params": {
|
||||
"startDate": null,
|
||||
"endDate": null,
|
||||
"metric": null
|
||||
},
|
||||
"response": {
|
||||
"metrics": {
|
||||
"totalOutreach": 100,
|
||||
"responseRate": 0.25,
|
||||
"acceptanceRate": 0.10
|
||||
},
|
||||
"timeRange": {
|
||||
"start": "2024-01-01",
|
||||
"end": "2024-01-31"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "export_data",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.gem.com/v0/exports",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"type": "{{string}}",
|
||||
"filters": null,
|
||||
"format": null
|
||||
},
|
||||
"response": {
|
||||
"id": "export_test123",
|
||||
"status": "processing"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "get_export_status",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.gem.com/v0/exports/{export_id}",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"id": "export_test123",
|
||||
"status": "completed",
|
||||
"downloadUrl": "https://gem.com/exports/download/test123"
|
||||
}
|
||||
}
|
||||
],
|
||||
"test_scenarios": {
|
||||
"GemTalentSearchBlock": [
|
||||
{
|
||||
"name": "Search for software engineers",
|
||||
"input": {
|
||||
"query": "software engineer python",
|
||||
"page_size": 10
|
||||
},
|
||||
"expected_calls": ["talent_search"],
|
||||
"expected_outputs": {
|
||||
"results": true,
|
||||
"total": true
|
||||
}
|
||||
}
|
||||
],
|
||||
"GemCreateProjectBlock": [
|
||||
{
|
||||
"name": "Create new recruiting project",
|
||||
"input": {
|
||||
"name": "Q1 2024 Engineering Hires",
|
||||
"description": "Hiring for backend team"
|
||||
},
|
||||
"expected_calls": ["create_project"],
|
||||
"expected_outputs": {
|
||||
"project_id": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,38 @@
|
||||
{
|
||||
"provider": "generic_webhook",
|
||||
"auth_type": "none",
|
||||
"test_api_key": "",
|
||||
"base_url": "",
|
||||
"api_calls": [],
|
||||
"webhooks": {
|
||||
"allowed_webhook_types": ["plain"],
|
||||
"resource_format_pattern": "",
|
||||
"event_types": ["*"],
|
||||
"description": "Generic webhook handler for any external service",
|
||||
"supports_auto_setup": false,
|
||||
"webhook_blocks": ["GenericWebhookTriggerBlock"]
|
||||
},
|
||||
"test_scenarios": {
|
||||
"GenericWebhookTriggerBlock": [
|
||||
{
|
||||
"name": "Receive generic webhook payload",
|
||||
"input": {
|
||||
"constants": {
|
||||
"key": "value"
|
||||
},
|
||||
"payload": {
|
||||
"message": "Hello, World!"
|
||||
}
|
||||
},
|
||||
"expected_outputs": {
|
||||
"constants": {
|
||||
"key": "value"
|
||||
},
|
||||
"payload": {
|
||||
"message": "Hello, World!"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
121
autogpt_platform/backend/test/blocks/test_data/github.json
Normal file
121
autogpt_platform/backend/test/blocks/test_data/github.json
Normal file
@@ -0,0 +1,121 @@
|
||||
{
|
||||
"provider": "github",
|
||||
"auth_type": "oauth2",
|
||||
"test_api_key": "test-github-token",
|
||||
"base_url": "https://api.github.com",
|
||||
"api_calls": [
|
||||
{
|
||||
"name": "create_webhook",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://api.github.com/repos/{owner}/{repo}/hooks",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}",
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"name": "web",
|
||||
"active": true,
|
||||
"events": "{{array}}",
|
||||
"config": {
|
||||
"url": "{{url}}",
|
||||
"content_type": "json",
|
||||
"insecure_ssl": "0"
|
||||
}
|
||||
},
|
||||
"response": {
|
||||
"id": 12345,
|
||||
"name": "web",
|
||||
"active": true,
|
||||
"events": ["pull_request"],
|
||||
"config": {
|
||||
"url": "https://example.com/webhook",
|
||||
"content_type": "json"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "delete_webhook",
|
||||
"method": "DELETE",
|
||||
"url_pattern": "https://api.github.com/repos/{owner}/{repo}/hooks/{hook_id}",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}",
|
||||
"Accept": "application/vnd.github.v3+json"
|
||||
},
|
||||
"response": {},
|
||||
"status": 204
|
||||
},
|
||||
{
|
||||
"name": "list_webhooks",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://api.github.com/repos/{owner}/{repo}/hooks",
|
||||
"headers": {
|
||||
"Authorization": "Bearer {{api_key}}",
|
||||
"Accept": "application/vnd.github.v3+json"
|
||||
},
|
||||
"response": [
|
||||
{
|
||||
"id": 12345,
|
||||
"name": "web",
|
||||
"active": true,
|
||||
"events": ["pull_request"],
|
||||
"config": {
|
||||
"url": "https://example.com/webhook"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"webhooks": {
|
||||
"allowed_webhook_types": ["repo"],
|
||||
"resource_format_pattern": "{owner}/{repo}",
|
||||
"event_types": [
|
||||
"pull_request.opened",
|
||||
"pull_request.edited",
|
||||
"pull_request.closed",
|
||||
"pull_request.reopened",
|
||||
"pull_request.synchronize",
|
||||
"pull_request.assigned",
|
||||
"pull_request.unassigned",
|
||||
"pull_request.labeled",
|
||||
"pull_request.unlabeled",
|
||||
"pull_request.converted_to_draft",
|
||||
"pull_request.locked",
|
||||
"pull_request.unlocked",
|
||||
"pull_request.enqueued",
|
||||
"pull_request.dequeued",
|
||||
"pull_request.milestoned",
|
||||
"pull_request.demilestoned",
|
||||
"pull_request.ready_for_review",
|
||||
"pull_request.review_requested",
|
||||
"pull_request.review_request_removed",
|
||||
"pull_request.auto_merge_enabled",
|
||||
"pull_request.auto_merge_disabled"
|
||||
],
|
||||
"description": "GitHub webhooks for repository events including pull requests, issues, and more",
|
||||
"supports_auto_setup": true,
|
||||
"webhook_blocks": ["GithubPullRequestTriggerBlock"]
|
||||
},
|
||||
"test_scenarios": {
|
||||
"GithubPullRequestTriggerBlock": [
|
||||
{
|
||||
"name": "Trigger on pull request event",
|
||||
"input": {
|
||||
"repo": "owner/repo",
|
||||
"events": {
|
||||
"opened": true,
|
||||
"synchronize": true
|
||||
}
|
||||
},
|
||||
"expected_outputs": {
|
||||
"payload": true,
|
||||
"triggered_by_user": true,
|
||||
"event": true,
|
||||
"number": true,
|
||||
"pull_request": true,
|
||||
"pull_request_url": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
351
autogpt_platform/backend/test/blocks/test_data/oxylabs.json
Normal file
351
autogpt_platform/backend/test/blocks/test_data/oxylabs.json
Normal file
@@ -0,0 +1,351 @@
|
||||
{
|
||||
"provider": "oxylabs",
|
||||
"auth_type": "user_password",
|
||||
"test_username": "test-user",
|
||||
"test_password": "test-pass",
|
||||
"base_url": "https://realtime.oxylabs.io/v1",
|
||||
"secondary_url": "https://data.oxylabs.io/v1",
|
||||
"api_calls": [
|
||||
{
|
||||
"name": "scrape_url_realtime",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://realtime.oxylabs.io/v1/queries",
|
||||
"headers": {
|
||||
"Authorization": "Basic {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"source": "{{string}}",
|
||||
"url": "{{url}}",
|
||||
"user_agent_type": null,
|
||||
"callback_url": null,
|
||||
"context": null,
|
||||
"parse": null,
|
||||
"parsing_instructions": null
|
||||
},
|
||||
"response": {
|
||||
"results": [
|
||||
{
|
||||
"content": "<html>...</html>",
|
||||
"created_at": "2024-01-01T00:00:00Z",
|
||||
"updated_at": "2024-01-01T00:00:00Z",
|
||||
"page": 1,
|
||||
"status_code": 200
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "google_search",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://realtime.oxylabs.io/v1/queries",
|
||||
"headers": {
|
||||
"Authorization": "Basic {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"source": "google",
|
||||
"query": "{{string}}",
|
||||
"domain": null,
|
||||
"start_page": null,
|
||||
"pages": null,
|
||||
"locale": null,
|
||||
"geo_location": null,
|
||||
"user_agent_type": null,
|
||||
"parse": null,
|
||||
"context": null
|
||||
},
|
||||
"response": {
|
||||
"results": [
|
||||
{
|
||||
"content": {
|
||||
"results": {
|
||||
"organic": [
|
||||
{
|
||||
"url": "https://example.com",
|
||||
"title": "Example Result",
|
||||
"description": "This is an example search result"
|
||||
}
|
||||
],
|
||||
"paid": [],
|
||||
"featured_snippet": null
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "google_shopping",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://realtime.oxylabs.io/v1/queries",
|
||||
"headers": {
|
||||
"Authorization": "Basic {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"source": "google_shopping_search",
|
||||
"query": "{{string}}",
|
||||
"domain": null,
|
||||
"pages": null,
|
||||
"locale": null,
|
||||
"geo_location": null,
|
||||
"context": null,
|
||||
"parse": null
|
||||
},
|
||||
"response": {
|
||||
"results": [
|
||||
{
|
||||
"content": {
|
||||
"results": {
|
||||
"organic": [
|
||||
{
|
||||
"url": "https://example.com/product",
|
||||
"title": "Product Name",
|
||||
"price": "$99.99",
|
||||
"merchant": "Example Store"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "amazon_search",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://realtime.oxylabs.io/v1/queries",
|
||||
"headers": {
|
||||
"Authorization": "Basic {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"source": "amazon_search",
|
||||
"query": "{{string}}",
|
||||
"domain": null,
|
||||
"start_page": null,
|
||||
"pages": null,
|
||||
"geo_location": null,
|
||||
"parse": null,
|
||||
"context": null
|
||||
},
|
||||
"response": {
|
||||
"results": [
|
||||
{
|
||||
"content": {
|
||||
"results": {
|
||||
"organic": [
|
||||
{
|
||||
"asin": "B001234567",
|
||||
"url": "https://amazon.com/dp/B001234567",
|
||||
"title": "Product Title",
|
||||
"price": 49.99,
|
||||
"rating": 4.5
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "amazon_product",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://realtime.oxylabs.io/v1/queries",
|
||||
"headers": {
|
||||
"Authorization": "Basic {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"source": "amazon_product",
|
||||
"url": "{{url}}",
|
||||
"geo_location": null,
|
||||
"parse": null,
|
||||
"context": null
|
||||
},
|
||||
"response": {
|
||||
"results": [
|
||||
{
|
||||
"content": {
|
||||
"asin": "B001234567",
|
||||
"title": "Product Title",
|
||||
"price": 49.99,
|
||||
"description": "Product description...",
|
||||
"images": ["https://example.com/image1.jpg"],
|
||||
"rating": 4.5,
|
||||
"reviews_count": 1234
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "serp_google_trends",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://realtime.oxylabs.io/v1/queries",
|
||||
"headers": {
|
||||
"Authorization": "Basic {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"source": "google_trends_explore",
|
||||
"query": "{{string}}",
|
||||
"time_range": null,
|
||||
"category": null,
|
||||
"geo": null
|
||||
},
|
||||
"response": {
|
||||
"results": [
|
||||
{
|
||||
"content": {
|
||||
"interest_over_time": [
|
||||
{
|
||||
"date": "2024-01-01",
|
||||
"value": 75
|
||||
}
|
||||
],
|
||||
"related_queries": [
|
||||
{
|
||||
"query": "related search",
|
||||
"value": 100
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "submit_job_async",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://data.oxylabs.io/v1/queries",
|
||||
"headers": {
|
||||
"Authorization": "Basic {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"source": "{{string}}",
|
||||
"url": "{{url}}",
|
||||
"callback_url": null,
|
||||
"parse": null
|
||||
},
|
||||
"response": {
|
||||
"id": "job_test123",
|
||||
"status": "pending",
|
||||
"_links": [
|
||||
{
|
||||
"rel": "self",
|
||||
"href": "https://data.oxylabs.io/v1/queries/job_test123"
|
||||
},
|
||||
{
|
||||
"rel": "results",
|
||||
"href": "https://data.oxylabs.io/v1/queries/job_test123/results"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "get_job_status",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://data.oxylabs.io/v1/queries/{job_id}",
|
||||
"headers": {
|
||||
"Authorization": "Basic {{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"id": "job_test123",
|
||||
"status": "done",
|
||||
"created_at": "2024-01-01T00:00:00Z"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "get_job_results",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://data.oxylabs.io/v1/queries/{job_id}/results",
|
||||
"headers": {
|
||||
"Authorization": "Basic {{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"results": [
|
||||
{
|
||||
"content": "<html>...</html>",
|
||||
"created_at": "2024-01-01T00:00:00Z"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "submit_batch",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://data.oxylabs.io/v1/queries/batch",
|
||||
"headers": {
|
||||
"Authorization": "Basic {{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"queries": "{{array}}"
|
||||
},
|
||||
"response": {
|
||||
"queries": [
|
||||
{
|
||||
"id": "job_batch1",
|
||||
"status": "pending"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "get_callbacker_ips",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://data.oxylabs.io/v1/info/callbacker_ips",
|
||||
"headers": {
|
||||
"Authorization": "Basic {{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"ips": ["192.168.1.1", "192.168.1.2"]
|
||||
}
|
||||
}
|
||||
],
|
||||
"test_scenarios": {
|
||||
"OxylabsScrapeWebPageBlock": [
|
||||
{
|
||||
"name": "Scrape a webpage",
|
||||
"input": {
|
||||
"url": "https://example.com",
|
||||
"source": "universal"
|
||||
},
|
||||
"expected_calls": ["scrape_url"],
|
||||
"expected_outputs": {
|
||||
"content": true,
|
||||
"status_code": true
|
||||
}
|
||||
}
|
||||
],
|
||||
"OxylabsGoogleSearchBlock": [
|
||||
{
|
||||
"name": "Search Google for results",
|
||||
"input": {
|
||||
"query": "artificial intelligence news",
|
||||
"pages": 1
|
||||
},
|
||||
"expected_calls": ["google_search"],
|
||||
"expected_outputs": {
|
||||
"results": true
|
||||
}
|
||||
}
|
||||
],
|
||||
"OxylabsAmazonProductBlock": [
|
||||
{
|
||||
"name": "Get Amazon product details",
|
||||
"input": {
|
||||
"url": "https://amazon.com/dp/B001234567"
|
||||
},
|
||||
"expected_calls": ["amazon_product"],
|
||||
"expected_outputs": {
|
||||
"product_data": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
124
autogpt_platform/backend/test/blocks/test_data/slant3d.json
Normal file
124
autogpt_platform/backend/test/blocks/test_data/slant3d.json
Normal file
@@ -0,0 +1,124 @@
|
||||
{
|
||||
"provider": "slant3d",
|
||||
"auth_type": "api_key",
|
||||
"test_api_key": "test-slant3d-key",
|
||||
"base_url": "https://www.slant3dapi.com/api",
|
||||
"api_calls": [
|
||||
{
|
||||
"name": "create_order",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://www.slant3dapi.com/api/order",
|
||||
"headers": {
|
||||
"api-key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"email": "{{string}}",
|
||||
"phone": "{{string}}",
|
||||
"name": "{{string}}",
|
||||
"orderNumber": "{{string}}",
|
||||
"filename": "{{string}}",
|
||||
"fileURL": "{{url}}",
|
||||
"bill_to_street_1": "{{string}}",
|
||||
"bill_to_city": "{{string}}",
|
||||
"bill_to_state": "{{string}}",
|
||||
"bill_to_zip": "{{string}}",
|
||||
"bill_to_country_as_iso": "{{string}}",
|
||||
"ship_to_name": "{{string}}",
|
||||
"ship_to_street_1": "{{string}}",
|
||||
"ship_to_city": "{{string}}",
|
||||
"ship_to_state": "{{string}}",
|
||||
"ship_to_zip": "{{string}}",
|
||||
"ship_to_country_as_iso": "{{string}}",
|
||||
"order_item_name": "{{string}}",
|
||||
"order_quantity": "{{number}}",
|
||||
"order_item_SKU": "{{string}}"
|
||||
},
|
||||
"response": {
|
||||
"orderId": "order_123456",
|
||||
"status": "processing"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "register_webhook",
|
||||
"method": "POST",
|
||||
"url_pattern": "https://www.slant3dapi.com/api/webhook/register",
|
||||
"headers": {
|
||||
"api-key": "{{api_key}}",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"body": {
|
||||
"url": "{{url}}"
|
||||
},
|
||||
"response": {
|
||||
"success": true,
|
||||
"webhook_id": "webhook_123"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "unregister_webhook",
|
||||
"method": "DELETE",
|
||||
"url_pattern": "https://www.slant3dapi.com/api/webhook/{webhook_id}",
|
||||
"headers": {
|
||||
"api-key": "{{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"success": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "get_order_status",
|
||||
"method": "GET",
|
||||
"url_pattern": "https://www.slant3dapi.com/api/order/{order_id}",
|
||||
"headers": {
|
||||
"api-key": "{{api_key}}"
|
||||
},
|
||||
"response": {
|
||||
"orderId": "order_123456",
|
||||
"status": "SHIPPED",
|
||||
"trackingNumber": "1Z999AA10123456784",
|
||||
"carrierCode": "usps"
|
||||
}
|
||||
}
|
||||
],
|
||||
"webhooks": {
|
||||
"allowed_webhook_types": ["orders"],
|
||||
"resource_format_pattern": "",
|
||||
"event_types": [
|
||||
"order.shipped"
|
||||
],
|
||||
"description": "Slant3D webhooks for order status updates",
|
||||
"supports_auto_setup": true,
|
||||
"webhook_blocks": ["Slant3DOrderWebhookBlock"]
|
||||
},
|
||||
"test_scenarios": {
|
||||
"Slant3DOrderWebhookBlock": [
|
||||
{
|
||||
"name": "Receive order shipped notification",
|
||||
"input": {
|
||||
"events": {
|
||||
"shipped": true
|
||||
},
|
||||
"payload": {
|
||||
"orderId": "1234567890",
|
||||
"status": "SHIPPED",
|
||||
"trackingNumber": "ABCDEF123456",
|
||||
"carrierCode": "usps"
|
||||
}
|
||||
},
|
||||
"expected_outputs": {
|
||||
"payload": {
|
||||
"orderId": "1234567890",
|
||||
"status": "SHIPPED",
|
||||
"trackingNumber": "ABCDEF123456",
|
||||
"carrierCode": "usps"
|
||||
},
|
||||
"order_id": "1234567890",
|
||||
"status": "SHIPPED",
|
||||
"tracking_number": "ABCDEF123456",
|
||||
"carrier_code": "usps"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,61 +0,0 @@
|
||||
"""
|
||||
Test the example blocks to ensure they work correctly with the provider pattern.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
|
||||
from backend.blocks.examples.example_sdk_block import ExampleSDKBlock
|
||||
from backend.blocks.examples.simple_example_block import SimpleExampleBlock
|
||||
from backend.sdk import APIKeyCredentials, SecretStr
|
||||
|
||||
|
||||
class TestExampleBlocks:
    """Test the example blocks."""

    @pytest.mark.asyncio
    async def test_simple_example_block(self):
        """Test the simple example block."""
        block = SimpleExampleBlock()

        # Test execution: Block.run is an async generator that yields
        # (output_name, value) pairs, collected into a dict here.
        outputs = {}
        async for name, value in block.run(
            SimpleExampleBlock.Input(text="Hello ", count=3),
        ):
            outputs[name] = value

        # count=3 repeats the input text three times.
        assert outputs["result"] == "Hello Hello Hello "

    @pytest.mark.asyncio
    async def test_example_sdk_block(self):
        """Test the example SDK block with credentials."""
        # Create test credentials
        test_creds = APIKeyCredentials(
            id="test-creds",
            provider="example-service",
            api_key=SecretStr("test-api-key"),
            title="Test API Key",
        )

        block = ExampleSDKBlock()

        # Test execution: the credentials metadata dict in Input must match
        # the credentials object passed to run().
        outputs = {}
        async for name, value in block.run(
            ExampleSDKBlock.Input(
                credentials={  # type: ignore
                    "provider": "example-service",
                    "id": "test-creds",
                    "type": "api_key",
                },
                text="Test input",
                max_length=50,
            ),
            credentials=test_creds,
        ):
            outputs[name] = value

        # "PROCESSED: Test input" is 21 characters long.
        assert outputs["result"] == "PROCESSED: Test input"
        assert outputs["length"] == 21
        assert outputs["api_key_used"] is True
        assert "error" not in outputs or not outputs.get("error")
|
||||
180
autogpt_platform/backend/test/blocks/test_utils.py
Normal file
180
autogpt_platform/backend/test/blocks/test_utils.py
Normal file
@@ -0,0 +1,180 @@
|
||||
"""
|
||||
Shared test utilities for mocking API responses in block tests.
|
||||
"""
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
|
||||
class MockResponse:
    """Canned HTTP response object used in place of a real client response.

    Exposes the minimal surface exercised by block tests: ``status``,
    ``headers``, ``text``, a (synchronous) ``json()`` accessor, and async
    context-manager support so instances work with ``async with``.
    """

    def __init__(
        self,
        json_data: Dict[str, Any],
        status: int = 200,
        headers: Optional[Dict[str, str]] = None,
    ):
        self.json_data = json_data
        self.status = status
        self.headers = headers or {}
        # A falsy payload (None or {}) renders as an empty body string.
        self.text = json.dumps(json_data) if json_data else ""

    def json(self) -> Dict[str, Any]:
        """Return the parsed JSON payload (sync — unlike real async clients)."""
        return self.json_data

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        pass
|
||||
|
||||
|
||||
class MockRequests:
    """Stand-in for the SDK's HTTP helper, backed by ``AsyncMock`` verbs.

    Each HTTP verb (get/post/put/patch/delete) is an ``AsyncMock`` so block
    code can ``await`` it; the ``setup_*`` helpers wire canned
    ``MockResponse`` objects into those mocks.
    """

    def __init__(self):
        # One awaitable mock per HTTP verb.
        for verb in ("get", "post", "put", "patch", "delete"):
            setattr(self, verb, AsyncMock())
        self.call_history = []

    def setup_response(
        self, method: str, response_data: Dict[str, Any], status: int = 200
    ):
        """Make *method* resolve to a single canned response and return it."""
        canned = MockResponse(response_data, status)
        getattr(self, method).return_value = canned
        return canned

    def setup_error(self, method: str, error_message: str, status: int = 400):
        """Make *method* resolve to a standard ``{"error": {...}}`` envelope."""
        return self.setup_response(
            method, {"error": {"message": error_message}}, status
        )

    def setup_sequence(self, method: str, responses: list):
        """Queue several ``(data, status)`` responses in order (pagination tests)."""
        canned = [MockResponse(data, status) for data, status in responses]
        getattr(self, method).side_effect = canned
        return canned

    def assert_called_with_headers(self, method: str, expected_headers: Dict[str, str]):
        """Assert *method* was invoked with (at least) the given headers."""
        mock_method = getattr(self, method)
        assert mock_method.called
        sent = mock_method.call_args.kwargs.get("headers", {})
        for key, value in expected_headers.items():
            assert (
                sent.get(key) == value
            ), f"Expected header {key}={value}, got {sent.get(key)}"
|
||||
|
||||
|
||||
def load_mock_response(provider: str, response_file: str) -> Dict[str, Any]:
    """Load a canned JSON response for *provider* from the local test_data tree."""
    # Fixtures live next to this module: test_data/<provider>/responses/<file>
    file_path = (
        Path(__file__).parent / "test_data" / provider / "responses" / response_file
    )

    if not file_path.exists():
        # Fall back to a sentinel payload instead of raising, so tests can
        # assert on the missing-fixture case explicitly.
        return {"error": f"Mock response file not found: {response_file}"}

    with open(file_path, "r") as f:
        return json.load(f)
|
||||
|
||||
|
||||
def create_mock_credentials(provider: str, **kwargs) -> MagicMock:
    """Create mock credentials for testing.

    Supported kwargs: ``api_key`` and/or ``oauth_token``; the corresponding
    ``.get_secret_value()`` accessor is wired to return the given value.
    *provider* is accepted for signature parity but not otherwise used here.
    """
    creds = MagicMock()

    for secret_attr in ("api_key", "oauth_token"):
        if secret_attr in kwargs:
            getattr(creds, secret_attr).get_secret_value.return_value = kwargs[
                secret_attr
            ]

    return creds
|
||||
|
||||
|
||||
class BlockTestHelper:
    """Helper class for testing blocks."""

    @staticmethod
    async def run_block(block, input_data, credentials=None, **kwargs):
        """Drive a block's async-generator ``run`` and collect every output."""
        return [
            output
            async for output in block.run(input_data, credentials=credentials, **kwargs)
        ]

    @staticmethod
    def assert_output_shape(outputs: list, expected_names: list):
        """Assert outputs carry exactly the expected names, in order."""
        assert len(outputs) == len(
            expected_names
        ), f"Expected {len(expected_names)} outputs, got {len(outputs)}"

        actual_names = [pair[0] for pair in outputs]
        assert (
            actual_names == expected_names
        ), f"Expected output names {expected_names}, got {actual_names}"

    @staticmethod
    def assert_pagination_calls(mock_requests, method: str, expected_calls: int):
        """Assert pagination issued exactly *expected_calls* requests."""
        count = getattr(mock_requests, method).call_count
        assert (
            count == expected_calls
        ), f"Expected {expected_calls} {method} calls, got {count}"
|
||||
|
||||
|
||||
# Common test responses for different scenarios
# Keyed by scenario name; each value mimics a typical provider error envelope
# ({"error": {"message": ..., "code": ...}}) suitable for MockRequests.setup_response.
COMMON_ERROR_RESPONSES = {
    "unauthorized": {"error": {"message": "Invalid API key", "code": "UNAUTHORIZED"}},
    "rate_limit": {
        "error": {
            "message": "Rate limit exceeded",
            "code": "RATE_LIMIT_EXCEEDED",
            # Seconds the client should wait before retrying.
            "retry_after": 60,
        }
    },
    "not_found": {"error": {"message": "Resource not found", "code": "NOT_FOUND"}},
    "server_error": {
        "error": {"message": "Internal server error", "code": "INTERNAL_ERROR"}
    },
    "validation_error": {
        "error": {
            "message": "Invalid request parameters",
            "code": "VALIDATION_ERROR",
            # Per-field validation detail, as many APIs report it.
            "details": [{"field": "name", "message": "Required field missing"}],
        }
    },
}
|
||||
|
||||
|
||||
def create_paginated_response(
    items: list, page_size: int = 10, cursor_field: str = "offset"
) -> list:
    """Split *items* into ``(response_dict, 200)`` pages for pagination tests.

    Every page except the last carries the cursor field: a numeric offset for
    ``cursor_field="offset"`` or a ``"cursor_<n>"`` token for ``"next_cursor"``.
    """
    pages = []
    total = len(items)

    for start in range(0, total, page_size):
        chunk = items[start : start + page_size]
        more = start + page_size < total

        page = {"items": chunk, "has_more": more}

        if more:
            if cursor_field == "offset":
                page[cursor_field] = start + page_size
            elif cursor_field == "next_cursor":
                page[cursor_field] = f"cursor_{start + page_size}"

        pages.append((page, 200))

    return pages
|
||||
@@ -0,0 +1,538 @@
|
||||
"""
|
||||
Pytest-based webhook endpoint validation for all provider blocks.
|
||||
|
||||
This test automatically discovers all webhook trigger blocks and validates
|
||||
their configurations, ensuring they properly define webhook endpoints and
|
||||
event handling.
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
def extract_webhook_configurations(file_content: str) -> List[Dict]:
    """
    Extract webhook configurations from file content.
    Returns list of webhook configurations found.

    Both ``BlockWebhookConfig(...)`` and ``BlockManualWebhookConfig(...)``
    call sites are parsed with the same field regexes; the manual variant
    simply has no resource_format / event_format fields. (Previously the two
    branches were near-identical copy-pasted code.)
    """

    def _parse(config_str: str, config_type: str) -> Dict:
        """Parse one config call's argument text into a field dict."""
        full = config_type == "BlockWebhookConfig"
        if full:
            config: Dict = {
                "type": config_type,
                "raw": config_str,
                "provider": None,
                "webhook_type": None,
                "resource_format": None,
                "event_filter_input": None,
                "event_format": None,
            }
        else:
            config = {
                "type": config_type,
                "raw": config_str,
                "provider": None,
                "webhook_type": None,
                "event_filter_input": None,
            }

        # Provider: literal ProviderName("x"), enum ProviderName.X, or a
        # variable reference ProviderName(some_provider.name).
        # NOTE(review): the first pattern also matches the variable form
        # (capturing "some_provider.name"), so the third fallback is rarely
        # reached; preserved as-is to keep behavior identical.
        provider_match = re.search(
            r'provider\s*=\s*ProviderName\s*\(\s*["\']?([^"\')\s]+)["\']?\s*\)',
            config_str,
        )
        if not provider_match:
            provider_match = re.search(
                r"provider\s*=\s*ProviderName\.([A-Z_]+)", config_str
            )
        if not provider_match:
            provider_match = re.search(
                r"provider\s*=\s*ProviderName\s*\(\s*(\w+)\.name\s*\)", config_str
            )
        if provider_match:
            config["provider"] = provider_match.group(1).lower()

        # Webhook type: quoted string literal, or Enum.MEMBER (member name
        # lowercased via group(2)); the quoted form lands in group(1).
        webhook_type_match = re.search(
            r'webhook_type\s*=\s*["\']([\w_-]+)["\']', config_str
        )
        if not webhook_type_match:
            webhook_type_match = re.search(
                r"webhook_type\s*=\s*(\w+)\.(\w+)", config_str
            )
            if webhook_type_match:
                # Extract the enum value
                config["webhook_type"] = webhook_type_match.group(2).lower()
        if webhook_type_match and not config["webhook_type"]:
            config["webhook_type"] = webhook_type_match.group(1)

        if full:
            # Extract resource_format (may legitimately be an empty string)
            resource_format_match = re.search(
                r'resource_format\s*=\s*["\']([^"\']*)["\']', config_str
            )
            if resource_format_match:
                config["resource_format"] = resource_format_match.group(1)

        # Extract event_filter_input
        event_filter_match = re.search(
            r'event_filter_input\s*=\s*["\']([^"\']+)["\']', config_str
        )
        if event_filter_match:
            config["event_filter_input"] = event_filter_match.group(1)

        if full:
            # Extract event_format
            event_format_match = re.search(
                r'event_format\s*=\s*["\']([^"\']+)["\']', config_str
            )
            if event_format_match:
                config["event_format"] = event_format_match.group(1)

        return config

    configs: List[Dict] = []
    # Match the call's argument list, allowing one level of nested parens
    # (e.g. ProviderName("x")). All BlockWebhookConfig matches come first,
    # then BlockManualWebhookConfig — preserving the original output order.
    for config_type in ("BlockWebhookConfig", "BlockManualWebhookConfig"):
        call_pattern = rf"{config_type}\s*\(((?:[^()]+|\([^()]*\))*)\)"
        for match in re.finditer(call_pattern, file_content, re.MULTILINE | re.DOTALL):
            configs.append(_parse(match.group(1), config_type))

    return configs
|
||||
|
||||
|
||||
def extract_webhook_blocks(file_content: str) -> List[Tuple[str, int]]:
    """
    Extract webhook block class names and their line numbers.
    Returns list of (class_name, line_number) tuples.

    A class counts as a webhook block when a webhook marker appears on its
    ``class`` line or within the following 19 lines.
    """
    blocks: List[Tuple[str, int]] = []
    lines = file_content.split("\n")

    # Pattern for webhook block classes (name ends in "Block", a base mentions Block)
    class_pattern = r"class\s+(\w+Block)\s*\(.*Block.*\):"
    markers = (
        "BlockType.WEBHOOK",
        "BlockWebhookConfig",
        "BlockManualWebhookConfig",
        "webhook_config=",
    )

    for line_num, line in enumerate(lines, 1):
        match = re.search(class_pattern, line)
        if not match:
            continue

        # 20-line lookahead window starting at the class line itself; the
        # slice naturally clamps at end-of-file, so no explicit bounds check
        # is needed (the previous `if i < len(lines)` guard was redundant).
        window = lines[line_num - 1 : line_num + 19]
        if any(marker in text for text in window for marker in markers):
            blocks.append((match.group(1), line_num))

    return blocks
|
||||
|
||||
|
||||
def get_all_webhook_files() -> Dict[str, List[Path]]:
    """Get all files that potentially contain webhook blocks.

    Scans ``backend/blocks/<provider>/`` for ``*trigger*.py`` and
    ``*webhook*.py`` files and returns a provider -> file-list mapping
    (only providers with at least one candidate file are included).
    """
    backend_dir = Path(__file__).parent.parent.parent
    blocks_dir = backend_dir / "backend" / "blocks"

    webhook_files: Dict[str, List[Path]] = {}

    for provider_dir in blocks_dir.iterdir():
        # Skip plain files and private/hidden directories.
        if not provider_dir.is_dir() or provider_dir.name.startswith(("_", ".")):
            continue

        # Union of the two globs, de-duplicated (a file can match both).
        candidates = set(provider_dir.glob("*trigger*.py")) | set(
            provider_dir.glob("*webhook*.py")
        )
        if candidates:
            webhook_files[provider_dir.name] = list(candidates)

    return webhook_files
|
||||
|
||||
|
||||
def load_webhook_spec(provider: str) -> Optional[Dict]:
    """Load the webhook section of ``test_data/<provider>.json``, or None.

    Returns None when no spec file exists; otherwise the spec's "webhooks"
    section (an empty dict when the key is absent, which truthy checks
    treat the same as "no spec").
    """
    spec_path = Path(__file__).parent / "test_data" / f"{provider}.json"

    if not spec_path.exists():
        return None

    with open(spec_path, "r") as f:
        spec = json.load(f)

    # Return webhook-specific configuration if it exists
    return spec.get("webhooks", {})
|
||||
|
||||
|
||||
def validate_webhook_configuration(config: Dict, spec: Dict) -> Tuple[bool, List[str]]:
    """
    Validate a webhook configuration against the specification.
    Returns (is_valid, list_of_errors)
    """
    errors: List[str] = []

    # Full (auto-setup) webhook configs must name a provider and webhook type;
    # manual configs have no hard requirements here.
    if config["type"] == "BlockWebhookConfig":
        errors.extend(
            f"Missing required field: {required}"
            for required in ("provider", "webhook_type")
            if not config.get(required)
        )

    # Validate against spec if available
    if spec:
        allowed_types = spec.get("allowed_webhook_types", [])
        webhook_type = config.get("webhook_type")
        if allowed_types and webhook_type not in allowed_types:
            errors.append(
                f"Invalid webhook_type '{webhook_type}'. "
                f"Allowed types: {', '.join(allowed_types)}"
            )

        # Validate resource_format if specified
        resource_format = config.get("resource_format")
        if resource_format is not None:
            expected_format = spec.get("resource_format_pattern")
            if expected_format and resource_format != expected_format:
                # A non-empty format containing {placeholders} is accepted
                # as a valid pattern even when it differs from the spec.
                if resource_format and not re.search(r"\{[^}]+\}", resource_format):
                    errors.append(
                        f"Invalid resource_format '{resource_format}'. "
                        f"Expected pattern like: {expected_format}"
                    )

    return not errors, errors
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "provider",
    [
        "airtable",
        "baas",
        "elevenlabs",
        "exa",
        "github",
        "slant3d",
        "compass",
        "generic_webhook",
    ],
)
def test_provider_webhook_configurations(provider: str):
    """
    Test that all webhook configurations in provider implementations are valid.

    This test:
    1. Discovers all webhook blocks in the provider's code
    2. Extracts their webhook configurations
    3. Validates configurations have required fields
    4. Checks against specifications if available
    """
    webhook_files = get_all_webhook_files()

    # Providers without trigger/webhook files have nothing to validate.
    if provider not in webhook_files:
        pytest.skip(f"No webhook files found for provider: {provider}")

    # Load webhook specification if available
    spec = load_webhook_spec(provider)

    # Extract all webhook configurations
    all_configs = []
    block_locations = {}  # block_name -> (file, line_num)

    for py_file in webhook_files[provider]:
        with open(py_file, "r") as f:
            content = f.read()

        # Extract webhook blocks
        blocks = extract_webhook_blocks(content)
        for block_name, line_num in blocks:
            block_locations[block_name] = (py_file.name, line_num)

        # Extract configurations, tagging each with its source file for reporting
        configs = extract_webhook_configurations(content)
        for config in configs:
            config["file"] = py_file.name
            all_configs.append(config)

    # Validate all configurations
    validation_errors = []

    for config in all_configs:
        # spec may be None (no JSON spec on disk) -> structure-only validation.
        is_valid, errors = validate_webhook_configuration(config, spec or {})

        if not is_valid:
            error_msg = f"\n ❌ Invalid webhook configuration in {config['file']}:"
            error_msg += f"\n Type: {config['type']}"
            error_msg += f"\n Provider: {config.get('provider', 'MISSING')}"
            error_msg += f"\n Webhook Type: {config.get('webhook_type', 'MISSING')}"
            for error in errors:
                error_msg += f"\n Error: {error}"
            validation_errors.append(error_msg)

    # Create report
    report_lines = [
        f"\n{'='*80}",
        f"Webhook Configuration Validation Report for {provider.upper()}",
        f"{'='*80}",
        f"Files checked: {len(webhook_files[provider])}",
        f"Webhook blocks found: {len(block_locations)}",
        f"Configurations found: {len(all_configs)}",
    ]

    if block_locations:
        report_lines.append("\n📦 Webhook Blocks Found:")
        for block_name, (file, line) in sorted(block_locations.items()):
            report_lines.append(f" - {block_name} ({file}:{line})")

    if all_configs:
        report_lines.append("\n🔧 Webhook Configurations:")
        for config in all_configs:
            report_lines.append(
                f" - {config['type']} in {config['file']}:"
                f"\n Provider: {config.get('provider', 'N/A')}"
                f"\n Type: {config.get('webhook_type', 'N/A')}"
                f"\n Resource: {config.get('resource_format', 'N/A')}"
            )

    if validation_errors:
        report_lines.append(f"\n❌ VALIDATION ERRORS ({len(validation_errors)}):")
        report_lines.extend(validation_errors)
    else:
        report_lines.append("\n✅ All webhook configurations are valid!")

    if not spec:
        report_lines.append(
            f"\n⚠️ WARNING: No webhook specification found for {provider}. "
            f"Consider adding webhook configuration to test_data/{provider}.json"
        )

    # Summary
    report_lines.extend(
        [
            f"\n{'='*80}",
            f"Summary: {len(all_configs) - len(validation_errors)}/{len(all_configs)} configurations valid",
            f"{'='*80}\n",
        ]
    )

    # Print report (visible with pytest -s, or on failure)
    report = "\n".join(report_lines)
    print(report)

    # Fail if there are validation errors
    if validation_errors:
        pytest.fail(
            f"Found {len(validation_errors)} invalid webhook configurations. See report above."
        )
|
||||
|
||||
|
||||
def test_webhook_event_types():
    """Test that webhook blocks properly define their event types."""
    webhook_files = get_all_webhook_files()

    issues = []

    for provider, files in webhook_files.items():
        for py_file in files:
            with open(py_file, "r") as f:
                content = f.read()

            # Check for EventsFilter classes
            # NOTE(review): matches only classes literally named "EventsFilter";
            # provider-prefixed names (e.g. GithubEventsFilter) are not checked
            # — confirm that is intended.
            event_filter_pattern = (
                r"class\s+EventsFilter\s*\(.*\):([\s\S]*?)(?=class|\Z)"
            )
            matches = re.finditer(event_filter_pattern, content)

            for match in matches:
                class_content = match.group(1)

                # Extract event fields: boolean toggles with a default value
                field_pattern = r"(\w+)\s*:\s*bool\s*="
                fields = re.findall(field_pattern, class_content)

                # Check that there are event fields defined
                if not fields:
                    issues.append(
                        f"{provider}/{py_file.name}: EventsFilter class has no event fields defined"
                    )

                # Check field naming conventions (lowercase snake_case)
                for field in fields:
                    if not field.islower() or not field.replace("_", "").isalnum():
                        issues.append(
                            f"{provider}/{py_file.name}: Event field '{field}' "
                            "doesn't follow naming convention (lowercase with underscores)"
                        )

    if issues:
        report = "\n".join(
            ["\nWebhook Event Type Issues:"] + [f" - {issue}" for issue in issues]
        )
        pytest.fail(report)
|
||||
|
||||
|
||||
def test_webhook_blocks_have_proper_structure():
    """Test that webhook blocks follow the expected structure."""
    webhook_files = get_all_webhook_files()

    structural_issues = []

    for provider, files in webhook_files.items():
        for py_file in files:
            with open(py_file, "r") as f:
                content = f.read()

            lines = content.split("\n")
            blocks = extract_webhook_blocks(content)

            for block_name, line_num in blocks:
                # For structural checks, look at the entire file content after the class definition
                # This is more reliable than trying to extract just the class content
                class_line_idx = line_num - 1
                remaining_content = "\n".join(lines[class_line_idx:])

                # Check for required components as (regex, description, required).
                # NOTE(review): entries with required=False are never evaluated
                # by the loop below — they document optional structure only.
                checks = [
                    ("BlockType.WEBHOOK", "block_type set to WEBHOOK", False),
                    ("class Input", "Input schema defined", True),
                    ("class Output", "Output schema defined", True),
                    (
                        "payload.*InputField|payload.*SchemaField",
                        "payload field in Input",
                        True,
                    ),
                    (
                        "webhook_url.*InputField|webhook_url.*SchemaField",
                        "webhook_url field in Input",
                        False,
                    ),
                    ("async def run", "async run method defined", True),
                ]

                for pattern, description, required in checks:
                    if required and not re.search(pattern, remaining_content):
                        structural_issues.append(
                            f"{provider}/{py_file.name}:{line_num} - "
                            f"{block_name} missing {description}"
                        )

    if structural_issues:
        report = "\n".join(
            ["\nWebhook Block Structure Issues:"]
            + [f" - {issue}" for issue in structural_issues]
        )
        pytest.fail(report)
|
||||
|
||||
|
||||
def test_webhook_specs_completeness():
    """Test that webhook specifications in JSON files are complete.

    Warns (does not fail) when a provider ships webhook blocks but its
    test_data/<provider>.json spec is missing or lacks recommended fields.
    """
    test_data_dir = Path(__file__).parent / "test_data"

    issues = []

    # Hoisted out of the loop: the provider -> files mapping is invariant
    # across spec files (it was previously recomputed once per JSON file).
    webhook_files = get_all_webhook_files()

    for spec_file in test_data_dir.glob("*.json"):
        with open(spec_file, "r") as f:
            spec = json.load(f)

        provider = spec_file.stem

        # Only providers that actually ship webhook blocks need a webhook spec.
        if provider in webhook_files:
            if "webhooks" not in spec:
                issues.append(
                    f"{provider}.json: Missing 'webhooks' section but provider has webhook blocks"
                )
            else:
                webhook_spec = spec["webhooks"]

                # Check webhook spec completeness
                recommended_fields = [
                    "allowed_webhook_types",
                    "resource_format_pattern",
                    "event_types",
                    "description",
                ]
                missing = [f for f in recommended_fields if f not in webhook_spec]

                if missing:
                    issues.append(
                        f"{provider}.json: Webhook spec missing recommended fields: "
                        f"{', '.join(missing)}"
                    )

    if issues:
        report = "\n".join(
            ["\nWebhook Specification Issues:"] + [f" - {issue}" for issue in issues]
        )
        print(report)  # Just warn, don't fail
|
||||
275
autogpt_platform/backend/test/blocks/verify_blocks.py
Normal file
275
autogpt_platform/backend/test/blocks/verify_blocks.py
Normal file
@@ -0,0 +1,275 @@
|
||||
"""
|
||||
Block verification script to check that all blocks can be instantiated and have valid schemas.
|
||||
This script can be run to verify blocks without making actual API calls.
|
||||
"""
|
||||
|
||||
import inspect
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Dict, List, Type
|
||||
|
||||
from pydantic import ValidationError
|
||||
|
||||
from backend.data.model import APIKeyCredentials
|
||||
from backend.sdk import Block
|
||||
|
||||
|
||||
@dataclass
class BlockVerificationResult:
    """Result of block verification."""

    # Class name of the verified block (e.g. "AirtableListRecordsBlock").
    block_name: str
    # False when any hard error was recorded; warnings do not affect this.
    success: bool
    # Hard failures (missing attributes, non-async run, instantiation error).
    errors: List[str] = field(default_factory=list)
    # Informational findings (required fields, credential/output field lists).
    warnings: List[str] = field(default_factory=list)
|
||||
|
||||
|
||||
class BlockVerifier:
|
||||
"""Verify blocks without making API calls."""
|
||||
|
||||
    def __init__(self):
        # Accumulates results across verify_* calls for generate_report().
        self.results: List[BlockVerificationResult] = []
|
||||
|
||||
    def verify_block_class(self, block_class: Type[Block]) -> BlockVerificationResult:
        """Verify a single block class.

        Runs structural checks only — no API calls: instantiation, required
        attributes, Input/Output schema introspection, async run method, and
        a loose UUID-shape check on the block id. Any exception during
        instantiation is captured as a hard error rather than propagated.
        """
        result = BlockVerificationResult(block_name=block_class.__name__, success=True)

        try:
            # 1. Check if block can be instantiated
            block = block_class()

            # 2. Verify block has required attributes
            required_attrs = ["id", "description", "input_schema", "output_schema"]
            for attr in required_attrs:
                if not hasattr(block, attr):
                    result.errors.append(f"Missing required attribute: {attr}")
                    result.success = False

            # 3. Verify input schema
            if hasattr(block, "Input"):
                try:
                    # Try to create an instance with empty data to check required fields
                    input_class = getattr(block, "Input")
                    _ = input_class()
                except ValidationError as e:
                    # This is expected if there are required fields
                    required_fields = [
                        str(err["loc"][0])
                        for err in e.errors()
                        if err["type"] == "missing"
                    ]
                    if required_fields:
                        result.warnings.append(
                            f"Required input fields: {', '.join(required_fields)}"
                        )

                # Check for credentials field (pydantic exposes __fields__)
                input_class = getattr(block, "Input")
                if hasattr(input_class, "__fields__"):
                    fields_dict = getattr(input_class, "__fields__")
                    cred_fields = [
                        name
                        for name in fields_dict.keys()
                        if "credentials" in name.lower()
                    ]
                    if cred_fields:
                        result.warnings.append(
                            f"Credential fields found: {', '.join(cred_fields)}"
                        )

            # 4. Verify output schema
            if hasattr(block, "Output"):
                output_fields = []
                output_class = getattr(block, "Output", None)
                if output_class and hasattr(output_class, "__fields__"):
                    output_fields = list(getattr(output_class, "__fields__").keys())
                if output_fields:
                    result.warnings.append(
                        f"Output fields: {', '.join(output_fields)}"
                    )

            # 5. Verify run method
            if not hasattr(block, "run"):
                result.errors.append("Missing run method")
                result.success = False
            else:
                # Check if run method is async
                # NOTE(review): iscoroutinefunction is False for async
                # *generator* functions — blocks whose run() yields outputs
                # would be flagged here; confirm which shape run() uses.
                if not inspect.iscoroutinefunction(block.run):
                    result.errors.append("run method must be async")
                    result.success = False

            # 6. Check block ID format (36 chars matches a canonical UUID)
            if hasattr(block, "id"):
                block_id = block.id
                if not isinstance(block_id, str) or len(block_id) != 36:
                    result.warnings.append(
                        f"Block ID might not be a valid UUID: {block_id}"
                    )

        except Exception as e:
            result.errors.append(f"Failed to instantiate block: {str(e)}")
            result.success = False

        return result
|
||||
|
||||
    def verify_provider_blocks(
        self, provider_name: str
    ) -> List[BlockVerificationResult]:
        """Verify all blocks from a specific provider.

        Only the providers listed below are supported; any other name yields
        an empty result list. Import failures are recorded as a synthetic
        "<provider>_import" failure result instead of raising.
        """
        results = []

        # Import provider module dynamically
        try:
            if provider_name == "airtable":
                from backend.blocks import airtable

                module = airtable
            elif provider_name == "baas":
                from backend.blocks import baas

                module = baas
            elif provider_name == "elevenlabs":
                from backend.blocks import elevenlabs

                module = elevenlabs
            else:
                # Unknown providers verify nothing.
                return results

            # Get all block classes from the module (Block subclasses whose
            # name contains "Block", excluding the base class itself)
            for attr_name in dir(module):
                attr = getattr(module, attr_name)
                if (
                    inspect.isclass(attr)
                    and issubclass(attr, Block)
                    and attr is not Block
                    and "Block" in attr_name
                ):
                    result = self.verify_block_class(attr)
                    results.append(result)
                    self.results.append(result)

        except ImportError as e:
            error_result = BlockVerificationResult(
                block_name=f"{provider_name}_import",
                success=False,
                errors=[f"Failed to import provider: {str(e)}"],
            )
            results.append(error_result)
            self.results.append(error_result)

        return results
|
||||
|
||||
def generate_report(self) -> str:
    """Render the accumulated verification results as a plain-text report.

    Returns:
        A newline-joined report: a header, pass/fail summary counts, then a
        per-block section listing each block's errors and warnings.
    """
    results = self.results
    passed = sum(1 for r in results if r.success)

    # Header plus summary counts, assembled in a single literal.
    lines = [
        "Block Verification Report",
        "=" * 50,
        "",
        f"Total blocks verified: {len(results)}",
        f"Successful: {passed}",
        f"Failed: {len(results) - passed}",
        "",
        "Detailed Results:",
        "-" * 50,
        "",
    ]

    # Successes first, then failures; ties break alphabetically by name.
    ordered = sorted(results, key=lambda r: (not r.success, r.block_name))
    for res in ordered:
        marker = "✓" if res.success else "✗"
        lines.append(f"{marker} {res.block_name}")
        # `or []` mirrors the original truthiness guard: nothing is
        # emitted when the errors/warnings collection is empty or None.
        for err in res.errors or []:
            lines.append(f" ERROR: {err}")
        for warn in res.warnings or []:
            lines.append(f" WARNING: {warn}")
        lines.append("")

    return "\n".join(lines)
|
||||
|
||||
async def test_block_execution(
    self,
    block_class: Type[Block],
    test_inputs: Dict[str, Any],
    provider_name: str = "airtable",
) -> Dict[str, Any]:
    """Dry-run a block with mock credentials to validate its structure.

    No real API calls are expected to succeed; the point is to verify that
    the block can be instantiated, its Input model accepts ``test_inputs``,
    and ``run`` is callable as an async generator.

    Args:
        block_class: The block class to instantiate and execute.
        test_inputs: Keyword arguments for the block's ``Input`` model.
        provider_name: Provider for the mock credentials. Defaults to
            ``"airtable"`` for backward compatibility with the previous
            hard-coded value; pass the block's real provider when testing
            other integrations.

    Returns:
        A dict describing the outcome: ``execution_attempted`` (structure is
        valid, the API call failed as expected), ``unexpected_success``,
        ``validation_error`` (inputs rejected), or ``error``.
    """
    try:
        block = block_class()

        # Create mock credentials if needed
        from pydantic import SecretStr

        from backend.sdk import ProviderName

        mock_creds = APIKeyCredentials(
            provider=ProviderName(provider_name), api_key=SecretStr("test-key")
        )

        # Create input instance; raises ValidationError on bad test_inputs.
        input_class = getattr(block, "Input")
        input_data = input_class(**test_inputs)

        # Attempt to run the block (will fail at API call, but validates structure)
        outputs = []
        try:
            async for output in block.run(input_data, credentials=mock_creds):
                outputs.append(output)
        except Exception as e:
            # Expected to fail at API call — a failure here still proves the
            # block's structure (async run, input model) is sound.
            return {
                "status": "execution_attempted",
                "error": str(e),
                "validates_structure": True,
            }

        return {"status": "unexpected_success", "outputs": outputs}

    except ValidationError as e:
        return {
            "status": "validation_error",
            "errors": e.errors(),
            "validates_structure": False,
        }
    except Exception as e:
        return {"status": "error", "error": str(e), "validates_structure": False}
|
||||
|
||||
|
||||
def main():
    """Run block verification for every known provider.

    Prints progress and the full report to stdout, writes the report to
    ``block_verification_report.txt``, and returns True when every verified
    block passed, False otherwise.
    """
    verifier = BlockVerifier()

    # Verify all providers
    providers = ["airtable", "baas", "elevenlabs"]

    print("Starting block verification...\n")

    for provider in providers:
        print(f"Verifying {provider} blocks...")
        results = verifier.verify_provider_blocks(provider)
        print(f" Found {len(results)} blocks")

    # Generate and print report
    report = verifier.generate_report()
    print("\n" + report)

    # The report contains non-ASCII status markers (✓/✗); write UTF-8
    # explicitly so the save does not raise UnicodeEncodeError on platforms
    # whose default locale encoding cannot represent them (e.g. cp1252).
    with open("block_verification_report.txt", "w", encoding="utf-8") as f:
        f.write(report)

    print("Report saved to block_verification_report.txt")

    # Return success only if no block failed.
    failed_count = len([r for r in verifier.results if not r.success])
    return failed_count == 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate a conventional exit status to the shell: 0 on success,
    # 1 when any block failed verification. SystemExit replaces the
    # site-injected exit() builtin, which is absent under `python -S`
    # and not intended for use in scripts.
    success = main()
    raise SystemExit(0 if success else 1)
|
||||
Reference in New Issue
Block a user