Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-11 16:18:07 -05:00)

Compare commits: master...add-iffy-m (39 commits)
| Author | SHA1 | Date |
|---|---|---|
| | a7cf7e352d | |
| | cb736f7973 | |
| | ec9882756f | |
| | 80431734be | |
| | c5e299cd3c | |
| | 718bb085ad | |
| | 0cd8ae4ec4 | |
| | 187487ab97 | |
| | e9c2e9dfe4 | |
| | 8aa60441e7 | |
| | 284400b47c | |
| | eb32ed1b7b | |
| | 4cb70a895a | |
| | cb9bedcb67 | |
| | fd4a61f415 | |
| | 9bc41c94ce | |
| | 2f8f71e8c3 | |
| | b6ebfd67c4 | |
| | c13996c906 | |
| | 47f3836e55 | |
| | ec46177883 | |
| | e549b8e46a | |
| | 982b8b6945 | |
| | 5ad9485a77 | |
| | 22413a3cc7 | |
| | 870e6a901a | |
| | 75e0fdda9c | |
| | 8251ccecf7 | |
| | 0fa1da4f65 | |
| | 19d12170d5 | |
| | 76ca64fa31 | |
| | 55d7dd77aa | |
| | 405fdfdb6c | |
| | 6ceb2d1499 | |
| | 23c643c0d2 | |
| | d2a849eee2 | |
| | 384a7ecd7a | |
| | 46ad1fe5e5 | |
| | 33af467c5a | |
@@ -1,3 +1,5 @@
import hashlib
import hmac
import inspect
import logging
import secrets
@@ -138,3 +140,88 @@ class APIKeyValidator:
        # This helps FastAPI recognize it as a security dependency
        validate_api_key.__name__ = f"validate_{self.security_scheme.model.name}"
        return validate_api_key


class HMACValidator:
    """
    Configurable HMAC-based validator for FastAPI applications.

    This class is useful for validating signed requests such as webhooks,
    where the signature is computed using HMAC SHA256 and sent in a request header.
    It compares the provided signature to a computed one using a shared secret and the raw request body.

    Examples:
        Basic usage:
        ```python
        validator = HMACValidator(
            header_name="X-Signature",
            secret="your-shared-secret"
        )

        @app.post("/webhook", dependencies=[Depends(validator.get_dependency())])
        async def webhook_handler():
            return {"status": "ok"}
        ```

        Custom integration:
        ```python
        validator = HMACValidator(
            header_name="X-Custom-Signature",
            secret=secrets.webhook_secret
        )

        @router.post("/custom-endpoint")
        async def handler(
            _ = Depends(validator.get_dependency())
        ):
            ...
        ```

    Args:
        header_name (str): The name of the request header containing the HMAC signature.
        secret (str): The shared secret used to compute the HMAC hash.
        error_status (int): HTTP status code to return when validation fails.
        error_message (str): Error message to return when validation fails.
    """

    def __init__(
        self,
        header_name: str,
        secret: str,
        error_status: int = HTTP_401_UNAUTHORIZED,
        error_message: str = "Invalid HMAC signature",
    ):
        self.secret = secret
        self.header = APIKeyHeader(name=header_name)
        self.error_status = error_status
        self.error_message = error_message

    async def __call__(
        self,
        request: Request,
        signature: str = Security(APIKeyHeader(name="X-Signature")),
    ) -> bool:
        body = await request.body()
        computed_signature = hmac.new(
            self.secret.encode(), body, hashlib.sha256
        ).hexdigest()

        if not hmac.compare_digest(computed_signature, signature):
            raise HTTPException(
                status_code=self.error_status, detail=self.error_message
            )

        return True

    def get_dependency(self):
        """
        Returns a callable dependency that FastAPI will recognize as a security scheme
        """

        async def validate_signature(
            request: Request, signature: str = Security(self.header)
        ) -> bool:
            return await self(request, signature)

        validate_signature.__name__ = f"validate_{self.header.model.name}"
        return validate_signature
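For reference, the signing contract this validator enforces can be sketched without FastAPI at all: the sender hex-encodes an HMAC-SHA256 of the raw request body with the shared secret, and the validator recomputes the same digest and compares it with `hmac.compare_digest`. A minimal sketch (the secret and body values are placeholders, not part of the diff):

```python
import hashlib
import hmac

secret = "your-shared-secret"  # placeholder value
body = b'{"status": "ok"}'

# What the sender puts in the signature header:
sent_signature = hmac.new(secret.encode(), body, hashlib.sha256).hexdigest()

# What the validator recomputes from the raw body it received:
recomputed = hmac.new(secret.encode(), body, hashlib.sha256).hexdigest()
assert hmac.compare_digest(recomputed, sent_signature)

# Any change to the body (or a different secret) breaks the comparison:
assert not hmac.compare_digest(
    hmac.new(secret.encode(), b'{"status": "tampered"}', hashlib.sha256).hexdigest(),
    sent_signature,
)
```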
@@ -55,6 +55,22 @@ async def get_user_by_id(user_id: str) -> User:
    return User.model_validate(user)


async def get_user_info_by_id(user_id: str) -> dict:
    # TODO: Change return type to a Pydantic model instead of a dict
    try:
        user = await User.prisma().find_unique(where={"id": user_id})
        if not user:
            raise ValueError(f"User with ID {user_id} not found")

        return {
            "id": user.id,
            "name": user.name,
            "email": user.email,
        }
    except Exception as e:
        raise DatabaseError(f"Failed to get user info for user {user_id}: {e}") from e


async def get_user_email_by_id(user_id: str) -> Optional[str]:
    try:
        user = await prisma.user.find_unique(where={"id": user_id})
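The TODO above asks for a Pydantic model instead of a plain dict. A minimal sketch of what that could look like (the `UserInfo` name is hypothetical, not an existing class in the codebase; fields mirror the dict returned above):

```python
from pydantic import BaseModel


class UserInfo(BaseModel):
    # Hypothetical model sketching the TODO; mirrors the keys of the dict
    # currently returned by get_user_info_by_id.
    id: str
    name: str | None = None
    email: str | None = None


# e.g. UserInfo(**info_dict) once the dict has been fetched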
@@ -38,6 +38,7 @@ from backend.data.user import (
    get_active_user_ids_in_timerange,
    get_user_email_by_id,
    get_user_email_verification,
    get_user_info_by_id,
    get_user_integrations,
    get_user_metadata,
    get_user_notification_preference,
@@ -121,6 +122,7 @@ class DatabaseManager(AppService):
    update_user_metadata = _(update_user_metadata)
    get_user_integrations = _(get_user_integrations)
    update_user_integrations = _(update_user_integrations)
    get_user_info_by_id = _(get_user_info_by_id)

    # User Comms - async
    get_active_user_ids_in_timerange = _(get_active_user_ids_in_timerange)
@@ -181,6 +183,7 @@ class DatabaseManagerClient(AppServiceClient):
    update_user_metadata = _(d.update_user_metadata)
    get_user_integrations = _(d.get_user_integrations)
    update_user_integrations = _(d.update_user_integrations)
    get_user_info_by_id = _(d.get_user_info_by_id)

    # User Comms - async
    get_active_user_ids_in_timerange = _(d.get_active_user_ids_in_timerange)
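With the accessor registered on both the service (`DatabaseManager`) and the RPC client (`DatabaseManagerClient`), other processes can call it through the client, which is exactly how `IffyService.get_user_data` resolves it later in this diff. A minimal sketch (the user ID is a placeholder):

```python
from backend.executor.database import DatabaseManagerClient
from backend.util.service import get_service_client

# Hypothetical caller in another process, mirroring the get_db() helper in service.py below.
db = get_service_client(DatabaseManagerClient)
user_info = db.get_user_info_by_id("user-1")
# -> {"id": "...", "name": "...", "email": "..."} per get_user_info_by_id above
```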
@@ -26,6 +26,7 @@ from backend.data.notifications import (
from backend.data.rabbitmq import SyncRabbitMQ
from backend.executor.utils import LogMetadata, create_execution_queue_config
from backend.notifications.notifications import queue_notification
from backend.server.v2.iffy.block_moderation import moderate_block_content
from backend.util.exceptions import InsufficientBalanceError

if TYPE_CHECKING:
@@ -1292,4 +1293,4 @@ def llprint(message: str):
    Low-level print/log helper function for use in signal handlers.
    Regular log/print statements are not allowed in signal handlers.
    """
    os.write(sys.stdout.fileno(), (message + "\n").encode())
    os.write(sys.stdout.fileno(), (message + "\n").encode())
@@ -25,6 +25,7 @@ import backend.server.routers.postmark.postmark
import backend.server.routers.v1
import backend.server.v2.admin.credit_admin_routes
import backend.server.v2.admin.store_admin_routes
import backend.server.v2.iffy.routes
import backend.server.v2.library.db
import backend.server.v2.library.model
import backend.server.v2.library.routes
@@ -205,6 +206,10 @@ app.include_router(
    prefix="/api/email",
)

app.include_router(
    backend.server.v2.iffy.routes.iffy_router, tags=["v2", "iffy"], prefix="/api/iffy"
)

app.mount("/external-api", external_app)
@@ -0,0 +1,63 @@
import logging
from typing import List, Tuple

from backend.data.block import BlockInput, BlockType, get_block
from backend.data.graph import GraphModel
from backend.server.v2.iffy.models import BlockContentForModeration
from backend.server.v2.iffy.service import IffyService
from backend.util.settings import BehaveAs, Settings

logger = logging.getLogger(__name__)
settings = Settings()


def moderate_block_content(
    graph_id: str,
    graph_exec_id: str,
    node_id: str,
    block_id: str,
    input_data: BlockInput,
    user_id: str,
) -> None:
    """
    Moderate the content of a single block before execution.

    Args:
        graph_id: The ID of the graph
        graph_exec_id: The ID of the graph execution
        node_id: The ID of the node being executed
        block_id: The ID of the block being executed
        input_data: Input data for the block
        user_id: The ID of the user running the block
    """
    if settings.config.behave_as == BehaveAs.LOCAL:
        return

    try:
        block = get_block(block_id)
        if not block or block.block_type == BlockType.NOTE:
            return

        block_content = BlockContentForModeration(
            graph_id=graph_id,
            graph_exec_id=graph_exec_id,
            node_id=node_id,
            block_id=block.id,
            block_name=block.name,
            block_type=block.block_type.value,
            input_data=input_data,
        )

        # Send to Iffy for moderation
        result = IffyService.moderate_content(user_id, block_content)

        # CRITICAL: Ensure we never proceed if moderation fails
        if not result.is_safe:
            logger.error(
                f"Content moderation failed for {block.name}: {result.reason}"
            )
            raise ValueError(
                f"Content moderation failed for {block.name}: {result.reason}"
            )

    except Exception as e:
        logger.error(f"Error during content moderation: {str(e)}")
        raise ValueError("Content moderation system error")
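The executor gains the `moderate_block_content` import earlier in this diff; a hypothetical pre-execution hook (all IDs and the input payload are placeholders, and the surrounding error handling is illustrative rather than the actual executor wiring) would look like:

```python
from backend.server.v2.iffy.block_moderation import moderate_block_content

# Hypothetical pre-execution hook; all argument values are placeholders.
try:
    moderate_block_content(
        graph_id="graph-7",
        graph_exec_id="exec-123",
        node_id="node-1",
        block_id="block-42",
        input_data={"url": "https://example.com"},
        user_id="user-1",
    )
except ValueError as err:
    # Flagged content (or a moderation system error) surfaces as a ValueError,
    # so the caller must abort the node before the block runs.
    print(f"Node blocked by moderation: {err}")
```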
autogpt_platform/backend/backend/server/v2/iffy/models.py (new file, 52 lines)
@@ -0,0 +1,52 @@
from enum import Enum
from typing import Any, Dict, Optional, TypedDict

from pydantic import BaseModel


class EventType(str, Enum):
    RECORD_FLAGGED = "record.flagged"
    RECORD_COMPLIANT = "record.compliant"
    RECORD_UNFLAGGED = "record.unflagged"
    USER_SUSPENDED = "user.suspended"
    USER_UNSUSPENDED = "user.unsuspended"
    USER_BANNED = "user.banned"
    USER_UNBANNED = "user.unbanned"
    USER_COMPLIANT = "user.compliant"


class IffyWebhookEvent(BaseModel):
    event: EventType
    payload: Dict[str, Any]
    timestamp: str


class UserData(TypedDict):
    clientId: str
    email: Optional[str]
    name: Optional[str]
    username: Optional[str]


class IffyPayload(BaseModel):
    clientId: str
    name: str
    entity: str = "block_execution"
    metadata: Dict[str, Any]
    content: Dict[str, Any]
    user: Dict[str, Any]


class ModerationResult(BaseModel):
    is_safe: bool
    reason: str


class BlockContentForModeration(BaseModel):
    graph_id: str
    graph_exec_id: str
    node_id: str
    block_id: str
    block_name: str
    block_type: str
    input_data: Dict[str, Any]
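As a sanity check, a flagged-record webhook body (all values below are invented) round-trips through these models the same way the route handler parses it:

```python
# Illustrative webhook body; field values are made up.
raw = (
    b'{"event": "record.flagged",'
    b' "payload": {"metadata": {"graphExecutionId": "exec-123", "nodeId": "node-1"}},'
    b' "timestamp": "2025-01-01T00:00:00Z"}'
)

event = IffyWebhookEvent.model_validate_json(raw)
assert event.event is EventType.RECORD_FLAGGED
assert event.payload["metadata"]["nodeId"] == "node-1"
```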
autogpt_platform/backend/backend/server/v2/iffy/routes.py (new file, 123 lines)
@@ -0,0 +1,123 @@
import logging

from autogpt_libs.auth.middleware import HMACValidator
from autogpt_libs.utils.cache import thread_cached
from fastapi import APIRouter, Depends, HTTPException, Request, Response

from backend.util.service import get_service_client
from backend.util.settings import Settings
from backend.server.routers.v1 import _cancel_execution

from .models import BlockContentForModeration, EventType, IffyWebhookEvent, UserData

logger = logging.getLogger(__name__)
settings = Settings()

iffy_router = APIRouter()

iffy_signature_validator = HMACValidator(
    header_name="X-Signature",
    secret=settings.secrets.iffy_webhook_secret,
    error_message="Invalid Iffy signature",
)


# Handles webhook events from Iffy, e.g. stopping an execution when a flagged block is detected.
async def handle_record_event(
    event_type: EventType, block_content: BlockContentForModeration
) -> Response:
    """Handle record-related webhook events.

    If a block is flagged, we stop the execution and log the event."""

    if event_type == EventType.RECORD_FLAGGED:
        logger.warning(
            f'Content flagged for node "{block_content.node_id}" ("{block_content.block_name}") '
            f'in execution "{block_content.graph_exec_id}"'
        )

        # Stop execution directly
        await _cancel_execution(block_content.graph_exec_id)
        logger.info(
            f'Successfully stopped execution "{block_content.graph_exec_id}" due to flagged content'
        )

        return Response(status_code=200)

    elif event_type in (EventType.RECORD_COMPLIANT, EventType.RECORD_UNFLAGGED):
        logger.info(
            f'Content cleared for node "{block_content.node_id}" in execution "{block_content.graph_exec_id}"'
        )

        return Response(status_code=200)


async def handle_user_event(event_type: EventType, user_payload: UserData) -> Response:
    """Handle user-related webhook events.

    For now we only log these events from Iffy and reply with a 200 status code
    to keep Iffy happy and prevent it from retrying the request.
    """

    # UserData is a TypedDict (a plain dict at runtime), so use key access
    user_id = user_payload.get("clientId")
    if not user_id:
        logger.error("Received user event without user ID")
        raise HTTPException(
            status_code=400,
            detail="Missing required field 'clientId' in user event payload",
        )

    status_updated_at = user_payload.get("statusUpdatedAt", "unknown time")
    status_updated_via = user_payload.get("statusUpdatedVia", "unknown method")

    event_messages = {
        EventType.USER_SUSPENDED: f'User "{user_id}" has been SUSPENDED via {status_updated_via} at {status_updated_at}',
        EventType.USER_UNSUSPENDED: f'User "{user_id}" has been UNSUSPENDED via {status_updated_via} at {status_updated_at}',
        EventType.USER_COMPLIANT: f'User "{user_id}" has been marked as COMPLIANT via {status_updated_via} at {status_updated_at}',
        # Users can only be manually banned and unbanned on the Iffy dashboard; for now we just log these events
        EventType.USER_BANNED: f'User "{user_id}" has been BANNED via {status_updated_via} at {status_updated_at}',
        EventType.USER_UNBANNED: f'User "{user_id}" has been UNBANNED via {status_updated_via} at {status_updated_at}',
    }

    if event_type in event_messages:
        log_message = event_messages[event_type]
        (
            logger.warning(log_message)
            if "suspended" in event_type or "banned" in event_type
            else logger.info(log_message)
        )

    return Response(status_code=200)


@iffy_router.post("/webhook")
async def handle_iffy_webhook(
    request: Request, _=Depends(iffy_signature_validator.get_dependency())
) -> Response:
    body = await request.body()
    try:
        event_data = IffyWebhookEvent.model_validate_json(body)
    except Exception as e:
        logger.error(f"Failed to parse Iffy webhook data: {e}")
        raise HTTPException(status_code=400, detail="Invalid request body")

    try:
        if event_data.event.startswith("record."):
            metadata = event_data.payload.get("metadata", {})
            block_content = BlockContentForModeration(
                graph_id=metadata.get("graphId", ""),
                graph_exec_id=metadata.get("graphExecutionId", ""),
                node_id=metadata.get("nodeId", ""),
                block_id=metadata.get("blockId", ""),
                block_name=metadata.get("blockName", "Unknown Block"),
                block_type=metadata.get("blockType", ""),
                input_data=metadata.get("inputData", {}),
            )
            return await handle_record_event(event_data.event, block_content)
        elif event_data.event.startswith("user."):
            # Create UserData from payload
            user_data = UserData(**event_data.payload)
            return await handle_user_event(event_data.event, user_data)
        else:
            logger.info(f"Received unhandled Iffy event: {event_data.event}")
            return Response(status_code=200)
    except Exception as e:
        if "not active/running" in str(e):
            return Response(status_code=200)
        raise HTTPException(status_code=200, detail=str(e))
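Since `iffy_router` is mounted under `/api/iffy` in the rest-API hunk above, a signed delivery to this handler can be sketched as follows (the base URL, port, secret value, and payload contents are all placeholders, not part of the diff):

```python
import hashlib
import hmac

import requests

# Hypothetical local delivery of an Iffy webhook; values are placeholders.
BASE_URL = "http://localhost:8006"
SECRET = "iffy-webhook-secret"

body = (
    b'{"event": "user.suspended",'
    b' "payload": {"clientId": "user-1", "statusUpdatedVia": "dashboard"},'
    b' "timestamp": "2025-01-01T00:00:00Z"}'
)
# Sign the raw body exactly as HMACValidator recomputes it on the server.
signature = hmac.new(SECRET.encode(), body, hashlib.sha256).hexdigest()

resp = requests.post(
    f"{BASE_URL}/api/iffy/webhook",
    data=body,
    headers={"X-Signature": signature, "Content-Type": "application/json"},
)
assert resp.status_code == 200
```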
autogpt_platform/backend/backend/server/v2/iffy/service.py (new file, 182 lines)
@@ -0,0 +1,182 @@
import logging

import requests
from autogpt_libs.utils.cache import thread_cached

from backend.util import json
from backend.util.openrouter import open_router_moderate_content
from backend.util.service import get_service_client
from backend.util.settings import BehaveAs, Settings

from .models import BlockContentForModeration, IffyPayload, ModerationResult, UserData

logger = logging.getLogger(__name__)
settings = Settings()


@thread_cached
def get_db():
    from backend.executor.database import DatabaseManagerClient

    return get_service_client(DatabaseManagerClient)


class IffyService:
    """Service class for handling content moderation through the Iffy API"""

    @staticmethod
    def get_user_data(user_id: str) -> UserData:
        """Get user data for the Iffy API from user_id"""
        # Initialize with default values
        user_data: UserData = {
            "clientId": user_id,
            "email": None,
            "name": None,
        }

        try:
            user = get_db().get_user_info_by_id(user_id)
            if user:
                user_data.update(
                    {
                        "id": user["id"],
                        "name": user["name"],
                        "email": user["email"],
                    }
                )
        except Exception as e:
            logger.warning(f"Failed to get user details for {user_id}: {str(e)}")

        return user_data

    @staticmethod
    def moderate_content(
        user_id: str, block_content: BlockContentForModeration
    ) -> ModerationResult:
        """
        Send block content to Iffy for content moderation.
        Only used in cloud mode - local mode skips moderation entirely.

        Args:
            user_id: The ID of the user executing the block
            block_content: The content of the block to be moderated (BlockContentForModeration model)

        Returns:
            ModerationResult: Result of the moderation check
        """

        IFFY_API_KEY = settings.secrets.iffy_api_key
        IFFY_API_URL = settings.secrets.iffy_api_url

        if settings.config.behave_as == BehaveAs.LOCAL:
            logger.info("Content moderation skipped - running in local mode")
            return ModerationResult(
                is_safe=True, reason="Moderation skipped - running in local mode"
            )

        # Validate Iffy API URL and key at the start
        if not IFFY_API_URL or not IFFY_API_KEY:
            logger.warning(
                "Iffy API URL or key not configured, falling back to OpenRouter moderation"
            )
            input_data = json.dumps(block_content.input_data)
            is_safe, reason = open_router_moderate_content(input_data)
            return ModerationResult(
                is_safe=is_safe,
                reason=f"Iffy not configured. OpenRouter result: {reason}",
            )

        try:
            # Validate URL format
            if not IFFY_API_URL.startswith(("http://", "https://")):
                logger.error(f"Invalid Iffy API URL format: {IFFY_API_URL}")
                input_data = json.dumps(block_content.input_data)
                is_safe, reason = open_router_moderate_content(input_data)
                return ModerationResult(
                    is_safe=is_safe, reason="Invalid Iffy API URL format"
                )

            headers = {
                "Authorization": f"Bearer {IFFY_API_KEY}",
                "Content-Type": "application/json",
            }

            input_data = json.dumps(block_content.input_data)
            user_data = IffyService.get_user_data(user_id)

            # Prepare the metadata
            metadata = {
                "graphId": str(block_content.graph_id),
                "graphExecutionId": str(block_content.graph_exec_id),
                "nodeId": str(block_content.node_id),
                "blockId": str(block_content.block_id),
                "blockName": str(block_content.block_name),
            }

            name = f"{block_content.block_name}-{block_content.block_id}"
            graph_execution_id = (
                f"{block_content.graph_exec_id}-{block_content.node_id}"
            )

            # Create the payload
            payload = IffyPayload(
                clientId=graph_execution_id,
                name=name,
                metadata=metadata,
                content={"text": input_data, "imageUrls": []},
                # Only include user data values that are not None
                user={k: v for k, v in user_data.items() if v is not None},
            )

            logger.info(
                f"Sending content to Iffy for moderation - User: {user_data['name'] or user_id}, Block: {name}"
            )

            base_url = IFFY_API_URL.rstrip("/")
            api_path = "/api/v1/ingest"
            response = requests.post(
                f"{base_url}{api_path}", json=payload.model_dump(), headers=headers
            )

            if response.status_code != 200:
                logger.info(
                    f"Iffy moderation failed, falling back to OpenRouter. Status: {response.status_code}, Response: {response.text}"
                )
                is_safe, reason = open_router_moderate_content(input_data)
                if is_safe:
                    logger.info(f"OpenRouter moderation passed. Block: {name}")
                else:
                    logger.info(
                        f"OpenRouter moderation flagged content. Block: {name}, Reason: {reason}"
                    )
                return ModerationResult(is_safe=is_safe, reason=reason)

            logger.info(f"Successfully sent content to Iffy. Block: {name}")
            return ModerationResult(is_safe=True, reason="")

        except Exception as e:
            logger.error(
                f"Error in primary moderation service: {str(e)}", exc_info=True
            )
            try:
                block_name = f"{block_content.block_name}-{block_content.block_id}"
                input_data = json.dumps(block_content.input_data)
                is_safe, reason = open_router_moderate_content(input_data)
                if is_safe:
                    logger.info(
                        f"OpenRouter moderation passed after Iffy failure. Block: {block_name}"
                    )
                else:
                    logger.warning(
                        f"OpenRouter moderation flagged content after Iffy failure. Block: {block_name}, Reason: {reason}"
                    )
                return ModerationResult(is_safe=is_safe, reason=reason)
            except Exception as e2:
                block_name = (
                    getattr(block_content, "block_name", "unknown")
                    + "-"
                    + str(getattr(block_content, "block_id", "unknown"))
                )
                reason = f"Both moderation services failed. Error: {str(e2)}"
                logger.error(f"{reason}. Block: {block_name}", exc_info=True)
                return ModerationResult(is_safe=False, reason=reason)
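For orientation, the JSON body this sends to the Iffy ingest endpoint looks roughly like the dict below; all values are made up, and the structure simply follows `IffyPayload` plus the metadata/content assembly above:

```python
# Illustrative shape of the body POSTed to f"{IFFY_API_URL}/api/v1/ingest"
# (values invented for demonstration only).
example_payload = {
    "clientId": "exec-123-node-1",        # f"{graph_exec_id}-{node_id}"
    "name": "ExampleBlock-block-42",      # f"{block_name}-{block_id}"
    "entity": "block_execution",
    "metadata": {
        "graphId": "graph-7",
        "graphExecutionId": "exec-123",
        "nodeId": "node-1",
        "blockId": "block-42",
        "blockName": "ExampleBlock",
    },
    "content": {"text": '{"url": "https://example.com"}', "imageUrls": []},
    "user": {"clientId": "user-1", "name": "Ada", "email": "ada@example.com"},
}
```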
autogpt_platform/backend/backend/util/openrouter.py (new file, 120 lines)
@@ -0,0 +1,120 @@
import logging
from typing import Tuple

import openai

from backend.util.settings import BehaveAs, Settings

logger = logging.getLogger(__name__)
settings = Settings()

MODERATION_PROMPT = """You are a content moderation AI. Your task is to analyze the following content and determine if it contains any inappropriate, harmful, or malicious content. Please respond with one of these exact words:
- FLAGGED: If the content contains harmful, inappropriate, or malicious content
- SAFE: If the content appears to be safe

Content to moderate:
{content}

Respond with only one word from the above choices."""


def open_router_moderate_content(
    content: str, user_id: str | None = None
) -> Tuple[bool, str]:
    """
    Use OpenRouter's API to moderate content using an LLM.
    Uses OpenRouter's auto-routing to select the best available model.

    Args:
        content: The content to be moderated
        user_id: Optional unique identifier for the user making the request

    Returns:
        Tuple[bool, str]: (is_safe, reason)
        - is_safe: True if content is safe, False if flagged
        - reason: The raw response from the LLM
    """
    api_key = settings.secrets.open_router_api_key

    if settings.config.behave_as == BehaveAs.LOCAL:
        logger.info("OpenRouter moderation skipped - running in local mode")
        return True, "Moderation skipped - running in local mode"

    # If API key is not configured, fail immediately
    if not api_key:
        logger.error("OpenRouter API key not configured")
        return False, "OpenRouter API key not configured"

    try:
        client = openai.OpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key=api_key,
        )

        # Retry up to 3 times on transient errors
        max_retries = 3
        for attempt in range(max_retries):
            try:
                response = client.chat.completions.create(
                    extra_headers={
                        "HTTP-Referer": "https://agpt.co",
                        "X-Title": "AutoGPT",
                        "X-User-Id": str(user_id) if user_id else "anonymous",
                    },
                    model="openrouter/auto",  # Use auto-routing for best available model
                    messages=[
                        {
                            "role": "system",
                            "content": "You are a content moderation AI. Respond only with FLAGGED or SAFE.",
                        },
                        {
                            "role": "user",
                            "content": MODERATION_PROMPT.format(content=content),
                        },
                    ],
                    max_tokens=10,
                    temperature=0.1,
                    timeout=10,
                )

                if not response.choices:
                    logger.error("No response from OpenRouter moderation")
                    return False, "No response from moderation service"

                result = response.choices[0].message.content.strip().upper()

                is_safe = result == "SAFE"
                if result not in ["SAFE", "FLAGGED"]:
                    logger.warning(f"Unexpected moderation response: {result}")

                return is_safe, result

            except openai.APITimeoutError:
                if attempt == max_retries - 1:
                    logger.error("OpenRouter moderation timed out after all retries")
                    return False, "Moderation service timeout"
                logger.warning(
                    f"OpenRouter timeout, attempt {attempt + 1} of {max_retries}"
                )
                continue

            except openai.APIConnectionError:
                if attempt == max_retries - 1:
                    logger.error("OpenRouter connection error after all retries")
                    return False, "Moderation service connection error"
                logger.warning(
                    f"OpenRouter connection error, attempt {attempt + 1} of {max_retries}"
                )
                continue

            except Exception as e:
                logger.error(
                    f"Unexpected error in OpenRouter moderation attempt {attempt + 1}: {str(e)}",
                    exc_info=True,
                )
                if attempt == max_retries - 1:
                    raise

    except Exception as e:
        logger.error(f"Error in OpenRouter moderation: {str(e)}", exc_info=True)
        return False, f"Moderation error: {str(e)}"
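A direct call to the fallback moderator is straightforward; a minimal usage sketch (the content string and user ID are placeholders, and this assumes cloud mode with an OpenRouter key configured):

```python
from backend.util.openrouter import open_router_moderate_content

# Hypothetical direct use of the fallback moderator; values are placeholders.
is_safe, reason = open_router_moderate_content(
    '{"text": "hello world"}', user_id="user-1"
)
if not is_safe:
    # reason carries the raw model response ("FLAGGED") or an error description
    print(f"Blocked: {reason}")
```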
@@ -459,6 +459,16 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
    smartlead_api_key: str = Field(default="", description="SmartLead API Key")
    zerobounce_api_key: str = Field(default="", description="ZeroBounce API Key")

    iffy_api_url: str = Field(default="", description="Iffy API URL")
    iffy_api_key: str = Field(
        default="",
        description="Iffy API Key",
    )
    iffy_webhook_secret: str = Field(
        default="",
        description="Iffy Webhook Secret",
    )

    # Add more secret fields as needed

    model_config = SettingsConfigDict(
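The new secrets can be supplied through the environment before `Settings()` is constructed. A minimal sketch (the variable names assume the usual pydantic-settings mapping of field name to case-insensitive environment variable, and all values are placeholders):

```python
import os

# Hypothetical configuration of the new Iffy secrets; values are placeholders.
os.environ["IFFY_API_URL"] = "https://api.iffy.example"
os.environ["IFFY_API_KEY"] = "iffy-api-key"
os.environ["IFFY_WEBHOOK_SECRET"] = "iffy-webhook-secret"

from backend.util.settings import Settings

secrets = Settings().secrets
assert secrets.iffy_api_url and secrets.iffy_api_key and secrets.iffy_webhook_secret
```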