mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-02-09 22:35:54 -05:00
Merge branch 'dev' into toran/open-2856-handle-failed-replicate-predictions-with-retries-in-all
This commit is contained in:
@@ -1,3 +1,4 @@
|
||||
import asyncio
|
||||
from enum import Enum
|
||||
from typing import Literal
|
||||
|
||||
@@ -20,7 +21,7 @@ from backend.data.model import (
|
||||
SchemaField,
|
||||
)
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.util.file import MediaFileType
|
||||
from backend.util.file import MediaFileType, store_media_file
|
||||
|
||||
|
||||
class GeminiImageModel(str, Enum):
|
||||
@@ -28,6 +29,20 @@ class GeminiImageModel(str, Enum):
|
||||
NANO_BANANA_PRO = "google/nano-banana-pro"
|
||||
|
||||
|
||||
class AspectRatio(str, Enum):
|
||||
MATCH_INPUT_IMAGE = "match_input_image"
|
||||
ASPECT_1_1 = "1:1"
|
||||
ASPECT_2_3 = "2:3"
|
||||
ASPECT_3_2 = "3:2"
|
||||
ASPECT_3_4 = "3:4"
|
||||
ASPECT_4_3 = "4:3"
|
||||
ASPECT_4_5 = "4:5"
|
||||
ASPECT_5_4 = "5:4"
|
||||
ASPECT_9_16 = "9:16"
|
||||
ASPECT_16_9 = "16:9"
|
||||
ASPECT_21_9 = "21:9"
|
||||
|
||||
|
||||
class OutputFormat(str, Enum):
|
||||
JPG = "jpg"
|
||||
PNG = "png"
|
||||
@@ -70,6 +85,11 @@ class AIImageCustomizerBlock(Block):
|
||||
default=[],
|
||||
title="Input Images",
|
||||
)
|
||||
aspect_ratio: AspectRatio = SchemaField(
|
||||
description="Aspect ratio of the generated image",
|
||||
default=AspectRatio.MATCH_INPUT_IMAGE,
|
||||
title="Aspect Ratio",
|
||||
)
|
||||
output_format: OutputFormat = SchemaField(
|
||||
description="Format of the output image",
|
||||
default=OutputFormat.PNG,
|
||||
@@ -93,6 +113,7 @@ class AIImageCustomizerBlock(Block):
|
||||
"prompt": "Make the scene more vibrant and colorful",
|
||||
"model": GeminiImageModel.NANO_BANANA,
|
||||
"images": [],
|
||||
"aspect_ratio": AspectRatio.MATCH_INPUT_IMAGE,
|
||||
"output_format": OutputFormat.JPG,
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
@@ -117,11 +138,25 @@ class AIImageCustomizerBlock(Block):
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
# Convert local file paths to Data URIs (base64) so Replicate can access them
|
||||
processed_images = await asyncio.gather(
|
||||
*(
|
||||
store_media_file(
|
||||
graph_exec_id=graph_exec_id,
|
||||
file=img,
|
||||
user_id=user_id,
|
||||
return_content=True,
|
||||
)
|
||||
for img in input_data.images
|
||||
)
|
||||
)
|
||||
|
||||
result = await self.run_model(
|
||||
api_key=credentials.api_key,
|
||||
model_name=input_data.model.value,
|
||||
prompt=input_data.prompt,
|
||||
images=input_data.images,
|
||||
images=processed_images,
|
||||
aspect_ratio=input_data.aspect_ratio.value,
|
||||
output_format=input_data.output_format.value,
|
||||
)
|
||||
yield "image_url", result
|
||||
@@ -134,12 +169,14 @@ class AIImageCustomizerBlock(Block):
|
||||
model_name: str,
|
||||
prompt: str,
|
||||
images: list[MediaFileType],
|
||||
aspect_ratio: str,
|
||||
output_format: str,
|
||||
) -> MediaFileType:
|
||||
client = ReplicateClient(api_token=api_key.get_secret_value())
|
||||
|
||||
input_params: dict = {
|
||||
"prompt": prompt,
|
||||
"aspect_ratio": aspect_ratio,
|
||||
"output_format": output_format,
|
||||
}
|
||||
|
||||
|
||||
198
autogpt_platform/backend/backend/blocks/google/_drive.py
Normal file
198
autogpt_platform/backend/backend/blocks/google/_drive.py
Normal file
@@ -0,0 +1,198 @@
|
||||
import asyncio
|
||||
import mimetypes
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
from typing import Any, Literal, Optional
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
from backend.data.model import SchemaField
|
||||
from backend.util.file import get_exec_file_path
|
||||
from backend.util.request import Requests
|
||||
from backend.util.type import MediaFileType
|
||||
from backend.util.virus_scanner import scan_content_safe
|
||||
|
||||
AttachmentView = Literal[
|
||||
"DOCS",
|
||||
"DOCUMENTS",
|
||||
"SPREADSHEETS",
|
||||
"PRESENTATIONS",
|
||||
"DOCS_IMAGES",
|
||||
"FOLDERS",
|
||||
]
|
||||
ATTACHMENT_VIEWS: tuple[AttachmentView, ...] = (
|
||||
"DOCS",
|
||||
"DOCUMENTS",
|
||||
"SPREADSHEETS",
|
||||
"PRESENTATIONS",
|
||||
"DOCS_IMAGES",
|
||||
"FOLDERS",
|
||||
)
|
||||
|
||||
|
||||
class GoogleDriveFile(BaseModel):
|
||||
"""Represents a single file/folder picked from Google Drive"""
|
||||
|
||||
model_config = ConfigDict(populate_by_name=True)
|
||||
|
||||
id: str = Field(description="Google Drive file/folder ID")
|
||||
name: Optional[str] = Field(None, description="File/folder name")
|
||||
mime_type: Optional[str] = Field(
|
||||
None,
|
||||
alias="mimeType",
|
||||
description="MIME type (e.g., application/vnd.google-apps.document)",
|
||||
)
|
||||
url: Optional[str] = Field(None, description="URL to open the file")
|
||||
icon_url: Optional[str] = Field(None, alias="iconUrl", description="Icon URL")
|
||||
is_folder: Optional[bool] = Field(
|
||||
None, alias="isFolder", description="Whether this is a folder"
|
||||
)
|
||||
|
||||
|
||||
def GoogleDrivePickerField(
|
||||
multiselect: bool = False,
|
||||
allow_folder_selection: bool = False,
|
||||
allowed_views: Optional[list[AttachmentView]] = None,
|
||||
allowed_mime_types: Optional[list[str]] = None,
|
||||
scopes: Optional[list[str]] = None,
|
||||
title: Optional[str] = None,
|
||||
description: Optional[str] = None,
|
||||
placeholder: Optional[str] = None,
|
||||
**kwargs,
|
||||
) -> Any:
|
||||
"""
|
||||
Creates a Google Drive Picker input field.
|
||||
|
||||
Args:
|
||||
multiselect: Allow selecting multiple files/folders (default: False)
|
||||
allow_folder_selection: Allow selecting folders (default: False)
|
||||
allowed_views: List of view types to show in picker (default: ["DOCS"])
|
||||
allowed_mime_types: Filter by MIME types (e.g., ["application/pdf"])
|
||||
title: Field title shown in UI
|
||||
description: Field description/help text
|
||||
placeholder: Placeholder text for the button
|
||||
**kwargs: Additional SchemaField arguments (advanced, hidden, etc.)
|
||||
|
||||
Returns:
|
||||
Field definition that produces:
|
||||
- Single GoogleDriveFile when multiselect=False
|
||||
- list[GoogleDriveFile] when multiselect=True
|
||||
|
||||
Example:
|
||||
>>> class MyBlock(Block):
|
||||
... class Input(BlockSchema):
|
||||
... document: GoogleDriveFile = GoogleDrivePickerField(
|
||||
... title="Select Document",
|
||||
... allowed_views=["DOCUMENTS"],
|
||||
... )
|
||||
...
|
||||
... files: list[GoogleDriveFile] = GoogleDrivePickerField(
|
||||
... title="Select Multiple Files",
|
||||
... multiselect=True,
|
||||
... allow_folder_selection=True,
|
||||
... )
|
||||
"""
|
||||
# Build configuration that will be sent to frontend
|
||||
picker_config = {
|
||||
"multiselect": multiselect,
|
||||
"allow_folder_selection": allow_folder_selection,
|
||||
"allowed_views": list(allowed_views) if allowed_views else ["DOCS"],
|
||||
}
|
||||
|
||||
# Add optional configurations
|
||||
if allowed_mime_types:
|
||||
picker_config["allowed_mime_types"] = list(allowed_mime_types)
|
||||
|
||||
# Determine required scopes based on config
|
||||
base_scopes = scopes if scopes is not None else []
|
||||
picker_scopes: set[str] = set(base_scopes)
|
||||
if allow_folder_selection:
|
||||
picker_scopes.add("https://www.googleapis.com/auth/drive")
|
||||
else:
|
||||
# Use drive.file for minimal scope - only access files selected by user in picker
|
||||
picker_scopes.add("https://www.googleapis.com/auth/drive.file")
|
||||
|
||||
views = set(allowed_views or [])
|
||||
if "SPREADSHEETS" in views:
|
||||
picker_scopes.add("https://www.googleapis.com/auth/spreadsheets.readonly")
|
||||
if "DOCUMENTS" in views or "DOCS" in views:
|
||||
picker_scopes.add("https://www.googleapis.com/auth/documents.readonly")
|
||||
|
||||
picker_config["scopes"] = sorted(picker_scopes)
|
||||
|
||||
# Set appropriate default value
|
||||
default_value = [] if multiselect else None
|
||||
|
||||
# Use SchemaField to handle format properly
|
||||
return SchemaField(
|
||||
default=default_value,
|
||||
title=title,
|
||||
description=description,
|
||||
placeholder=placeholder or "Choose from Google Drive",
|
||||
format="google-drive-picker",
|
||||
advanced=False,
|
||||
json_schema_extra={
|
||||
"google_drive_picker_config": picker_config,
|
||||
**kwargs,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
DRIVE_API_URL = "https://www.googleapis.com/drive/v3/files"
|
||||
_requests = Requests(trusted_origins=["https://www.googleapis.com"])
|
||||
|
||||
|
||||
def GoogleDriveAttachmentField(
|
||||
*,
|
||||
title: str,
|
||||
description: str | None = None,
|
||||
placeholder: str | None = None,
|
||||
multiselect: bool = True,
|
||||
allowed_mime_types: list[str] | None = None,
|
||||
**extra: Any,
|
||||
) -> Any:
|
||||
return GoogleDrivePickerField(
|
||||
multiselect=multiselect,
|
||||
allowed_views=list(ATTACHMENT_VIEWS),
|
||||
allowed_mime_types=allowed_mime_types,
|
||||
title=title,
|
||||
description=description,
|
||||
placeholder=placeholder or "Choose files from Google Drive",
|
||||
**extra,
|
||||
)
|
||||
|
||||
|
||||
async def drive_file_to_media_file(
|
||||
drive_file: GoogleDriveFile, *, graph_exec_id: str, access_token: str
|
||||
) -> MediaFileType:
|
||||
if drive_file.is_folder:
|
||||
raise ValueError("Google Drive selection must be a file.")
|
||||
if not access_token:
|
||||
raise ValueError("Google Drive access token is required for file download.")
|
||||
|
||||
url = f"{DRIVE_API_URL}/{drive_file.id}?alt=media"
|
||||
response = await _requests.get(
|
||||
url, headers={"Authorization": f"Bearer {access_token}"}
|
||||
)
|
||||
|
||||
mime_type = drive_file.mime_type or response.headers.get(
|
||||
"content-type", "application/octet-stream"
|
||||
)
|
||||
|
||||
MAX_FILE_SIZE = 100 * 1024 * 1024
|
||||
if len(response.content) > MAX_FILE_SIZE:
|
||||
raise ValueError(
|
||||
f"File too large: {len(response.content)} bytes > {MAX_FILE_SIZE} bytes"
|
||||
)
|
||||
|
||||
base_path = Path(get_exec_file_path(graph_exec_id, ""))
|
||||
base_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
extension = mimetypes.guess_extension(mime_type, strict=False) or ".bin"
|
||||
filename = f"{uuid.uuid4()}{extension}"
|
||||
target_path = base_path / filename
|
||||
|
||||
await scan_content_safe(response.content, filename=filename)
|
||||
await asyncio.to_thread(target_path.write_bytes, response.content)
|
||||
|
||||
return MediaFileType(str(target_path.relative_to(base_path)))
|
||||
File diff suppressed because it is too large
Load Diff
160
autogpt_platform/backend/backend/blocks/human_in_the_loop.py
Normal file
160
autogpt_platform/backend/backend/blocks/human_in_the_loop.py
Normal file
@@ -0,0 +1,160 @@
|
||||
import logging
|
||||
from typing import Any, Literal
|
||||
|
||||
from prisma.enums import ReviewStatus
|
||||
|
||||
from backend.data.block import (
|
||||
Block,
|
||||
BlockCategory,
|
||||
BlockOutput,
|
||||
BlockSchemaInput,
|
||||
BlockSchemaOutput,
|
||||
)
|
||||
from backend.data.execution import ExecutionStatus
|
||||
from backend.data.human_review import ReviewResult
|
||||
from backend.data.model import SchemaField
|
||||
from backend.executor.manager import async_update_node_execution_status
|
||||
from backend.util.clients import get_database_manager_async_client
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class HumanInTheLoopBlock(Block):
|
||||
"""
|
||||
This block pauses execution and waits for human approval or modification of the data.
|
||||
|
||||
When executed, it creates a pending review entry and sets the node execution status
|
||||
to REVIEW. The execution will remain paused until a human user either:
|
||||
- Approves the data (with or without modifications)
|
||||
- Rejects the data
|
||||
|
||||
This is useful for workflows that require human validation or intervention before
|
||||
proceeding to the next steps.
|
||||
"""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
data: Any = SchemaField(description="The data to be reviewed by a human user")
|
||||
name: str = SchemaField(
|
||||
description="A descriptive name for what this data represents",
|
||||
)
|
||||
editable: bool = SchemaField(
|
||||
description="Whether the human reviewer can edit the data",
|
||||
default=True,
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
reviewed_data: Any = SchemaField(
|
||||
description="The data after human review (may be modified)"
|
||||
)
|
||||
status: Literal["approved", "rejected"] = SchemaField(
|
||||
description="Status of the review: 'approved' or 'rejected'"
|
||||
)
|
||||
review_message: str = SchemaField(
|
||||
description="Any message provided by the reviewer", default=""
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d",
|
||||
description="Pause execution and wait for human approval or modification of data",
|
||||
categories={BlockCategory.BASIC},
|
||||
input_schema=HumanInTheLoopBlock.Input,
|
||||
output_schema=HumanInTheLoopBlock.Output,
|
||||
test_input={
|
||||
"data": {"name": "John Doe", "age": 30},
|
||||
"name": "User profile data",
|
||||
"editable": True,
|
||||
},
|
||||
test_output=[
|
||||
("reviewed_data", {"name": "John Doe", "age": 30}),
|
||||
("status", "approved"),
|
||||
("review_message", ""),
|
||||
],
|
||||
test_mock={
|
||||
"get_or_create_human_review": lambda *_args, **_kwargs: ReviewResult(
|
||||
data={"name": "John Doe", "age": 30},
|
||||
status=ReviewStatus.APPROVED,
|
||||
message="",
|
||||
processed=False,
|
||||
node_exec_id="test-node-exec-id",
|
||||
),
|
||||
"update_node_execution_status": lambda *_args, **_kwargs: None,
|
||||
},
|
||||
)
|
||||
|
||||
async def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
user_id: str,
|
||||
node_exec_id: str,
|
||||
graph_exec_id: str,
|
||||
graph_id: str,
|
||||
graph_version: int,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
"""
|
||||
Execute the Human In The Loop block.
|
||||
|
||||
This method uses one function to handle the complete workflow - checking existing reviews
|
||||
and creating pending ones as needed.
|
||||
"""
|
||||
try:
|
||||
logger.debug(f"HITL block executing for node {node_exec_id}")
|
||||
|
||||
# Use the data layer to handle the complete workflow
|
||||
db_client = get_database_manager_async_client()
|
||||
result = await db_client.get_or_create_human_review(
|
||||
user_id=user_id,
|
||||
node_exec_id=node_exec_id,
|
||||
graph_exec_id=graph_exec_id,
|
||||
graph_id=graph_id,
|
||||
graph_version=graph_version,
|
||||
input_data=input_data.data,
|
||||
message=input_data.name,
|
||||
editable=input_data.editable,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Error in HITL block for node {node_exec_id}: {str(e)}")
|
||||
raise
|
||||
|
||||
# Check if we're waiting for human input
|
||||
if result is None:
|
||||
logger.info(
|
||||
f"HITL block pausing execution for node {node_exec_id} - awaiting human review"
|
||||
)
|
||||
try:
|
||||
# Set node status to REVIEW so execution manager can't mark it as COMPLETED
|
||||
# The VALID_STATUS_TRANSITIONS will then prevent any unwanted status changes
|
||||
# Use the proper wrapper function to ensure websocket events are published
|
||||
await async_update_node_execution_status(
|
||||
db_client=db_client,
|
||||
exec_id=node_exec_id,
|
||||
status=ExecutionStatus.REVIEW,
|
||||
)
|
||||
# Execution pauses here until API routes process the review
|
||||
return
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Failed to update node status for HITL block {node_exec_id}: {str(e)}"
|
||||
)
|
||||
raise
|
||||
|
||||
# Review is complete (approved or rejected) - check if unprocessed
|
||||
if not result.processed:
|
||||
# Mark as processed before yielding
|
||||
await db_client.update_review_processed_status(
|
||||
node_exec_id=node_exec_id, processed=True
|
||||
)
|
||||
|
||||
if result.status == ReviewStatus.APPROVED:
|
||||
yield "status", "approved"
|
||||
yield "reviewed_data", result.data
|
||||
if result.message:
|
||||
yield "review_message", result.message
|
||||
|
||||
elif result.status == ReviewStatus.REJECTED:
|
||||
yield "status", "rejected"
|
||||
if result.message:
|
||||
yield "review_message", result.message
|
||||
@@ -5,6 +5,8 @@ from datetime import datetime
|
||||
from faker import Faker
|
||||
from prisma import Prisma
|
||||
|
||||
from backend.data.db import query_raw_with_schema
|
||||
|
||||
faker = Faker()
|
||||
|
||||
|
||||
@@ -15,9 +17,9 @@ async def check_cron_job(db):
|
||||
|
||||
try:
|
||||
# Check if pg_cron extension exists
|
||||
extension_check = await db.query_raw("CREATE EXTENSION pg_cron;")
|
||||
extension_check = await query_raw_with_schema("CREATE EXTENSION pg_cron;")
|
||||
print(extension_check)
|
||||
extension_check = await db.query_raw(
|
||||
extension_check = await query_raw_with_schema(
|
||||
"SELECT COUNT(*) as count FROM pg_extension WHERE extname = 'pg_cron'"
|
||||
)
|
||||
if extension_check[0]["count"] == 0:
|
||||
@@ -25,7 +27,7 @@ async def check_cron_job(db):
|
||||
return False
|
||||
|
||||
# Check if the refresh job exists
|
||||
job_check = await db.query_raw(
|
||||
job_check = await query_raw_with_schema(
|
||||
"""
|
||||
SELECT jobname, schedule, command
|
||||
FROM cron.job
|
||||
@@ -55,33 +57,33 @@ async def get_materialized_view_counts(db):
|
||||
print("-" * 40)
|
||||
|
||||
# Get counts from mv_agent_run_counts
|
||||
agent_runs = await db.query_raw(
|
||||
agent_runs = await query_raw_with_schema(
|
||||
"""
|
||||
SELECT COUNT(*) as total_agents,
|
||||
SUM(run_count) as total_runs,
|
||||
MAX(run_count) as max_runs,
|
||||
MIN(run_count) as min_runs
|
||||
FROM mv_agent_run_counts
|
||||
FROM {schema_prefix}mv_agent_run_counts
|
||||
"""
|
||||
)
|
||||
|
||||
# Get counts from mv_review_stats
|
||||
review_stats = await db.query_raw(
|
||||
review_stats = await query_raw_with_schema(
|
||||
"""
|
||||
SELECT COUNT(*) as total_listings,
|
||||
SUM(review_count) as total_reviews,
|
||||
AVG(avg_rating) as overall_avg_rating
|
||||
FROM mv_review_stats
|
||||
FROM {schema_prefix}mv_review_stats
|
||||
"""
|
||||
)
|
||||
|
||||
# Get sample data from StoreAgent view
|
||||
store_agents = await db.query_raw(
|
||||
store_agents = await query_raw_with_schema(
|
||||
"""
|
||||
SELECT COUNT(*) as total_store_agents,
|
||||
AVG(runs) as avg_runs,
|
||||
AVG(rating) as avg_rating
|
||||
FROM "StoreAgent"
|
||||
FROM {schema_prefix}"StoreAgent"
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
@@ -5,6 +5,8 @@ import asyncio
|
||||
|
||||
from prisma import Prisma
|
||||
|
||||
from backend.data.db import query_raw_with_schema
|
||||
|
||||
|
||||
async def check_store_data(db):
|
||||
"""Check what store data exists in the database."""
|
||||
@@ -89,11 +91,11 @@ async def check_store_data(db):
|
||||
sa.creator_username,
|
||||
sa.categories,
|
||||
sa.updated_at
|
||||
FROM "StoreAgent" sa
|
||||
FROM {schema_prefix}"StoreAgent" sa
|
||||
LIMIT 10;
|
||||
"""
|
||||
|
||||
store_agents = await db.query_raw(query)
|
||||
store_agents = await query_raw_with_schema(query)
|
||||
print(f"Total store agents in view: {len(store_agents)}")
|
||||
|
||||
if store_agents:
|
||||
@@ -111,22 +113,22 @@ async def check_store_data(db):
|
||||
# Check for any APPROVED store listing versions
|
||||
query = """
|
||||
SELECT COUNT(*) as count
|
||||
FROM "StoreListingVersion"
|
||||
FROM {schema_prefix}"StoreListingVersion"
|
||||
WHERE "submissionStatus" = 'APPROVED'
|
||||
"""
|
||||
|
||||
result = await db.query_raw(query)
|
||||
result = await query_raw_with_schema(query)
|
||||
approved_count = result[0]["count"] if result else 0
|
||||
print(f"Approved store listing versions: {approved_count}")
|
||||
|
||||
# Check for store listings with hasApprovedVersion = true
|
||||
query = """
|
||||
SELECT COUNT(*) as count
|
||||
FROM "StoreListing"
|
||||
FROM {schema_prefix}"StoreListing"
|
||||
WHERE "hasApprovedVersion" = true AND "isDeleted" = false
|
||||
"""
|
||||
|
||||
result = await db.query_raw(query)
|
||||
result = await query_raw_with_schema(query)
|
||||
has_approved_count = result[0]["count"] if result else 0
|
||||
print(f"Store listings with approved versions: {has_approved_count}")
|
||||
|
||||
@@ -134,10 +136,10 @@ async def check_store_data(db):
|
||||
query = """
|
||||
SELECT COUNT(DISTINCT "agentGraphId") as unique_agents,
|
||||
COUNT(*) as total_executions
|
||||
FROM "AgentGraphExecution"
|
||||
FROM {schema_prefix}"AgentGraphExecution"
|
||||
"""
|
||||
|
||||
result = await db.query_raw(query)
|
||||
result = await query_raw_with_schema(query)
|
||||
if result:
|
||||
print("\nAgent Graph Executions:")
|
||||
print(f" Unique agents with executions: {result[0]['unique_agents']}")
|
||||
|
||||
@@ -73,6 +73,7 @@ async def test_block_credit_usage(server: SpinTestServer):
|
||||
NodeExecutionEntry(
|
||||
user_id=DEFAULT_USER_ID,
|
||||
graph_id="test_graph",
|
||||
graph_version=1,
|
||||
node_id="test_node",
|
||||
graph_exec_id="test_graph_exec",
|
||||
node_exec_id="test_node_exec",
|
||||
@@ -94,6 +95,7 @@ async def test_block_credit_usage(server: SpinTestServer):
|
||||
NodeExecutionEntry(
|
||||
user_id=DEFAULT_USER_ID,
|
||||
graph_id="test_graph",
|
||||
graph_version=1,
|
||||
node_id="test_node",
|
||||
graph_exec_id="test_graph_exec",
|
||||
node_exec_id="test_node_exec",
|
||||
|
||||
@@ -34,6 +34,7 @@ from prisma.types import (
|
||||
AgentNodeExecutionKeyValueDataCreateInput,
|
||||
AgentNodeExecutionUpdateInput,
|
||||
AgentNodeExecutionWhereInput,
|
||||
AgentNodeExecutionWhereUniqueInput,
|
||||
)
|
||||
from pydantic import BaseModel, ConfigDict, JsonValue, ValidationError
|
||||
from pydantic.fields import Field
|
||||
@@ -96,11 +97,14 @@ NodesInputMasks = Mapping[str, NodeInputMask]
|
||||
VALID_STATUS_TRANSITIONS = {
|
||||
ExecutionStatus.QUEUED: [
|
||||
ExecutionStatus.INCOMPLETE,
|
||||
ExecutionStatus.TERMINATED, # For resuming halted execution
|
||||
ExecutionStatus.REVIEW, # For resuming after review
|
||||
],
|
||||
ExecutionStatus.RUNNING: [
|
||||
ExecutionStatus.INCOMPLETE,
|
||||
ExecutionStatus.QUEUED,
|
||||
ExecutionStatus.TERMINATED, # For resuming halted execution
|
||||
ExecutionStatus.REVIEW, # For resuming after review
|
||||
],
|
||||
ExecutionStatus.COMPLETED: [
|
||||
ExecutionStatus.RUNNING,
|
||||
@@ -109,11 +113,16 @@ VALID_STATUS_TRANSITIONS = {
|
||||
ExecutionStatus.INCOMPLETE,
|
||||
ExecutionStatus.QUEUED,
|
||||
ExecutionStatus.RUNNING,
|
||||
ExecutionStatus.REVIEW,
|
||||
],
|
||||
ExecutionStatus.TERMINATED: [
|
||||
ExecutionStatus.INCOMPLETE,
|
||||
ExecutionStatus.QUEUED,
|
||||
ExecutionStatus.RUNNING,
|
||||
ExecutionStatus.REVIEW,
|
||||
],
|
||||
ExecutionStatus.REVIEW: [
|
||||
ExecutionStatus.RUNNING,
|
||||
],
|
||||
}
|
||||
|
||||
@@ -446,6 +455,7 @@ class NodeExecutionResult(BaseModel):
|
||||
user_id=self.user_id,
|
||||
graph_exec_id=self.graph_exec_id,
|
||||
graph_id=self.graph_id,
|
||||
graph_version=self.graph_version,
|
||||
node_exec_id=self.node_exec_id,
|
||||
node_id=self.node_id,
|
||||
block_id=self.block_id,
|
||||
@@ -728,7 +738,7 @@ async def upsert_execution_input(
|
||||
input_name: str,
|
||||
input_data: JsonValue,
|
||||
node_exec_id: str | None = None,
|
||||
) -> tuple[str, BlockInput]:
|
||||
) -> tuple[NodeExecutionResult, BlockInput]:
|
||||
"""
|
||||
Insert AgentNodeExecutionInputOutput record for as one of AgentNodeExecution.Input.
|
||||
If there is no AgentNodeExecution that has no `input_name` as input, create new one.
|
||||
@@ -761,7 +771,7 @@ async def upsert_execution_input(
|
||||
existing_execution = await AgentNodeExecution.prisma().find_first(
|
||||
where=existing_exec_query_filter,
|
||||
order={"addedTime": "asc"},
|
||||
include={"Input": True},
|
||||
include={"Input": True, "GraphExecution": True},
|
||||
)
|
||||
json_input_data = SafeJson(input_data)
|
||||
|
||||
@@ -773,7 +783,7 @@ async def upsert_execution_input(
|
||||
referencedByInputExecId=existing_execution.id,
|
||||
)
|
||||
)
|
||||
return existing_execution.id, {
|
||||
return NodeExecutionResult.from_db(existing_execution), {
|
||||
**{
|
||||
input_data.name: type_utils.convert(input_data.data, JsonValue)
|
||||
for input_data in existing_execution.Input or []
|
||||
@@ -788,9 +798,10 @@ async def upsert_execution_input(
|
||||
agentGraphExecutionId=graph_exec_id,
|
||||
executionStatus=ExecutionStatus.INCOMPLETE,
|
||||
Input={"create": {"name": input_name, "data": json_input_data}},
|
||||
)
|
||||
),
|
||||
include={"GraphExecution": True},
|
||||
)
|
||||
return result.id, {input_name: input_data}
|
||||
return NodeExecutionResult.from_db(result), {input_name: input_data}
|
||||
|
||||
else:
|
||||
raise ValueError(
|
||||
@@ -886,9 +897,25 @@ async def update_node_execution_status_batch(
|
||||
node_exec_ids: list[str],
|
||||
status: ExecutionStatus,
|
||||
stats: dict[str, Any] | None = None,
|
||||
):
|
||||
await AgentNodeExecution.prisma().update_many(
|
||||
where={"id": {"in": node_exec_ids}},
|
||||
) -> int:
|
||||
# Validate status transitions - allowed_from should never be empty for valid statuses
|
||||
allowed_from = VALID_STATUS_TRANSITIONS.get(status, [])
|
||||
if not allowed_from:
|
||||
raise ValueError(
|
||||
f"Invalid status transition: {status} has no valid source statuses"
|
||||
)
|
||||
|
||||
# For batch updates, we filter to only update nodes with valid current statuses
|
||||
where_clause = cast(
|
||||
AgentNodeExecutionWhereInput,
|
||||
{
|
||||
"id": {"in": node_exec_ids},
|
||||
"executionStatus": {"in": [s.value for s in allowed_from]},
|
||||
},
|
||||
)
|
||||
|
||||
return await AgentNodeExecution.prisma().update_many(
|
||||
where=where_clause,
|
||||
data=_get_update_status_data(status, None, stats),
|
||||
)
|
||||
|
||||
@@ -902,15 +929,32 @@ async def update_node_execution_status(
|
||||
if status == ExecutionStatus.QUEUED and execution_data is None:
|
||||
raise ValueError("Execution data must be provided when queuing an execution.")
|
||||
|
||||
res = await AgentNodeExecution.prisma().update(
|
||||
where={"id": node_exec_id},
|
||||
# Validate status transitions - allowed_from should never be empty for valid statuses
|
||||
allowed_from = VALID_STATUS_TRANSITIONS.get(status, [])
|
||||
if not allowed_from:
|
||||
raise ValueError(
|
||||
f"Invalid status transition: {status} has no valid source statuses"
|
||||
)
|
||||
|
||||
if res := await AgentNodeExecution.prisma().update(
|
||||
where=cast(
|
||||
AgentNodeExecutionWhereUniqueInput,
|
||||
{
|
||||
"id": node_exec_id,
|
||||
"executionStatus": {"in": [s.value for s in allowed_from]},
|
||||
},
|
||||
),
|
||||
data=_get_update_status_data(status, execution_data, stats),
|
||||
include=EXECUTION_RESULT_INCLUDE,
|
||||
)
|
||||
if not res:
|
||||
raise ValueError(f"Execution {node_exec_id} not found.")
|
||||
):
|
||||
return NodeExecutionResult.from_db(res)
|
||||
|
||||
return NodeExecutionResult.from_db(res)
|
||||
if res := await AgentNodeExecution.prisma().find_unique(
|
||||
where={"id": node_exec_id}, include=EXECUTION_RESULT_INCLUDE
|
||||
):
|
||||
return NodeExecutionResult.from_db(res)
|
||||
|
||||
raise ValueError(f"Execution {node_exec_id} not found.")
|
||||
|
||||
|
||||
def _get_update_status_data(
|
||||
@@ -964,17 +1008,17 @@ async def get_node_execution(node_exec_id: str) -> NodeExecutionResult | None:
|
||||
return NodeExecutionResult.from_db(execution)
|
||||
|
||||
|
||||
async def get_node_executions(
|
||||
def _build_node_execution_where_clause(
|
||||
graph_exec_id: str | None = None,
|
||||
node_id: str | None = None,
|
||||
block_ids: list[str] | None = None,
|
||||
statuses: list[ExecutionStatus] | None = None,
|
||||
limit: int | None = None,
|
||||
created_time_gte: datetime | None = None,
|
||||
created_time_lte: datetime | None = None,
|
||||
include_exec_data: bool = True,
|
||||
) -> list[NodeExecutionResult]:
|
||||
"""⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints."""
|
||||
) -> AgentNodeExecutionWhereInput:
|
||||
"""
|
||||
Build where clause for node execution queries.
|
||||
"""
|
||||
where_clause: AgentNodeExecutionWhereInput = {}
|
||||
if graph_exec_id:
|
||||
where_clause["agentGraphExecutionId"] = graph_exec_id
|
||||
@@ -991,6 +1035,29 @@ async def get_node_executions(
|
||||
"lte": created_time_lte or datetime.max.replace(tzinfo=timezone.utc),
|
||||
}
|
||||
|
||||
return where_clause
|
||||
|
||||
|
||||
async def get_node_executions(
|
||||
graph_exec_id: str | None = None,
|
||||
node_id: str | None = None,
|
||||
block_ids: list[str] | None = None,
|
||||
statuses: list[ExecutionStatus] | None = None,
|
||||
limit: int | None = None,
|
||||
created_time_gte: datetime | None = None,
|
||||
created_time_lte: datetime | None = None,
|
||||
include_exec_data: bool = True,
|
||||
) -> list[NodeExecutionResult]:
|
||||
"""⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints."""
|
||||
where_clause = _build_node_execution_where_clause(
|
||||
graph_exec_id=graph_exec_id,
|
||||
node_id=node_id,
|
||||
block_ids=block_ids,
|
||||
statuses=statuses,
|
||||
created_time_gte=created_time_gte,
|
||||
created_time_lte=created_time_lte,
|
||||
)
|
||||
|
||||
executions = await AgentNodeExecution.prisma().find_many(
|
||||
where=where_clause,
|
||||
include=(
|
||||
@@ -1052,6 +1119,7 @@ class NodeExecutionEntry(BaseModel):
|
||||
user_id: str
|
||||
graph_exec_id: str
|
||||
graph_id: str
|
||||
graph_version: int
|
||||
node_exec_id: str
|
||||
node_id: str
|
||||
block_id: str
|
||||
|
||||
294
autogpt_platform/backend/backend/data/human_review.py
Normal file
294
autogpt_platform/backend/backend/data/human_review.py
Normal file
@@ -0,0 +1,294 @@
|
||||
"""
|
||||
Data layer for Human In The Loop (HITL) review operations.
|
||||
Handles all database operations for pending human reviews.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
from typing import Optional
|
||||
|
||||
from prisma.enums import ReviewStatus
|
||||
from prisma.models import PendingHumanReview
|
||||
from prisma.types import PendingHumanReviewUpdateInput
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.server.v2.executions.review.model import (
|
||||
PendingHumanReviewModel,
|
||||
SafeJsonData,
|
||||
)
|
||||
from backend.util.json import SafeJson
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ReviewResult(BaseModel):
|
||||
"""Result of a review operation."""
|
||||
|
||||
data: Optional[SafeJsonData] = None
|
||||
status: ReviewStatus
|
||||
message: str = ""
|
||||
processed: bool
|
||||
node_exec_id: str
|
||||
|
||||
|
||||
async def get_pending_review_by_node_exec_id(
|
||||
node_exec_id: str, user_id: str
|
||||
) -> Optional["PendingHumanReviewModel"]:
|
||||
"""
|
||||
Get a pending review by node execution ID with user ownership validation.
|
||||
|
||||
Args:
|
||||
node_exec_id: The node execution ID to check
|
||||
user_id: The user ID to validate ownership
|
||||
|
||||
Returns:
|
||||
The existing review if found and owned by the user, None otherwise
|
||||
"""
|
||||
review = await PendingHumanReview.prisma().find_first(
|
||||
where={
|
||||
"nodeExecId": node_exec_id,
|
||||
"userId": user_id,
|
||||
}
|
||||
)
|
||||
|
||||
if review:
|
||||
return PendingHumanReviewModel.from_db(review)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
async def get_or_create_human_review(
    user_id: str,
    node_exec_id: str,
    graph_exec_id: str,
    graph_id: str,
    graph_version: int,
    input_data: SafeJsonData,
    message: str,
    editable: bool,
) -> Optional[ReviewResult]:
    """
    Get existing review or create a new pending review entry.

    Uses upsert with empty update to get existing or create new review in a single operation.

    Args:
        user_id: ID of the user who owns this review
        node_exec_id: ID of the node execution
        graph_exec_id: ID of the graph execution
        graph_id: ID of the graph template
        graph_version: Version of the graph template
        input_data: The data to be reviewed
        message: Instructions for the reviewer
        editable: Whether the data can be edited

    Returns:
        ReviewResult if the review is complete, None if waiting for human input

    Raises:
        Exception: Re-raises any database error from the upsert after logging.
    """
    try:
        logger.debug(f"Getting or creating review for node {node_exec_id}")

        # Upsert - get existing or create new review.  The empty "update"
        # makes this a get-or-create: an existing row is returned untouched.
        review = await PendingHumanReview.prisma().upsert(
            where={"nodeExecId": node_exec_id},
            data={
                "create": {
                    "userId": user_id,
                    "nodeExecId": node_exec_id,
                    "graphExecId": graph_exec_id,
                    "graphId": graph_id,
                    "graphVersion": graph_version,
                    "payload": SafeJson(input_data),
                    "instructions": message,
                    "editable": editable,
                    "status": ReviewStatus.WAITING,
                },
                "update": {},  # Do nothing on update - keep existing review as is
            },
        )

        # createdAt == updatedAt only for a freshly inserted row, which lets
        # us log create-vs-retrieve without an extra query.
        logger.info(
            f"Review {'created' if review.createdAt == review.updatedAt else 'retrieved'} for node {node_exec_id} with status {review.status}"
        )
    except Exception as e:
        logger.error(
            f"Database error in get_or_create_human_review for node {node_exec_id}: {str(e)}"
        )
        raise

    # Early return if already processed - the engine must not act on the
    # same review result twice.
    if review.processed:
        return None

    if review.status == ReviewStatus.APPROVED:
        # Return the approved review result (payload may have been edited
        # by the reviewer before approval).
        return ReviewResult(
            data=review.payload,
            status=ReviewStatus.APPROVED,
            message=review.reviewMessage or "",
            processed=review.processed,
            node_exec_id=review.nodeExecId,
        )
    elif review.status == ReviewStatus.REJECTED:
        # Return the rejected review result; data is intentionally None.
        return ReviewResult(
            data=None,
            status=ReviewStatus.REJECTED,
            message=review.reviewMessage or "",
            processed=review.processed,
            node_exec_id=review.nodeExecId,
        )
    else:
        # Review is pending - return None to continue waiting
        return None
|
||||
|
||||
async def has_pending_reviews_for_graph_exec(graph_exec_id: str) -> bool:
    """Report whether a graph execution still has reviews awaiting input.

    Args:
        graph_exec_id: The graph execution ID to check

    Returns:
        True when at least one review is in WAITING status, False otherwise.
    """
    # Count rows still waiting for human input for this execution.
    waiting = await PendingHumanReview.prisma().count(
        where={"graphExecId": graph_exec_id, "status": ReviewStatus.WAITING}
    )
    return bool(waiting)
|
||||
|
||||
async def get_pending_reviews_for_user(
    user_id: str, page: int = 1, page_size: int = 25
) -> list["PendingHumanReviewModel"]:
    """
    Get all pending reviews for a user with pagination.

    Args:
        user_id: User ID to get reviews for
        page: Page number (1-indexed); values below 1 are treated as page 1
        page_size: Number of reviews per page

    Returns:
        List of pending review models, newest first
    """
    # Clamp the offset so an out-of-range page (e.g. page=0 from a caller
    # that is 0-indexed) cannot produce a negative `skip`, which the
    # database client rejects.
    offset = max(page - 1, 0) * page_size

    reviews = await PendingHumanReview.prisma().find_many(
        where={"userId": user_id, "status": ReviewStatus.WAITING},
        order={"createdAt": "desc"},
        skip=offset,
        take=page_size,
    )

    return [PendingHumanReviewModel.from_db(review) for review in reviews]
|
||||
|
||||
async def get_pending_reviews_for_execution(
    graph_exec_id: str, user_id: str
) -> list["PendingHumanReviewModel"]:
    """Fetch every review still waiting for input on one graph execution.

    Args:
        graph_exec_id: Graph execution ID
        user_id: User ID for security validation

    Returns:
        List of pending review models, oldest first
    """
    # Filter by owner as well as execution so users can only see their own
    # reviews; only WAITING rows are still actionable.
    review_filter = {
        "userId": user_id,
        "graphExecId": graph_exec_id,
        "status": ReviewStatus.WAITING,
    }
    records = await PendingHumanReview.prisma().find_many(
        where=review_filter,
        order={"createdAt": "asc"},
    )
    return [PendingHumanReviewModel.from_db(record) for record in records]
|
||||
|
||||
async def process_all_reviews_for_execution(
    user_id: str,
    review_decisions: dict[str, tuple[ReviewStatus, SafeJsonData | None, str | None]],
) -> dict[str, PendingHumanReviewModel]:
    """Process all pending reviews for an execution with approve/reject decisions.

    Args:
        user_id: User ID for ownership validation
        review_decisions: Map of node_exec_id -> (status, reviewed_data, message)

    Returns:
        Dict of node_exec_id -> updated review model

    Raises:
        ValueError: If any review is missing, not owned by the user, not in
            WAITING status, or was edited while not editable.
    """
    if not review_decisions:
        return {}

    node_exec_ids = list(review_decisions.keys())

    # Get all reviews for validation; filtering on WAITING here means an
    # already-decided review shows up as "missing" below.
    reviews = await PendingHumanReview.prisma().find_many(
        where={
            "nodeExecId": {"in": node_exec_ids},
            "userId": user_id,
            "status": ReviewStatus.WAITING,
        },
    )

    # Validate all reviews can be processed before writing anything.
    if len(reviews) != len(node_exec_ids):
        missing_ids = set(node_exec_ids) - {review.nodeExecId for review in reviews}
        raise ValueError(
            f"Reviews not found, access denied, or not in WAITING status: {', '.join(missing_ids)}"
        )

    # Create parallel update tasks
    update_tasks = []

    for review in reviews:
        new_status, reviewed_data, message = review_decisions[review.nodeExecId]
        # Only treat the payload as edited when data was supplied AND it
        # actually differs from what is stored.
        has_data_changes = reviewed_data is not None and reviewed_data != review.payload

        # Check edit permissions for actual data modifications
        if has_data_changes and not review.editable:
            raise ValueError(f"Review {review.nodeExecId} is not editable")

        update_data: PendingHumanReviewUpdateInput = {
            "status": new_status,
            "reviewMessage": message,
            "wasEdited": has_data_changes,
            "reviewedAt": datetime.now(timezone.utc),
        }

        if has_data_changes:
            update_data["payload"] = SafeJson(reviewed_data)

        task = PendingHumanReview.prisma().update(
            where={"nodeExecId": review.nodeExecId},
            data=update_data,
        )
        update_tasks.append(task)

    # Execute all updates in parallel and get updated reviews.
    # NOTE(review): these updates are not wrapped in a transaction, so a
    # partial failure can leave some reviews decided and others WAITING —
    # confirm this is acceptable for the API-layer retry behavior.
    updated_reviews = await asyncio.gather(*update_tasks)

    # Note: Execution resumption is now handled at the API layer after ALL reviews
    # for an execution are processed (both approved and rejected)

    # Return as dict for easy access
    return {
        review.nodeExecId: PendingHumanReviewModel.from_db(review)
        for review in updated_reviews
    }
|
||||
|
||||
async def update_review_processed_status(node_exec_id: str, processed: bool) -> None:
    """Update the processed status of a review.

    Args:
        node_exec_id: Node execution whose review row should be updated.
        processed: New value for the `processed` flag (True once the
            execution engine has consumed the review result).
    """
    await PendingHumanReview.prisma().update(
        where={"nodeExecId": node_exec_id}, data={"processed": processed}
    )
||||
376
autogpt_platform/backend/backend/data/human_review_test.py
Normal file
376
autogpt_platform/backend/backend/data/human_review_test.py
Normal file
@@ -0,0 +1,376 @@
|
||||
import datetime
|
||||
from unittest.mock import AsyncMock, Mock
|
||||
|
||||
import pytest
|
||||
import pytest_mock
|
||||
from prisma.enums import ReviewStatus
|
||||
|
||||
from backend.data.human_review import (
|
||||
get_or_create_human_review,
|
||||
get_pending_review_by_node_exec_id,
|
||||
get_pending_reviews_for_execution,
|
||||
get_pending_reviews_for_user,
|
||||
has_pending_reviews_for_graph_exec,
|
||||
process_all_reviews_for_execution,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
def sample_db_review():
    """Create a sample database review object"""
    # The mock mirrors the prisma PendingHumanReview row attributes that
    # PendingHumanReviewModel.from_db reads.
    mock_review = Mock()
    mock_review.nodeExecId = "test_node_123"
    mock_review.userId = "test_user"
    mock_review.graphExecId = "test_graph_exec_456"
    mock_review.graphId = "test_graph_789"
    mock_review.graphVersion = 1
    mock_review.payload = {"data": "test payload"}
    mock_review.instructions = "Please review"
    mock_review.editable = True
    mock_review.status = ReviewStatus.WAITING
    mock_review.reviewMessage = None
    mock_review.wasEdited = False
    mock_review.processed = False
    mock_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
    # updatedAt is left None to represent a not-yet-touched row.
    mock_review.updatedAt = None
    mock_review.reviewedAt = None
    return mock_review
|
||||
|
||||
@pytest.mark.asyncio
async def test_get_pending_review_by_node_exec_id_found(
    mocker: pytest_mock.MockFixture,
    sample_db_review,
):
    """Test finding an existing pending review"""
    # Patch the prisma() accessor so find_first returns the fixture row
    # without touching a database.
    mock_find_first = mocker.patch(
        "backend.data.human_review.PendingHumanReview.prisma"
    )
    mock_find_first.return_value.find_first = AsyncMock(return_value=sample_db_review)

    result = await get_pending_review_by_node_exec_id("test_node_123", "test_user")

    # The DB row should be converted to the API model with matching fields.
    assert result is not None
    assert result.node_exec_id == "test_node_123"
    assert result.user_id == "test_user"
    assert result.status == ReviewStatus.WAITING
||||
|
||||
@pytest.mark.asyncio
async def test_get_pending_review_by_node_exec_id_not_found(
    mocker: pytest_mock.MockFixture,
):
    """Test when review is not found"""
    # find_first returning None models a missing (or not-owned) review row.
    mock_find_first = mocker.patch(
        "backend.data.human_review.PendingHumanReview.prisma"
    )
    mock_find_first.return_value.find_first = AsyncMock(return_value=None)

    result = await get_pending_review_by_node_exec_id("nonexistent", "test_user")

    assert result is None
|
||||
|
||||
@pytest.mark.asyncio
async def test_get_or_create_human_review_new(
    mocker: pytest_mock.MockFixture,
    sample_db_review,
):
    """Test creating a new human review"""
    # Mock the upsert to return a new review (created_at == updated_at)
    # still in WAITING status and not yet processed.
    sample_db_review.status = ReviewStatus.WAITING
    sample_db_review.processed = False

    mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review)

    result = await get_or_create_human_review(
        user_id="test_user",
        node_exec_id="test_node_123",
        graph_exec_id="test_graph_exec_456",
        graph_id="test_graph_789",
        graph_version=1,
        input_data={"data": "test payload"},
        message="Please review",
        editable=True,
    )

    # Should return None for pending reviews (waiting for human input)
    assert result is None
|
||||
|
||||
@pytest.mark.asyncio
async def test_get_or_create_human_review_approved(
    mocker: pytest_mock.MockFixture,
    sample_db_review,
):
    """Test retrieving an already approved review"""
    # Set up review as already approved; upsert with empty update returns
    # the existing row untouched.
    sample_db_review.status = ReviewStatus.APPROVED
    sample_db_review.processed = False
    sample_db_review.reviewMessage = "Looks good"

    mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review)

    result = await get_or_create_human_review(
        user_id="test_user",
        node_exec_id="test_node_123",
        graph_exec_id="test_graph_exec_456",
        graph_id="test_graph_789",
        graph_version=1,
        input_data={"data": "test payload"},
        message="Please review",
        editable=True,
    )

    # Should return the approved result carrying the stored payload and
    # the reviewer's message.
    assert result is not None
    assert result.status == ReviewStatus.APPROVED
    assert result.data == {"data": "test payload"}
    assert result.message == "Looks good"
||||
|
||||
@pytest.mark.asyncio
async def test_has_pending_reviews_for_graph_exec_true(
    mocker: pytest_mock.MockFixture,
):
    """Test when there are pending reviews"""
    # A non-zero count means at least one WAITING review exists.
    mock_count = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_count.return_value.count = AsyncMock(return_value=2)

    result = await has_pending_reviews_for_graph_exec("test_graph_exec")

    assert result is True
||||
|
||||
@pytest.mark.asyncio
async def test_has_pending_reviews_for_graph_exec_false(
    mocker: pytest_mock.MockFixture,
):
    """Test when there are no pending reviews"""
    # A zero count means no review is waiting for this execution.
    mock_count = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_count.return_value.count = AsyncMock(return_value=0)

    result = await has_pending_reviews_for_graph_exec("test_graph_exec")

    assert result is False
|
||||
|
||||
@pytest.mark.asyncio
async def test_get_pending_reviews_for_user(
    mocker: pytest_mock.MockFixture,
    sample_db_review,
):
    """Test getting pending reviews for a user with pagination"""
    mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review])

    result = await get_pending_reviews_for_user("test_user", page=2, page_size=10)

    assert len(result) == 1
    assert result[0].node_exec_id == "test_node_123"

    # Verify pagination parameters translate to skip/take correctly.
    call_args = mock_find_many.return_value.find_many.call_args
    assert call_args.kwargs["skip"] == 10  # (page-1) * page_size = (2-1) * 10
    assert call_args.kwargs["take"] == 10
||||
|
||||
@pytest.mark.asyncio
async def test_get_pending_reviews_for_execution(
    mocker: pytest_mock.MockFixture,
    sample_db_review,
):
    """Test getting pending reviews for specific execution"""
    mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review])

    result = await get_pending_reviews_for_execution("test_graph_exec_456", "test_user")

    assert len(result) == 1
    assert result[0].graph_exec_id == "test_graph_exec_456"

    # Verify it filters by execution, owner, and WAITING status.
    call_args = mock_find_many.return_value.find_many.call_args
    where_clause = call_args.kwargs["where"]
    assert where_clause["userId"] == "test_user"
    assert where_clause["graphExecId"] == "test_graph_exec_456"
    assert where_clause["status"] == ReviewStatus.WAITING
||||
|
||||
@pytest.mark.asyncio
async def test_process_all_reviews_for_execution_success(
    mocker: pytest_mock.MockFixture,
    sample_db_review,
):
    """Test successful processing of reviews for an execution"""
    # Mock finding reviews
    mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review])

    # Mock updating reviews: this is the row shape expected back after an
    # approve-with-edit decision.
    updated_review = Mock()
    updated_review.nodeExecId = "test_node_123"
    updated_review.userId = "test_user"
    updated_review.graphExecId = "test_graph_exec_456"
    updated_review.graphId = "test_graph_789"
    updated_review.graphVersion = 1
    updated_review.payload = {"data": "modified"}
    updated_review.instructions = "Please review"
    updated_review.editable = True
    updated_review.status = ReviewStatus.APPROVED
    updated_review.reviewMessage = "Approved"
    updated_review.wasEdited = True
    updated_review.processed = False
    updated_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
    updated_review.updatedAt = datetime.datetime.now(datetime.timezone.utc)
    updated_review.reviewedAt = datetime.datetime.now(datetime.timezone.utc)
    # NOTE(review): this second patch of the same target supersedes the one
    # above, so `find_many` is no longer the configured AsyncMock — confirm
    # the test still exercises the intended path, or configure both methods
    # on a single patched mock.
    mock_update = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_update.return_value.update = AsyncMock(return_value=updated_review)

    # Mock gather to simulate parallel updates
    mocker.patch(
        "backend.data.human_review.asyncio.gather",
        new=AsyncMock(return_value=[updated_review]),
    )

    result = await process_all_reviews_for_execution(
        user_id="test_user",
        review_decisions={
            "test_node_123": (ReviewStatus.APPROVED, {"data": "modified"}, "Approved")
        },
    )

    assert len(result) == 1
    assert "test_node_123" in result
    assert result["test_node_123"].status == ReviewStatus.APPROVED
|
||||
|
||||
@pytest.mark.asyncio
async def test_process_all_reviews_for_execution_validation_errors(
    mocker: pytest_mock.MockFixture,
):
    """Test validation errors in process_all_reviews_for_execution"""
    # Mock finding fewer reviews than requested (some not found)
    mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_find_many.return_value.find_many = AsyncMock(
        return_value=[]
    )  # No reviews found

    # A decision for a review that does not exist must raise before any
    # update is attempted.
    with pytest.raises(ValueError, match="Reviews not found"):
        await process_all_reviews_for_execution(
            user_id="test_user",
            review_decisions={
                "nonexistent_node": (ReviewStatus.APPROVED, {"data": "test"}, "message")
            },
        )
|
||||
|
||||
@pytest.mark.asyncio
async def test_process_all_reviews_edit_permission_error(
    mocker: pytest_mock.MockFixture,
    sample_db_review,
):
    """Test editing non-editable review"""
    # Set review as non-editable
    sample_db_review.editable = False

    # Mock finding reviews
    mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review])

    # Supplying a payload that differs from the stored one counts as an
    # edit, which must be rejected when editable is False.
    with pytest.raises(ValueError, match="not editable"):
        await process_all_reviews_for_execution(
            user_id="test_user",
            review_decisions={
                "test_node_123": (
                    ReviewStatus.APPROVED,
                    {"data": "modified"},
                    "message",
                )
            },
        )
|
||||
|
||||
@pytest.mark.asyncio
async def test_process_all_reviews_mixed_approval_rejection(
    mocker: pytest_mock.MockFixture,
    sample_db_review,
):
    """Test processing mixed approval and rejection decisions"""
    # Create second review for rejection
    second_review = Mock()
    second_review.nodeExecId = "test_node_456"
    second_review.userId = "test_user"
    second_review.graphExecId = "test_graph_exec_456"
    second_review.graphId = "test_graph_789"
    second_review.graphVersion = 1
    second_review.payload = {"data": "original"}
    second_review.instructions = "Second review"
    second_review.editable = True
    second_review.status = ReviewStatus.WAITING
    second_review.reviewMessage = None
    second_review.wasEdited = False
    second_review.processed = False
    second_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
    second_review.updatedAt = None
    second_review.reviewedAt = None

    # Mock finding reviews: both rows are returned by the ownership query.
    mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
    mock_find_many.return_value.find_many = AsyncMock(
        return_value=[sample_db_review, second_review]
    )

    # Mock updating reviews: the expected post-update row for the approval
    # (payload edited, wasEdited=True) ...
    approved_review = Mock()
    approved_review.nodeExecId = "test_node_123"
    approved_review.userId = "test_user"
    approved_review.graphExecId = "test_graph_exec_456"
    approved_review.graphId = "test_graph_789"
    approved_review.graphVersion = 1
    approved_review.payload = {"data": "modified"}
    approved_review.instructions = "Please review"
    approved_review.editable = True
    approved_review.status = ReviewStatus.APPROVED
    approved_review.reviewMessage = "Approved"
    approved_review.wasEdited = True
    approved_review.processed = False
    approved_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
    approved_review.updatedAt = datetime.datetime.now(datetime.timezone.utc)
    approved_review.reviewedAt = datetime.datetime.now(datetime.timezone.utc)

    # ... and for the rejection (payload untouched, wasEdited=False).
    rejected_review = Mock()
    rejected_review.nodeExecId = "test_node_456"
    rejected_review.userId = "test_user"
    rejected_review.graphExecId = "test_graph_exec_456"
    rejected_review.graphId = "test_graph_789"
    rejected_review.graphVersion = 1
    rejected_review.payload = {"data": "original"}
    rejected_review.instructions = "Please review"
    rejected_review.editable = True
    rejected_review.status = ReviewStatus.REJECTED
    rejected_review.reviewMessage = "Rejected"
    rejected_review.wasEdited = False
    rejected_review.processed = False
    rejected_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
    rejected_review.updatedAt = datetime.datetime.now(datetime.timezone.utc)
    rejected_review.reviewedAt = datetime.datetime.now(datetime.timezone.utc)

    # gather is patched so the individual update tasks are never awaited;
    # the function receives both updated rows as if run in parallel.
    mocker.patch(
        "backend.data.human_review.asyncio.gather",
        new=AsyncMock(return_value=[approved_review, rejected_review]),
    )

    result = await process_all_reviews_for_execution(
        user_id="test_user",
        review_decisions={
            "test_node_123": (ReviewStatus.APPROVED, {"data": "modified"}, "Approved"),
            "test_node_456": (ReviewStatus.REJECTED, None, "Rejected"),
        },
    )

    assert len(result) == 2
    assert "test_node_123" in result
    assert "test_node_456" in result
||||
@@ -31,6 +31,11 @@ from backend.data.graph import (
|
||||
get_node,
|
||||
validate_graph_execution_permissions,
|
||||
)
|
||||
from backend.data.human_review import (
|
||||
get_or_create_human_review,
|
||||
has_pending_reviews_for_graph_exec,
|
||||
update_review_processed_status,
|
||||
)
|
||||
from backend.data.notifications import (
|
||||
clear_all_user_notification_batches,
|
||||
create_or_add_to_user_notification_batch,
|
||||
@@ -161,6 +166,11 @@ class DatabaseManager(AppService):
|
||||
get_user_email_verification = _(get_user_email_verification)
|
||||
get_user_notification_preference = _(get_user_notification_preference)
|
||||
|
||||
# Human In The Loop
|
||||
get_or_create_human_review = _(get_or_create_human_review)
|
||||
has_pending_reviews_for_graph_exec = _(has_pending_reviews_for_graph_exec)
|
||||
update_review_processed_status = _(update_review_processed_status)
|
||||
|
||||
# Notifications - async
|
||||
clear_all_user_notification_batches = _(clear_all_user_notification_batches)
|
||||
create_or_add_to_user_notification_batch = _(
|
||||
@@ -215,6 +225,9 @@ class DatabaseManagerClient(AppServiceClient):
|
||||
# Block error monitoring
|
||||
get_block_error_stats = _(d.get_block_error_stats)
|
||||
|
||||
# Human In The Loop
|
||||
has_pending_reviews_for_graph_exec = _(d.has_pending_reviews_for_graph_exec)
|
||||
|
||||
# User Emails
|
||||
get_user_email_by_id = _(d.get_user_email_by_id)
|
||||
|
||||
@@ -256,6 +269,10 @@ class DatabaseManagerAsyncClient(AppServiceClient):
|
||||
get_execution_kv_data = d.get_execution_kv_data
|
||||
set_execution_kv_data = d.set_execution_kv_data
|
||||
|
||||
# Human In The Loop
|
||||
get_or_create_human_review = d.get_or_create_human_review
|
||||
update_review_processed_status = d.update_review_processed_status
|
||||
|
||||
# User Comms
|
||||
get_active_user_ids_in_timerange = d.get_active_user_ids_in_timerange
|
||||
get_user_email_by_id = d.get_user_email_by_id
|
||||
|
||||
@@ -164,6 +164,7 @@ async def execute_node(
|
||||
user_id = data.user_id
|
||||
graph_exec_id = data.graph_exec_id
|
||||
graph_id = data.graph_id
|
||||
graph_version = data.graph_version
|
||||
node_exec_id = data.node_exec_id
|
||||
node_id = data.node_id
|
||||
node_block = node.block
|
||||
@@ -204,6 +205,7 @@ async def execute_node(
|
||||
# Inject extra execution arguments for the blocks via kwargs
|
||||
extra_exec_kwargs: dict = {
|
||||
"graph_id": graph_id,
|
||||
"graph_version": graph_version,
|
||||
"node_id": node_id,
|
||||
"graph_exec_id": graph_exec_id,
|
||||
"node_exec_id": node_exec_id,
|
||||
@@ -284,6 +286,7 @@ async def _enqueue_next_nodes(
|
||||
user_id: str,
|
||||
graph_exec_id: str,
|
||||
graph_id: str,
|
||||
graph_version: int,
|
||||
log_metadata: LogMetadata,
|
||||
nodes_input_masks: Optional[NodesInputMasks],
|
||||
user_context: UserContext,
|
||||
@@ -301,6 +304,7 @@ async def _enqueue_next_nodes(
|
||||
user_id=user_id,
|
||||
graph_exec_id=graph_exec_id,
|
||||
graph_id=graph_id,
|
||||
graph_version=graph_version,
|
||||
node_exec_id=node_exec_id,
|
||||
node_id=node_id,
|
||||
block_id=block_id,
|
||||
@@ -334,17 +338,14 @@ async def _enqueue_next_nodes(
|
||||
# Or the same input to be consumed multiple times.
|
||||
async with synchronized(f"upsert_input-{next_node_id}-{graph_exec_id}"):
|
||||
# Add output data to the earliest incomplete execution, or create a new one.
|
||||
next_node_exec_id, next_node_input = await db_client.upsert_execution_input(
|
||||
next_node_exec, next_node_input = await db_client.upsert_execution_input(
|
||||
node_id=next_node_id,
|
||||
graph_exec_id=graph_exec_id,
|
||||
input_name=next_input_name,
|
||||
input_data=next_data,
|
||||
)
|
||||
await async_update_node_execution_status(
|
||||
db_client=db_client,
|
||||
exec_id=next_node_exec_id,
|
||||
status=ExecutionStatus.INCOMPLETE,
|
||||
)
|
||||
next_node_exec_id = next_node_exec.node_exec_id
|
||||
await send_async_execution_update(next_node_exec)
|
||||
|
||||
# Complete missing static input pins data using the last execution input.
|
||||
static_link_names = {
|
||||
@@ -660,6 +661,16 @@ class ExecutionProcessor:
|
||||
log_metadata.info(
|
||||
f"⚙️ Graph execution #{graph_exec.graph_exec_id} is already running, continuing where it left off."
|
||||
)
|
||||
elif exec_meta.status == ExecutionStatus.REVIEW:
|
||||
exec_meta.status = ExecutionStatus.RUNNING
|
||||
log_metadata.info(
|
||||
f"⚙️ Graph execution #{graph_exec.graph_exec_id} was waiting for review, resuming execution."
|
||||
)
|
||||
update_graph_execution_state(
|
||||
db_client=db_client,
|
||||
graph_exec_id=graph_exec.graph_exec_id,
|
||||
status=ExecutionStatus.RUNNING,
|
||||
)
|
||||
elif exec_meta.status == ExecutionStatus.FAILED:
|
||||
exec_meta.status = ExecutionStatus.RUNNING
|
||||
log_metadata.info(
|
||||
@@ -697,19 +708,21 @@ class ExecutionProcessor:
|
||||
raise status
|
||||
exec_meta.status = status
|
||||
|
||||
# Activity status handling
|
||||
activity_response = asyncio.run_coroutine_threadsafe(
|
||||
generate_activity_status_for_execution(
|
||||
graph_exec_id=graph_exec.graph_exec_id,
|
||||
graph_id=graph_exec.graph_id,
|
||||
graph_version=graph_exec.graph_version,
|
||||
execution_stats=exec_stats,
|
||||
db_client=get_db_async_client(),
|
||||
user_id=graph_exec.user_id,
|
||||
execution_status=status,
|
||||
),
|
||||
self.node_execution_loop,
|
||||
).result(timeout=60.0)
|
||||
if status in [ExecutionStatus.COMPLETED, ExecutionStatus.FAILED]:
|
||||
activity_response = asyncio.run_coroutine_threadsafe(
|
||||
generate_activity_status_for_execution(
|
||||
graph_exec_id=graph_exec.graph_exec_id,
|
||||
graph_id=graph_exec.graph_id,
|
||||
graph_version=graph_exec.graph_version,
|
||||
execution_stats=exec_stats,
|
||||
db_client=get_db_async_client(),
|
||||
user_id=graph_exec.user_id,
|
||||
execution_status=status,
|
||||
),
|
||||
self.node_execution_loop,
|
||||
).result(timeout=60.0)
|
||||
else:
|
||||
activity_response = None
|
||||
if activity_response is not None:
|
||||
exec_stats.activity_status = activity_response["activity_status"]
|
||||
exec_stats.correctness_score = activity_response["correctness_score"]
|
||||
@@ -845,6 +858,7 @@ class ExecutionProcessor:
|
||||
ExecutionStatus.RUNNING,
|
||||
ExecutionStatus.QUEUED,
|
||||
ExecutionStatus.TERMINATED,
|
||||
ExecutionStatus.REVIEW,
|
||||
],
|
||||
):
|
||||
node_entry = node_exec.to_node_execution_entry(graph_exec.user_context)
|
||||
@@ -853,6 +867,7 @@ class ExecutionProcessor:
|
||||
# ------------------------------------------------------------
|
||||
# Main dispatch / polling loop -----------------------------
|
||||
# ------------------------------------------------------------
|
||||
|
||||
while not execution_queue.empty():
|
||||
if cancel.is_set():
|
||||
break
|
||||
@@ -1006,7 +1021,12 @@ class ExecutionProcessor:
|
||||
elif error is not None:
|
||||
execution_status = ExecutionStatus.FAILED
|
||||
else:
|
||||
execution_status = ExecutionStatus.COMPLETED
|
||||
if db_client.has_pending_reviews_for_graph_exec(
|
||||
graph_exec.graph_exec_id
|
||||
):
|
||||
execution_status = ExecutionStatus.REVIEW
|
||||
else:
|
||||
execution_status = ExecutionStatus.COMPLETED
|
||||
|
||||
if error:
|
||||
execution_stats.error = str(error) or type(error).__name__
|
||||
@@ -1142,6 +1162,7 @@ class ExecutionProcessor:
|
||||
user_id=graph_exec.user_id,
|
||||
graph_exec_id=graph_exec.graph_exec_id,
|
||||
graph_id=graph_exec.graph_id,
|
||||
graph_version=graph_exec.graph_version,
|
||||
log_metadata=log_metadata,
|
||||
nodes_input_masks=nodes_input_masks,
|
||||
user_context=graph_exec.user_context,
|
||||
|
||||
@@ -30,6 +30,7 @@ from backend.data.execution import (
|
||||
GraphExecutionWithNodes,
|
||||
NodesInputMasks,
|
||||
UserContext,
|
||||
get_graph_execution,
|
||||
)
|
||||
from backend.data.graph import GraphModel, Node
|
||||
from backend.data.model import CredentialsMetaInput
|
||||
@@ -764,6 +765,7 @@ async def add_graph_execution(
|
||||
nodes_input_masks: Optional[NodesInputMasks] = None,
|
||||
parent_graph_exec_id: Optional[str] = None,
|
||||
is_sub_graph: bool = False,
|
||||
graph_exec_id: Optional[str] = None,
|
||||
) -> GraphExecutionWithNodes:
|
||||
"""
|
||||
Adds a graph execution to the queue and returns the execution entry.
|
||||
@@ -779,32 +781,48 @@ async def add_graph_execution(
|
||||
nodes_input_masks: Node inputs to use in the execution.
|
||||
parent_graph_exec_id: The ID of the parent graph execution (for nested executions).
|
||||
is_sub_graph: Whether this is a sub-graph execution.
|
||||
graph_exec_id: If provided, resume this existing execution instead of creating a new one.
|
||||
Returns:
|
||||
GraphExecutionEntry: The entry for the graph execution.
|
||||
Raises:
|
||||
ValueError: If the graph is not found or if there are validation errors.
|
||||
NotFoundError: If graph_exec_id is provided but execution is not found.
|
||||
"""
|
||||
if prisma.is_connected():
|
||||
edb = execution_db
|
||||
else:
|
||||
edb = get_database_manager_async_client()
|
||||
|
||||
graph, starting_nodes_input, compiled_nodes_input_masks = (
|
||||
await validate_and_construct_node_execution_input(
|
||||
graph_id=graph_id,
|
||||
# Get or create the graph execution
|
||||
if graph_exec_id:
|
||||
# Resume existing execution
|
||||
graph_exec = await get_graph_execution(
|
||||
user_id=user_id,
|
||||
graph_inputs=inputs or {},
|
||||
graph_version=graph_version,
|
||||
graph_credentials_inputs=graph_credentials_inputs,
|
||||
nodes_input_masks=nodes_input_masks,
|
||||
is_sub_graph=is_sub_graph,
|
||||
execution_id=graph_exec_id,
|
||||
include_node_executions=True,
|
||||
)
|
||||
|
||||
if not graph_exec:
|
||||
raise NotFoundError(f"Graph execution #{graph_exec_id} not found.")
|
||||
|
||||
# Use existing execution's compiled input masks
|
||||
compiled_nodes_input_masks = graph_exec.nodes_input_masks or {}
|
||||
|
||||
logger.info(f"Resuming graph execution #{graph_exec.id} for graph #{graph_id}")
|
||||
else:
|
||||
# Create new execution
|
||||
graph, starting_nodes_input, compiled_nodes_input_masks = (
|
||||
await validate_and_construct_node_execution_input(
|
||||
graph_id=graph_id,
|
||||
user_id=user_id,
|
||||
graph_inputs=inputs or {},
|
||||
graph_version=graph_version,
|
||||
graph_credentials_inputs=graph_credentials_inputs,
|
||||
nodes_input_masks=nodes_input_masks,
|
||||
is_sub_graph=is_sub_graph,
|
||||
)
|
||||
)
|
||||
)
|
||||
graph_exec = None
|
||||
|
||||
try:
|
||||
# Sanity check: running add_graph_execution with the properties of
|
||||
# the graph_exec created here should create the same execution again.
|
||||
graph_exec = await edb.create_graph_execution(
|
||||
user_id=user_id,
|
||||
graph_id=graph_id,
|
||||
@@ -817,16 +835,20 @@ async def add_graph_execution(
|
||||
parent_graph_exec_id=parent_graph_exec_id,
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"Created graph execution #{graph_exec.id} for graph "
|
||||
f"#{graph_id} with {len(starting_nodes_input)} starting nodes"
|
||||
)
|
||||
|
||||
# Common path: publish to queue and update status
|
||||
try:
|
||||
graph_exec_entry = graph_exec.to_graph_execution_entry(
|
||||
user_context=await get_user_context(user_id),
|
||||
compiled_nodes_input_masks=compiled_nodes_input_masks,
|
||||
parent_graph_exec_id=parent_graph_exec_id,
|
||||
)
|
||||
logger.info(
|
||||
f"Created graph execution #{graph_exec.id} for graph "
|
||||
f"#{graph_id} with {len(starting_nodes_input)} starting nodes. "
|
||||
f"Now publishing to execution queue."
|
||||
)
|
||||
|
||||
logger.info(f"Publishing execution {graph_exec.id} to execution queue")
|
||||
|
||||
exec_queue = await get_async_execution_queue()
|
||||
await exec_queue.publish_message(
|
||||
|
||||
@@ -29,6 +29,7 @@ import backend.server.v2.admin.store_admin_routes
|
||||
import backend.server.v2.builder
|
||||
import backend.server.v2.builder.routes
|
||||
import backend.server.v2.chat.routes as chat_routes
|
||||
import backend.server.v2.executions.review.routes
|
||||
import backend.server.v2.library.db
|
||||
import backend.server.v2.library.model
|
||||
import backend.server.v2.library.routes
|
||||
@@ -274,6 +275,11 @@ app.include_router(
|
||||
tags=["v2", "admin"],
|
||||
prefix="/api/executions",
|
||||
)
|
||||
app.include_router(
|
||||
backend.server.v2.executions.review.routes.router,
|
||||
tags=["v2", "executions", "review"],
|
||||
prefix="/api/review",
|
||||
)
|
||||
app.include_router(
|
||||
backend.server.v2.library.routes.router, tags=["v2"], prefix="/api/library"
|
||||
)
|
||||
|
||||
@@ -7,6 +7,7 @@ import backend.data.block
|
||||
from backend.blocks import load_all_blocks
|
||||
from backend.blocks.llm import LlmModel
|
||||
from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
|
||||
from backend.data.db import query_raw_with_schema
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.server.v2.builder.model import (
|
||||
BlockCategoryResponse,
|
||||
@@ -340,13 +341,13 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
|
||||
# Calculate the cutoff timestamp
|
||||
timestamp_threshold = datetime.now(timezone.utc) - timedelta(days=30)
|
||||
|
||||
results = await prisma.get_client().query_raw(
|
||||
results = await query_raw_with_schema(
|
||||
"""
|
||||
SELECT
|
||||
agent_node."agentBlockId" AS block_id,
|
||||
COUNT(execution.id) AS execution_count
|
||||
FROM "AgentNodeExecution" execution
|
||||
JOIN "AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
|
||||
FROM {schema_prefix}"AgentNodeExecution" execution
|
||||
JOIN {schema_prefix}"AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
|
||||
WHERE execution."endedTime" >= $1::timestamp
|
||||
GROUP BY agent_node."agentBlockId"
|
||||
ORDER BY execution_count DESC;
|
||||
|
||||
@@ -0,0 +1,204 @@
|
||||
import json
|
||||
from datetime import datetime
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Union
|
||||
|
||||
from prisma.enums import ReviewStatus
|
||||
from pydantic import BaseModel, Field, field_validator, model_validator
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from prisma.models import PendingHumanReview
|
||||
|
||||
# SafeJson-compatible type alias for review data
|
||||
SafeJsonData = Union[Dict[str, Any], List[Any], str, int, float, bool, None]
|
||||
|
||||
|
||||
class PendingHumanReviewModel(BaseModel):
|
||||
"""Response model for pending human review data.
|
||||
|
||||
Represents a human review request that is awaiting user action.
|
||||
Contains all necessary information for a user to review and approve
|
||||
or reject data from a Human-in-the-Loop block execution.
|
||||
|
||||
Attributes:
|
||||
id: Unique identifier for the review record
|
||||
user_id: ID of the user who must perform the review
|
||||
node_exec_id: ID of the node execution that created this review
|
||||
graph_exec_id: ID of the graph execution containing the node
|
||||
graph_id: ID of the graph template being executed
|
||||
graph_version: Version number of the graph template
|
||||
payload: The actual data payload awaiting review
|
||||
instructions: Instructions or message for the reviewer
|
||||
editable: Whether the reviewer can edit the data
|
||||
status: Current review status (WAITING, APPROVED, or REJECTED)
|
||||
review_message: Optional message from the reviewer
|
||||
created_at: Timestamp when review was created
|
||||
updated_at: Timestamp when review was last modified
|
||||
reviewed_at: Timestamp when review was completed (if applicable)
|
||||
"""
|
||||
|
||||
node_exec_id: str = Field(description="Node execution ID (primary key)")
|
||||
user_id: str = Field(description="User ID associated with the review")
|
||||
graph_exec_id: str = Field(description="Graph execution ID")
|
||||
graph_id: str = Field(description="Graph ID")
|
||||
graph_version: int = Field(description="Graph version")
|
||||
payload: SafeJsonData = Field(description="The actual data payload awaiting review")
|
||||
instructions: str | None = Field(
|
||||
description="Instructions or message for the reviewer", default=None
|
||||
)
|
||||
editable: bool = Field(description="Whether the reviewer can edit the data")
|
||||
status: ReviewStatus = Field(description="Review status")
|
||||
review_message: str | None = Field(
|
||||
description="Optional message from the reviewer", default=None
|
||||
)
|
||||
was_edited: bool | None = Field(
|
||||
description="Whether the data was modified during review", default=None
|
||||
)
|
||||
processed: bool = Field(
|
||||
description="Whether the review result has been processed by the execution engine",
|
||||
default=False,
|
||||
)
|
||||
created_at: datetime = Field(description="When the review was created")
|
||||
updated_at: datetime | None = Field(
|
||||
description="When the review was last updated", default=None
|
||||
)
|
||||
reviewed_at: datetime | None = Field(
|
||||
description="When the review was completed", default=None
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def from_db(cls, review: "PendingHumanReview") -> "PendingHumanReviewModel":
|
||||
"""
|
||||
Convert a database model to a response model.
|
||||
|
||||
Uses the new flat database structure with separate columns for
|
||||
payload, instructions, and editable flag.
|
||||
|
||||
Handles invalid data gracefully by using safe defaults.
|
||||
"""
|
||||
return cls(
|
||||
node_exec_id=review.nodeExecId,
|
||||
user_id=review.userId,
|
||||
graph_exec_id=review.graphExecId,
|
||||
graph_id=review.graphId,
|
||||
graph_version=review.graphVersion,
|
||||
payload=review.payload,
|
||||
instructions=review.instructions,
|
||||
editable=review.editable,
|
||||
status=review.status,
|
||||
review_message=review.reviewMessage,
|
||||
was_edited=review.wasEdited,
|
||||
processed=review.processed,
|
||||
created_at=review.createdAt,
|
||||
updated_at=review.updatedAt,
|
||||
reviewed_at=review.reviewedAt,
|
||||
)
|
||||
|
||||
|
||||
class ReviewItem(BaseModel):
|
||||
"""Single review item for processing."""
|
||||
|
||||
node_exec_id: str = Field(description="Node execution ID to review")
|
||||
approved: bool = Field(
|
||||
description="Whether this review is approved (True) or rejected (False)"
|
||||
)
|
||||
message: str | None = Field(
|
||||
None, description="Optional review message", max_length=2000
|
||||
)
|
||||
reviewed_data: SafeJsonData | None = Field(
|
||||
None, description="Optional edited data (ignored if approved=False)"
|
||||
)
|
||||
|
||||
@field_validator("reviewed_data")
|
||||
@classmethod
|
||||
def validate_reviewed_data(cls, v):
|
||||
"""Validate that reviewed_data is safe and properly structured."""
|
||||
if v is None:
|
||||
return v
|
||||
|
||||
# Validate SafeJson compatibility
|
||||
def validate_safejson_type(obj):
|
||||
"""Ensure object only contains SafeJson compatible types."""
|
||||
if obj is None:
|
||||
return True
|
||||
elif isinstance(obj, (str, int, float, bool)):
|
||||
return True
|
||||
elif isinstance(obj, dict):
|
||||
return all(
|
||||
isinstance(k, str) and validate_safejson_type(v)
|
||||
for k, v in obj.items()
|
||||
)
|
||||
elif isinstance(obj, list):
|
||||
return all(validate_safejson_type(item) for item in obj)
|
||||
else:
|
||||
return False
|
||||
|
||||
if not validate_safejson_type(v):
|
||||
raise ValueError("reviewed_data contains non-SafeJson compatible types")
|
||||
|
||||
# Validate data size to prevent DoS attacks
|
||||
try:
|
||||
json_str = json.dumps(v)
|
||||
if len(json_str) > 1000000: # 1MB limit
|
||||
raise ValueError("reviewed_data is too large (max 1MB)")
|
||||
except (TypeError, ValueError) as e:
|
||||
raise ValueError(f"reviewed_data must be JSON serializable: {str(e)}")
|
||||
|
||||
# Ensure no dangerous nested structures (prevent infinite recursion)
|
||||
def check_depth(obj, max_depth=10, current_depth=0):
|
||||
"""Recursively check object nesting depth to prevent stack overflow attacks."""
|
||||
if current_depth > max_depth:
|
||||
raise ValueError("reviewed_data has excessive nesting depth")
|
||||
|
||||
if isinstance(obj, dict):
|
||||
for value in obj.values():
|
||||
check_depth(value, max_depth, current_depth + 1)
|
||||
elif isinstance(obj, list):
|
||||
for item in obj:
|
||||
check_depth(item, max_depth, current_depth + 1)
|
||||
|
||||
check_depth(v)
|
||||
return v
|
||||
|
||||
@field_validator("message")
|
||||
@classmethod
|
||||
def validate_message(cls, v):
|
||||
"""Validate and sanitize review message."""
|
||||
if v is not None and len(v.strip()) == 0:
|
||||
return None
|
||||
return v
|
||||
|
||||
|
||||
class ReviewRequest(BaseModel):
|
||||
"""Request model for processing ALL pending reviews for an execution.
|
||||
|
||||
This request must include ALL pending reviews for a graph execution.
|
||||
Each review will be either approved (with optional data modifications)
|
||||
or rejected (data ignored). The execution will resume only after ALL reviews are processed.
|
||||
"""
|
||||
|
||||
reviews: List[ReviewItem] = Field(
|
||||
description="All reviews with their approval status, data, and messages"
|
||||
)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_review_completeness(self):
|
||||
"""Validate that we have at least one review to process and no duplicates."""
|
||||
if not self.reviews:
|
||||
raise ValueError("At least one review must be provided")
|
||||
|
||||
# Ensure no duplicate node_exec_ids
|
||||
node_ids = [review.node_exec_id for review in self.reviews]
|
||||
if len(node_ids) != len(set(node_ids)):
|
||||
duplicates = [nid for nid in set(node_ids) if node_ids.count(nid) > 1]
|
||||
raise ValueError(f"Duplicate review IDs found: {', '.join(duplicates)}")
|
||||
|
||||
return self
|
||||
|
||||
|
||||
class ReviewResponse(BaseModel):
|
||||
"""Response from review endpoint."""
|
||||
|
||||
approved_count: int = Field(description="Number of reviews successfully approved")
|
||||
rejected_count: int = Field(description="Number of reviews successfully rejected")
|
||||
failed_count: int = Field(description="Number of reviews that failed processing")
|
||||
error: str | None = Field(None, description="Error message if operation failed")
|
||||
@@ -0,0 +1,459 @@
|
||||
import datetime
|
||||
|
||||
import fastapi
|
||||
import fastapi.testclient
|
||||
import pytest
|
||||
import pytest_mock
|
||||
from prisma.enums import ReviewStatus
|
||||
from pytest_snapshot.plugin import Snapshot
|
||||
|
||||
from backend.server.v2.executions.review.model import PendingHumanReviewModel
|
||||
from backend.server.v2.executions.review.routes import router
|
||||
|
||||
# Using a fixed timestamp for reproducible tests
|
||||
FIXED_NOW = datetime.datetime(2023, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
|
||||
|
||||
app = fastapi.FastAPI()
|
||||
app.include_router(router, prefix="/api/review")
|
||||
|
||||
client = fastapi.testclient.TestClient(app)
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def setup_app_auth(mock_jwt_user):
|
||||
"""Setup auth overrides for all tests in this module"""
|
||||
from autogpt_libs.auth.jwt_utils import get_jwt_payload
|
||||
|
||||
app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"]
|
||||
yield
|
||||
app.dependency_overrides.clear()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def sample_pending_review() -> PendingHumanReviewModel:
|
||||
"""Create a sample pending review for testing"""
|
||||
return PendingHumanReviewModel(
|
||||
node_exec_id="test_node_123",
|
||||
user_id="test_user",
|
||||
graph_exec_id="test_graph_exec_456",
|
||||
graph_id="test_graph_789",
|
||||
graph_version=1,
|
||||
payload={"data": "test payload", "value": 42},
|
||||
instructions="Please review this data",
|
||||
editable=True,
|
||||
status=ReviewStatus.WAITING,
|
||||
review_message=None,
|
||||
was_edited=None,
|
||||
processed=False,
|
||||
created_at=FIXED_NOW,
|
||||
updated_at=None,
|
||||
reviewed_at=None,
|
||||
)
|
||||
|
||||
|
||||
def test_get_pending_reviews_empty(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test getting pending reviews when none exist"""
|
||||
mock_get_reviews = mocker.patch(
|
||||
"backend.server.v2.executions.review.routes.get_pending_reviews_for_user"
|
||||
)
|
||||
mock_get_reviews.return_value = []
|
||||
|
||||
response = client.get("/api/review/pending")
|
||||
|
||||
assert response.status_code == 200
|
||||
assert response.json() == []
|
||||
mock_get_reviews.assert_called_once_with("test_user", 1, 25)
|
||||
|
||||
|
||||
def test_get_pending_reviews_with_data(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
sample_pending_review: PendingHumanReviewModel,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test getting pending reviews with data"""
|
||||
mock_get_reviews = mocker.patch(
|
||||
"backend.server.v2.executions.review.routes.get_pending_reviews_for_user"
|
||||
)
|
||||
mock_get_reviews.return_value = [sample_pending_review]
|
||||
|
||||
response = client.get("/api/review/pending?page=2&page_size=10")
|
||||
|
||||
assert response.status_code == 200
|
||||
data = response.json()
|
||||
assert len(data) == 1
|
||||
assert data[0]["node_exec_id"] == "test_node_123"
|
||||
assert data[0]["status"] == "WAITING"
|
||||
mock_get_reviews.assert_called_once_with("test_user", 2, 10)
|
||||
|
||||
|
||||
def test_get_pending_reviews_for_execution_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
sample_pending_review: PendingHumanReviewModel,
|
||||
snapshot: Snapshot,
|
||||
) -> None:
|
||||
"""Test getting pending reviews for specific execution"""
|
||||
mock_get_graph_execution = mocker.patch(
|
||||
"backend.server.v2.executions.review.routes.get_graph_execution_meta"
|
||||
)
|
||||
mock_get_graph_execution.return_value = {
|
||||
"id": "test_graph_exec_456",
|
||||
"user_id": "test_user",
|
||||
}
|
||||
|
||||
mock_get_reviews = mocker.patch(
|
||||
"backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
|
||||
)
|
||||
mock_get_reviews.return_value = [sample_pending_review]
|
||||
|
||||
response = client.get("/api/review/execution/test_graph_exec_456")
|
||||
|
||||
assert response.status_code == 200
|
||||
data = response.json()
|
||||
assert len(data) == 1
|
||||
assert data[0]["graph_exec_id"] == "test_graph_exec_456"
|
||||
|
||||
|
||||
def test_get_pending_reviews_for_execution_access_denied(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
) -> None:
|
||||
"""Test access denied when user doesn't own the execution"""
|
||||
mock_get_graph_execution = mocker.patch(
|
||||
"backend.server.v2.executions.review.routes.get_graph_execution_meta"
|
||||
)
|
||||
mock_get_graph_execution.return_value = None
|
||||
|
||||
response = client.get("/api/review/execution/test_graph_exec_456")
|
||||
|
||||
assert response.status_code == 403
|
||||
assert "Access denied" in response.json()["detail"]
|
||||
|
||||
|
||||
def test_process_review_action_approve_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
sample_pending_review: PendingHumanReviewModel,
|
||||
) -> None:
|
||||
"""Test successful review approval"""
|
||||
# Mock the validation functions
|
||||
mock_get_pending_review = mocker.patch(
|
||||
"backend.data.human_review.get_pending_review_by_node_exec_id"
|
||||
)
|
||||
mock_get_pending_review.return_value = sample_pending_review
|
||||
|
||||
mock_get_reviews_for_execution = mocker.patch(
|
||||
"backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
|
||||
)
|
||||
mock_get_reviews_for_execution.return_value = [sample_pending_review]
|
||||
|
||||
mock_process_all_reviews = mocker.patch(
|
||||
"backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
|
||||
)
|
||||
mock_process_all_reviews.return_value = {"test_node_123": sample_pending_review}
|
||||
|
||||
mock_has_pending = mocker.patch(
|
||||
"backend.data.human_review.has_pending_reviews_for_graph_exec"
|
||||
)
|
||||
mock_has_pending.return_value = False
|
||||
|
||||
mocker.patch("backend.executor.utils.add_graph_execution")
|
||||
|
||||
request_data = {
|
||||
"approved_reviews": [
|
||||
{
|
||||
"node_exec_id": "test_node_123",
|
||||
"message": "Looks good",
|
||||
"reviewed_data": {"data": "modified payload", "value": 50},
|
||||
}
|
||||
],
|
||||
"rejected_review_ids": [],
|
||||
}
|
||||
|
||||
response = client.post("/api/review/action", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
data = response.json()
|
||||
assert data["approved_count"] == 1
|
||||
assert data["rejected_count"] == 0
|
||||
assert data["failed_count"] == 0
|
||||
assert data["error"] is None
|
||||
|
||||
|
||||
def test_process_review_action_reject_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
sample_pending_review: PendingHumanReviewModel,
|
||||
) -> None:
|
||||
"""Test successful review rejection"""
|
||||
# Mock the validation functions
|
||||
mock_get_pending_review = mocker.patch(
|
||||
"backend.data.human_review.get_pending_review_by_node_exec_id"
|
||||
)
|
||||
mock_get_pending_review.return_value = sample_pending_review
|
||||
|
||||
mock_get_reviews_for_execution = mocker.patch(
|
||||
"backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
|
||||
)
|
||||
mock_get_reviews_for_execution.return_value = [sample_pending_review]
|
||||
|
||||
mock_process_all_reviews = mocker.patch(
|
||||
"backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
|
||||
)
|
||||
rejected_review = PendingHumanReviewModel(
|
||||
node_exec_id="test_node_123",
|
||||
user_id="test_user",
|
||||
graph_exec_id="test_graph_exec_456",
|
||||
graph_id="test_graph_789",
|
||||
graph_version=1,
|
||||
payload={"data": "test payload"},
|
||||
instructions="Please review",
|
||||
editable=True,
|
||||
status=ReviewStatus.REJECTED,
|
||||
review_message="Rejected by user",
|
||||
was_edited=False,
|
||||
processed=False,
|
||||
created_at=FIXED_NOW,
|
||||
updated_at=None,
|
||||
reviewed_at=FIXED_NOW,
|
||||
)
|
||||
mock_process_all_reviews.return_value = {"test_node_123": rejected_review}
|
||||
|
||||
mock_has_pending = mocker.patch(
|
||||
"backend.data.human_review.has_pending_reviews_for_graph_exec"
|
||||
)
|
||||
mock_has_pending.return_value = False
|
||||
|
||||
request_data = {"approved_reviews": [], "rejected_review_ids": ["test_node_123"]}
|
||||
|
||||
response = client.post("/api/review/action", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
data = response.json()
|
||||
assert data["approved_count"] == 0
|
||||
assert data["rejected_count"] == 1
|
||||
assert data["failed_count"] == 0
|
||||
assert data["error"] is None
|
||||
|
||||
|
||||
def test_process_review_action_mixed_success(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
sample_pending_review: PendingHumanReviewModel,
|
||||
) -> None:
|
||||
"""Test mixed approve/reject operations"""
|
||||
# Create a second review
|
||||
second_review = PendingHumanReviewModel(
|
||||
node_exec_id="test_node_456",
|
||||
user_id="test_user",
|
||||
graph_exec_id="test_graph_exec_456",
|
||||
graph_id="test_graph_789",
|
||||
graph_version=1,
|
||||
payload={"data": "second payload"},
|
||||
instructions="Second review",
|
||||
editable=False,
|
||||
status=ReviewStatus.WAITING,
|
||||
review_message=None,
|
||||
was_edited=None,
|
||||
processed=False,
|
||||
created_at=FIXED_NOW,
|
||||
updated_at=None,
|
||||
reviewed_at=None,
|
||||
)
|
||||
|
||||
# Mock the validation functions
|
||||
mock_get_pending_review = mocker.patch(
|
||||
"backend.data.human_review.get_pending_review_by_node_exec_id"
|
||||
)
|
||||
mock_get_pending_review.side_effect = lambda node_id, user_id: (
|
||||
sample_pending_review if node_id == "test_node_123" else second_review
|
||||
)
|
||||
|
||||
mock_get_reviews_for_execution = mocker.patch(
|
||||
"backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
|
||||
)
|
||||
mock_get_reviews_for_execution.return_value = [sample_pending_review, second_review]
|
||||
|
||||
mock_process_all_reviews = mocker.patch(
|
||||
"backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
|
||||
)
|
||||
# Create approved version of first review
|
||||
approved_review = PendingHumanReviewModel(
|
||||
node_exec_id="test_node_123",
|
||||
user_id="test_user",
|
||||
graph_exec_id="test_graph_exec_456",
|
||||
graph_id="test_graph_789",
|
||||
graph_version=1,
|
||||
payload={"data": "modified"},
|
||||
instructions="Please review",
|
||||
editable=True,
|
||||
status=ReviewStatus.APPROVED,
|
||||
review_message="Approved",
|
||||
was_edited=True,
|
||||
processed=False,
|
||||
created_at=FIXED_NOW,
|
||||
updated_at=None,
|
||||
reviewed_at=FIXED_NOW,
|
||||
)
|
||||
# Create rejected version of second review
|
||||
rejected_review = PendingHumanReviewModel(
|
||||
node_exec_id="test_node_456",
|
||||
user_id="test_user",
|
||||
graph_exec_id="test_graph_exec_456",
|
||||
graph_id="test_graph_789",
|
||||
graph_version=1,
|
||||
payload={"data": "second payload"},
|
||||
instructions="Second review",
|
||||
editable=False,
|
||||
status=ReviewStatus.REJECTED,
|
||||
review_message="Rejected by user",
|
||||
was_edited=False,
|
||||
processed=False,
|
||||
created_at=FIXED_NOW,
|
||||
updated_at=None,
|
||||
reviewed_at=FIXED_NOW,
|
||||
)
|
||||
mock_process_all_reviews.return_value = {
|
||||
"test_node_123": approved_review,
|
||||
"test_node_456": rejected_review,
|
||||
}
|
||||
|
||||
mock_has_pending = mocker.patch(
|
||||
"backend.data.human_review.has_pending_reviews_for_graph_exec"
|
||||
)
|
||||
mock_has_pending.return_value = False
|
||||
|
||||
request_data = {
|
||||
"approved_reviews": [
|
||||
{
|
||||
"node_exec_id": "test_node_123",
|
||||
"message": "Approved",
|
||||
"reviewed_data": {"data": "modified"},
|
||||
}
|
||||
],
|
||||
"rejected_review_ids": ["test_node_456"],
|
||||
}
|
||||
|
||||
response = client.post("/api/review/action", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
data = response.json()
|
||||
assert data["approved_count"] == 1
|
||||
assert data["rejected_count"] == 1
|
||||
assert data["failed_count"] == 0
|
||||
assert data["error"] is None
|
||||
|
||||
|
||||
def test_process_review_action_empty_request(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
) -> None:
|
||||
"""Test error when no reviews provided"""
|
||||
request_data = {"approved_reviews": [], "rejected_review_ids": []}
|
||||
|
||||
response = client.post("/api/review/action", json=request_data)
|
||||
|
||||
assert response.status_code == 400
|
||||
assert "At least one review must be provided" in response.json()["detail"]
|
||||
|
||||
|
||||
def test_process_review_action_review_not_found(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
) -> None:
|
||||
"""Test error when review is not found"""
|
||||
mock_get_pending_review = mocker.patch(
|
||||
"backend.data.human_review.get_pending_review_by_node_exec_id"
|
||||
)
|
||||
mock_get_pending_review.return_value = None
|
||||
|
||||
request_data = {
|
||||
"approved_reviews": [
|
||||
{
|
||||
"node_exec_id": "nonexistent_node",
|
||||
"message": "Test",
|
||||
}
|
||||
],
|
||||
"rejected_review_ids": [],
|
||||
}
|
||||
|
||||
response = client.post("/api/review/action", json=request_data)
|
||||
|
||||
assert response.status_code == 403
|
||||
assert "not found or access denied" in response.json()["detail"]
|
||||
|
||||
|
||||
def test_process_review_action_partial_failure(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
sample_pending_review: PendingHumanReviewModel,
|
||||
) -> None:
|
||||
"""Test handling of partial failures in review processing"""
|
||||
# Mock successful validation
|
||||
mock_get_pending_review = mocker.patch(
|
||||
"backend.data.human_review.get_pending_review_by_node_exec_id"
|
||||
)
|
||||
mock_get_pending_review.return_value = sample_pending_review
|
||||
|
||||
mock_get_reviews_for_execution = mocker.patch(
|
||||
"backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
|
||||
)
|
||||
mock_get_reviews_for_execution.return_value = [sample_pending_review]
|
||||
|
||||
# Mock partial failure in processing
|
||||
mock_process_all_reviews = mocker.patch(
|
||||
"backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
|
||||
)
|
||||
mock_process_all_reviews.side_effect = ValueError("Some reviews failed validation")
|
||||
|
||||
request_data = {
|
||||
"approved_reviews": [
|
||||
{
|
||||
"node_exec_id": "test_node_123",
|
||||
"message": "Test",
|
||||
}
|
||||
],
|
||||
"rejected_review_ids": [],
|
||||
}
|
||||
|
||||
response = client.post("/api/review/action", json=request_data)
|
||||
|
||||
assert response.status_code == 200
|
||||
data = response.json()
|
||||
assert data["approved_count"] == 0
|
||||
assert data["rejected_count"] == 0
|
||||
assert data["failed_count"] == 1
|
||||
assert "Failed to process reviews" in data["error"]
|
||||
|
||||
|
||||
def test_process_review_action_complete_failure(
|
||||
mocker: pytest_mock.MockFixture,
|
||||
sample_pending_review: PendingHumanReviewModel,
|
||||
) -> None:
|
||||
"""Test complete failure scenario"""
|
||||
# Mock successful validation
|
||||
mock_get_pending_review = mocker.patch(
|
||||
"backend.data.human_review.get_pending_review_by_node_exec_id"
|
||||
)
|
||||
mock_get_pending_review.return_value = sample_pending_review
|
||||
|
||||
mock_get_reviews_for_execution = mocker.patch(
|
||||
"backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
|
||||
)
|
||||
mock_get_reviews_for_execution.return_value = [sample_pending_review]
|
||||
|
||||
# Mock complete failure in processing
|
||||
mock_process_all_reviews = mocker.patch(
|
||||
"backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
|
||||
)
|
||||
mock_process_all_reviews.side_effect = Exception("Database error")
|
||||
|
||||
request_data = {
|
||||
"approved_reviews": [
|
||||
{
|
||||
"node_exec_id": "test_node_123",
|
||||
"message": "Test",
|
||||
}
|
||||
],
|
||||
"rejected_review_ids": [],
|
||||
}
|
||||
|
||||
response = client.post("/api/review/action", json=request_data)
|
||||
|
||||
assert response.status_code == 500
|
||||
assert "error" in response.json()["detail"].lower()
|
||||
@@ -0,0 +1,194 @@
|
||||
import logging
|
||||
from typing import List
|
||||
|
||||
import autogpt_libs.auth as autogpt_auth_lib
|
||||
from fastapi import APIRouter, HTTPException, Query, Security, status
|
||||
from prisma.enums import ReviewStatus
|
||||
|
||||
from backend.data.execution import get_graph_execution_meta
|
||||
from backend.data.human_review import (
|
||||
get_pending_reviews_for_execution,
|
||||
get_pending_reviews_for_user,
|
||||
has_pending_reviews_for_graph_exec,
|
||||
process_all_reviews_for_execution,
|
||||
)
|
||||
from backend.executor.utils import add_graph_execution
|
||||
from backend.server.v2.executions.review.model import (
|
||||
PendingHumanReviewModel,
|
||||
ReviewRequest,
|
||||
ReviewResponse,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
router = APIRouter(
|
||||
tags=["executions", "review", "private"],
|
||||
dependencies=[Security(autogpt_auth_lib.requires_user)],
|
||||
)
|
||||
|
||||
|
||||
@router.get(
    "/pending",
    summary="Get Pending Reviews",
    response_model=List[PendingHumanReviewModel],
    responses={
        200: {"description": "List of pending reviews"},
        500: {"description": "Server error", "content": {"application/json": {}}},
    },
)
async def list_pending_reviews(
    user_id: str = Security(autogpt_auth_lib.get_user_id),
    page: int = Query(1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(25, ge=1, le=100, description="Number of reviews per page"),
) -> List[PendingHumanReviewModel]:
    """Return the authenticated user's pending (WAITING) reviews, paginated.

    Delegates entirely to the data layer, which orders results by creation
    time (newest first) and excludes reviews with invalid status values
    (those are logged as warnings rather than failing the request).

    Args:
        user_id: Authenticated user ID from the security dependency.
        page: 1-indexed page number.
        page_size: Number of reviews per page (1-100).

    Returns:
        List of pending review objects for the user.

    Raises:
        HTTPException: If authentication fails or a database error occurs.
    """
    return await get_pending_reviews_for_user(user_id, page, page_size)
|
||||
|
||||
|
||||
@router.get(
    "/execution/{graph_exec_id}",
    summary="Get Pending Reviews for Execution",
    response_model=List[PendingHumanReviewModel],
    responses={
        200: {"description": "List of pending reviews for the execution"},
        400: {"description": "Invalid graph execution ID"},
        403: {"description": "Access denied to graph execution"},
        500: {"description": "Server error", "content": {"application/json": {}}},
    },
)
async def list_pending_reviews_for_execution(
    graph_exec_id: str,
    user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> List[PendingHumanReviewModel]:
    """Return the pending (WAITING) reviews for one graph execution.

    Only reviews owned by the authenticated user are returned; the data
    layer orders them oldest-first to preserve review order within the
    execution, and excludes reviews with invalid status (logged as
    warnings rather than failing the request).

    Args:
        graph_exec_id: ID of the graph execution to fetch reviews for.
        user_id: Authenticated user ID from the security dependency.

    Returns:
        List of pending review objects for the execution.

    Raises:
        HTTPException:
            - 403: If the user does not own the graph execution.
            - 500: If authentication fails or a database error occurs.
    """
    # Ownership check first: a missing/foreign execution yields 403 rather
    # than leaking whether the execution exists.
    owned_execution = await get_graph_execution_meta(
        user_id=user_id, execution_id=graph_exec_id
    )
    if owned_execution is None:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Access denied to graph execution",
        )

    return await get_pending_reviews_for_execution(graph_exec_id, user_id)
|
||||
|
||||
|
||||
@router.post("/action", response_model=ReviewResponse)
async def process_review_action(
    request: ReviewRequest,
    user_id: str = Security(autogpt_auth_lib.get_user_id),
) -> ReviewResponse:
    """Process a batch of reviews with approve or reject actions.

    Applies each decision (approve — optionally with edited data — or
    reject) to its node execution, then resumes the graph execution once
    no pending reviews remain for it.

    Args:
        request: Batch of review decisions, one per node execution ID.
        user_id: Authenticated user ID from the security dependency.

    Returns:
        ReviewResponse with counts of approved, rejected, and failed
        (requested but not processed) reviews.

    Raises:
        HTTPException: 400 if the request contains no reviews.
    """
    all_request_node_ids = {review.node_exec_id for review in request.reviews}
    if not all_request_node_ids:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="At least one review must be provided",
        )

    # Map node exec ID -> (status, data to persist, reviewer message).
    # Rejected reviews never carry reviewed data.
    review_decisions = {
        review.node_exec_id: (
            ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED,
            review.reviewed_data if review.approved else None,
            review.message,
        )
        for review in request.reviews
    }

    updated_reviews = await process_all_reviews_for_execution(
        user_id=user_id,
        review_decisions=review_decisions,
    )

    approved_count = sum(
        1
        for review in updated_reviews.values()
        if review.status == ReviewStatus.APPROVED
    )
    rejected_count = sum(
        1
        for review in updated_reviews.values()
        if review.status == ReviewStatus.REJECTED
    )
    # BUGFIX: previously failed_count was hard-coded to 0. Reviews that were
    # requested but not processed (e.g. unknown node exec ID or not owned by
    # this user) are now reported instead of being silently dropped.
    failed_count = len(all_request_node_ids - set(updated_reviews.keys()))

    # Resume the execution only once every pending review for it is resolved.
    if updated_reviews:
        # All processed reviews belong to the same execution; any entry
        # provides the graph execution ID.
        first_review = next(iter(updated_reviews.values()))
        graph_exec_id = first_review.graph_exec_id

        still_has_pending = await has_pending_reviews_for_graph_exec(graph_exec_id)
        if not still_has_pending:
            try:
                await add_graph_execution(
                    graph_id=first_review.graph_id,
                    user_id=user_id,
                    graph_exec_id=graph_exec_id,
                )
                logger.info(f"Resumed execution {graph_exec_id}")
            except Exception as e:
                # Resumption failure is logged but does not fail the response:
                # the review decisions were already persisted above.
                logger.error(f"Failed to resume execution {graph_exec_id}: {str(e)}")

    return ReviewResponse(
        approved_count=approved_count,
        rejected_count=rejected_count,
        failed_count=failed_count,
        error=None,
    )
|
||||
@@ -22,7 +22,9 @@ router = APIRouter(
|
||||
@router.get(
|
||||
"",
|
||||
summary="List Library Agents",
|
||||
response_model=library_model.LibraryAgentResponse,
|
||||
responses={
|
||||
200: {"description": "List of library agents"},
|
||||
500: {"description": "Server error", "content": {"application/json": {}}},
|
||||
},
|
||||
)
|
||||
@@ -155,7 +157,12 @@ async def get_library_agent_by_graph_id(
|
||||
@router.get(
|
||||
"/marketplace/{store_listing_version_id}",
|
||||
summary="Get Agent By Store ID",
|
||||
tags=["store, library"],
|
||||
tags=["store", "library"],
|
||||
response_model=library_model.LibraryAgent | None,
|
||||
responses={
|
||||
200: {"description": "Library agent found"},
|
||||
404: {"description": "Agent not found"},
|
||||
},
|
||||
)
|
||||
async def get_library_agent_by_store_listing_version_id(
|
||||
store_listing_version_id: str,
|
||||
|
||||
@@ -12,7 +12,7 @@ import prisma.types
|
||||
|
||||
import backend.server.v2.store.exceptions
|
||||
import backend.server.v2.store.model
|
||||
from backend.data.db import transaction
|
||||
from backend.data.db import query_raw_with_schema, transaction
|
||||
from backend.data.graph import (
|
||||
GraphMeta,
|
||||
GraphModel,
|
||||
@@ -120,7 +120,7 @@ async def get_store_agents(
|
||||
is_available,
|
||||
updated_at,
|
||||
ts_rank_cd(search, query) AS rank
|
||||
FROM "StoreAgent",
|
||||
FROM {{schema_prefix}}"StoreAgent",
|
||||
plainto_tsquery('english', $1) AS query
|
||||
WHERE {sql_where_clause}
|
||||
AND search @@ query
|
||||
@@ -131,22 +131,18 @@ async def get_store_agents(
|
||||
# Count query for pagination - only uses search term parameter
|
||||
count_query = f"""
|
||||
SELECT COUNT(*) as count
|
||||
FROM "StoreAgent",
|
||||
FROM {{schema_prefix}}"StoreAgent",
|
||||
plainto_tsquery('english', $1) AS query
|
||||
WHERE {sql_where_clause}
|
||||
AND search @@ query
|
||||
"""
|
||||
|
||||
# Execute both queries with parameters
|
||||
agents = await prisma.client.get_client().query_raw(
|
||||
typing.cast(typing.LiteralString, sql_query), *params
|
||||
)
|
||||
agents = await query_raw_with_schema(sql_query, *params)
|
||||
|
||||
# For count, use params without pagination (last 2 params)
|
||||
count_params = params[:-2]
|
||||
count_result = await prisma.client.get_client().query_raw(
|
||||
typing.cast(typing.LiteralString, count_query), *count_params
|
||||
)
|
||||
count_result = await query_raw_with_schema(count_query, *count_params)
|
||||
|
||||
total = count_result[0]["count"] if count_result else 0
|
||||
total_pages = (total + page_size - 1) // page_size
|
||||
|
||||
@@ -140,6 +140,7 @@ async def execute_block_test(block: Block):
|
||||
"graph_exec_id": str(uuid.uuid4()),
|
||||
"node_exec_id": str(uuid.uuid4()),
|
||||
"user_id": str(uuid.uuid4()),
|
||||
"graph_version": 1, # Default version for tests
|
||||
"user_context": UserContext(timezone="UTC"), # Default for tests
|
||||
}
|
||||
input_model = cast(type[BlockSchema], block.input_schema)
|
||||
|
||||
@@ -0,0 +1,44 @@
|
||||
-- Lifecycle states for a human review request.
-- CreateEnum
CREATE TYPE "ReviewStatus" AS ENUM ('WAITING', 'APPROVED', 'REJECTED');

-- Graph executions gain a REVIEW state while awaiting human input.
-- AlterEnum
ALTER TYPE "AgentExecutionStatus" ADD VALUE 'REVIEW';

-- One pending review per node execution; the node execution ID doubles
-- as the primary key.
-- CreateTable
CREATE TABLE "PendingHumanReview" (
    "nodeExecId" TEXT NOT NULL,
    "userId" TEXT NOT NULL,
    "graphExecId" TEXT NOT NULL,
    "graphId" TEXT NOT NULL,
    "graphVersion" INTEGER NOT NULL,
    "payload" JSONB NOT NULL,
    "instructions" TEXT,
    "editable" BOOLEAN NOT NULL DEFAULT true,
    "status" "ReviewStatus" NOT NULL DEFAULT 'WAITING',
    "reviewMessage" TEXT,
    "wasEdited" BOOLEAN,
    "processed" BOOLEAN NOT NULL DEFAULT false,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3),
    "reviewedAt" TIMESTAMP(3),

    CONSTRAINT "PendingHumanReview_pkey" PRIMARY KEY ("nodeExecId")
);

-- Supports "list pending reviews for a user" queries.
-- CreateIndex
CREATE INDEX "PendingHumanReview_userId_status_idx" ON "PendingHumanReview"("userId", "status");

-- Supports "list pending reviews for an execution" queries.
-- CreateIndex
CREATE INDEX "PendingHumanReview_graphExecId_status_idx" ON "PendingHumanReview"("graphExecId", "status");

-- Enforces at most one review row per node execution.
-- CreateIndex
CREATE UNIQUE INDEX "PendingHumanReview_nodeExecId_key" ON "PendingHumanReview"("nodeExecId");

-- Reviews are deleted together with their owning user / executions.
-- AddForeignKey
ALTER TABLE "PendingHumanReview" ADD CONSTRAINT "PendingHumanReview_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;

-- AddForeignKey
ALTER TABLE "PendingHumanReview" ADD CONSTRAINT "PendingHumanReview_nodeExecId_fkey" FOREIGN KEY ("nodeExecId") REFERENCES "AgentNodeExecution"("id") ON DELETE CASCADE ON UPDATE CASCADE;

-- AddForeignKey
ALTER TABLE "PendingHumanReview" ADD CONSTRAINT "PendingHumanReview_graphExecId_fkey" FOREIGN KEY ("graphExecId") REFERENCES "AgentGraphExecution"("id") ON DELETE CASCADE ON UPDATE CASCADE;
|
||||
@@ -59,6 +59,7 @@ model User {
|
||||
APIKeys APIKey[]
|
||||
IntegrationWebhooks IntegrationWebhook[]
|
||||
NotificationBatches UserNotificationBatch[]
|
||||
PendingHumanReviews PendingHumanReview[]
|
||||
}
|
||||
|
||||
enum OnboardingStep {
|
||||
@@ -351,6 +352,7 @@ enum AgentExecutionStatus {
|
||||
COMPLETED
|
||||
TERMINATED
|
||||
FAILED
|
||||
REVIEW
|
||||
}
|
||||
|
||||
// This model describes the execution of an AgentGraph.
|
||||
@@ -393,6 +395,8 @@ model AgentGraphExecution {
|
||||
shareToken String? @unique
|
||||
sharedAt DateTime?
|
||||
|
||||
PendingHumanReviews PendingHumanReview[]
|
||||
|
||||
@@index([agentGraphId, agentGraphVersion])
|
||||
@@index([userId, isDeleted, createdAt])
|
||||
@@index([createdAt])
|
||||
@@ -423,6 +427,8 @@ model AgentNodeExecution {
|
||||
|
||||
stats Json?
|
||||
|
||||
PendingHumanReview PendingHumanReview?
|
||||
|
||||
@@index([agentGraphExecutionId, agentNodeId, executionStatus])
|
||||
@@index([agentNodeId, executionStatus])
|
||||
@@index([addedTime, queuedTime])
|
||||
@@ -464,6 +470,39 @@ model AgentNodeExecutionKeyValueData {
|
||||
@@id([userId, key])
|
||||
}
|
||||
|
||||
enum ReviewStatus {
|
||||
WAITING
|
||||
APPROVED
|
||||
REJECTED
|
||||
}
|
||||
|
||||
// Pending human reviews for Human-in-the-loop blocks
|
||||
model PendingHumanReview {
|
||||
nodeExecId String @id
|
||||
userId String
|
||||
graphExecId String
|
||||
graphId String
|
||||
graphVersion Int
|
||||
payload Json // The actual payload data to be reviewed
|
||||
instructions String? // Instructions/message for the reviewer
|
||||
editable Boolean @default(true) // Whether the reviewer can edit the data
|
||||
status ReviewStatus @default(WAITING)
|
||||
reviewMessage String? // Optional message from the reviewer
|
||||
wasEdited Boolean? // Whether the data was modified during review
|
||||
processed Boolean @default(false) // Whether the review result has been processed by the execution engine
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime? @updatedAt
|
||||
reviewedAt DateTime?
|
||||
|
||||
User User @relation(fields: [userId], references: [id], onDelete: Cascade)
|
||||
NodeExecution AgentNodeExecution @relation(fields: [nodeExecId], references: [id], onDelete: Cascade)
|
||||
GraphExecution AgentGraphExecution @relation(fields: [graphExecId], references: [id], onDelete: Cascade)
|
||||
|
||||
@@unique([nodeExecId]) // One pending review per node execution
|
||||
@@index([userId, status])
|
||||
@@index([graphExecId, status])
|
||||
}
|
||||
|
||||
// Webhook that is registered with a provider and propagates to one or more nodes
|
||||
model IntegrationWebhook {
|
||||
id String @id @default(uuid())
|
||||
|
||||
@@ -34,7 +34,8 @@ const nextConfig = {
|
||||
},
|
||||
],
|
||||
},
|
||||
output: "standalone",
|
||||
// Vercel has its own deployment mechanism and doesn't need standalone mode
|
||||
...(process.env.VERCEL ? {} : { output: "standalone" }),
|
||||
transpilePackages: ["geist"],
|
||||
};
|
||||
|
||||
|
||||
@@ -54,7 +54,7 @@
|
||||
"@rjsf/core": "5.24.13",
|
||||
"@rjsf/utils": "5.24.13",
|
||||
"@rjsf/validator-ajv8": "5.24.13",
|
||||
"@sentry/nextjs": "10.22.0",
|
||||
"@sentry/nextjs": "10.27.0",
|
||||
"@supabase/ssr": "0.7.0",
|
||||
"@supabase/supabase-js": "2.78.0",
|
||||
"@tanstack/react-query": "5.90.6",
|
||||
@@ -134,7 +134,7 @@
|
||||
"axe-playwright": "2.2.2",
|
||||
"chromatic": "13.3.3",
|
||||
"concurrently": "9.2.1",
|
||||
"cross-env": "7.0.3",
|
||||
"cross-env": "10.1.0",
|
||||
"eslint": "8.57.1",
|
||||
"eslint-config-next": "15.5.2",
|
||||
"eslint-plugin-storybook": "9.1.5",
|
||||
|
||||
747
autogpt_platform/frontend/pnpm-lock.yaml
generated
747
autogpt_platform/frontend/pnpm-lock.yaml
generated
File diff suppressed because it is too large
Load Diff
@@ -1,4 +1,4 @@
|
||||
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs";
|
||||
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs";
|
||||
import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
|
||||
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
|
||||
import { useState } from "react";
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
"use client";
|
||||
|
||||
import { RunAgentInputs } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentInputs/RunAgentInputs";
|
||||
import { RunAgentInputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs";
|
||||
import {
|
||||
Card,
|
||||
CardContent,
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
"use client";
|
||||
|
||||
import React from "react";
|
||||
import { useParams } from "next/navigation";
|
||||
import { RunOutputs } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/components/RunOutputs";
|
||||
import { RunOutputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunOutputs";
|
||||
import { useGetV1GetSharedExecution } from "@/app/api/__generated__/endpoints/default/default";
|
||||
import {
|
||||
Card,
|
||||
CardContent,
|
||||
@@ -11,7 +10,7 @@ import {
|
||||
} from "@/components/__legacy__/ui/card";
|
||||
import { Alert, AlertDescription } from "@/components/molecules/Alert/Alert";
|
||||
import { InfoIcon } from "lucide-react";
|
||||
import { useGetV1GetSharedExecution } from "@/app/api/__generated__/endpoints/default/default";
|
||||
import { useParams } from "next/navigation";
|
||||
|
||||
export default function SharePage() {
|
||||
const params = useParams();
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { OAuthPopupResultMessage } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs";
|
||||
import { OAuthPopupResultMessage } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs";
|
||||
import { NextResponse } from "next/server";
|
||||
|
||||
// This route is intended to be used as the callback for integration OAuth flows,
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
import { BlockUIType } from "@/app/(platform)/build/components/types";
|
||||
import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
|
||||
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipProvider,
|
||||
TooltipTrigger,
|
||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
globalRegistry,
|
||||
OutputActions,
|
||||
OutputItem,
|
||||
} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
|
||||
import { Label } from "@/components/__legacy__/ui/label";
|
||||
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
|
||||
import {
|
||||
Sheet,
|
||||
SheetContent,
|
||||
@@ -12,20 +16,16 @@ import {
|
||||
SheetTitle,
|
||||
SheetTrigger,
|
||||
} from "@/components/__legacy__/ui/sheet";
|
||||
import { BuilderActionButton } from "../BuilderActionButton";
|
||||
import { BookOpenIcon } from "@phosphor-icons/react";
|
||||
import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
|
||||
import { useShallow } from "zustand/react/shallow";
|
||||
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
|
||||
import { BlockUIType } from "@/app/(platform)/build/components/types";
|
||||
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
|
||||
import { Label } from "@/components/__legacy__/ui/label";
|
||||
import { useMemo } from "react";
|
||||
import {
|
||||
globalRegistry,
|
||||
OutputItem,
|
||||
OutputActions,
|
||||
} from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers";
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipProvider,
|
||||
TooltipTrigger,
|
||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { BookOpenIcon } from "@phosphor-icons/react";
|
||||
import { useMemo } from "react";
|
||||
import { useShallow } from "zustand/react/shallow";
|
||||
import { BuilderActionButton } from "../BuilderActionButton";
|
||||
|
||||
export const AgentOutputs = ({ flowID }: { flowID: string | null }) => {
|
||||
const hasOutputs = useGraphStore(useShallow((state) => state.hasOutputs));
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
import { CronScheduler } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/CronScheduler";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Input } from "@/components/atoms/Input/Input";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { InfoIcon } from "lucide-react";
|
||||
import { CronScheduler } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/CronScheduler";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { useCronSchedulerDialog } from "./useCronSchedulerDialog";
|
||||
import { Input } from "@/components/atoms/Input/Input";
|
||||
|
||||
type CronSchedulerDialogProps = {
|
||||
open: boolean;
|
||||
|
||||
@@ -18,6 +18,7 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => {
|
||||
openRunInputDialog,
|
||||
setOpenRunInputDialog,
|
||||
isExecutingGraph,
|
||||
isTerminatingGraph,
|
||||
isSaving,
|
||||
} = useRunGraph();
|
||||
const isGraphRunning = useGraphStore(
|
||||
@@ -34,8 +35,8 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => {
|
||||
"border-red-500 bg-gradient-to-br from-red-400 to-red-500 shadow-[inset_0_2px_0_0_rgba(255,255,255,0.5),0_2px_4px_0_rgba(0,0,0,0.2)]",
|
||||
)}
|
||||
onClick={isGraphRunning ? handleStopGraph : handleRunGraph}
|
||||
disabled={!flowID || isExecutingGraph}
|
||||
isLoading={isExecutingGraph || isSaving}
|
||||
disabled={!flowID || isExecutingGraph || isTerminatingGraph}
|
||||
isLoading={isExecutingGraph || isTerminatingGraph || isSaving}
|
||||
>
|
||||
{!isGraphRunning ? (
|
||||
<PlayIcon className="size-6 drop-shadow-sm" />
|
||||
|
||||
@@ -15,9 +15,6 @@ export const useRunGraph = () => {
|
||||
showToast: false,
|
||||
});
|
||||
const { toast } = useToast();
|
||||
const setIsGraphRunning = useGraphStore(
|
||||
useShallow((state) => state.setIsGraphRunning),
|
||||
);
|
||||
const hasInputs = useGraphStore(useShallow((state) => state.hasInputs));
|
||||
const hasCredentials = useGraphStore(
|
||||
useShallow((state) => state.hasCredentials),
|
||||
@@ -34,15 +31,13 @@ export const useRunGraph = () => {
|
||||
const { mutateAsync: executeGraph, isPending: isExecutingGraph } =
|
||||
usePostV1ExecuteGraphAgent({
|
||||
mutation: {
|
||||
onSuccess: (response) => {
|
||||
onSuccess: (response: any) => {
|
||||
const { id } = response.data as GraphExecutionMeta;
|
||||
setQueryStates({
|
||||
flowExecutionID: id,
|
||||
});
|
||||
},
|
||||
onError: (error) => {
|
||||
setIsGraphRunning(false);
|
||||
|
||||
onError: (error: any) => {
|
||||
toast({
|
||||
title: (error.detail as string) ?? "An unexpected error occurred.",
|
||||
description: "An unexpected error occurred.",
|
||||
@@ -52,20 +47,19 @@ export const useRunGraph = () => {
|
||||
},
|
||||
});
|
||||
|
||||
const { mutateAsync: stopGraph } = usePostV1StopGraphExecution({
|
||||
mutation: {
|
||||
onSuccess: () => {
|
||||
setIsGraphRunning(false);
|
||||
const { mutateAsync: stopGraph, isPending: isTerminatingGraph } =
|
||||
usePostV1StopGraphExecution({
|
||||
mutation: {
|
||||
onSuccess: () => {},
|
||||
onError: (error: any) => {
|
||||
toast({
|
||||
title: (error.detail as string) ?? "An unexpected error occurred.",
|
||||
description: "An unexpected error occurred.",
|
||||
variant: "destructive",
|
||||
});
|
||||
},
|
||||
},
|
||||
onError: (error) => {
|
||||
toast({
|
||||
title: (error.detail as string) ?? "An unexpected error occurred.",
|
||||
description: "An unexpected error occurred.",
|
||||
variant: "destructive",
|
||||
});
|
||||
},
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
const handleRunGraph = async () => {
|
||||
await saveGraph(undefined);
|
||||
@@ -96,6 +90,7 @@ export const useRunGraph = () => {
|
||||
handleStopGraph,
|
||||
isSaving,
|
||||
isExecutingGraph,
|
||||
isTerminatingGraph,
|
||||
openRunInputDialog,
|
||||
setOpenRunInputDialog,
|
||||
};
|
||||
|
||||
@@ -7,7 +7,6 @@ import {
|
||||
} from "@/lib/autogpt-server-api";
|
||||
import { parseAsInteger, parseAsString, useQueryStates } from "nuqs";
|
||||
import { useMemo, useState } from "react";
|
||||
import { useShallow } from "zustand/react/shallow";
|
||||
import { uiSchema } from "../../../FlowEditor/nodes/uiSchema";
|
||||
import { isCredentialFieldSchema } from "@/components/renderers/input-renderer/fields/CredentialField/helpers";
|
||||
|
||||
@@ -30,9 +29,6 @@ export const useRunInputDialog = ({
|
||||
flowID: parseAsString,
|
||||
flowVersion: parseAsInteger,
|
||||
});
|
||||
const setIsGraphRunning = useGraphStore(
|
||||
useShallow((state) => state.setIsGraphRunning),
|
||||
);
|
||||
const { toast } = useToast();
|
||||
|
||||
const { mutateAsync: executeGraph, isPending: isExecutingGraph } =
|
||||
@@ -45,8 +41,6 @@ export const useRunInputDialog = ({
|
||||
});
|
||||
},
|
||||
onError: (error) => {
|
||||
setIsGraphRunning(false);
|
||||
|
||||
toast({
|
||||
title: (error.detail as string) ?? "An unexpected error occurred.",
|
||||
description: "An unexpected error occurred.",
|
||||
|
||||
@@ -13,9 +13,16 @@ import { BuilderActions } from "../../BuilderActions/BuilderActions";
|
||||
import { RunningBackground } from "./components/RunningBackground";
|
||||
import { useGraphStore } from "../../../stores/graphStore";
|
||||
import { useCopyPaste } from "./useCopyPaste";
|
||||
import { FloatingReviewsPanel } from "@/components/organisms/FloatingReviewsPanel/FloatingReviewsPanel";
|
||||
import { parseAsString, useQueryStates } from "nuqs";
|
||||
import { CustomControls } from "./components/CustomControl";
|
||||
|
||||
export const Flow = () => {
|
||||
const [{ flowExecutionID }] = useQueryStates({
|
||||
flowID: parseAsString,
|
||||
flowExecutionID: parseAsString,
|
||||
});
|
||||
|
||||
const nodes = useNodeStore(useShallow((state) => state.nodes));
|
||||
const onNodesChange = useNodeStore(
|
||||
useShallow((state) => state.onNodesChange),
|
||||
@@ -44,7 +51,9 @@ export const Flow = () => {
|
||||
window.removeEventListener("keydown", handleKeyDown);
|
||||
};
|
||||
}, [handleCopyPaste]);
|
||||
const { isGraphRunning } = useGraphStore();
|
||||
const isGraphRunning = useGraphStore(
|
||||
useShallow((state) => state.isGraphRunning),
|
||||
);
|
||||
return (
|
||||
<div className="flex h-full w-full dark:bg-slate-900">
|
||||
<div className="relative flex-1">
|
||||
@@ -72,6 +81,7 @@ export const Flow = () => {
|
||||
{isGraphRunning && <RunningBackground />}
|
||||
</ReactFlow>
|
||||
</div>
|
||||
<FloatingReviewsPanel executionId={flowExecutionID || undefined} />
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
@@ -13,7 +13,6 @@ import { convertNodesPlusBlockInfoIntoCustomNodes } from "../../helper";
|
||||
import { useEdgeStore } from "../../../stores/edgeStore";
|
||||
import { GetV1GetExecutionDetails200 } from "@/app/api/__generated__/models/getV1GetExecutionDetails200";
|
||||
import { useGraphStore } from "../../../stores/graphStore";
|
||||
import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus";
|
||||
import { useReactFlow } from "@xyflow/react";
|
||||
import { useControlPanelStore } from "../../../stores/controlPanelStore";
|
||||
import { useHistoryStore } from "../../../stores/historyStore";
|
||||
@@ -28,9 +27,6 @@ export const useFlow = () => {
|
||||
const updateNodeExecutionResult = useNodeStore(
|
||||
useShallow((state) => state.updateNodeExecutionResult),
|
||||
);
|
||||
const setIsGraphRunning = useGraphStore(
|
||||
useShallow((state) => state.setIsGraphRunning),
|
||||
);
|
||||
const setGraphSchemas = useGraphStore(
|
||||
useShallow((state) => state.setGraphSchemas),
|
||||
);
|
||||
@@ -126,15 +122,6 @@ export const useFlow = () => {
|
||||
}
|
||||
}, [graph?.links, addLinks]);
|
||||
|
||||
// update graph running status
|
||||
useEffect(() => {
|
||||
const isRunning =
|
||||
executionDetails?.status === AgentExecutionStatus.RUNNING ||
|
||||
executionDetails?.status === AgentExecutionStatus.QUEUED;
|
||||
|
||||
setIsGraphRunning(isRunning);
|
||||
}, [executionDetails?.status, customNodes]);
|
||||
|
||||
// update node execution status in nodes
|
||||
useEffect(() => {
|
||||
if (
|
||||
@@ -182,7 +169,6 @@ export const useFlow = () => {
|
||||
useEdgeStore.getState().setEdges([]);
|
||||
useGraphStore.getState().reset();
|
||||
useEdgeStore.getState().resetEdgeBeads();
|
||||
setIsGraphRunning(false);
|
||||
};
|
||||
}, []);
|
||||
|
||||
|
||||
@@ -19,8 +19,8 @@ export const useFlowRealtime = () => {
|
||||
const updateStatus = useNodeStore(
|
||||
useShallow((state) => state.updateNodeStatus),
|
||||
);
|
||||
const setIsGraphRunning = useGraphStore(
|
||||
useShallow((state) => state.setIsGraphRunning),
|
||||
const setGraphExecutionStatus = useGraphStore(
|
||||
useShallow((state) => state.setGraphExecutionStatus),
|
||||
);
|
||||
const updateEdgeBeads = useEdgeStore(
|
||||
useShallow((state) => state.updateEdgeBeads),
|
||||
@@ -57,11 +57,7 @@ export const useFlowRealtime = () => {
|
||||
return;
|
||||
}
|
||||
|
||||
const isRunning =
|
||||
graphExecution.status === AgentExecutionStatus.RUNNING ||
|
||||
graphExecution.status === AgentExecutionStatus.QUEUED;
|
||||
|
||||
setIsGraphRunning(isRunning);
|
||||
setGraphExecutionStatus(graphExecution.status as AgentExecutionStatus);
|
||||
},
|
||||
);
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ const statusStyles: Record<AgentExecutionStatus, string> = {
|
||||
INCOMPLETE: "text-slate-700 border-slate-400",
|
||||
QUEUED: "text-blue-700 border-blue-400",
|
||||
RUNNING: "text-amber-700 border-amber-400",
|
||||
REVIEW: "text-orange-700 border-orange-400 bg-orange-50",
|
||||
COMPLETED: "text-green-700 border-green-400",
|
||||
TERMINATED: "text-orange-700 border-orange-400",
|
||||
FAILED: "text-red-700 border-red-400",
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
"use client";
|
||||
|
||||
import { globalRegistry } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers";
|
||||
import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers";
|
||||
import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
|
||||
import { globalRegistry } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
|
||||
|
||||
export const TextRenderer: React.FC<{
|
||||
value: any;
|
||||
|
||||
@@ -1,25 +1,25 @@
|
||||
import React, { FC } from "react";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import {
|
||||
OutputActions,
|
||||
OutputItem,
|
||||
} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
|
||||
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipProvider,
|
||||
TooltipTrigger,
|
||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { beautifyString } from "@/lib/utils";
|
||||
import {
|
||||
ArrowsOutSimpleIcon,
|
||||
CheckIcon,
|
||||
CopyIcon,
|
||||
DownloadIcon,
|
||||
CheckIcon,
|
||||
} from "@phosphor-icons/react";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { beautifyString } from "@/lib/utils";
|
||||
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
|
||||
import {
|
||||
OutputItem,
|
||||
OutputActions,
|
||||
} from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers";
|
||||
import { FC } from "react";
|
||||
import { useNodeDataViewer } from "./useNodeDataViewer";
|
||||
|
||||
interface NodeDataViewerProps {
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
import React, { useState, useMemo } from "react";
|
||||
import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers";
|
||||
import { downloadOutputs } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/utils/download";
|
||||
import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
|
||||
import { globalRegistry } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
|
||||
import { downloadOutputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/utils/download";
|
||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
import { globalRegistry } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers";
|
||||
import { beautifyString } from "@/lib/utils";
|
||||
import React, { useMemo, useState } from "react";
|
||||
|
||||
export const useNodeDataViewer = (
|
||||
data: any,
|
||||
|
||||
@@ -4,6 +4,7 @@ export const nodeStyleBasedOnStatus: Record<AgentExecutionStatus, string> = {
|
||||
INCOMPLETE: "ring-slate-300 bg-slate-300",
|
||||
QUEUED: " ring-blue-300 bg-blue-300",
|
||||
RUNNING: "ring-amber-300 bg-amber-300",
|
||||
REVIEW: "ring-orange-300 bg-orange-300",
|
||||
COMPLETED: "ring-green-300 bg-green-300",
|
||||
TERMINATED: "ring-orange-300 bg-orange-300 ",
|
||||
FAILED: "ring-red-300 bg-red-300",
|
||||
|
||||
@@ -1,70 +1,67 @@
|
||||
import React, {
|
||||
useState,
|
||||
useEffect,
|
||||
useCallback,
|
||||
useRef,
|
||||
useContext,
|
||||
} from "react";
|
||||
import Link from "next/link";
|
||||
import { NodeProps, useReactFlow, Node as XYNode, Edge } from "@xyflow/react";
|
||||
import "@xyflow/react/dist/style.css";
|
||||
import "./customnode.css";
|
||||
import InputModalComponent from "../InputModalComponent";
|
||||
import OutputModalComponent from "../OutputModalComponent";
|
||||
import {
|
||||
BlockIORootSchema,
|
||||
BlockIOSubSchema,
|
||||
BlockIOStringSubSchema,
|
||||
Category,
|
||||
NodeExecutionResult,
|
||||
BlockUIType,
|
||||
BlockCost,
|
||||
} from "@/lib/autogpt-server-api";
|
||||
import {
|
||||
beautifyString,
|
||||
cn,
|
||||
fillObjectDefaultsFromSchema,
|
||||
getValue,
|
||||
hasNonNullNonObjectValue,
|
||||
isObject,
|
||||
parseKeys,
|
||||
setNestedProperty,
|
||||
} from "@/lib/utils";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { TextRenderer } from "@/components/__legacy__/ui/render";
|
||||
import { history } from "../history";
|
||||
import NodeHandle from "../NodeHandle";
|
||||
import { NodeGenericInputField, NodeTextBoxInput } from "../NodeInputs";
|
||||
import { getPrimaryCategoryColor } from "@/lib/utils";
|
||||
import { BuilderContext } from "../Flow/Flow";
|
||||
import { Badge } from "../../../../../../components/__legacy__/ui/badge";
|
||||
import NodeOutputs from "../NodeOutputs";
|
||||
import { IconCoin } from "../../../../../../components/__legacy__/ui/icons";
|
||||
import * as Separator from "@radix-ui/react-separator";
|
||||
import * as ContextMenu from "@radix-ui/react-context-menu";
|
||||
import {
|
||||
Alert,
|
||||
AlertDescription,
|
||||
} from "../../../../../../components/molecules/Alert/Alert";
|
||||
import {
|
||||
DotsVerticalIcon,
|
||||
TrashIcon,
|
||||
CopyIcon,
|
||||
ExitIcon,
|
||||
Pencil1Icon,
|
||||
} from "@radix-ui/react-icons";
|
||||
import { InfoIcon, Key } from "@phosphor-icons/react";
|
||||
import useCredits from "@/hooks/useCredits";
|
||||
import { getV1GetAyrshareSsoUrl } from "@/app/api/__generated__/endpoints/integrations/integrations";
|
||||
import { toast } from "@/components/molecules/Toast/use-toast";
|
||||
import { Input } from "@/components/__legacy__/ui/input";
|
||||
import { TextRenderer } from "@/components/__legacy__/ui/render";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Switch } from "@/components/atoms/Switch/Switch";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
|
||||
import { Switch } from "@/components/atoms/Switch/Switch";
|
||||
import { toast } from "@/components/molecules/Toast/use-toast";
|
||||
import useCredits from "@/hooks/useCredits";
|
||||
import {
|
||||
BlockCost,
|
||||
BlockIORootSchema,
|
||||
BlockIOStringSubSchema,
|
||||
BlockIOSubSchema,
|
||||
BlockUIType,
|
||||
Category,
|
||||
NodeExecutionResult,
|
||||
} from "@/lib/autogpt-server-api";
|
||||
import {
|
||||
beautifyString,
|
||||
cn,
|
||||
fillObjectDefaultsFromSchema,
|
||||
getPrimaryCategoryColor,
|
||||
getValue,
|
||||
hasNonNullNonObjectValue,
|
||||
isObject,
|
||||
parseKeys,
|
||||
setNestedProperty,
|
||||
} from "@/lib/utils";
|
||||
import { InfoIcon, Key } from "@phosphor-icons/react";
|
||||
import * as ContextMenu from "@radix-ui/react-context-menu";
|
||||
import {
|
||||
CopyIcon,
|
||||
DotsVerticalIcon,
|
||||
ExitIcon,
|
||||
Pencil1Icon,
|
||||
TrashIcon,
|
||||
} from "@radix-ui/react-icons";
|
||||
import * as Separator from "@radix-ui/react-separator";
|
||||
import { Edge, NodeProps, useReactFlow, Node as XYNode } from "@xyflow/react";
|
||||
import "@xyflow/react/dist/style.css";
|
||||
import Link from "next/link";
|
||||
import React, {
|
||||
useCallback,
|
||||
useContext,
|
||||
useEffect,
|
||||
useRef,
|
||||
useState,
|
||||
} from "react";
|
||||
import { Badge } from "@/components/__legacy__/ui/badge";
|
||||
import { IconCoin } from "@/components/__legacy__/ui/icons";
|
||||
import { Alert, AlertDescription } from "@/components/molecules/Alert/Alert";
|
||||
import { BuilderContext } from "../Flow/Flow";
|
||||
import { history } from "../history";
|
||||
import InputModalComponent from "../InputModalComponent";
|
||||
import NodeHandle from "../NodeHandle";
|
||||
import { NodeGenericInputField, NodeTextBoxInput } from "../NodeInputs";
|
||||
import NodeOutputs from "../NodeOutputs";
|
||||
import OutputModalComponent from "../OutputModalComponent";
|
||||
import "./customnode.css";
|
||||
|
||||
export type ConnectionData = Array<{
|
||||
edge_id: string;
|
||||
@@ -366,6 +363,7 @@ export const CustomNode = React.memo(
|
||||
// For OUTPUT blocks, only show the 'value' (hides 'name') input connection handle
|
||||
!(nodeType == BlockUIType.OUTPUT && propKey == "name");
|
||||
const isConnected = isInputHandleConnected(propKey);
|
||||
|
||||
return (
|
||||
!isHidden &&
|
||||
(isRequired || isAdvancedOpen || isConnected || !isAdvanced) && (
|
||||
@@ -647,6 +645,8 @@ export const CustomNode = React.memo(
|
||||
return "border-purple-200 dark:border-purple-800 border-4";
|
||||
case "queued":
|
||||
return "border-cyan-200 dark:border-cyan-800 border-4";
|
||||
case "review":
|
||||
return "border-orange-200 dark:border-orange-800 border-4";
|
||||
default:
|
||||
return "";
|
||||
}
|
||||
@@ -666,6 +666,8 @@ export const CustomNode = React.memo(
|
||||
return "bg-purple-200 dark:bg-purple-800";
|
||||
case "queued":
|
||||
return "bg-cyan-200 dark:bg-cyan-800";
|
||||
case "review":
|
||||
return "bg-orange-200 dark:bg-orange-800";
|
||||
default:
|
||||
return "";
|
||||
}
|
||||
@@ -1010,6 +1012,8 @@ export const CustomNode = React.memo(
|
||||
data.status === "QUEUED",
|
||||
"border-gray-600 bg-gray-600 font-black":
|
||||
data.status === "INCOMPLETE",
|
||||
"border-orange-600 bg-orange-600 text-white":
|
||||
data.status === "REVIEW",
|
||||
},
|
||||
)}
|
||||
>
|
||||
|
||||
@@ -1,27 +1,27 @@
|
||||
import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
|
||||
import {
|
||||
globalRegistry,
|
||||
OutputActions,
|
||||
OutputItem,
|
||||
} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
|
||||
import { beautifyString } from "@/lib/utils";
|
||||
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
|
||||
import { Clipboard, Maximize2 } from "lucide-react";
|
||||
import React, { FC, useMemo, useState } from "react";
|
||||
import { Button } from "../../../../../components/__legacy__/ui/button";
|
||||
import { ContentRenderer } from "../../../../../components/__legacy__/ui/render";
|
||||
import { beautifyString } from "@/lib/utils";
|
||||
import { Clipboard, Maximize2 } from "lucide-react";
|
||||
import { useToast } from "../../../../../components/molecules/Toast/use-toast";
|
||||
import { Switch } from "../../../../../components/atoms/Switch/Switch";
|
||||
import {
|
||||
Dialog,
|
||||
DialogContent,
|
||||
DialogHeader,
|
||||
DialogTitle,
|
||||
DialogDescription,
|
||||
DialogFooter,
|
||||
DialogHeader,
|
||||
DialogTitle,
|
||||
} from "../../../../../components/__legacy__/ui/dialog";
|
||||
import { ContentRenderer } from "../../../../../components/__legacy__/ui/render";
|
||||
import { ScrollArea } from "../../../../../components/__legacy__/ui/scroll-area";
|
||||
import { Separator } from "../../../../../components/__legacy__/ui/separator";
|
||||
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
|
||||
import {
|
||||
globalRegistry,
|
||||
OutputItem,
|
||||
OutputActions,
|
||||
} from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers";
|
||||
import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers";
|
||||
import { Switch } from "../../../../../components/atoms/Switch/Switch";
|
||||
import { useToast } from "../../../../../components/molecules/Toast/use-toast";
|
||||
|
||||
interface ExpandableOutputDialogProps {
|
||||
isOpen: boolean;
|
||||
|
||||
@@ -64,6 +64,7 @@ import { useCopyPaste } from "../useCopyPaste";
|
||||
import NewControlPanel from "@/app/(platform)/build/components/NewControlPanel/NewControlPanel";
|
||||
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
|
||||
import { BuildActionBar } from "../BuildActionBar";
|
||||
import { FloatingReviewsPanel } from "@/components/organisms/FloatingReviewsPanel/FloatingReviewsPanel";
|
||||
|
||||
// This is for the history, this is the minimum distance a block must move before it is logged
|
||||
// It helps to prevent spamming the history with small movements especially when pressing on a input in a block
|
||||
@@ -1024,6 +1025,10 @@ const FlowEditor: React.FC<{
|
||||
saveAndRun={saveAndRun}
|
||||
/>
|
||||
)}
|
||||
<FloatingReviewsPanel
|
||||
executionId={flowExecutionID || undefined}
|
||||
className="fixed bottom-24 right-4"
|
||||
/>
|
||||
<Suspense fallback={null}>
|
||||
<OttoChatWidget
|
||||
graphID={flowID}
|
||||
|
||||
@@ -1,18 +1,34 @@
|
||||
import {
|
||||
ConnectionData,
|
||||
CustomNodeData,
|
||||
} from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
|
||||
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs";
|
||||
import { Button } from "@/components/__legacy__/ui/button";
|
||||
import { Calendar } from "@/components/__legacy__/ui/calendar";
|
||||
import { LocalValuedInput } from "@/components/__legacy__/ui/input";
|
||||
import {
|
||||
MultiSelector,
|
||||
MultiSelectorContent,
|
||||
MultiSelectorInput,
|
||||
MultiSelectorItem,
|
||||
MultiSelectorList,
|
||||
MultiSelectorTrigger,
|
||||
} from "@/components/__legacy__/ui/multiselect";
|
||||
import {
|
||||
Popover,
|
||||
PopoverContent,
|
||||
PopoverTrigger,
|
||||
} from "@/components/__legacy__/ui/popover";
|
||||
import { format } from "date-fns";
|
||||
import { CalendarIcon } from "lucide-react";
|
||||
import { beautifyString, cn } from "@/lib/utils";
|
||||
import { Node, useNodeId, useNodesData } from "@xyflow/react";
|
||||
import {
|
||||
ConnectionData,
|
||||
CustomNodeData,
|
||||
} from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
|
||||
import { Cross2Icon, Pencil2Icon, PlusIcon } from "@radix-ui/react-icons";
|
||||
Select,
|
||||
SelectContent,
|
||||
SelectItem,
|
||||
SelectTrigger,
|
||||
SelectValue,
|
||||
} from "@/components/__legacy__/ui/select";
|
||||
import { Switch } from "@/components/atoms/Switch/Switch";
|
||||
import { GoogleDrivePickerInput } from "@/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput";
|
||||
import { NodeTableInput } from "@/components/node-table-input";
|
||||
import {
|
||||
BlockIOArraySubSchema,
|
||||
BlockIOBooleanSubSchema,
|
||||
@@ -29,35 +45,20 @@ import {
|
||||
DataType,
|
||||
determineDataType,
|
||||
} from "@/lib/autogpt-server-api/types";
|
||||
import { beautifyString, cn } from "@/lib/utils";
|
||||
import { Cross2Icon, Pencil2Icon, PlusIcon } from "@radix-ui/react-icons";
|
||||
import { Node, useNodeId, useNodesData } from "@xyflow/react";
|
||||
import { format } from "date-fns";
|
||||
import { CalendarIcon } from "lucide-react";
|
||||
import React, {
|
||||
FC,
|
||||
useCallback,
|
||||
useEffect,
|
||||
useMemo,
|
||||
useState,
|
||||
useRef,
|
||||
useState,
|
||||
} from "react";
|
||||
import { Button } from "../../../../../components/__legacy__/ui/button";
|
||||
import {
|
||||
Select,
|
||||
SelectContent,
|
||||
SelectItem,
|
||||
SelectTrigger,
|
||||
SelectValue,
|
||||
} from "../../../../../components/__legacy__/ui/select";
|
||||
import {
|
||||
MultiSelector,
|
||||
MultiSelectorContent,
|
||||
MultiSelectorInput,
|
||||
MultiSelectorItem,
|
||||
MultiSelectorList,
|
||||
MultiSelectorTrigger,
|
||||
} from "../../../../../components/__legacy__/ui/multiselect";
|
||||
import { LocalValuedInput } from "../../../../../components/__legacy__/ui/input";
|
||||
import NodeHandle from "./NodeHandle";
|
||||
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs";
|
||||
import { Switch } from "../../../../../components/atoms/Switch/Switch";
|
||||
import { NodeTableInput } from "../../../../../components/node-table-input";
|
||||
|
||||
type NodeObjectInputTreeProps = {
|
||||
nodeId: string;
|
||||
@@ -370,6 +371,22 @@ export const NodeGenericInputField: FC<{
|
||||
handleInputChange={handleInputChange}
|
||||
/>
|
||||
);
|
||||
case DataType.GOOGLE_DRIVE_PICKER: {
|
||||
const pickerSchema = propSchema as any;
|
||||
const config: import("@/lib/autogpt-server-api/types").GoogleDrivePickerConfig =
|
||||
pickerSchema.google_drive_picker_config || {};
|
||||
|
||||
return (
|
||||
<GoogleDrivePickerInput
|
||||
config={config}
|
||||
value={currentValue}
|
||||
onChange={(value) => handleInputChange(propKey, value)}
|
||||
error={errors[propKey]}
|
||||
className={className}
|
||||
showRemoveButton={true}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
case DataType.DATE:
|
||||
case DataType.TIME:
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
import { create } from "zustand";
|
||||
import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus";
|
||||
|
||||
interface GraphStore {
|
||||
graphExecutionStatus: AgentExecutionStatus | undefined;
|
||||
isGraphRunning: boolean;
|
||||
setIsGraphRunning: (isGraphRunning: boolean) => void;
|
||||
setGraphExecutionStatus: (status: AgentExecutionStatus | undefined) => void;
|
||||
|
||||
inputSchema: Record<string, any> | null;
|
||||
credentialsInputSchema: Record<string, any> | null;
|
||||
@@ -21,12 +23,20 @@ interface GraphStore {
|
||||
}
|
||||
|
||||
export const useGraphStore = create<GraphStore>((set, get) => ({
|
||||
graphExecutionStatus: undefined,
|
||||
isGraphRunning: false,
|
||||
inputSchema: null,
|
||||
credentialsInputSchema: null,
|
||||
outputSchema: null,
|
||||
|
||||
setIsGraphRunning: (isGraphRunning: boolean) => set({ isGraphRunning }),
|
||||
setGraphExecutionStatus: (status: AgentExecutionStatus | undefined) => {
|
||||
set({
|
||||
graphExecutionStatus: status,
|
||||
isGraphRunning:
|
||||
status === AgentExecutionStatus.RUNNING ||
|
||||
status === AgentExecutionStatus.QUEUED,
|
||||
});
|
||||
},
|
||||
|
||||
setGraphSchemas: (inputSchema, credentialsInputSchema, outputSchema) =>
|
||||
set({ inputSchema, credentialsInputSchema, outputSchema }),
|
||||
@@ -48,6 +58,7 @@ export const useGraphStore = create<GraphStore>((set, get) => ({
|
||||
|
||||
reset: () =>
|
||||
set({
|
||||
graphExecutionStatus: undefined,
|
||||
isGraphRunning: false,
|
||||
inputSchema: null,
|
||||
credentialsInputSchema: null,
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
import { useEffect, useRef } from "react";
|
||||
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs";
|
||||
import { Card } from "@/components/atoms/Card/Card";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { KeyIcon, CheckIcon, WarningIcon } from "@phosphor-icons/react";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { useChatCredentialsSetup } from "./useChatCredentialsSetup";
|
||||
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs";
|
||||
import type { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { CheckIcon, KeyIcon, WarningIcon } from "@phosphor-icons/react";
|
||||
import { useEffect, useRef } from "react";
|
||||
import { useChatCredentialsSetup } from "./useChatCredentialsSetup";
|
||||
|
||||
export interface CredentialInfo {
|
||||
provider: string;
|
||||
|
||||
@@ -5,15 +5,15 @@ import { Breadcrumbs } from "@/components/molecules/Breadcrumbs/Breadcrumbs";
|
||||
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
|
||||
import { PlusIcon } from "@phosphor-icons/react";
|
||||
import { useEffect } from "react";
|
||||
import { AgentRunsLoading } from "./components/AgentRunsLoading";
|
||||
import { EmptyAgentRuns } from "./components/EmptyAgentRuns/EmptyAgentRuns";
|
||||
import { RunAgentModal } from "./components/RunAgentModal/RunAgentModal";
|
||||
import { RunsSidebar } from "./components/RunsSidebar/RunsSidebar";
|
||||
import { SelectedRunView } from "./components/SelectedRunView/SelectedRunView";
|
||||
import { SelectedScheduleView } from "./components/SelectedScheduleView/SelectedScheduleView";
|
||||
import { useAgentRunsView } from "./useAgentRunsView";
|
||||
import { RunAgentModal } from "./components/modals/RunAgentModal/RunAgentModal";
|
||||
import { AgentRunsLoading } from "./components/other/AgentRunsLoading";
|
||||
import { EmptyAgentRuns } from "./components/other/EmptyAgentRuns";
|
||||
import { SelectedRunView } from "./components/selected-views/SelectedRunView/SelectedRunView";
|
||||
import { SelectedScheduleView } from "./components/selected-views/SelectedScheduleView/SelectedScheduleView";
|
||||
import { AgentRunsLists } from "./components/sidebar/AgentRunsLists/AgentRunsLists";
|
||||
import { useNewAgentLibraryView } from "./useNewAgentLibraryView";
|
||||
|
||||
export function AgentRunsView() {
|
||||
export function NewAgentLibraryView() {
|
||||
const {
|
||||
agent,
|
||||
hasAnyItems,
|
||||
@@ -22,10 +22,11 @@ export function AgentRunsView() {
|
||||
error,
|
||||
agentId,
|
||||
selectedRun,
|
||||
sidebarLoading,
|
||||
handleSelectRun,
|
||||
handleCountsChange,
|
||||
handleClearSelectedRun,
|
||||
} = useAgentRunsView();
|
||||
} = useNewAgentLibraryView();
|
||||
|
||||
useEffect(() => {
|
||||
if (agent) {
|
||||
@@ -73,7 +74,7 @@ export function AgentRunsView() {
|
||||
/>
|
||||
</div>
|
||||
|
||||
<RunsSidebar
|
||||
<AgentRunsLists
|
||||
agent={agent}
|
||||
selectedRunId={selectedRun}
|
||||
onSelectRun={handleSelectRun}
|
||||
@@ -107,6 +108,9 @@ export function AgentRunsView() {
|
||||
onClearSelectedRun={handleClearSelectedRun}
|
||||
/>
|
||||
)
|
||||
) : sidebarLoading ? (
|
||||
// Show loading state while sidebar is loading to prevent flash of empty state
|
||||
<div className="text-gray-600">Loading runs...</div>
|
||||
) : hasAnyItems ? (
|
||||
<div className="text-gray-600">
|
||||
Select a run to view its details
|
||||
@@ -1,4 +1,3 @@
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import {
|
||||
IconKey,
|
||||
IconKeyPlus,
|
||||
@@ -12,6 +11,8 @@ import {
|
||||
SelectTrigger,
|
||||
SelectValue,
|
||||
} from "@/components/__legacy__/ui/select";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
|
||||
import useCredentials from "@/hooks/useCredentials";
|
||||
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
|
||||
import {
|
||||
@@ -31,11 +32,10 @@ import {
|
||||
FaMedium,
|
||||
FaTwitter,
|
||||
} from "react-icons/fa";
|
||||
import { APIKeyCredentialsModal } from "../APIKeyCredentialsModal/APIKeyCredentialsModal";
|
||||
import { HostScopedCredentialsModal } from "../HotScopedCredentialsModal/HotScopedCredentialsModal";
|
||||
import { OAuthFlowWaitingModal } from "../OAuthWaitingModal/OAuthWaitingModal";
|
||||
import { PasswordCredentialsModal } from "../PasswordCredentialsModal/PasswordCredentialsModal";
|
||||
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
|
||||
import { APIKeyCredentialsModal } from "./APIKeyCredentialsModal/APIKeyCredentialsModal";
|
||||
import { HostScopedCredentialsModal } from "./HotScopedCredentialsModal/HotScopedCredentialsModal";
|
||||
import { OAuthFlowWaitingModal } from "./OAuthWaitingModal/OAuthWaitingModal";
|
||||
import { PasswordCredentialsModal } from "./PasswordCredentialsModal/PasswordCredentialsModal";
|
||||
|
||||
const fallbackIcon = FaKey;
|
||||
|
||||
@@ -1,10 +1,15 @@
|
||||
import React from "react";
|
||||
import { format } from "date-fns";
|
||||
import React from "react";
|
||||
|
||||
import { Input as DSInput } from "@/components/atoms/Input/Input";
|
||||
import { Select as DSSelect } from "@/components/atoms/Select/Select";
|
||||
import { MultiToggle } from "@/components/molecules/MultiToggle/MultiToggle";
|
||||
// Removed shadcn Select usage in favor of DS Select for time picker
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { FileInput } from "@/components/atoms/FileInput/FileInput";
|
||||
import { Switch } from "@/components/atoms/Switch/Switch";
|
||||
import { GoogleDrivePickerInput } from "@/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput";
|
||||
import { TimePicker } from "@/components/molecules/TimePicker/TimePicker";
|
||||
import {
|
||||
BlockIOObjectSubSchema,
|
||||
BlockIOSubSchema,
|
||||
@@ -13,12 +18,8 @@ import {
|
||||
determineDataType,
|
||||
TableRow,
|
||||
} from "@/lib/autogpt-server-api/types";
|
||||
import { TimePicker } from "@/components/molecules/TimePicker/TimePicker";
|
||||
import { FileInput } from "@/components/atoms/FileInput/FileInput";
|
||||
import { useRunAgentInputs } from "./useRunAgentInputs";
|
||||
import { Switch } from "@/components/atoms/Switch/Switch";
|
||||
import { PlusIcon, XIcon } from "@phosphor-icons/react";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { useRunAgentInputs } from "./useRunAgentInputs";
|
||||
|
||||
/**
|
||||
* A generic prop structure for the TypeBasedInput.
|
||||
@@ -90,6 +91,23 @@ export function RunAgentInputs({
|
||||
);
|
||||
break;
|
||||
|
||||
case DataType.GOOGLE_DRIVE_PICKER: {
|
||||
const pickerSchema = schema as any;
|
||||
const config: import("@/lib/autogpt-server-api/types").GoogleDrivePickerConfig =
|
||||
pickerSchema.google_drive_picker_config || {};
|
||||
|
||||
innerInputElement = (
|
||||
<GoogleDrivePickerInput
|
||||
config={config}
|
||||
value={value}
|
||||
onChange={onChange}
|
||||
className="w-full"
|
||||
showRemoveButton={false}
|
||||
/>
|
||||
);
|
||||
break;
|
||||
}
|
||||
|
||||
case DataType.BOOLEAN:
|
||||
innerInputElement = (
|
||||
<>
|
||||
@@ -1,21 +1,21 @@
|
||||
"use client";
|
||||
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { useState } from "react";
|
||||
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
|
||||
import { useAgentRunModal } from "./useAgentRunModal";
|
||||
import { ModalHeader } from "./components/ModalHeader/ModalHeader";
|
||||
import { AgentCostSection } from "./components/AgentCostSection/AgentCostSection";
|
||||
import { AgentSectionHeader } from "./components/AgentSectionHeader/AgentSectionHeader";
|
||||
import { ModalRunSection } from "./components/ModalRunSection/ModalRunSection";
|
||||
import { RunAgentModalContextProvider } from "./context";
|
||||
import { AgentDetails } from "./components/AgentDetails/AgentDetails";
|
||||
import { RunActions } from "./components/RunActions/RunActions";
|
||||
import { ScheduleAgentModal } from "../ScheduleAgentModal/ScheduleAgentModal";
|
||||
import { AlarmIcon } from "@phosphor-icons/react";
|
||||
import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta";
|
||||
import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
|
||||
import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta";
|
||||
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { AlarmIcon } from "@phosphor-icons/react";
|
||||
import { useState } from "react";
|
||||
import { ScheduleAgentModal } from "../ScheduleAgentModal/ScheduleAgentModal";
|
||||
import { AgentCostSection } from "./components/AgentCostSection/AgentCostSection";
|
||||
import { AgentDetails } from "./components/AgentDetails/AgentDetails";
|
||||
import { AgentSectionHeader } from "./components/AgentSectionHeader/AgentSectionHeader";
|
||||
import { ModalHeader } from "./components/ModalHeader/ModalHeader";
|
||||
import { ModalRunSection } from "./components/ModalRunSection/ModalRunSection";
|
||||
import { RunActions } from "./components/RunActions/RunActions";
|
||||
import { RunAgentModalContextProvider } from "./context";
|
||||
import { useAgentRunModal } from "./useAgentRunModal";
|
||||
|
||||
interface Props {
|
||||
triggerSlot: React.ReactNode;
|
||||
@@ -1,13 +1,13 @@
|
||||
import { WebhookTriggerBanner } from "../WebhookTriggerBanner/WebhookTriggerBanner";
|
||||
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs";
|
||||
import { Input } from "@/components/atoms/Input/Input";
|
||||
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs";
|
||||
import { useRunAgentModalContext } from "../../context";
|
||||
import { RunAgentInputs } from "../../../RunAgentInputs/RunAgentInputs";
|
||||
import { InfoIcon } from "@phosphor-icons/react";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { toDisplayName } from "@/providers/agent-credentials/helper";
|
||||
import { getCredentialTypeDisplayName } from "./helpers";
|
||||
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
|
||||
import { toDisplayName } from "@/providers/agent-credentials/helper";
|
||||
import { InfoIcon } from "@phosphor-icons/react";
|
||||
import { RunAgentInputs } from "../../../RunAgentInputs/RunAgentInputs";
|
||||
import { useRunAgentModalContext } from "../../context";
|
||||
import { WebhookTriggerBanner } from "../WebhookTriggerBanner/WebhookTriggerBanner";
|
||||
import { getCredentialTypeDisplayName } from "./helpers";
|
||||
|
||||
export function ModalRunSection() {
|
||||
const {
|
||||
@@ -1,10 +1,10 @@
|
||||
import { ShowMoreText } from "@/components/molecules/ShowMoreText/ShowMoreText";
|
||||
import { RunDetailCard } from "../RunDetailCard/RunDetailCard";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { RunAgentModal } from "../RunAgentModal/RunAgentModal";
|
||||
import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { ShowMoreText } from "@/components/molecules/ShowMoreText/ShowMoreText";
|
||||
import { PlusIcon } from "@phosphor-icons/react";
|
||||
import { RunAgentModal } from "../modals/RunAgentModal/RunAgentModal";
|
||||
import { RunDetailCard } from "../selected-views/RunDetailCard/RunDetailCard";
|
||||
|
||||
type Props = {
|
||||
agentName: string;
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user