diff --git a/.github/workflows/claude-dependabot.yml b/.github/workflows/claude-dependabot.yml
index 902fc461b2..20b6f1d28e 100644
--- a/.github/workflows/claude-dependabot.yml
+++ b/.github/workflows/claude-dependabot.yml
@@ -80,7 +80,7 @@ jobs:
- name: Set up Node.js
uses: actions/setup-node@v4
with:
- node-version: "21"
+ node-version: "22"
- name: Enable corepack
run: corepack enable
diff --git a/.github/workflows/claude.yml b/.github/workflows/claude.yml
index 31f2769ea4..51eb764b80 100644
--- a/.github/workflows/claude.yml
+++ b/.github/workflows/claude.yml
@@ -90,7 +90,7 @@ jobs:
- name: Set up Node.js
uses: actions/setup-node@v4
with:
- node-version: "21"
+ node-version: "22"
- name: Enable corepack
run: corepack enable
diff --git a/.github/workflows/copilot-setup-steps.yml b/.github/workflows/copilot-setup-steps.yml
index 7af1ec4365..13ef01cc44 100644
--- a/.github/workflows/copilot-setup-steps.yml
+++ b/.github/workflows/copilot-setup-steps.yml
@@ -78,7 +78,7 @@ jobs:
- name: Set up Node.js
uses: actions/setup-node@v4
with:
- node-version: "21"
+ node-version: "22"
- name: Enable corepack
run: corepack enable
@@ -299,4 +299,4 @@ jobs:
echo "✅ AutoGPT Platform development environment setup complete!"
echo "🚀 Ready for development with Docker services running"
echo "📝 Backend server: poetry run serve (port 8000)"
- echo "🌐 Frontend server: pnpm dev (port 3000)"
\ No newline at end of file
+ echo "🌐 Frontend server: pnpm dev (port 3000)"
diff --git a/autogpt_platform/backend/backend/blocks/ai_image_customizer.py b/autogpt_platform/backend/backend/blocks/ai_image_customizer.py
index 912edec78a..850046317a 100644
--- a/autogpt_platform/backend/backend/blocks/ai_image_customizer.py
+++ b/autogpt_platform/backend/backend/blocks/ai_image_customizer.py
@@ -1,3 +1,4 @@
+import asyncio
from enum import Enum
from typing import Literal
@@ -20,7 +21,7 @@ from backend.data.model import (
SchemaField,
)
from backend.integrations.providers import ProviderName
-from backend.util.file import MediaFileType
+from backend.util.file import MediaFileType, store_media_file
class GeminiImageModel(str, Enum):
@@ -28,6 +29,20 @@ class GeminiImageModel(str, Enum):
NANO_BANANA_PRO = "google/nano-banana-pro"
+class AspectRatio(str, Enum):
+ MATCH_INPUT_IMAGE = "match_input_image"
+ ASPECT_1_1 = "1:1"
+ ASPECT_2_3 = "2:3"
+ ASPECT_3_2 = "3:2"
+ ASPECT_3_4 = "3:4"
+ ASPECT_4_3 = "4:3"
+ ASPECT_4_5 = "4:5"
+ ASPECT_5_4 = "5:4"
+ ASPECT_9_16 = "9:16"
+ ASPECT_16_9 = "16:9"
+ ASPECT_21_9 = "21:9"
+
+
class OutputFormat(str, Enum):
JPG = "jpg"
PNG = "png"
@@ -70,6 +85,11 @@ class AIImageCustomizerBlock(Block):
default=[],
title="Input Images",
)
+ aspect_ratio: AspectRatio = SchemaField(
+ description="Aspect ratio of the generated image",
+ default=AspectRatio.MATCH_INPUT_IMAGE,
+ title="Aspect Ratio",
+ )
output_format: OutputFormat = SchemaField(
description="Format of the output image",
default=OutputFormat.PNG,
@@ -93,6 +113,7 @@ class AIImageCustomizerBlock(Block):
"prompt": "Make the scene more vibrant and colorful",
"model": GeminiImageModel.NANO_BANANA,
"images": [],
+ "aspect_ratio": AspectRatio.MATCH_INPUT_IMAGE,
"output_format": OutputFormat.JPG,
"credentials": TEST_CREDENTIALS_INPUT,
},
@@ -117,11 +138,25 @@ class AIImageCustomizerBlock(Block):
**kwargs,
) -> BlockOutput:
try:
+ # Convert local file paths to Data URIs (base64) so Replicate can access them
+ processed_images = await asyncio.gather(
+ *(
+ store_media_file(
+ graph_exec_id=graph_exec_id,
+ file=img,
+ user_id=user_id,
+ return_content=True,
+ )
+ for img in input_data.images
+ )
+ )
+
result = await self.run_model(
api_key=credentials.api_key,
model_name=input_data.model.value,
prompt=input_data.prompt,
- images=input_data.images,
+ images=processed_images,
+ aspect_ratio=input_data.aspect_ratio.value,
output_format=input_data.output_format.value,
)
yield "image_url", result
@@ -134,12 +169,14 @@ class AIImageCustomizerBlock(Block):
model_name: str,
prompt: str,
images: list[MediaFileType],
+ aspect_ratio: str,
output_format: str,
) -> MediaFileType:
client = ReplicateClient(api_token=api_key.get_secret_value())
input_params: dict = {
"prompt": prompt,
+ "aspect_ratio": aspect_ratio,
"output_format": output_format,
}
diff --git a/autogpt_platform/backend/backend/blocks/google/_drive.py b/autogpt_platform/backend/backend/blocks/google/_drive.py
new file mode 100644
index 0000000000..46fafd6857
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/google/_drive.py
@@ -0,0 +1,198 @@
+import asyncio
+import mimetypes
+import uuid
+from pathlib import Path
+from typing import Any, Literal, Optional
+
+from pydantic import BaseModel, ConfigDict, Field
+
+from backend.data.model import SchemaField
+from backend.util.file import get_exec_file_path
+from backend.util.request import Requests
+from backend.util.type import MediaFileType
+from backend.util.virus_scanner import scan_content_safe
+
+AttachmentView = Literal[
+ "DOCS",
+ "DOCUMENTS",
+ "SPREADSHEETS",
+ "PRESENTATIONS",
+ "DOCS_IMAGES",
+ "FOLDERS",
+]
+ATTACHMENT_VIEWS: tuple[AttachmentView, ...] = (
+ "DOCS",
+ "DOCUMENTS",
+ "SPREADSHEETS",
+ "PRESENTATIONS",
+ "DOCS_IMAGES",
+ "FOLDERS",
+)
+
+
+class GoogleDriveFile(BaseModel):
+ """Represents a single file/folder picked from Google Drive"""
+
+ model_config = ConfigDict(populate_by_name=True)
+
+ id: str = Field(description="Google Drive file/folder ID")
+ name: Optional[str] = Field(None, description="File/folder name")
+ mime_type: Optional[str] = Field(
+ None,
+ alias="mimeType",
+ description="MIME type (e.g., application/vnd.google-apps.document)",
+ )
+ url: Optional[str] = Field(None, description="URL to open the file")
+ icon_url: Optional[str] = Field(None, alias="iconUrl", description="Icon URL")
+ is_folder: Optional[bool] = Field(
+ None, alias="isFolder", description="Whether this is a folder"
+ )
+
+
+def GoogleDrivePickerField(
+ multiselect: bool = False,
+ allow_folder_selection: bool = False,
+ allowed_views: Optional[list[AttachmentView]] = None,
+ allowed_mime_types: Optional[list[str]] = None,
+ scopes: Optional[list[str]] = None,
+ title: Optional[str] = None,
+ description: Optional[str] = None,
+ placeholder: Optional[str] = None,
+ **kwargs,
+) -> Any:
+ """
+ Creates a Google Drive Picker input field.
+
+ Args:
+ multiselect: Allow selecting multiple files/folders (default: False)
+ allow_folder_selection: Allow selecting folders (default: False)
+ allowed_views: List of view types to show in picker (default: ["DOCS"])
+ allowed_mime_types: Filter by MIME types (e.g., ["application/pdf"])
+ title: Field title shown in UI
+ description: Field description/help text
+ placeholder: Placeholder text for the button
+ **kwargs: Additional SchemaField arguments (advanced, hidden, etc.)
+
+ Returns:
+ Field definition that produces:
+ - Single GoogleDriveFile when multiselect=False
+ - list[GoogleDriveFile] when multiselect=True
+
+ Example:
+ >>> class MyBlock(Block):
+ ... class Input(BlockSchema):
+ ... document: GoogleDriveFile = GoogleDrivePickerField(
+ ... title="Select Document",
+ ... allowed_views=["DOCUMENTS"],
+ ... )
+ ...
+ ... files: list[GoogleDriveFile] = GoogleDrivePickerField(
+ ... title="Select Multiple Files",
+ ... multiselect=True,
+ ... allow_folder_selection=True,
+ ... )
+ """
+ # Build configuration that will be sent to frontend
+ picker_config = {
+ "multiselect": multiselect,
+ "allow_folder_selection": allow_folder_selection,
+ "allowed_views": list(allowed_views) if allowed_views else ["DOCS"],
+ }
+
+ # Add optional configurations
+ if allowed_mime_types:
+ picker_config["allowed_mime_types"] = list(allowed_mime_types)
+
+ # Determine required scopes based on config
+ base_scopes = scopes if scopes is not None else []
+ picker_scopes: set[str] = set(base_scopes)
+ if allow_folder_selection:
+ picker_scopes.add("https://www.googleapis.com/auth/drive")
+ else:
+ # Use drive.file for minimal scope - only access files selected by user in picker
+ picker_scopes.add("https://www.googleapis.com/auth/drive.file")
+
+ views = set(allowed_views or [])
+ if "SPREADSHEETS" in views:
+ picker_scopes.add("https://www.googleapis.com/auth/spreadsheets.readonly")
+ if "DOCUMENTS" in views or "DOCS" in views:
+ picker_scopes.add("https://www.googleapis.com/auth/documents.readonly")
+
+ picker_config["scopes"] = sorted(picker_scopes)
+
+ # Set appropriate default value
+ default_value = [] if multiselect else None
+
+ # Use SchemaField to handle format properly
+ return SchemaField(
+ default=default_value,
+ title=title,
+ description=description,
+ placeholder=placeholder or "Choose from Google Drive",
+ format="google-drive-picker",
+ advanced=False,
+ json_schema_extra={
+ "google_drive_picker_config": picker_config,
+ **kwargs,
+ },
+ )
+
+
+DRIVE_API_URL = "https://www.googleapis.com/drive/v3/files"
+_requests = Requests(trusted_origins=["https://www.googleapis.com"])
+
+
+def GoogleDriveAttachmentField(
+ *,
+ title: str,
+ description: str | None = None,
+ placeholder: str | None = None,
+ multiselect: bool = True,
+ allowed_mime_types: list[str] | None = None,
+ **extra: Any,
+) -> Any:
+ return GoogleDrivePickerField(
+ multiselect=multiselect,
+ allowed_views=list(ATTACHMENT_VIEWS),
+ allowed_mime_types=allowed_mime_types,
+ title=title,
+ description=description,
+ placeholder=placeholder or "Choose files from Google Drive",
+ **extra,
+ )
+
+
+async def drive_file_to_media_file(
+ drive_file: GoogleDriveFile, *, graph_exec_id: str, access_token: str
+) -> MediaFileType:
+ if drive_file.is_folder:
+ raise ValueError("Google Drive selection must be a file.")
+ if not access_token:
+ raise ValueError("Google Drive access token is required for file download.")
+
+ url = f"{DRIVE_API_URL}/{drive_file.id}?alt=media"
+ response = await _requests.get(
+ url, headers={"Authorization": f"Bearer {access_token}"}
+ )
+
+ mime_type = drive_file.mime_type or response.headers.get(
+ "content-type", "application/octet-stream"
+ )
+
+ MAX_FILE_SIZE = 100 * 1024 * 1024
+ if len(response.content) > MAX_FILE_SIZE:
+ raise ValueError(
+ f"File too large: {len(response.content)} bytes > {MAX_FILE_SIZE} bytes"
+ )
+
+ base_path = Path(get_exec_file_path(graph_exec_id, ""))
+ base_path.mkdir(parents=True, exist_ok=True)
+
+ extension = mimetypes.guess_extension(mime_type, strict=False) or ".bin"
+ filename = f"{uuid.uuid4()}{extension}"
+ target_path = base_path / filename
+
+ await scan_content_safe(response.content, filename=filename)
+ await asyncio.to_thread(target_path.write_bytes, response.content)
+
+ return MediaFileType(str(target_path.relative_to(base_path)))
diff --git a/autogpt_platform/backend/backend/blocks/google/sheets.py b/autogpt_platform/backend/backend/blocks/google/sheets.py
index c10bcfb255..e8d2d1d74a 100644
--- a/autogpt_platform/backend/backend/blocks/google/sheets.py
+++ b/autogpt_platform/backend/backend/blocks/google/sheets.py
@@ -5,6 +5,7 @@ from typing import Any
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
+from backend.blocks.google._drive import GoogleDriveFile, GoogleDrivePickerField
from backend.data.block import (
Block,
BlockCategory,
@@ -160,6 +161,7 @@ def _convert_dicts_to_rows(
def _build_sheets_service(credentials: GoogleCredentials):
+ """Build Sheets service from platform credentials (with refresh token)."""
settings = Settings()
creds = Credentials(
token=(
@@ -180,6 +182,41 @@ def _build_sheets_service(credentials: GoogleCredentials):
return build("sheets", "v4", credentials=creds)
+def _validate_spreadsheet_file(spreadsheet_file: "GoogleDriveFile") -> str | None:
+ """Validate that the selected file is a Google Sheets spreadsheet.
+
+ Returns None if valid, error message string if invalid.
+ """
+ if spreadsheet_file.mime_type != "application/vnd.google-apps.spreadsheet":
+ file_type = spreadsheet_file.mime_type
+ file_name = spreadsheet_file.name
+ if file_type == "text/csv":
+ return f"Cannot use CSV file '{file_name}' with Google Sheets block. Please use a CSV reader block instead, or convert the CSV to a Google Sheets spreadsheet first."
+ elif file_type in [
+ "application/vnd.ms-excel",
+ "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+ ]:
+ return f"Cannot use Excel file '{file_name}' with Google Sheets block. Please use an Excel reader block instead, or convert to Google Sheets first."
+ else:
+ return f"Cannot use file '{file_name}' (type: {file_type}) with Google Sheets block. This block only works with Google Sheets spreadsheets."
+ return None
+
+
+def _handle_sheets_api_error(error_msg: str, operation: str = "access") -> str:
+ """Convert common Google Sheets API errors to user-friendly messages."""
+ if "Request contains an invalid argument" in error_msg:
+ return f"Invalid request to Google Sheets API. This usually means the file is not a Google Sheets spreadsheet, the range is invalid, or you don't have permission to {operation} this file."
+ elif "The caller does not have permission" in error_msg or "Forbidden" in error_msg:
+ if operation in ["write", "modify", "update", "append", "clear"]:
+ return "Permission denied. You don't have edit access to this spreadsheet. Make sure it's shared with edit permissions."
+ else:
+ return "Permission denied. You don't have access to this spreadsheet. Make sure it's shared with you and try re-selecting the file."
+ elif "not found" in error_msg.lower() or "does not exist" in error_msg.lower():
+ return "Spreadsheet not found. The file may have been deleted or the link is invalid."
+ else:
+ return f"Failed to {operation} Google Sheet: {error_msg}"
+
+
class SheetOperation(str, Enum):
CREATE = "create"
DELETE = "delete"
@@ -216,18 +253,24 @@ class GoogleSheetsReadBlock(Block):
credentials: GoogleCredentialsInput = GoogleCredentialsField(
["https://www.googleapis.com/auth/spreadsheets.readonly"]
)
- spreadsheet_id: str = SchemaField(
- description="The ID or URL of the spreadsheet to read from",
- title="Spreadsheet ID or URL",
+ spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ title="Spreadsheet",
+ description="Select a Google Sheets spreadsheet",
+ allowed_views=["SPREADSHEETS"],
+ allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
range: str = SchemaField(
description="The A1 notation of the range to read",
+ placeholder="Sheet1!A1:Z1000",
)
class Output(BlockSchemaOutput):
result: list[list[str]] = SchemaField(
description="The data read from the spreadsheet",
)
+ spreadsheet: GoogleDriveFile = SchemaField(
+ description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)",
+ )
error: str = SchemaField(
description="Error message if any",
)
@@ -241,9 +284,13 @@ class GoogleSheetsReadBlock(Block):
output_schema=GoogleSheetsReadBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
- "range": "Sheet1!A1:B2",
"credentials": TEST_CREDENTIALS_INPUT,
+ "spreadsheet": {
+ "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "name": "Test Spreadsheet",
+ "mimeType": "application/vnd.google-apps.spreadsheet",
+ },
+ "range": "Sheet1!A1:B2",
},
test_credentials=TEST_CREDENTIALS,
test_output=[
@@ -254,6 +301,17 @@ class GoogleSheetsReadBlock(Block):
["Alice", "85"],
],
),
+ (
+ "spreadsheet",
+ GoogleDriveFile(
+ id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ name="Test Spreadsheet",
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ ),
+ ),
],
test_mock={
"_read_sheet": lambda *args, **kwargs: [
@@ -266,16 +324,52 @@ class GoogleSheetsReadBlock(Block):
async def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
- service = _build_sheets_service(credentials)
- spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id)
- data = await asyncio.to_thread(
- self._read_sheet, service, spreadsheet_id, input_data.range
- )
- yield "result", data
+ if not input_data.spreadsheet:
+ yield "error", "No spreadsheet selected"
+ return
+
+ # Check if the selected file is actually a Google Sheets spreadsheet
+ validation_error = _validate_spreadsheet_file(input_data.spreadsheet)
+ if validation_error:
+ yield "error", validation_error
+ return
+ try:
+ service = _build_sheets_service(credentials)
+ spreadsheet_id = input_data.spreadsheet.id
+ data = await asyncio.to_thread(
+ self._read_sheet, service, spreadsheet_id, input_data.range
+ )
+ yield "result", data
+ # Output the GoogleDriveFile for chaining
+ yield "spreadsheet", GoogleDriveFile(
+ id=spreadsheet_id,
+ name=input_data.spreadsheet.name,
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url=f"https://docs.google.com/spreadsheets/d/{spreadsheet_id}/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ )
+ except Exception as e:
+ yield "error", _handle_sheets_api_error(str(e), "read")
def _read_sheet(self, service, spreadsheet_id: str, range: str) -> list[list[str]]:
sheet = service.spreadsheets()
- result = sheet.values().get(spreadsheetId=spreadsheet_id, range=range).execute()
+ range_to_use = range or "A:Z"
+ sheet_name, cell_range = parse_a1_notation(range_to_use)
+ if sheet_name:
+ cleaned_sheet = sheet_name.strip().strip("'\"")
+ formatted_sheet = format_sheet_name(cleaned_sheet)
+ cell_part = cell_range.strip() if cell_range else ""
+ if cell_part:
+ range_to_use = f"{formatted_sheet}!{cell_part}"
+ else:
+ range_to_use = f"{formatted_sheet}!A:Z"
+ # If no sheet name, keep the original range (e.g., "A1:B2" or "B:B")
+ result = (
+ sheet.values()
+ .get(spreadsheetId=spreadsheet_id, range=range_to_use)
+ .execute()
+ )
return result.get("values", [])
@@ -284,12 +378,15 @@ class GoogleSheetsWriteBlock(Block):
credentials: GoogleCredentialsInput = GoogleCredentialsField(
["https://www.googleapis.com/auth/spreadsheets"]
)
- spreadsheet_id: str = SchemaField(
- description="The ID or URL of the spreadsheet to write to",
- title="Spreadsheet ID or URL",
+ spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ title="Spreadsheet",
+ description="Select a Google Sheets spreadsheet",
+ allowed_views=["SPREADSHEETS"],
+ allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
range: str = SchemaField(
description="The A1 notation of the range to write",
+ placeholder="Sheet1!A1:B2",
)
values: list[list[str]] = SchemaField(
description="The data to write to the spreadsheet",
@@ -299,6 +396,9 @@ class GoogleSheetsWriteBlock(Block):
result: dict = SchemaField(
description="The result of the write operation",
)
+ spreadsheet: GoogleDriveFile = SchemaField(
+ description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)",
+ )
error: str = SchemaField(
description="Error message if any",
)
@@ -312,13 +412,17 @@ class GoogleSheetsWriteBlock(Block):
output_schema=GoogleSheetsWriteBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "credentials": TEST_CREDENTIALS_INPUT,
+ "spreadsheet": {
+ "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "name": "Test Spreadsheet",
+ "mimeType": "application/vnd.google-apps.spreadsheet",
+ },
"range": "Sheet1!A1:B2",
"values": [
["Name", "Score"],
["Bob", "90"],
],
- "credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[
@@ -326,6 +430,17 @@ class GoogleSheetsWriteBlock(Block):
"result",
{"updatedCells": 4, "updatedColumns": 2, "updatedRows": 2},
),
+ (
+ "spreadsheet",
+ GoogleDriveFile(
+ id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ name="Test Spreadsheet",
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ ),
+ ),
],
test_mock={
"_write_sheet": lambda *args, **kwargs: {
@@ -339,16 +454,44 @@ class GoogleSheetsWriteBlock(Block):
async def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
- service = _build_sheets_service(credentials)
- spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id)
- result = await asyncio.to_thread(
- self._write_sheet,
- service,
- spreadsheet_id,
- input_data.range,
- input_data.values,
- )
- yield "result", result
+ if not input_data.spreadsheet:
+ yield "error", "No spreadsheet selected"
+ return
+
+ # Check if the selected file is actually a Google Sheets spreadsheet
+ validation_error = _validate_spreadsheet_file(input_data.spreadsheet)
+ if validation_error:
+ # Customize message for write operations on CSV files
+ if "CSV file" in validation_error:
+ yield "error", validation_error.replace(
+ "Please use a CSV reader block instead, or",
+ "CSV files are read-only through Google Drive. Please",
+ )
+ else:
+ yield "error", validation_error
+ return
+
+ try:
+ service = _build_sheets_service(credentials)
+ result = await asyncio.to_thread(
+ self._write_sheet,
+ service,
+ input_data.spreadsheet.id,
+ input_data.range,
+ input_data.values,
+ )
+ yield "result", result
+ # Output the GoogleDriveFile for chaining
+ yield "spreadsheet", GoogleDriveFile(
+ id=input_data.spreadsheet.id,
+ name=input_data.spreadsheet.name,
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ )
+ except Exception as e:
+ yield "error", _handle_sheets_api_error(str(e), "write")
def _write_sheet(
self, service, spreadsheet_id: str, range: str, values: list[list[str]]
@@ -373,9 +516,11 @@ class GoogleSheetsAppendBlock(Block):
credentials: GoogleCredentialsInput = GoogleCredentialsField(
["https://www.googleapis.com/auth/spreadsheets"]
)
- spreadsheet_id: str = SchemaField(
- description="Spreadsheet ID or URL",
- title="Spreadsheet ID or URL",
+ spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ title="Spreadsheet",
+ description="Select a Google Sheets spreadsheet",
+ allowed_views=["SPREADSHEETS"],
+ allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
sheet_name: str = SchemaField(
description="Optional sheet to append to (defaults to first sheet)",
@@ -411,6 +556,12 @@ class GoogleSheetsAppendBlock(Block):
class Output(BlockSchemaOutput):
result: dict = SchemaField(description="Append API response")
+ spreadsheet: GoogleDriveFile = SchemaField(
+ description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)",
+ )
+ error: str = SchemaField(
+ description="Error message if any",
+ )
def __init__(self):
super().__init__(
@@ -421,13 +572,28 @@ class GoogleSheetsAppendBlock(Block):
output_schema=GoogleSheetsAppendBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
- "values": [["Charlie", "95"]],
"credentials": TEST_CREDENTIALS_INPUT,
+ "spreadsheet": {
+ "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "name": "Test Spreadsheet",
+ "mimeType": "application/vnd.google-apps.spreadsheet",
+ },
+ "values": [["Charlie", "95"]],
},
test_credentials=TEST_CREDENTIALS,
test_output=[
("result", {"updatedCells": 2, "updatedColumns": 2, "updatedRows": 1}),
+ (
+ "spreadsheet",
+ GoogleDriveFile(
+ id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ name="Test Spreadsheet",
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ ),
+ ),
],
test_mock={
"_append_sheet": lambda *args, **kwargs: {
@@ -441,37 +607,58 @@ class GoogleSheetsAppendBlock(Block):
async def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
- service = _build_sheets_service(credentials)
- spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id)
- # Determine which values to use and convert if needed
- processed_values: list[list[str]]
+ if not input_data.spreadsheet:
+ yield "error", "No spreadsheet selected"
+ return
- # Validate that only one format is provided
- if input_data.values and input_data.dict_values:
- raise ValueError("Provide either 'values' or 'dict_values', not both")
+ # Check if the selected file is actually a Google Sheets spreadsheet
+ validation_error = _validate_spreadsheet_file(input_data.spreadsheet)
+ if validation_error:
+ yield "error", validation_error
+ return
+ try:
+ service = _build_sheets_service(credentials)
- if input_data.dict_values:
- if not input_data.headers:
- raise ValueError("Headers are required when using dict_values")
- processed_values = _convert_dicts_to_rows(
- input_data.dict_values, input_data.headers
+ # Determine which values to use and convert if needed
+ processed_values: list[list[str]]
+
+ # Validate that only one format is provided
+ if input_data.values and input_data.dict_values:
+ raise ValueError("Provide either 'values' or 'dict_values', not both")
+
+ if input_data.dict_values:
+ if not input_data.headers:
+ raise ValueError("Headers are required when using dict_values")
+ processed_values = _convert_dicts_to_rows(
+ input_data.dict_values, input_data.headers
+ )
+ elif input_data.values:
+ processed_values = input_data.values
+ else:
+ raise ValueError("Either 'values' or 'dict_values' must be provided")
+
+ result = await asyncio.to_thread(
+ self._append_sheet,
+ service,
+ input_data.spreadsheet.id,
+ input_data.sheet_name,
+ processed_values,
+ input_data.range,
+ input_data.value_input_option,
+ input_data.insert_data_option,
)
- elif input_data.values:
- processed_values = input_data.values
- else:
- raise ValueError("Either 'values' or 'dict_values' must be provided")
-
- result = await asyncio.to_thread(
- self._append_sheet,
- service,
- spreadsheet_id,
- input_data.sheet_name,
- processed_values,
- input_data.range,
- input_data.value_input_option,
- input_data.insert_data_option,
- )
- yield "result", result
+ yield "result", result
+ # Output the GoogleDriveFile for chaining
+ yield "spreadsheet", GoogleDriveFile(
+ id=input_data.spreadsheet.id,
+ name=input_data.spreadsheet.name,
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ )
+ except Exception as e:
+ yield "error", f"Failed to append to Google Sheet: {str(e)}"
def _append_sheet(
self,
@@ -512,18 +699,24 @@ class GoogleSheetsClearBlock(Block):
credentials: GoogleCredentialsInput = GoogleCredentialsField(
["https://www.googleapis.com/auth/spreadsheets"]
)
- spreadsheet_id: str = SchemaField(
- description="The ID or URL of the spreadsheet to clear",
- title="Spreadsheet ID or URL",
+ spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ title="Spreadsheet",
+ description="Select a Google Sheets spreadsheet",
+ allowed_views=["SPREADSHEETS"],
+ allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
range: str = SchemaField(
description="The A1 notation of the range to clear",
+ placeholder="Sheet1!A1:B2",
)
class Output(BlockSchemaOutput):
result: dict = SchemaField(
description="The result of the clear operation",
)
+ spreadsheet: GoogleDriveFile = SchemaField(
+ description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)",
+ )
error: str = SchemaField(
description="Error message if any",
)
@@ -537,13 +730,28 @@ class GoogleSheetsClearBlock(Block):
output_schema=GoogleSheetsClearBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
- "range": "Sheet1!A1:B2",
"credentials": TEST_CREDENTIALS_INPUT,
+ "spreadsheet": {
+ "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "name": "Test Spreadsheet",
+ "mimeType": "application/vnd.google-apps.spreadsheet",
+ },
+ "range": "Sheet1!A1:B2",
},
test_credentials=TEST_CREDENTIALS,
test_output=[
("result", {"clearedRange": "Sheet1!A1:B2"}),
+ (
+ "spreadsheet",
+ GoogleDriveFile(
+ id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ name="Test Spreadsheet",
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ ),
+ ),
],
test_mock={
"_clear_range": lambda *args, **kwargs: {
@@ -555,15 +763,36 @@ class GoogleSheetsClearBlock(Block):
async def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
- service = _build_sheets_service(credentials)
- spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id)
- result = await asyncio.to_thread(
- self._clear_range,
- service,
- spreadsheet_id,
- input_data.range,
- )
- yield "result", result
+ if not input_data.spreadsheet:
+ yield "error", "No spreadsheet selected"
+ return
+
+ # Check if the selected file is actually a Google Sheets spreadsheet
+ validation_error = _validate_spreadsheet_file(input_data.spreadsheet)
+ if validation_error:
+ yield "error", validation_error
+ return
+
+ try:
+ service = _build_sheets_service(credentials)
+ result = await asyncio.to_thread(
+ self._clear_range,
+ service,
+ input_data.spreadsheet.id,
+ input_data.range,
+ )
+ yield "result", result
+ # Output the GoogleDriveFile for chaining
+ yield "spreadsheet", GoogleDriveFile(
+ id=input_data.spreadsheet.id,
+ name=input_data.spreadsheet.name,
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ )
+ except Exception as e:
+ yield "error", f"Failed to clear Google Sheet range: {str(e)}"
def _clear_range(self, service, spreadsheet_id: str, range: str) -> dict:
result = (
@@ -580,15 +809,20 @@ class GoogleSheetsMetadataBlock(Block):
credentials: GoogleCredentialsInput = GoogleCredentialsField(
["https://www.googleapis.com/auth/spreadsheets.readonly"]
)
- spreadsheet_id: str = SchemaField(
- description="The ID or URL of the spreadsheet to get metadata for",
- title="Spreadsheet ID or URL",
+ spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ title="Spreadsheet",
+ description="Select a Google Sheets spreadsheet",
+ allowed_views=["SPREADSHEETS"],
+ allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
class Output(BlockSchemaOutput):
result: dict = SchemaField(
description="The metadata of the spreadsheet including sheets info",
)
+ spreadsheet: GoogleDriveFile = SchemaField(
+ description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)",
+ )
error: str = SchemaField(
description="Error message if any",
)
@@ -602,8 +836,12 @@ class GoogleSheetsMetadataBlock(Block):
output_schema=GoogleSheetsMetadataBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
"credentials": TEST_CREDENTIALS_INPUT,
+ "spreadsheet": {
+ "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "name": "Test Spreadsheet",
+ "mimeType": "application/vnd.google-apps.spreadsheet",
+ },
},
test_credentials=TEST_CREDENTIALS,
test_output=[
@@ -614,6 +852,17 @@ class GoogleSheetsMetadataBlock(Block):
"sheets": [{"title": "Sheet1", "sheetId": 0}],
},
),
+ (
+ "spreadsheet",
+ GoogleDriveFile(
+ id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ name="Test Spreadsheet",
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ ),
+ ),
],
test_mock={
"_get_metadata": lambda *args, **kwargs: {
@@ -626,14 +875,35 @@ class GoogleSheetsMetadataBlock(Block):
async def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
- service = _build_sheets_service(credentials)
- spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id)
- result = await asyncio.to_thread(
- self._get_metadata,
- service,
- spreadsheet_id,
- )
- yield "result", result
+ if not input_data.spreadsheet:
+ yield "error", "No spreadsheet selected"
+ return
+
+ # Check if the selected file is actually a Google Sheets spreadsheet
+ validation_error = _validate_spreadsheet_file(input_data.spreadsheet)
+ if validation_error:
+ yield "error", validation_error
+ return
+
+ try:
+ service = _build_sheets_service(credentials)
+ result = await asyncio.to_thread(
+ self._get_metadata,
+ service,
+ input_data.spreadsheet.id,
+ )
+ yield "result", result
+ # Output the GoogleDriveFile for chaining
+ yield "spreadsheet", GoogleDriveFile(
+ id=input_data.spreadsheet.id,
+ name=input_data.spreadsheet.name,
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ )
+ except Exception as e:
+ yield "error", f"Failed to get spreadsheet metadata: {str(e)}"
def _get_metadata(self, service, spreadsheet_id: str) -> dict:
result = (
@@ -661,9 +931,11 @@ class GoogleSheetsManageSheetBlock(Block):
credentials: GoogleCredentialsInput = GoogleCredentialsField(
["https://www.googleapis.com/auth/spreadsheets"]
)
- spreadsheet_id: str = SchemaField(
- description="Spreadsheet ID or URL",
- title="Spreadsheet ID or URL",
+ spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ title="Spreadsheet",
+ description="Select a Google Sheets spreadsheet",
+ allowed_views=["SPREADSHEETS"],
+ allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
operation: SheetOperation = SchemaField(description="Operation to perform")
sheet_name: str = SchemaField(
@@ -679,6 +951,12 @@ class GoogleSheetsManageSheetBlock(Block):
class Output(BlockSchemaOutput):
result: dict = SchemaField(description="Operation result")
+ spreadsheet: GoogleDriveFile = SchemaField(
+ description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)",
+ )
+ error: str = SchemaField(
+ description="Error message if any",
+ )
def __init__(self):
super().__init__(
@@ -689,13 +967,30 @@ class GoogleSheetsManageSheetBlock(Block):
output_schema=GoogleSheetsManageSheetBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "credentials": TEST_CREDENTIALS_INPUT,
+ "spreadsheet": {
+ "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "name": "Test Spreadsheet",
+ "mimeType": "application/vnd.google-apps.spreadsheet",
+ },
"operation": SheetOperation.CREATE,
"sheet_name": "NewSheet",
- "credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
- test_output=[("result", {"success": True, "sheetId": 123})],
+ test_output=[
+ ("result", {"success": True, "sheetId": 123}),
+ (
+ "spreadsheet",
+ GoogleDriveFile(
+ id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ name="Test Spreadsheet",
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ ),
+ ),
+ ],
test_mock={
"_manage_sheet": lambda *args, **kwargs: {
"success": True,
@@ -707,18 +1002,39 @@ class GoogleSheetsManageSheetBlock(Block):
async def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
- service = _build_sheets_service(credentials)
- spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id)
- result = await asyncio.to_thread(
- self._manage_sheet,
- service,
- spreadsheet_id,
- input_data.operation,
- input_data.sheet_name,
- input_data.source_sheet_id,
- input_data.destination_sheet_name,
- )
- yield "result", result
+ if not input_data.spreadsheet:
+ yield "error", "No spreadsheet selected"
+ return
+
+ # Check if the selected file is actually a Google Sheets spreadsheet
+ validation_error = _validate_spreadsheet_file(input_data.spreadsheet)
+ if validation_error:
+ yield "error", validation_error
+ return
+
+ try:
+ service = _build_sheets_service(credentials)
+ result = await asyncio.to_thread(
+ self._manage_sheet,
+ service,
+ input_data.spreadsheet.id,
+ input_data.operation,
+ input_data.sheet_name,
+ input_data.source_sheet_id,
+ input_data.destination_sheet_name,
+ )
+ yield "result", result
+ # Output the GoogleDriveFile for chaining
+ yield "spreadsheet", GoogleDriveFile(
+ id=input_data.spreadsheet.id,
+ name=input_data.spreadsheet.name,
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ )
+ except Exception as e:
+ yield "error", f"Failed to manage sheet: {str(e)}"
def _manage_sheet(
self,
@@ -731,17 +1047,21 @@ class GoogleSheetsManageSheetBlock(Block):
) -> dict:
requests = []
- # Ensure a target sheet name when needed
- target_name = resolve_sheet_name(service, spreadsheet_id, sheet_name)
-
if operation == SheetOperation.CREATE:
+ # For CREATE, use sheet_name directly or default to "New Sheet"
+ target_name = sheet_name or "New Sheet"
requests.append({"addSheet": {"properties": {"title": target_name}}})
elif operation == SheetOperation.DELETE:
+ # For DELETE, resolve sheet name (fall back to first sheet if empty)
+ target_name = resolve_sheet_name(
+ service, spreadsheet_id, sheet_name or None
+ )
sid = sheet_id_by_name(service, spreadsheet_id, target_name)
if sid is None:
return {"error": f"Sheet '{target_name}' not found"}
requests.append({"deleteSheet": {"sheetId": sid}})
elif operation == SheetOperation.COPY:
+ # For COPY, use source_sheet_id and destination_sheet_name directly
requests.append(
{
"duplicateSheet": {
@@ -768,9 +1088,11 @@ class GoogleSheetsBatchOperationsBlock(Block):
credentials: GoogleCredentialsInput = GoogleCredentialsField(
["https://www.googleapis.com/auth/spreadsheets"]
)
- spreadsheet_id: str = SchemaField(
- description="The ID or URL of the spreadsheet to perform batch operations on",
- title="Spreadsheet ID or URL",
+ spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ title="Spreadsheet",
+ description="Select a Google Sheets spreadsheet",
+ allowed_views=["SPREADSHEETS"],
+ allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
operations: list[BatchOperation] = SchemaField(
description="List of operations to perform",
@@ -780,6 +1102,9 @@ class GoogleSheetsBatchOperationsBlock(Block):
result: dict = SchemaField(
description="The result of the batch operations",
)
+ spreadsheet: GoogleDriveFile = SchemaField(
+ description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)",
+ )
error: str = SchemaField(
description="Error message if any",
)
@@ -793,7 +1118,12 @@ class GoogleSheetsBatchOperationsBlock(Block):
output_schema=GoogleSheetsBatchOperationsBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "credentials": TEST_CREDENTIALS_INPUT,
+ "spreadsheet": {
+ "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "name": "Test Spreadsheet",
+ "mimeType": "application/vnd.google-apps.spreadsheet",
+ },
"operations": [
{
"type": BatchOperationType.UPDATE,
@@ -806,11 +1136,21 @@ class GoogleSheetsBatchOperationsBlock(Block):
"values": [["Data1", "Data2"]],
},
],
- "credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[
("result", {"totalUpdatedCells": 4, "replies": []}),
+ (
+ "spreadsheet",
+ GoogleDriveFile(
+ id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ name="Test Spreadsheet",
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ ),
+ ),
],
test_mock={
"_batch_operations": lambda *args, **kwargs: {
@@ -823,15 +1163,35 @@ class GoogleSheetsBatchOperationsBlock(Block):
async def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
- service = _build_sheets_service(credentials)
- spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id)
- result = await asyncio.to_thread(
- self._batch_operations,
- service,
- spreadsheet_id,
- input_data.operations,
- )
- yield "result", result
+ if not input_data.spreadsheet:
+ yield "error", "No spreadsheet selected"
+ return
+
+ # Check if the selected file is actually a Google Sheets spreadsheet
+ validation_error = _validate_spreadsheet_file(input_data.spreadsheet)
+ if validation_error:
+ yield "error", validation_error
+ return
+
+ try:
+ service = _build_sheets_service(credentials)
+ result = await asyncio.to_thread(
+ self._batch_operations,
+ service,
+ input_data.spreadsheet.id,
+ input_data.operations,
+ )
+ yield "result", result
+ yield "spreadsheet", GoogleDriveFile(
+ id=input_data.spreadsheet.id,
+ name=input_data.spreadsheet.name,
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ )
+ except Exception as e:
+ yield "error", f"Failed to perform batch operations: {str(e)}"
def _batch_operations(
self, service, spreadsheet_id: str, operations: list[BatchOperation]
@@ -885,9 +1245,11 @@ class GoogleSheetsFindReplaceBlock(Block):
credentials: GoogleCredentialsInput = GoogleCredentialsField(
["https://www.googleapis.com/auth/spreadsheets"]
)
- spreadsheet_id: str = SchemaField(
- description="The ID or URL of the spreadsheet to perform find/replace on",
- title="Spreadsheet ID or URL",
+ spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ title="Spreadsheet",
+ description="Select a Google Sheets spreadsheet",
+ allowed_views=["SPREADSHEETS"],
+ allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
find_text: str = SchemaField(
description="The text to find",
@@ -912,6 +1274,9 @@ class GoogleSheetsFindReplaceBlock(Block):
result: dict = SchemaField(
description="The result of the find/replace operation including number of replacements",
)
+ spreadsheet: GoogleDriveFile = SchemaField(
+ description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)",
+ )
error: str = SchemaField(
description="Error message if any",
)
@@ -925,16 +1290,31 @@ class GoogleSheetsFindReplaceBlock(Block):
output_schema=GoogleSheetsFindReplaceBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "credentials": TEST_CREDENTIALS_INPUT,
+ "spreadsheet": {
+ "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "name": "Test Spreadsheet",
+ "mimeType": "application/vnd.google-apps.spreadsheet",
+ },
"find_text": "old_value",
"replace_text": "new_value",
"match_case": False,
"match_entire_cell": False,
- "credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[
("result", {"occurrencesChanged": 5}),
+ (
+ "spreadsheet",
+ GoogleDriveFile(
+ id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ name="Test Spreadsheet",
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ ),
+ ),
],
test_mock={
"_find_replace": lambda *args, **kwargs: {"occurrencesChanged": 5},
@@ -944,19 +1324,39 @@ class GoogleSheetsFindReplaceBlock(Block):
async def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
- service = _build_sheets_service(credentials)
- spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id)
- result = await asyncio.to_thread(
- self._find_replace,
- service,
- spreadsheet_id,
- input_data.find_text,
- input_data.replace_text,
- input_data.sheet_id,
- input_data.match_case,
- input_data.match_entire_cell,
- )
- yield "result", result
+ if not input_data.spreadsheet:
+ yield "error", "No spreadsheet selected"
+ return
+
+ # Check if the selected file is actually a Google Sheets spreadsheet
+ validation_error = _validate_spreadsheet_file(input_data.spreadsheet)
+ if validation_error:
+ yield "error", validation_error
+ return
+
+ try:
+ service = _build_sheets_service(credentials)
+ result = await asyncio.to_thread(
+ self._find_replace,
+ service,
+ input_data.spreadsheet.id,
+ input_data.find_text,
+ input_data.replace_text,
+ input_data.sheet_id,
+ input_data.match_case,
+ input_data.match_entire_cell,
+ )
+ yield "result", result
+ yield "spreadsheet", GoogleDriveFile(
+ id=input_data.spreadsheet.id,
+ name=input_data.spreadsheet.name,
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ )
+ except Exception as e:
+ yield "error", f"Failed to find/replace in Google Sheet: {str(e)}"
def _find_replace(
self,
@@ -995,9 +1395,11 @@ class GoogleSheetsFindBlock(Block):
credentials: GoogleCredentialsInput = GoogleCredentialsField(
["https://www.googleapis.com/auth/spreadsheets.readonly"]
)
- spreadsheet_id: str = SchemaField(
- description="The ID or URL of the spreadsheet to search in",
- title="Spreadsheet ID or URL",
+ spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ title="Spreadsheet",
+ description="Select a Google Sheets spreadsheet",
+ allowed_views=["SPREADSHEETS"],
+ allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
)
find_text: str = SchemaField(
description="The text to find",
@@ -1034,6 +1436,9 @@ class GoogleSheetsFindBlock(Block):
count: int = SchemaField(
description="Number of occurrences found",
)
+ spreadsheet: GoogleDriveFile = SchemaField(
+ description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)",
+ )
error: str = SchemaField(
description="Error message if any",
)
@@ -1047,13 +1452,17 @@ class GoogleSheetsFindBlock(Block):
output_schema=GoogleSheetsFindBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "credentials": TEST_CREDENTIALS_INPUT,
+ "spreadsheet": {
+ "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "name": "Test Spreadsheet",
+ "mimeType": "application/vnd.google-apps.spreadsheet",
+ },
"find_text": "search_value",
"match_case": False,
"match_entire_cell": False,
"find_all": True,
"range": "Sheet1!A1:C10",
- "credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[
@@ -1067,6 +1476,17 @@ class GoogleSheetsFindBlock(Block):
],
),
("result", {"success": True}),
+ (
+ "spreadsheet",
+ GoogleDriveFile(
+ id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ name="Test Spreadsheet",
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ ),
+ ),
],
test_mock={
"_find_text": lambda *args, **kwargs: {
@@ -1083,22 +1503,42 @@ class GoogleSheetsFindBlock(Block):
async def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
- service = _build_sheets_service(credentials)
- spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id)
- result = await asyncio.to_thread(
- self._find_text,
- service,
- spreadsheet_id,
- input_data.find_text,
- input_data.sheet_id,
- input_data.match_case,
- input_data.match_entire_cell,
- input_data.find_all,
- input_data.range,
- )
- yield "count", result["count"]
- yield "locations", result["locations"]
- yield "result", {"success": True}
+ if not input_data.spreadsheet:
+ yield "error", "No spreadsheet selected"
+ return
+
+ # Check if the selected file is actually a Google Sheets spreadsheet
+ validation_error = _validate_spreadsheet_file(input_data.spreadsheet)
+ if validation_error:
+ yield "error", validation_error
+ return
+
+ try:
+ service = _build_sheets_service(credentials)
+ result = await asyncio.to_thread(
+ self._find_text,
+ service,
+ input_data.spreadsheet.id,
+ input_data.find_text,
+ input_data.sheet_id,
+ input_data.match_case,
+ input_data.match_entire_cell,
+ input_data.find_all,
+ input_data.range,
+ )
+ yield "count", result["count"]
+ yield "locations", result["locations"]
+ yield "result", {"success": True}
+ yield "spreadsheet", GoogleDriveFile(
+ id=input_data.spreadsheet.id,
+ name=input_data.spreadsheet.name,
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ )
+ except Exception as e:
+ yield "error", f"Failed to find text in Google Sheet: {str(e)}"
def _find_text(
self,
@@ -1263,11 +1703,16 @@ class GoogleSheetsFormatBlock(Block):
credentials: GoogleCredentialsInput = GoogleCredentialsField(
["https://www.googleapis.com/auth/spreadsheets"]
)
- spreadsheet_id: str = SchemaField(
- description="Spreadsheet ID or URL",
- title="Spreadsheet ID or URL",
+ spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ title="Spreadsheet",
+ description="Select a Google Sheets spreadsheet",
+ allowed_views=["SPREADSHEETS"],
+ allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
+ )
+ range: str = SchemaField(
+ description="A1 notation – sheet optional",
+ placeholder="Sheet1!A1:B2",
)
- range: str = SchemaField(description="A1 notation – sheet optional")
background_color: dict = SchemaField(default={})
text_color: dict = SchemaField(default={})
bold: bool = SchemaField(default=False)
@@ -1276,6 +1721,12 @@ class GoogleSheetsFormatBlock(Block):
class Output(BlockSchemaOutput):
result: dict = SchemaField(description="API response or success flag")
+ spreadsheet: GoogleDriveFile = SchemaField(
+ description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)",
+ )
+ error: str = SchemaField(
+ description="Error message if any",
+ )
def __init__(self):
super().__init__(
@@ -1286,37 +1737,74 @@ class GoogleSheetsFormatBlock(Block):
output_schema=GoogleSheetsFormatBlock.Output,
disabled=GOOGLE_SHEETS_DISABLED,
test_input={
- "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "credentials": TEST_CREDENTIALS_INPUT,
+ "spreadsheet": {
+ "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "name": "Test Spreadsheet",
+ "mimeType": "application/vnd.google-apps.spreadsheet",
+ },
"range": "A1:B2",
"background_color": {"red": 1.0, "green": 0.9, "blue": 0.9},
"bold": True,
- "credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
- test_output=[("result", {"success": True})],
+ test_output=[
+ ("result", {"success": True}),
+ (
+ "spreadsheet",
+ GoogleDriveFile(
+ id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ name="Test Spreadsheet",
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ ),
+ ),
+ ],
test_mock={"_format_cells": lambda *args, **kwargs: {"success": True}},
)
async def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
- service = _build_sheets_service(credentials)
- spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id)
- result = await asyncio.to_thread(
- self._format_cells,
- service,
- spreadsheet_id,
- input_data.range,
- input_data.background_color,
- input_data.text_color,
- input_data.bold,
- input_data.italic,
- input_data.font_size,
- )
- if "error" in result:
- yield "error", result["error"]
- else:
- yield "result", result
+ if not input_data.spreadsheet:
+ yield "error", "No spreadsheet selected"
+ return
+
+ # Check if the selected file is actually a Google Sheets spreadsheet
+ validation_error = _validate_spreadsheet_file(input_data.spreadsheet)
+ if validation_error:
+ yield "error", validation_error
+ return
+
+ try:
+ service = _build_sheets_service(credentials)
+ result = await asyncio.to_thread(
+ self._format_cells,
+ service,
+ input_data.spreadsheet.id,
+ input_data.range,
+ input_data.background_color,
+ input_data.text_color,
+ input_data.bold,
+ input_data.italic,
+ input_data.font_size,
+ )
+ if "error" in result:
+ yield "error", result["error"]
+ else:
+ yield "result", result
+ yield "spreadsheet", GoogleDriveFile(
+ id=input_data.spreadsheet.id,
+ name=input_data.spreadsheet.name,
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ )
+ except Exception as e:
+ yield "error", f"Failed to format Google Sheet cells: {str(e)}"
def _format_cells(
self,
@@ -1402,6 +1890,9 @@ class GoogleSheetsCreateSpreadsheetBlock(Block):
result: dict = SchemaField(
description="The result containing spreadsheet ID and URL",
)
+ spreadsheet: GoogleDriveFile = SchemaField(
+ description="The created spreadsheet as a GoogleDriveFile (for chaining to other blocks)",
+ )
spreadsheet_id: str = SchemaField(
description="The ID of the created spreadsheet",
)
@@ -1427,6 +1918,17 @@ class GoogleSheetsCreateSpreadsheetBlock(Block):
},
test_credentials=TEST_CREDENTIALS,
test_output=[
+ (
+ "spreadsheet",
+ GoogleDriveFile(
+ id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ name="Test Spreadsheet",
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ ),
+ ),
("spreadsheet_id", "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms"),
(
"spreadsheet_url",
@@ -1438,6 +1940,7 @@ class GoogleSheetsCreateSpreadsheetBlock(Block):
"_create_spreadsheet": lambda *args, **kwargs: {
"spreadsheetId": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
"spreadsheetUrl": "https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
+ "title": "Test Spreadsheet",
},
},
)
@@ -1456,8 +1959,19 @@ class GoogleSheetsCreateSpreadsheetBlock(Block):
if "error" in result:
yield "error", result["error"]
else:
- yield "spreadsheet_id", result["spreadsheetId"]
- yield "spreadsheet_url", result["spreadsheetUrl"]
+ spreadsheet_id = result["spreadsheetId"]
+ spreadsheet_url = result["spreadsheetUrl"]
+ # Output the full GoogleDriveFile object for easy chaining
+ yield "spreadsheet", GoogleDriveFile(
+ id=spreadsheet_id,
+ name=result.get("title", input_data.title),
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url=spreadsheet_url,
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ )
+ yield "spreadsheet_id", spreadsheet_id
+ yield "spreadsheet_url", spreadsheet_url
yield "result", {"success": True}
def _create_spreadsheet(self, service, title: str, sheet_names: list[str]) -> dict:
@@ -1493,6 +2007,152 @@ class GoogleSheetsCreateSpreadsheetBlock(Block):
return {
"spreadsheetId": spreadsheet_id,
"spreadsheetUrl": spreadsheet_url,
+ "title": title,
}
except Exception as e:
return {"error": str(e)}
+
+
+class GoogleSheetsUpdateCellBlock(Block):
+ """Update a single cell in a Google Sheets spreadsheet."""
+
+ class Input(BlockSchemaInput):
+ credentials: GoogleCredentialsInput = GoogleCredentialsField(
+ ["https://www.googleapis.com/auth/spreadsheets"]
+ )
+ spreadsheet: GoogleDriveFile = GoogleDrivePickerField(
+ title="Spreadsheet",
+ description="Select a Google Sheets spreadsheet",
+ allowed_views=["SPREADSHEETS"],
+ allowed_mime_types=["application/vnd.google-apps.spreadsheet"],
+ )
+ cell: str = SchemaField(
+ description="Cell address in A1 notation (e.g., 'A1', 'Sheet1!B2')",
+ placeholder="A1",
+ )
+ value: str = SchemaField(
+ description="Value to write to the cell",
+ )
+ value_input_option: ValueInputOption = SchemaField(
+ description="How input data should be interpreted",
+ default=ValueInputOption.USER_ENTERED,
+ advanced=True,
+ )
+
+ class Output(BlockSchemaOutput):
+ result: dict = SchemaField(
+ description="The result of the update operation",
+ )
+ spreadsheet: GoogleDriveFile = SchemaField(
+ description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)",
+ )
+ error: str = SchemaField(
+ description="Error message if any",
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="df521b68-62d9-42e4-924f-fb6c245516fc",
+ description="Update a single cell in a Google Sheets spreadsheet.",
+ categories={BlockCategory.DATA},
+ input_schema=GoogleSheetsUpdateCellBlock.Input,
+ output_schema=GoogleSheetsUpdateCellBlock.Output,
+ disabled=GOOGLE_SHEETS_DISABLED,
+ test_input={
+ "credentials": TEST_CREDENTIALS_INPUT,
+ "spreadsheet": {
+ "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "name": "Test Spreadsheet",
+ "mimeType": "application/vnd.google-apps.spreadsheet",
+ },
+ "cell": "A1",
+ "value": "Hello World",
+ },
+ test_credentials=TEST_CREDENTIALS,
+ test_output=[
+ (
+ "result",
+ {"updatedCells": 1, "updatedColumns": 1, "updatedRows": 1},
+ ),
+ (
+ "spreadsheet",
+ GoogleDriveFile(
+ id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ name="Test Spreadsheet",
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ ),
+ ),
+ ],
+ test_mock={
+ "_update_cell": lambda *args, **kwargs: {
+ "updatedCells": 1,
+ "updatedColumns": 1,
+ "updatedRows": 1,
+ },
+ },
+ )
+
+ async def run(
+ self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
+ ) -> BlockOutput:
+ try:
+ if not input_data.spreadsheet:
+ yield "error", "No spreadsheet selected"
+ return
+
+ # Check if the selected file is actually a Google Sheets spreadsheet
+ validation_error = _validate_spreadsheet_file(input_data.spreadsheet)
+ if validation_error:
+ yield "error", validation_error
+ return
+
+ service = _build_sheets_service(credentials)
+ result = await asyncio.to_thread(
+ self._update_cell,
+ service,
+ input_data.spreadsheet.id,
+ input_data.cell,
+ input_data.value,
+ input_data.value_input_option,
+ )
+
+ yield "result", result
+ yield "spreadsheet", GoogleDriveFile(
+ id=input_data.spreadsheet.id,
+ name=input_data.spreadsheet.name,
+ mimeType="application/vnd.google-apps.spreadsheet",
+ url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit",
+ iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png",
+ isFolder=False,
+ )
+ except Exception as e:
+ yield "error", _handle_sheets_api_error(str(e), "update")
+
+ def _update_cell(
+ self,
+ service,
+ spreadsheet_id: str,
+ cell: str,
+ value: str,
+ value_input_option: ValueInputOption,
+ ) -> dict:
+ body = {"values": [[value]]}
+ result = (
+ service.spreadsheets()
+ .values()
+ .update(
+ spreadsheetId=spreadsheet_id,
+ range=cell,
+ valueInputOption=value_input_option.value,
+ body=body,
+ )
+ .execute()
+ )
+ return {
+ "updatedCells": result.get("updatedCells", 0),
+ "updatedRows": result.get("updatedRows", 0),
+ "updatedColumns": result.get("updatedColumns", 0),
+ }
diff --git a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py
new file mode 100644
index 0000000000..1dd5dbac9d
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py
@@ -0,0 +1,160 @@
+import logging
+from typing import Any, Literal
+
+from prisma.enums import ReviewStatus
+
+from backend.data.block import (
+ Block,
+ BlockCategory,
+ BlockOutput,
+ BlockSchemaInput,
+ BlockSchemaOutput,
+)
+from backend.data.execution import ExecutionStatus
+from backend.data.human_review import ReviewResult
+from backend.data.model import SchemaField
+from backend.executor.manager import async_update_node_execution_status
+from backend.util.clients import get_database_manager_async_client
+
+logger = logging.getLogger(__name__)
+
+
+class HumanInTheLoopBlock(Block):
+ """
+ This block pauses execution and waits for human approval or modification of the data.
+
+ When executed, it creates a pending review entry and sets the node execution status
+ to REVIEW. The execution will remain paused until a human user either:
+ - Approves the data (with or without modifications)
+ - Rejects the data
+
+ This is useful for workflows that require human validation or intervention before
+ proceeding to the next steps.
+ """
+
+ class Input(BlockSchemaInput):
+ data: Any = SchemaField(description="The data to be reviewed by a human user")
+ name: str = SchemaField(
+ description="A descriptive name for what this data represents",
+ )
+ editable: bool = SchemaField(
+ description="Whether the human reviewer can edit the data",
+ default=True,
+ advanced=True,
+ )
+
+ class Output(BlockSchemaOutput):
+ reviewed_data: Any = SchemaField(
+ description="The data after human review (may be modified)"
+ )
+ status: Literal["approved", "rejected"] = SchemaField(
+ description="Status of the review: 'approved' or 'rejected'"
+ )
+ review_message: str = SchemaField(
+ description="Any message provided by the reviewer", default=""
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d",
+ description="Pause execution and wait for human approval or modification of data",
+ categories={BlockCategory.BASIC},
+ input_schema=HumanInTheLoopBlock.Input,
+ output_schema=HumanInTheLoopBlock.Output,
+ test_input={
+ "data": {"name": "John Doe", "age": 30},
+ "name": "User profile data",
+ "editable": True,
+ },
+ test_output=[
+ ("reviewed_data", {"name": "John Doe", "age": 30}),
+ ("status", "approved"),
+ ("review_message", ""),
+ ],
+ test_mock={
+ "get_or_create_human_review": lambda *_args, **_kwargs: ReviewResult(
+ data={"name": "John Doe", "age": 30},
+ status=ReviewStatus.APPROVED,
+ message="",
+ processed=False,
+ node_exec_id="test-node-exec-id",
+ ),
+ "update_node_execution_status": lambda *_args, **_kwargs: None,
+ },
+ )
+
+ async def run(
+ self,
+ input_data: Input,
+ *,
+ user_id: str,
+ node_exec_id: str,
+ graph_exec_id: str,
+ graph_id: str,
+ graph_version: int,
+ **kwargs,
+ ) -> BlockOutput:
+ """
+ Execute the Human In The Loop block.
+
+ This method uses one function to handle the complete workflow - checking existing reviews
+ and creating pending ones as needed.
+ """
+ try:
+ logger.debug(f"HITL block executing for node {node_exec_id}")
+
+ # Use the data layer to handle the complete workflow
+ db_client = get_database_manager_async_client()
+ result = await db_client.get_or_create_human_review(
+ user_id=user_id,
+ node_exec_id=node_exec_id,
+ graph_exec_id=graph_exec_id,
+ graph_id=graph_id,
+ graph_version=graph_version,
+ input_data=input_data.data,
+ message=input_data.name,
+ editable=input_data.editable,
+ )
+ except Exception as e:
+ logger.error(f"Error in HITL block for node {node_exec_id}: {str(e)}")
+ raise
+
+ # Check if we're waiting for human input
+ if result is None:
+ logger.info(
+ f"HITL block pausing execution for node {node_exec_id} - awaiting human review"
+ )
+ try:
+ # Set node status to REVIEW so execution manager can't mark it as COMPLETED
+ # The VALID_STATUS_TRANSITIONS will then prevent any unwanted status changes
+ # Use the proper wrapper function to ensure websocket events are published
+ await async_update_node_execution_status(
+ db_client=db_client,
+ exec_id=node_exec_id,
+ status=ExecutionStatus.REVIEW,
+ )
+ # Execution pauses here until API routes process the review
+ return
+ except Exception as e:
+ logger.error(
+ f"Failed to update node status for HITL block {node_exec_id}: {str(e)}"
+ )
+ raise
+
+ # Review is complete (approved or rejected) - check if unprocessed
+ if not result.processed:
+ # Mark as processed before yielding
+ await db_client.update_review_processed_status(
+ node_exec_id=node_exec_id, processed=True
+ )
+
+ if result.status == ReviewStatus.APPROVED:
+ yield "status", "approved"
+ yield "reviewed_data", result.data
+ if result.message:
+ yield "review_message", result.message
+
+ elif result.status == ReviewStatus.REJECTED:
+ yield "status", "rejected"
+ if result.message:
+ yield "review_message", result.message
diff --git a/autogpt_platform/backend/backend/check_db.py b/autogpt_platform/backend/backend/check_db.py
index 591c519f84..7e1c3ee14f 100644
--- a/autogpt_platform/backend/backend/check_db.py
+++ b/autogpt_platform/backend/backend/check_db.py
@@ -5,6 +5,8 @@ from datetime import datetime
from faker import Faker
from prisma import Prisma
+from backend.data.db import query_raw_with_schema
+
faker = Faker()
@@ -15,9 +17,9 @@ async def check_cron_job(db):
try:
# Check if pg_cron extension exists
- extension_check = await db.query_raw("CREATE EXTENSION pg_cron;")
+ extension_check = await query_raw_with_schema("CREATE EXTENSION pg_cron;")
print(extension_check)
- extension_check = await db.query_raw(
+ extension_check = await query_raw_with_schema(
"SELECT COUNT(*) as count FROM pg_extension WHERE extname = 'pg_cron'"
)
if extension_check[0]["count"] == 0:
@@ -25,7 +27,7 @@ async def check_cron_job(db):
return False
# Check if the refresh job exists
- job_check = await db.query_raw(
+ job_check = await query_raw_with_schema(
"""
SELECT jobname, schedule, command
FROM cron.job
@@ -55,33 +57,33 @@ async def get_materialized_view_counts(db):
print("-" * 40)
# Get counts from mv_agent_run_counts
- agent_runs = await db.query_raw(
+ agent_runs = await query_raw_with_schema(
"""
SELECT COUNT(*) as total_agents,
SUM(run_count) as total_runs,
MAX(run_count) as max_runs,
MIN(run_count) as min_runs
- FROM mv_agent_run_counts
+ FROM {schema_prefix}mv_agent_run_counts
"""
)
# Get counts from mv_review_stats
- review_stats = await db.query_raw(
+ review_stats = await query_raw_with_schema(
"""
SELECT COUNT(*) as total_listings,
SUM(review_count) as total_reviews,
AVG(avg_rating) as overall_avg_rating
- FROM mv_review_stats
+ FROM {schema_prefix}mv_review_stats
"""
)
# Get sample data from StoreAgent view
- store_agents = await db.query_raw(
+ store_agents = await query_raw_with_schema(
"""
SELECT COUNT(*) as total_store_agents,
AVG(runs) as avg_runs,
AVG(rating) as avg_rating
- FROM "StoreAgent"
+ FROM {schema_prefix}"StoreAgent"
"""
)
diff --git a/autogpt_platform/backend/backend/check_store_data.py b/autogpt_platform/backend/backend/check_store_data.py
index 10aa6507ba..c17393a6d4 100644
--- a/autogpt_platform/backend/backend/check_store_data.py
+++ b/autogpt_platform/backend/backend/check_store_data.py
@@ -5,6 +5,8 @@ import asyncio
from prisma import Prisma
+from backend.data.db import query_raw_with_schema
+
async def check_store_data(db):
"""Check what store data exists in the database."""
@@ -89,11 +91,11 @@ async def check_store_data(db):
sa.creator_username,
sa.categories,
sa.updated_at
- FROM "StoreAgent" sa
+ FROM {schema_prefix}"StoreAgent" sa
LIMIT 10;
"""
- store_agents = await db.query_raw(query)
+ store_agents = await query_raw_with_schema(query)
print(f"Total store agents in view: {len(store_agents)}")
if store_agents:
@@ -111,22 +113,22 @@ async def check_store_data(db):
# Check for any APPROVED store listing versions
query = """
SELECT COUNT(*) as count
- FROM "StoreListingVersion"
+ FROM {schema_prefix}"StoreListingVersion"
WHERE "submissionStatus" = 'APPROVED'
"""
- result = await db.query_raw(query)
+ result = await query_raw_with_schema(query)
approved_count = result[0]["count"] if result else 0
print(f"Approved store listing versions: {approved_count}")
# Check for store listings with hasApprovedVersion = true
query = """
SELECT COUNT(*) as count
- FROM "StoreListing"
+ FROM {schema_prefix}"StoreListing"
WHERE "hasApprovedVersion" = true AND "isDeleted" = false
"""
- result = await db.query_raw(query)
+ result = await query_raw_with_schema(query)
has_approved_count = result[0]["count"] if result else 0
print(f"Store listings with approved versions: {has_approved_count}")
@@ -134,10 +136,10 @@ async def check_store_data(db):
query = """
SELECT COUNT(DISTINCT "agentGraphId") as unique_agents,
COUNT(*) as total_executions
- FROM "AgentGraphExecution"
+ FROM {schema_prefix}"AgentGraphExecution"
"""
- result = await db.query_raw(query)
+ result = await query_raw_with_schema(query)
if result:
print("\nAgent Graph Executions:")
print(f" Unique agents with executions: {result[0]['unique_agents']}")
diff --git a/autogpt_platform/backend/backend/data/credit_test.py b/autogpt_platform/backend/backend/data/credit_test.py
index 8e9487f74a..6f604975cf 100644
--- a/autogpt_platform/backend/backend/data/credit_test.py
+++ b/autogpt_platform/backend/backend/data/credit_test.py
@@ -73,6 +73,7 @@ async def test_block_credit_usage(server: SpinTestServer):
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
+ graph_version=1,
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
@@ -94,6 +95,7 @@ async def test_block_credit_usage(server: SpinTestServer):
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
+ graph_version=1,
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
diff --git a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py
index a8253f3136..b78633cf58 100644
--- a/autogpt_platform/backend/backend/data/execution.py
+++ b/autogpt_platform/backend/backend/data/execution.py
@@ -34,6 +34,7 @@ from prisma.types import (
AgentNodeExecutionKeyValueDataCreateInput,
AgentNodeExecutionUpdateInput,
AgentNodeExecutionWhereInput,
+ AgentNodeExecutionWhereUniqueInput,
)
from pydantic import BaseModel, ConfigDict, JsonValue, ValidationError
from pydantic.fields import Field
@@ -96,11 +97,14 @@ NodesInputMasks = Mapping[str, NodeInputMask]
VALID_STATUS_TRANSITIONS = {
ExecutionStatus.QUEUED: [
ExecutionStatus.INCOMPLETE,
+ ExecutionStatus.TERMINATED, # For resuming halted execution
+ ExecutionStatus.REVIEW, # For resuming after review
],
ExecutionStatus.RUNNING: [
ExecutionStatus.INCOMPLETE,
ExecutionStatus.QUEUED,
ExecutionStatus.TERMINATED, # For resuming halted execution
+ ExecutionStatus.REVIEW, # For resuming after review
],
ExecutionStatus.COMPLETED: [
ExecutionStatus.RUNNING,
@@ -109,11 +113,16 @@ VALID_STATUS_TRANSITIONS = {
ExecutionStatus.INCOMPLETE,
ExecutionStatus.QUEUED,
ExecutionStatus.RUNNING,
+ ExecutionStatus.REVIEW,
],
ExecutionStatus.TERMINATED: [
ExecutionStatus.INCOMPLETE,
ExecutionStatus.QUEUED,
ExecutionStatus.RUNNING,
+ ExecutionStatus.REVIEW,
+ ],
+ ExecutionStatus.REVIEW: [
+ ExecutionStatus.RUNNING,
],
}
@@ -446,6 +455,7 @@ class NodeExecutionResult(BaseModel):
user_id=self.user_id,
graph_exec_id=self.graph_exec_id,
graph_id=self.graph_id,
+ graph_version=self.graph_version,
node_exec_id=self.node_exec_id,
node_id=self.node_id,
block_id=self.block_id,
@@ -728,7 +738,7 @@ async def upsert_execution_input(
input_name: str,
input_data: JsonValue,
node_exec_id: str | None = None,
-) -> tuple[str, BlockInput]:
+) -> tuple[NodeExecutionResult, BlockInput]:
"""
Insert AgentNodeExecutionInputOutput record for as one of AgentNodeExecution.Input.
If there is no AgentNodeExecution that has no `input_name` as input, create new one.
@@ -761,7 +771,7 @@ async def upsert_execution_input(
existing_execution = await AgentNodeExecution.prisma().find_first(
where=existing_exec_query_filter,
order={"addedTime": "asc"},
- include={"Input": True},
+ include={"Input": True, "GraphExecution": True},
)
json_input_data = SafeJson(input_data)
@@ -773,7 +783,7 @@ async def upsert_execution_input(
referencedByInputExecId=existing_execution.id,
)
)
- return existing_execution.id, {
+ return NodeExecutionResult.from_db(existing_execution), {
**{
input_data.name: type_utils.convert(input_data.data, JsonValue)
for input_data in existing_execution.Input or []
@@ -788,9 +798,10 @@ async def upsert_execution_input(
agentGraphExecutionId=graph_exec_id,
executionStatus=ExecutionStatus.INCOMPLETE,
Input={"create": {"name": input_name, "data": json_input_data}},
- )
+ ),
+ include={"GraphExecution": True},
)
- return result.id, {input_name: input_data}
+ return NodeExecutionResult.from_db(result), {input_name: input_data}
else:
raise ValueError(
@@ -886,9 +897,25 @@ async def update_node_execution_status_batch(
node_exec_ids: list[str],
status: ExecutionStatus,
stats: dict[str, Any] | None = None,
-):
- await AgentNodeExecution.prisma().update_many(
- where={"id": {"in": node_exec_ids}},
+) -> int:
+ # Validate status transitions - allowed_from should never be empty for valid statuses
+ allowed_from = VALID_STATUS_TRANSITIONS.get(status, [])
+ if not allowed_from:
+ raise ValueError(
+ f"Invalid status transition: {status} has no valid source statuses"
+ )
+
+ # For batch updates, we filter to only update nodes with valid current statuses
+ where_clause = cast(
+ AgentNodeExecutionWhereInput,
+ {
+ "id": {"in": node_exec_ids},
+ "executionStatus": {"in": [s.value for s in allowed_from]},
+ },
+ )
+
+ return await AgentNodeExecution.prisma().update_many(
+ where=where_clause,
data=_get_update_status_data(status, None, stats),
)
@@ -902,15 +929,32 @@ async def update_node_execution_status(
if status == ExecutionStatus.QUEUED and execution_data is None:
raise ValueError("Execution data must be provided when queuing an execution.")
- res = await AgentNodeExecution.prisma().update(
- where={"id": node_exec_id},
+ # Validate status transitions - allowed_from should never be empty for valid statuses
+ allowed_from = VALID_STATUS_TRANSITIONS.get(status, [])
+ if not allowed_from:
+ raise ValueError(
+ f"Invalid status transition: {status} has no valid source statuses"
+ )
+
+ if res := await AgentNodeExecution.prisma().update(
+ where=cast(
+ AgentNodeExecutionWhereUniqueInput,
+ {
+ "id": node_exec_id,
+ "executionStatus": {"in": [s.value for s in allowed_from]},
+ },
+ ),
data=_get_update_status_data(status, execution_data, stats),
include=EXECUTION_RESULT_INCLUDE,
- )
- if not res:
- raise ValueError(f"Execution {node_exec_id} not found.")
+ ):
+ return NodeExecutionResult.from_db(res)
- return NodeExecutionResult.from_db(res)
+ if res := await AgentNodeExecution.prisma().find_unique(
+ where={"id": node_exec_id}, include=EXECUTION_RESULT_INCLUDE
+ ):
+ return NodeExecutionResult.from_db(res)
+
+ raise ValueError(f"Execution {node_exec_id} not found.")
def _get_update_status_data(
@@ -964,17 +1008,17 @@ async def get_node_execution(node_exec_id: str) -> NodeExecutionResult | None:
return NodeExecutionResult.from_db(execution)
-async def get_node_executions(
+def _build_node_execution_where_clause(
graph_exec_id: str | None = None,
node_id: str | None = None,
block_ids: list[str] | None = None,
statuses: list[ExecutionStatus] | None = None,
- limit: int | None = None,
created_time_gte: datetime | None = None,
created_time_lte: datetime | None = None,
- include_exec_data: bool = True,
-) -> list[NodeExecutionResult]:
- """⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints."""
+) -> AgentNodeExecutionWhereInput:
+ """
+ Build where clause for node execution queries.
+ """
where_clause: AgentNodeExecutionWhereInput = {}
if graph_exec_id:
where_clause["agentGraphExecutionId"] = graph_exec_id
@@ -991,6 +1035,29 @@ async def get_node_executions(
"lte": created_time_lte or datetime.max.replace(tzinfo=timezone.utc),
}
+ return where_clause
+
+
+async def get_node_executions(
+ graph_exec_id: str | None = None,
+ node_id: str | None = None,
+ block_ids: list[str] | None = None,
+ statuses: list[ExecutionStatus] | None = None,
+ limit: int | None = None,
+ created_time_gte: datetime | None = None,
+ created_time_lte: datetime | None = None,
+ include_exec_data: bool = True,
+) -> list[NodeExecutionResult]:
+ """⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints."""
+ where_clause = _build_node_execution_where_clause(
+ graph_exec_id=graph_exec_id,
+ node_id=node_id,
+ block_ids=block_ids,
+ statuses=statuses,
+ created_time_gte=created_time_gte,
+ created_time_lte=created_time_lte,
+ )
+
executions = await AgentNodeExecution.prisma().find_many(
where=where_clause,
include=(
@@ -1052,6 +1119,7 @@ class NodeExecutionEntry(BaseModel):
user_id: str
graph_exec_id: str
graph_id: str
+ graph_version: int
node_exec_id: str
node_id: str
block_id: str
diff --git a/autogpt_platform/backend/backend/data/human_review.py b/autogpt_platform/backend/backend/data/human_review.py
new file mode 100644
index 0000000000..2b0b2dbfb7
--- /dev/null
+++ b/autogpt_platform/backend/backend/data/human_review.py
@@ -0,0 +1,294 @@
+"""
+Data layer for Human In The Loop (HITL) review operations.
+Handles all database operations for pending human reviews.
+"""
+
+import asyncio
+import logging
+from datetime import datetime, timezone
+from typing import Optional
+
+from prisma.enums import ReviewStatus
+from prisma.models import PendingHumanReview
+from prisma.types import PendingHumanReviewUpdateInput
+from pydantic import BaseModel
+
+from backend.server.v2.executions.review.model import (
+ PendingHumanReviewModel,
+ SafeJsonData,
+)
+from backend.util.json import SafeJson
+
+logger = logging.getLogger(__name__)
+
+
+class ReviewResult(BaseModel):
+ """Result of a review operation."""
+
+ data: Optional[SafeJsonData] = None
+ status: ReviewStatus
+ message: str = ""
+ processed: bool
+ node_exec_id: str
+
+
+async def get_pending_review_by_node_exec_id(
+ node_exec_id: str, user_id: str
+) -> Optional["PendingHumanReviewModel"]:
+ """
+ Get the review for a node execution with user ownership validation (any status, not only WAITING).
+
+ Args:
+ node_exec_id: The node execution ID to check
+ user_id: The user ID to validate ownership
+
+ Returns:
+ The existing review if found and owned by the user, None otherwise
+ """
+ review = await PendingHumanReview.prisma().find_first(
+ where={
+ "nodeExecId": node_exec_id,
+ "userId": user_id,
+ }
+ )
+
+ if review:
+ return PendingHumanReviewModel.from_db(review)
+
+ return None
+
+
+async def get_or_create_human_review(
+ user_id: str,
+ node_exec_id: str,
+ graph_exec_id: str,
+ graph_id: str,
+ graph_version: int,
+ input_data: SafeJsonData,
+ message: str,
+ editable: bool,
+) -> Optional[ReviewResult]:
+ """
+ Get existing review or create a new pending review entry.
+
+ Uses upsert with empty update to get existing or create new review in a single operation.
+
+ Args:
+ user_id: ID of the user who owns this review
+ node_exec_id: ID of the node execution
+ graph_exec_id: ID of the graph execution
+ graph_id: ID of the graph template
+ graph_version: Version of the graph template
+ input_data: The data to be reviewed
+ message: Instructions for the reviewer
+ editable: Whether the data can be edited
+
+ Returns:
+ ReviewResult if the review is complete and not yet processed; None if waiting for human input or already processed
+ """
+ try:
+ logger.debug(f"Getting or creating review for node {node_exec_id}")
+
+ # Upsert - get existing or create new review
+ review = await PendingHumanReview.prisma().upsert(
+ where={"nodeExecId": node_exec_id},
+ data={
+ "create": {
+ "userId": user_id,
+ "nodeExecId": node_exec_id,
+ "graphExecId": graph_exec_id,
+ "graphId": graph_id,
+ "graphVersion": graph_version,
+ "payload": SafeJson(input_data),
+ "instructions": message,
+ "editable": editable,
+ "status": ReviewStatus.WAITING,
+ },
+ "update": {}, # Do nothing on update - keep existing review as is
+ },
+ )
+
+ logger.info(
+ f"Review {'created' if review.createdAt == review.updatedAt else 'retrieved'} for node {node_exec_id} with status {review.status}"
+ )
+ except Exception as e:
+ logger.error(
+ f"Database error in get_or_create_human_review for node {node_exec_id}: {str(e)}"
+ )
+ raise
+
+ # Early return if already processed
+ if review.processed:
+ return None
+
+ if review.status == ReviewStatus.APPROVED:
+ # Return the approved review result
+ return ReviewResult(
+ data=review.payload,
+ status=ReviewStatus.APPROVED,
+ message=review.reviewMessage or "",
+ processed=review.processed,
+ node_exec_id=review.nodeExecId,
+ )
+ elif review.status == ReviewStatus.REJECTED:
+ # Return the rejected review result
+ return ReviewResult(
+ data=None,
+ status=ReviewStatus.REJECTED,
+ message=review.reviewMessage or "",
+ processed=review.processed,
+ node_exec_id=review.nodeExecId,
+ )
+ else:
+ # Review is pending - return None to continue waiting
+ return None
+
+
+async def has_pending_reviews_for_graph_exec(graph_exec_id: str) -> bool:
+ """
+ Check if a graph execution has any pending reviews.
+
+ Args:
+ graph_exec_id: The graph execution ID to check
+
+ Returns:
+ True if there are reviews waiting for human input, False otherwise
+ """
+ # Check if there are any reviews waiting for human input
+ count = await PendingHumanReview.prisma().count(
+ where={"graphExecId": graph_exec_id, "status": ReviewStatus.WAITING}
+ )
+ return count > 0
+
+
+async def get_pending_reviews_for_user(
+ user_id: str, page: int = 1, page_size: int = 25
+) -> list["PendingHumanReviewModel"]:
+ """
+ Get all pending reviews for a user with pagination.
+
+ Args:
+ user_id: User ID to get reviews for
+ page: Page number (1-indexed)
+ page_size: Number of reviews per page
+
+ Returns:
+ List of pending review models
+ """
+ # Calculate offset for pagination
+ offset = (page - 1) * page_size
+
+ reviews = await PendingHumanReview.prisma().find_many(
+ where={"userId": user_id, "status": ReviewStatus.WAITING},
+ order={"createdAt": "desc"},
+ skip=offset,
+ take=page_size,
+ )
+
+ return [PendingHumanReviewModel.from_db(review) for review in reviews]
+
+
+async def get_pending_reviews_for_execution(
+ graph_exec_id: str, user_id: str
+) -> list["PendingHumanReviewModel"]:
+ """
+ Get all pending reviews for a specific graph execution.
+
+ Args:
+ graph_exec_id: Graph execution ID
+ user_id: User ID for security validation
+
+ Returns:
+ List of pending review models
+ """
+ reviews = await PendingHumanReview.prisma().find_many(
+ where={
+ "userId": user_id,
+ "graphExecId": graph_exec_id,
+ "status": ReviewStatus.WAITING,
+ },
+ order={"createdAt": "asc"},
+ )
+
+ return [PendingHumanReviewModel.from_db(review) for review in reviews]
+
+
+async def process_all_reviews_for_execution(
+ user_id: str,
+ review_decisions: dict[str, tuple[ReviewStatus, SafeJsonData | None, str | None]],
+) -> dict[str, PendingHumanReviewModel]:
+ """Process all pending reviews for an execution with approve/reject decisions.
+
+ Args:
+ user_id: User ID for ownership validation
+ review_decisions: Map of node_exec_id -> (status, reviewed_data, message)
+
+ Returns:
+ Dict of node_exec_id -> updated review model
+ """
+ if not review_decisions:
+ return {}
+
+ node_exec_ids = list(review_decisions.keys())
+
+ # Get all reviews for validation
+ reviews = await PendingHumanReview.prisma().find_many(
+ where={
+ "nodeExecId": {"in": node_exec_ids},
+ "userId": user_id,
+ "status": ReviewStatus.WAITING,
+ },
+ )
+
+ # Validate all reviews can be processed
+ if len(reviews) != len(node_exec_ids):
+ missing_ids = set(node_exec_ids) - {review.nodeExecId for review in reviews}
+ raise ValueError(
+ f"Reviews not found, access denied, or not in WAITING status: {', '.join(missing_ids)}"
+ )
+
+ # Create parallel update tasks
+ update_tasks = []
+
+ for review in reviews:
+ new_status, reviewed_data, message = review_decisions[review.nodeExecId]
+ has_data_changes = reviewed_data is not None and reviewed_data != review.payload
+
+ # Check edit permissions for actual data modifications
+ if has_data_changes and not review.editable:
+ raise ValueError(f"Review {review.nodeExecId} is not editable")
+
+ update_data: PendingHumanReviewUpdateInput = {
+ "status": new_status,
+ "reviewMessage": message,
+ "wasEdited": has_data_changes,
+ "reviewedAt": datetime.now(timezone.utc),
+ }
+
+ if has_data_changes:
+ update_data["payload"] = SafeJson(reviewed_data)
+
+ task = PendingHumanReview.prisma().update(
+ where={"nodeExecId": review.nodeExecId},
+ data=update_data,
+ )
+ update_tasks.append(task)
+
+ # Execute all updates in parallel and get updated reviews
+ updated_reviews = await asyncio.gather(*update_tasks)
+
+ # Note: Execution resumption is now handled at the API layer after ALL reviews
+ # for an execution are processed (both approved and rejected)
+
+ # Return as dict for easy access
+ return {
+ review.nodeExecId: PendingHumanReviewModel.from_db(review)
+ for review in updated_reviews
+ }
+
+
+async def update_review_processed_status(node_exec_id: str, processed: bool) -> None:
+ """Update the processed status of a review."""
+ await PendingHumanReview.prisma().update(
+ where={"nodeExecId": node_exec_id}, data={"processed": processed}
+ )
diff --git a/autogpt_platform/backend/backend/data/human_review_test.py b/autogpt_platform/backend/backend/data/human_review_test.py
new file mode 100644
index 0000000000..fe6c9057c1
--- /dev/null
+++ b/autogpt_platform/backend/backend/data/human_review_test.py
@@ -0,0 +1,376 @@
+import datetime
+from unittest.mock import AsyncMock, Mock
+
+import pytest
+import pytest_mock
+from prisma.enums import ReviewStatus
+
+from backend.data.human_review import (
+ get_or_create_human_review,
+ get_pending_review_by_node_exec_id,
+ get_pending_reviews_for_execution,
+ get_pending_reviews_for_user,
+ has_pending_reviews_for_graph_exec,
+ process_all_reviews_for_execution,
+)
+
+
+@pytest.fixture
+def sample_db_review():
+ """Create a sample database review object"""
+ mock_review = Mock()
+ mock_review.nodeExecId = "test_node_123"
+ mock_review.userId = "test_user"
+ mock_review.graphExecId = "test_graph_exec_456"
+ mock_review.graphId = "test_graph_789"
+ mock_review.graphVersion = 1
+ mock_review.payload = {"data": "test payload"}
+ mock_review.instructions = "Please review"
+ mock_review.editable = True
+ mock_review.status = ReviewStatus.WAITING
+ mock_review.reviewMessage = None
+ mock_review.wasEdited = False
+ mock_review.processed = False
+ mock_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
+ mock_review.updatedAt = None
+ mock_review.reviewedAt = None
+ return mock_review
+
+
+@pytest.mark.asyncio
+async def test_get_pending_review_by_node_exec_id_found(
+ mocker: pytest_mock.MockFixture,
+ sample_db_review,
+):
+ """Test finding an existing pending review"""
+ mock_find_first = mocker.patch(
+ "backend.data.human_review.PendingHumanReview.prisma"
+ )
+ mock_find_first.return_value.find_first = AsyncMock(return_value=sample_db_review)
+
+ result = await get_pending_review_by_node_exec_id("test_node_123", "test_user")
+
+ assert result is not None
+ assert result.node_exec_id == "test_node_123"
+ assert result.user_id == "test_user"
+ assert result.status == ReviewStatus.WAITING
+
+
+@pytest.mark.asyncio
+async def test_get_pending_review_by_node_exec_id_not_found(
+ mocker: pytest_mock.MockFixture,
+):
+ """Test when review is not found"""
+ mock_find_first = mocker.patch(
+ "backend.data.human_review.PendingHumanReview.prisma"
+ )
+ mock_find_first.return_value.find_first = AsyncMock(return_value=None)
+
+ result = await get_pending_review_by_node_exec_id("nonexistent", "test_user")
+
+ assert result is None
+
+
+@pytest.mark.asyncio
+async def test_get_or_create_human_review_new(
+ mocker: pytest_mock.MockFixture,
+ sample_db_review,
+):
+ """Test creating a new human review"""
+ # Mock the upsert to return a review still in WAITING status (not yet reviewed)
+ sample_db_review.status = ReviewStatus.WAITING
+ sample_db_review.processed = False
+
+ mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
+ mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review)
+
+ result = await get_or_create_human_review(
+ user_id="test_user",
+ node_exec_id="test_node_123",
+ graph_exec_id="test_graph_exec_456",
+ graph_id="test_graph_789",
+ graph_version=1,
+ input_data={"data": "test payload"},
+ message="Please review",
+ editable=True,
+ )
+
+ # Should return None for pending reviews (waiting for human input)
+ assert result is None
+
+
+@pytest.mark.asyncio
+async def test_get_or_create_human_review_approved(
+ mocker: pytest_mock.MockFixture,
+ sample_db_review,
+):
+ """Test retrieving an already approved review"""
+ # Set up review as already approved
+ sample_db_review.status = ReviewStatus.APPROVED
+ sample_db_review.processed = False
+ sample_db_review.reviewMessage = "Looks good"
+
+ mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
+ mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review)
+
+ result = await get_or_create_human_review(
+ user_id="test_user",
+ node_exec_id="test_node_123",
+ graph_exec_id="test_graph_exec_456",
+ graph_id="test_graph_789",
+ graph_version=1,
+ input_data={"data": "test payload"},
+ message="Please review",
+ editable=True,
+ )
+
+ # Should return the approved result
+ assert result is not None
+ assert result.status == ReviewStatus.APPROVED
+ assert result.data == {"data": "test payload"}
+ assert result.message == "Looks good"
+
+
+@pytest.mark.asyncio
+async def test_has_pending_reviews_for_graph_exec_true(
+ mocker: pytest_mock.MockFixture,
+):
+ """Test when there are pending reviews"""
+ mock_count = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
+ mock_count.return_value.count = AsyncMock(return_value=2)
+
+ result = await has_pending_reviews_for_graph_exec("test_graph_exec")
+
+ assert result is True
+
+
+@pytest.mark.asyncio
+async def test_has_pending_reviews_for_graph_exec_false(
+ mocker: pytest_mock.MockFixture,
+):
+ """Test when there are no pending reviews"""
+ mock_count = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
+ mock_count.return_value.count = AsyncMock(return_value=0)
+
+ result = await has_pending_reviews_for_graph_exec("test_graph_exec")
+
+ assert result is False
+
+
+@pytest.mark.asyncio
+async def test_get_pending_reviews_for_user(
+ mocker: pytest_mock.MockFixture,
+ sample_db_review,
+):
+ """Test getting pending reviews for a user with pagination"""
+ mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
+ mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review])
+
+ result = await get_pending_reviews_for_user("test_user", page=2, page_size=10)
+
+ assert len(result) == 1
+ assert result[0].node_exec_id == "test_node_123"
+
+ # Verify pagination parameters
+ call_args = mock_find_many.return_value.find_many.call_args
+ assert call_args.kwargs["skip"] == 10 # (page-1) * page_size = (2-1) * 10
+ assert call_args.kwargs["take"] == 10
+
+
+@pytest.mark.asyncio
+async def test_get_pending_reviews_for_execution(
+ mocker: pytest_mock.MockFixture,
+ sample_db_review,
+):
+ """Test getting pending reviews for specific execution"""
+ mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
+ mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review])
+
+ result = await get_pending_reviews_for_execution("test_graph_exec_456", "test_user")
+
+ assert len(result) == 1
+ assert result[0].graph_exec_id == "test_graph_exec_456"
+
+ # Verify it filters by execution and user
+ call_args = mock_find_many.return_value.find_many.call_args
+ where_clause = call_args.kwargs["where"]
+ assert where_clause["userId"] == "test_user"
+ assert where_clause["graphExecId"] == "test_graph_exec_456"
+ assert where_clause["status"] == ReviewStatus.WAITING
+
+
+@pytest.mark.asyncio
+async def test_process_all_reviews_for_execution_success(
+ mocker: pytest_mock.MockFixture,
+ sample_db_review,
+):
+ """Test successful processing of reviews for an execution"""
+ # Mock finding reviews
+ mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
+ mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review])
+
+ # Mock updating reviews
+ updated_review = Mock()
+ updated_review.nodeExecId = "test_node_123"
+ updated_review.userId = "test_user"
+ updated_review.graphExecId = "test_graph_exec_456"
+ updated_review.graphId = "test_graph_789"
+ updated_review.graphVersion = 1
+ updated_review.payload = {"data": "modified"}
+ updated_review.instructions = "Please review"
+ updated_review.editable = True
+ updated_review.status = ReviewStatus.APPROVED
+ updated_review.reviewMessage = "Approved"
+ updated_review.wasEdited = True
+ updated_review.processed = False
+ updated_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
+ updated_review.updatedAt = datetime.datetime.now(datetime.timezone.utc)
+ updated_review.reviewedAt = datetime.datetime.now(datetime.timezone.utc)
+ mock_update = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
+ mock_update.return_value.update = AsyncMock(return_value=updated_review)
+
+ # Mock gather to simulate parallel updates
+ mocker.patch(
+ "backend.data.human_review.asyncio.gather",
+ new=AsyncMock(return_value=[updated_review]),
+ )
+
+ result = await process_all_reviews_for_execution(
+ user_id="test_user",
+ review_decisions={
+ "test_node_123": (ReviewStatus.APPROVED, {"data": "modified"}, "Approved")
+ },
+ )
+
+ assert len(result) == 1
+ assert "test_node_123" in result
+ assert result["test_node_123"].status == ReviewStatus.APPROVED
+
+
+@pytest.mark.asyncio
+async def test_process_all_reviews_for_execution_validation_errors(
+ mocker: pytest_mock.MockFixture,
+):
+ """Test validation errors in process_all_reviews_for_execution"""
+ # Mock finding fewer reviews than requested (some not found)
+ mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
+ mock_find_many.return_value.find_many = AsyncMock(
+ return_value=[]
+ ) # No reviews found
+
+ with pytest.raises(ValueError, match="Reviews not found"):
+ await process_all_reviews_for_execution(
+ user_id="test_user",
+ review_decisions={
+ "nonexistent_node": (ReviewStatus.APPROVED, {"data": "test"}, "message")
+ },
+ )
+
+
+@pytest.mark.asyncio
+async def test_process_all_reviews_edit_permission_error(
+ mocker: pytest_mock.MockFixture,
+ sample_db_review,
+):
+ """Test editing non-editable review"""
+ # Set review as non-editable
+ sample_db_review.editable = False
+
+ # Mock finding reviews
+ mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
+ mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review])
+
+ with pytest.raises(ValueError, match="not editable"):
+ await process_all_reviews_for_execution(
+ user_id="test_user",
+ review_decisions={
+ "test_node_123": (
+ ReviewStatus.APPROVED,
+ {"data": "modified"},
+ "message",
+ )
+ },
+ )
+
+
+@pytest.mark.asyncio
+async def test_process_all_reviews_mixed_approval_rejection(
+ mocker: pytest_mock.MockFixture,
+ sample_db_review,
+):
+ """Test processing mixed approval and rejection decisions"""
+ # Create second review for rejection
+ second_review = Mock()
+ second_review.nodeExecId = "test_node_456"
+ second_review.userId = "test_user"
+ second_review.graphExecId = "test_graph_exec_456"
+ second_review.graphId = "test_graph_789"
+ second_review.graphVersion = 1
+ second_review.payload = {"data": "original"}
+ second_review.instructions = "Second review"
+ second_review.editable = True
+ second_review.status = ReviewStatus.WAITING
+ second_review.reviewMessage = None
+ second_review.wasEdited = False
+ second_review.processed = False
+ second_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
+ second_review.updatedAt = None
+ second_review.reviewedAt = None
+
+ # Mock finding reviews
+ mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma")
+ mock_find_many.return_value.find_many = AsyncMock(
+ return_value=[sample_db_review, second_review]
+ )
+
+ # Mock updating reviews
+ approved_review = Mock()
+ approved_review.nodeExecId = "test_node_123"
+ approved_review.userId = "test_user"
+ approved_review.graphExecId = "test_graph_exec_456"
+ approved_review.graphId = "test_graph_789"
+ approved_review.graphVersion = 1
+ approved_review.payload = {"data": "modified"}
+ approved_review.instructions = "Please review"
+ approved_review.editable = True
+ approved_review.status = ReviewStatus.APPROVED
+ approved_review.reviewMessage = "Approved"
+ approved_review.wasEdited = True
+ approved_review.processed = False
+ approved_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
+ approved_review.updatedAt = datetime.datetime.now(datetime.timezone.utc)
+ approved_review.reviewedAt = datetime.datetime.now(datetime.timezone.utc)
+
+ rejected_review = Mock()
+ rejected_review.nodeExecId = "test_node_456"
+ rejected_review.userId = "test_user"
+ rejected_review.graphExecId = "test_graph_exec_456"
+ rejected_review.graphId = "test_graph_789"
+ rejected_review.graphVersion = 1
+ rejected_review.payload = {"data": "original"}
+ rejected_review.instructions = "Please review"
+ rejected_review.editable = True
+ rejected_review.status = ReviewStatus.REJECTED
+ rejected_review.reviewMessage = "Rejected"
+ rejected_review.wasEdited = False
+ rejected_review.processed = False
+ rejected_review.createdAt = datetime.datetime.now(datetime.timezone.utc)
+ rejected_review.updatedAt = datetime.datetime.now(datetime.timezone.utc)
+ rejected_review.reviewedAt = datetime.datetime.now(datetime.timezone.utc)
+
+ mocker.patch(
+ "backend.data.human_review.asyncio.gather",
+ new=AsyncMock(return_value=[approved_review, rejected_review]),
+ )
+
+ result = await process_all_reviews_for_execution(
+ user_id="test_user",
+ review_decisions={
+ "test_node_123": (ReviewStatus.APPROVED, {"data": "modified"}, "Approved"),
+ "test_node_456": (ReviewStatus.REJECTED, None, "Rejected"),
+ },
+ )
+
+ assert len(result) == 2
+ assert "test_node_123" in result
+ assert "test_node_456" in result
diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py
index df581e0de4..27e8b01043 100644
--- a/autogpt_platform/backend/backend/executor/database.py
+++ b/autogpt_platform/backend/backend/executor/database.py
@@ -31,6 +31,11 @@ from backend.data.graph import (
get_node,
validate_graph_execution_permissions,
)
+from backend.data.human_review import (
+ get_or_create_human_review,
+ has_pending_reviews_for_graph_exec,
+ update_review_processed_status,
+)
from backend.data.notifications import (
clear_all_user_notification_batches,
create_or_add_to_user_notification_batch,
@@ -161,6 +166,11 @@ class DatabaseManager(AppService):
get_user_email_verification = _(get_user_email_verification)
get_user_notification_preference = _(get_user_notification_preference)
+ # Human In The Loop
+ get_or_create_human_review = _(get_or_create_human_review)
+ has_pending_reviews_for_graph_exec = _(has_pending_reviews_for_graph_exec)
+ update_review_processed_status = _(update_review_processed_status)
+
# Notifications - async
clear_all_user_notification_batches = _(clear_all_user_notification_batches)
create_or_add_to_user_notification_batch = _(
@@ -215,6 +225,9 @@ class DatabaseManagerClient(AppServiceClient):
# Block error monitoring
get_block_error_stats = _(d.get_block_error_stats)
+ # Human In The Loop
+ has_pending_reviews_for_graph_exec = _(d.has_pending_reviews_for_graph_exec)
+
# User Emails
get_user_email_by_id = _(d.get_user_email_by_id)
@@ -256,6 +269,10 @@ class DatabaseManagerAsyncClient(AppServiceClient):
get_execution_kv_data = d.get_execution_kv_data
set_execution_kv_data = d.set_execution_kv_data
+ # Human In The Loop
+ get_or_create_human_review = d.get_or_create_human_review
+ update_review_processed_status = d.update_review_processed_status
+
# User Comms
get_active_user_ids_in_timerange = d.get_active_user_ids_in_timerange
get_user_email_by_id = d.get_user_email_by_id
diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py
index f04102a950..06ad06e6dc 100644
--- a/autogpt_platform/backend/backend/executor/manager.py
+++ b/autogpt_platform/backend/backend/executor/manager.py
@@ -164,6 +164,7 @@ async def execute_node(
user_id = data.user_id
graph_exec_id = data.graph_exec_id
graph_id = data.graph_id
+ graph_version = data.graph_version
node_exec_id = data.node_exec_id
node_id = data.node_id
node_block = node.block
@@ -204,6 +205,7 @@ async def execute_node(
# Inject extra execution arguments for the blocks via kwargs
extra_exec_kwargs: dict = {
"graph_id": graph_id,
+ "graph_version": graph_version,
"node_id": node_id,
"graph_exec_id": graph_exec_id,
"node_exec_id": node_exec_id,
@@ -284,6 +286,7 @@ async def _enqueue_next_nodes(
user_id: str,
graph_exec_id: str,
graph_id: str,
+ graph_version: int,
log_metadata: LogMetadata,
nodes_input_masks: Optional[NodesInputMasks],
user_context: UserContext,
@@ -301,6 +304,7 @@ async def _enqueue_next_nodes(
user_id=user_id,
graph_exec_id=graph_exec_id,
graph_id=graph_id,
+ graph_version=graph_version,
node_exec_id=node_exec_id,
node_id=node_id,
block_id=block_id,
@@ -334,17 +338,14 @@ async def _enqueue_next_nodes(
# Or the same input to be consumed multiple times.
async with synchronized(f"upsert_input-{next_node_id}-{graph_exec_id}"):
# Add output data to the earliest incomplete execution, or create a new one.
- next_node_exec_id, next_node_input = await db_client.upsert_execution_input(
+ next_node_exec, next_node_input = await db_client.upsert_execution_input(
node_id=next_node_id,
graph_exec_id=graph_exec_id,
input_name=next_input_name,
input_data=next_data,
)
- await async_update_node_execution_status(
- db_client=db_client,
- exec_id=next_node_exec_id,
- status=ExecutionStatus.INCOMPLETE,
- )
+ next_node_exec_id = next_node_exec.node_exec_id
+ await send_async_execution_update(next_node_exec)
# Complete missing static input pins data using the last execution input.
static_link_names = {
@@ -660,6 +661,16 @@ class ExecutionProcessor:
log_metadata.info(
f"⚙️ Graph execution #{graph_exec.graph_exec_id} is already running, continuing where it left off."
)
+ elif exec_meta.status == ExecutionStatus.REVIEW:
+ exec_meta.status = ExecutionStatus.RUNNING
+ log_metadata.info(
+ f"⚙️ Graph execution #{graph_exec.graph_exec_id} was waiting for review, resuming execution."
+ )
+ update_graph_execution_state(
+ db_client=db_client,
+ graph_exec_id=graph_exec.graph_exec_id,
+ status=ExecutionStatus.RUNNING,
+ )
elif exec_meta.status == ExecutionStatus.FAILED:
exec_meta.status = ExecutionStatus.RUNNING
log_metadata.info(
@@ -697,19 +708,21 @@ class ExecutionProcessor:
raise status
exec_meta.status = status
- # Activity status handling
- activity_response = asyncio.run_coroutine_threadsafe(
- generate_activity_status_for_execution(
- graph_exec_id=graph_exec.graph_exec_id,
- graph_id=graph_exec.graph_id,
- graph_version=graph_exec.graph_version,
- execution_stats=exec_stats,
- db_client=get_db_async_client(),
- user_id=graph_exec.user_id,
- execution_status=status,
- ),
- self.node_execution_loop,
- ).result(timeout=60.0)
+ if status in [ExecutionStatus.COMPLETED, ExecutionStatus.FAILED]:
+ activity_response = asyncio.run_coroutine_threadsafe(
+ generate_activity_status_for_execution(
+ graph_exec_id=graph_exec.graph_exec_id,
+ graph_id=graph_exec.graph_id,
+ graph_version=graph_exec.graph_version,
+ execution_stats=exec_stats,
+ db_client=get_db_async_client(),
+ user_id=graph_exec.user_id,
+ execution_status=status,
+ ),
+ self.node_execution_loop,
+ ).result(timeout=60.0)
+ else:
+ activity_response = None
if activity_response is not None:
exec_stats.activity_status = activity_response["activity_status"]
exec_stats.correctness_score = activity_response["correctness_score"]
@@ -845,6 +858,7 @@ class ExecutionProcessor:
ExecutionStatus.RUNNING,
ExecutionStatus.QUEUED,
ExecutionStatus.TERMINATED,
+ ExecutionStatus.REVIEW,
],
):
node_entry = node_exec.to_node_execution_entry(graph_exec.user_context)
@@ -853,6 +867,7 @@ class ExecutionProcessor:
# ------------------------------------------------------------
# Main dispatch / polling loop -----------------------------
# ------------------------------------------------------------
+
while not execution_queue.empty():
if cancel.is_set():
break
@@ -1006,7 +1021,12 @@ class ExecutionProcessor:
elif error is not None:
execution_status = ExecutionStatus.FAILED
else:
- execution_status = ExecutionStatus.COMPLETED
+ if db_client.has_pending_reviews_for_graph_exec(
+ graph_exec.graph_exec_id
+ ):
+ execution_status = ExecutionStatus.REVIEW
+ else:
+ execution_status = ExecutionStatus.COMPLETED
if error:
execution_stats.error = str(error) or type(error).__name__
@@ -1142,6 +1162,7 @@ class ExecutionProcessor:
user_id=graph_exec.user_id,
graph_exec_id=graph_exec.graph_exec_id,
graph_id=graph_exec.graph_id,
+ graph_version=graph_exec.graph_version,
log_metadata=log_metadata,
nodes_input_masks=nodes_input_masks,
user_context=graph_exec.user_context,
diff --git a/autogpt_platform/backend/backend/executor/utils.py b/autogpt_platform/backend/backend/executor/utils.py
index b11ea45cf5..f8c6da8546 100644
--- a/autogpt_platform/backend/backend/executor/utils.py
+++ b/autogpt_platform/backend/backend/executor/utils.py
@@ -30,6 +30,7 @@ from backend.data.execution import (
GraphExecutionWithNodes,
NodesInputMasks,
UserContext,
+ get_graph_execution,
)
from backend.data.graph import GraphModel, Node
from backend.data.model import CredentialsMetaInput
@@ -764,6 +765,7 @@ async def add_graph_execution(
nodes_input_masks: Optional[NodesInputMasks] = None,
parent_graph_exec_id: Optional[str] = None,
is_sub_graph: bool = False,
+ graph_exec_id: Optional[str] = None,
) -> GraphExecutionWithNodes:
"""
Adds a graph execution to the queue and returns the execution entry.
@@ -779,32 +781,48 @@ async def add_graph_execution(
nodes_input_masks: Node inputs to use in the execution.
parent_graph_exec_id: The ID of the parent graph execution (for nested executions).
is_sub_graph: Whether this is a sub-graph execution.
+ graph_exec_id: If provided, resume this existing execution instead of creating a new one.
Returns:
GraphExecutionEntry: The entry for the graph execution.
Raises:
ValueError: If the graph is not found or if there are validation errors.
+ NotFoundError: If graph_exec_id is provided but execution is not found.
"""
if prisma.is_connected():
edb = execution_db
else:
edb = get_database_manager_async_client()
- graph, starting_nodes_input, compiled_nodes_input_masks = (
- await validate_and_construct_node_execution_input(
- graph_id=graph_id,
+ # Get or create the graph execution
+ if graph_exec_id:
+ # Resume existing execution
+ graph_exec = await get_graph_execution(
user_id=user_id,
- graph_inputs=inputs or {},
- graph_version=graph_version,
- graph_credentials_inputs=graph_credentials_inputs,
- nodes_input_masks=nodes_input_masks,
- is_sub_graph=is_sub_graph,
+ execution_id=graph_exec_id,
+ include_node_executions=True,
+ )
+
+ if not graph_exec:
+ raise NotFoundError(f"Graph execution #{graph_exec_id} not found.")
+
+ # Use existing execution's compiled input masks
+ compiled_nodes_input_masks = graph_exec.nodes_input_masks or {}
+
+ logger.info(f"Resuming graph execution #{graph_exec.id} for graph #{graph_id}")
+ else:
+ # Create new execution
+ graph, starting_nodes_input, compiled_nodes_input_masks = (
+ await validate_and_construct_node_execution_input(
+ graph_id=graph_id,
+ user_id=user_id,
+ graph_inputs=inputs or {},
+ graph_version=graph_version,
+ graph_credentials_inputs=graph_credentials_inputs,
+ nodes_input_masks=nodes_input_masks,
+ is_sub_graph=is_sub_graph,
+ )
)
- )
- graph_exec = None
- try:
- # Sanity check: running add_graph_execution with the properties of
- # the graph_exec created here should create the same execution again.
graph_exec = await edb.create_graph_execution(
user_id=user_id,
graph_id=graph_id,
@@ -817,16 +835,20 @@ async def add_graph_execution(
parent_graph_exec_id=parent_graph_exec_id,
)
+ logger.info(
+ f"Created graph execution #{graph_exec.id} for graph "
+ f"#{graph_id} with {len(starting_nodes_input)} starting nodes"
+ )
+
+ # Common path: publish to queue and update status
+ try:
graph_exec_entry = graph_exec.to_graph_execution_entry(
user_context=await get_user_context(user_id),
compiled_nodes_input_masks=compiled_nodes_input_masks,
parent_graph_exec_id=parent_graph_exec_id,
)
- logger.info(
- f"Created graph execution #{graph_exec.id} for graph "
- f"#{graph_id} with {len(starting_nodes_input)} starting nodes. "
- f"Now publishing to execution queue."
- )
+
+ logger.info(f"Publishing execution {graph_exec.id} to execution queue")
exec_queue = await get_async_execution_queue()
await exec_queue.publish_message(
diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py
index 15e7485d5d..556903571c 100644
--- a/autogpt_platform/backend/backend/server/rest_api.py
+++ b/autogpt_platform/backend/backend/server/rest_api.py
@@ -29,6 +29,7 @@ import backend.server.v2.admin.store_admin_routes
import backend.server.v2.builder
import backend.server.v2.builder.routes
import backend.server.v2.chat.routes as chat_routes
+import backend.server.v2.executions.review.routes
import backend.server.v2.library.db
import backend.server.v2.library.model
import backend.server.v2.library.routes
@@ -274,6 +275,11 @@ app.include_router(
tags=["v2", "admin"],
prefix="/api/executions",
)
+app.include_router(
+ backend.server.v2.executions.review.routes.router,
+ tags=["v2", "executions", "review"],
+ prefix="/api/review",
+)
app.include_router(
backend.server.v2.library.routes.router, tags=["v2"], prefix="/api/library"
)
diff --git a/autogpt_platform/backend/backend/server/v2/builder/db.py b/autogpt_platform/backend/backend/server/v2/builder/db.py
index ed5938b218..c3f6ac88ab 100644
--- a/autogpt_platform/backend/backend/server/v2/builder/db.py
+++ b/autogpt_platform/backend/backend/server/v2/builder/db.py
@@ -7,6 +7,7 @@ import backend.data.block
from backend.blocks import load_all_blocks
from backend.blocks.llm import LlmModel
from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema
+from backend.data.db import query_raw_with_schema
from backend.integrations.providers import ProviderName
from backend.server.v2.builder.model import (
BlockCategoryResponse,
@@ -340,13 +341,13 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
# Calculate the cutoff timestamp
timestamp_threshold = datetime.now(timezone.utc) - timedelta(days=30)
- results = await prisma.get_client().query_raw(
+ results = await query_raw_with_schema(
"""
SELECT
agent_node."agentBlockId" AS block_id,
COUNT(execution.id) AS execution_count
- FROM "AgentNodeExecution" execution
- JOIN "AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
+ FROM {schema_prefix}"AgentNodeExecution" execution
+ JOIN {schema_prefix}"AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
WHERE execution."endedTime" >= $1::timestamp
GROUP BY agent_node."agentBlockId"
ORDER BY execution_count DESC;
diff --git a/autogpt_platform/backend/backend/server/v2/executions/review/model.py b/autogpt_platform/backend/backend/server/v2/executions/review/model.py
new file mode 100644
index 0000000000..74f72fe1ff
--- /dev/null
+++ b/autogpt_platform/backend/backend/server/v2/executions/review/model.py
@@ -0,0 +1,204 @@
+import json
+from datetime import datetime
+from typing import TYPE_CHECKING, Any, Dict, List, Union
+
+from prisma.enums import ReviewStatus
+from pydantic import BaseModel, Field, field_validator, model_validator
+
+if TYPE_CHECKING:
+ from prisma.models import PendingHumanReview
+
+# SafeJson-compatible type alias for review data
+SafeJsonData = Union[Dict[str, Any], List[Any], str, int, float, bool, None]
+
+
+class PendingHumanReviewModel(BaseModel):
+ """Response model for pending human review data.
+
+ Represents a human review request that is awaiting user action.
+ Contains all necessary information for a user to review and approve
+ or reject data from a Human-in-the-Loop block execution.
+
+ Attributes:
+        was_edited / processed: Whether the data was modified during review, and whether the result has been consumed by the execution engine
+ user_id: ID of the user who must perform the review
+ node_exec_id: ID of the node execution that created this review
+ graph_exec_id: ID of the graph execution containing the node
+ graph_id: ID of the graph template being executed
+ graph_version: Version number of the graph template
+ payload: The actual data payload awaiting review
+ instructions: Instructions or message for the reviewer
+ editable: Whether the reviewer can edit the data
+ status: Current review status (WAITING, APPROVED, or REJECTED)
+ review_message: Optional message from the reviewer
+ created_at: Timestamp when review was created
+ updated_at: Timestamp when review was last modified
+ reviewed_at: Timestamp when review was completed (if applicable)
+ """
+
+ node_exec_id: str = Field(description="Node execution ID (primary key)")
+ user_id: str = Field(description="User ID associated with the review")
+ graph_exec_id: str = Field(description="Graph execution ID")
+ graph_id: str = Field(description="Graph ID")
+ graph_version: int = Field(description="Graph version")
+ payload: SafeJsonData = Field(description="The actual data payload awaiting review")
+ instructions: str | None = Field(
+ description="Instructions or message for the reviewer", default=None
+ )
+ editable: bool = Field(description="Whether the reviewer can edit the data")
+ status: ReviewStatus = Field(description="Review status")
+ review_message: str | None = Field(
+ description="Optional message from the reviewer", default=None
+ )
+ was_edited: bool | None = Field(
+ description="Whether the data was modified during review", default=None
+ )
+ processed: bool = Field(
+ description="Whether the review result has been processed by the execution engine",
+ default=False,
+ )
+ created_at: datetime = Field(description="When the review was created")
+ updated_at: datetime | None = Field(
+ description="When the review was last updated", default=None
+ )
+ reviewed_at: datetime | None = Field(
+ description="When the review was completed", default=None
+ )
+
+ @classmethod
+ def from_db(cls, review: "PendingHumanReview") -> "PendingHumanReviewModel":
+ """
+ Convert a database model to a response model.
+
+ Uses the new flat database structure with separate columns for
+ payload, instructions, and editable flag.
+
+        Maps the database's camelCase column names to this model's snake_case fields.
+ """
+ return cls(
+ node_exec_id=review.nodeExecId,
+ user_id=review.userId,
+ graph_exec_id=review.graphExecId,
+ graph_id=review.graphId,
+ graph_version=review.graphVersion,
+ payload=review.payload,
+ instructions=review.instructions,
+ editable=review.editable,
+ status=review.status,
+ review_message=review.reviewMessage,
+ was_edited=review.wasEdited,
+ processed=review.processed,
+ created_at=review.createdAt,
+ updated_at=review.updatedAt,
+ reviewed_at=review.reviewedAt,
+ )
+
+
+class ReviewItem(BaseModel):
+ """Single review item for processing."""
+
+ node_exec_id: str = Field(description="Node execution ID to review")
+ approved: bool = Field(
+ description="Whether this review is approved (True) or rejected (False)"
+ )
+ message: str | None = Field(
+ None, description="Optional review message", max_length=2000
+ )
+ reviewed_data: SafeJsonData | None = Field(
+ None, description="Optional edited data (ignored if approved=False)"
+ )
+
+ @field_validator("reviewed_data")
+ @classmethod
+ def validate_reviewed_data(cls, v):
+ """Validate that reviewed_data is safe and properly structured."""
+ if v is None:
+ return v
+
+ # Validate SafeJson compatibility
+ def validate_safejson_type(obj):
+ """Ensure object only contains SafeJson compatible types."""
+ if obj is None:
+ return True
+ elif isinstance(obj, (str, int, float, bool)):
+ return True
+ elif isinstance(obj, dict):
+ return all(
+ isinstance(k, str) and validate_safejson_type(v)
+ for k, v in obj.items()
+ )
+ elif isinstance(obj, list):
+ return all(validate_safejson_type(item) for item in obj)
+ else:
+ return False
+
+ if not validate_safejson_type(v):
+ raise ValueError("reviewed_data contains non-SafeJson compatible types")
+
+ # Validate data size to prevent DoS attacks
+ try:
+ json_str = json.dumps(v)
+ if len(json_str) > 1000000: # 1MB limit
+ raise ValueError("reviewed_data is too large (max 1MB)")
+ except (TypeError, ValueError) as e:
+ raise ValueError(f"reviewed_data must be JSON serializable: {str(e)}")
+
+ # Ensure no dangerous nested structures (prevent infinite recursion)
+ def check_depth(obj, max_depth=10, current_depth=0):
+ """Recursively check object nesting depth to prevent stack overflow attacks."""
+ if current_depth > max_depth:
+ raise ValueError("reviewed_data has excessive nesting depth")
+
+ if isinstance(obj, dict):
+ for value in obj.values():
+ check_depth(value, max_depth, current_depth + 1)
+ elif isinstance(obj, list):
+ for item in obj:
+ check_depth(item, max_depth, current_depth + 1)
+
+ check_depth(v)
+ return v
+
+ @field_validator("message")
+ @classmethod
+ def validate_message(cls, v):
+ """Validate and sanitize review message."""
+ if v is not None and len(v.strip()) == 0:
+ return None
+ return v
+
+
+class ReviewRequest(BaseModel):
+ """Request model for processing ALL pending reviews for an execution.
+
+ This request must include ALL pending reviews for a graph execution.
+ Each review will be either approved (with optional data modifications)
+ or rejected (data ignored). The execution will resume only after ALL reviews are processed.
+ """
+
+ reviews: List[ReviewItem] = Field(
+ description="All reviews with their approval status, data, and messages"
+ )
+
+ @model_validator(mode="after")
+ def validate_review_completeness(self):
+ """Validate that we have at least one review to process and no duplicates."""
+ if not self.reviews:
+ raise ValueError("At least one review must be provided")
+
+ # Ensure no duplicate node_exec_ids
+ node_ids = [review.node_exec_id for review in self.reviews]
+ if len(node_ids) != len(set(node_ids)):
+ duplicates = [nid for nid in set(node_ids) if node_ids.count(nid) > 1]
+ raise ValueError(f"Duplicate review IDs found: {', '.join(duplicates)}")
+
+ return self
+
+
+class ReviewResponse(BaseModel):
+ """Response from review endpoint."""
+
+ approved_count: int = Field(description="Number of reviews successfully approved")
+ rejected_count: int = Field(description="Number of reviews successfully rejected")
+ failed_count: int = Field(description="Number of reviews that failed processing")
+ error: str | None = Field(None, description="Error message if operation failed")
diff --git a/autogpt_platform/backend/backend/server/v2/executions/review/review_routes_test.py b/autogpt_platform/backend/backend/server/v2/executions/review/review_routes_test.py
new file mode 100644
index 0000000000..3bc0dff923
--- /dev/null
+++ b/autogpt_platform/backend/backend/server/v2/executions/review/review_routes_test.py
@@ -0,0 +1,459 @@
+import datetime
+
+import fastapi
+import fastapi.testclient
+import pytest
+import pytest_mock
+from prisma.enums import ReviewStatus
+from pytest_snapshot.plugin import Snapshot
+
+from backend.server.v2.executions.review.model import PendingHumanReviewModel
+from backend.server.v2.executions.review.routes import router
+
+# Using a fixed timestamp for reproducible tests
+FIXED_NOW = datetime.datetime(2023, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
+
+app = fastapi.FastAPI()
+app.include_router(router, prefix="/api/review")
+
+client = fastapi.testclient.TestClient(app)
+
+
+@pytest.fixture(autouse=True)
+def setup_app_auth(mock_jwt_user):
+ """Setup auth overrides for all tests in this module"""
+ from autogpt_libs.auth.jwt_utils import get_jwt_payload
+
+ app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"]
+ yield
+ app.dependency_overrides.clear()
+
+
+@pytest.fixture
+def sample_pending_review() -> PendingHumanReviewModel:
+ """Create a sample pending review for testing"""
+ return PendingHumanReviewModel(
+ node_exec_id="test_node_123",
+ user_id="test_user",
+ graph_exec_id="test_graph_exec_456",
+ graph_id="test_graph_789",
+ graph_version=1,
+ payload={"data": "test payload", "value": 42},
+ instructions="Please review this data",
+ editable=True,
+ status=ReviewStatus.WAITING,
+ review_message=None,
+ was_edited=None,
+ processed=False,
+ created_at=FIXED_NOW,
+ updated_at=None,
+ reviewed_at=None,
+ )
+
+
+def test_get_pending_reviews_empty(
+ mocker: pytest_mock.MockFixture,
+ snapshot: Snapshot,
+) -> None:
+ """Test getting pending reviews when none exist"""
+ mock_get_reviews = mocker.patch(
+ "backend.server.v2.executions.review.routes.get_pending_reviews_for_user"
+ )
+ mock_get_reviews.return_value = []
+
+ response = client.get("/api/review/pending")
+
+ assert response.status_code == 200
+ assert response.json() == []
+ mock_get_reviews.assert_called_once_with("test_user", 1, 25)
+
+
+def test_get_pending_reviews_with_data(
+ mocker: pytest_mock.MockFixture,
+ sample_pending_review: PendingHumanReviewModel,
+ snapshot: Snapshot,
+) -> None:
+ """Test getting pending reviews with data"""
+ mock_get_reviews = mocker.patch(
+ "backend.server.v2.executions.review.routes.get_pending_reviews_for_user"
+ )
+ mock_get_reviews.return_value = [sample_pending_review]
+
+ response = client.get("/api/review/pending?page=2&page_size=10")
+
+ assert response.status_code == 200
+ data = response.json()
+ assert len(data) == 1
+ assert data[0]["node_exec_id"] == "test_node_123"
+ assert data[0]["status"] == "WAITING"
+ mock_get_reviews.assert_called_once_with("test_user", 2, 10)
+
+
+def test_get_pending_reviews_for_execution_success(
+ mocker: pytest_mock.MockFixture,
+ sample_pending_review: PendingHumanReviewModel,
+ snapshot: Snapshot,
+) -> None:
+ """Test getting pending reviews for specific execution"""
+ mock_get_graph_execution = mocker.patch(
+ "backend.server.v2.executions.review.routes.get_graph_execution_meta"
+ )
+ mock_get_graph_execution.return_value = {
+ "id": "test_graph_exec_456",
+ "user_id": "test_user",
+ }
+
+ mock_get_reviews = mocker.patch(
+ "backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
+ )
+ mock_get_reviews.return_value = [sample_pending_review]
+
+ response = client.get("/api/review/execution/test_graph_exec_456")
+
+ assert response.status_code == 200
+ data = response.json()
+ assert len(data) == 1
+ assert data[0]["graph_exec_id"] == "test_graph_exec_456"
+
+
+def test_get_pending_reviews_for_execution_access_denied(
+ mocker: pytest_mock.MockFixture,
+) -> None:
+ """Test access denied when user doesn't own the execution"""
+ mock_get_graph_execution = mocker.patch(
+ "backend.server.v2.executions.review.routes.get_graph_execution_meta"
+ )
+ mock_get_graph_execution.return_value = None
+
+ response = client.get("/api/review/execution/test_graph_exec_456")
+
+ assert response.status_code == 403
+ assert "Access denied" in response.json()["detail"]
+
+
+def test_process_review_action_approve_success(
+ mocker: pytest_mock.MockFixture,
+ sample_pending_review: PendingHumanReviewModel,
+) -> None:
+ """Test successful review approval"""
+ # Mock the validation functions
+ mock_get_pending_review = mocker.patch(
+ "backend.data.human_review.get_pending_review_by_node_exec_id"
+ )
+ mock_get_pending_review.return_value = sample_pending_review
+
+ mock_get_reviews_for_execution = mocker.patch(
+ "backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
+ )
+ mock_get_reviews_for_execution.return_value = [sample_pending_review]
+
+ mock_process_all_reviews = mocker.patch(
+ "backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
+ )
+ mock_process_all_reviews.return_value = {"test_node_123": sample_pending_review}
+
+ mock_has_pending = mocker.patch(
+ "backend.data.human_review.has_pending_reviews_for_graph_exec"
+ )
+ mock_has_pending.return_value = False
+
+ mocker.patch("backend.executor.utils.add_graph_execution")
+
+ request_data = {
+ "approved_reviews": [
+ {
+ "node_exec_id": "test_node_123",
+ "message": "Looks good",
+ "reviewed_data": {"data": "modified payload", "value": 50},
+ }
+ ],
+ "rejected_review_ids": [],
+ }
+
+ response = client.post("/api/review/action", json=request_data)
+
+ assert response.status_code == 200
+ data = response.json()
+ assert data["approved_count"] == 1
+ assert data["rejected_count"] == 0
+ assert data["failed_count"] == 0
+ assert data["error"] is None
+
+
+def test_process_review_action_reject_success(
+ mocker: pytest_mock.MockFixture,
+ sample_pending_review: PendingHumanReviewModel,
+) -> None:
+ """Test successful review rejection"""
+ # Mock the validation functions
+ mock_get_pending_review = mocker.patch(
+ "backend.data.human_review.get_pending_review_by_node_exec_id"
+ )
+ mock_get_pending_review.return_value = sample_pending_review
+
+ mock_get_reviews_for_execution = mocker.patch(
+ "backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
+ )
+ mock_get_reviews_for_execution.return_value = [sample_pending_review]
+
+ mock_process_all_reviews = mocker.patch(
+ "backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
+ )
+ rejected_review = PendingHumanReviewModel(
+ node_exec_id="test_node_123",
+ user_id="test_user",
+ graph_exec_id="test_graph_exec_456",
+ graph_id="test_graph_789",
+ graph_version=1,
+ payload={"data": "test payload"},
+ instructions="Please review",
+ editable=True,
+ status=ReviewStatus.REJECTED,
+ review_message="Rejected by user",
+ was_edited=False,
+ processed=False,
+ created_at=FIXED_NOW,
+ updated_at=None,
+ reviewed_at=FIXED_NOW,
+ )
+ mock_process_all_reviews.return_value = {"test_node_123": rejected_review}
+
+ mock_has_pending = mocker.patch(
+ "backend.data.human_review.has_pending_reviews_for_graph_exec"
+ )
+ mock_has_pending.return_value = False
+
+ request_data = {"approved_reviews": [], "rejected_review_ids": ["test_node_123"]}
+
+ response = client.post("/api/review/action", json=request_data)
+
+ assert response.status_code == 200
+ data = response.json()
+ assert data["approved_count"] == 0
+ assert data["rejected_count"] == 1
+ assert data["failed_count"] == 0
+ assert data["error"] is None
+
+
+def test_process_review_action_mixed_success(
+ mocker: pytest_mock.MockFixture,
+ sample_pending_review: PendingHumanReviewModel,
+) -> None:
+ """Test mixed approve/reject operations"""
+ # Create a second review
+ second_review = PendingHumanReviewModel(
+ node_exec_id="test_node_456",
+ user_id="test_user",
+ graph_exec_id="test_graph_exec_456",
+ graph_id="test_graph_789",
+ graph_version=1,
+ payload={"data": "second payload"},
+ instructions="Second review",
+ editable=False,
+ status=ReviewStatus.WAITING,
+ review_message=None,
+ was_edited=None,
+ processed=False,
+ created_at=FIXED_NOW,
+ updated_at=None,
+ reviewed_at=None,
+ )
+
+ # Mock the validation functions
+ mock_get_pending_review = mocker.patch(
+ "backend.data.human_review.get_pending_review_by_node_exec_id"
+ )
+ mock_get_pending_review.side_effect = lambda node_id, user_id: (
+ sample_pending_review if node_id == "test_node_123" else second_review
+ )
+
+ mock_get_reviews_for_execution = mocker.patch(
+ "backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
+ )
+ mock_get_reviews_for_execution.return_value = [sample_pending_review, second_review]
+
+ mock_process_all_reviews = mocker.patch(
+ "backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
+ )
+ # Create approved version of first review
+ approved_review = PendingHumanReviewModel(
+ node_exec_id="test_node_123",
+ user_id="test_user",
+ graph_exec_id="test_graph_exec_456",
+ graph_id="test_graph_789",
+ graph_version=1,
+ payload={"data": "modified"},
+ instructions="Please review",
+ editable=True,
+ status=ReviewStatus.APPROVED,
+ review_message="Approved",
+ was_edited=True,
+ processed=False,
+ created_at=FIXED_NOW,
+ updated_at=None,
+ reviewed_at=FIXED_NOW,
+ )
+ # Create rejected version of second review
+ rejected_review = PendingHumanReviewModel(
+ node_exec_id="test_node_456",
+ user_id="test_user",
+ graph_exec_id="test_graph_exec_456",
+ graph_id="test_graph_789",
+ graph_version=1,
+ payload={"data": "second payload"},
+ instructions="Second review",
+ editable=False,
+ status=ReviewStatus.REJECTED,
+ review_message="Rejected by user",
+ was_edited=False,
+ processed=False,
+ created_at=FIXED_NOW,
+ updated_at=None,
+ reviewed_at=FIXED_NOW,
+ )
+ mock_process_all_reviews.return_value = {
+ "test_node_123": approved_review,
+ "test_node_456": rejected_review,
+ }
+
+ mock_has_pending = mocker.patch(
+ "backend.data.human_review.has_pending_reviews_for_graph_exec"
+ )
+ mock_has_pending.return_value = False
+
+ request_data = {
+ "approved_reviews": [
+ {
+ "node_exec_id": "test_node_123",
+ "message": "Approved",
+ "reviewed_data": {"data": "modified"},
+ }
+ ],
+ "rejected_review_ids": ["test_node_456"],
+ }
+
+ response = client.post("/api/review/action", json=request_data)
+
+ assert response.status_code == 200
+ data = response.json()
+ assert data["approved_count"] == 1
+ assert data["rejected_count"] == 1
+ assert data["failed_count"] == 0
+ assert data["error"] is None
+
+
+def test_process_review_action_empty_request(
+ mocker: pytest_mock.MockFixture,
+) -> None:
+ """Test error when no reviews provided"""
+ request_data = {"approved_reviews": [], "rejected_review_ids": []}
+
+ response = client.post("/api/review/action", json=request_data)
+
+ assert response.status_code == 400
+ assert "At least one review must be provided" in response.json()["detail"]
+
+
+def test_process_review_action_review_not_found(
+ mocker: pytest_mock.MockFixture,
+) -> None:
+ """Test error when review is not found"""
+ mock_get_pending_review = mocker.patch(
+ "backend.data.human_review.get_pending_review_by_node_exec_id"
+ )
+ mock_get_pending_review.return_value = None
+
+ request_data = {
+ "approved_reviews": [
+ {
+ "node_exec_id": "nonexistent_node",
+ "message": "Test",
+ }
+ ],
+ "rejected_review_ids": [],
+ }
+
+ response = client.post("/api/review/action", json=request_data)
+
+ assert response.status_code == 403
+ assert "not found or access denied" in response.json()["detail"]
+
+
+def test_process_review_action_partial_failure(
+ mocker: pytest_mock.MockFixture,
+ sample_pending_review: PendingHumanReviewModel,
+) -> None:
+ """Test handling of partial failures in review processing"""
+ # Mock successful validation
+ mock_get_pending_review = mocker.patch(
+ "backend.data.human_review.get_pending_review_by_node_exec_id"
+ )
+ mock_get_pending_review.return_value = sample_pending_review
+
+ mock_get_reviews_for_execution = mocker.patch(
+ "backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
+ )
+ mock_get_reviews_for_execution.return_value = [sample_pending_review]
+
+ # Mock partial failure in processing
+ mock_process_all_reviews = mocker.patch(
+ "backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
+ )
+ mock_process_all_reviews.side_effect = ValueError("Some reviews failed validation")
+
+ request_data = {
+ "approved_reviews": [
+ {
+ "node_exec_id": "test_node_123",
+ "message": "Test",
+ }
+ ],
+ "rejected_review_ids": [],
+ }
+
+ response = client.post("/api/review/action", json=request_data)
+
+ assert response.status_code == 200
+ data = response.json()
+ assert data["approved_count"] == 0
+ assert data["rejected_count"] == 0
+ assert data["failed_count"] == 1
+ assert "Failed to process reviews" in data["error"]
+
+
+def test_process_review_action_complete_failure(
+ mocker: pytest_mock.MockFixture,
+ sample_pending_review: PendingHumanReviewModel,
+) -> None:
+ """Test complete failure scenario"""
+ # Mock successful validation
+ mock_get_pending_review = mocker.patch(
+ "backend.data.human_review.get_pending_review_by_node_exec_id"
+ )
+ mock_get_pending_review.return_value = sample_pending_review
+
+ mock_get_reviews_for_execution = mocker.patch(
+ "backend.server.v2.executions.review.routes.get_pending_reviews_for_execution"
+ )
+ mock_get_reviews_for_execution.return_value = [sample_pending_review]
+
+ # Mock complete failure in processing
+ mock_process_all_reviews = mocker.patch(
+ "backend.server.v2.executions.review.routes.process_all_reviews_for_execution"
+ )
+ mock_process_all_reviews.side_effect = Exception("Database error")
+
+ request_data = {
+ "approved_reviews": [
+ {
+ "node_exec_id": "test_node_123",
+ "message": "Test",
+ }
+ ],
+ "rejected_review_ids": [],
+ }
+
+ response = client.post("/api/review/action", json=request_data)
+
+ assert response.status_code == 500
+ assert "error" in response.json()["detail"].lower()
diff --git a/autogpt_platform/backend/backend/server/v2/executions/review/routes.py b/autogpt_platform/backend/backend/server/v2/executions/review/routes.py
new file mode 100644
index 0000000000..a8afe03635
--- /dev/null
+++ b/autogpt_platform/backend/backend/server/v2/executions/review/routes.py
@@ -0,0 +1,194 @@
+import logging
+from typing import List
+
+import autogpt_libs.auth as autogpt_auth_lib
+from fastapi import APIRouter, HTTPException, Query, Security, status
+from prisma.enums import ReviewStatus
+
+from backend.data.execution import get_graph_execution_meta
+from backend.data.human_review import (
+ get_pending_reviews_for_execution,
+ get_pending_reviews_for_user,
+ has_pending_reviews_for_graph_exec,
+ process_all_reviews_for_execution,
+)
+from backend.executor.utils import add_graph_execution
+from backend.server.v2.executions.review.model import (
+ PendingHumanReviewModel,
+ ReviewRequest,
+ ReviewResponse,
+)
+
+logger = logging.getLogger(__name__)
+
+
+router = APIRouter(
+ tags=["executions", "review", "private"],
+ dependencies=[Security(autogpt_auth_lib.requires_user)],
+)
+
+
+@router.get(
+ "/pending",
+ summary="Get Pending Reviews",
+ response_model=List[PendingHumanReviewModel],
+ responses={
+ 200: {"description": "List of pending reviews"},
+ 500: {"description": "Server error", "content": {"application/json": {}}},
+ },
+)
+async def list_pending_reviews(
+ user_id: str = Security(autogpt_auth_lib.get_user_id),
+ page: int = Query(1, ge=1, description="Page number (1-indexed)"),
+ page_size: int = Query(25, ge=1, le=100, description="Number of reviews per page"),
+) -> List[PendingHumanReviewModel]:
+ """Get all pending reviews for the current user.
+
+ Retrieves all reviews with status "WAITING" that belong to the authenticated user.
+ Results are ordered by creation time (newest first).
+
+ Args:
+ user_id: Authenticated user ID from security dependency
+
+ Returns:
+ List of pending review objects with status converted to typed literals
+
+ Raises:
+ HTTPException: If authentication fails or database error occurs
+
+ Note:
+ Reviews with invalid status values are logged as warnings but excluded
+ from results rather than failing the entire request.
+ """
+
+ return await get_pending_reviews_for_user(user_id, page, page_size)
+
+
+@router.get(
+ "/execution/{graph_exec_id}",
+ summary="Get Pending Reviews for Execution",
+ response_model=List[PendingHumanReviewModel],
+ responses={
+ 200: {"description": "List of pending reviews for the execution"},
+ 400: {"description": "Invalid graph execution ID"},
+ 403: {"description": "Access denied to graph execution"},
+ 500: {"description": "Server error", "content": {"application/json": {}}},
+ },
+)
+async def list_pending_reviews_for_execution(
+ graph_exec_id: str,
+ user_id: str = Security(autogpt_auth_lib.get_user_id),
+) -> List[PendingHumanReviewModel]:
+ """Get all pending reviews for a specific graph execution.
+
+ Retrieves all reviews with status "WAITING" for the specified graph execution
+ that belong to the authenticated user. Results are ordered by creation time
+ (oldest first) to preserve review order within the execution.
+
+ Args:
+ graph_exec_id: ID of the graph execution to get reviews for
+ user_id: Authenticated user ID from security dependency
+
+ Returns:
+ List of pending review objects for the specified execution
+
+ Raises:
+ HTTPException:
+ - 403: If user doesn't own the graph execution
+ - 500: If authentication fails or database error occurs
+
+ Note:
+ Only returns reviews owned by the authenticated user for security.
+ Reviews with invalid status are excluded with warning logs.
+ """
+
+ # Verify user owns the graph execution before returning reviews
+ graph_exec = await get_graph_execution_meta(
+ user_id=user_id, execution_id=graph_exec_id
+ )
+ if not graph_exec:
+ raise HTTPException(
+ status_code=status.HTTP_403_FORBIDDEN,
+ detail="Access denied to graph execution",
+ )
+
+ return await get_pending_reviews_for_execution(graph_exec_id, user_id)
+
+
+@router.post("/action", response_model=ReviewResponse)
+async def process_review_action(
+ request: ReviewRequest,
+ user_id: str = Security(autogpt_auth_lib.get_user_id),
+) -> ReviewResponse:
+ """Process reviews with approve or reject actions."""
+
+ # Collect all node exec IDs from the request
+ all_request_node_ids = {review.node_exec_id for review in request.reviews}
+
+ if not all_request_node_ids:
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ detail="At least one review must be provided",
+ )
+
+ # Build review decisions map
+ review_decisions = {}
+ for review in request.reviews:
+ if review.approved:
+ review_decisions[review.node_exec_id] = (
+ ReviewStatus.APPROVED,
+ review.reviewed_data,
+ review.message,
+ )
+ else:
+ review_decisions[review.node_exec_id] = (
+ ReviewStatus.REJECTED,
+ None,
+ review.message,
+ )
+
+ # Process all reviews
+ updated_reviews = await process_all_reviews_for_execution(
+ user_id=user_id,
+ review_decisions=review_decisions,
+ )
+
+ # Count results
+ approved_count = sum(
+ 1
+ for review in updated_reviews.values()
+ if review.status == ReviewStatus.APPROVED
+ )
+ rejected_count = sum(
+ 1
+ for review in updated_reviews.values()
+ if review.status == ReviewStatus.REJECTED
+ )
+
+ # Resume execution if we processed some reviews
+ if updated_reviews:
+ # Get graph execution ID from any processed review
+ first_review = next(iter(updated_reviews.values()))
+ graph_exec_id = first_review.graph_exec_id
+
+ # Check if any pending reviews remain for this execution
+ still_has_pending = await has_pending_reviews_for_graph_exec(graph_exec_id)
+
+ if not still_has_pending:
+ # Resume execution
+ try:
+ await add_graph_execution(
+ graph_id=first_review.graph_id,
+ user_id=user_id,
+ graph_exec_id=graph_exec_id,
+ )
+ logger.info(f"Resumed execution {graph_exec_id}")
+ except Exception as e:
+ logger.error(f"Failed to resume execution {graph_exec_id}: {str(e)}")
+
+ return ReviewResponse(
+ approved_count=approved_count,
+ rejected_count=rejected_count,
+ failed_count=0,
+ error=None,
+ )
diff --git a/autogpt_platform/backend/backend/server/v2/library/routes/agents.py b/autogpt_platform/backend/backend/server/v2/library/routes/agents.py
index 1bdf255ce5..eeea9d8fb6 100644
--- a/autogpt_platform/backend/backend/server/v2/library/routes/agents.py
+++ b/autogpt_platform/backend/backend/server/v2/library/routes/agents.py
@@ -22,7 +22,9 @@ router = APIRouter(
@router.get(
"",
summary="List Library Agents",
+ response_model=library_model.LibraryAgentResponse,
responses={
+ 200: {"description": "List of library agents"},
500: {"description": "Server error", "content": {"application/json": {}}},
},
)
@@ -155,7 +157,12 @@ async def get_library_agent_by_graph_id(
@router.get(
"/marketplace/{store_listing_version_id}",
summary="Get Agent By Store ID",
- tags=["store, library"],
+ tags=["store", "library"],
+ response_model=library_model.LibraryAgent | None,
+ responses={
+ 200: {"description": "Library agent found"},
+ 404: {"description": "Agent not found"},
+ },
)
async def get_library_agent_by_store_listing_version_id(
store_listing_version_id: str,
diff --git a/autogpt_platform/backend/backend/server/v2/store/db.py b/autogpt_platform/backend/backend/server/v2/store/db.py
index f6f0f812fe..fe782e8eea 100644
--- a/autogpt_platform/backend/backend/server/v2/store/db.py
+++ b/autogpt_platform/backend/backend/server/v2/store/db.py
@@ -12,7 +12,7 @@ import prisma.types
import backend.server.v2.store.exceptions
import backend.server.v2.store.model
-from backend.data.db import transaction
+from backend.data.db import query_raw_with_schema, transaction
from backend.data.graph import (
GraphMeta,
GraphModel,
@@ -120,7 +120,7 @@ async def get_store_agents(
is_available,
updated_at,
ts_rank_cd(search, query) AS rank
- FROM "StoreAgent",
+ FROM {{schema_prefix}}"StoreAgent",
plainto_tsquery('english', $1) AS query
WHERE {sql_where_clause}
AND search @@ query
@@ -131,22 +131,18 @@ async def get_store_agents(
# Count query for pagination - only uses search term parameter
count_query = f"""
SELECT COUNT(*) as count
- FROM "StoreAgent",
+ FROM {{schema_prefix}}"StoreAgent",
plainto_tsquery('english', $1) AS query
WHERE {sql_where_clause}
AND search @@ query
"""
# Execute both queries with parameters
- agents = await prisma.client.get_client().query_raw(
- typing.cast(typing.LiteralString, sql_query), *params
- )
+ agents = await query_raw_with_schema(sql_query, *params)
# For count, use params without pagination (last 2 params)
count_params = params[:-2]
- count_result = await prisma.client.get_client().query_raw(
- typing.cast(typing.LiteralString, count_query), *count_params
- )
+ count_result = await query_raw_with_schema(count_query, *count_params)
total = count_result[0]["count"] if count_result else 0
total_pages = (total + page_size - 1) // page_size
diff --git a/autogpt_platform/backend/backend/util/test.py b/autogpt_platform/backend/backend/util/test.py
index 13b3365446..0a2015254b 100644
--- a/autogpt_platform/backend/backend/util/test.py
+++ b/autogpt_platform/backend/backend/util/test.py
@@ -140,6 +140,7 @@ async def execute_block_test(block: Block):
"graph_exec_id": str(uuid.uuid4()),
"node_exec_id": str(uuid.uuid4()),
"user_id": str(uuid.uuid4()),
+ "graph_version": 1, # Default version for tests
"user_context": UserContext(timezone="UTC"), # Default for tests
}
input_model = cast(type[BlockSchema], block.input_schema)
diff --git a/autogpt_platform/backend/migrations/20251117102522_add_human_in_the_loop_table/migration.sql b/autogpt_platform/backend/migrations/20251117102522_add_human_in_the_loop_table/migration.sql
new file mode 100644
index 0000000000..5a2cc2f722
--- /dev/null
+++ b/autogpt_platform/backend/migrations/20251117102522_add_human_in_the_loop_table/migration.sql
@@ -0,0 +1,44 @@
+-- CreateEnum
+CREATE TYPE "ReviewStatus" AS ENUM ('WAITING', 'APPROVED', 'REJECTED');
+
+-- AlterEnum
+ALTER TYPE "AgentExecutionStatus" ADD VALUE 'REVIEW';
+
+-- CreateTable
+CREATE TABLE "PendingHumanReview" (
+ "nodeExecId" TEXT NOT NULL,
+ "userId" TEXT NOT NULL,
+ "graphExecId" TEXT NOT NULL,
+ "graphId" TEXT NOT NULL,
+ "graphVersion" INTEGER NOT NULL,
+ "payload" JSONB NOT NULL,
+ "instructions" TEXT,
+ "editable" BOOLEAN NOT NULL DEFAULT true,
+ "status" "ReviewStatus" NOT NULL DEFAULT 'WAITING',
+ "reviewMessage" TEXT,
+ "wasEdited" BOOLEAN,
+ "processed" BOOLEAN NOT NULL DEFAULT false,
+ "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ "updatedAt" TIMESTAMP(3),
+ "reviewedAt" TIMESTAMP(3),
+
+ CONSTRAINT "PendingHumanReview_pkey" PRIMARY KEY ("nodeExecId")
+);
+
+-- CreateIndex
+CREATE INDEX "PendingHumanReview_userId_status_idx" ON "PendingHumanReview"("userId", "status");
+
+-- CreateIndex
+CREATE INDEX "PendingHumanReview_graphExecId_status_idx" ON "PendingHumanReview"("graphExecId", "status");
+
+-- CreateIndex
+CREATE UNIQUE INDEX "PendingHumanReview_nodeExecId_key" ON "PendingHumanReview"("nodeExecId");
+
+-- AddForeignKey
+ALTER TABLE "PendingHumanReview" ADD CONSTRAINT "PendingHumanReview_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "PendingHumanReview" ADD CONSTRAINT "PendingHumanReview_nodeExecId_fkey" FOREIGN KEY ("nodeExecId") REFERENCES "AgentNodeExecution"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "PendingHumanReview" ADD CONSTRAINT "PendingHumanReview_graphExecId_fkey" FOREIGN KEY ("graphExecId") REFERENCES "AgentGraphExecution"("id") ON DELETE CASCADE ON UPDATE CASCADE;
diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma
index e8755d99ab..ca015a3cb9 100644
--- a/autogpt_platform/backend/schema.prisma
+++ b/autogpt_platform/backend/schema.prisma
@@ -59,6 +59,7 @@ model User {
APIKeys APIKey[]
IntegrationWebhooks IntegrationWebhook[]
NotificationBatches UserNotificationBatch[]
+ PendingHumanReviews PendingHumanReview[]
}
enum OnboardingStep {
@@ -351,6 +352,7 @@ enum AgentExecutionStatus {
COMPLETED
TERMINATED
FAILED
+ REVIEW
}
// This model describes the execution of an AgentGraph.
@@ -393,6 +395,8 @@ model AgentGraphExecution {
shareToken String? @unique
sharedAt DateTime?
+ PendingHumanReviews PendingHumanReview[]
+
@@index([agentGraphId, agentGraphVersion])
@@index([userId, isDeleted, createdAt])
@@index([createdAt])
@@ -423,6 +427,8 @@ model AgentNodeExecution {
stats Json?
+ PendingHumanReview PendingHumanReview?
+
@@index([agentGraphExecutionId, agentNodeId, executionStatus])
@@index([agentNodeId, executionStatus])
@@index([addedTime, queuedTime])
@@ -464,6 +470,39 @@ model AgentNodeExecutionKeyValueData {
@@id([userId, key])
}
+enum ReviewStatus {
+ WAITING
+ APPROVED
+ REJECTED
+}
+
+// Pending human reviews for Human-in-the-loop blocks
+model PendingHumanReview {
+ nodeExecId String @id
+ userId String
+ graphExecId String
+ graphId String
+ graphVersion Int
+ payload Json // The actual payload data to be reviewed
+ instructions String? // Instructions/message for the reviewer
+ editable Boolean @default(true) // Whether the reviewer can edit the data
+ status ReviewStatus @default(WAITING)
+ reviewMessage String? // Optional message from the reviewer
+ wasEdited Boolean? // Whether the data was modified during review
+ processed Boolean @default(false) // Whether the review result has been processed by the execution engine
+ createdAt DateTime @default(now())
+ updatedAt DateTime? @updatedAt
+ reviewedAt DateTime?
+
+ User User @relation(fields: [userId], references: [id], onDelete: Cascade)
+ NodeExecution AgentNodeExecution @relation(fields: [nodeExecId], references: [id], onDelete: Cascade)
+ GraphExecution AgentGraphExecution @relation(fields: [graphExecId], references: [id], onDelete: Cascade)
+
+ @@unique([nodeExecId]) // One pending review per node execution
+ @@index([userId, status])
+ @@index([graphExecId, status])
+}
+
// Webhook that is registered with a provider and propagates to one or more nodes
model IntegrationWebhook {
id String @id @default(uuid())
diff --git a/autogpt_platform/frontend/next.config.mjs b/autogpt_platform/frontend/next.config.mjs
index d4df72a643..d4595990a2 100644
--- a/autogpt_platform/frontend/next.config.mjs
+++ b/autogpt_platform/frontend/next.config.mjs
@@ -34,7 +34,8 @@ const nextConfig = {
},
],
},
- output: "standalone",
+ // Vercel has its own deployment mechanism and doesn't need standalone mode
+ ...(process.env.VERCEL ? {} : { output: "standalone" }),
transpilePackages: ["geist"],
};
diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json
index 52ba28064f..29a28059a6 100644
--- a/autogpt_platform/frontend/package.json
+++ b/autogpt_platform/frontend/package.json
@@ -54,7 +54,7 @@
"@rjsf/core": "5.24.13",
"@rjsf/utils": "5.24.13",
"@rjsf/validator-ajv8": "5.24.13",
- "@sentry/nextjs": "10.22.0",
+ "@sentry/nextjs": "10.27.0",
"@supabase/ssr": "0.7.0",
"@supabase/supabase-js": "2.78.0",
"@tanstack/react-query": "5.90.6",
@@ -134,7 +134,7 @@
"axe-playwright": "2.2.2",
"chromatic": "13.3.3",
"concurrently": "9.2.1",
- "cross-env": "7.0.3",
+ "cross-env": "10.1.0",
"eslint": "8.57.1",
"eslint-config-next": "15.5.2",
"eslint-plugin-storybook": "9.1.5",
diff --git a/autogpt_platform/frontend/pnpm-lock.yaml b/autogpt_platform/frontend/pnpm-lock.yaml
index 848fd3a88d..406fcb212f 100644
--- a/autogpt_platform/frontend/pnpm-lock.yaml
+++ b/autogpt_platform/frontend/pnpm-lock.yaml
@@ -87,8 +87,8 @@ importers:
specifier: 5.24.13
version: 5.24.13(@rjsf/utils@5.24.13(react@18.3.1))
'@sentry/nextjs':
- specifier: 10.22.0
- version: 10.22.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9))
+ specifier: 10.27.0
+ version: 10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9))
'@supabase/ssr':
specifier: 0.7.0
version: 0.7.0(@supabase/supabase-js@2.78.0)
@@ -322,8 +322,8 @@ importers:
specifier: 9.2.1
version: 9.2.1
cross-env:
- specifier: 7.0.3
- version: 7.0.3
+ specifier: 10.1.0
+ version: 10.1.0
eslint:
specifier: 8.57.1
version: 8.57.1
@@ -1001,6 +1001,9 @@ packages:
'@emotion/unitless@0.8.1':
resolution: {integrity: sha512-KOEGMu6dmJZtpadb476IsZBclKvILjopjUii3V+7MnXIQCYh8W3NgNcgwo21n9LXZX6EDIKvqfjYxXebDwxKmQ==}
+ '@epic-web/invariant@1.0.0':
+ resolution: {integrity: sha512-lrTPqgvfFQtR/eY/qkIzp98OGdNJu0m5ji3q/nJI8v3SXkRKEnWiOxMmbvcSoAIzv/cGiuvRy57k4suKQSAdwA==}
+
'@esbuild/aix-ppc64@0.25.11':
resolution: {integrity: sha512-Xt1dOL13m8u0WE8iplx9Ibbm+hFAO0GsU2P34UNoDGvZYkY8ifSiy6Zuc1lYxfG7svWE2fzqCUmFp5HCn51gJg==}
engines: {node: '>=18'}
@@ -1681,186 +1684,176 @@ packages:
'@open-draft/until@2.1.0':
resolution: {integrity: sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==}
- '@opentelemetry/api-logs@0.204.0':
- resolution: {integrity: sha512-DqxY8yoAaiBPivoJD4UtgrMS8gEmzZ5lnaxzPojzLVHBGqPxgWm4zcuvcUHZiqQ6kRX2Klel2r9y8cA2HAtqpw==}
+ '@opentelemetry/api-logs@0.208.0':
+ resolution: {integrity: sha512-CjruKY9V6NMssL/T1kAFgzosF1v9o6oeN+aX5JB/C/xPNtmgIJqcXHG7fA82Ou1zCpWGl4lROQUKwUNE1pMCyg==}
engines: {node: '>=8.0.0'}
- '@opentelemetry/api-logs@0.57.2':
- resolution: {integrity: sha512-uIX52NnTM0iBh84MShlpouI7UKqkZ7MrUszTmaypHBu4r7NofznSnQRfJ+uUeDtQDj6w8eFGg5KBLDAwAPz1+A==}
- engines: {node: '>=14'}
-
'@opentelemetry/api@1.9.0':
resolution: {integrity: sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==}
engines: {node: '>=8.0.0'}
- '@opentelemetry/context-async-hooks@2.1.0':
- resolution: {integrity: sha512-zOyetmZppnwTyPrt4S7jMfXiSX9yyfF0hxlA8B5oo2TtKl+/RGCy7fi4DrBfIf3lCPrkKsRBWZZD7RFojK7FDg==}
+ '@opentelemetry/context-async-hooks@2.2.0':
+ resolution: {integrity: sha512-qRkLWiUEZNAmYapZ7KGS5C4OmBLcP/H2foXeOEaowYCR0wi89fHejrfYfbuLVCMLp/dWZXKvQusdbUEZjERfwQ==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': '>=1.0.0 <1.10.0'
- '@opentelemetry/core@2.1.0':
- resolution: {integrity: sha512-RMEtHsxJs/GiHHxYT58IY57UXAQTuUnZVco6ymDEqTNlJKTimM4qPUPVe8InNFyBjhHBEAx4k3Q8LtNayBsbUQ==}
+ '@opentelemetry/core@2.2.0':
+ resolution: {integrity: sha512-FuabnnUm8LflnieVxs6eP7Z383hgQU4W1e3KJS6aOG3RxWxcHyBxH8fDMHNgu/gFx/M2jvTOW/4/PHhLz6bjWw==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': '>=1.0.0 <1.10.0'
- '@opentelemetry/instrumentation-amqplib@0.51.0':
- resolution: {integrity: sha512-XGmjYwjVRktD4agFnWBWQXo9SiYHKBxR6Ag3MLXwtLE4R99N3a08kGKM5SC1qOFKIELcQDGFEFT9ydXMH00Luw==}
+ '@opentelemetry/instrumentation-amqplib@0.55.0':
+ resolution: {integrity: sha512-5ULoU8p+tWcQw5PDYZn8rySptGSLZHNX/7srqo2TioPnAAcvTy6sQFQXsNPrAnyRRtYGMetXVyZUy5OaX1+IfA==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-connect@0.48.0':
- resolution: {integrity: sha512-OMjc3SFL4pC16PeK+tDhwP7MRvDPalYCGSvGqUhX5rASkI2H0RuxZHOWElYeXkV0WP+70Gw6JHWac/2Zqwmhdw==}
+ '@opentelemetry/instrumentation-connect@0.52.0':
+ resolution: {integrity: sha512-GXPxfNB5szMbV3I9b7kNWSmQBoBzw7MT0ui6iU/p+NIzVx3a06Ri2cdQO7tG9EKb4aKSLmfX9Cw5cKxXqX6Ohg==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-dataloader@0.22.0':
- resolution: {integrity: sha512-bXnTcwtngQsI1CvodFkTemrrRSQjAjZxqHVc+CJZTDnidT0T6wt3jkKhnsjU/Kkkc0lacr6VdRpCu2CUWa0OKw==}
+ '@opentelemetry/instrumentation-dataloader@0.26.0':
+ resolution: {integrity: sha512-P2BgnFfTOarZ5OKPmYfbXfDFjQ4P9WkQ1Jji7yH5/WwB6Wm/knynAoA1rxbjWcDlYupFkyT0M1j6XLzDzy0aCA==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-express@0.53.0':
- resolution: {integrity: sha512-r/PBafQmFYRjuxLYEHJ3ze1iBnP2GDA1nXOSS6E02KnYNZAVjj6WcDA1MSthtdAUUK0XnotHvvWM8/qz7DMO5A==}
+ '@opentelemetry/instrumentation-express@0.57.0':
+ resolution: {integrity: sha512-HAdx/o58+8tSR5iW+ru4PHnEejyKrAy9fYFhlEI81o10nYxrGahnMAHWiSjhDC7UQSY3I4gjcPgSKQz4rm/asg==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-fs@0.24.0':
- resolution: {integrity: sha512-HjIxJ6CBRD770KNVaTdMXIv29Sjz4C1kPCCK5x1Ujpc6SNnLGPqUVyJYZ3LUhhnHAqdbrl83ogVWjCgeT4Q0yw==}
+ '@opentelemetry/instrumentation-fs@0.28.0':
+ resolution: {integrity: sha512-FFvg8fq53RRXVBRHZViP+EMxMR03tqzEGpuq55lHNbVPyFklSVfQBN50syPhK5UYYwaStx0eyCtHtbRreusc5g==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-generic-pool@0.48.0':
- resolution: {integrity: sha512-TLv/On8pufynNR+pUbpkyvuESVASZZKMlqCm4bBImTpXKTpqXaJJ3o/MUDeMlM91rpen+PEv2SeyOKcHCSlgag==}
+ '@opentelemetry/instrumentation-generic-pool@0.52.0':
+ resolution: {integrity: sha512-ISkNcv5CM2IwvsMVL31Tl61/p2Zm2I2NAsYq5SSBgOsOndT0TjnptjufYVScCnD5ZLD1tpl4T3GEYULLYOdIdQ==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-graphql@0.52.0':
- resolution: {integrity: sha512-3fEJ8jOOMwopvldY16KuzHbRhPk8wSsOTSF0v2psmOCGewh6ad+ZbkTx/xyUK9rUdUMWAxRVU0tFpj4Wx1vkPA==}
+ '@opentelemetry/instrumentation-graphql@0.56.0':
+ resolution: {integrity: sha512-IPvNk8AFoVzTAM0Z399t34VDmGDgwT6rIqCUug8P9oAGerl2/PEIYMPOl/rerPGu+q8gSWdmbFSjgg7PDVRd3Q==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-hapi@0.51.0':
- resolution: {integrity: sha512-qyf27DaFNL1Qhbo/da+04MSCw982B02FhuOS5/UF+PMhM61CcOiu7fPuXj8TvbqyReQuJFljXE6UirlvoT/62g==}
+ '@opentelemetry/instrumentation-hapi@0.55.0':
+ resolution: {integrity: sha512-prqAkRf9e4eEpy4G3UcR32prKE8NLNlA90TdEU1UsghOTg0jUvs40Jz8LQWFEs5NbLbXHYGzB4CYVkCI8eWEVQ==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-http@0.204.0':
- resolution: {integrity: sha512-1afJYyGRA4OmHTv0FfNTrTAzoEjPQUYgd+8ih/lX0LlZBnGio/O80vxA0lN3knsJPS7FiDrsDrWq25K7oAzbkw==}
+ '@opentelemetry/instrumentation-http@0.208.0':
+ resolution: {integrity: sha512-rhmK46DRWEbQQB77RxmVXGyjs6783crXCnFjYQj+4tDH/Kpv9Rbg3h2kaNyp5Vz2emF1f9HOQQvZoHzwMWOFZQ==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-ioredis@0.52.0':
- resolution: {integrity: sha512-rUvlyZwI90HRQPYicxpDGhT8setMrlHKokCtBtZgYxQWRF5RBbG4q0pGtbZvd7kyseuHbFpA3I/5z7M8b/5ywg==}
+ '@opentelemetry/instrumentation-ioredis@0.56.0':
+ resolution: {integrity: sha512-XSWeqsd3rKSsT3WBz/JKJDcZD4QYElZEa0xVdX8f9dh4h4QgXhKRLorVsVkK3uXFbC2sZKAS2Ds+YolGwD83Dg==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-kafkajs@0.14.0':
- resolution: {integrity: sha512-kbB5yXS47dTIdO/lfbbXlzhvHFturbux4EpP0+6H78Lk0Bn4QXiZQW7rmZY1xBCY16mNcCb8Yt0mhz85hTnSVA==}
+ '@opentelemetry/instrumentation-kafkajs@0.18.0':
+ resolution: {integrity: sha512-KCL/1HnZN5zkUMgPyOxfGjLjbXjpd4odDToy+7c+UsthIzVLFf99LnfIBE8YSSrYE4+uS7OwJMhvhg3tWjqMBg==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-knex@0.49.0':
- resolution: {integrity: sha512-NKsRRT27fbIYL4Ix+BjjP8h4YveyKc+2gD6DMZbr5R5rUeDqfC8+DTfIt3c3ex3BIc5Vvek4rqHnN7q34ZetLQ==}
+ '@opentelemetry/instrumentation-knex@0.53.0':
+ resolution: {integrity: sha512-xngn5cH2mVXFmiT1XfQ1aHqq1m4xb5wvU6j9lSgLlihJ1bXzsO543cpDwjrZm2nMrlpddBf55w8+bfS4qDh60g==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-koa@0.52.0':
- resolution: {integrity: sha512-JJSBYLDx/mNSy8Ibi/uQixu2rH0bZODJa8/cz04hEhRaiZQoeJ5UrOhO/mS87IdgVsHrnBOsZ6vDu09znupyuA==}
+ '@opentelemetry/instrumentation-koa@0.57.0':
+ resolution: {integrity: sha512-3JS8PU/D5E3q295mwloU2v7c7/m+DyCqdu62BIzWt+3u9utjxC9QS7v6WmUNuoDN3RM+Q+D1Gpj13ERo+m7CGg==}
+ engines: {node: ^18.19.0 || >=20.6.0}
+ peerDependencies:
+ '@opentelemetry/api': ^1.9.0
+
+ '@opentelemetry/instrumentation-lru-memoizer@0.53.0':
+ resolution: {integrity: sha512-LDwWz5cPkWWr0HBIuZUjslyvijljTwmwiItpMTHujaULZCxcYE9eU44Qf/pbVC8TulT0IhZi+RoGvHKXvNhysw==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-lru-memoizer@0.49.0':
- resolution: {integrity: sha512-ctXu+O/1HSadAxtjoEg2w307Z5iPyLOMM8IRNwjaKrIpNAthYGSOanChbk1kqY6zU5CrpkPHGdAT6jk8dXiMqw==}
+ '@opentelemetry/instrumentation-mongodb@0.61.0':
+ resolution: {integrity: sha512-OV3i2DSoY5M/pmLk+68xr5RvkHU8DRB3DKMzYJdwDdcxeLs62tLbkmRyqJZsYf3Ht7j11rq35pHOWLuLzXL7pQ==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-mongodb@0.57.0':
- resolution: {integrity: sha512-KD6Rg0KSHWDkik+qjIOWoksi1xqSpix8TSPfquIK1DTmd9OTFb5PHmMkzJe16TAPVEuElUW8gvgP59cacFcrMQ==}
+ '@opentelemetry/instrumentation-mongoose@0.55.0':
+ resolution: {integrity: sha512-5afj0HfF6aM6Nlqgu6/PPHFk8QBfIe3+zF9FGpX76jWPS0/dujoEYn82/XcLSaW5LPUDW8sni+YeK0vTBNri+w==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-mongoose@0.51.0':
- resolution: {integrity: sha512-gwWaAlhhV2By7XcbyU3DOLMvzsgeaymwP/jktDC+/uPkCmgB61zurwqOQdeiRq9KAf22Y2dtE5ZLXxytJRbEVA==}
+ '@opentelemetry/instrumentation-mysql2@0.55.0':
+ resolution: {integrity: sha512-0cs8whQG55aIi20gnK8B7cco6OK6N+enNhW0p5284MvqJ5EPi+I1YlWsWXgzv/V2HFirEejkvKiI4Iw21OqDWg==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-mysql2@0.51.0':
- resolution: {integrity: sha512-zT2Wg22Xn43RyfU3NOUmnFtb5zlDI0fKcijCj9AcK9zuLZ4ModgtLXOyBJSSfO+hsOCZSC1v/Fxwj+nZJFdzLQ==}
+ '@opentelemetry/instrumentation-mysql@0.54.0':
+ resolution: {integrity: sha512-bqC1YhnwAeWmRzy1/Xf9cDqxNG2d/JDkaxnqF5N6iJKN1eVWI+vg7NfDkf52/Nggp3tl1jcC++ptC61BD6738A==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-mysql@0.50.0':
- resolution: {integrity: sha512-duKAvMRI3vq6u9JwzIipY9zHfikN20bX05sL7GjDeLKr2qV0LQ4ADtKST7KStdGcQ+MTN5wghWbbVdLgNcB3rA==}
+ '@opentelemetry/instrumentation-pg@0.61.0':
+ resolution: {integrity: sha512-UeV7KeTnRSM7ECHa3YscoklhUtTQPs6V6qYpG283AB7xpnPGCUCUfECFT9jFg6/iZOQTt3FHkB1wGTJCNZEvPw==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-pg@0.57.0':
- resolution: {integrity: sha512-dWLGE+r5lBgm2A8SaaSYDE3OKJ/kwwy5WLyGyzor8PLhUL9VnJRiY6qhp4njwhnljiLtzeffRtG2Mf/YyWLeTw==}
+ '@opentelemetry/instrumentation-redis@0.57.0':
+ resolution: {integrity: sha512-bCxTHQFXzrU3eU1LZnOZQ3s5LURxQPDlU3/upBzlWY77qOI1GZuGofazj3jtzjctMJeBEJhNwIFEgRPBX1kp/Q==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-redis@0.53.0':
- resolution: {integrity: sha512-WUHV8fr+8yo5RmzyU7D5BIE1zwiaNQcTyZPwtxlfr7px6NYYx7IIpSihJK7WA60npWynfxxK1T67RAVF0Gdfjg==}
+ '@opentelemetry/instrumentation-tedious@0.27.0':
+ resolution: {integrity: sha512-jRtyUJNZppPBjPae4ZjIQ2eqJbcRaRfJkr0lQLHFmOU/no5A6e9s1OHLd5XZyZoBJ/ymngZitanyRRA5cniseA==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation-tedious@0.23.0':
- resolution: {integrity: sha512-3TMTk/9VtlRonVTaU4tCzbg4YqW+Iq/l5VnN2e5whP6JgEg/PKfrGbqQ+CxQWNLfLaQYIUgEZqAn5gk/inh1uQ==}
- engines: {node: ^18.19.0 || >=20.6.0}
- peerDependencies:
- '@opentelemetry/api': ^1.3.0
-
- '@opentelemetry/instrumentation-undici@0.15.0':
- resolution: {integrity: sha512-sNFGA/iCDlVkNjzTzPRcudmI11vT/WAfAguRdZY9IspCw02N4WSC72zTuQhSMheh2a1gdeM9my1imnKRvEEvEg==}
+ '@opentelemetry/instrumentation-undici@0.19.0':
+ resolution: {integrity: sha512-Pst/RhR61A2OoZQZkn6OLpdVpXp6qn3Y92wXa6umfJe9rV640r4bc6SWvw4pPN6DiQqPu2c8gnSSZPDtC6JlpQ==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.7.0
- '@opentelemetry/instrumentation@0.204.0':
- resolution: {integrity: sha512-vV5+WSxktzoMP8JoYWKeopChy6G3HKk4UQ2hESCRDUUTZqQ3+nM3u8noVG0LmNfRWwcFBnbZ71GKC7vaYYdJ1g==}
+ '@opentelemetry/instrumentation@0.208.0':
+ resolution: {integrity: sha512-Eju0L4qWcQS+oXxi6pgh7zvE2byogAkcsVv0OjHF/97iOz1N/aKE6etSGowYkie+YA1uo6DNwdSxaaNnLvcRlA==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.3.0
- '@opentelemetry/instrumentation@0.57.2':
- resolution: {integrity: sha512-BdBGhQBh8IjZ2oIIX6F2/Q3LKm/FDDKi6ccYKcBTeilh6SNdNKveDOLk73BkSJjQLJk6qe4Yh+hHw1UPhCDdrg==}
- engines: {node: '>=14'}
- peerDependencies:
- '@opentelemetry/api': ^1.3.0
-
- '@opentelemetry/redis-common@0.38.0':
- resolution: {integrity: sha512-4Wc0AWURII2cfXVVoZ6vDqK+s5n4K5IssdrlVrvGsx6OEOKdghKtJZqXAHWFiZv4nTDLH2/2fldjIHY8clMOjQ==}
+ '@opentelemetry/redis-common@0.38.2':
+ resolution: {integrity: sha512-1BCcU93iwSRZvDAgwUxC/DV4T/406SkMfxGqu5ojc3AvNI+I9GhV7v0J1HljsczuuhcnFLYqD5VmwVXfCGHzxA==}
engines: {node: ^18.19.0 || >=20.6.0}
- '@opentelemetry/resources@2.1.0':
- resolution: {integrity: sha512-1CJjf3LCvoefUOgegxi8h6r4B/wLSzInyhGP2UmIBYNlo4Qk5CZ73e1eEyWmfXvFtm1ybkmfb2DqWvspsYLrWw==}
+ '@opentelemetry/resources@2.2.0':
+ resolution: {integrity: sha512-1pNQf/JazQTMA0BiO5NINUzH0cbLbbl7mntLa4aJNmCCXSj0q03T5ZXXL0zw4G55TjdL9Tz32cznGClf+8zr5A==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': '>=1.3.0 <1.10.0'
- '@opentelemetry/sdk-trace-base@2.1.0':
- resolution: {integrity: sha512-uTX9FBlVQm4S2gVQO1sb5qyBLq/FPjbp+tmGoxu4tIgtYGmBYB44+KX/725RFDe30yBSaA9Ml9fqphe1hbUyLQ==}
+ '@opentelemetry/sdk-trace-base@2.2.0':
+ resolution: {integrity: sha512-xWQgL0Bmctsalg6PaXExmzdedSp3gyKV8mQBwK/j9VGdCDu2fmXIb2gAehBKbkXCpJ4HPkgv3QfoJWRT4dHWbw==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': '>=1.3.0 <1.10.0'
@@ -1869,8 +1862,8 @@ packages:
resolution: {integrity: sha512-JD6DerIKdJGmRp4jQyX5FlrQjA4tjOw1cvfsPAZXfOOEErMUHjPcPSICS+6WnM0nB0efSFARh0KAZss+bvExOA==}
engines: {node: '>=14'}
- '@opentelemetry/sql-common@0.41.0':
- resolution: {integrity: sha512-pmzXctVbEERbqSfiAgdes9Y63xjoOyXcD7B6IXBkVb+vbM7M9U98mn33nGXxPf4dfYR0M+vhcKRZmbSJ7HfqFA==}
+ '@opentelemetry/sql-common@0.41.2':
+ resolution: {integrity: sha512-4mhWm3Z8z+i508zQJ7r6Xi7y4mmoJpdvH0fZPFRkWrdp5fq7hhZ2HhYokEOLkfqSMgPR4Z9EyB3DBkbKGOqZiQ==}
engines: {node: ^18.19.0 || >=20.6.0}
peerDependencies:
'@opentelemetry/api': ^1.1.0
@@ -1947,8 +1940,8 @@ packages:
webpack-plugin-serve:
optional: true
- '@prisma/instrumentation@6.15.0':
- resolution: {integrity: sha512-6TXaH6OmDkMOQvOxwLZ8XS51hU2v4A3vmE2pSijCIiGRJYyNeMcL6nMHQMyYdZRD8wl7LF3Wzc+AMPMV/9Oo7A==}
+ '@prisma/instrumentation@6.19.0':
+ resolution: {integrity: sha512-QcuYy25pkXM8BJ37wVFBO7Zh34nyRV1GOb2n3lPkkbRYfl4hWl3PTcImP41P0KrzVXfa/45p6eVCos27x3exIg==}
peerDependencies:
'@opentelemetry/api': ^1.8
@@ -2632,130 +2625,190 @@ packages:
'@scarf/scarf@1.4.0':
resolution: {integrity: sha512-xxeapPiUXdZAE3che6f3xogoJPeZgig6omHEy1rIY5WVsB3H2BHNnZH+gHG6x91SCWyQCzWGsuL2Hh3ClO5/qQ==}
- '@sentry-internal/browser-utils@10.22.0':
- resolution: {integrity: sha512-BpJoLZEyJr7ORzkCrIjxRTnFWwO1mJNICVh3B9g5d9245niGT4OJvRozmLz89WgJkZFHWu84ls6Xfq5b/3tGFQ==}
+ '@sentry-internal/browser-utils@10.27.0':
+ resolution: {integrity: sha512-17tO6AXP+rmVQtLJ3ROQJF2UlFmvMWp7/8RDT5x9VM0w0tY31z8Twc0gw2KA7tcDxa5AaHDUbf9heOf+R6G6ow==}
engines: {node: '>=18'}
- '@sentry-internal/feedback@10.22.0':
- resolution: {integrity: sha512-zXySOin/gGHPV+yKaHqjN9YZ7psEJwzLn8PzCLeo+4REzF1eQwbYZIgOxJFD32z8s3nZiABSWFM/n1CvVfMEsQ==}
+ '@sentry-internal/feedback@10.27.0':
+ resolution: {integrity: sha512-UecsIDJcv7VBwycge/MDvgSRxzevDdcItE1i0KSwlPz00rVVxLY9kV28PJ4I2E7r6/cIaP9BkbWegCEcv09NuA==}
engines: {node: '>=18'}
- '@sentry-internal/replay-canvas@10.22.0':
- resolution: {integrity: sha512-DE4JNUskJg+O+wFq42W5gAa/99aD5k7TfGOwABxvnzFv8vkKA7pqXwPbFFPzypdKIkln+df7RmbnDwQRNg6/lA==}
+ '@sentry-internal/replay-canvas@10.27.0':
+ resolution: {integrity: sha512-inhsRYSVBpu3BI1kZphXj6uB59baJpYdyHeIPCiTfdFNBE5tngNH0HS/aedZ1g9zICw290lwvpuyrWJqp4VBng==}
engines: {node: '>=18'}
- '@sentry-internal/replay@10.22.0':
- resolution: {integrity: sha512-JNE4kHAQSG4/V+J+Zog3vKBWgOe9H33ol/MEU1RuLM/4I+uLf4mTetwnS9ilpnnW/Z/gQYfA+R3CiMrZtqTivw==}
+ '@sentry-internal/replay@10.27.0':
+ resolution: {integrity: sha512-tKSzHq1hNzB619Ssrqo25cqdQJ84R3xSSLsUWEnkGO/wcXJvpZy94gwdoS+KmH18BB1iRRRGtnMxZcUkiPSesw==}
engines: {node: '>=18'}
'@sentry/babel-plugin-component-annotate@4.3.0':
resolution: {integrity: sha512-OuxqBprXRyhe8Pkfyz/4yHQJc5c3lm+TmYWSSx8u48g5yKewSQDOxkiLU5pAk3WnbLPy8XwU/PN+2BG0YFU9Nw==}
engines: {node: '>= 14'}
- '@sentry/browser@10.22.0':
- resolution: {integrity: sha512-wD2XqN+yeBpQFfdPo6+wlKDMyyuDctVGzZWE4qTPntICKQuwMdAfeq5Ma89ad0Dw+bzG9UijGeyuJQlswF87Mw==}
+ '@sentry/babel-plugin-component-annotate@4.6.1':
+ resolution: {integrity: sha512-aSIk0vgBqv7PhX6/Eov+vlI4puCE0bRXzUG5HdCsHBpAfeMkI8Hva6kSOusnzKqs8bf04hU7s3Sf0XxGTj/1AA==}
+ engines: {node: '>= 14'}
+
+ '@sentry/browser@10.27.0':
+ resolution: {integrity: sha512-G8q362DdKp9y1b5qkQEmhTFzyWTOVB0ps1rflok0N6bVA75IEmSDX1pqJsNuY3qy14VsVHYVwQBJQsNltQLS0g==}
engines: {node: '>=18'}
'@sentry/bundler-plugin-core@4.3.0':
resolution: {integrity: sha512-dmR4DJhJ4jqVWGWppuTL2blNFqOZZnt4aLkewbD1myFG3KVfUx8CrMQWEmGjkgPOtj5TO6xH9PyTJjXC6o5tnA==}
engines: {node: '>= 14'}
+ '@sentry/bundler-plugin-core@4.6.1':
+ resolution: {integrity: sha512-WPeRbnMXm927m4Kr69NTArPfI+p5/34FHftdCRI3LFPMyhZDzz6J3wLy4hzaVUgmMf10eLzmq2HGEMvpQmdynA==}
+ engines: {node: '>= 14'}
+
'@sentry/cli-darwin@2.55.0':
resolution: {integrity: sha512-jGHE7SHHzqXUmnsmRLgorVH6nmMmTjQQXdPZbSL5tRtH8d3OIYrVNr5D72DSgD26XAPBDMV0ibqOQ9NKoiSpfA==}
engines: {node: '>=10'}
os: [darwin]
+ '@sentry/cli-darwin@2.58.2':
+ resolution: {integrity: sha512-MArsb3zLhA2/cbd4rTm09SmTpnEuZCoZOpuZYkrpDw1qzBVJmRFA1W1hGAQ9puzBIk/ubY3EUhhzuU3zN2uD6w==}
+ engines: {node: '>=10'}
+ os: [darwin]
+
'@sentry/cli-linux-arm64@2.55.0':
resolution: {integrity: sha512-jNB/0/gFcOuDCaY/TqeuEpsy/k52dwyk1SOV3s1ku4DUsln6govTppeAGRewY3T1Rj9B2vgIWTrnB8KVh9+Rgg==}
engines: {node: '>=10'}
cpu: [arm64]
os: [linux, freebsd, android]
+ '@sentry/cli-linux-arm64@2.58.2':
+ resolution: {integrity: sha512-ay3OeObnbbPrt45cjeUyQjsx5ain1laj1tRszWj37NkKu55NZSp4QCg1gGBZ0gBGhckI9nInEsmKtix00alw2g==}
+ engines: {node: '>=10'}
+ cpu: [arm64]
+ os: [linux, freebsd, android]
+
'@sentry/cli-linux-arm@2.55.0':
resolution: {integrity: sha512-ATjU0PsiWADSPLF/kZroLZ7FPKd5W9TDWHVkKNwIUNTei702LFgTjNeRwOIzTgSvG3yTmVEqtwFQfFN/7hnVXQ==}
engines: {node: '>=10'}
cpu: [arm]
os: [linux, freebsd, android]
+ '@sentry/cli-linux-arm@2.58.2':
+ resolution: {integrity: sha512-HU9lTCzcHqCz/7Mt5n+cv+nFuJdc1hGD2h35Uo92GgxX3/IujNvOUfF+nMX9j6BXH6hUt73R5c0Ycq9+a3Parg==}
+ engines: {node: '>=10'}
+ cpu: [arm]
+ os: [linux, freebsd, android]
+
'@sentry/cli-linux-i686@2.55.0':
resolution: {integrity: sha512-8LZjo6PncTM6bWdaggscNOi5r7F/fqRREsCwvd51dcjGj7Kp1plqo9feEzYQ+jq+KUzVCiWfHrUjddFmYyZJrg==}
engines: {node: '>=10'}
cpu: [x86, ia32]
os: [linux, freebsd, android]
+ '@sentry/cli-linux-i686@2.58.2':
+ resolution: {integrity: sha512-CN9p0nfDFsAT1tTGBbzOUGkIllwS3hygOUyTK7LIm9z+UHw5uNgNVqdM/3Vg+02ymjkjISNB3/+mqEM5osGXdA==}
+ engines: {node: '>=10'}
+ cpu: [x86, ia32]
+ os: [linux, freebsd, android]
+
'@sentry/cli-linux-x64@2.55.0':
resolution: {integrity: sha512-5LUVvq74Yj2cZZy5g5o/54dcWEaX4rf3myTHy73AKhRj1PABtOkfexOLbF9xSrZy95WXWaXyeH+k5n5z/vtHfA==}
engines: {node: '>=10'}
cpu: [x64]
os: [linux, freebsd, android]
+ '@sentry/cli-linux-x64@2.58.2':
+ resolution: {integrity: sha512-oX/LLfvWaJO50oBVOn4ZvG2SDWPq0MN8SV9eg5tt2nviq+Ryltfr7Rtoo+HfV+eyOlx1/ZXhq9Wm7OT3cQuz+A==}
+ engines: {node: '>=10'}
+ cpu: [x64]
+ os: [linux, freebsd, android]
+
'@sentry/cli-win32-arm64@2.55.0':
resolution: {integrity: sha512-cWIQdzm1pfLwPARsV6dUb8TVd6Y3V1A2VWxjTons3Ift6GvtVmiAe0OWL8t2Yt95i8v61kTD/6Tq21OAaogqzA==}
engines: {node: '>=10'}
cpu: [arm64]
os: [win32]
+ '@sentry/cli-win32-arm64@2.58.2':
+ resolution: {integrity: sha512-+cl3x2HPVMpoSVGVM1IDWlAEREZrrVQj4xBb0TRKII7g3hUxRsAIcsrr7+tSkie++0FuH4go/b5fGAv51OEF3w==}
+ engines: {node: '>=10'}
+ cpu: [arm64]
+ os: [win32]
+
'@sentry/cli-win32-i686@2.55.0':
resolution: {integrity: sha512-ldepCn2t9r4I0wvgk7NRaA7coJyy4rTQAzM66u9j5nTEsUldf66xym6esd5ZZRAaJUjffqvHqUIr/lrieTIrVg==}
engines: {node: '>=10'}
cpu: [x86, ia32]
os: [win32]
+ '@sentry/cli-win32-i686@2.58.2':
+ resolution: {integrity: sha512-omFVr0FhzJ8oTJSg1Kf+gjLgzpYklY0XPfLxZ5iiMiYUKwF5uo1RJRdkUOiEAv0IqpUKnmKcmVCLaDxsWclB7Q==}
+ engines: {node: '>=10'}
+ cpu: [x86, ia32]
+ os: [win32]
+
'@sentry/cli-win32-x64@2.55.0':
resolution: {integrity: sha512-4hPc/I/9tXx+HLTdTGwlagtAfDSIa2AoTUP30tl32NAYQhx9a6niUbPAemK2qfxesiufJ7D2djX83rCw6WnJVA==}
engines: {node: '>=10'}
cpu: [x64]
os: [win32]
+ '@sentry/cli-win32-x64@2.58.2':
+ resolution: {integrity: sha512-2NAFs9UxVbRztQbgJSP5i8TB9eJQ7xraciwj/93djrSMHSEbJ0vC47TME0iifgvhlHMs5vqETOKJtfbbpQAQFA==}
+ engines: {node: '>=10'}
+ cpu: [x64]
+ os: [win32]
+
'@sentry/cli@2.55.0':
resolution: {integrity: sha512-cynvcIM2xL8ddwELyFRSpZQw4UtFZzoM2rId2l9vg7+wDREPDocMJB9lEQpBIo3eqhp9JswqUT037yjO6iJ5Sw==}
engines: {node: '>= 10'}
hasBin: true
- '@sentry/core@10.22.0':
- resolution: {integrity: sha512-V1oeHbrOKzxadsCmgtPku3v3Emo/Bpb3VSuKmlLrQefiHX98MWtjJ3XDGfduzD5/dCdh0r/OOLwjcmrO/PZ2aw==}
+ '@sentry/cli@2.58.2':
+ resolution: {integrity: sha512-U4u62V4vaTWF+o40Mih8aOpQKqKUbZQt9A3LorIJwaE3tO3XFLRI70eWtW2se1Qmy0RZ74zB14nYcFNFl2t4Rw==}
+ engines: {node: '>= 10'}
+ hasBin: true
+
+ '@sentry/core@10.27.0':
+ resolution: {integrity: sha512-Zc68kdH7tWTDtDbV1zWIbo3Jv0fHAU2NsF5aD2qamypKgfSIMSbWVxd22qZyDBkaX8gWIPm/0Sgx6aRXRBXrYQ==}
engines: {node: '>=18'}
- '@sentry/nextjs@10.22.0':
- resolution: {integrity: sha512-9Np176cDMLTl98QRqESe6STyaQ0SKiWTDRdF3GPYPEB9s4t5Qz2zZJ9A40Fz3fZ33kW4Z/qscDx3WpCwFLe5Bg==}
+ '@sentry/nextjs@10.27.0':
+ resolution: {integrity: sha512-O3b7y4JgVyj70ucW7lfyFLSXTCvztu7qOdFzFl2LwIstzFIZzt6v7ICOhP3FEEC7Lxn5teNb6xVBDtu8vYr20g==}
engines: {node: '>=18'}
peerDependencies:
next: ^13.2.0 || ^14.0 || ^15.0.0-rc.0 || ^16.0.0-0
- '@sentry/node-core@10.22.0':
- resolution: {integrity: sha512-88Yyn+Qvmp0kPMnNRWgpUlAvhI9CNPqOT+0glW0L7OoN8LkJcNgx2GGUoLrJ+RGeHz/S7dIJY6DGa+u0Not2Qg==}
+ '@sentry/node-core@10.27.0':
+ resolution: {integrity: sha512-Dzo1I64Psb7AkpyKVUlR9KYbl4wcN84W4Wet3xjLmVKMgrCo2uAT70V4xIacmoMH5QLZAx0nGfRy9yRCd4nzBg==}
engines: {node: '>=18'}
peerDependencies:
'@opentelemetry/api': ^1.9.0
- '@opentelemetry/context-async-hooks': ^1.30.1 || ^2.1.0
- '@opentelemetry/core': ^1.30.1 || ^2.1.0
+ '@opentelemetry/context-async-hooks': ^1.30.1 || ^2.1.0 || ^2.2.0
+ '@opentelemetry/core': ^1.30.1 || ^2.1.0 || ^2.2.0
'@opentelemetry/instrumentation': '>=0.57.1 <1'
- '@opentelemetry/resources': ^1.30.1 || ^2.1.0
- '@opentelemetry/sdk-trace-base': ^1.30.1 || ^2.1.0
+ '@opentelemetry/resources': ^1.30.1 || ^2.1.0 || ^2.2.0
+ '@opentelemetry/sdk-trace-base': ^1.30.1 || ^2.1.0 || ^2.2.0
'@opentelemetry/semantic-conventions': ^1.37.0
- '@sentry/node@10.22.0':
- resolution: {integrity: sha512-PfG8AMT2kgFJ7rWb0lLJOmjLW2riytTliLMjfoJ8/tLGk964uKqE0xM7FLtXZjlLJqTXVYCVG7VIPj185uyckQ==}
+ '@sentry/node@10.27.0':
+ resolution: {integrity: sha512-1cQZ4+QqV9juW64Jku1SMSz+PoZV+J59lotz4oYFvCNYzex8hRAnDKvNiKW1IVg5mEEkz98mg1fvcUtiw7GTiQ==}
engines: {node: '>=18'}
- '@sentry/opentelemetry@10.22.0':
- resolution: {integrity: sha512-XHXYYq3zsQ/dj1kQ7cGGLFIEVRmrmjcMhiJHvmKKsUGKxQjHe2G0LuG8clHIPkmbg7yEIxCT/W2I9QzrwYt5+g==}
+ '@sentry/opentelemetry@10.27.0':
+ resolution: {integrity: sha512-z2vXoicuGiqlRlgL9HaYJgkin89ncMpNQy0Kje6RWyhpzLe8BRgUXlgjux7WrSrcbopDdC1OttSpZsJ/Wjk7fg==}
engines: {node: '>=18'}
peerDependencies:
'@opentelemetry/api': ^1.9.0
- '@opentelemetry/context-async-hooks': ^1.30.1 || ^2.1.0
- '@opentelemetry/core': ^1.30.1 || ^2.1.0
- '@opentelemetry/sdk-trace-base': ^1.30.1 || ^2.1.0
+ '@opentelemetry/context-async-hooks': ^1.30.1 || ^2.1.0 || ^2.2.0
+ '@opentelemetry/core': ^1.30.1 || ^2.1.0 || ^2.2.0
+ '@opentelemetry/sdk-trace-base': ^1.30.1 || ^2.1.0 || ^2.2.0
'@opentelemetry/semantic-conventions': ^1.37.0
- '@sentry/react@10.22.0':
- resolution: {integrity: sha512-XByOjtW30LMNibmCPJF5LNYFmETNOUmWByECADox8GYV4BEX18WGXl4K1fpPDTSk+y4vUCHbltHa4GkyTRwG8Q==}
+ '@sentry/react@10.27.0':
+ resolution: {integrity: sha512-xoIRBlO1IhLX/O9aQgVYW1F3Qhw8TdkOiZjh6mrPsnCpBLufsQ4aS1nDQi9miZuWeslW0s2zNy0ACBpICZR/sw==}
engines: {node: '>=18'}
peerDependencies:
react: ^16.14.0 || 17.x || 18.x || 19.x
- '@sentry/vercel-edge@10.22.0':
- resolution: {integrity: sha512-N6/4BrnqTJND/E1wxrQuiMKjJQ6W9xC/gibxrEfbZMFYU6VMz9/Quz+btfFJRsOiuFarLK8J/iEvWVB3mjZdzw==}
+ '@sentry/vercel-edge@10.27.0':
+ resolution: {integrity: sha512-uBfpOnzSNSd2ITMTMeX5bV9Jlci9iMyI+iOPuW8c3oc+0dITTN0OpKLyNd6nfm50bM5h/1qFVQrph+oFTrtuGQ==}
engines: {node: '>=18'}
'@sentry/webpack-plugin@4.3.0':
@@ -3179,8 +3232,8 @@ packages:
'@types/pg-pool@2.0.6':
resolution: {integrity: sha512-TaAUE5rq2VQYxab5Ts7WZhKNmuN78Q6PiFonTDdpbx8a1H0M1vhy3rhiMjl+e2iHmogyMw7jZF4FrE6eJUy5HQ==}
- '@types/pg@8.15.5':
- resolution: {integrity: sha512-LF7lF6zWEKxuT3/OR8wAZGzkg4ENGXFNyiV/JeOt9z5B+0ZVwbql9McqX5c/WStFq1GaGso7H1AzP/qSzmlCKQ==}
+ '@types/pg@8.15.6':
+ resolution: {integrity: sha512-NoaMtzhxOrubeL/7UZuNTrejB4MPAJ0RpxZqXQf2qXuVlTPuG6Y8p4u9dKRaue4yjmC7ZhzVO2/Yyyn25znrPQ==}
'@types/phoenix@1.6.6':
resolution: {integrity: sha512-PIzZZlEppgrpoT2QgbnDU+MMzuR6BbCjllj0bM70lWoejMeNJAxCchxnv7J3XFkI8MpygtRpzXrIlmWUBclP5A==}
@@ -3208,9 +3261,6 @@ packages:
'@types/semver@7.7.1':
resolution: {integrity: sha512-FmgJfu+MOcQ370SD0ev7EI8TlCAfKYU+B4m5T3yXc1CiRN94g/SZPtsCkk506aUDtlMnFZvasDwHHUcZUEaYuA==}
- '@types/shimmer@1.2.0':
- resolution: {integrity: sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==}
-
'@types/statuses@2.0.6':
resolution: {integrity: sha512-xMAgYwceFhRA2zY+XbEA7mxYbA093wdiW8Vu6gZPGWy9cmOyU9XesH1tNcEWsKFd5Vzrqx5T3D38PWx1FIIXkA==}
@@ -4123,9 +4173,9 @@ packages:
create-hmac@1.1.7:
resolution: {integrity: sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==}
- cross-env@7.0.3:
- resolution: {integrity: sha512-+/HKd6EgcQCJGh2PSjZuUitQBQynKor4wrFbRg4DtAgS1aWO+gU52xpH7M9ScGgXSYmAVS9bIJ8EzuaGw0oNAw==}
- engines: {node: '>=10.14', npm: '>=6', yarn: '>=1'}
+ cross-env@10.1.0:
+ resolution: {integrity: sha512-GsYosgnACZTADcmEyJctkJIoqAhHjttw7RsFrVoJNXbsWWqaq6Ym+7kZjq6mS45O0jij6vtiReppKQEtqWy6Dw==}
+ engines: {node: '>=20'}
hasBin: true
cross-spawn@7.0.6:
@@ -4908,6 +4958,10 @@ packages:
resolution: {integrity: sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==}
hasBin: true
+ glob@10.5.0:
+ resolution: {integrity: sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==}
+ hasBin: true
+
glob@7.2.3:
resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==}
deprecated: Glob versions prior to v9 are no longer supported
@@ -5108,6 +5162,9 @@ packages:
import-in-the-middle@1.14.2:
resolution: {integrity: sha512-5tCuY9BV8ujfOpwtAGgsTx9CGUapcFMEEyByLv1B+v2+6DhAcw+Zr0nhQT7uwaZ7DiourxFEscghOR8e1aPLQw==}
+ import-in-the-middle@2.0.0:
+ resolution: {integrity: sha512-yNZhyQYqXpkT0AKq3F3KLasUSK4fHvebNH5hOsKQw2dhGSALvQ4U0BqUc5suziKvydO5u5hgN2hy1RJaho8U5A==}
+
imurmurhash@0.1.4:
resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==}
engines: {node: '>=0.8.19'}
@@ -6680,6 +6737,10 @@ packages:
resolution: {integrity: sha512-gAZ+kLqBdHarXB64XpAe2VCjB7rIRv+mU8tfRWziHRJ5umKsIHN2tLLv6EtMw7WCdP19S0ERVMldNvxYCHnhSQ==}
engines: {node: '>=8.6.0'}
+ require-in-the-middle@8.0.1:
+ resolution: {integrity: sha512-QT7FVMXfWOYFbeRBF6nu+I6tr2Tf3u0q8RIEjNob/heKY/nh7drD/k7eeMFmSQgnTtCzLDcCu/XEnpW2wk4xCQ==}
+ engines: {node: '>=9.3.0 || >=8.10.0 <9.0.0'}
+
reselect@5.1.1:
resolution: {integrity: sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w==}
@@ -6847,9 +6908,6 @@ packages:
resolution: {integrity: sha512-VuvPvLG1QjNOLP7AIm2HGyfmxEIz8QdskvWOHwUcxLDibYWjLRBmCWd8LSL5FlwhBW7D/GU+3gNVC/ASxAWdxg==}
engines: {node: 18.* || >= 20}
- shimmer@1.2.1:
- resolution: {integrity: sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==}
-
should-equal@2.0.0:
resolution: {integrity: sha512-ZP36TMrK9euEuWQYBig9W55WPC7uo37qzAEmbjHz4gfyuXrEUgF8cUvQVO+w+d3OMfPvSRQJ22lSm8MQJ43LTA==}
@@ -8507,6 +8565,8 @@ snapshots:
'@emotion/unitless@0.8.1': {}
+ '@epic-web/invariant@1.0.0': {}
+
'@esbuild/aix-ppc64@0.25.11':
optional: true
@@ -8993,257 +9053,236 @@ snapshots:
'@open-draft/until@2.1.0': {}
- '@opentelemetry/api-logs@0.204.0':
- dependencies:
- '@opentelemetry/api': 1.9.0
-
- '@opentelemetry/api-logs@0.57.2':
+ '@opentelemetry/api-logs@0.208.0':
dependencies:
'@opentelemetry/api': 1.9.0
'@opentelemetry/api@1.9.0': {}
- '@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
'@opentelemetry/semantic-conventions': 1.37.0
- '@opentelemetry/instrumentation-amqplib@0.51.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-amqplib@0.55.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/semantic-conventions': 1.37.0
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-connect@0.48.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-connect@0.52.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
'@opentelemetry/semantic-conventions': 1.37.0
'@types/connect': 3.4.38
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-dataloader@0.22.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-dataloader@0.26.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-express@0.53.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-express@0.57.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
'@opentelemetry/semantic-conventions': 1.37.0
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-fs@0.24.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-fs@0.28.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-generic-pool@0.48.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-generic-pool@0.52.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-graphql@0.52.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-graphql@0.56.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-hapi@0.51.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-hapi@0.55.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
'@opentelemetry/semantic-conventions': 1.37.0
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-http@0.204.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-http@0.208.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
'@opentelemetry/semantic-conventions': 1.37.0
forwarded-parse: 2.1.2
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-ioredis@0.52.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-ioredis@0.56.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/redis-common': 0.38.0
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/redis-common': 0.38.2
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-kafkajs@0.18.0(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
'@opentelemetry/semantic-conventions': 1.37.0
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-kafkajs@0.14.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-knex@0.53.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
'@opentelemetry/semantic-conventions': 1.37.0
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-knex@0.49.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-koa@0.57.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
'@opentelemetry/semantic-conventions': 1.37.0
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-koa@0.52.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-lru-memoizer@0.53.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/semantic-conventions': 1.37.0
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-lru-memoizer@0.49.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-mongodb@0.61.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-mongodb@0.57.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-mongoose@0.55.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/semantic-conventions': 1.37.0
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-mongoose@0.51.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-mysql2@0.55.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
'@opentelemetry/semantic-conventions': 1.37.0
+ '@opentelemetry/sql-common': 0.41.2(@opentelemetry/api@1.9.0)
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-mysql2@0.51.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-mysql@0.54.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/semantic-conventions': 1.37.0
- '@opentelemetry/sql-common': 0.41.0(@opentelemetry/api@1.9.0)
- transitivePeerDependencies:
- - supports-color
-
- '@opentelemetry/instrumentation-mysql@0.50.0(@opentelemetry/api@1.9.0)':
- dependencies:
- '@opentelemetry/api': 1.9.0
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/semantic-conventions': 1.37.0
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
'@types/mysql': 2.15.27
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-pg@0.57.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-pg@0.61.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
'@opentelemetry/semantic-conventions': 1.37.0
- '@opentelemetry/sql-common': 0.41.0(@opentelemetry/api@1.9.0)
- '@types/pg': 8.15.5
+ '@opentelemetry/sql-common': 0.41.2(@opentelemetry/api@1.9.0)
+ '@types/pg': 8.15.6
'@types/pg-pool': 2.0.6
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-redis@0.53.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-redis@0.57.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/redis-common': 0.38.0
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/redis-common': 0.38.2
'@opentelemetry/semantic-conventions': 1.37.0
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-tedious@0.23.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-tedious@0.27.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/semantic-conventions': 1.37.0
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
'@types/tedious': 4.0.14
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation-undici@0.15.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation-undici@0.19.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.37.0
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation@0.204.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/instrumentation@0.208.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/api-logs': 0.204.0
- import-in-the-middle: 1.14.2
- require-in-the-middle: 7.5.2
+ '@opentelemetry/api-logs': 0.208.0
+ import-in-the-middle: 2.0.0
+ require-in-the-middle: 8.0.1
transitivePeerDependencies:
- supports-color
- '@opentelemetry/instrumentation@0.57.2(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/redis-common@0.38.2': {}
+
+ '@opentelemetry/resources@2.2.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/api-logs': 0.57.2
- '@types/shimmer': 1.2.0
- import-in-the-middle: 1.14.2
- require-in-the-middle: 7.5.2
- semver: 7.7.3
- shimmer: 1.2.1
- transitivePeerDependencies:
- - supports-color
-
- '@opentelemetry/redis-common@0.38.0': {}
-
- '@opentelemetry/resources@2.1.0(@opentelemetry/api@1.9.0)':
- dependencies:
- '@opentelemetry/api': 1.9.0
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
'@opentelemetry/semantic-conventions': 1.37.0
- '@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/resources': 2.1.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0)
'@opentelemetry/semantic-conventions': 1.37.0
'@opentelemetry/semantic-conventions@1.37.0': {}
- '@opentelemetry/sql-common@0.41.0(@opentelemetry/api@1.9.0)':
+ '@opentelemetry/sql-common@0.41.2(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
'@orval/angular@7.13.0(openapi-types@12.1.3)(typescript@5.9.3)':
dependencies:
@@ -9395,10 +9434,10 @@ snapshots:
type-fest: 4.41.0
webpack-hot-middleware: 2.26.1
- '@prisma/instrumentation@6.15.0(@opentelemetry/api@1.9.0)':
+ '@prisma/instrumentation@6.19.0(@opentelemetry/api@1.9.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
transitivePeerDependencies:
- supports-color
@@ -10071,33 +10110,35 @@ snapshots:
'@scarf/scarf@1.4.0': {}
- '@sentry-internal/browser-utils@10.22.0':
+ '@sentry-internal/browser-utils@10.27.0':
dependencies:
- '@sentry/core': 10.22.0
+ '@sentry/core': 10.27.0
- '@sentry-internal/feedback@10.22.0':
+ '@sentry-internal/feedback@10.27.0':
dependencies:
- '@sentry/core': 10.22.0
+ '@sentry/core': 10.27.0
- '@sentry-internal/replay-canvas@10.22.0':
+ '@sentry-internal/replay-canvas@10.27.0':
dependencies:
- '@sentry-internal/replay': 10.22.0
- '@sentry/core': 10.22.0
+ '@sentry-internal/replay': 10.27.0
+ '@sentry/core': 10.27.0
- '@sentry-internal/replay@10.22.0':
+ '@sentry-internal/replay@10.27.0':
dependencies:
- '@sentry-internal/browser-utils': 10.22.0
- '@sentry/core': 10.22.0
+ '@sentry-internal/browser-utils': 10.27.0
+ '@sentry/core': 10.27.0
'@sentry/babel-plugin-component-annotate@4.3.0': {}
- '@sentry/browser@10.22.0':
+ '@sentry/babel-plugin-component-annotate@4.6.1': {}
+
+ '@sentry/browser@10.27.0':
dependencies:
- '@sentry-internal/browser-utils': 10.22.0
- '@sentry-internal/feedback': 10.22.0
- '@sentry-internal/replay': 10.22.0
- '@sentry-internal/replay-canvas': 10.22.0
- '@sentry/core': 10.22.0
+ '@sentry-internal/browser-utils': 10.27.0
+ '@sentry-internal/feedback': 10.27.0
+ '@sentry-internal/replay': 10.27.0
+ '@sentry-internal/replay-canvas': 10.27.0
+ '@sentry/core': 10.27.0
'@sentry/bundler-plugin-core@4.3.0':
dependencies:
@@ -10113,30 +10154,68 @@ snapshots:
- encoding
- supports-color
+ '@sentry/bundler-plugin-core@4.6.1':
+ dependencies:
+ '@babel/core': 7.28.4
+ '@sentry/babel-plugin-component-annotate': 4.6.1
+ '@sentry/cli': 2.58.2
+ dotenv: 16.6.1
+ find-up: 5.0.0
+ glob: 10.5.0
+ magic-string: 0.30.8
+ unplugin: 1.0.1
+ transitivePeerDependencies:
+ - encoding
+ - supports-color
+
'@sentry/cli-darwin@2.55.0':
optional: true
+ '@sentry/cli-darwin@2.58.2':
+ optional: true
+
'@sentry/cli-linux-arm64@2.55.0':
optional: true
+ '@sentry/cli-linux-arm64@2.58.2':
+ optional: true
+
'@sentry/cli-linux-arm@2.55.0':
optional: true
+ '@sentry/cli-linux-arm@2.58.2':
+ optional: true
+
'@sentry/cli-linux-i686@2.55.0':
optional: true
+ '@sentry/cli-linux-i686@2.58.2':
+ optional: true
+
'@sentry/cli-linux-x64@2.55.0':
optional: true
+ '@sentry/cli-linux-x64@2.58.2':
+ optional: true
+
'@sentry/cli-win32-arm64@2.55.0':
optional: true
+ '@sentry/cli-win32-arm64@2.58.2':
+ optional: true
+
'@sentry/cli-win32-i686@2.55.0':
optional: true
+ '@sentry/cli-win32-i686@2.58.2':
+ optional: true
+
'@sentry/cli-win32-x64@2.55.0':
optional: true
+ '@sentry/cli-win32-x64@2.58.2':
+ optional: true
+
'@sentry/cli@2.55.0':
dependencies:
https-proxy-agent: 5.0.1
@@ -10157,20 +10236,40 @@ snapshots:
- encoding
- supports-color
- '@sentry/core@10.22.0': {}
+ '@sentry/cli@2.58.2':
+ dependencies:
+ https-proxy-agent: 5.0.1
+ node-fetch: 2.7.0
+ progress: 2.0.3
+ proxy-from-env: 1.1.0
+ which: 2.0.2
+ optionalDependencies:
+ '@sentry/cli-darwin': 2.58.2
+ '@sentry/cli-linux-arm': 2.58.2
+ '@sentry/cli-linux-arm64': 2.58.2
+ '@sentry/cli-linux-i686': 2.58.2
+ '@sentry/cli-linux-x64': 2.58.2
+ '@sentry/cli-win32-arm64': 2.58.2
+ '@sentry/cli-win32-i686': 2.58.2
+ '@sentry/cli-win32-x64': 2.58.2
+ transitivePeerDependencies:
+ - encoding
+ - supports-color
- '@sentry/nextjs@10.22.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9))':
+ '@sentry/core@10.27.0': {}
+
+ '@sentry/nextjs@10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9))':
dependencies:
'@opentelemetry/api': 1.9.0
'@opentelemetry/semantic-conventions': 1.37.0
'@rollup/plugin-commonjs': 28.0.1(rollup@4.52.2)
- '@sentry-internal/browser-utils': 10.22.0
- '@sentry/bundler-plugin-core': 4.3.0
- '@sentry/core': 10.22.0
- '@sentry/node': 10.22.0
- '@sentry/opentelemetry': 10.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)
- '@sentry/react': 10.22.0(react@18.3.1)
- '@sentry/vercel-edge': 10.22.0
+ '@sentry-internal/browser-utils': 10.27.0
+ '@sentry/bundler-plugin-core': 4.6.1
+ '@sentry/core': 10.27.0
+ '@sentry/node': 10.27.0
+ '@sentry/opentelemetry': 10.27.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)
+ '@sentry/react': 10.27.0(react@18.3.1)
+ '@sentry/vercel-edge': 10.27.0
'@sentry/webpack-plugin': 4.3.0(webpack@5.101.3(esbuild@0.25.9))
next: 15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
resolve: 1.22.8
@@ -10185,83 +10284,83 @@ snapshots:
- supports-color
- webpack
- '@sentry/node-core@10.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/instrumentation@0.204.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)':
+ '@sentry/node-core@10.27.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/instrumentation@0.208.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)':
dependencies:
'@apm-js-collab/tracing-hooks': 0.3.1
'@opentelemetry/api': 1.9.0
- '@opentelemetry/context-async-hooks': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/resources': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/sdk-trace-base': 2.1.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/context-async-hooks': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0)
'@opentelemetry/semantic-conventions': 1.37.0
- '@sentry/core': 10.22.0
- '@sentry/opentelemetry': 10.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)
- import-in-the-middle: 1.14.2
+ '@sentry/core': 10.27.0
+ '@sentry/opentelemetry': 10.27.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)
+ import-in-the-middle: 2.0.0
transitivePeerDependencies:
- supports-color
- '@sentry/node@10.22.0':
+ '@sentry/node@10.27.0':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/context-async-hooks': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-amqplib': 0.51.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-connect': 0.48.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-dataloader': 0.22.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-express': 0.53.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-fs': 0.24.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-generic-pool': 0.48.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-graphql': 0.52.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-hapi': 0.51.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-http': 0.204.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-ioredis': 0.52.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-kafkajs': 0.14.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-knex': 0.49.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-koa': 0.52.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-lru-memoizer': 0.49.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-mongodb': 0.57.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-mongoose': 0.51.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-mysql': 0.50.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-mysql2': 0.51.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-pg': 0.57.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-redis': 0.53.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-tedious': 0.23.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/instrumentation-undici': 0.15.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/resources': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/sdk-trace-base': 2.1.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/context-async-hooks': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-amqplib': 0.55.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-connect': 0.52.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-dataloader': 0.26.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-express': 0.57.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-fs': 0.28.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-generic-pool': 0.52.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-graphql': 0.56.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-hapi': 0.55.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-http': 0.208.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-ioredis': 0.56.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-kafkajs': 0.18.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-knex': 0.53.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-koa': 0.57.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-lru-memoizer': 0.53.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-mongodb': 0.61.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-mongoose': 0.55.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-mysql': 0.54.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-mysql2': 0.55.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-pg': 0.61.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-redis': 0.57.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-tedious': 0.27.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-undici': 0.19.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0)
'@opentelemetry/semantic-conventions': 1.37.0
- '@prisma/instrumentation': 6.15.0(@opentelemetry/api@1.9.0)
- '@sentry/core': 10.22.0
- '@sentry/node-core': 10.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/instrumentation@0.204.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)
- '@sentry/opentelemetry': 10.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)
- import-in-the-middle: 1.14.2
+ '@prisma/instrumentation': 6.19.0(@opentelemetry/api@1.9.0)
+ '@sentry/core': 10.27.0
+ '@sentry/node-core': 10.27.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/instrumentation@0.208.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)
+ '@sentry/opentelemetry': 10.27.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)
+ import-in-the-middle: 2.0.0
minimatch: 9.0.5
transitivePeerDependencies:
- supports-color
- '@sentry/opentelemetry@10.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)':
+ '@sentry/opentelemetry@10.27.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/context-async-hooks': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0)
- '@opentelemetry/sdk-trace-base': 2.1.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/context-async-hooks': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0)
'@opentelemetry/semantic-conventions': 1.37.0
- '@sentry/core': 10.22.0
+ '@sentry/core': 10.27.0
- '@sentry/react@10.22.0(react@18.3.1)':
+ '@sentry/react@10.27.0(react@18.3.1)':
dependencies:
- '@sentry/browser': 10.22.0
- '@sentry/core': 10.22.0
+ '@sentry/browser': 10.27.0
+ '@sentry/core': 10.27.0
hoist-non-react-statics: 3.3.2
react: 18.3.1
- '@sentry/vercel-edge@10.22.0':
+ '@sentry/vercel-edge@10.27.0':
dependencies:
'@opentelemetry/api': 1.9.0
- '@opentelemetry/resources': 2.1.0(@opentelemetry/api@1.9.0)
- '@sentry/core': 10.22.0
+ '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0)
+ '@sentry/core': 10.27.0
'@sentry/webpack-plugin@4.3.0(webpack@5.101.3(esbuild@0.25.9))':
dependencies:
@@ -10904,9 +11003,9 @@ snapshots:
'@types/pg-pool@2.0.6':
dependencies:
- '@types/pg': 8.15.5
+ '@types/pg': 8.15.6
- '@types/pg@8.15.5':
+ '@types/pg@8.15.6':
dependencies:
'@types/node': 24.10.0
pg-protocol: 1.10.3
@@ -10937,8 +11036,6 @@ snapshots:
'@types/semver@7.7.1': {}
- '@types/shimmer@1.2.0': {}
-
'@types/statuses@2.0.6': {}
'@types/stylis@4.2.5': {}
@@ -11901,8 +11998,9 @@ snapshots:
safe-buffer: 5.2.1
sha.js: 2.4.12
- cross-env@7.0.3:
+ cross-env@10.1.0:
dependencies:
+ '@epic-web/invariant': 1.0.0
cross-spawn: 7.0.6
cross-spawn@7.0.6:
@@ -12434,7 +12532,7 @@ snapshots:
eslint: 8.57.1
eslint-import-resolver-node: 0.3.9
eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1)
- eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
+ eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1)
eslint-plugin-jsx-a11y: 6.10.2(eslint@8.57.1)
eslint-plugin-react: 7.37.5(eslint@8.57.1)
eslint-plugin-react-hooks: 5.2.0(eslint@8.57.1)
@@ -12464,7 +12562,7 @@ snapshots:
tinyglobby: 0.2.15
unrs-resolver: 1.11.1
optionalDependencies:
- eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1)
+ eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1)
transitivePeerDependencies:
- supports-color
@@ -12479,7 +12577,7 @@ snapshots:
transitivePeerDependencies:
- supports-color
- eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1):
+ eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1):
dependencies:
'@rtsao/scc': 1.1.0
array-includes: 3.1.9
@@ -12902,6 +13000,15 @@ snapshots:
package-json-from-dist: 1.0.1
path-scurry: 1.11.1
+ glob@10.5.0:
+ dependencies:
+ foreground-child: 3.3.1
+ jackspeak: 3.4.3
+ minimatch: 9.0.5
+ minipass: 7.1.2
+ package-json-from-dist: 1.0.1
+ path-scurry: 1.11.1
+
glob@7.2.3:
dependencies:
fs.realpath: 1.0.0
@@ -13159,6 +13266,13 @@ snapshots:
cjs-module-lexer: 1.4.3
module-details-from-path: 1.0.4
+ import-in-the-middle@2.0.0:
+ dependencies:
+ acorn: 8.15.0
+ acorn-import-attributes: 1.9.5(acorn@8.15.0)
+ cjs-module-lexer: 1.4.3
+ module-details-from-path: 1.0.4
+
imurmurhash@0.1.4: {}
indent-string@4.0.0: {}
@@ -15029,6 +15143,13 @@ snapshots:
transitivePeerDependencies:
- supports-color
+ require-in-the-middle@8.0.1:
+ dependencies:
+ debug: 4.4.3
+ module-details-from-path: 1.0.4
+ transitivePeerDependencies:
+ - supports-color
+
reselect@5.1.1: {}
resolve-from@4.0.0: {}
@@ -15244,8 +15365,6 @@ snapshots:
'@scarf/scarf': 1.4.0
deepmerge-ts: 7.1.5
- shimmer@1.2.1: {}
-
should-equal@2.0.0:
dependencies:
should-type: 1.4.0
diff --git a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/AgentOnboardingCredentials.tsx b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/AgentOnboardingCredentials.tsx
index cfeada190f..3176ec7f70 100644
--- a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/AgentOnboardingCredentials.tsx
+++ b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/AgentOnboardingCredentials.tsx
@@ -1,4 +1,4 @@
-import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs";
+import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs";
import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { useState } from "react";
diff --git a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx
index 6aeb0213e7..4b6abacbff 100644
--- a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx
+++ b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx
@@ -1,6 +1,6 @@
"use client";
-import { RunAgentInputs } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentInputs/RunAgentInputs";
+import { RunAgentInputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs";
import {
Card,
CardContent,
diff --git a/autogpt_platform/frontend/src/app/(no-navbar)/share/[token]/page.tsx b/autogpt_platform/frontend/src/app/(no-navbar)/share/[token]/page.tsx
index a8fd85eeb0..c24f9e11a3 100644
--- a/autogpt_platform/frontend/src/app/(no-navbar)/share/[token]/page.tsx
+++ b/autogpt_platform/frontend/src/app/(no-navbar)/share/[token]/page.tsx
@@ -1,8 +1,7 @@
"use client";
-import React from "react";
-import { useParams } from "next/navigation";
-import { RunOutputs } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/components/RunOutputs";
+import { RunOutputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunOutputs";
+import { useGetV1GetSharedExecution } from "@/app/api/__generated__/endpoints/default/default";
import {
Card,
CardContent,
@@ -11,7 +10,7 @@ import {
} from "@/components/__legacy__/ui/card";
import { Alert, AlertDescription } from "@/components/molecules/Alert/Alert";
import { InfoIcon } from "lucide-react";
-import { useGetV1GetSharedExecution } from "@/app/api/__generated__/endpoints/default/default";
+import { useParams } from "next/navigation";
export default function SharePage() {
const params = useParams();
diff --git a/autogpt_platform/frontend/src/app/(platform)/auth/integrations/oauth_callback/route.ts b/autogpt_platform/frontend/src/app/(platform)/auth/integrations/oauth_callback/route.ts
index a075564063..f6df869350 100644
--- a/autogpt_platform/frontend/src/app/(platform)/auth/integrations/oauth_callback/route.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/auth/integrations/oauth_callback/route.ts
@@ -1,4 +1,4 @@
-import { OAuthPopupResultMessage } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs";
+import { OAuthPopupResultMessage } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs";
import { NextResponse } from "next/server";
// This route is intended to be used as the callback for integration OAuth flows,
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx
index 3fcde3bf76..237bea2ab0 100644
--- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx
@@ -1,9 +1,13 @@
+import { BlockUIType } from "@/app/(platform)/build/components/types";
+import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
+import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import {
- Tooltip,
- TooltipContent,
- TooltipProvider,
- TooltipTrigger,
-} from "@/components/atoms/Tooltip/BaseTooltip";
+ globalRegistry,
+ OutputActions,
+ OutputItem,
+} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
+import { Label } from "@/components/__legacy__/ui/label";
+import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
import {
Sheet,
SheetContent,
@@ -12,20 +16,16 @@ import {
SheetTitle,
SheetTrigger,
} from "@/components/__legacy__/ui/sheet";
-import { BuilderActionButton } from "../BuilderActionButton";
-import { BookOpenIcon } from "@phosphor-icons/react";
-import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
-import { useShallow } from "zustand/react/shallow";
-import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
-import { BlockUIType } from "@/app/(platform)/build/components/types";
-import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
-import { Label } from "@/components/__legacy__/ui/label";
-import { useMemo } from "react";
import {
- globalRegistry,
- OutputItem,
- OutputActions,
-} from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers";
+ Tooltip,
+ TooltipContent,
+ TooltipProvider,
+ TooltipTrigger,
+} from "@/components/atoms/Tooltip/BaseTooltip";
+import { BookOpenIcon } from "@phosphor-icons/react";
+import { useMemo } from "react";
+import { useShallow } from "zustand/react/shallow";
+import { BuilderActionButton } from "../BuilderActionButton";
export const AgentOutputs = ({ flowID }: { flowID: string | null }) => {
const hasOutputs = useGraphStore(useShallow((state) => state.hasOutputs));
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/CronSchedulerDialog/CronSchedulerDialog.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/CronSchedulerDialog/CronSchedulerDialog.tsx
index adb3c619bf..b6ec73eb9a 100644
--- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/CronSchedulerDialog/CronSchedulerDialog.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/CronSchedulerDialog/CronSchedulerDialog.tsx
@@ -1,10 +1,10 @@
+import { CronScheduler } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/CronScheduler";
import { Button } from "@/components/atoms/Button/Button";
+import { Input } from "@/components/atoms/Input/Input";
+import { Text } from "@/components/atoms/Text/Text";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { InfoIcon } from "lucide-react";
-import { CronScheduler } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/CronScheduler";
-import { Text } from "@/components/atoms/Text/Text";
import { useCronSchedulerDialog } from "./useCronSchedulerDialog";
-import { Input } from "@/components/atoms/Input/Input";
type CronSchedulerDialogProps = {
open: boolean;
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx
index 3a0c7aab4a..f4c1a7331f 100644
--- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx
@@ -18,6 +18,7 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => {
openRunInputDialog,
setOpenRunInputDialog,
isExecutingGraph,
+ isTerminatingGraph,
isSaving,
} = useRunGraph();
const isGraphRunning = useGraphStore(
@@ -34,8 +35,8 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => {
"border-red-500 bg-gradient-to-br from-red-400 to-red-500 shadow-[inset_0_2px_0_0_rgba(255,255,255,0.5),0_2px_4px_0_rgba(0,0,0,0.2)]",
)}
onClick={isGraphRunning ? handleStopGraph : handleRunGraph}
- disabled={!flowID || isExecutingGraph}
- isLoading={isExecutingGraph || isSaving}
+ disabled={!flowID || isExecutingGraph || isTerminatingGraph}
+ isLoading={isExecutingGraph || isTerminatingGraph || isSaving}
>
{!isGraphRunning ? (