diff --git a/.github/workflows/claude-dependabot.yml b/.github/workflows/claude-dependabot.yml index 902fc461b2..20b6f1d28e 100644 --- a/.github/workflows/claude-dependabot.yml +++ b/.github/workflows/claude-dependabot.yml @@ -80,7 +80,7 @@ jobs: - name: Set up Node.js uses: actions/setup-node@v4 with: - node-version: "21" + node-version: "22" - name: Enable corepack run: corepack enable diff --git a/.github/workflows/claude.yml b/.github/workflows/claude.yml index 31f2769ea4..51eb764b80 100644 --- a/.github/workflows/claude.yml +++ b/.github/workflows/claude.yml @@ -90,7 +90,7 @@ jobs: - name: Set up Node.js uses: actions/setup-node@v4 with: - node-version: "21" + node-version: "22" - name: Enable corepack run: corepack enable diff --git a/.github/workflows/copilot-setup-steps.yml b/.github/workflows/copilot-setup-steps.yml index 7af1ec4365..13ef01cc44 100644 --- a/.github/workflows/copilot-setup-steps.yml +++ b/.github/workflows/copilot-setup-steps.yml @@ -78,7 +78,7 @@ jobs: - name: Set up Node.js uses: actions/setup-node@v4 with: - node-version: "21" + node-version: "22" - name: Enable corepack run: corepack enable @@ -299,4 +299,4 @@ jobs: echo "✅ AutoGPT Platform development environment setup complete!" 
echo "🚀 Ready for development with Docker services running" echo "📝 Backend server: poetry run serve (port 8000)" - echo "🌐 Frontend server: pnpm dev (port 3000)" \ No newline at end of file + echo "🌐 Frontend server: pnpm dev (port 3000)" diff --git a/autogpt_platform/backend/backend/blocks/ai_image_customizer.py b/autogpt_platform/backend/backend/blocks/ai_image_customizer.py index 912edec78a..850046317a 100644 --- a/autogpt_platform/backend/backend/blocks/ai_image_customizer.py +++ b/autogpt_platform/backend/backend/blocks/ai_image_customizer.py @@ -1,3 +1,4 @@ +import asyncio from enum import Enum from typing import Literal @@ -20,7 +21,7 @@ from backend.data.model import ( SchemaField, ) from backend.integrations.providers import ProviderName -from backend.util.file import MediaFileType +from backend.util.file import MediaFileType, store_media_file class GeminiImageModel(str, Enum): @@ -28,6 +29,20 @@ class GeminiImageModel(str, Enum): NANO_BANANA_PRO = "google/nano-banana-pro" +class AspectRatio(str, Enum): + MATCH_INPUT_IMAGE = "match_input_image" + ASPECT_1_1 = "1:1" + ASPECT_2_3 = "2:3" + ASPECT_3_2 = "3:2" + ASPECT_3_4 = "3:4" + ASPECT_4_3 = "4:3" + ASPECT_4_5 = "4:5" + ASPECT_5_4 = "5:4" + ASPECT_9_16 = "9:16" + ASPECT_16_9 = "16:9" + ASPECT_21_9 = "21:9" + + class OutputFormat(str, Enum): JPG = "jpg" PNG = "png" @@ -70,6 +85,11 @@ class AIImageCustomizerBlock(Block): default=[], title="Input Images", ) + aspect_ratio: AspectRatio = SchemaField( + description="Aspect ratio of the generated image", + default=AspectRatio.MATCH_INPUT_IMAGE, + title="Aspect Ratio", + ) output_format: OutputFormat = SchemaField( description="Format of the output image", default=OutputFormat.PNG, @@ -93,6 +113,7 @@ class AIImageCustomizerBlock(Block): "prompt": "Make the scene more vibrant and colorful", "model": GeminiImageModel.NANO_BANANA, "images": [], + "aspect_ratio": AspectRatio.MATCH_INPUT_IMAGE, "output_format": OutputFormat.JPG, "credentials": TEST_CREDENTIALS_INPUT, 
}, @@ -117,11 +138,25 @@ class AIImageCustomizerBlock(Block): **kwargs, ) -> BlockOutput: try: + # Convert local file paths to Data URIs (base64) so Replicate can access them + processed_images = await asyncio.gather( + *( + store_media_file( + graph_exec_id=graph_exec_id, + file=img, + user_id=user_id, + return_content=True, + ) + for img in input_data.images + ) + ) + result = await self.run_model( api_key=credentials.api_key, model_name=input_data.model.value, prompt=input_data.prompt, - images=input_data.images, + images=processed_images, + aspect_ratio=input_data.aspect_ratio.value, output_format=input_data.output_format.value, ) yield "image_url", result @@ -134,12 +169,14 @@ class AIImageCustomizerBlock(Block): model_name: str, prompt: str, images: list[MediaFileType], + aspect_ratio: str, output_format: str, ) -> MediaFileType: client = ReplicateClient(api_token=api_key.get_secret_value()) input_params: dict = { "prompt": prompt, + "aspect_ratio": aspect_ratio, "output_format": output_format, } diff --git a/autogpt_platform/backend/backend/blocks/google/_drive.py b/autogpt_platform/backend/backend/blocks/google/_drive.py new file mode 100644 index 0000000000..46fafd6857 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/google/_drive.py @@ -0,0 +1,198 @@ +import asyncio +import mimetypes +import uuid +from pathlib import Path +from typing import Any, Literal, Optional + +from pydantic import BaseModel, ConfigDict, Field + +from backend.data.model import SchemaField +from backend.util.file import get_exec_file_path +from backend.util.request import Requests +from backend.util.type import MediaFileType +from backend.util.virus_scanner import scan_content_safe + +AttachmentView = Literal[ + "DOCS", + "DOCUMENTS", + "SPREADSHEETS", + "PRESENTATIONS", + "DOCS_IMAGES", + "FOLDERS", +] +ATTACHMENT_VIEWS: tuple[AttachmentView, ...] 
= ( + "DOCS", + "DOCUMENTS", + "SPREADSHEETS", + "PRESENTATIONS", + "DOCS_IMAGES", + "FOLDERS", +) + + +class GoogleDriveFile(BaseModel): + """Represents a single file/folder picked from Google Drive""" + + model_config = ConfigDict(populate_by_name=True) + + id: str = Field(description="Google Drive file/folder ID") + name: Optional[str] = Field(None, description="File/folder name") + mime_type: Optional[str] = Field( + None, + alias="mimeType", + description="MIME type (e.g., application/vnd.google-apps.document)", + ) + url: Optional[str] = Field(None, description="URL to open the file") + icon_url: Optional[str] = Field(None, alias="iconUrl", description="Icon URL") + is_folder: Optional[bool] = Field( + None, alias="isFolder", description="Whether this is a folder" + ) + + +def GoogleDrivePickerField( + multiselect: bool = False, + allow_folder_selection: bool = False, + allowed_views: Optional[list[AttachmentView]] = None, + allowed_mime_types: Optional[list[str]] = None, + scopes: Optional[list[str]] = None, + title: Optional[str] = None, + description: Optional[str] = None, + placeholder: Optional[str] = None, + **kwargs, +) -> Any: + """ + Creates a Google Drive Picker input field. + + Args: + multiselect: Allow selecting multiple files/folders (default: False) + allow_folder_selection: Allow selecting folders (default: False) + allowed_views: List of view types to show in picker (default: ["DOCS"]) + allowed_mime_types: Filter by MIME types (e.g., ["application/pdf"]) + title: Field title shown in UI + description: Field description/help text + placeholder: Placeholder text for the button + **kwargs: Additional SchemaField arguments (advanced, hidden, etc.) + + Returns: + Field definition that produces: + - Single GoogleDriveFile when multiselect=False + - list[GoogleDriveFile] when multiselect=True + + Example: + >>> class MyBlock(Block): + ... class Input(BlockSchema): + ... document: GoogleDriveFile = GoogleDrivePickerField( + ... 
title="Select Document", + ... allowed_views=["DOCUMENTS"], + ... ) + ... + ... files: list[GoogleDriveFile] = GoogleDrivePickerField( + ... title="Select Multiple Files", + ... multiselect=True, + ... allow_folder_selection=True, + ... ) + """ + # Build configuration that will be sent to frontend + picker_config = { + "multiselect": multiselect, + "allow_folder_selection": allow_folder_selection, + "allowed_views": list(allowed_views) if allowed_views else ["DOCS"], + } + + # Add optional configurations + if allowed_mime_types: + picker_config["allowed_mime_types"] = list(allowed_mime_types) + + # Determine required scopes based on config + base_scopes = scopes if scopes is not None else [] + picker_scopes: set[str] = set(base_scopes) + if allow_folder_selection: + picker_scopes.add("https://www.googleapis.com/auth/drive") + else: + # Use drive.file for minimal scope - only access files selected by user in picker + picker_scopes.add("https://www.googleapis.com/auth/drive.file") + + views = set(allowed_views or []) + if "SPREADSHEETS" in views: + picker_scopes.add("https://www.googleapis.com/auth/spreadsheets.readonly") + if "DOCUMENTS" in views or "DOCS" in views: + picker_scopes.add("https://www.googleapis.com/auth/documents.readonly") + + picker_config["scopes"] = sorted(picker_scopes) + + # Set appropriate default value + default_value = [] if multiselect else None + + # Use SchemaField to handle format properly + return SchemaField( + default=default_value, + title=title, + description=description, + placeholder=placeholder or "Choose from Google Drive", + format="google-drive-picker", + advanced=False, + json_schema_extra={ + "google_drive_picker_config": picker_config, + **kwargs, + }, + ) + + +DRIVE_API_URL = "https://www.googleapis.com/drive/v3/files" +_requests = Requests(trusted_origins=["https://www.googleapis.com"]) + + +def GoogleDriveAttachmentField( + *, + title: str, + description: str | None = None, + placeholder: str | None = None, + multiselect: 
bool = True, + allowed_mime_types: list[str] | None = None, + **extra: Any, +) -> Any: + return GoogleDrivePickerField( + multiselect=multiselect, + allowed_views=list(ATTACHMENT_VIEWS), + allowed_mime_types=allowed_mime_types, + title=title, + description=description, + placeholder=placeholder or "Choose files from Google Drive", + **extra, + ) + + +async def drive_file_to_media_file( + drive_file: GoogleDriveFile, *, graph_exec_id: str, access_token: str +) -> MediaFileType: + if drive_file.is_folder: + raise ValueError("Google Drive selection must be a file.") + if not access_token: + raise ValueError("Google Drive access token is required for file download.") + + url = f"{DRIVE_API_URL}/{drive_file.id}?alt=media" + response = await _requests.get( + url, headers={"Authorization": f"Bearer {access_token}"} + ) + + mime_type = drive_file.mime_type or response.headers.get( + "content-type", "application/octet-stream" + ) + + MAX_FILE_SIZE = 100 * 1024 * 1024 + if len(response.content) > MAX_FILE_SIZE: + raise ValueError( + f"File too large: {len(response.content)} bytes > {MAX_FILE_SIZE} bytes" + ) + + base_path = Path(get_exec_file_path(graph_exec_id, "")) + base_path.mkdir(parents=True, exist_ok=True) + + extension = mimetypes.guess_extension(mime_type, strict=False) or ".bin" + filename = f"{uuid.uuid4()}{extension}" + target_path = base_path / filename + + await scan_content_safe(response.content, filename=filename) + await asyncio.to_thread(target_path.write_bytes, response.content) + + return MediaFileType(str(target_path.relative_to(base_path))) diff --git a/autogpt_platform/backend/backend/blocks/google/sheets.py b/autogpt_platform/backend/backend/blocks/google/sheets.py index c10bcfb255..e8d2d1d74a 100644 --- a/autogpt_platform/backend/backend/blocks/google/sheets.py +++ b/autogpt_platform/backend/backend/blocks/google/sheets.py @@ -5,6 +5,7 @@ from typing import Any from google.oauth2.credentials import Credentials from googleapiclient.discovery import 
build +from backend.blocks.google._drive import GoogleDriveFile, GoogleDrivePickerField from backend.data.block import ( Block, BlockCategory, @@ -160,6 +161,7 @@ def _convert_dicts_to_rows( def _build_sheets_service(credentials: GoogleCredentials): + """Build Sheets service from platform credentials (with refresh token).""" settings = Settings() creds = Credentials( token=( @@ -180,6 +182,41 @@ def _build_sheets_service(credentials: GoogleCredentials): return build("sheets", "v4", credentials=creds) +def _validate_spreadsheet_file(spreadsheet_file: "GoogleDriveFile") -> str | None: + """Validate that the selected file is a Google Sheets spreadsheet. + + Returns None if valid, error message string if invalid. + """ + if spreadsheet_file.mime_type != "application/vnd.google-apps.spreadsheet": + file_type = spreadsheet_file.mime_type + file_name = spreadsheet_file.name + if file_type == "text/csv": + return f"Cannot use CSV file '{file_name}' with Google Sheets block. Please use a CSV reader block instead, or convert the CSV to a Google Sheets spreadsheet first." + elif file_type in [ + "application/vnd.ms-excel", + "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ]: + return f"Cannot use Excel file '{file_name}' with Google Sheets block. Please use an Excel reader block instead, or convert to Google Sheets first." + else: + return f"Cannot use file '{file_name}' (type: {file_type}) with Google Sheets block. This block only works with Google Sheets spreadsheets." + return None + + +def _handle_sheets_api_error(error_msg: str, operation: str = "access") -> str: + """Convert common Google Sheets API errors to user-friendly messages.""" + if "Request contains an invalid argument" in error_msg: + return f"Invalid request to Google Sheets API. This usually means the file is not a Google Sheets spreadsheet, the range is invalid, or you don't have permission to {operation} this file." 
+ elif "The caller does not have permission" in error_msg or "Forbidden" in error_msg: + if operation in ["write", "modify", "update", "append", "clear"]: + return "Permission denied. You don't have edit access to this spreadsheet. Make sure it's shared with edit permissions." + else: + return "Permission denied. You don't have access to this spreadsheet. Make sure it's shared with you and try re-selecting the file." + elif "not found" in error_msg.lower() or "does not exist" in error_msg.lower(): + return "Spreadsheet not found. The file may have been deleted or the link is invalid." + else: + return f"Failed to {operation} Google Sheet: {error_msg}" + + class SheetOperation(str, Enum): CREATE = "create" DELETE = "delete" @@ -216,18 +253,24 @@ class GoogleSheetsReadBlock(Block): credentials: GoogleCredentialsInput = GoogleCredentialsField( ["https://www.googleapis.com/auth/spreadsheets.readonly"] ) - spreadsheet_id: str = SchemaField( - description="The ID or URL of the spreadsheet to read from", - title="Spreadsheet ID or URL", + spreadsheet: GoogleDriveFile = GoogleDrivePickerField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], ) range: str = SchemaField( description="The A1 notation of the range to read", + placeholder="Sheet1!A1:Z1000", ) class Output(BlockSchemaOutput): result: list[list[str]] = SchemaField( description="The data read from the spreadsheet", ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)", + ) error: str = SchemaField( description="Error message if any", ) @@ -241,9 +284,13 @@ class GoogleSheetsReadBlock(Block): output_schema=GoogleSheetsReadBlock.Output, disabled=GOOGLE_SHEETS_DISABLED, test_input={ - "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", - "range": "Sheet1!A1:B2", "credentials": TEST_CREDENTIALS_INPUT, + 
"spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "range": "Sheet1!A1:B2", }, test_credentials=TEST_CREDENTIALS, test_output=[ @@ -254,6 +301,17 @@ class GoogleSheetsReadBlock(Block): ["Alice", "85"], ], ), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ), + ), ], test_mock={ "_read_sheet": lambda *args, **kwargs: [ @@ -266,16 +324,52 @@ class GoogleSheetsReadBlock(Block): async def run( self, input_data: Input, *, credentials: GoogleCredentials, **kwargs ) -> BlockOutput: - service = _build_sheets_service(credentials) - spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id) - data = await asyncio.to_thread( - self._read_sheet, service, spreadsheet_id, input_data.range - ) - yield "result", data + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + # Check if the selected file is actually a Google Sheets spreadsheet + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + try: + service = _build_sheets_service(credentials) + spreadsheet_id = input_data.spreadsheet.id + data = await asyncio.to_thread( + self._read_sheet, service, spreadsheet_id, input_data.range + ) + yield "result", data + # Output the GoogleDriveFile for chaining + yield "spreadsheet", GoogleDriveFile( + id=spreadsheet_id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{spreadsheet_id}/edit", + 
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ) + except Exception as e: + yield "error", _handle_sheets_api_error(str(e), "read") def _read_sheet(self, service, spreadsheet_id: str, range: str) -> list[list[str]]: sheet = service.spreadsheets() - result = sheet.values().get(spreadsheetId=spreadsheet_id, range=range).execute() + range_to_use = range or "A:Z" + sheet_name, cell_range = parse_a1_notation(range_to_use) + if sheet_name: + cleaned_sheet = sheet_name.strip().strip("'\"") + formatted_sheet = format_sheet_name(cleaned_sheet) + cell_part = cell_range.strip() if cell_range else "" + if cell_part: + range_to_use = f"{formatted_sheet}!{cell_part}" + else: + range_to_use = f"{formatted_sheet}!A:Z" + # If no sheet name, keep the original range (e.g., "A1:B2" or "B:B") + result = ( + sheet.values() + .get(spreadsheetId=spreadsheet_id, range=range_to_use) + .execute() + ) return result.get("values", []) @@ -284,12 +378,15 @@ class GoogleSheetsWriteBlock(Block): credentials: GoogleCredentialsInput = GoogleCredentialsField( ["https://www.googleapis.com/auth/spreadsheets"] ) - spreadsheet_id: str = SchemaField( - description="The ID or URL of the spreadsheet to write to", - title="Spreadsheet ID or URL", + spreadsheet: GoogleDriveFile = GoogleDrivePickerField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], ) range: str = SchemaField( description="The A1 notation of the range to write", + placeholder="Sheet1!A1:B2", ) values: list[list[str]] = SchemaField( description="The data to write to the spreadsheet", @@ -299,6 +396,9 @@ class GoogleSheetsWriteBlock(Block): result: dict = SchemaField( description="The result of the write operation", ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)", + ) error: str = 
SchemaField( description="Error message if any", ) @@ -312,13 +412,17 @@ class GoogleSheetsWriteBlock(Block): output_schema=GoogleSheetsWriteBlock.Output, disabled=GOOGLE_SHEETS_DISABLED, test_input={ - "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "credentials": TEST_CREDENTIALS_INPUT, + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, "range": "Sheet1!A1:B2", "values": [ ["Name", "Score"], ["Bob", "90"], ], - "credentials": TEST_CREDENTIALS_INPUT, }, test_credentials=TEST_CREDENTIALS, test_output=[ @@ -326,6 +430,17 @@ class GoogleSheetsWriteBlock(Block): "result", {"updatedCells": 4, "updatedColumns": 2, "updatedRows": 2}, ), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ), + ), ], test_mock={ "_write_sheet": lambda *args, **kwargs: { @@ -339,16 +454,44 @@ class GoogleSheetsWriteBlock(Block): async def run( self, input_data: Input, *, credentials: GoogleCredentials, **kwargs ) -> BlockOutput: - service = _build_sheets_service(credentials) - spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id) - result = await asyncio.to_thread( - self._write_sheet, - service, - spreadsheet_id, - input_data.range, - input_data.values, - ) - yield "result", result + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + # Check if the selected file is actually a Google Sheets spreadsheet + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + # Customize message for write operations on CSV files + if "CSV file" in validation_error: + yield 
"error", validation_error.replace( + "Please use a CSV reader block instead, or", + "CSV files are read-only through Google Drive. Please", + ) + else: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._write_sheet, + service, + input_data.spreadsheet.id, + input_data.range, + input_data.values, + ) + yield "result", result + # Output the GoogleDriveFile for chaining + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ) + except Exception as e: + yield "error", _handle_sheets_api_error(str(e), "write") def _write_sheet( self, service, spreadsheet_id: str, range: str, values: list[list[str]] @@ -373,9 +516,11 @@ class GoogleSheetsAppendBlock(Block): credentials: GoogleCredentialsInput = GoogleCredentialsField( ["https://www.googleapis.com/auth/spreadsheets"] ) - spreadsheet_id: str = SchemaField( - description="Spreadsheet ID or URL", - title="Spreadsheet ID or URL", + spreadsheet: GoogleDriveFile = GoogleDrivePickerField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], ) sheet_name: str = SchemaField( description="Optional sheet to append to (defaults to first sheet)", @@ -411,6 +556,12 @@ class GoogleSheetsAppendBlock(Block): class Output(BlockSchemaOutput): result: dict = SchemaField(description="Append API response") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)", + ) + error: str = SchemaField( + description="Error message if any", + ) def __init__(self): super().__init__( @@ 
-421,13 +572,28 @@ class GoogleSheetsAppendBlock(Block): output_schema=GoogleSheetsAppendBlock.Output, disabled=GOOGLE_SHEETS_DISABLED, test_input={ - "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", - "values": [["Charlie", "95"]], "credentials": TEST_CREDENTIALS_INPUT, + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "values": [["Charlie", "95"]], }, test_credentials=TEST_CREDENTIALS, test_output=[ ("result", {"updatedCells": 2, "updatedColumns": 2, "updatedRows": 1}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ), + ), ], test_mock={ "_append_sheet": lambda *args, **kwargs: { @@ -441,37 +607,58 @@ class GoogleSheetsAppendBlock(Block): async def run( self, input_data: Input, *, credentials: GoogleCredentials, **kwargs ) -> BlockOutput: - service = _build_sheets_service(credentials) - spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id) - # Determine which values to use and convert if needed - processed_values: list[list[str]] + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return - # Validate that only one format is provided - if input_data.values and input_data.dict_values: - raise ValueError("Provide either 'values' or 'dict_values', not both") + # Check if the selected file is actually a Google Sheets spreadsheet + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + try: + service = _build_sheets_service(credentials) - if input_data.dict_values: - if not input_data.headers: - 
raise ValueError("Headers are required when using dict_values") - processed_values = _convert_dicts_to_rows( - input_data.dict_values, input_data.headers + # Determine which values to use and convert if needed + processed_values: list[list[str]] + + # Validate that only one format is provided + if input_data.values and input_data.dict_values: + raise ValueError("Provide either 'values' or 'dict_values', not both") + + if input_data.dict_values: + if not input_data.headers: + raise ValueError("Headers are required when using dict_values") + processed_values = _convert_dicts_to_rows( + input_data.dict_values, input_data.headers + ) + elif input_data.values: + processed_values = input_data.values + else: + raise ValueError("Either 'values' or 'dict_values' must be provided") + + result = await asyncio.to_thread( + self._append_sheet, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + processed_values, + input_data.range, + input_data.value_input_option, + input_data.insert_data_option, ) - elif input_data.values: - processed_values = input_data.values - else: - raise ValueError("Either 'values' or 'dict_values' must be provided") - - result = await asyncio.to_thread( - self._append_sheet, - service, - spreadsheet_id, - input_data.sheet_name, - processed_values, - input_data.range, - input_data.value_input_option, - input_data.insert_data_option, - ) - yield "result", result + yield "result", result + # Output the GoogleDriveFile for chaining + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ) + except Exception as e: + yield "error", f"Failed to append to Google Sheet: {str(e)}" def _append_sheet( self, @@ -512,18 +699,24 @@ class GoogleSheetsClearBlock(Block): 
credentials: GoogleCredentialsInput = GoogleCredentialsField( ["https://www.googleapis.com/auth/spreadsheets"] ) - spreadsheet_id: str = SchemaField( - description="The ID or URL of the spreadsheet to clear", - title="Spreadsheet ID or URL", + spreadsheet: GoogleDriveFile = GoogleDrivePickerField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], ) range: str = SchemaField( description="The A1 notation of the range to clear", + placeholder="Sheet1!A1:B2", ) class Output(BlockSchemaOutput): result: dict = SchemaField( description="The result of the clear operation", ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)", + ) error: str = SchemaField( description="Error message if any", ) @@ -537,13 +730,28 @@ class GoogleSheetsClearBlock(Block): output_schema=GoogleSheetsClearBlock.Output, disabled=GOOGLE_SHEETS_DISABLED, test_input={ - "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", - "range": "Sheet1!A1:B2", "credentials": TEST_CREDENTIALS_INPUT, + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "range": "Sheet1!A1:B2", }, test_credentials=TEST_CREDENTIALS, test_output=[ ("result", {"clearedRange": "Sheet1!A1:B2"}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ), + ), ], test_mock={ "_clear_range": lambda *args, **kwargs: { @@ -555,15 +763,36 @@ class GoogleSheetsClearBlock(Block): async def run( self, 
input_data: Input, *, credentials: GoogleCredentials, **kwargs ) -> BlockOutput: - service = _build_sheets_service(credentials) - spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id) - result = await asyncio.to_thread( - self._clear_range, - service, - spreadsheet_id, - input_data.range, - ) - yield "result", result + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + # Check if the selected file is actually a Google Sheets spreadsheet + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._clear_range, + service, + input_data.spreadsheet.id, + input_data.range, + ) + yield "result", result + # Output the GoogleDriveFile for chaining + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ) + except Exception as e: + yield "error", f"Failed to clear Google Sheet range: {str(e)}" def _clear_range(self, service, spreadsheet_id: str, range: str) -> dict: result = ( @@ -580,15 +809,20 @@ class GoogleSheetsMetadataBlock(Block): credentials: GoogleCredentialsInput = GoogleCredentialsField( ["https://www.googleapis.com/auth/spreadsheets.readonly"] ) - spreadsheet_id: str = SchemaField( - description="The ID or URL of the spreadsheet to get metadata for", - title="Spreadsheet ID or URL", + spreadsheet: GoogleDriveFile = GoogleDrivePickerField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], ) class Output(BlockSchemaOutput): result: dict 
= SchemaField( description="The metadata of the spreadsheet including sheets info", ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)", + ) error: str = SchemaField( description="Error message if any", ) @@ -602,8 +836,12 @@ class GoogleSheetsMetadataBlock(Block): output_schema=GoogleSheetsMetadataBlock.Output, disabled=GOOGLE_SHEETS_DISABLED, test_input={ - "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", "credentials": TEST_CREDENTIALS_INPUT, + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, }, test_credentials=TEST_CREDENTIALS, test_output=[ @@ -614,6 +852,17 @@ class GoogleSheetsMetadataBlock(Block): "sheets": [{"title": "Sheet1", "sheetId": 0}], }, ), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ), + ), ], test_mock={ "_get_metadata": lambda *args, **kwargs: { @@ -626,14 +875,35 @@ class GoogleSheetsMetadataBlock(Block): async def run( self, input_data: Input, *, credentials: GoogleCredentials, **kwargs ) -> BlockOutput: - service = _build_sheets_service(credentials) - spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id) - result = await asyncio.to_thread( - self._get_metadata, - service, - spreadsheet_id, - ) - yield "result", result + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + # Check if the selected file is actually a Google Sheets spreadsheet + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", 
validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._get_metadata, + service, + input_data.spreadsheet.id, + ) + yield "result", result + # Output the GoogleDriveFile for chaining + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ) + except Exception as e: + yield "error", f"Failed to get spreadsheet metadata: {str(e)}" def _get_metadata(self, service, spreadsheet_id: str) -> dict: result = ( @@ -661,9 +931,11 @@ class GoogleSheetsManageSheetBlock(Block): credentials: GoogleCredentialsInput = GoogleCredentialsField( ["https://www.googleapis.com/auth/spreadsheets"] ) - spreadsheet_id: str = SchemaField( - description="Spreadsheet ID or URL", - title="Spreadsheet ID or URL", + spreadsheet: GoogleDriveFile = GoogleDrivePickerField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], ) operation: SheetOperation = SchemaField(description="Operation to perform") sheet_name: str = SchemaField( @@ -679,6 +951,12 @@ class GoogleSheetsManageSheetBlock(Block): class Output(BlockSchemaOutput): result: dict = SchemaField(description="Operation result") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)", + ) + error: str = SchemaField( + description="Error message if any", + ) def __init__(self): super().__init__( @@ -689,13 +967,30 @@ class GoogleSheetsManageSheetBlock(Block): output_schema=GoogleSheetsManageSheetBlock.Output, disabled=GOOGLE_SHEETS_DISABLED, test_input={ - "spreadsheet_id": 
"1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "credentials": TEST_CREDENTIALS_INPUT, + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, "operation": SheetOperation.CREATE, "sheet_name": "NewSheet", - "credentials": TEST_CREDENTIALS_INPUT, }, test_credentials=TEST_CREDENTIALS, - test_output=[("result", {"success": True, "sheetId": 123})], + test_output=[ + ("result", {"success": True, "sheetId": 123}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ), + ), + ], test_mock={ "_manage_sheet": lambda *args, **kwargs: { "success": True, @@ -707,18 +1002,39 @@ class GoogleSheetsManageSheetBlock(Block): async def run( self, input_data: Input, *, credentials: GoogleCredentials, **kwargs ) -> BlockOutput: - service = _build_sheets_service(credentials) - spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id) - result = await asyncio.to_thread( - self._manage_sheet, - service, - spreadsheet_id, - input_data.operation, - input_data.sheet_name, - input_data.source_sheet_id, - input_data.destination_sheet_name, - ) - yield "result", result + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + # Check if the selected file is actually a Google Sheets spreadsheet + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._manage_sheet, + service, + input_data.spreadsheet.id, + input_data.operation, + input_data.sheet_name, + 
input_data.source_sheet_id, + input_data.destination_sheet_name, + ) + yield "result", result + # Output the GoogleDriveFile for chaining + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ) + except Exception as e: + yield "error", f"Failed to manage sheet: {str(e)}" def _manage_sheet( self, @@ -731,17 +1047,21 @@ class GoogleSheetsManageSheetBlock(Block): ) -> dict: requests = [] - # Ensure a target sheet name when needed - target_name = resolve_sheet_name(service, spreadsheet_id, sheet_name) - if operation == SheetOperation.CREATE: + # For CREATE, use sheet_name directly or default to "New Sheet" + target_name = sheet_name or "New Sheet" requests.append({"addSheet": {"properties": {"title": target_name}}}) elif operation == SheetOperation.DELETE: + # For DELETE, resolve sheet name (fall back to first sheet if empty) + target_name = resolve_sheet_name( + service, spreadsheet_id, sheet_name or None + ) sid = sheet_id_by_name(service, spreadsheet_id, target_name) if sid is None: return {"error": f"Sheet '{target_name}' not found"} requests.append({"deleteSheet": {"sheetId": sid}}) elif operation == SheetOperation.COPY: + # For COPY, use source_sheet_id and destination_sheet_name directly requests.append( { "duplicateSheet": { @@ -768,9 +1088,11 @@ class GoogleSheetsBatchOperationsBlock(Block): credentials: GoogleCredentialsInput = GoogleCredentialsField( ["https://www.googleapis.com/auth/spreadsheets"] ) - spreadsheet_id: str = SchemaField( - description="The ID or URL of the spreadsheet to perform batch operations on", - title="Spreadsheet ID or URL", + spreadsheet: GoogleDriveFile = GoogleDrivePickerField( + title="Spreadsheet", + description="Select a Google Sheets 
spreadsheet", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], ) operations: list[BatchOperation] = SchemaField( description="List of operations to perform", @@ -780,6 +1102,9 @@ class GoogleSheetsBatchOperationsBlock(Block): result: dict = SchemaField( description="The result of the batch operations", ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)", + ) error: str = SchemaField( description="Error message if any", ) @@ -793,7 +1118,12 @@ class GoogleSheetsBatchOperationsBlock(Block): output_schema=GoogleSheetsBatchOperationsBlock.Output, disabled=GOOGLE_SHEETS_DISABLED, test_input={ - "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "credentials": TEST_CREDENTIALS_INPUT, + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, "operations": [ { "type": BatchOperationType.UPDATE, @@ -806,11 +1136,21 @@ class GoogleSheetsBatchOperationsBlock(Block): "values": [["Data1", "Data2"]], }, ], - "credentials": TEST_CREDENTIALS_INPUT, }, test_credentials=TEST_CREDENTIALS, test_output=[ ("result", {"totalUpdatedCells": 4, "replies": []}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ), + ), ], test_mock={ "_batch_operations": lambda *args, **kwargs: { @@ -823,15 +1163,35 @@ class GoogleSheetsBatchOperationsBlock(Block): async def run( self, input_data: Input, *, credentials: GoogleCredentials, **kwargs ) -> BlockOutput: - service = _build_sheets_service(credentials) - spreadsheet_id = 
extract_spreadsheet_id(input_data.spreadsheet_id) - result = await asyncio.to_thread( - self._batch_operations, - service, - spreadsheet_id, - input_data.operations, - ) - yield "result", result + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + # Check if the selected file is actually a Google Sheets spreadsheet + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._batch_operations, + service, + input_data.spreadsheet.id, + input_data.operations, + ) + yield "result", result + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ) + except Exception as e: + yield "error", f"Failed to perform batch operations: {str(e)}" def _batch_operations( self, service, spreadsheet_id: str, operations: list[BatchOperation] @@ -885,9 +1245,11 @@ class GoogleSheetsFindReplaceBlock(Block): credentials: GoogleCredentialsInput = GoogleCredentialsField( ["https://www.googleapis.com/auth/spreadsheets"] ) - spreadsheet_id: str = SchemaField( - description="The ID or URL of the spreadsheet to perform find/replace on", - title="Spreadsheet ID or URL", + spreadsheet: GoogleDriveFile = GoogleDrivePickerField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], ) find_text: str = SchemaField( description="The text to find", @@ -912,6 +1274,9 @@ class GoogleSheetsFindReplaceBlock(Block): result: dict = SchemaField( description="The result of the find/replace operation 
including number of replacements", ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)", + ) error: str = SchemaField( description="Error message if any", ) @@ -925,16 +1290,31 @@ class GoogleSheetsFindReplaceBlock(Block): output_schema=GoogleSheetsFindReplaceBlock.Output, disabled=GOOGLE_SHEETS_DISABLED, test_input={ - "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "credentials": TEST_CREDENTIALS_INPUT, + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, "find_text": "old_value", "replace_text": "new_value", "match_case": False, "match_entire_cell": False, - "credentials": TEST_CREDENTIALS_INPUT, }, test_credentials=TEST_CREDENTIALS, test_output=[ ("result", {"occurrencesChanged": 5}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ), + ), ], test_mock={ "_find_replace": lambda *args, **kwargs: {"occurrencesChanged": 5}, @@ -944,19 +1324,39 @@ class GoogleSheetsFindReplaceBlock(Block): async def run( self, input_data: Input, *, credentials: GoogleCredentials, **kwargs ) -> BlockOutput: - service = _build_sheets_service(credentials) - spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id) - result = await asyncio.to_thread( - self._find_replace, - service, - spreadsheet_id, - input_data.find_text, - input_data.replace_text, - input_data.sheet_id, - input_data.match_case, - input_data.match_entire_cell, - ) - yield "result", result + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + # 
Check if the selected file is actually a Google Sheets spreadsheet + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._find_replace, + service, + input_data.spreadsheet.id, + input_data.find_text, + input_data.replace_text, + input_data.sheet_id, + input_data.match_case, + input_data.match_entire_cell, + ) + yield "result", result + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ) + except Exception as e: + yield "error", f"Failed to find/replace in Google Sheet: {str(e)}" def _find_replace( self, @@ -995,9 +1395,11 @@ class GoogleSheetsFindBlock(Block): credentials: GoogleCredentialsInput = GoogleCredentialsField( ["https://www.googleapis.com/auth/spreadsheets.readonly"] ) - spreadsheet_id: str = SchemaField( - description="The ID or URL of the spreadsheet to search in", - title="Spreadsheet ID or URL", + spreadsheet: GoogleDriveFile = GoogleDrivePickerField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], ) find_text: str = SchemaField( description="The text to find", @@ -1034,6 +1436,9 @@ class GoogleSheetsFindBlock(Block): count: int = SchemaField( description="Number of occurrences found", ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)", + ) error: str = SchemaField( description="Error message if any", ) @@ -1047,13 +1452,17 @@ class GoogleSheetsFindBlock(Block): 
output_schema=GoogleSheetsFindBlock.Output, disabled=GOOGLE_SHEETS_DISABLED, test_input={ - "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "credentials": TEST_CREDENTIALS_INPUT, + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, "find_text": "search_value", "match_case": False, "match_entire_cell": False, "find_all": True, "range": "Sheet1!A1:C10", - "credentials": TEST_CREDENTIALS_INPUT, }, test_credentials=TEST_CREDENTIALS, test_output=[ @@ -1067,6 +1476,17 @@ class GoogleSheetsFindBlock(Block): ], ), ("result", {"success": True}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ), + ), ], test_mock={ "_find_text": lambda *args, **kwargs: { @@ -1083,22 +1503,42 @@ class GoogleSheetsFindBlock(Block): async def run( self, input_data: Input, *, credentials: GoogleCredentials, **kwargs ) -> BlockOutput: - service = _build_sheets_service(credentials) - spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id) - result = await asyncio.to_thread( - self._find_text, - service, - spreadsheet_id, - input_data.find_text, - input_data.sheet_id, - input_data.match_case, - input_data.match_entire_cell, - input_data.find_all, - input_data.range, - ) - yield "count", result["count"] - yield "locations", result["locations"] - yield "result", {"success": True} + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + # Check if the selected file is actually a Google Sheets spreadsheet + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield 
"error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._find_text, + service, + input_data.spreadsheet.id, + input_data.find_text, + input_data.sheet_id, + input_data.match_case, + input_data.match_entire_cell, + input_data.find_all, + input_data.range, + ) + yield "count", result["count"] + yield "locations", result["locations"] + yield "result", {"success": True} + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ) + except Exception as e: + yield "error", f"Failed to find text in Google Sheet: {str(e)}" def _find_text( self, @@ -1263,11 +1703,16 @@ class GoogleSheetsFormatBlock(Block): credentials: GoogleCredentialsInput = GoogleCredentialsField( ["https://www.googleapis.com/auth/spreadsheets"] ) - spreadsheet_id: str = SchemaField( - description="Spreadsheet ID or URL", - title="Spreadsheet ID or URL", + spreadsheet: GoogleDriveFile = GoogleDrivePickerField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + range: str = SchemaField( + description="A1 notation – sheet optional", + placeholder="Sheet1!A1:B2", ) - range: str = SchemaField(description="A1 notation – sheet optional") background_color: dict = SchemaField(default={}) text_color: dict = SchemaField(default={}) bold: bool = SchemaField(default=False) @@ -1276,6 +1721,12 @@ class GoogleSheetsFormatBlock(Block): class Output(BlockSchemaOutput): result: dict = SchemaField(description="API response or success flag") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet as a GoogleDriveFile 
(for chaining to other blocks)", + ) + error: str = SchemaField( + description="Error message if any", + ) def __init__(self): super().__init__( @@ -1286,37 +1737,74 @@ class GoogleSheetsFormatBlock(Block): output_schema=GoogleSheetsFormatBlock.Output, disabled=GOOGLE_SHEETS_DISABLED, test_input={ - "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "credentials": TEST_CREDENTIALS_INPUT, + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, "range": "A1:B2", "background_color": {"red": 1.0, "green": 0.9, "blue": 0.9}, "bold": True, - "credentials": TEST_CREDENTIALS_INPUT, }, test_credentials=TEST_CREDENTIALS, - test_output=[("result", {"success": True})], + test_output=[ + ("result", {"success": True}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ), + ), + ], test_mock={"_format_cells": lambda *args, **kwargs: {"success": True}}, ) async def run( self, input_data: Input, *, credentials: GoogleCredentials, **kwargs ) -> BlockOutput: - service = _build_sheets_service(credentials) - spreadsheet_id = extract_spreadsheet_id(input_data.spreadsheet_id) - result = await asyncio.to_thread( - self._format_cells, - service, - spreadsheet_id, - input_data.range, - input_data.background_color, - input_data.text_color, - input_data.bold, - input_data.italic, - input_data.font_size, - ) - if "error" in result: - yield "error", result["error"] - else: - yield "result", result + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + # Check if the selected file is actually a Google Sheets spreadsheet 
+ validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._format_cells, + service, + input_data.spreadsheet.id, + input_data.range, + input_data.background_color, + input_data.text_color, + input_data.bold, + input_data.italic, + input_data.font_size, + ) + if "error" in result: + yield "error", result["error"] + else: + yield "result", result + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ) + except Exception as e: + yield "error", f"Failed to format Google Sheet cells: {str(e)}" def _format_cells( self, @@ -1402,6 +1890,9 @@ class GoogleSheetsCreateSpreadsheetBlock(Block): result: dict = SchemaField( description="The result containing spreadsheet ID and URL", ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The created spreadsheet as a GoogleDriveFile (for chaining to other blocks)", + ) spreadsheet_id: str = SchemaField( description="The ID of the created spreadsheet", ) @@ -1427,6 +1918,17 @@ class GoogleSheetsCreateSpreadsheetBlock(Block): }, test_credentials=TEST_CREDENTIALS, test_output=[ + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ), + ), ("spreadsheet_id", "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms"), ( "spreadsheet_url", @@ -1438,6 +1940,7 @@ class 
GoogleSheetsCreateSpreadsheetBlock(Block): "_create_spreadsheet": lambda *args, **kwargs: { "spreadsheetId": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", "spreadsheetUrl": "https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + "title": "Test Spreadsheet", }, }, ) @@ -1456,8 +1959,19 @@ class GoogleSheetsCreateSpreadsheetBlock(Block): if "error" in result: yield "error", result["error"] else: - yield "spreadsheet_id", result["spreadsheetId"] - yield "spreadsheet_url", result["spreadsheetUrl"] + spreadsheet_id = result["spreadsheetId"] + spreadsheet_url = result["spreadsheetUrl"] + # Output the full GoogleDriveFile object for easy chaining + yield "spreadsheet", GoogleDriveFile( + id=spreadsheet_id, + name=result.get("title", input_data.title), + mimeType="application/vnd.google-apps.spreadsheet", + url=spreadsheet_url, + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ) + yield "spreadsheet_id", spreadsheet_id + yield "spreadsheet_url", spreadsheet_url yield "result", {"success": True} def _create_spreadsheet(self, service, title: str, sheet_names: list[str]) -> dict: @@ -1493,6 +2007,152 @@ class GoogleSheetsCreateSpreadsheetBlock(Block): return { "spreadsheetId": spreadsheet_id, "spreadsheetUrl": spreadsheet_url, + "title": title, } except Exception as e: return {"error": str(e)} + + +class GoogleSheetsUpdateCellBlock(Block): + """Update a single cell in a Google Sheets spreadsheet.""" + + class Input(BlockSchemaInput): + credentials: GoogleCredentialsInput = GoogleCredentialsField( + ["https://www.googleapis.com/auth/spreadsheets"] + ) + spreadsheet: GoogleDriveFile = GoogleDrivePickerField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + cell: str = SchemaField( + description="Cell address in A1 notation (e.g., 'A1', 'Sheet1!B2')", + 
placeholder="A1", + ) + value: str = SchemaField( + description="Value to write to the cell", + ) + value_input_option: ValueInputOption = SchemaField( + description="How input data should be interpreted", + default=ValueInputOption.USER_ENTERED, + advanced=True, + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="The result of the update operation", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)", + ) + error: str = SchemaField( + description="Error message if any", + ) + + def __init__(self): + super().__init__( + id="df521b68-62d9-42e4-924f-fb6c245516fc", + description="Update a single cell in a Google Sheets spreadsheet.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsUpdateCellBlock.Input, + output_schema=GoogleSheetsUpdateCellBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "cell": "A1", + "value": "Hello World", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "result", + {"updatedCells": 1, "updatedColumns": 1, "updatedRows": 1}, + ), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ), + ), + ], + test_mock={ + "_update_cell": lambda *args, **kwargs: { + "updatedCells": 1, + "updatedColumns": 1, + "updatedRows": 1, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + try: + if not 
input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + # Check if the selected file is actually a Google Sheets spreadsheet + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._update_cell, + service, + input_data.spreadsheet.id, + input_data.cell, + input_data.value, + input_data.value_input_option, + ) + + yield "result", result + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + ) + except Exception as e: + yield "error", _handle_sheets_api_error(str(e), "update") + + def _update_cell( + self, + service, + spreadsheet_id: str, + cell: str, + value: str, + value_input_option: ValueInputOption, + ) -> dict: + body = {"values": [[value]]} + result = ( + service.spreadsheets() + .values() + .update( + spreadsheetId=spreadsheet_id, + range=cell, + valueInputOption=value_input_option.value, + body=body, + ) + .execute() + ) + return { + "updatedCells": result.get("updatedCells", 0), + "updatedRows": result.get("updatedRows", 0), + "updatedColumns": result.get("updatedColumns", 0), + } diff --git a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py new file mode 100644 index 0000000000..1dd5dbac9d --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py @@ -0,0 +1,160 @@ +import logging +from typing import Any, Literal + +from prisma.enums import ReviewStatus + +from backend.data.block import ( + Block, + BlockCategory, + BlockOutput, + BlockSchemaInput, + BlockSchemaOutput, +) +from 
backend.data.execution import ExecutionStatus +from backend.data.human_review import ReviewResult +from backend.data.model import SchemaField +from backend.executor.manager import async_update_node_execution_status +from backend.util.clients import get_database_manager_async_client + +logger = logging.getLogger(__name__) + + +class HumanInTheLoopBlock(Block): + """ + This block pauses execution and waits for human approval or modification of the data. + + When executed, it creates a pending review entry and sets the node execution status + to REVIEW. The execution will remain paused until a human user either: + - Approves the data (with or without modifications) + - Rejects the data + + This is useful for workflows that require human validation or intervention before + proceeding to the next steps. + """ + + class Input(BlockSchemaInput): + data: Any = SchemaField(description="The data to be reviewed by a human user") + name: str = SchemaField( + description="A descriptive name for what this data represents", + ) + editable: bool = SchemaField( + description="Whether the human reviewer can edit the data", + default=True, + advanced=True, + ) + + class Output(BlockSchemaOutput): + reviewed_data: Any = SchemaField( + description="The data after human review (may be modified)" + ) + status: Literal["approved", "rejected"] = SchemaField( + description="Status of the review: 'approved' or 'rejected'" + ) + review_message: str = SchemaField( + description="Any message provided by the reviewer", default="" + ) + + def __init__(self): + super().__init__( + id="8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d", + description="Pause execution and wait for human approval or modification of data", + categories={BlockCategory.BASIC}, + input_schema=HumanInTheLoopBlock.Input, + output_schema=HumanInTheLoopBlock.Output, + test_input={ + "data": {"name": "John Doe", "age": 30}, + "name": "User profile data", + "editable": True, + }, + test_output=[ + ("reviewed_data", {"name": "John Doe", 
"age": 30}), + ("status", "approved"), + ("review_message", ""), + ], + test_mock={ + "get_or_create_human_review": lambda *_args, **_kwargs: ReviewResult( + data={"name": "John Doe", "age": 30}, + status=ReviewStatus.APPROVED, + message="", + processed=False, + node_exec_id="test-node-exec-id", + ), + "update_node_execution_status": lambda *_args, **_kwargs: None, + }, + ) + + async def run( + self, + input_data: Input, + *, + user_id: str, + node_exec_id: str, + graph_exec_id: str, + graph_id: str, + graph_version: int, + **kwargs, + ) -> BlockOutput: + """ + Execute the Human In The Loop block. + + This method uses one function to handle the complete workflow - checking existing reviews + and creating pending ones as needed. + """ + try: + logger.debug(f"HITL block executing for node {node_exec_id}") + + # Use the data layer to handle the complete workflow + db_client = get_database_manager_async_client() + result = await db_client.get_or_create_human_review( + user_id=user_id, + node_exec_id=node_exec_id, + graph_exec_id=graph_exec_id, + graph_id=graph_id, + graph_version=graph_version, + input_data=input_data.data, + message=input_data.name, + editable=input_data.editable, + ) + except Exception as e: + logger.error(f"Error in HITL block for node {node_exec_id}: {str(e)}") + raise + + # Check if we're waiting for human input + if result is None: + logger.info( + f"HITL block pausing execution for node {node_exec_id} - awaiting human review" + ) + try: + # Set node status to REVIEW so execution manager can't mark it as COMPLETED + # The VALID_STATUS_TRANSITIONS will then prevent any unwanted status changes + # Use the proper wrapper function to ensure websocket events are published + await async_update_node_execution_status( + db_client=db_client, + exec_id=node_exec_id, + status=ExecutionStatus.REVIEW, + ) + # Execution pauses here until API routes process the review + return + except Exception as e: + logger.error( + f"Failed to update node status for HITL 
block {node_exec_id}: {str(e)}" + ) + raise + + # Review is complete (approved or rejected) - check if unprocessed + if not result.processed: + # Mark as processed before yielding + await db_client.update_review_processed_status( + node_exec_id=node_exec_id, processed=True + ) + + if result.status == ReviewStatus.APPROVED: + yield "status", "approved" + yield "reviewed_data", result.data + if result.message: + yield "review_message", result.message + + elif result.status == ReviewStatus.REJECTED: + yield "status", "rejected" + if result.message: + yield "review_message", result.message diff --git a/autogpt_platform/backend/backend/check_db.py b/autogpt_platform/backend/backend/check_db.py index 591c519f84..7e1c3ee14f 100644 --- a/autogpt_platform/backend/backend/check_db.py +++ b/autogpt_platform/backend/backend/check_db.py @@ -5,6 +5,8 @@ from datetime import datetime from faker import Faker from prisma import Prisma +from backend.data.db import query_raw_with_schema + faker = Faker() @@ -15,9 +17,9 @@ async def check_cron_job(db): try: # Check if pg_cron extension exists - extension_check = await db.query_raw("CREATE EXTENSION pg_cron;") + extension_check = await query_raw_with_schema("CREATE EXTENSION pg_cron;") print(extension_check) - extension_check = await db.query_raw( + extension_check = await query_raw_with_schema( "SELECT COUNT(*) as count FROM pg_extension WHERE extname = 'pg_cron'" ) if extension_check[0]["count"] == 0: @@ -25,7 +27,7 @@ async def check_cron_job(db): return False # Check if the refresh job exists - job_check = await db.query_raw( + job_check = await query_raw_with_schema( """ SELECT jobname, schedule, command FROM cron.job @@ -55,33 +57,33 @@ async def get_materialized_view_counts(db): print("-" * 40) # Get counts from mv_agent_run_counts - agent_runs = await db.query_raw( + agent_runs = await query_raw_with_schema( """ SELECT COUNT(*) as total_agents, SUM(run_count) as total_runs, MAX(run_count) as max_runs, MIN(run_count) as min_runs 
- FROM mv_agent_run_counts + FROM {schema_prefix}mv_agent_run_counts """ ) # Get counts from mv_review_stats - review_stats = await db.query_raw( + review_stats = await query_raw_with_schema( """ SELECT COUNT(*) as total_listings, SUM(review_count) as total_reviews, AVG(avg_rating) as overall_avg_rating - FROM mv_review_stats + FROM {schema_prefix}mv_review_stats """ ) # Get sample data from StoreAgent view - store_agents = await db.query_raw( + store_agents = await query_raw_with_schema( """ SELECT COUNT(*) as total_store_agents, AVG(runs) as avg_runs, AVG(rating) as avg_rating - FROM "StoreAgent" + FROM {schema_prefix}"StoreAgent" """ ) diff --git a/autogpt_platform/backend/backend/check_store_data.py b/autogpt_platform/backend/backend/check_store_data.py index 10aa6507ba..c17393a6d4 100644 --- a/autogpt_platform/backend/backend/check_store_data.py +++ b/autogpt_platform/backend/backend/check_store_data.py @@ -5,6 +5,8 @@ import asyncio from prisma import Prisma +from backend.data.db import query_raw_with_schema + async def check_store_data(db): """Check what store data exists in the database.""" @@ -89,11 +91,11 @@ async def check_store_data(db): sa.creator_username, sa.categories, sa.updated_at - FROM "StoreAgent" sa + FROM {schema_prefix}"StoreAgent" sa LIMIT 10; """ - store_agents = await db.query_raw(query) + store_agents = await query_raw_with_schema(query) print(f"Total store agents in view: {len(store_agents)}") if store_agents: @@ -111,22 +113,22 @@ async def check_store_data(db): # Check for any APPROVED store listing versions query = """ SELECT COUNT(*) as count - FROM "StoreListingVersion" + FROM {schema_prefix}"StoreListingVersion" WHERE "submissionStatus" = 'APPROVED' """ - result = await db.query_raw(query) + result = await query_raw_with_schema(query) approved_count = result[0]["count"] if result else 0 print(f"Approved store listing versions: {approved_count}") # Check for store listings with hasApprovedVersion = true query = """ SELECT COUNT(*) 
as count - FROM "StoreListing" + FROM {schema_prefix}"StoreListing" WHERE "hasApprovedVersion" = true AND "isDeleted" = false """ - result = await db.query_raw(query) + result = await query_raw_with_schema(query) has_approved_count = result[0]["count"] if result else 0 print(f"Store listings with approved versions: {has_approved_count}") @@ -134,10 +136,10 @@ async def check_store_data(db): query = """ SELECT COUNT(DISTINCT "agentGraphId") as unique_agents, COUNT(*) as total_executions - FROM "AgentGraphExecution" + FROM {schema_prefix}"AgentGraphExecution" """ - result = await db.query_raw(query) + result = await query_raw_with_schema(query) if result: print("\nAgent Graph Executions:") print(f" Unique agents with executions: {result[0]['unique_agents']}") diff --git a/autogpt_platform/backend/backend/data/credit_test.py b/autogpt_platform/backend/backend/data/credit_test.py index 8e9487f74a..6f604975cf 100644 --- a/autogpt_platform/backend/backend/data/credit_test.py +++ b/autogpt_platform/backend/backend/data/credit_test.py @@ -73,6 +73,7 @@ async def test_block_credit_usage(server: SpinTestServer): NodeExecutionEntry( user_id=DEFAULT_USER_ID, graph_id="test_graph", + graph_version=1, node_id="test_node", graph_exec_id="test_graph_exec", node_exec_id="test_node_exec", @@ -94,6 +95,7 @@ async def test_block_credit_usage(server: SpinTestServer): NodeExecutionEntry( user_id=DEFAULT_USER_ID, graph_id="test_graph", + graph_version=1, node_id="test_node", graph_exec_id="test_graph_exec", node_exec_id="test_node_exec", diff --git a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py index a8253f3136..b78633cf58 100644 --- a/autogpt_platform/backend/backend/data/execution.py +++ b/autogpt_platform/backend/backend/data/execution.py @@ -34,6 +34,7 @@ from prisma.types import ( AgentNodeExecutionKeyValueDataCreateInput, AgentNodeExecutionUpdateInput, AgentNodeExecutionWhereInput, + AgentNodeExecutionWhereUniqueInput, ) 
from pydantic import BaseModel, ConfigDict, JsonValue, ValidationError from pydantic.fields import Field @@ -96,11 +97,14 @@ NodesInputMasks = Mapping[str, NodeInputMask] VALID_STATUS_TRANSITIONS = { ExecutionStatus.QUEUED: [ ExecutionStatus.INCOMPLETE, + ExecutionStatus.TERMINATED, # For resuming halted execution + ExecutionStatus.REVIEW, # For resuming after review ], ExecutionStatus.RUNNING: [ ExecutionStatus.INCOMPLETE, ExecutionStatus.QUEUED, ExecutionStatus.TERMINATED, # For resuming halted execution + ExecutionStatus.REVIEW, # For resuming after review ], ExecutionStatus.COMPLETED: [ ExecutionStatus.RUNNING, @@ -109,11 +113,16 @@ VALID_STATUS_TRANSITIONS = { ExecutionStatus.INCOMPLETE, ExecutionStatus.QUEUED, ExecutionStatus.RUNNING, + ExecutionStatus.REVIEW, ], ExecutionStatus.TERMINATED: [ ExecutionStatus.INCOMPLETE, ExecutionStatus.QUEUED, ExecutionStatus.RUNNING, + ExecutionStatus.REVIEW, + ], + ExecutionStatus.REVIEW: [ + ExecutionStatus.RUNNING, ], } @@ -446,6 +455,7 @@ class NodeExecutionResult(BaseModel): user_id=self.user_id, graph_exec_id=self.graph_exec_id, graph_id=self.graph_id, + graph_version=self.graph_version, node_exec_id=self.node_exec_id, node_id=self.node_id, block_id=self.block_id, @@ -728,7 +738,7 @@ async def upsert_execution_input( input_name: str, input_data: JsonValue, node_exec_id: str | None = None, -) -> tuple[str, BlockInput]: +) -> tuple[NodeExecutionResult, BlockInput]: """ Insert AgentNodeExecutionInputOutput record for as one of AgentNodeExecution.Input. If there is no AgentNodeExecution that has no `input_name` as input, create new one. 
@@ -761,7 +771,7 @@ async def upsert_execution_input( existing_execution = await AgentNodeExecution.prisma().find_first( where=existing_exec_query_filter, order={"addedTime": "asc"}, - include={"Input": True}, + include={"Input": True, "GraphExecution": True}, ) json_input_data = SafeJson(input_data) @@ -773,7 +783,7 @@ async def upsert_execution_input( referencedByInputExecId=existing_execution.id, ) ) - return existing_execution.id, { + return NodeExecutionResult.from_db(existing_execution), { **{ input_data.name: type_utils.convert(input_data.data, JsonValue) for input_data in existing_execution.Input or [] @@ -788,9 +798,10 @@ async def upsert_execution_input( agentGraphExecutionId=graph_exec_id, executionStatus=ExecutionStatus.INCOMPLETE, Input={"create": {"name": input_name, "data": json_input_data}}, - ) + ), + include={"GraphExecution": True}, ) - return result.id, {input_name: input_data} + return NodeExecutionResult.from_db(result), {input_name: input_data} else: raise ValueError( @@ -886,9 +897,25 @@ async def update_node_execution_status_batch( node_exec_ids: list[str], status: ExecutionStatus, stats: dict[str, Any] | None = None, -): - await AgentNodeExecution.prisma().update_many( - where={"id": {"in": node_exec_ids}}, +) -> int: + # Validate status transitions - allowed_from should never be empty for valid statuses + allowed_from = VALID_STATUS_TRANSITIONS.get(status, []) + if not allowed_from: + raise ValueError( + f"Invalid status transition: {status} has no valid source statuses" + ) + + # For batch updates, we filter to only update nodes with valid current statuses + where_clause = cast( + AgentNodeExecutionWhereInput, + { + "id": {"in": node_exec_ids}, + "executionStatus": {"in": [s.value for s in allowed_from]}, + }, + ) + + return await AgentNodeExecution.prisma().update_many( + where=where_clause, data=_get_update_status_data(status, None, stats), ) @@ -902,15 +929,32 @@ async def update_node_execution_status( if status == 
ExecutionStatus.QUEUED and execution_data is None: raise ValueError("Execution data must be provided when queuing an execution.") - res = await AgentNodeExecution.prisma().update( - where={"id": node_exec_id}, + # Validate status transitions - allowed_from should never be empty for valid statuses + allowed_from = VALID_STATUS_TRANSITIONS.get(status, []) + if not allowed_from: + raise ValueError( + f"Invalid status transition: {status} has no valid source statuses" + ) + + if res := await AgentNodeExecution.prisma().update( + where=cast( + AgentNodeExecutionWhereUniqueInput, + { + "id": node_exec_id, + "executionStatus": {"in": [s.value for s in allowed_from]}, + }, + ), data=_get_update_status_data(status, execution_data, stats), include=EXECUTION_RESULT_INCLUDE, - ) - if not res: - raise ValueError(f"Execution {node_exec_id} not found.") + ): + return NodeExecutionResult.from_db(res) - return NodeExecutionResult.from_db(res) + if res := await AgentNodeExecution.prisma().find_unique( + where={"id": node_exec_id}, include=EXECUTION_RESULT_INCLUDE + ): + return NodeExecutionResult.from_db(res) + + raise ValueError(f"Execution {node_exec_id} not found.") def _get_update_status_data( @@ -964,17 +1008,17 @@ async def get_node_execution(node_exec_id: str) -> NodeExecutionResult | None: return NodeExecutionResult.from_db(execution) -async def get_node_executions( +def _build_node_execution_where_clause( graph_exec_id: str | None = None, node_id: str | None = None, block_ids: list[str] | None = None, statuses: list[ExecutionStatus] | None = None, - limit: int | None = None, created_time_gte: datetime | None = None, created_time_lte: datetime | None = None, - include_exec_data: bool = True, -) -> list[NodeExecutionResult]: - """⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints.""" +) -> AgentNodeExecutionWhereInput: + """ + Build where clause for node execution queries. 
+ """ where_clause: AgentNodeExecutionWhereInput = {} if graph_exec_id: where_clause["agentGraphExecutionId"] = graph_exec_id @@ -991,6 +1035,29 @@ async def get_node_executions( "lte": created_time_lte or datetime.max.replace(tzinfo=timezone.utc), } + return where_clause + + +async def get_node_executions( + graph_exec_id: str | None = None, + node_id: str | None = None, + block_ids: list[str] | None = None, + statuses: list[ExecutionStatus] | None = None, + limit: int | None = None, + created_time_gte: datetime | None = None, + created_time_lte: datetime | None = None, + include_exec_data: bool = True, +) -> list[NodeExecutionResult]: + """⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints.""" + where_clause = _build_node_execution_where_clause( + graph_exec_id=graph_exec_id, + node_id=node_id, + block_ids=block_ids, + statuses=statuses, + created_time_gte=created_time_gte, + created_time_lte=created_time_lte, + ) + executions = await AgentNodeExecution.prisma().find_many( where=where_clause, include=( @@ -1052,6 +1119,7 @@ class NodeExecutionEntry(BaseModel): user_id: str graph_exec_id: str graph_id: str + graph_version: int node_exec_id: str node_id: str block_id: str diff --git a/autogpt_platform/backend/backend/data/human_review.py b/autogpt_platform/backend/backend/data/human_review.py new file mode 100644 index 0000000000..2b0b2dbfb7 --- /dev/null +++ b/autogpt_platform/backend/backend/data/human_review.py @@ -0,0 +1,294 @@ +""" +Data layer for Human In The Loop (HITL) review operations. +Handles all database operations for pending human reviews. 
+""" + +import asyncio +import logging +from datetime import datetime, timezone +from typing import Optional + +from prisma.enums import ReviewStatus +from prisma.models import PendingHumanReview +from prisma.types import PendingHumanReviewUpdateInput +from pydantic import BaseModel + +from backend.server.v2.executions.review.model import ( + PendingHumanReviewModel, + SafeJsonData, +) +from backend.util.json import SafeJson + +logger = logging.getLogger(__name__) + + +class ReviewResult(BaseModel): + """Result of a review operation.""" + + data: Optional[SafeJsonData] = None + status: ReviewStatus + message: str = "" + processed: bool + node_exec_id: str + + +async def get_pending_review_by_node_exec_id( + node_exec_id: str, user_id: str +) -> Optional["PendingHumanReviewModel"]: + """ + Get a pending review by node execution ID with user ownership validation. + + Args: + node_exec_id: The node execution ID to check + user_id: The user ID to validate ownership + + Returns: + The existing review if found and owned by the user, None otherwise + """ + review = await PendingHumanReview.prisma().find_first( + where={ + "nodeExecId": node_exec_id, + "userId": user_id, + } + ) + + if review: + return PendingHumanReviewModel.from_db(review) + + return None + + +async def get_or_create_human_review( + user_id: str, + node_exec_id: str, + graph_exec_id: str, + graph_id: str, + graph_version: int, + input_data: SafeJsonData, + message: str, + editable: bool, +) -> Optional[ReviewResult]: + """ + Get existing review or create a new pending review entry. + + Uses upsert with empty update to get existing or create new review in a single operation. 
+ + Args: + user_id: ID of the user who owns this review + node_exec_id: ID of the node execution + graph_exec_id: ID of the graph execution + graph_id: ID of the graph template + graph_version: Version of the graph template + input_data: The data to be reviewed + message: Instructions for the reviewer + editable: Whether the data can be edited + + Returns: + ReviewResult if the review is complete, None if waiting for human input + """ + try: + logger.debug(f"Getting or creating review for node {node_exec_id}") + + # Upsert - get existing or create new review + review = await PendingHumanReview.prisma().upsert( + where={"nodeExecId": node_exec_id}, + data={ + "create": { + "userId": user_id, + "nodeExecId": node_exec_id, + "graphExecId": graph_exec_id, + "graphId": graph_id, + "graphVersion": graph_version, + "payload": SafeJson(input_data), + "instructions": message, + "editable": editable, + "status": ReviewStatus.WAITING, + }, + "update": {}, # Do nothing on update - keep existing review as is + }, + ) + + logger.info( + f"Review {'created' if review.createdAt == review.updatedAt else 'retrieved'} for node {node_exec_id} with status {review.status}" + ) + except Exception as e: + logger.error( + f"Database error in get_or_create_human_review for node {node_exec_id}: {str(e)}" + ) + raise + + # Early return if already processed + if review.processed: + return None + + if review.status == ReviewStatus.APPROVED: + # Return the approved review result + return ReviewResult( + data=review.payload, + status=ReviewStatus.APPROVED, + message=review.reviewMessage or "", + processed=review.processed, + node_exec_id=review.nodeExecId, + ) + elif review.status == ReviewStatus.REJECTED: + # Return the rejected review result + return ReviewResult( + data=None, + status=ReviewStatus.REJECTED, + message=review.reviewMessage or "", + processed=review.processed, + node_exec_id=review.nodeExecId, + ) + else: + # Review is pending - return None to continue waiting + return None + + 
+async def has_pending_reviews_for_graph_exec(graph_exec_id: str) -> bool: + """ + Check if a graph execution has any pending reviews. + + Args: + graph_exec_id: The graph execution ID to check + + Returns: + True if there are reviews waiting for human input, False otherwise + """ + # Check if there are any reviews waiting for human input + count = await PendingHumanReview.prisma().count( + where={"graphExecId": graph_exec_id, "status": ReviewStatus.WAITING} + ) + return count > 0 + + +async def get_pending_reviews_for_user( + user_id: str, page: int = 1, page_size: int = 25 +) -> list["PendingHumanReviewModel"]: + """ + Get all pending reviews for a user with pagination. + + Args: + user_id: User ID to get reviews for + page: Page number (1-indexed) + page_size: Number of reviews per page + + Returns: + List of pending review models + """ + # Calculate offset for pagination + offset = (page - 1) * page_size + + reviews = await PendingHumanReview.prisma().find_many( + where={"userId": user_id, "status": ReviewStatus.WAITING}, + order={"createdAt": "desc"}, + skip=offset, + take=page_size, + ) + + return [PendingHumanReviewModel.from_db(review) for review in reviews] + + +async def get_pending_reviews_for_execution( + graph_exec_id: str, user_id: str +) -> list["PendingHumanReviewModel"]: + """ + Get all pending reviews for a specific graph execution. 
+ + Args: + graph_exec_id: Graph execution ID + user_id: User ID for security validation + + Returns: + List of pending review models + """ + reviews = await PendingHumanReview.prisma().find_many( + where={ + "userId": user_id, + "graphExecId": graph_exec_id, + "status": ReviewStatus.WAITING, + }, + order={"createdAt": "asc"}, + ) + + return [PendingHumanReviewModel.from_db(review) for review in reviews] + + +async def process_all_reviews_for_execution( + user_id: str, + review_decisions: dict[str, tuple[ReviewStatus, SafeJsonData | None, str | None]], +) -> dict[str, PendingHumanReviewModel]: + """Process all pending reviews for an execution with approve/reject decisions. + + Args: + user_id: User ID for ownership validation + review_decisions: Map of node_exec_id -> (status, reviewed_data, message) + + Returns: + Dict of node_exec_id -> updated review model + """ + if not review_decisions: + return {} + + node_exec_ids = list(review_decisions.keys()) + + # Get all reviews for validation + reviews = await PendingHumanReview.prisma().find_many( + where={ + "nodeExecId": {"in": node_exec_ids}, + "userId": user_id, + "status": ReviewStatus.WAITING, + }, + ) + + # Validate all reviews can be processed + if len(reviews) != len(node_exec_ids): + missing_ids = set(node_exec_ids) - {review.nodeExecId for review in reviews} + raise ValueError( + f"Reviews not found, access denied, or not in WAITING status: {', '.join(missing_ids)}" + ) + + # Create parallel update tasks + update_tasks = [] + + for review in reviews: + new_status, reviewed_data, message = review_decisions[review.nodeExecId] + has_data_changes = reviewed_data is not None and reviewed_data != review.payload + + # Check edit permissions for actual data modifications + if has_data_changes and not review.editable: + raise ValueError(f"Review {review.nodeExecId} is not editable") + + update_data: PendingHumanReviewUpdateInput = { + "status": new_status, + "reviewMessage": message, + "wasEdited": has_data_changes, 
+ "reviewedAt": datetime.now(timezone.utc), + } + + if has_data_changes: + update_data["payload"] = SafeJson(reviewed_data) + + task = PendingHumanReview.prisma().update( + where={"nodeExecId": review.nodeExecId}, + data=update_data, + ) + update_tasks.append(task) + + # Execute all updates in parallel and get updated reviews + updated_reviews = await asyncio.gather(*update_tasks) + + # Note: Execution resumption is now handled at the API layer after ALL reviews + # for an execution are processed (both approved and rejected) + + # Return as dict for easy access + return { + review.nodeExecId: PendingHumanReviewModel.from_db(review) + for review in updated_reviews + } + + +async def update_review_processed_status(node_exec_id: str, processed: bool) -> None: + """Update the processed status of a review.""" + await PendingHumanReview.prisma().update( + where={"nodeExecId": node_exec_id}, data={"processed": processed} + ) diff --git a/autogpt_platform/backend/backend/data/human_review_test.py b/autogpt_platform/backend/backend/data/human_review_test.py new file mode 100644 index 0000000000..fe6c9057c1 --- /dev/null +++ b/autogpt_platform/backend/backend/data/human_review_test.py @@ -0,0 +1,376 @@ +import datetime +from unittest.mock import AsyncMock, Mock + +import pytest +import pytest_mock +from prisma.enums import ReviewStatus + +from backend.data.human_review import ( + get_or_create_human_review, + get_pending_review_by_node_exec_id, + get_pending_reviews_for_execution, + get_pending_reviews_for_user, + has_pending_reviews_for_graph_exec, + process_all_reviews_for_execution, +) + + +@pytest.fixture +def sample_db_review(): + """Create a sample database review object""" + mock_review = Mock() + mock_review.nodeExecId = "test_node_123" + mock_review.userId = "test_user" + mock_review.graphExecId = "test_graph_exec_456" + mock_review.graphId = "test_graph_789" + mock_review.graphVersion = 1 + mock_review.payload = {"data": "test payload"} + mock_review.instructions = 
"Please review" + mock_review.editable = True + mock_review.status = ReviewStatus.WAITING + mock_review.reviewMessage = None + mock_review.wasEdited = False + mock_review.processed = False + mock_review.createdAt = datetime.datetime.now(datetime.timezone.utc) + mock_review.updatedAt = None + mock_review.reviewedAt = None + return mock_review + + +@pytest.mark.asyncio +async def test_get_pending_review_by_node_exec_id_found( + mocker: pytest_mock.MockFixture, + sample_db_review, +): + """Test finding an existing pending review""" + mock_find_first = mocker.patch( + "backend.data.human_review.PendingHumanReview.prisma" + ) + mock_find_first.return_value.find_first = AsyncMock(return_value=sample_db_review) + + result = await get_pending_review_by_node_exec_id("test_node_123", "test_user") + + assert result is not None + assert result.node_exec_id == "test_node_123" + assert result.user_id == "test_user" + assert result.status == ReviewStatus.WAITING + + +@pytest.mark.asyncio +async def test_get_pending_review_by_node_exec_id_not_found( + mocker: pytest_mock.MockFixture, +): + """Test when review is not found""" + mock_find_first = mocker.patch( + "backend.data.human_review.PendingHumanReview.prisma" + ) + mock_find_first.return_value.find_first = AsyncMock(return_value=None) + + result = await get_pending_review_by_node_exec_id("nonexistent", "test_user") + + assert result is None + + +@pytest.mark.asyncio +async def test_get_or_create_human_review_new( + mocker: pytest_mock.MockFixture, + sample_db_review, +): + """Test creating a new human review""" + # Mock the upsert to return a new review (created_at == updated_at) + sample_db_review.status = ReviewStatus.WAITING + sample_db_review.processed = False + + mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") + mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review) + + result = await get_or_create_human_review( + user_id="test_user", + node_exec_id="test_node_123", + 
graph_exec_id="test_graph_exec_456", + graph_id="test_graph_789", + graph_version=1, + input_data={"data": "test payload"}, + message="Please review", + editable=True, + ) + + # Should return None for pending reviews (waiting for human input) + assert result is None + + +@pytest.mark.asyncio +async def test_get_or_create_human_review_approved( + mocker: pytest_mock.MockFixture, + sample_db_review, +): + """Test retrieving an already approved review""" + # Set up review as already approved + sample_db_review.status = ReviewStatus.APPROVED + sample_db_review.processed = False + sample_db_review.reviewMessage = "Looks good" + + mock_upsert = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") + mock_upsert.return_value.upsert = AsyncMock(return_value=sample_db_review) + + result = await get_or_create_human_review( + user_id="test_user", + node_exec_id="test_node_123", + graph_exec_id="test_graph_exec_456", + graph_id="test_graph_789", + graph_version=1, + input_data={"data": "test payload"}, + message="Please review", + editable=True, + ) + + # Should return the approved result + assert result is not None + assert result.status == ReviewStatus.APPROVED + assert result.data == {"data": "test payload"} + assert result.message == "Looks good" + + +@pytest.mark.asyncio +async def test_has_pending_reviews_for_graph_exec_true( + mocker: pytest_mock.MockFixture, +): + """Test when there are pending reviews""" + mock_count = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") + mock_count.return_value.count = AsyncMock(return_value=2) + + result = await has_pending_reviews_for_graph_exec("test_graph_exec") + + assert result is True + + +@pytest.mark.asyncio +async def test_has_pending_reviews_for_graph_exec_false( + mocker: pytest_mock.MockFixture, +): + """Test when there are no pending reviews""" + mock_count = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") + mock_count.return_value.count = AsyncMock(return_value=0) + + 
result = await has_pending_reviews_for_graph_exec("test_graph_exec") + + assert result is False + + +@pytest.mark.asyncio +async def test_get_pending_reviews_for_user( + mocker: pytest_mock.MockFixture, + sample_db_review, +): + """Test getting pending reviews for a user with pagination""" + mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") + mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review]) + + result = await get_pending_reviews_for_user("test_user", page=2, page_size=10) + + assert len(result) == 1 + assert result[0].node_exec_id == "test_node_123" + + # Verify pagination parameters + call_args = mock_find_many.return_value.find_many.call_args + assert call_args.kwargs["skip"] == 10 # (page-1) * page_size = (2-1) * 10 + assert call_args.kwargs["take"] == 10 + + +@pytest.mark.asyncio +async def test_get_pending_reviews_for_execution( + mocker: pytest_mock.MockFixture, + sample_db_review, +): + """Test getting pending reviews for specific execution""" + mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") + mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review]) + + result = await get_pending_reviews_for_execution("test_graph_exec_456", "test_user") + + assert len(result) == 1 + assert result[0].graph_exec_id == "test_graph_exec_456" + + # Verify it filters by execution and user + call_args = mock_find_many.return_value.find_many.call_args + where_clause = call_args.kwargs["where"] + assert where_clause["userId"] == "test_user" + assert where_clause["graphExecId"] == "test_graph_exec_456" + assert where_clause["status"] == ReviewStatus.WAITING + + +@pytest.mark.asyncio +async def test_process_all_reviews_for_execution_success( + mocker: pytest_mock.MockFixture, + sample_db_review, +): + """Test successful processing of reviews for an execution""" + # Mock finding reviews + mock_find_many = 
mocker.patch("backend.data.human_review.PendingHumanReview.prisma") + mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review]) + + # Mock updating reviews + updated_review = Mock() + updated_review.nodeExecId = "test_node_123" + updated_review.userId = "test_user" + updated_review.graphExecId = "test_graph_exec_456" + updated_review.graphId = "test_graph_789" + updated_review.graphVersion = 1 + updated_review.payload = {"data": "modified"} + updated_review.instructions = "Please review" + updated_review.editable = True + updated_review.status = ReviewStatus.APPROVED + updated_review.reviewMessage = "Approved" + updated_review.wasEdited = True + updated_review.processed = False + updated_review.createdAt = datetime.datetime.now(datetime.timezone.utc) + updated_review.updatedAt = datetime.datetime.now(datetime.timezone.utc) + updated_review.reviewedAt = datetime.datetime.now(datetime.timezone.utc) + mock_update = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") + mock_update.return_value.update = AsyncMock(return_value=updated_review) + + # Mock gather to simulate parallel updates + mocker.patch( + "backend.data.human_review.asyncio.gather", + new=AsyncMock(return_value=[updated_review]), + ) + + result = await process_all_reviews_for_execution( + user_id="test_user", + review_decisions={ + "test_node_123": (ReviewStatus.APPROVED, {"data": "modified"}, "Approved") + }, + ) + + assert len(result) == 1 + assert "test_node_123" in result + assert result["test_node_123"].status == ReviewStatus.APPROVED + + +@pytest.mark.asyncio +async def test_process_all_reviews_for_execution_validation_errors( + mocker: pytest_mock.MockFixture, +): + """Test validation errors in process_all_reviews_for_execution""" + # Mock finding fewer reviews than requested (some not found) + mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") + mock_find_many.return_value.find_many = AsyncMock( + return_value=[] + ) # No 
reviews found + + with pytest.raises(ValueError, match="Reviews not found"): + await process_all_reviews_for_execution( + user_id="test_user", + review_decisions={ + "nonexistent_node": (ReviewStatus.APPROVED, {"data": "test"}, "message") + }, + ) + + +@pytest.mark.asyncio +async def test_process_all_reviews_edit_permission_error( + mocker: pytest_mock.MockFixture, + sample_db_review, +): + """Test editing non-editable review""" + # Set review as non-editable + sample_db_review.editable = False + + # Mock finding reviews + mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") + mock_find_many.return_value.find_many = AsyncMock(return_value=[sample_db_review]) + + with pytest.raises(ValueError, match="not editable"): + await process_all_reviews_for_execution( + user_id="test_user", + review_decisions={ + "test_node_123": ( + ReviewStatus.APPROVED, + {"data": "modified"}, + "message", + ) + }, + ) + + +@pytest.mark.asyncio +async def test_process_all_reviews_mixed_approval_rejection( + mocker: pytest_mock.MockFixture, + sample_db_review, +): + """Test processing mixed approval and rejection decisions""" + # Create second review for rejection + second_review = Mock() + second_review.nodeExecId = "test_node_456" + second_review.userId = "test_user" + second_review.graphExecId = "test_graph_exec_456" + second_review.graphId = "test_graph_789" + second_review.graphVersion = 1 + second_review.payload = {"data": "original"} + second_review.instructions = "Second review" + second_review.editable = True + second_review.status = ReviewStatus.WAITING + second_review.reviewMessage = None + second_review.wasEdited = False + second_review.processed = False + second_review.createdAt = datetime.datetime.now(datetime.timezone.utc) + second_review.updatedAt = None + second_review.reviewedAt = None + + # Mock finding reviews + mock_find_many = mocker.patch("backend.data.human_review.PendingHumanReview.prisma") + mock_find_many.return_value.find_many = 
AsyncMock( + return_value=[sample_db_review, second_review] + ) + + # Mock updating reviews + approved_review = Mock() + approved_review.nodeExecId = "test_node_123" + approved_review.userId = "test_user" + approved_review.graphExecId = "test_graph_exec_456" + approved_review.graphId = "test_graph_789" + approved_review.graphVersion = 1 + approved_review.payload = {"data": "modified"} + approved_review.instructions = "Please review" + approved_review.editable = True + approved_review.status = ReviewStatus.APPROVED + approved_review.reviewMessage = "Approved" + approved_review.wasEdited = True + approved_review.processed = False + approved_review.createdAt = datetime.datetime.now(datetime.timezone.utc) + approved_review.updatedAt = datetime.datetime.now(datetime.timezone.utc) + approved_review.reviewedAt = datetime.datetime.now(datetime.timezone.utc) + + rejected_review = Mock() + rejected_review.nodeExecId = "test_node_456" + rejected_review.userId = "test_user" + rejected_review.graphExecId = "test_graph_exec_456" + rejected_review.graphId = "test_graph_789" + rejected_review.graphVersion = 1 + rejected_review.payload = {"data": "original"} + rejected_review.instructions = "Please review" + rejected_review.editable = True + rejected_review.status = ReviewStatus.REJECTED + rejected_review.reviewMessage = "Rejected" + rejected_review.wasEdited = False + rejected_review.processed = False + rejected_review.createdAt = datetime.datetime.now(datetime.timezone.utc) + rejected_review.updatedAt = datetime.datetime.now(datetime.timezone.utc) + rejected_review.reviewedAt = datetime.datetime.now(datetime.timezone.utc) + + mocker.patch( + "backend.data.human_review.asyncio.gather", + new=AsyncMock(return_value=[approved_review, rejected_review]), + ) + + result = await process_all_reviews_for_execution( + user_id="test_user", + review_decisions={ + "test_node_123": (ReviewStatus.APPROVED, {"data": "modified"}, "Approved"), + "test_node_456": (ReviewStatus.REJECTED, None, 
"Rejected"), + }, + ) + + assert len(result) == 2 + assert "test_node_123" in result + assert "test_node_456" in result diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py index df581e0de4..27e8b01043 100644 --- a/autogpt_platform/backend/backend/executor/database.py +++ b/autogpt_platform/backend/backend/executor/database.py @@ -31,6 +31,11 @@ from backend.data.graph import ( get_node, validate_graph_execution_permissions, ) +from backend.data.human_review import ( + get_or_create_human_review, + has_pending_reviews_for_graph_exec, + update_review_processed_status, +) from backend.data.notifications import ( clear_all_user_notification_batches, create_or_add_to_user_notification_batch, @@ -161,6 +166,11 @@ class DatabaseManager(AppService): get_user_email_verification = _(get_user_email_verification) get_user_notification_preference = _(get_user_notification_preference) + # Human In The Loop + get_or_create_human_review = _(get_or_create_human_review) + has_pending_reviews_for_graph_exec = _(has_pending_reviews_for_graph_exec) + update_review_processed_status = _(update_review_processed_status) + # Notifications - async clear_all_user_notification_batches = _(clear_all_user_notification_batches) create_or_add_to_user_notification_batch = _( @@ -215,6 +225,9 @@ class DatabaseManagerClient(AppServiceClient): # Block error monitoring get_block_error_stats = _(d.get_block_error_stats) + # Human In The Loop + has_pending_reviews_for_graph_exec = _(d.has_pending_reviews_for_graph_exec) + # User Emails get_user_email_by_id = _(d.get_user_email_by_id) @@ -256,6 +269,10 @@ class DatabaseManagerAsyncClient(AppServiceClient): get_execution_kv_data = d.get_execution_kv_data set_execution_kv_data = d.set_execution_kv_data + # Human In The Loop + get_or_create_human_review = d.get_or_create_human_review + update_review_processed_status = d.update_review_processed_status + # User Comms 
get_active_user_ids_in_timerange = d.get_active_user_ids_in_timerange get_user_email_by_id = d.get_user_email_by_id diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index f04102a950..06ad06e6dc 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -164,6 +164,7 @@ async def execute_node( user_id = data.user_id graph_exec_id = data.graph_exec_id graph_id = data.graph_id + graph_version = data.graph_version node_exec_id = data.node_exec_id node_id = data.node_id node_block = node.block @@ -204,6 +205,7 @@ async def execute_node( # Inject extra execution arguments for the blocks via kwargs extra_exec_kwargs: dict = { "graph_id": graph_id, + "graph_version": graph_version, "node_id": node_id, "graph_exec_id": graph_exec_id, "node_exec_id": node_exec_id, @@ -284,6 +286,7 @@ async def _enqueue_next_nodes( user_id: str, graph_exec_id: str, graph_id: str, + graph_version: int, log_metadata: LogMetadata, nodes_input_masks: Optional[NodesInputMasks], user_context: UserContext, @@ -301,6 +304,7 @@ async def _enqueue_next_nodes( user_id=user_id, graph_exec_id=graph_exec_id, graph_id=graph_id, + graph_version=graph_version, node_exec_id=node_exec_id, node_id=node_id, block_id=block_id, @@ -334,17 +338,14 @@ async def _enqueue_next_nodes( # Or the same input to be consumed multiple times. async with synchronized(f"upsert_input-{next_node_id}-{graph_exec_id}"): # Add output data to the earliest incomplete execution, or create a new one. 
- next_node_exec_id, next_node_input = await db_client.upsert_execution_input( + next_node_exec, next_node_input = await db_client.upsert_execution_input( node_id=next_node_id, graph_exec_id=graph_exec_id, input_name=next_input_name, input_data=next_data, ) - await async_update_node_execution_status( - db_client=db_client, - exec_id=next_node_exec_id, - status=ExecutionStatus.INCOMPLETE, - ) + next_node_exec_id = next_node_exec.node_exec_id + await send_async_execution_update(next_node_exec) # Complete missing static input pins data using the last execution input. static_link_names = { @@ -660,6 +661,16 @@ class ExecutionProcessor: log_metadata.info( f"⚙️ Graph execution #{graph_exec.graph_exec_id} is already running, continuing where it left off." ) + elif exec_meta.status == ExecutionStatus.REVIEW: + exec_meta.status = ExecutionStatus.RUNNING + log_metadata.info( + f"⚙️ Graph execution #{graph_exec.graph_exec_id} was waiting for review, resuming execution." + ) + update_graph_execution_state( + db_client=db_client, + graph_exec_id=graph_exec.graph_exec_id, + status=ExecutionStatus.RUNNING, + ) elif exec_meta.status == ExecutionStatus.FAILED: exec_meta.status = ExecutionStatus.RUNNING log_metadata.info( @@ -697,19 +708,21 @@ class ExecutionProcessor: raise status exec_meta.status = status - # Activity status handling - activity_response = asyncio.run_coroutine_threadsafe( - generate_activity_status_for_execution( - graph_exec_id=graph_exec.graph_exec_id, - graph_id=graph_exec.graph_id, - graph_version=graph_exec.graph_version, - execution_stats=exec_stats, - db_client=get_db_async_client(), - user_id=graph_exec.user_id, - execution_status=status, - ), - self.node_execution_loop, - ).result(timeout=60.0) + if status in [ExecutionStatus.COMPLETED, ExecutionStatus.FAILED]: + activity_response = asyncio.run_coroutine_threadsafe( + generate_activity_status_for_execution( + graph_exec_id=graph_exec.graph_exec_id, + graph_id=graph_exec.graph_id, + 
graph_version=graph_exec.graph_version, + execution_stats=exec_stats, + db_client=get_db_async_client(), + user_id=graph_exec.user_id, + execution_status=status, + ), + self.node_execution_loop, + ).result(timeout=60.0) + else: + activity_response = None if activity_response is not None: exec_stats.activity_status = activity_response["activity_status"] exec_stats.correctness_score = activity_response["correctness_score"] @@ -845,6 +858,7 @@ class ExecutionProcessor: ExecutionStatus.RUNNING, ExecutionStatus.QUEUED, ExecutionStatus.TERMINATED, + ExecutionStatus.REVIEW, ], ): node_entry = node_exec.to_node_execution_entry(graph_exec.user_context) @@ -853,6 +867,7 @@ class ExecutionProcessor: # ------------------------------------------------------------ # Main dispatch / polling loop ----------------------------- # ------------------------------------------------------------ + while not execution_queue.empty(): if cancel.is_set(): break @@ -1006,7 +1021,12 @@ class ExecutionProcessor: elif error is not None: execution_status = ExecutionStatus.FAILED else: - execution_status = ExecutionStatus.COMPLETED + if db_client.has_pending_reviews_for_graph_exec( + graph_exec.graph_exec_id + ): + execution_status = ExecutionStatus.REVIEW + else: + execution_status = ExecutionStatus.COMPLETED if error: execution_stats.error = str(error) or type(error).__name__ @@ -1142,6 +1162,7 @@ class ExecutionProcessor: user_id=graph_exec.user_id, graph_exec_id=graph_exec.graph_exec_id, graph_id=graph_exec.graph_id, + graph_version=graph_exec.graph_version, log_metadata=log_metadata, nodes_input_masks=nodes_input_masks, user_context=graph_exec.user_context, diff --git a/autogpt_platform/backend/backend/executor/utils.py b/autogpt_platform/backend/backend/executor/utils.py index b11ea45cf5..f8c6da8546 100644 --- a/autogpt_platform/backend/backend/executor/utils.py +++ b/autogpt_platform/backend/backend/executor/utils.py @@ -30,6 +30,7 @@ from backend.data.execution import ( 
GraphExecutionWithNodes, NodesInputMasks, UserContext, + get_graph_execution, ) from backend.data.graph import GraphModel, Node from backend.data.model import CredentialsMetaInput @@ -764,6 +765,7 @@ async def add_graph_execution( nodes_input_masks: Optional[NodesInputMasks] = None, parent_graph_exec_id: Optional[str] = None, is_sub_graph: bool = False, + graph_exec_id: Optional[str] = None, ) -> GraphExecutionWithNodes: """ Adds a graph execution to the queue and returns the execution entry. @@ -779,32 +781,48 @@ async def add_graph_execution( nodes_input_masks: Node inputs to use in the execution. parent_graph_exec_id: The ID of the parent graph execution (for nested executions). is_sub_graph: Whether this is a sub-graph execution. + graph_exec_id: If provided, resume this existing execution instead of creating a new one. Returns: GraphExecutionEntry: The entry for the graph execution. Raises: ValueError: If the graph is not found or if there are validation errors. + NotFoundError: If graph_exec_id is provided but execution is not found. 
""" if prisma.is_connected(): edb = execution_db else: edb = get_database_manager_async_client() - graph, starting_nodes_input, compiled_nodes_input_masks = ( - await validate_and_construct_node_execution_input( - graph_id=graph_id, + # Get or create the graph execution + if graph_exec_id: + # Resume existing execution + graph_exec = await get_graph_execution( user_id=user_id, - graph_inputs=inputs or {}, - graph_version=graph_version, - graph_credentials_inputs=graph_credentials_inputs, - nodes_input_masks=nodes_input_masks, - is_sub_graph=is_sub_graph, + execution_id=graph_exec_id, + include_node_executions=True, + ) + + if not graph_exec: + raise NotFoundError(f"Graph execution #{graph_exec_id} not found.") + + # Use existing execution's compiled input masks + compiled_nodes_input_masks = graph_exec.nodes_input_masks or {} + + logger.info(f"Resuming graph execution #{graph_exec.id} for graph #{graph_id}") + else: + # Create new execution + graph, starting_nodes_input, compiled_nodes_input_masks = ( + await validate_and_construct_node_execution_input( + graph_id=graph_id, + user_id=user_id, + graph_inputs=inputs or {}, + graph_version=graph_version, + graph_credentials_inputs=graph_credentials_inputs, + nodes_input_masks=nodes_input_masks, + is_sub_graph=is_sub_graph, + ) ) - ) - graph_exec = None - try: - # Sanity check: running add_graph_execution with the properties of - # the graph_exec created here should create the same execution again. 
graph_exec = await edb.create_graph_execution( user_id=user_id, graph_id=graph_id, @@ -817,16 +835,20 @@ async def add_graph_execution( parent_graph_exec_id=parent_graph_exec_id, ) + logger.info( + f"Created graph execution #{graph_exec.id} for graph " + f"#{graph_id} with {len(starting_nodes_input)} starting nodes" + ) + + # Common path: publish to queue and update status + try: graph_exec_entry = graph_exec.to_graph_execution_entry( user_context=await get_user_context(user_id), compiled_nodes_input_masks=compiled_nodes_input_masks, parent_graph_exec_id=parent_graph_exec_id, ) - logger.info( - f"Created graph execution #{graph_exec.id} for graph " - f"#{graph_id} with {len(starting_nodes_input)} starting nodes. " - f"Now publishing to execution queue." - ) + + logger.info(f"Publishing execution {graph_exec.id} to execution queue") exec_queue = await get_async_execution_queue() await exec_queue.publish_message( diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py index 15e7485d5d..556903571c 100644 --- a/autogpt_platform/backend/backend/server/rest_api.py +++ b/autogpt_platform/backend/backend/server/rest_api.py @@ -29,6 +29,7 @@ import backend.server.v2.admin.store_admin_routes import backend.server.v2.builder import backend.server.v2.builder.routes import backend.server.v2.chat.routes as chat_routes +import backend.server.v2.executions.review.routes import backend.server.v2.library.db import backend.server.v2.library.model import backend.server.v2.library.routes @@ -274,6 +275,11 @@ app.include_router( tags=["v2", "admin"], prefix="/api/executions", ) +app.include_router( + backend.server.v2.executions.review.routes.router, + tags=["v2", "executions", "review"], + prefix="/api/review", +) app.include_router( backend.server.v2.library.routes.router, tags=["v2"], prefix="/api/library" ) diff --git a/autogpt_platform/backend/backend/server/v2/builder/db.py 
b/autogpt_platform/backend/backend/server/v2/builder/db.py index ed5938b218..c3f6ac88ab 100644 --- a/autogpt_platform/backend/backend/server/v2/builder/db.py +++ b/autogpt_platform/backend/backend/server/v2/builder/db.py @@ -7,6 +7,7 @@ import backend.data.block from backend.blocks import load_all_blocks from backend.blocks.llm import LlmModel from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema +from backend.data.db import query_raw_with_schema from backend.integrations.providers import ProviderName from backend.server.v2.builder.model import ( BlockCategoryResponse, @@ -340,13 +341,13 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]: # Calculate the cutoff timestamp timestamp_threshold = datetime.now(timezone.utc) - timedelta(days=30) - results = await prisma.get_client().query_raw( + results = await query_raw_with_schema( """ SELECT agent_node."agentBlockId" AS block_id, COUNT(execution.id) AS execution_count - FROM "AgentNodeExecution" execution - JOIN "AgentNode" agent_node ON execution."agentNodeId" = agent_node.id + FROM {schema_prefix}"AgentNodeExecution" execution + JOIN {schema_prefix}"AgentNode" agent_node ON execution."agentNodeId" = agent_node.id WHERE execution."endedTime" >= $1::timestamp GROUP BY agent_node."agentBlockId" ORDER BY execution_count DESC; diff --git a/autogpt_platform/backend/backend/server/v2/executions/review/model.py b/autogpt_platform/backend/backend/server/v2/executions/review/model.py new file mode 100644 index 0000000000..74f72fe1ff --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/executions/review/model.py @@ -0,0 +1,204 @@ +import json +from datetime import datetime +from typing import TYPE_CHECKING, Any, Dict, List, Union + +from prisma.enums import ReviewStatus +from pydantic import BaseModel, Field, field_validator, model_validator + +if TYPE_CHECKING: + from prisma.models import PendingHumanReview + +# SafeJson-compatible type alias for review data 
+SafeJsonData = Union[Dict[str, Any], List[Any], str, int, float, bool, None] + + +class PendingHumanReviewModel(BaseModel): + """Response model for pending human review data. + + Represents a human review request that is awaiting user action. + Contains all necessary information for a user to review and approve + or reject data from a Human-in-the-Loop block execution. + + Attributes: + processed: Whether the review result has been processed by the execution engine + user_id: ID of the user who must perform the review + node_exec_id: ID of the node execution that created this review + graph_exec_id: ID of the graph execution containing the node + graph_id: ID of the graph template being executed + graph_version: Version number of the graph template + payload: The actual data payload awaiting review + instructions: Instructions or message for the reviewer + editable: Whether the reviewer can edit the data + status: Current review status (WAITING, APPROVED, or REJECTED) + review_message: Optional message from the reviewer + created_at: Timestamp when review was created + updated_at: Timestamp when review was last modified + reviewed_at: Timestamp when review was completed (if applicable) + """ + + node_exec_id: str = Field(description="Node execution ID (primary key)") + user_id: str = Field(description="User ID associated with the review") + graph_exec_id: str = Field(description="Graph execution ID") + graph_id: str = Field(description="Graph ID") + graph_version: int = Field(description="Graph version") + payload: SafeJsonData = Field(description="The actual data payload awaiting review") + instructions: str | None = Field( + description="Instructions or message for the reviewer", default=None + ) + editable: bool = Field(description="Whether the reviewer can edit the data") + status: ReviewStatus = Field(description="Review status") + review_message: str | None = Field( + description="Optional message from the reviewer", default=None + ) + was_edited: bool | None = Field( + description="Whether
the data was modified during review", default=None + ) + processed: bool = Field( + description="Whether the review result has been processed by the execution engine", + default=False, + ) + created_at: datetime = Field(description="When the review was created") + updated_at: datetime | None = Field( + description="When the review was last updated", default=None + ) + reviewed_at: datetime | None = Field( + description="When the review was completed", default=None + ) + + @classmethod + def from_db(cls, review: "PendingHumanReview") -> "PendingHumanReviewModel": + """ + Convert a database model to a response model. + + Uses the new flat database structure with separate columns for + payload, instructions, and editable flag. + + Handles invalid data gracefully by using safe defaults. + """ + return cls( + node_exec_id=review.nodeExecId, + user_id=review.userId, + graph_exec_id=review.graphExecId, + graph_id=review.graphId, + graph_version=review.graphVersion, + payload=review.payload, + instructions=review.instructions, + editable=review.editable, + status=review.status, + review_message=review.reviewMessage, + was_edited=review.wasEdited, + processed=review.processed, + created_at=review.createdAt, + updated_at=review.updatedAt, + reviewed_at=review.reviewedAt, + ) + + +class ReviewItem(BaseModel): + """Single review item for processing.""" + + node_exec_id: str = Field(description="Node execution ID to review") + approved: bool = Field( + description="Whether this review is approved (True) or rejected (False)" + ) + message: str | None = Field( + None, description="Optional review message", max_length=2000 + ) + reviewed_data: SafeJsonData | None = Field( + None, description="Optional edited data (ignored if approved=False)" + ) + + @field_validator("reviewed_data") + @classmethod + def validate_reviewed_data(cls, v): + """Validate that reviewed_data is safe and properly structured.""" + if v is None: + return v + + # Validate SafeJson compatibility + def 
validate_safejson_type(obj): + """Ensure object only contains SafeJson compatible types.""" + if obj is None: + return True + elif isinstance(obj, (str, int, float, bool)): + return True + elif isinstance(obj, dict): + return all( + isinstance(k, str) and validate_safejson_type(v) + for k, v in obj.items() + ) + elif isinstance(obj, list): + return all(validate_safejson_type(item) for item in obj) + else: + return False + + if not validate_safejson_type(v): + raise ValueError("reviewed_data contains non-SafeJson compatible types") + + # Validate data size to prevent DoS attacks + try: + json_str = json.dumps(v) + if len(json_str) > 1000000: # 1MB limit + raise ValueError("reviewed_data is too large (max 1MB)") + except (TypeError, ValueError) as e: + raise ValueError(f"reviewed_data must be JSON serializable: {str(e)}") + + # Ensure no dangerous nested structures (prevent infinite recursion) + def check_depth(obj, max_depth=10, current_depth=0): + """Recursively check object nesting depth to prevent stack overflow attacks.""" + if current_depth > max_depth: + raise ValueError("reviewed_data has excessive nesting depth") + + if isinstance(obj, dict): + for value in obj.values(): + check_depth(value, max_depth, current_depth + 1) + elif isinstance(obj, list): + for item in obj: + check_depth(item, max_depth, current_depth + 1) + + check_depth(v) + return v + + @field_validator("message") + @classmethod + def validate_message(cls, v): + """Validate and sanitize review message.""" + if v is not None and len(v.strip()) == 0: + return None + return v + + +class ReviewRequest(BaseModel): + """Request model for processing ALL pending reviews for an execution. + + This request must include ALL pending reviews for a graph execution. + Each review will be either approved (with optional data modifications) + or rejected (data ignored). The execution will resume only after ALL reviews are processed. 
+ """ + + reviews: List[ReviewItem] = Field( + description="All reviews with their approval status, data, and messages" + ) + + @model_validator(mode="after") + def validate_review_completeness(self): + """Validate that we have at least one review to process and no duplicates.""" + if not self.reviews: + raise ValueError("At least one review must be provided") + + # Ensure no duplicate node_exec_ids + node_ids = [review.node_exec_id for review in self.reviews] + if len(node_ids) != len(set(node_ids)): + duplicates = [nid for nid in set(node_ids) if node_ids.count(nid) > 1] + raise ValueError(f"Duplicate review IDs found: {', '.join(duplicates)}") + + return self + + +class ReviewResponse(BaseModel): + """Response from review endpoint.""" + + approved_count: int = Field(description="Number of reviews successfully approved") + rejected_count: int = Field(description="Number of reviews successfully rejected") + failed_count: int = Field(description="Number of reviews that failed processing") + error: str | None = Field(None, description="Error message if operation failed") diff --git a/autogpt_platform/backend/backend/server/v2/executions/review/review_routes_test.py b/autogpt_platform/backend/backend/server/v2/executions/review/review_routes_test.py new file mode 100644 index 0000000000..3bc0dff923 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/executions/review/review_routes_test.py @@ -0,0 +1,459 @@ +import datetime + +import fastapi +import fastapi.testclient +import pytest +import pytest_mock +from prisma.enums import ReviewStatus +from pytest_snapshot.plugin import Snapshot + +from backend.server.v2.executions.review.model import PendingHumanReviewModel +from backend.server.v2.executions.review.routes import router + +# Using a fixed timestamp for reproducible tests +FIXED_NOW = datetime.datetime(2023, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc) + +app = fastapi.FastAPI() +app.include_router(router, prefix="/api/review") + +client = 
fastapi.testclient.TestClient(app) + + +@pytest.fixture(autouse=True) +def setup_app_auth(mock_jwt_user): + """Setup auth overrides for all tests in this module""" + from autogpt_libs.auth.jwt_utils import get_jwt_payload + + app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"] + yield + app.dependency_overrides.clear() + + +@pytest.fixture +def sample_pending_review() -> PendingHumanReviewModel: + """Create a sample pending review for testing""" + return PendingHumanReviewModel( + node_exec_id="test_node_123", + user_id="test_user", + graph_exec_id="test_graph_exec_456", + graph_id="test_graph_789", + graph_version=1, + payload={"data": "test payload", "value": 42}, + instructions="Please review this data", + editable=True, + status=ReviewStatus.WAITING, + review_message=None, + was_edited=None, + processed=False, + created_at=FIXED_NOW, + updated_at=None, + reviewed_at=None, + ) + + +def test_get_pending_reviews_empty( + mocker: pytest_mock.MockFixture, + snapshot: Snapshot, +) -> None: + """Test getting pending reviews when none exist""" + mock_get_reviews = mocker.patch( + "backend.server.v2.executions.review.routes.get_pending_reviews_for_user" + ) + mock_get_reviews.return_value = [] + + response = client.get("/api/review/pending") + + assert response.status_code == 200 + assert response.json() == [] + mock_get_reviews.assert_called_once_with("test_user", 1, 25) + + +def test_get_pending_reviews_with_data( + mocker: pytest_mock.MockFixture, + sample_pending_review: PendingHumanReviewModel, + snapshot: Snapshot, +) -> None: + """Test getting pending reviews with data""" + mock_get_reviews = mocker.patch( + "backend.server.v2.executions.review.routes.get_pending_reviews_for_user" + ) + mock_get_reviews.return_value = [sample_pending_review] + + response = client.get("/api/review/pending?page=2&page_size=10") + + assert response.status_code == 200 + data = response.json() + assert len(data) == 1 + assert data[0]["node_exec_id"] == 
"test_node_123" + assert data[0]["status"] == "WAITING" + mock_get_reviews.assert_called_once_with("test_user", 2, 10) + + +def test_get_pending_reviews_for_execution_success( + mocker: pytest_mock.MockFixture, + sample_pending_review: PendingHumanReviewModel, + snapshot: Snapshot, +) -> None: + """Test getting pending reviews for specific execution""" + mock_get_graph_execution = mocker.patch( + "backend.server.v2.executions.review.routes.get_graph_execution_meta" + ) + mock_get_graph_execution.return_value = { + "id": "test_graph_exec_456", + "user_id": "test_user", + } + + mock_get_reviews = mocker.patch( + "backend.server.v2.executions.review.routes.get_pending_reviews_for_execution" + ) + mock_get_reviews.return_value = [sample_pending_review] + + response = client.get("/api/review/execution/test_graph_exec_456") + + assert response.status_code == 200 + data = response.json() + assert len(data) == 1 + assert data[0]["graph_exec_id"] == "test_graph_exec_456" + + +def test_get_pending_reviews_for_execution_access_denied( + mocker: pytest_mock.MockFixture, +) -> None: + """Test access denied when user doesn't own the execution""" + mock_get_graph_execution = mocker.patch( + "backend.server.v2.executions.review.routes.get_graph_execution_meta" + ) + mock_get_graph_execution.return_value = None + + response = client.get("/api/review/execution/test_graph_exec_456") + + assert response.status_code == 403 + assert "Access denied" in response.json()["detail"] + + +def test_process_review_action_approve_success( + mocker: pytest_mock.MockFixture, + sample_pending_review: PendingHumanReviewModel, +) -> None: + """Test successful review approval""" + # Mock the validation functions + mock_get_pending_review = mocker.patch( + "backend.data.human_review.get_pending_review_by_node_exec_id" + ) + mock_get_pending_review.return_value = sample_pending_review + + mock_get_reviews_for_execution = mocker.patch( + 
"backend.server.v2.executions.review.routes.get_pending_reviews_for_execution" + ) + mock_get_reviews_for_execution.return_value = [sample_pending_review] + + mock_process_all_reviews = mocker.patch( + "backend.server.v2.executions.review.routes.process_all_reviews_for_execution" + ) + mock_process_all_reviews.return_value = {"test_node_123": sample_pending_review} + + mock_has_pending = mocker.patch( + "backend.data.human_review.has_pending_reviews_for_graph_exec" + ) + mock_has_pending.return_value = False + + mocker.patch("backend.executor.utils.add_graph_execution") + + request_data = { + "approved_reviews": [ + { + "node_exec_id": "test_node_123", + "message": "Looks good", + "reviewed_data": {"data": "modified payload", "value": 50}, + } + ], + "rejected_review_ids": [], + } + + response = client.post("/api/review/action", json=request_data) + + assert response.status_code == 200 + data = response.json() + assert data["approved_count"] == 1 + assert data["rejected_count"] == 0 + assert data["failed_count"] == 0 + assert data["error"] is None + + +def test_process_review_action_reject_success( + mocker: pytest_mock.MockFixture, + sample_pending_review: PendingHumanReviewModel, +) -> None: + """Test successful review rejection""" + # Mock the validation functions + mock_get_pending_review = mocker.patch( + "backend.data.human_review.get_pending_review_by_node_exec_id" + ) + mock_get_pending_review.return_value = sample_pending_review + + mock_get_reviews_for_execution = mocker.patch( + "backend.server.v2.executions.review.routes.get_pending_reviews_for_execution" + ) + mock_get_reviews_for_execution.return_value = [sample_pending_review] + + mock_process_all_reviews = mocker.patch( + "backend.server.v2.executions.review.routes.process_all_reviews_for_execution" + ) + rejected_review = PendingHumanReviewModel( + node_exec_id="test_node_123", + user_id="test_user", + graph_exec_id="test_graph_exec_456", + graph_id="test_graph_789", + graph_version=1, + 
payload={"data": "test payload"}, + instructions="Please review", + editable=True, + status=ReviewStatus.REJECTED, + review_message="Rejected by user", + was_edited=False, + processed=False, + created_at=FIXED_NOW, + updated_at=None, + reviewed_at=FIXED_NOW, + ) + mock_process_all_reviews.return_value = {"test_node_123": rejected_review} + + mock_has_pending = mocker.patch( + "backend.data.human_review.has_pending_reviews_for_graph_exec" + ) + mock_has_pending.return_value = False + + request_data = {"approved_reviews": [], "rejected_review_ids": ["test_node_123"]} + + response = client.post("/api/review/action", json=request_data) + + assert response.status_code == 200 + data = response.json() + assert data["approved_count"] == 0 + assert data["rejected_count"] == 1 + assert data["failed_count"] == 0 + assert data["error"] is None + + +def test_process_review_action_mixed_success( + mocker: pytest_mock.MockFixture, + sample_pending_review: PendingHumanReviewModel, +) -> None: + """Test mixed approve/reject operations""" + # Create a second review + second_review = PendingHumanReviewModel( + node_exec_id="test_node_456", + user_id="test_user", + graph_exec_id="test_graph_exec_456", + graph_id="test_graph_789", + graph_version=1, + payload={"data": "second payload"}, + instructions="Second review", + editable=False, + status=ReviewStatus.WAITING, + review_message=None, + was_edited=None, + processed=False, + created_at=FIXED_NOW, + updated_at=None, + reviewed_at=None, + ) + + # Mock the validation functions + mock_get_pending_review = mocker.patch( + "backend.data.human_review.get_pending_review_by_node_exec_id" + ) + mock_get_pending_review.side_effect = lambda node_id, user_id: ( + sample_pending_review if node_id == "test_node_123" else second_review + ) + + mock_get_reviews_for_execution = mocker.patch( + "backend.server.v2.executions.review.routes.get_pending_reviews_for_execution" + ) + mock_get_reviews_for_execution.return_value = [sample_pending_review, 
second_review] + + mock_process_all_reviews = mocker.patch( + "backend.server.v2.executions.review.routes.process_all_reviews_for_execution" + ) + # Create approved version of first review + approved_review = PendingHumanReviewModel( + node_exec_id="test_node_123", + user_id="test_user", + graph_exec_id="test_graph_exec_456", + graph_id="test_graph_789", + graph_version=1, + payload={"data": "modified"}, + instructions="Please review", + editable=True, + status=ReviewStatus.APPROVED, + review_message="Approved", + was_edited=True, + processed=False, + created_at=FIXED_NOW, + updated_at=None, + reviewed_at=FIXED_NOW, + ) + # Create rejected version of second review + rejected_review = PendingHumanReviewModel( + node_exec_id="test_node_456", + user_id="test_user", + graph_exec_id="test_graph_exec_456", + graph_id="test_graph_789", + graph_version=1, + payload={"data": "second payload"}, + instructions="Second review", + editable=False, + status=ReviewStatus.REJECTED, + review_message="Rejected by user", + was_edited=False, + processed=False, + created_at=FIXED_NOW, + updated_at=None, + reviewed_at=FIXED_NOW, + ) + mock_process_all_reviews.return_value = { + "test_node_123": approved_review, + "test_node_456": rejected_review, + } + + mock_has_pending = mocker.patch( + "backend.data.human_review.has_pending_reviews_for_graph_exec" + ) + mock_has_pending.return_value = False + + request_data = { + "approved_reviews": [ + { + "node_exec_id": "test_node_123", + "message": "Approved", + "reviewed_data": {"data": "modified"}, + } + ], + "rejected_review_ids": ["test_node_456"], + } + + response = client.post("/api/review/action", json=request_data) + + assert response.status_code == 200 + data = response.json() + assert data["approved_count"] == 1 + assert data["rejected_count"] == 1 + assert data["failed_count"] == 0 + assert data["error"] is None + + +def test_process_review_action_empty_request( + mocker: pytest_mock.MockFixture, +) -> None: + """Test error when no 
reviews provided""" + request_data = {"approved_reviews": [], "rejected_review_ids": []} + + response = client.post("/api/review/action", json=request_data) + + assert response.status_code == 400 + assert "At least one review must be provided" in response.json()["detail"] + + +def test_process_review_action_review_not_found( + mocker: pytest_mock.MockFixture, +) -> None: + """Test error when review is not found""" + mock_get_pending_review = mocker.patch( + "backend.data.human_review.get_pending_review_by_node_exec_id" + ) + mock_get_pending_review.return_value = None + + request_data = { + "approved_reviews": [ + { + "node_exec_id": "nonexistent_node", + "message": "Test", + } + ], + "rejected_review_ids": [], + } + + response = client.post("/api/review/action", json=request_data) + + assert response.status_code == 403 + assert "not found or access denied" in response.json()["detail"] + + +def test_process_review_action_partial_failure( + mocker: pytest_mock.MockFixture, + sample_pending_review: PendingHumanReviewModel, +) -> None: + """Test handling of partial failures in review processing""" + # Mock successful validation + mock_get_pending_review = mocker.patch( + "backend.data.human_review.get_pending_review_by_node_exec_id" + ) + mock_get_pending_review.return_value = sample_pending_review + + mock_get_reviews_for_execution = mocker.patch( + "backend.server.v2.executions.review.routes.get_pending_reviews_for_execution" + ) + mock_get_reviews_for_execution.return_value = [sample_pending_review] + + # Mock partial failure in processing + mock_process_all_reviews = mocker.patch( + "backend.server.v2.executions.review.routes.process_all_reviews_for_execution" + ) + mock_process_all_reviews.side_effect = ValueError("Some reviews failed validation") + + request_data = { + "approved_reviews": [ + { + "node_exec_id": "test_node_123", + "message": "Test", + } + ], + "rejected_review_ids": [], + } + + response = client.post("/api/review/action", json=request_data) + + 
assert response.status_code == 200 + data = response.json() + assert data["approved_count"] == 0 + assert data["rejected_count"] == 0 + assert data["failed_count"] == 1 + assert "Failed to process reviews" in data["error"] + + +def test_process_review_action_complete_failure( + mocker: pytest_mock.MockFixture, + sample_pending_review: PendingHumanReviewModel, +) -> None: + """Test complete failure scenario""" + # Mock successful validation + mock_get_pending_review = mocker.patch( + "backend.data.human_review.get_pending_review_by_node_exec_id" + ) + mock_get_pending_review.return_value = sample_pending_review + + mock_get_reviews_for_execution = mocker.patch( + "backend.server.v2.executions.review.routes.get_pending_reviews_for_execution" + ) + mock_get_reviews_for_execution.return_value = [sample_pending_review] + + # Mock complete failure in processing + mock_process_all_reviews = mocker.patch( + "backend.server.v2.executions.review.routes.process_all_reviews_for_execution" + ) + mock_process_all_reviews.side_effect = Exception("Database error") + + request_data = { + "approved_reviews": [ + { + "node_exec_id": "test_node_123", + "message": "Test", + } + ], + "rejected_review_ids": [], + } + + response = client.post("/api/review/action", json=request_data) + + assert response.status_code == 500 + assert "error" in response.json()["detail"].lower() diff --git a/autogpt_platform/backend/backend/server/v2/executions/review/routes.py b/autogpt_platform/backend/backend/server/v2/executions/review/routes.py new file mode 100644 index 0000000000..a8afe03635 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/executions/review/routes.py @@ -0,0 +1,194 @@ +import logging +from typing import List + +import autogpt_libs.auth as autogpt_auth_lib +from fastapi import APIRouter, HTTPException, Query, Security, status +from prisma.enums import ReviewStatus + +from backend.data.execution import get_graph_execution_meta +from backend.data.human_review import ( + 
get_pending_reviews_for_execution, + get_pending_reviews_for_user, + has_pending_reviews_for_graph_exec, + process_all_reviews_for_execution, +) +from backend.executor.utils import add_graph_execution +from backend.server.v2.executions.review.model import ( + PendingHumanReviewModel, + ReviewRequest, + ReviewResponse, +) + +logger = logging.getLogger(__name__) + + +router = APIRouter( + tags=["executions", "review", "private"], + dependencies=[Security(autogpt_auth_lib.requires_user)], +) + + +@router.get( + "/pending", + summary="Get Pending Reviews", + response_model=List[PendingHumanReviewModel], + responses={ + 200: {"description": "List of pending reviews"}, + 500: {"description": "Server error", "content": {"application/json": {}}}, + }, +) +async def list_pending_reviews( + user_id: str = Security(autogpt_auth_lib.get_user_id), + page: int = Query(1, ge=1, description="Page number (1-indexed)"), + page_size: int = Query(25, ge=1, le=100, description="Number of reviews per page"), +) -> List[PendingHumanReviewModel]: + """Get all pending reviews for the current user. + + Retrieves all reviews with status "WAITING" that belong to the authenticated user. + Results are ordered by creation time (newest first). + + Args: + user_id: Authenticated user ID from security dependency + + Returns: + List of pending review objects with status converted to typed literals + + Raises: + HTTPException: If authentication fails or database error occurs + + Note: + Reviews with invalid status values are logged as warnings but excluded + from results rather than failing the entire request. 
+ """ + + return await get_pending_reviews_for_user(user_id, page, page_size) + + +@router.get( + "/execution/{graph_exec_id}", + summary="Get Pending Reviews for Execution", + response_model=List[PendingHumanReviewModel], + responses={ + 200: {"description": "List of pending reviews for the execution"}, + 400: {"description": "Invalid graph execution ID"}, + 403: {"description": "Access denied to graph execution"}, + 500: {"description": "Server error", "content": {"application/json": {}}}, + }, +) +async def list_pending_reviews_for_execution( + graph_exec_id: str, + user_id: str = Security(autogpt_auth_lib.get_user_id), +) -> List[PendingHumanReviewModel]: + """Get all pending reviews for a specific graph execution. + + Retrieves all reviews with status "WAITING" for the specified graph execution + that belong to the authenticated user. Results are ordered by creation time + (oldest first) to preserve review order within the execution. + + Args: + graph_exec_id: ID of the graph execution to get reviews for + user_id: Authenticated user ID from security dependency + + Returns: + List of pending review objects for the specified execution + + Raises: + HTTPException: + - 403: If user doesn't own the graph execution + - 500: If authentication fails or database error occurs + + Note: + Only returns reviews owned by the authenticated user for security. + Reviews with invalid status are excluded with warning logs. 
+ """ + + # Verify user owns the graph execution before returning reviews + graph_exec = await get_graph_execution_meta( + user_id=user_id, execution_id=graph_exec_id + ) + if not graph_exec: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Access denied to graph execution", + ) + + return await get_pending_reviews_for_execution(graph_exec_id, user_id) + + +@router.post("/action", response_model=ReviewResponse) +async def process_review_action( + request: ReviewRequest, + user_id: str = Security(autogpt_auth_lib.get_user_id), +) -> ReviewResponse: + """Process reviews with approve or reject actions.""" + + # Collect all node exec IDs from the request + all_request_node_ids = {review.node_exec_id for review in request.reviews} + + if not all_request_node_ids: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="At least one review must be provided", + ) + + # Build review decisions map + review_decisions = {} + for review in request.reviews: + if review.approved: + review_decisions[review.node_exec_id] = ( + ReviewStatus.APPROVED, + review.reviewed_data, + review.message, + ) + else: + review_decisions[review.node_exec_id] = ( + ReviewStatus.REJECTED, + None, + review.message, + ) + + # Process all reviews + updated_reviews = await process_all_reviews_for_execution( + user_id=user_id, + review_decisions=review_decisions, + ) + + # Count results + approved_count = sum( + 1 + for review in updated_reviews.values() + if review.status == ReviewStatus.APPROVED + ) + rejected_count = sum( + 1 + for review in updated_reviews.values() + if review.status == ReviewStatus.REJECTED + ) + + # Resume execution if we processed some reviews + if updated_reviews: + # Get graph execution ID from any processed review + first_review = next(iter(updated_reviews.values())) + graph_exec_id = first_review.graph_exec_id + + # Check if any pending reviews remain for this execution + still_has_pending = await 
has_pending_reviews_for_graph_exec(graph_exec_id) + + if not still_has_pending: + # Resume execution + try: + await add_graph_execution( + graph_id=first_review.graph_id, + user_id=user_id, + graph_exec_id=graph_exec_id, + ) + logger.info(f"Resumed execution {graph_exec_id}") + except Exception as e: + logger.error(f"Failed to resume execution {graph_exec_id}: {str(e)}") + + return ReviewResponse( + approved_count=approved_count, + rejected_count=rejected_count, + failed_count=0, + error=None, + ) diff --git a/autogpt_platform/backend/backend/server/v2/library/routes/agents.py b/autogpt_platform/backend/backend/server/v2/library/routes/agents.py index 1bdf255ce5..eeea9d8fb6 100644 --- a/autogpt_platform/backend/backend/server/v2/library/routes/agents.py +++ b/autogpt_platform/backend/backend/server/v2/library/routes/agents.py @@ -22,7 +22,9 @@ router = APIRouter( @router.get( "", summary="List Library Agents", + response_model=library_model.LibraryAgentResponse, responses={ + 200: {"description": "List of library agents"}, 500: {"description": "Server error", "content": {"application/json": {}}}, }, ) @@ -155,7 +157,12 @@ async def get_library_agent_by_graph_id( @router.get( "/marketplace/{store_listing_version_id}", summary="Get Agent By Store ID", - tags=["store, library"], + tags=["store", "library"], + response_model=library_model.LibraryAgent | None, + responses={ + 200: {"description": "Library agent found"}, + 404: {"description": "Agent not found"}, + }, ) async def get_library_agent_by_store_listing_version_id( store_listing_version_id: str, diff --git a/autogpt_platform/backend/backend/server/v2/store/db.py b/autogpt_platform/backend/backend/server/v2/store/db.py index f6f0f812fe..fe782e8eea 100644 --- a/autogpt_platform/backend/backend/server/v2/store/db.py +++ b/autogpt_platform/backend/backend/server/v2/store/db.py @@ -12,7 +12,7 @@ import prisma.types import backend.server.v2.store.exceptions import backend.server.v2.store.model -from backend.data.db 
import transaction +from backend.data.db import query_raw_with_schema, transaction from backend.data.graph import ( GraphMeta, GraphModel, @@ -120,7 +120,7 @@ async def get_store_agents( is_available, updated_at, ts_rank_cd(search, query) AS rank - FROM "StoreAgent", + FROM {{schema_prefix}}"StoreAgent", plainto_tsquery('english', $1) AS query WHERE {sql_where_clause} AND search @@ query @@ -131,22 +131,18 @@ async def get_store_agents( # Count query for pagination - only uses search term parameter count_query = f""" SELECT COUNT(*) as count - FROM "StoreAgent", + FROM {{schema_prefix}}"StoreAgent", plainto_tsquery('english', $1) AS query WHERE {sql_where_clause} AND search @@ query """ # Execute both queries with parameters - agents = await prisma.client.get_client().query_raw( - typing.cast(typing.LiteralString, sql_query), *params - ) + agents = await query_raw_with_schema(sql_query, *params) # For count, use params without pagination (last 2 params) count_params = params[:-2] - count_result = await prisma.client.get_client().query_raw( - typing.cast(typing.LiteralString, count_query), *count_params - ) + count_result = await query_raw_with_schema(count_query, *count_params) total = count_result[0]["count"] if count_result else 0 total_pages = (total + page_size - 1) // page_size diff --git a/autogpt_platform/backend/backend/util/test.py b/autogpt_platform/backend/backend/util/test.py index 13b3365446..0a2015254b 100644 --- a/autogpt_platform/backend/backend/util/test.py +++ b/autogpt_platform/backend/backend/util/test.py @@ -140,6 +140,7 @@ async def execute_block_test(block: Block): "graph_exec_id": str(uuid.uuid4()), "node_exec_id": str(uuid.uuid4()), "user_id": str(uuid.uuid4()), + "graph_version": 1, # Default version for tests "user_context": UserContext(timezone="UTC"), # Default for tests } input_model = cast(type[BlockSchema], block.input_schema) diff --git a/autogpt_platform/backend/migrations/20251117102522_add_human_in_the_loop_table/migration.sql 
b/autogpt_platform/backend/migrations/20251117102522_add_human_in_the_loop_table/migration.sql new file mode 100644 index 0000000000..5a2cc2f722 --- /dev/null +++ b/autogpt_platform/backend/migrations/20251117102522_add_human_in_the_loop_table/migration.sql @@ -0,0 +1,44 @@ +-- CreateEnum +CREATE TYPE "ReviewStatus" AS ENUM ('WAITING', 'APPROVED', 'REJECTED'); + +-- AlterEnum +ALTER TYPE "AgentExecutionStatus" ADD VALUE 'REVIEW'; + +-- CreateTable +CREATE TABLE "PendingHumanReview" ( + "nodeExecId" TEXT NOT NULL, + "userId" TEXT NOT NULL, + "graphExecId" TEXT NOT NULL, + "graphId" TEXT NOT NULL, + "graphVersion" INTEGER NOT NULL, + "payload" JSONB NOT NULL, + "instructions" TEXT, + "editable" BOOLEAN NOT NULL DEFAULT true, + "status" "ReviewStatus" NOT NULL DEFAULT 'WAITING', + "reviewMessage" TEXT, + "wasEdited" BOOLEAN, + "processed" BOOLEAN NOT NULL DEFAULT false, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3), + "reviewedAt" TIMESTAMP(3), + + CONSTRAINT "PendingHumanReview_pkey" PRIMARY KEY ("nodeExecId") +); + +-- CreateIndex +CREATE INDEX "PendingHumanReview_userId_status_idx" ON "PendingHumanReview"("userId", "status"); + +-- CreateIndex +CREATE INDEX "PendingHumanReview_graphExecId_status_idx" ON "PendingHumanReview"("graphExecId", "status"); + +-- CreateIndex +CREATE UNIQUE INDEX "PendingHumanReview_nodeExecId_key" ON "PendingHumanReview"("nodeExecId"); + +-- AddForeignKey +ALTER TABLE "PendingHumanReview" ADD CONSTRAINT "PendingHumanReview_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "PendingHumanReview" ADD CONSTRAINT "PendingHumanReview_nodeExecId_fkey" FOREIGN KEY ("nodeExecId") REFERENCES "AgentNodeExecution"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "PendingHumanReview" ADD CONSTRAINT "PendingHumanReview_graphExecId_fkey" FOREIGN KEY ("graphExecId") REFERENCES "AgentGraphExecution"("id") ON 
DELETE CASCADE ON UPDATE CASCADE; diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma index e8755d99ab..ca015a3cb9 100644 --- a/autogpt_platform/backend/schema.prisma +++ b/autogpt_platform/backend/schema.prisma @@ -59,6 +59,7 @@ model User { APIKeys APIKey[] IntegrationWebhooks IntegrationWebhook[] NotificationBatches UserNotificationBatch[] + PendingHumanReviews PendingHumanReview[] } enum OnboardingStep { @@ -351,6 +352,7 @@ enum AgentExecutionStatus { COMPLETED TERMINATED FAILED + REVIEW } // This model describes the execution of an AgentGraph. @@ -393,6 +395,8 @@ model AgentGraphExecution { shareToken String? @unique sharedAt DateTime? + PendingHumanReviews PendingHumanReview[] + @@index([agentGraphId, agentGraphVersion]) @@index([userId, isDeleted, createdAt]) @@index([createdAt]) @@ -423,6 +427,8 @@ model AgentNodeExecution { stats Json? + PendingHumanReview PendingHumanReview? + @@index([agentGraphExecutionId, agentNodeId, executionStatus]) @@index([agentNodeId, executionStatus]) @@index([addedTime, queuedTime]) @@ -464,6 +470,39 @@ model AgentNodeExecutionKeyValueData { @@id([userId, key]) } +enum ReviewStatus { + WAITING + APPROVED + REJECTED +} + +// Pending human reviews for Human-in-the-loop blocks +model PendingHumanReview { + nodeExecId String @id + userId String + graphExecId String + graphId String + graphVersion Int + payload Json // The actual payload data to be reviewed + instructions String? // Instructions/message for the reviewer + editable Boolean @default(true) // Whether the reviewer can edit the data + status ReviewStatus @default(WAITING) + reviewMessage String? // Optional message from the reviewer + wasEdited Boolean? // Whether the data was modified during review + processed Boolean @default(false) // Whether the review result has been processed by the execution engine + createdAt DateTime @default(now()) + updatedAt DateTime? @updatedAt + reviewedAt DateTime? 
+ + User User @relation(fields: [userId], references: [id], onDelete: Cascade) + NodeExecution AgentNodeExecution @relation(fields: [nodeExecId], references: [id], onDelete: Cascade) + GraphExecution AgentGraphExecution @relation(fields: [graphExecId], references: [id], onDelete: Cascade) + + @@unique([nodeExecId]) // One pending review per node execution + @@index([userId, status]) + @@index([graphExecId, status]) +} + // Webhook that is registered with a provider and propagates to one or more nodes model IntegrationWebhook { id String @id @default(uuid()) diff --git a/autogpt_platform/frontend/next.config.mjs b/autogpt_platform/frontend/next.config.mjs index d4df72a643..d4595990a2 100644 --- a/autogpt_platform/frontend/next.config.mjs +++ b/autogpt_platform/frontend/next.config.mjs @@ -34,7 +34,8 @@ const nextConfig = { }, ], }, - output: "standalone", + // Vercel has its own deployment mechanism and doesn't need standalone mode + ...(process.env.VERCEL ? {} : { output: "standalone" }), transpilePackages: ["geist"], }; diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json index 52ba28064f..29a28059a6 100644 --- a/autogpt_platform/frontend/package.json +++ b/autogpt_platform/frontend/package.json @@ -54,7 +54,7 @@ "@rjsf/core": "5.24.13", "@rjsf/utils": "5.24.13", "@rjsf/validator-ajv8": "5.24.13", - "@sentry/nextjs": "10.22.0", + "@sentry/nextjs": "10.27.0", "@supabase/ssr": "0.7.0", "@supabase/supabase-js": "2.78.0", "@tanstack/react-query": "5.90.6", @@ -134,7 +134,7 @@ "axe-playwright": "2.2.2", "chromatic": "13.3.3", "concurrently": "9.2.1", - "cross-env": "7.0.3", + "cross-env": "10.1.0", "eslint": "8.57.1", "eslint-config-next": "15.5.2", "eslint-plugin-storybook": "9.1.5", diff --git a/autogpt_platform/frontend/pnpm-lock.yaml b/autogpt_platform/frontend/pnpm-lock.yaml index 848fd3a88d..406fcb212f 100644 --- a/autogpt_platform/frontend/pnpm-lock.yaml +++ b/autogpt_platform/frontend/pnpm-lock.yaml @@ -87,8 +87,8 @@ 
importers: specifier: 5.24.13 version: 5.24.13(@rjsf/utils@5.24.13(react@18.3.1)) '@sentry/nextjs': - specifier: 10.22.0 - version: 10.22.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9)) + specifier: 10.27.0 + version: 10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9)) '@supabase/ssr': specifier: 0.7.0 version: 0.7.0(@supabase/supabase-js@2.78.0) @@ -322,8 +322,8 @@ importers: specifier: 9.2.1 version: 9.2.1 cross-env: - specifier: 7.0.3 - version: 7.0.3 + specifier: 10.1.0 + version: 10.1.0 eslint: specifier: 8.57.1 version: 8.57.1 @@ -1001,6 +1001,9 @@ packages: '@emotion/unitless@0.8.1': resolution: {integrity: sha512-KOEGMu6dmJZtpadb476IsZBclKvILjopjUii3V+7MnXIQCYh8W3NgNcgwo21n9LXZX6EDIKvqfjYxXebDwxKmQ==} + '@epic-web/invariant@1.0.0': + resolution: {integrity: sha512-lrTPqgvfFQtR/eY/qkIzp98OGdNJu0m5ji3q/nJI8v3SXkRKEnWiOxMmbvcSoAIzv/cGiuvRy57k4suKQSAdwA==} + '@esbuild/aix-ppc64@0.25.11': resolution: {integrity: sha512-Xt1dOL13m8u0WE8iplx9Ibbm+hFAO0GsU2P34UNoDGvZYkY8ifSiy6Zuc1lYxfG7svWE2fzqCUmFp5HCn51gJg==} engines: {node: '>=18'} @@ -1681,186 +1684,176 @@ packages: '@open-draft/until@2.1.0': resolution: {integrity: sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==} - '@opentelemetry/api-logs@0.204.0': - resolution: {integrity: 
sha512-DqxY8yoAaiBPivoJD4UtgrMS8gEmzZ5lnaxzPojzLVHBGqPxgWm4zcuvcUHZiqQ6kRX2Klel2r9y8cA2HAtqpw==} + '@opentelemetry/api-logs@0.208.0': + resolution: {integrity: sha512-CjruKY9V6NMssL/T1kAFgzosF1v9o6oeN+aX5JB/C/xPNtmgIJqcXHG7fA82Ou1zCpWGl4lROQUKwUNE1pMCyg==} engines: {node: '>=8.0.0'} - '@opentelemetry/api-logs@0.57.2': - resolution: {integrity: sha512-uIX52NnTM0iBh84MShlpouI7UKqkZ7MrUszTmaypHBu4r7NofznSnQRfJ+uUeDtQDj6w8eFGg5KBLDAwAPz1+A==} - engines: {node: '>=14'} - '@opentelemetry/api@1.9.0': resolution: {integrity: sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==} engines: {node: '>=8.0.0'} - '@opentelemetry/context-async-hooks@2.1.0': - resolution: {integrity: sha512-zOyetmZppnwTyPrt4S7jMfXiSX9yyfF0hxlA8B5oo2TtKl+/RGCy7fi4DrBfIf3lCPrkKsRBWZZD7RFojK7FDg==} + '@opentelemetry/context-async-hooks@2.2.0': + resolution: {integrity: sha512-qRkLWiUEZNAmYapZ7KGS5C4OmBLcP/H2foXeOEaowYCR0wi89fHejrfYfbuLVCMLp/dWZXKvQusdbUEZjERfwQ==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@opentelemetry/core@2.1.0': - resolution: {integrity: sha512-RMEtHsxJs/GiHHxYT58IY57UXAQTuUnZVco6ymDEqTNlJKTimM4qPUPVe8InNFyBjhHBEAx4k3Q8LtNayBsbUQ==} + '@opentelemetry/core@2.2.0': + resolution: {integrity: sha512-FuabnnUm8LflnieVxs6eP7Z383hgQU4W1e3KJS6aOG3RxWxcHyBxH8fDMHNgu/gFx/M2jvTOW/4/PHhLz6bjWw==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@opentelemetry/instrumentation-amqplib@0.51.0': - resolution: {integrity: sha512-XGmjYwjVRktD4agFnWBWQXo9SiYHKBxR6Ag3MLXwtLE4R99N3a08kGKM5SC1qOFKIELcQDGFEFT9ydXMH00Luw==} + '@opentelemetry/instrumentation-amqplib@0.55.0': + resolution: {integrity: sha512-5ULoU8p+tWcQw5PDYZn8rySptGSLZHNX/7srqo2TioPnAAcvTy6sQFQXsNPrAnyRRtYGMetXVyZUy5OaX1+IfA==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-connect@0.48.0': - resolution: {integrity: 
sha512-OMjc3SFL4pC16PeK+tDhwP7MRvDPalYCGSvGqUhX5rASkI2H0RuxZHOWElYeXkV0WP+70Gw6JHWac/2Zqwmhdw==} + '@opentelemetry/instrumentation-connect@0.52.0': + resolution: {integrity: sha512-GXPxfNB5szMbV3I9b7kNWSmQBoBzw7MT0ui6iU/p+NIzVx3a06Ri2cdQO7tG9EKb4aKSLmfX9Cw5cKxXqX6Ohg==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-dataloader@0.22.0': - resolution: {integrity: sha512-bXnTcwtngQsI1CvodFkTemrrRSQjAjZxqHVc+CJZTDnidT0T6wt3jkKhnsjU/Kkkc0lacr6VdRpCu2CUWa0OKw==} + '@opentelemetry/instrumentation-dataloader@0.26.0': + resolution: {integrity: sha512-P2BgnFfTOarZ5OKPmYfbXfDFjQ4P9WkQ1Jji7yH5/WwB6Wm/knynAoA1rxbjWcDlYupFkyT0M1j6XLzDzy0aCA==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-express@0.53.0': - resolution: {integrity: sha512-r/PBafQmFYRjuxLYEHJ3ze1iBnP2GDA1nXOSS6E02KnYNZAVjj6WcDA1MSthtdAUUK0XnotHvvWM8/qz7DMO5A==} + '@opentelemetry/instrumentation-express@0.57.0': + resolution: {integrity: sha512-HAdx/o58+8tSR5iW+ru4PHnEejyKrAy9fYFhlEI81o10nYxrGahnMAHWiSjhDC7UQSY3I4gjcPgSKQz4rm/asg==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-fs@0.24.0': - resolution: {integrity: sha512-HjIxJ6CBRD770KNVaTdMXIv29Sjz4C1kPCCK5x1Ujpc6SNnLGPqUVyJYZ3LUhhnHAqdbrl83ogVWjCgeT4Q0yw==} + '@opentelemetry/instrumentation-fs@0.28.0': + resolution: {integrity: sha512-FFvg8fq53RRXVBRHZViP+EMxMR03tqzEGpuq55lHNbVPyFklSVfQBN50syPhK5UYYwaStx0eyCtHtbRreusc5g==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-generic-pool@0.48.0': - resolution: {integrity: sha512-TLv/On8pufynNR+pUbpkyvuESVASZZKMlqCm4bBImTpXKTpqXaJJ3o/MUDeMlM91rpen+PEv2SeyOKcHCSlgag==} + '@opentelemetry/instrumentation-generic-pool@0.52.0': + resolution: {integrity: 
sha512-ISkNcv5CM2IwvsMVL31Tl61/p2Zm2I2NAsYq5SSBgOsOndT0TjnptjufYVScCnD5ZLD1tpl4T3GEYULLYOdIdQ==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-graphql@0.52.0': - resolution: {integrity: sha512-3fEJ8jOOMwopvldY16KuzHbRhPk8wSsOTSF0v2psmOCGewh6ad+ZbkTx/xyUK9rUdUMWAxRVU0tFpj4Wx1vkPA==} + '@opentelemetry/instrumentation-graphql@0.56.0': + resolution: {integrity: sha512-IPvNk8AFoVzTAM0Z399t34VDmGDgwT6rIqCUug8P9oAGerl2/PEIYMPOl/rerPGu+q8gSWdmbFSjgg7PDVRd3Q==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-hapi@0.51.0': - resolution: {integrity: sha512-qyf27DaFNL1Qhbo/da+04MSCw982B02FhuOS5/UF+PMhM61CcOiu7fPuXj8TvbqyReQuJFljXE6UirlvoT/62g==} + '@opentelemetry/instrumentation-hapi@0.55.0': + resolution: {integrity: sha512-prqAkRf9e4eEpy4G3UcR32prKE8NLNlA90TdEU1UsghOTg0jUvs40Jz8LQWFEs5NbLbXHYGzB4CYVkCI8eWEVQ==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-http@0.204.0': - resolution: {integrity: sha512-1afJYyGRA4OmHTv0FfNTrTAzoEjPQUYgd+8ih/lX0LlZBnGio/O80vxA0lN3knsJPS7FiDrsDrWq25K7oAzbkw==} + '@opentelemetry/instrumentation-http@0.208.0': + resolution: {integrity: sha512-rhmK46DRWEbQQB77RxmVXGyjs6783crXCnFjYQj+4tDH/Kpv9Rbg3h2kaNyp5Vz2emF1f9HOQQvZoHzwMWOFZQ==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-ioredis@0.52.0': - resolution: {integrity: sha512-rUvlyZwI90HRQPYicxpDGhT8setMrlHKokCtBtZgYxQWRF5RBbG4q0pGtbZvd7kyseuHbFpA3I/5z7M8b/5ywg==} + '@opentelemetry/instrumentation-ioredis@0.56.0': + resolution: {integrity: sha512-XSWeqsd3rKSsT3WBz/JKJDcZD4QYElZEa0xVdX8f9dh4h4QgXhKRLorVsVkK3uXFbC2sZKAS2Ds+YolGwD83Dg==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-kafkajs@0.14.0': - resolution: {integrity: 
sha512-kbB5yXS47dTIdO/lfbbXlzhvHFturbux4EpP0+6H78Lk0Bn4QXiZQW7rmZY1xBCY16mNcCb8Yt0mhz85hTnSVA==} + '@opentelemetry/instrumentation-kafkajs@0.18.0': + resolution: {integrity: sha512-KCL/1HnZN5zkUMgPyOxfGjLjbXjpd4odDToy+7c+UsthIzVLFf99LnfIBE8YSSrYE4+uS7OwJMhvhg3tWjqMBg==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-knex@0.49.0': - resolution: {integrity: sha512-NKsRRT27fbIYL4Ix+BjjP8h4YveyKc+2gD6DMZbr5R5rUeDqfC8+DTfIt3c3ex3BIc5Vvek4rqHnN7q34ZetLQ==} + '@opentelemetry/instrumentation-knex@0.53.0': + resolution: {integrity: sha512-xngn5cH2mVXFmiT1XfQ1aHqq1m4xb5wvU6j9lSgLlihJ1bXzsO543cpDwjrZm2nMrlpddBf55w8+bfS4qDh60g==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-koa@0.52.0': - resolution: {integrity: sha512-JJSBYLDx/mNSy8Ibi/uQixu2rH0bZODJa8/cz04hEhRaiZQoeJ5UrOhO/mS87IdgVsHrnBOsZ6vDu09znupyuA==} + '@opentelemetry/instrumentation-koa@0.57.0': + resolution: {integrity: sha512-3JS8PU/D5E3q295mwloU2v7c7/m+DyCqdu62BIzWt+3u9utjxC9QS7v6WmUNuoDN3RM+Q+D1Gpj13ERo+m7CGg==} + engines: {node: ^18.19.0 || >=20.6.0} + peerDependencies: + '@opentelemetry/api': ^1.9.0 + + '@opentelemetry/instrumentation-lru-memoizer@0.53.0': + resolution: {integrity: sha512-LDwWz5cPkWWr0HBIuZUjslyvijljTwmwiItpMTHujaULZCxcYE9eU44Qf/pbVC8TulT0IhZi+RoGvHKXvNhysw==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-lru-memoizer@0.49.0': - resolution: {integrity: sha512-ctXu+O/1HSadAxtjoEg2w307Z5iPyLOMM8IRNwjaKrIpNAthYGSOanChbk1kqY6zU5CrpkPHGdAT6jk8dXiMqw==} + '@opentelemetry/instrumentation-mongodb@0.61.0': + resolution: {integrity: sha512-OV3i2DSoY5M/pmLk+68xr5RvkHU8DRB3DKMzYJdwDdcxeLs62tLbkmRyqJZsYf3Ht7j11rq35pHOWLuLzXL7pQ==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-mongodb@0.57.0': - resolution: {integrity: 
sha512-KD6Rg0KSHWDkik+qjIOWoksi1xqSpix8TSPfquIK1DTmd9OTFb5PHmMkzJe16TAPVEuElUW8gvgP59cacFcrMQ==} + '@opentelemetry/instrumentation-mongoose@0.55.0': + resolution: {integrity: sha512-5afj0HfF6aM6Nlqgu6/PPHFk8QBfIe3+zF9FGpX76jWPS0/dujoEYn82/XcLSaW5LPUDW8sni+YeK0vTBNri+w==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-mongoose@0.51.0': - resolution: {integrity: sha512-gwWaAlhhV2By7XcbyU3DOLMvzsgeaymwP/jktDC+/uPkCmgB61zurwqOQdeiRq9KAf22Y2dtE5ZLXxytJRbEVA==} + '@opentelemetry/instrumentation-mysql2@0.55.0': + resolution: {integrity: sha512-0cs8whQG55aIi20gnK8B7cco6OK6N+enNhW0p5284MvqJ5EPi+I1YlWsWXgzv/V2HFirEejkvKiI4Iw21OqDWg==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-mysql2@0.51.0': - resolution: {integrity: sha512-zT2Wg22Xn43RyfU3NOUmnFtb5zlDI0fKcijCj9AcK9zuLZ4ModgtLXOyBJSSfO+hsOCZSC1v/Fxwj+nZJFdzLQ==} + '@opentelemetry/instrumentation-mysql@0.54.0': + resolution: {integrity: sha512-bqC1YhnwAeWmRzy1/Xf9cDqxNG2d/JDkaxnqF5N6iJKN1eVWI+vg7NfDkf52/Nggp3tl1jcC++ptC61BD6738A==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-mysql@0.50.0': - resolution: {integrity: sha512-duKAvMRI3vq6u9JwzIipY9zHfikN20bX05sL7GjDeLKr2qV0LQ4ADtKST7KStdGcQ+MTN5wghWbbVdLgNcB3rA==} + '@opentelemetry/instrumentation-pg@0.61.0': + resolution: {integrity: sha512-UeV7KeTnRSM7ECHa3YscoklhUtTQPs6V6qYpG283AB7xpnPGCUCUfECFT9jFg6/iZOQTt3FHkB1wGTJCNZEvPw==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-pg@0.57.0': - resolution: {integrity: sha512-dWLGE+r5lBgm2A8SaaSYDE3OKJ/kwwy5WLyGyzor8PLhUL9VnJRiY6qhp4njwhnljiLtzeffRtG2Mf/YyWLeTw==} + '@opentelemetry/instrumentation-redis@0.57.0': + resolution: {integrity: sha512-bCxTHQFXzrU3eU1LZnOZQ3s5LURxQPDlU3/upBzlWY77qOI1GZuGofazj3jtzjctMJeBEJhNwIFEgRPBX1kp/Q==} engines: 
{node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-redis@0.53.0': - resolution: {integrity: sha512-WUHV8fr+8yo5RmzyU7D5BIE1zwiaNQcTyZPwtxlfr7px6NYYx7IIpSihJK7WA60npWynfxxK1T67RAVF0Gdfjg==} + '@opentelemetry/instrumentation-tedious@0.27.0': + resolution: {integrity: sha512-jRtyUJNZppPBjPae4ZjIQ2eqJbcRaRfJkr0lQLHFmOU/no5A6e9s1OHLd5XZyZoBJ/ymngZitanyRRA5cniseA==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-tedious@0.23.0': - resolution: {integrity: sha512-3TMTk/9VtlRonVTaU4tCzbg4YqW+Iq/l5VnN2e5whP6JgEg/PKfrGbqQ+CxQWNLfLaQYIUgEZqAn5gk/inh1uQ==} - engines: {node: ^18.19.0 || >=20.6.0} - peerDependencies: - '@opentelemetry/api': ^1.3.0 - - '@opentelemetry/instrumentation-undici@0.15.0': - resolution: {integrity: sha512-sNFGA/iCDlVkNjzTzPRcudmI11vT/WAfAguRdZY9IspCw02N4WSC72zTuQhSMheh2a1gdeM9my1imnKRvEEvEg==} + '@opentelemetry/instrumentation-undici@0.19.0': + resolution: {integrity: sha512-Pst/RhR61A2OoZQZkn6OLpdVpXp6qn3Y92wXa6umfJe9rV640r4bc6SWvw4pPN6DiQqPu2c8gnSSZPDtC6JlpQ==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.7.0 - '@opentelemetry/instrumentation@0.204.0': - resolution: {integrity: sha512-vV5+WSxktzoMP8JoYWKeopChy6G3HKk4UQ2hESCRDUUTZqQ3+nM3u8noVG0LmNfRWwcFBnbZ71GKC7vaYYdJ1g==} + '@opentelemetry/instrumentation@0.208.0': + resolution: {integrity: sha512-Eju0L4qWcQS+oXxi6pgh7zvE2byogAkcsVv0OjHF/97iOz1N/aKE6etSGowYkie+YA1uo6DNwdSxaaNnLvcRlA==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation@0.57.2': - resolution: {integrity: sha512-BdBGhQBh8IjZ2oIIX6F2/Q3LKm/FDDKi6ccYKcBTeilh6SNdNKveDOLk73BkSJjQLJk6qe4Yh+hHw1UPhCDdrg==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.3.0 - - '@opentelemetry/redis-common@0.38.0': - resolution: {integrity: 
sha512-4Wc0AWURII2cfXVVoZ6vDqK+s5n4K5IssdrlVrvGsx6OEOKdghKtJZqXAHWFiZv4nTDLH2/2fldjIHY8clMOjQ==} + '@opentelemetry/redis-common@0.38.2': + resolution: {integrity: sha512-1BCcU93iwSRZvDAgwUxC/DV4T/406SkMfxGqu5ojc3AvNI+I9GhV7v0J1HljsczuuhcnFLYqD5VmwVXfCGHzxA==} engines: {node: ^18.19.0 || >=20.6.0} - '@opentelemetry/resources@2.1.0': - resolution: {integrity: sha512-1CJjf3LCvoefUOgegxi8h6r4B/wLSzInyhGP2UmIBYNlo4Qk5CZ73e1eEyWmfXvFtm1ybkmfb2DqWvspsYLrWw==} + '@opentelemetry/resources@2.2.0': + resolution: {integrity: sha512-1pNQf/JazQTMA0BiO5NINUzH0cbLbbl7mntLa4aJNmCCXSj0q03T5ZXXL0zw4G55TjdL9Tz32cznGClf+8zr5A==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': '>=1.3.0 <1.10.0' - '@opentelemetry/sdk-trace-base@2.1.0': - resolution: {integrity: sha512-uTX9FBlVQm4S2gVQO1sb5qyBLq/FPjbp+tmGoxu4tIgtYGmBYB44+KX/725RFDe30yBSaA9Ml9fqphe1hbUyLQ==} + '@opentelemetry/sdk-trace-base@2.2.0': + resolution: {integrity: sha512-xWQgL0Bmctsalg6PaXExmzdedSp3gyKV8mQBwK/j9VGdCDu2fmXIb2gAehBKbkXCpJ4HPkgv3QfoJWRT4dHWbw==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': '>=1.3.0 <1.10.0' @@ -1869,8 +1862,8 @@ packages: resolution: {integrity: sha512-JD6DerIKdJGmRp4jQyX5FlrQjA4tjOw1cvfsPAZXfOOEErMUHjPcPSICS+6WnM0nB0efSFARh0KAZss+bvExOA==} engines: {node: '>=14'} - '@opentelemetry/sql-common@0.41.0': - resolution: {integrity: sha512-pmzXctVbEERbqSfiAgdes9Y63xjoOyXcD7B6IXBkVb+vbM7M9U98mn33nGXxPf4dfYR0M+vhcKRZmbSJ7HfqFA==} + '@opentelemetry/sql-common@0.41.2': + resolution: {integrity: sha512-4mhWm3Z8z+i508zQJ7r6Xi7y4mmoJpdvH0fZPFRkWrdp5fq7hhZ2HhYokEOLkfqSMgPR4Z9EyB3DBkbKGOqZiQ==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.1.0 @@ -1947,8 +1940,8 @@ packages: webpack-plugin-serve: optional: true - '@prisma/instrumentation@6.15.0': - resolution: {integrity: sha512-6TXaH6OmDkMOQvOxwLZ8XS51hU2v4A3vmE2pSijCIiGRJYyNeMcL6nMHQMyYdZRD8wl7LF3Wzc+AMPMV/9Oo7A==} + '@prisma/instrumentation@6.19.0': + resolution: 
{integrity: sha512-QcuYy25pkXM8BJ37wVFBO7Zh34nyRV1GOb2n3lPkkbRYfl4hWl3PTcImP41P0KrzVXfa/45p6eVCos27x3exIg==} peerDependencies: '@opentelemetry/api': ^1.8 @@ -2632,130 +2625,190 @@ packages: '@scarf/scarf@1.4.0': resolution: {integrity: sha512-xxeapPiUXdZAE3che6f3xogoJPeZgig6omHEy1rIY5WVsB3H2BHNnZH+gHG6x91SCWyQCzWGsuL2Hh3ClO5/qQ==} - '@sentry-internal/browser-utils@10.22.0': - resolution: {integrity: sha512-BpJoLZEyJr7ORzkCrIjxRTnFWwO1mJNICVh3B9g5d9245niGT4OJvRozmLz89WgJkZFHWu84ls6Xfq5b/3tGFQ==} + '@sentry-internal/browser-utils@10.27.0': + resolution: {integrity: sha512-17tO6AXP+rmVQtLJ3ROQJF2UlFmvMWp7/8RDT5x9VM0w0tY31z8Twc0gw2KA7tcDxa5AaHDUbf9heOf+R6G6ow==} engines: {node: '>=18'} - '@sentry-internal/feedback@10.22.0': - resolution: {integrity: sha512-zXySOin/gGHPV+yKaHqjN9YZ7psEJwzLn8PzCLeo+4REzF1eQwbYZIgOxJFD32z8s3nZiABSWFM/n1CvVfMEsQ==} + '@sentry-internal/feedback@10.27.0': + resolution: {integrity: sha512-UecsIDJcv7VBwycge/MDvgSRxzevDdcItE1i0KSwlPz00rVVxLY9kV28PJ4I2E7r6/cIaP9BkbWegCEcv09NuA==} engines: {node: '>=18'} - '@sentry-internal/replay-canvas@10.22.0': - resolution: {integrity: sha512-DE4JNUskJg+O+wFq42W5gAa/99aD5k7TfGOwABxvnzFv8vkKA7pqXwPbFFPzypdKIkln+df7RmbnDwQRNg6/lA==} + '@sentry-internal/replay-canvas@10.27.0': + resolution: {integrity: sha512-inhsRYSVBpu3BI1kZphXj6uB59baJpYdyHeIPCiTfdFNBE5tngNH0HS/aedZ1g9zICw290lwvpuyrWJqp4VBng==} engines: {node: '>=18'} - '@sentry-internal/replay@10.22.0': - resolution: {integrity: sha512-JNE4kHAQSG4/V+J+Zog3vKBWgOe9H33ol/MEU1RuLM/4I+uLf4mTetwnS9ilpnnW/Z/gQYfA+R3CiMrZtqTivw==} + '@sentry-internal/replay@10.27.0': + resolution: {integrity: sha512-tKSzHq1hNzB619Ssrqo25cqdQJ84R3xSSLsUWEnkGO/wcXJvpZy94gwdoS+KmH18BB1iRRRGtnMxZcUkiPSesw==} engines: {node: '>=18'} '@sentry/babel-plugin-component-annotate@4.3.0': resolution: {integrity: sha512-OuxqBprXRyhe8Pkfyz/4yHQJc5c3lm+TmYWSSx8u48g5yKewSQDOxkiLU5pAk3WnbLPy8XwU/PN+2BG0YFU9Nw==} engines: {node: '>= 14'} - '@sentry/browser@10.22.0': - resolution: {integrity: 
sha512-wD2XqN+yeBpQFfdPo6+wlKDMyyuDctVGzZWE4qTPntICKQuwMdAfeq5Ma89ad0Dw+bzG9UijGeyuJQlswF87Mw==} + '@sentry/babel-plugin-component-annotate@4.6.1': + resolution: {integrity: sha512-aSIk0vgBqv7PhX6/Eov+vlI4puCE0bRXzUG5HdCsHBpAfeMkI8Hva6kSOusnzKqs8bf04hU7s3Sf0XxGTj/1AA==} + engines: {node: '>= 14'} + + '@sentry/browser@10.27.0': + resolution: {integrity: sha512-G8q362DdKp9y1b5qkQEmhTFzyWTOVB0ps1rflok0N6bVA75IEmSDX1pqJsNuY3qy14VsVHYVwQBJQsNltQLS0g==} engines: {node: '>=18'} '@sentry/bundler-plugin-core@4.3.0': resolution: {integrity: sha512-dmR4DJhJ4jqVWGWppuTL2blNFqOZZnt4aLkewbD1myFG3KVfUx8CrMQWEmGjkgPOtj5TO6xH9PyTJjXC6o5tnA==} engines: {node: '>= 14'} + '@sentry/bundler-plugin-core@4.6.1': + resolution: {integrity: sha512-WPeRbnMXm927m4Kr69NTArPfI+p5/34FHftdCRI3LFPMyhZDzz6J3wLy4hzaVUgmMf10eLzmq2HGEMvpQmdynA==} + engines: {node: '>= 14'} + '@sentry/cli-darwin@2.55.0': resolution: {integrity: sha512-jGHE7SHHzqXUmnsmRLgorVH6nmMmTjQQXdPZbSL5tRtH8d3OIYrVNr5D72DSgD26XAPBDMV0ibqOQ9NKoiSpfA==} engines: {node: '>=10'} os: [darwin] + '@sentry/cli-darwin@2.58.2': + resolution: {integrity: sha512-MArsb3zLhA2/cbd4rTm09SmTpnEuZCoZOpuZYkrpDw1qzBVJmRFA1W1hGAQ9puzBIk/ubY3EUhhzuU3zN2uD6w==} + engines: {node: '>=10'} + os: [darwin] + '@sentry/cli-linux-arm64@2.55.0': resolution: {integrity: sha512-jNB/0/gFcOuDCaY/TqeuEpsy/k52dwyk1SOV3s1ku4DUsln6govTppeAGRewY3T1Rj9B2vgIWTrnB8KVh9+Rgg==} engines: {node: '>=10'} cpu: [arm64] os: [linux, freebsd, android] + '@sentry/cli-linux-arm64@2.58.2': + resolution: {integrity: sha512-ay3OeObnbbPrt45cjeUyQjsx5ain1laj1tRszWj37NkKu55NZSp4QCg1gGBZ0gBGhckI9nInEsmKtix00alw2g==} + engines: {node: '>=10'} + cpu: [arm64] + os: [linux, freebsd, android] + '@sentry/cli-linux-arm@2.55.0': resolution: {integrity: sha512-ATjU0PsiWADSPLF/kZroLZ7FPKd5W9TDWHVkKNwIUNTei702LFgTjNeRwOIzTgSvG3yTmVEqtwFQfFN/7hnVXQ==} engines: {node: '>=10'} cpu: [arm] os: [linux, freebsd, android] + '@sentry/cli-linux-arm@2.58.2': + resolution: {integrity: 
sha512-HU9lTCzcHqCz/7Mt5n+cv+nFuJdc1hGD2h35Uo92GgxX3/IujNvOUfF+nMX9j6BXH6hUt73R5c0Ycq9+a3Parg==} + engines: {node: '>=10'} + cpu: [arm] + os: [linux, freebsd, android] + '@sentry/cli-linux-i686@2.55.0': resolution: {integrity: sha512-8LZjo6PncTM6bWdaggscNOi5r7F/fqRREsCwvd51dcjGj7Kp1plqo9feEzYQ+jq+KUzVCiWfHrUjddFmYyZJrg==} engines: {node: '>=10'} cpu: [x86, ia32] os: [linux, freebsd, android] + '@sentry/cli-linux-i686@2.58.2': + resolution: {integrity: sha512-CN9p0nfDFsAT1tTGBbzOUGkIllwS3hygOUyTK7LIm9z+UHw5uNgNVqdM/3Vg+02ymjkjISNB3/+mqEM5osGXdA==} + engines: {node: '>=10'} + cpu: [x86, ia32] + os: [linux, freebsd, android] + '@sentry/cli-linux-x64@2.55.0': resolution: {integrity: sha512-5LUVvq74Yj2cZZy5g5o/54dcWEaX4rf3myTHy73AKhRj1PABtOkfexOLbF9xSrZy95WXWaXyeH+k5n5z/vtHfA==} engines: {node: '>=10'} cpu: [x64] os: [linux, freebsd, android] + '@sentry/cli-linux-x64@2.58.2': + resolution: {integrity: sha512-oX/LLfvWaJO50oBVOn4ZvG2SDWPq0MN8SV9eg5tt2nviq+Ryltfr7Rtoo+HfV+eyOlx1/ZXhq9Wm7OT3cQuz+A==} + engines: {node: '>=10'} + cpu: [x64] + os: [linux, freebsd, android] + '@sentry/cli-win32-arm64@2.55.0': resolution: {integrity: sha512-cWIQdzm1pfLwPARsV6dUb8TVd6Y3V1A2VWxjTons3Ift6GvtVmiAe0OWL8t2Yt95i8v61kTD/6Tq21OAaogqzA==} engines: {node: '>=10'} cpu: [arm64] os: [win32] + '@sentry/cli-win32-arm64@2.58.2': + resolution: {integrity: sha512-+cl3x2HPVMpoSVGVM1IDWlAEREZrrVQj4xBb0TRKII7g3hUxRsAIcsrr7+tSkie++0FuH4go/b5fGAv51OEF3w==} + engines: {node: '>=10'} + cpu: [arm64] + os: [win32] + '@sentry/cli-win32-i686@2.55.0': resolution: {integrity: sha512-ldepCn2t9r4I0wvgk7NRaA7coJyy4rTQAzM66u9j5nTEsUldf66xym6esd5ZZRAaJUjffqvHqUIr/lrieTIrVg==} engines: {node: '>=10'} cpu: [x86, ia32] os: [win32] + '@sentry/cli-win32-i686@2.58.2': + resolution: {integrity: sha512-omFVr0FhzJ8oTJSg1Kf+gjLgzpYklY0XPfLxZ5iiMiYUKwF5uo1RJRdkUOiEAv0IqpUKnmKcmVCLaDxsWclB7Q==} + engines: {node: '>=10'} + cpu: [x86, ia32] + os: [win32] + '@sentry/cli-win32-x64@2.55.0': resolution: {integrity: 
sha512-4hPc/I/9tXx+HLTdTGwlagtAfDSIa2AoTUP30tl32NAYQhx9a6niUbPAemK2qfxesiufJ7D2djX83rCw6WnJVA==} engines: {node: '>=10'} cpu: [x64] os: [win32] + '@sentry/cli-win32-x64@2.58.2': + resolution: {integrity: sha512-2NAFs9UxVbRztQbgJSP5i8TB9eJQ7xraciwj/93djrSMHSEbJ0vC47TME0iifgvhlHMs5vqETOKJtfbbpQAQFA==} + engines: {node: '>=10'} + cpu: [x64] + os: [win32] + '@sentry/cli@2.55.0': resolution: {integrity: sha512-cynvcIM2xL8ddwELyFRSpZQw4UtFZzoM2rId2l9vg7+wDREPDocMJB9lEQpBIo3eqhp9JswqUT037yjO6iJ5Sw==} engines: {node: '>= 10'} hasBin: true - '@sentry/core@10.22.0': - resolution: {integrity: sha512-V1oeHbrOKzxadsCmgtPku3v3Emo/Bpb3VSuKmlLrQefiHX98MWtjJ3XDGfduzD5/dCdh0r/OOLwjcmrO/PZ2aw==} + '@sentry/cli@2.58.2': + resolution: {integrity: sha512-U4u62V4vaTWF+o40Mih8aOpQKqKUbZQt9A3LorIJwaE3tO3XFLRI70eWtW2se1Qmy0RZ74zB14nYcFNFl2t4Rw==} + engines: {node: '>= 10'} + hasBin: true + + '@sentry/core@10.27.0': + resolution: {integrity: sha512-Zc68kdH7tWTDtDbV1zWIbo3Jv0fHAU2NsF5aD2qamypKgfSIMSbWVxd22qZyDBkaX8gWIPm/0Sgx6aRXRBXrYQ==} engines: {node: '>=18'} - '@sentry/nextjs@10.22.0': - resolution: {integrity: sha512-9Np176cDMLTl98QRqESe6STyaQ0SKiWTDRdF3GPYPEB9s4t5Qz2zZJ9A40Fz3fZ33kW4Z/qscDx3WpCwFLe5Bg==} + '@sentry/nextjs@10.27.0': + resolution: {integrity: sha512-O3b7y4JgVyj70ucW7lfyFLSXTCvztu7qOdFzFl2LwIstzFIZzt6v7ICOhP3FEEC7Lxn5teNb6xVBDtu8vYr20g==} engines: {node: '>=18'} peerDependencies: next: ^13.2.0 || ^14.0 || ^15.0.0-rc.0 || ^16.0.0-0 - '@sentry/node-core@10.22.0': - resolution: {integrity: sha512-88Yyn+Qvmp0kPMnNRWgpUlAvhI9CNPqOT+0glW0L7OoN8LkJcNgx2GGUoLrJ+RGeHz/S7dIJY6DGa+u0Not2Qg==} + '@sentry/node-core@10.27.0': + resolution: {integrity: sha512-Dzo1I64Psb7AkpyKVUlR9KYbl4wcN84W4Wet3xjLmVKMgrCo2uAT70V4xIacmoMH5QLZAx0nGfRy9yRCd4nzBg==} engines: {node: '>=18'} peerDependencies: '@opentelemetry/api': ^1.9.0 - '@opentelemetry/context-async-hooks': ^1.30.1 || ^2.1.0 - '@opentelemetry/core': ^1.30.1 || ^2.1.0 + '@opentelemetry/context-async-hooks': ^1.30.1 || ^2.1.0 || ^2.2.0 + 
'@opentelemetry/core': ^1.30.1 || ^2.1.0 || ^2.2.0 '@opentelemetry/instrumentation': '>=0.57.1 <1' - '@opentelemetry/resources': ^1.30.1 || ^2.1.0 - '@opentelemetry/sdk-trace-base': ^1.30.1 || ^2.1.0 + '@opentelemetry/resources': ^1.30.1 || ^2.1.0 || ^2.2.0 + '@opentelemetry/sdk-trace-base': ^1.30.1 || ^2.1.0 || ^2.2.0 '@opentelemetry/semantic-conventions': ^1.37.0 - '@sentry/node@10.22.0': - resolution: {integrity: sha512-PfG8AMT2kgFJ7rWb0lLJOmjLW2riytTliLMjfoJ8/tLGk964uKqE0xM7FLtXZjlLJqTXVYCVG7VIPj185uyckQ==} + '@sentry/node@10.27.0': + resolution: {integrity: sha512-1cQZ4+QqV9juW64Jku1SMSz+PoZV+J59lotz4oYFvCNYzex8hRAnDKvNiKW1IVg5mEEkz98mg1fvcUtiw7GTiQ==} engines: {node: '>=18'} - '@sentry/opentelemetry@10.22.0': - resolution: {integrity: sha512-XHXYYq3zsQ/dj1kQ7cGGLFIEVRmrmjcMhiJHvmKKsUGKxQjHe2G0LuG8clHIPkmbg7yEIxCT/W2I9QzrwYt5+g==} + '@sentry/opentelemetry@10.27.0': + resolution: {integrity: sha512-z2vXoicuGiqlRlgL9HaYJgkin89ncMpNQy0Kje6RWyhpzLe8BRgUXlgjux7WrSrcbopDdC1OttSpZsJ/Wjk7fg==} engines: {node: '>=18'} peerDependencies: '@opentelemetry/api': ^1.9.0 - '@opentelemetry/context-async-hooks': ^1.30.1 || ^2.1.0 - '@opentelemetry/core': ^1.30.1 || ^2.1.0 - '@opentelemetry/sdk-trace-base': ^1.30.1 || ^2.1.0 + '@opentelemetry/context-async-hooks': ^1.30.1 || ^2.1.0 || ^2.2.0 + '@opentelemetry/core': ^1.30.1 || ^2.1.0 || ^2.2.0 + '@opentelemetry/sdk-trace-base': ^1.30.1 || ^2.1.0 || ^2.2.0 '@opentelemetry/semantic-conventions': ^1.37.0 - '@sentry/react@10.22.0': - resolution: {integrity: sha512-XByOjtW30LMNibmCPJF5LNYFmETNOUmWByECADox8GYV4BEX18WGXl4K1fpPDTSk+y4vUCHbltHa4GkyTRwG8Q==} + '@sentry/react@10.27.0': + resolution: {integrity: sha512-xoIRBlO1IhLX/O9aQgVYW1F3Qhw8TdkOiZjh6mrPsnCpBLufsQ4aS1nDQi9miZuWeslW0s2zNy0ACBpICZR/sw==} engines: {node: '>=18'} peerDependencies: react: ^16.14.0 || 17.x || 18.x || 19.x - '@sentry/vercel-edge@10.22.0': - resolution: {integrity: sha512-N6/4BrnqTJND/E1wxrQuiMKjJQ6W9xC/gibxrEfbZMFYU6VMz9/Quz+btfFJRsOiuFarLK8J/iEvWVB3mjZdzw==} 
+ '@sentry/vercel-edge@10.27.0': + resolution: {integrity: sha512-uBfpOnzSNSd2ITMTMeX5bV9Jlci9iMyI+iOPuW8c3oc+0dITTN0OpKLyNd6nfm50bM5h/1qFVQrph+oFTrtuGQ==} engines: {node: '>=18'} '@sentry/webpack-plugin@4.3.0': @@ -3179,8 +3232,8 @@ packages: '@types/pg-pool@2.0.6': resolution: {integrity: sha512-TaAUE5rq2VQYxab5Ts7WZhKNmuN78Q6PiFonTDdpbx8a1H0M1vhy3rhiMjl+e2iHmogyMw7jZF4FrE6eJUy5HQ==} - '@types/pg@8.15.5': - resolution: {integrity: sha512-LF7lF6zWEKxuT3/OR8wAZGzkg4ENGXFNyiV/JeOt9z5B+0ZVwbql9McqX5c/WStFq1GaGso7H1AzP/qSzmlCKQ==} + '@types/pg@8.15.6': + resolution: {integrity: sha512-NoaMtzhxOrubeL/7UZuNTrejB4MPAJ0RpxZqXQf2qXuVlTPuG6Y8p4u9dKRaue4yjmC7ZhzVO2/Yyyn25znrPQ==} '@types/phoenix@1.6.6': resolution: {integrity: sha512-PIzZZlEppgrpoT2QgbnDU+MMzuR6BbCjllj0bM70lWoejMeNJAxCchxnv7J3XFkI8MpygtRpzXrIlmWUBclP5A==} @@ -3208,9 +3261,6 @@ packages: '@types/semver@7.7.1': resolution: {integrity: sha512-FmgJfu+MOcQ370SD0ev7EI8TlCAfKYU+B4m5T3yXc1CiRN94g/SZPtsCkk506aUDtlMnFZvasDwHHUcZUEaYuA==} - '@types/shimmer@1.2.0': - resolution: {integrity: sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==} - '@types/statuses@2.0.6': resolution: {integrity: sha512-xMAgYwceFhRA2zY+XbEA7mxYbA093wdiW8Vu6gZPGWy9cmOyU9XesH1tNcEWsKFd5Vzrqx5T3D38PWx1FIIXkA==} @@ -4123,9 +4173,9 @@ packages: create-hmac@1.1.7: resolution: {integrity: sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==} - cross-env@7.0.3: - resolution: {integrity: sha512-+/HKd6EgcQCJGh2PSjZuUitQBQynKor4wrFbRg4DtAgS1aWO+gU52xpH7M9ScGgXSYmAVS9bIJ8EzuaGw0oNAw==} - engines: {node: '>=10.14', npm: '>=6', yarn: '>=1'} + cross-env@10.1.0: + resolution: {integrity: sha512-GsYosgnACZTADcmEyJctkJIoqAhHjttw7RsFrVoJNXbsWWqaq6Ym+7kZjq6mS45O0jij6vtiReppKQEtqWy6Dw==} + engines: {node: '>=20'} hasBin: true cross-spawn@7.0.6: @@ -4908,6 +4958,10 @@ packages: resolution: {integrity: 
sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==} hasBin: true + glob@10.5.0: + resolution: {integrity: sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==} + hasBin: true + glob@7.2.3: resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} deprecated: Glob versions prior to v9 are no longer supported @@ -5108,6 +5162,9 @@ packages: import-in-the-middle@1.14.2: resolution: {integrity: sha512-5tCuY9BV8ujfOpwtAGgsTx9CGUapcFMEEyByLv1B+v2+6DhAcw+Zr0nhQT7uwaZ7DiourxFEscghOR8e1aPLQw==} + import-in-the-middle@2.0.0: + resolution: {integrity: sha512-yNZhyQYqXpkT0AKq3F3KLasUSK4fHvebNH5hOsKQw2dhGSALvQ4U0BqUc5suziKvydO5u5hgN2hy1RJaho8U5A==} + imurmurhash@0.1.4: resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} engines: {node: '>=0.8.19'} @@ -6680,6 +6737,10 @@ packages: resolution: {integrity: sha512-gAZ+kLqBdHarXB64XpAe2VCjB7rIRv+mU8tfRWziHRJ5umKsIHN2tLLv6EtMw7WCdP19S0ERVMldNvxYCHnhSQ==} engines: {node: '>=8.6.0'} + require-in-the-middle@8.0.1: + resolution: {integrity: sha512-QT7FVMXfWOYFbeRBF6nu+I6tr2Tf3u0q8RIEjNob/heKY/nh7drD/k7eeMFmSQgnTtCzLDcCu/XEnpW2wk4xCQ==} + engines: {node: '>=9.3.0 || >=8.10.0 <9.0.0'} + reselect@5.1.1: resolution: {integrity: sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w==} @@ -6847,9 +6908,6 @@ packages: resolution: {integrity: sha512-VuvPvLG1QjNOLP7AIm2HGyfmxEIz8QdskvWOHwUcxLDibYWjLRBmCWd8LSL5FlwhBW7D/GU+3gNVC/ASxAWdxg==} engines: {node: 18.* || >= 20} - shimmer@1.2.1: - resolution: {integrity: sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==} - should-equal@2.0.0: resolution: {integrity: sha512-ZP36TMrK9euEuWQYBig9W55WPC7uo37qzAEmbjHz4gfyuXrEUgF8cUvQVO+w+d3OMfPvSRQJ22lSm8MQJ43LTA==} @@ -8507,6 +8565,8 @@ snapshots: 
'@emotion/unitless@0.8.1': {} + '@epic-web/invariant@1.0.0': {} + '@esbuild/aix-ppc64@0.25.11': optional: true @@ -8993,257 +9053,236 @@ snapshots: '@open-draft/until@2.1.0': {} - '@opentelemetry/api-logs@0.204.0': - dependencies: - '@opentelemetry/api': 1.9.0 - - '@opentelemetry/api-logs@0.57.2': + '@opentelemetry/api-logs@0.208.0': dependencies: '@opentelemetry/api': 1.9.0 '@opentelemetry/api@1.9.0': {} - '@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 '@opentelemetry/semantic-conventions': 1.37.0 - '@opentelemetry/instrumentation-amqplib@0.51.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-amqplib@0.55.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.37.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-connect@0.48.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-connect@0.52.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.37.0 '@types/connect': 3.4.38 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-dataloader@0.22.0(@opentelemetry/api@1.9.0)': + 
'@opentelemetry/instrumentation-dataloader@0.26.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-express@0.53.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-express@0.57.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.37.0 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-fs@0.24.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-fs@0.28.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-generic-pool@0.48.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-generic-pool@0.52.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-graphql@0.52.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-graphql@0.56.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) + 
'@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-hapi@0.51.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-hapi@0.55.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.37.0 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-http@0.204.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-http@0.208.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.37.0 forwarded-parse: 2.1.2 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-ioredis@0.52.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-ioredis@0.56.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) - '@opentelemetry/redis-common': 0.38.0 + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/redis-common': 0.38.2 + transitivePeerDependencies: + - supports-color + + '@opentelemetry/instrumentation-kafkajs@0.18.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.37.0 transitivePeerDependencies: - supports-color - 
'@opentelemetry/instrumentation-kafkajs@0.14.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-knex@0.53.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.37.0 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-knex@0.49.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-koa@0.57.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.37.0 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-koa@0.52.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-lru-memoizer@0.53.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.37.0 + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-lru-memoizer@0.49.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-mongodb@0.61.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-mongodb@0.57.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-mongoose@0.55.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - 
'@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.37.0 + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-mongoose@0.51.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-mysql2@0.55.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.37.0 + '@opentelemetry/sql-common': 0.41.2(@opentelemetry/api@1.9.0) transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-mysql2@0.51.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-mysql@0.54.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.37.0 - '@opentelemetry/sql-common': 0.41.0(@opentelemetry/api@1.9.0) - transitivePeerDependencies: - - supports-color - - '@opentelemetry/instrumentation-mysql@0.50.0(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.37.0 + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) '@types/mysql': 2.15.27 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-pg@0.57.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-pg@0.61.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 
2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.37.0 - '@opentelemetry/sql-common': 0.41.0(@opentelemetry/api@1.9.0) - '@types/pg': 8.15.5 + '@opentelemetry/sql-common': 0.41.2(@opentelemetry/api@1.9.0) + '@types/pg': 8.15.6 '@types/pg-pool': 2.0.6 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-redis@0.53.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-redis@0.57.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) - '@opentelemetry/redis-common': 0.38.0 + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/redis-common': 0.38.2 '@opentelemetry/semantic-conventions': 1.37.0 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-tedious@0.23.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-tedious@0.27.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.37.0 + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) '@types/tedious': 4.0.14 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-undici@0.15.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-undici@0.19.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.37.0 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation@0.204.0(@opentelemetry/api@1.9.0)': + 
'@opentelemetry/instrumentation@0.208.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/api-logs': 0.204.0 - import-in-the-middle: 1.14.2 - require-in-the-middle: 7.5.2 + '@opentelemetry/api-logs': 0.208.0 + import-in-the-middle: 2.0.0 + require-in-the-middle: 8.0.1 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation@0.57.2(@opentelemetry/api@1.9.0)': + '@opentelemetry/redis-common@0.38.2': {} + + '@opentelemetry/resources@2.2.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/api-logs': 0.57.2 - '@types/shimmer': 1.2.0 - import-in-the-middle: 1.14.2 - require-in-the-middle: 7.5.2 - semver: 7.7.3 - shimmer: 1.2.1 - transitivePeerDependencies: - - supports-color - - '@opentelemetry/redis-common@0.38.0': {} - - '@opentelemetry/resources@2.1.0(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.37.0 - '@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 2.1.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.37.0 '@opentelemetry/semantic-conventions@1.37.0': {} - '@opentelemetry/sql-common@0.41.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/sql-common@0.41.2(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) '@orval/angular@7.13.0(openapi-types@12.1.3)(typescript@5.9.3)': dependencies: @@ -9395,10 +9434,10 @@ snapshots: 
type-fest: 4.41.0 webpack-hot-middleware: 2.26.1 - '@prisma/instrumentation@6.15.0(@opentelemetry/api@1.9.0)': + '@prisma/instrumentation@6.19.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) transitivePeerDependencies: - supports-color @@ -10071,33 +10110,35 @@ snapshots: '@scarf/scarf@1.4.0': {} - '@sentry-internal/browser-utils@10.22.0': + '@sentry-internal/browser-utils@10.27.0': dependencies: - '@sentry/core': 10.22.0 + '@sentry/core': 10.27.0 - '@sentry-internal/feedback@10.22.0': + '@sentry-internal/feedback@10.27.0': dependencies: - '@sentry/core': 10.22.0 + '@sentry/core': 10.27.0 - '@sentry-internal/replay-canvas@10.22.0': + '@sentry-internal/replay-canvas@10.27.0': dependencies: - '@sentry-internal/replay': 10.22.0 - '@sentry/core': 10.22.0 + '@sentry-internal/replay': 10.27.0 + '@sentry/core': 10.27.0 - '@sentry-internal/replay@10.22.0': + '@sentry-internal/replay@10.27.0': dependencies: - '@sentry-internal/browser-utils': 10.22.0 - '@sentry/core': 10.22.0 + '@sentry-internal/browser-utils': 10.27.0 + '@sentry/core': 10.27.0 '@sentry/babel-plugin-component-annotate@4.3.0': {} - '@sentry/browser@10.22.0': + '@sentry/babel-plugin-component-annotate@4.6.1': {} + + '@sentry/browser@10.27.0': dependencies: - '@sentry-internal/browser-utils': 10.22.0 - '@sentry-internal/feedback': 10.22.0 - '@sentry-internal/replay': 10.22.0 - '@sentry-internal/replay-canvas': 10.22.0 - '@sentry/core': 10.22.0 + '@sentry-internal/browser-utils': 10.27.0 + '@sentry-internal/feedback': 10.27.0 + '@sentry-internal/replay': 10.27.0 + '@sentry-internal/replay-canvas': 10.27.0 + '@sentry/core': 10.27.0 '@sentry/bundler-plugin-core@4.3.0': dependencies: @@ -10113,30 +10154,68 @@ snapshots: - encoding - supports-color + '@sentry/bundler-plugin-core@4.6.1': + dependencies: + '@babel/core': 7.28.4 + 
'@sentry/babel-plugin-component-annotate': 4.6.1 + '@sentry/cli': 2.58.2 + dotenv: 16.6.1 + find-up: 5.0.0 + glob: 10.5.0 + magic-string: 0.30.8 + unplugin: 1.0.1 + transitivePeerDependencies: + - encoding + - supports-color + '@sentry/cli-darwin@2.55.0': optional: true + '@sentry/cli-darwin@2.58.2': + optional: true + '@sentry/cli-linux-arm64@2.55.0': optional: true + '@sentry/cli-linux-arm64@2.58.2': + optional: true + '@sentry/cli-linux-arm@2.55.0': optional: true + '@sentry/cli-linux-arm@2.58.2': + optional: true + '@sentry/cli-linux-i686@2.55.0': optional: true + '@sentry/cli-linux-i686@2.58.2': + optional: true + '@sentry/cli-linux-x64@2.55.0': optional: true + '@sentry/cli-linux-x64@2.58.2': + optional: true + '@sentry/cli-win32-arm64@2.55.0': optional: true + '@sentry/cli-win32-arm64@2.58.2': + optional: true + '@sentry/cli-win32-i686@2.55.0': optional: true + '@sentry/cli-win32-i686@2.58.2': + optional: true + '@sentry/cli-win32-x64@2.55.0': optional: true + '@sentry/cli-win32-x64@2.58.2': + optional: true + '@sentry/cli@2.55.0': dependencies: https-proxy-agent: 5.0.1 @@ -10157,20 +10236,40 @@ snapshots: - encoding - supports-color - '@sentry/core@10.22.0': {} + '@sentry/cli@2.58.2': + dependencies: + https-proxy-agent: 5.0.1 + node-fetch: 2.7.0 + progress: 2.0.3 + proxy-from-env: 1.1.0 + which: 2.0.2 + optionalDependencies: + '@sentry/cli-darwin': 2.58.2 + '@sentry/cli-linux-arm': 2.58.2 + '@sentry/cli-linux-arm64': 2.58.2 + '@sentry/cli-linux-i686': 2.58.2 + '@sentry/cli-linux-x64': 2.58.2 + '@sentry/cli-win32-arm64': 2.58.2 + '@sentry/cli-win32-i686': 2.58.2 + '@sentry/cli-win32-x64': 2.58.2 + transitivePeerDependencies: + - encoding + - supports-color - 
'@sentry/nextjs@10.22.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9))': + '@sentry/core@10.27.0': {} + + '@sentry/nextjs@10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9))': dependencies: '@opentelemetry/api': 1.9.0 '@opentelemetry/semantic-conventions': 1.37.0 '@rollup/plugin-commonjs': 28.0.1(rollup@4.52.2) - '@sentry-internal/browser-utils': 10.22.0 - '@sentry/bundler-plugin-core': 4.3.0 - '@sentry/core': 10.22.0 - '@sentry/node': 10.22.0 - '@sentry/opentelemetry': 10.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0) - '@sentry/react': 10.22.0(react@18.3.1) - '@sentry/vercel-edge': 10.22.0 + '@sentry-internal/browser-utils': 10.27.0 + '@sentry/bundler-plugin-core': 4.6.1 + '@sentry/core': 10.27.0 + '@sentry/node': 10.27.0 + '@sentry/opentelemetry': 10.27.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0) + '@sentry/react': 10.27.0(react@18.3.1) + '@sentry/vercel-edge': 10.27.0 '@sentry/webpack-plugin': 
4.3.0(webpack@5.101.3(esbuild@0.25.9)) next: 15.4.7(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) resolve: 1.22.8 @@ -10185,83 +10284,83 @@ snapshots: - supports-color - webpack - '@sentry/node-core@10.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/instrumentation@0.204.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)': + '@sentry/node-core@10.27.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/instrumentation@0.208.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)': dependencies: '@apm-js-collab/tracing-hooks': 0.3.1 '@opentelemetry/api': 1.9.0 - '@opentelemetry/context-async-hooks': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 2.1.0(@opentelemetry/api@1.9.0) + '@opentelemetry/context-async-hooks': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.37.0 - '@sentry/core': 10.22.0 - '@sentry/opentelemetry': 
10.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0) - import-in-the-middle: 1.14.2 + '@sentry/core': 10.27.0 + '@sentry/opentelemetry': 10.27.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0) + import-in-the-middle: 2.0.0 transitivePeerDependencies: - supports-color - '@sentry/node@10.22.0': + '@sentry/node@10.27.0': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/context-async-hooks': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.204.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-amqplib': 0.51.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-connect': 0.48.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-dataloader': 0.22.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-express': 0.53.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-fs': 0.24.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-generic-pool': 0.48.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-graphql': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-hapi': 0.51.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-http': 0.204.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-ioredis': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-kafkajs': 0.14.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-knex': 0.49.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-koa': 0.52.0(@opentelemetry/api@1.9.0) - 
'@opentelemetry/instrumentation-lru-memoizer': 0.49.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-mongodb': 0.57.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-mongoose': 0.51.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-mysql': 0.50.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-mysql2': 0.51.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-pg': 0.57.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-redis': 0.53.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-tedious': 0.23.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-undici': 0.15.0(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 2.1.0(@opentelemetry/api@1.9.0) + '@opentelemetry/context-async-hooks': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-amqplib': 0.55.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-connect': 0.52.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-dataloader': 0.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-express': 0.57.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-fs': 0.28.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-generic-pool': 0.52.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-graphql': 0.56.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-hapi': 0.55.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-http': 0.208.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-ioredis': 0.56.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-kafkajs': 0.18.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-knex': 0.53.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-koa': 
0.57.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-lru-memoizer': 0.53.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-mongodb': 0.61.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-mongoose': 0.55.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-mysql': 0.54.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-mysql2': 0.55.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-pg': 0.61.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-redis': 0.57.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-tedious': 0.27.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-undici': 0.19.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.37.0 - '@prisma/instrumentation': 6.15.0(@opentelemetry/api@1.9.0) - '@sentry/core': 10.22.0 - '@sentry/node-core': 10.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/instrumentation@0.204.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0) - '@sentry/opentelemetry': 10.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0) - import-in-the-middle: 1.14.2 + '@prisma/instrumentation': 6.19.0(@opentelemetry/api@1.9.0) + '@sentry/core': 10.27.0 + '@sentry/node-core': 
10.27.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/instrumentation@0.208.0(@opentelemetry/api@1.9.0))(@opentelemetry/resources@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0) + '@sentry/opentelemetry': 10.27.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0) + import-in-the-middle: 2.0.0 minimatch: 9.0.5 transitivePeerDependencies: - supports-color - '@sentry/opentelemetry@10.22.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)': + '@sentry/opentelemetry@10.27.0(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.37.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/context-async-hooks': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/core': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 2.1.0(@opentelemetry/api@1.9.0) + '@opentelemetry/context-async-hooks': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 2.2.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 2.2.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.37.0 - '@sentry/core': 10.22.0 + '@sentry/core': 10.27.0 - '@sentry/react@10.22.0(react@18.3.1)': + '@sentry/react@10.27.0(react@18.3.1)': dependencies: - '@sentry/browser': 
10.22.0 - '@sentry/core': 10.22.0 + '@sentry/browser': 10.27.0 + '@sentry/core': 10.27.0 hoist-non-react-statics: 3.3.2 react: 18.3.1 - '@sentry/vercel-edge@10.22.0': + '@sentry/vercel-edge@10.27.0': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/resources': 2.1.0(@opentelemetry/api@1.9.0) - '@sentry/core': 10.22.0 + '@opentelemetry/resources': 2.2.0(@opentelemetry/api@1.9.0) + '@sentry/core': 10.27.0 '@sentry/webpack-plugin@4.3.0(webpack@5.101.3(esbuild@0.25.9))': dependencies: @@ -10904,9 +11003,9 @@ snapshots: '@types/pg-pool@2.0.6': dependencies: - '@types/pg': 8.15.5 + '@types/pg': 8.15.6 - '@types/pg@8.15.5': + '@types/pg@8.15.6': dependencies: '@types/node': 24.10.0 pg-protocol: 1.10.3 @@ -10937,8 +11036,6 @@ snapshots: '@types/semver@7.7.1': {} - '@types/shimmer@1.2.0': {} - '@types/statuses@2.0.6': {} '@types/stylis@4.2.5': {} @@ -11901,8 +11998,9 @@ snapshots: safe-buffer: 5.2.1 sha.js: 2.4.12 - cross-env@7.0.3: + cross-env@10.1.0: dependencies: + '@epic-web/invariant': 1.0.0 cross-spawn: 7.0.6 cross-spawn@7.0.6: @@ -12434,7 +12532,7 @@ snapshots: eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1) - eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) + eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) eslint-plugin-jsx-a11y: 6.10.2(eslint@8.57.1) eslint-plugin-react: 7.37.5(eslint@8.57.1) eslint-plugin-react-hooks: 5.2.0(eslint@8.57.1) @@ -12464,7 +12562,7 @@ snapshots: tinyglobby: 0.2.15 unrs-resolver: 1.11.1 optionalDependencies: - eslint-plugin-import: 
2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) + eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1) transitivePeerDependencies: - supports-color @@ -12479,7 +12577,7 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1): + eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1))(eslint@8.57.1))(eslint@8.57.1): dependencies: '@rtsao/scc': 1.1.0 array-includes: 3.1.9 @@ -12902,6 +13000,15 @@ snapshots: package-json-from-dist: 1.0.1 path-scurry: 1.11.1 + glob@10.5.0: + dependencies: + foreground-child: 3.3.1 + jackspeak: 3.4.3 + minimatch: 9.0.5 + minipass: 7.1.2 + package-json-from-dist: 1.0.1 + path-scurry: 1.11.1 + glob@7.2.3: dependencies: fs.realpath: 1.0.0 @@ -13159,6 +13266,13 @@ snapshots: cjs-module-lexer: 1.4.3 module-details-from-path: 1.0.4 + import-in-the-middle@2.0.0: + dependencies: + acorn: 8.15.0 + acorn-import-attributes: 1.9.5(acorn@8.15.0) + cjs-module-lexer: 1.4.3 + module-details-from-path: 1.0.4 + imurmurhash@0.1.4: {} indent-string@4.0.0: {} @@ -15029,6 +15143,13 @@ snapshots: transitivePeerDependencies: - supports-color + require-in-the-middle@8.0.1: + dependencies: + debug: 4.4.3 + module-details-from-path: 1.0.4 + transitivePeerDependencies: + - supports-color + reselect@5.1.1: {} resolve-from@4.0.0: {} @@ -15244,8 +15365,6 @@ snapshots: '@scarf/scarf': 1.4.0 deepmerge-ts: 7.1.5 - 
shimmer@1.2.1: {} - should-equal@2.0.0: dependencies: should-type: 1.4.0 diff --git a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/AgentOnboardingCredentials.tsx b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/AgentOnboardingCredentials.tsx index cfeada190f..3176ec7f70 100644 --- a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/AgentOnboardingCredentials.tsx +++ b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/components/AgentOnboardingCredentials/AgentOnboardingCredentials.tsx @@ -1,4 +1,4 @@ -import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs"; +import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs"; import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput"; import { GraphMeta } from "@/app/api/__generated__/models/graphMeta"; import { useState } from "react"; diff --git a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx index 6aeb0213e7..4b6abacbff 100644 --- a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx +++ b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx @@ -1,6 +1,6 @@ "use client"; -import { RunAgentInputs } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentInputs/RunAgentInputs"; +import { RunAgentInputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs"; import { Card, CardContent, diff --git a/autogpt_platform/frontend/src/app/(no-navbar)/share/[token]/page.tsx 
b/autogpt_platform/frontend/src/app/(no-navbar)/share/[token]/page.tsx index a8fd85eeb0..c24f9e11a3 100644 --- a/autogpt_platform/frontend/src/app/(no-navbar)/share/[token]/page.tsx +++ b/autogpt_platform/frontend/src/app/(no-navbar)/share/[token]/page.tsx @@ -1,8 +1,7 @@ "use client"; -import React from "react"; -import { useParams } from "next/navigation"; -import { RunOutputs } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/components/RunOutputs"; +import { RunOutputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunOutputs"; +import { useGetV1GetSharedExecution } from "@/app/api/__generated__/endpoints/default/default"; import { Card, CardContent, @@ -11,7 +10,7 @@ import { } from "@/components/__legacy__/ui/card"; import { Alert, AlertDescription } from "@/components/molecules/Alert/Alert"; import { InfoIcon } from "lucide-react"; -import { useGetV1GetSharedExecution } from "@/app/api/__generated__/endpoints/default/default"; +import { useParams } from "next/navigation"; export default function SharePage() { const params = useParams(); diff --git a/autogpt_platform/frontend/src/app/(platform)/auth/integrations/oauth_callback/route.ts b/autogpt_platform/frontend/src/app/(platform)/auth/integrations/oauth_callback/route.ts index a075564063..f6df869350 100644 --- a/autogpt_platform/frontend/src/app/(platform)/auth/integrations/oauth_callback/route.ts +++ b/autogpt_platform/frontend/src/app/(platform)/auth/integrations/oauth_callback/route.ts @@ -1,4 +1,4 @@ -import { OAuthPopupResultMessage } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs"; +import { OAuthPopupResultMessage } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs"; import { NextResponse } from "next/server"; // This route is 
intended to be used as the callback for integration OAuth flows, diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx index 3fcde3bf76..237bea2ab0 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/AgentOutputs/AgentOutputs.tsx @@ -1,9 +1,13 @@ +import { BlockUIType } from "@/app/(platform)/build/components/types"; +import { useGraphStore } from "@/app/(platform)/build/stores/graphStore"; +import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore"; import { - Tooltip, - TooltipContent, - TooltipProvider, - TooltipTrigger, -} from "@/components/atoms/Tooltip/BaseTooltip"; + globalRegistry, + OutputActions, + OutputItem, +} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers"; +import { Label } from "@/components/__legacy__/ui/label"; +import { ScrollArea } from "@/components/__legacy__/ui/scroll-area"; import { Sheet, SheetContent, @@ -12,20 +16,16 @@ import { SheetTitle, SheetTrigger, } from "@/components/__legacy__/ui/sheet"; -import { BuilderActionButton } from "../BuilderActionButton"; -import { BookOpenIcon } from "@phosphor-icons/react"; -import { useGraphStore } from "@/app/(platform)/build/stores/graphStore"; -import { useShallow } from "zustand/react/shallow"; -import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore"; -import { BlockUIType } from "@/app/(platform)/build/components/types"; -import { ScrollArea } from "@/components/__legacy__/ui/scroll-area"; -import { Label } from "@/components/__legacy__/ui/label"; -import { useMemo } from "react"; import { - globalRegistry, - OutputItem, - 
OutputActions, -} from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers"; + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from "@/components/atoms/Tooltip/BaseTooltip"; +import { BookOpenIcon } from "@phosphor-icons/react"; +import { useMemo } from "react"; +import { useShallow } from "zustand/react/shallow"; +import { BuilderActionButton } from "../BuilderActionButton"; export const AgentOutputs = ({ flowID }: { flowID: string | null }) => { const hasOutputs = useGraphStore(useShallow((state) => state.hasOutputs)); diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/CronSchedulerDialog/CronSchedulerDialog.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/CronSchedulerDialog/CronSchedulerDialog.tsx index adb3c619bf..b6ec73eb9a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/CronSchedulerDialog/CronSchedulerDialog.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/CronSchedulerDialog/CronSchedulerDialog.tsx @@ -1,10 +1,10 @@ +import { CronScheduler } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/CronScheduler"; import { Button } from "@/components/atoms/Button/Button"; +import { Input } from "@/components/atoms/Input/Input"; +import { Text } from "@/components/atoms/Text/Text"; import { Dialog } from "@/components/molecules/Dialog/Dialog"; import { InfoIcon } from "lucide-react"; -import { CronScheduler } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/CronScheduler"; -import { Text } from "@/components/atoms/Text/Text"; import { useCronSchedulerDialog } from "./useCronSchedulerDialog"; -import { Input } from "@/components/atoms/Input/Input"; type 
CronSchedulerDialogProps = { open: boolean; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx index 3a0c7aab4a..f4c1a7331f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/RunGraph.tsx @@ -18,6 +18,7 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => { openRunInputDialog, setOpenRunInputDialog, isExecutingGraph, + isTerminatingGraph, isSaving, } = useRunGraph(); const isGraphRunning = useGraphStore( @@ -34,8 +35,8 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => { "border-red-500 bg-gradient-to-br from-red-400 to-red-500 shadow-[inset_0_2px_0_0_rgba(255,255,255,0.5),0_2px_4px_0_rgba(0,0,0,0.2)]", )} onClick={isGraphRunning ? handleStopGraph : handleRunGraph} - disabled={!flowID || isExecutingGraph} - isLoading={isExecutingGraph || isSaving} + disabled={!flowID || isExecutingGraph || isTerminatingGraph} + isLoading={isExecutingGraph || isTerminatingGraph || isSaving} > {!isGraphRunning ? 
( diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts index beb3a8741a..cd321e340f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunGraph/useRunGraph.ts @@ -15,9 +15,6 @@ export const useRunGraph = () => { showToast: false, }); const { toast } = useToast(); - const setIsGraphRunning = useGraphStore( - useShallow((state) => state.setIsGraphRunning), - ); const hasInputs = useGraphStore(useShallow((state) => state.hasInputs)); const hasCredentials = useGraphStore( useShallow((state) => state.hasCredentials), @@ -34,15 +31,13 @@ export const useRunGraph = () => { const { mutateAsync: executeGraph, isPending: isExecutingGraph } = usePostV1ExecuteGraphAgent({ mutation: { - onSuccess: (response) => { + onSuccess: (response: any) => { const { id } = response.data as GraphExecutionMeta; setQueryStates({ flowExecutionID: id, }); }, - onError: (error) => { - setIsGraphRunning(false); - + onError: (error: any) => { toast({ title: (error.detail as string) ?? "An unexpected error occurred.", description: "An unexpected error occurred.", @@ -52,20 +47,19 @@ export const useRunGraph = () => { }, }); - const { mutateAsync: stopGraph } = usePostV1StopGraphExecution({ - mutation: { - onSuccess: () => { - setIsGraphRunning(false); + const { mutateAsync: stopGraph, isPending: isTerminatingGraph } = + usePostV1StopGraphExecution({ + mutation: { + onSuccess: () => {}, + onError: (error: any) => { + toast({ + title: (error.detail as string) ?? "An unexpected error occurred.", + description: "An unexpected error occurred.", + variant: "destructive", + }); + }, }, - onError: (error) => { - toast({ - title: (error.detail as string) ?? 
"An unexpected error occurred.", - description: "An unexpected error occurred.", - variant: "destructive", - }); - }, - }, - }); + }); const handleRunGraph = async () => { await saveGraph(undefined); @@ -96,6 +90,7 @@ export const useRunGraph = () => { handleStopGraph, isSaving, isExecutingGraph, + isTerminatingGraph, openRunInputDialog, setOpenRunInputDialog, }; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts index 054aa36ebd..a1933dc15b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/BuilderActions/components/RunInputDialog/useRunInputDialog.ts @@ -7,7 +7,6 @@ import { } from "@/lib/autogpt-server-api"; import { parseAsInteger, parseAsString, useQueryStates } from "nuqs"; import { useMemo, useState } from "react"; -import { useShallow } from "zustand/react/shallow"; import { uiSchema } from "../../../FlowEditor/nodes/uiSchema"; import { isCredentialFieldSchema } from "@/components/renderers/input-renderer/fields/CredentialField/helpers"; @@ -30,9 +29,6 @@ export const useRunInputDialog = ({ flowID: parseAsString, flowVersion: parseAsInteger, }); - const setIsGraphRunning = useGraphStore( - useShallow((state) => state.setIsGraphRunning), - ); const { toast } = useToast(); const { mutateAsync: executeGraph, isPending: isExecutingGraph } = @@ -45,8 +41,6 @@ export const useRunInputDialog = ({ }); }, onError: (error) => { - setIsGraphRunning(false); - toast({ title: (error.detail as string) ?? 
"An unexpected error occurred.", description: "An unexpected error occurred.", diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/Flow.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/Flow.tsx index aca6e6f7c9..86b1e23871 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/Flow.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/Flow.tsx @@ -13,9 +13,16 @@ import { BuilderActions } from "../../BuilderActions/BuilderActions"; import { RunningBackground } from "./components/RunningBackground"; import { useGraphStore } from "../../../stores/graphStore"; import { useCopyPaste } from "./useCopyPaste"; +import { FloatingReviewsPanel } from "@/components/organisms/FloatingReviewsPanel/FloatingReviewsPanel"; +import { parseAsString, useQueryStates } from "nuqs"; import { CustomControls } from "./components/CustomControl"; export const Flow = () => { + const [{ flowExecutionID }] = useQueryStates({ + flowID: parseAsString, + flowExecutionID: parseAsString, + }); + const nodes = useNodeStore(useShallow((state) => state.nodes)); const onNodesChange = useNodeStore( useShallow((state) => state.onNodesChange), @@ -44,7 +51,9 @@ export const Flow = () => { window.removeEventListener("keydown", handleKeyDown); }; }, [handleCopyPaste]); - const { isGraphRunning } = useGraphStore(); + const isGraphRunning = useGraphStore( + useShallow((state) => state.isGraphRunning), + ); return (
@@ -72,6 +81,7 @@ export const Flow = () => { {isGraphRunning && }
+
); }; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts index e6697d82c9..6ac9856c5e 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts @@ -13,7 +13,6 @@ import { convertNodesPlusBlockInfoIntoCustomNodes } from "../../helper"; import { useEdgeStore } from "../../../stores/edgeStore"; import { GetV1GetExecutionDetails200 } from "@/app/api/__generated__/models/getV1GetExecutionDetails200"; import { useGraphStore } from "../../../stores/graphStore"; -import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; import { useReactFlow } from "@xyflow/react"; import { useControlPanelStore } from "../../../stores/controlPanelStore"; import { useHistoryStore } from "../../../stores/historyStore"; @@ -28,9 +27,6 @@ export const useFlow = () => { const updateNodeExecutionResult = useNodeStore( useShallow((state) => state.updateNodeExecutionResult), ); - const setIsGraphRunning = useGraphStore( - useShallow((state) => state.setIsGraphRunning), - ); const setGraphSchemas = useGraphStore( useShallow((state) => state.setGraphSchemas), ); @@ -126,15 +122,6 @@ export const useFlow = () => { } }, [graph?.links, addLinks]); - // update graph running status - useEffect(() => { - const isRunning = - executionDetails?.status === AgentExecutionStatus.RUNNING || - executionDetails?.status === AgentExecutionStatus.QUEUED; - - setIsGraphRunning(isRunning); - }, [executionDetails?.status, customNodes]); - // update node execution status in nodes useEffect(() => { if ( @@ -182,7 +169,6 @@ export const useFlow = () => { useEdgeStore.getState().setEdges([]); useGraphStore.getState().reset(); useEdgeStore.getState().resetEdgeBeads(); - setIsGraphRunning(false); }; }, []); diff 
--git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlowRealtime.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlowRealtime.ts index 081a7048c5..d54ab39a36 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlowRealtime.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlowRealtime.ts @@ -19,8 +19,8 @@ export const useFlowRealtime = () => { const updateStatus = useNodeStore( useShallow((state) => state.updateNodeStatus), ); - const setIsGraphRunning = useGraphStore( - useShallow((state) => state.setIsGraphRunning), + const setGraphExecutionStatus = useGraphStore( + useShallow((state) => state.setGraphExecutionStatus), ); const updateEdgeBeads = useEdgeStore( useShallow((state) => state.updateEdgeBeads), @@ -57,11 +57,7 @@ export const useFlowRealtime = () => { return; } - const isRunning = - graphExecution.status === AgentExecutionStatus.RUNNING || - graphExecution.status === AgentExecutionStatus.QUEUED; - - setIsGraphRunning(isRunning); + setGraphExecutionStatus(graphExecution.status as AgentExecutionStatus); }, ); diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeExecutionBadge.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeExecutionBadge.tsx index 5b187c9e43..acc0c26156 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeExecutionBadge.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeExecutionBadge.tsx @@ -9,6 +9,7 @@ const statusStyles: Record = { INCOMPLETE: "text-slate-700 border-slate-400", QUEUED: "text-blue-700 border-blue-400", RUNNING: "text-amber-700 border-amber-400", + REVIEW: "text-orange-700 border-orange-400 bg-orange-50", 
COMPLETED: "text-green-700 border-green-400", TERMINATED: "text-orange-700 border-orange-400", FAILED: "text-red-700 border-red-400", diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ContentRenderer.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ContentRenderer.tsx index ed8d4e83de..9cb1a62e3d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ContentRenderer.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/ContentRenderer.tsx @@ -1,7 +1,7 @@ "use client"; -import { globalRegistry } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers"; -import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers"; +import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers"; +import { globalRegistry } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers"; export const TextRenderer: React.FC<{ value: any; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx index 90706ccb7e..c505282e7b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx +++ 
b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/NodeDataViewer.tsx @@ -1,25 +1,25 @@ -import React, { FC } from "react"; -import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { + OutputActions, + OutputItem, +} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers"; +import { ScrollArea } from "@/components/__legacy__/ui/scroll-area"; import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger, } from "@/components/atoms/Tooltip/BaseTooltip"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { beautifyString } from "@/lib/utils"; import { ArrowsOutSimpleIcon, + CheckIcon, CopyIcon, DownloadIcon, - CheckIcon, } from "@phosphor-icons/react"; -import { Text } from "@/components/atoms/Text/Text"; -import { beautifyString } from "@/lib/utils"; -import { ScrollArea } from "@/components/__legacy__/ui/scroll-area"; -import { - OutputItem, - OutputActions, -} from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers"; +import { FC } from "react"; import { useNodeDataViewer } from "./useNodeDataViewer"; interface NodeDataViewerProps { diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts index 0fb11ae051..1adec625a0 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts +++ 
b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/components/NodeOutput/components/NodeDataViewer/useNodeDataViewer.ts @@ -1,9 +1,9 @@ -import React, { useState, useMemo } from "react"; -import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers"; -import { downloadOutputs } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/utils/download"; +import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers"; +import { globalRegistry } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers"; +import { downloadOutputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/utils/download"; import { useToast } from "@/components/molecules/Toast/use-toast"; -import { globalRegistry } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers"; import { beautifyString } from "@/lib/utils"; +import React, { useMemo, useState } from "react"; export const useNodeDataViewer = ( data: any, diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts index 5547468828..2093fed40f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/helpers.ts @@ -4,6 +4,7 @@ export const nodeStyleBasedOnStatus: Record = { INCOMPLETE: "ring-slate-300 bg-slate-300", QUEUED: " ring-blue-300 bg-blue-300", RUNNING: "ring-amber-300 bg-amber-300", + REVIEW: "ring-orange-300 bg-orange-300", COMPLETED: 
"ring-green-300 bg-green-300", TERMINATED: "ring-orange-300 bg-orange-300 ", FAILED: "ring-red-300 bg-red-300", diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode.tsx index f3e9d95b90..f0a49080fa 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode.tsx @@ -1,70 +1,67 @@ -import React, { - useState, - useEffect, - useCallback, - useRef, - useContext, -} from "react"; -import Link from "next/link"; -import { NodeProps, useReactFlow, Node as XYNode, Edge } from "@xyflow/react"; -import "@xyflow/react/dist/style.css"; -import "./customnode.css"; -import InputModalComponent from "../InputModalComponent"; -import OutputModalComponent from "../OutputModalComponent"; -import { - BlockIORootSchema, - BlockIOSubSchema, - BlockIOStringSubSchema, - Category, - NodeExecutionResult, - BlockUIType, - BlockCost, -} from "@/lib/autogpt-server-api"; -import { - beautifyString, - cn, - fillObjectDefaultsFromSchema, - getValue, - hasNonNullNonObjectValue, - isObject, - parseKeys, - setNestedProperty, -} from "@/lib/utils"; -import { Button } from "@/components/atoms/Button/Button"; -import { TextRenderer } from "@/components/__legacy__/ui/render"; -import { history } from "../history"; -import NodeHandle from "../NodeHandle"; -import { NodeGenericInputField, NodeTextBoxInput } from "../NodeInputs"; -import { getPrimaryCategoryColor } from "@/lib/utils"; -import { BuilderContext } from "../Flow/Flow"; -import { Badge } from "../../../../../../components/__legacy__/ui/badge"; -import NodeOutputs from "../NodeOutputs"; -import { IconCoin } from "../../../../../../components/__legacy__/ui/icons"; -import * as Separator from "@radix-ui/react-separator"; -import 
* as ContextMenu from "@radix-ui/react-context-menu"; -import { - Alert, - AlertDescription, -} from "../../../../../../components/molecules/Alert/Alert"; -import { - DotsVerticalIcon, - TrashIcon, - CopyIcon, - ExitIcon, - Pencil1Icon, -} from "@radix-ui/react-icons"; -import { InfoIcon, Key } from "@phosphor-icons/react"; -import useCredits from "@/hooks/useCredits"; import { getV1GetAyrshareSsoUrl } from "@/app/api/__generated__/endpoints/integrations/integrations"; -import { toast } from "@/components/molecules/Toast/use-toast"; import { Input } from "@/components/__legacy__/ui/input"; +import { TextRenderer } from "@/components/__legacy__/ui/render"; +import { Button } from "@/components/atoms/Button/Button"; +import { Switch } from "@/components/atoms/Switch/Switch"; import { Tooltip, TooltipContent, TooltipTrigger, } from "@/components/atoms/Tooltip/BaseTooltip"; import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; -import { Switch } from "@/components/atoms/Switch/Switch"; +import { toast } from "@/components/molecules/Toast/use-toast"; +import useCredits from "@/hooks/useCredits"; +import { + BlockCost, + BlockIORootSchema, + BlockIOStringSubSchema, + BlockIOSubSchema, + BlockUIType, + Category, + NodeExecutionResult, +} from "@/lib/autogpt-server-api"; +import { + beautifyString, + cn, + fillObjectDefaultsFromSchema, + getPrimaryCategoryColor, + getValue, + hasNonNullNonObjectValue, + isObject, + parseKeys, + setNestedProperty, +} from "@/lib/utils"; +import { InfoIcon, Key } from "@phosphor-icons/react"; +import * as ContextMenu from "@radix-ui/react-context-menu"; +import { + CopyIcon, + DotsVerticalIcon, + ExitIcon, + Pencil1Icon, + TrashIcon, +} from "@radix-ui/react-icons"; +import * as Separator from "@radix-ui/react-separator"; +import { Edge, NodeProps, useReactFlow, Node as XYNode } from "@xyflow/react"; +import "@xyflow/react/dist/style.css"; +import Link from "next/link"; +import React, { + 
useCallback, + useContext, + useEffect, + useRef, + useState, +} from "react"; +import { Badge } from "@/components/__legacy__/ui/badge"; +import { IconCoin } from "@/components/__legacy__/ui/icons"; +import { Alert, AlertDescription } from "@/components/molecules/Alert/Alert"; +import { BuilderContext } from "../Flow/Flow"; +import { history } from "../history"; +import InputModalComponent from "../InputModalComponent"; +import NodeHandle from "../NodeHandle"; +import { NodeGenericInputField, NodeTextBoxInput } from "../NodeInputs"; +import NodeOutputs from "../NodeOutputs"; +import OutputModalComponent from "../OutputModalComponent"; +import "./customnode.css"; export type ConnectionData = Array<{ edge_id: string; @@ -366,6 +363,7 @@ export const CustomNode = React.memo( // For OUTPUT blocks, only show the 'value' (hides 'name') input connection handle !(nodeType == BlockUIType.OUTPUT && propKey == "name"); const isConnected = isInputHandleConnected(propKey); + return ( !isHidden && (isRequired || isAdvancedOpen || isConnected || !isAdvanced) && ( @@ -647,6 +645,8 @@ export const CustomNode = React.memo( return "border-purple-200 dark:border-purple-800 border-4"; case "queued": return "border-cyan-200 dark:border-cyan-800 border-4"; + case "review": + return "border-orange-200 dark:border-orange-800 border-4"; default: return ""; } @@ -666,6 +666,8 @@ export const CustomNode = React.memo( return "bg-purple-200 dark:bg-purple-800"; case "queued": return "bg-cyan-200 dark:bg-cyan-800"; + case "review": + return "bg-orange-200 dark:bg-orange-800"; default: return ""; } @@ -1010,6 +1012,8 @@ export const CustomNode = React.memo( data.status === "QUEUED", "border-gray-600 bg-gray-600 font-black": data.status === "INCOMPLETE", + "border-orange-600 bg-orange-600 text-white": + data.status === "REVIEW", }, )} > diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/ExpandableOutputDialog.tsx 
b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/ExpandableOutputDialog.tsx index 0edb37ec6a..0050c6cf64 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/ExpandableOutputDialog.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/ExpandableOutputDialog.tsx @@ -1,27 +1,27 @@ +import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers"; +import { + globalRegistry, + OutputActions, + OutputItem, +} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers"; +import { beautifyString } from "@/lib/utils"; +import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; +import { Clipboard, Maximize2 } from "lucide-react"; import React, { FC, useMemo, useState } from "react"; import { Button } from "../../../../../components/__legacy__/ui/button"; -import { ContentRenderer } from "../../../../../components/__legacy__/ui/render"; -import { beautifyString } from "@/lib/utils"; -import { Clipboard, Maximize2 } from "lucide-react"; -import { useToast } from "../../../../../components/molecules/Toast/use-toast"; -import { Switch } from "../../../../../components/atoms/Switch/Switch"; import { Dialog, DialogContent, - DialogHeader, - DialogTitle, DialogDescription, DialogFooter, + DialogHeader, + DialogTitle, } from "../../../../../components/__legacy__/ui/dialog"; +import { ContentRenderer } from "../../../../../components/__legacy__/ui/render"; import { ScrollArea } from "../../../../../components/__legacy__/ui/scroll-area"; import { Separator } from "../../../../../components/__legacy__/ui/separator"; -import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; -import { - globalRegistry, - OutputItem, - OutputActions, -} from 
"@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers"; -import type { OutputMetadata } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers"; +import { Switch } from "../../../../../components/atoms/Switch/Switch"; +import { useToast } from "../../../../../components/molecules/Toast/use-toast"; interface ExpandableOutputDialogProps { isOpen: boolean; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/Flow/Flow.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/Flow/Flow.tsx index 9ed4f3ff53..3e733eab96 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/Flow/Flow.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/legacy-builder/Flow/Flow.tsx @@ -64,6 +64,7 @@ import { useCopyPaste } from "../useCopyPaste"; import NewControlPanel from "@/app/(platform)/build/components/NewControlPanel/NewControlPanel"; import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; import { BuildActionBar } from "../BuildActionBar"; +import { FloatingReviewsPanel } from "@/components/organisms/FloatingReviewsPanel/FloatingReviewsPanel"; // This is for the history, this is the minimum distance a block must move before it is logged // It helps to prevent spamming the history with small movements especially when pressing on a input in a block @@ -1024,6 +1025,10 @@ const FlowEditor: React.FC<{ saveAndRun={saveAndRun} /> )} + ); + case DataType.GOOGLE_DRIVE_PICKER: { + const pickerSchema = propSchema as any; + const config: import("@/lib/autogpt-server-api/types").GoogleDrivePickerConfig = + pickerSchema.google_drive_picker_config || {}; + + return ( + handleInputChange(propKey, value)} + error={errors[propKey]} + className={className} + showRemoveButton={true} + /> + ); + } case DataType.DATE: case DataType.TIME: diff --git 
a/autogpt_platform/frontend/src/app/(platform)/build/stores/graphStore.ts b/autogpt_platform/frontend/src/app/(platform)/build/stores/graphStore.ts index 1e01c221f2..5cb9d410ba 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/stores/graphStore.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/stores/graphStore.ts @@ -1,8 +1,10 @@ import { create } from "zustand"; +import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; interface GraphStore { + graphExecutionStatus: AgentExecutionStatus | undefined; isGraphRunning: boolean; - setIsGraphRunning: (isGraphRunning: boolean) => void; + setGraphExecutionStatus: (status: AgentExecutionStatus | undefined) => void; inputSchema: Record | null; credentialsInputSchema: Record | null; @@ -21,12 +23,20 @@ interface GraphStore { } export const useGraphStore = create((set, get) => ({ + graphExecutionStatus: undefined, isGraphRunning: false, inputSchema: null, credentialsInputSchema: null, outputSchema: null, - setIsGraphRunning: (isGraphRunning: boolean) => set({ isGraphRunning }), + setGraphExecutionStatus: (status: AgentExecutionStatus | undefined) => { + set({ + graphExecutionStatus: status, + isGraphRunning: + status === AgentExecutionStatus.RUNNING || + status === AgentExecutionStatus.QUEUED, + }); + }, setGraphSchemas: (inputSchema, credentialsInputSchema, outputSchema) => set({ inputSchema, credentialsInputSchema, outputSchema }), @@ -48,6 +58,7 @@ export const useGraphStore = create((set, get) => ({ reset: () => set({ + graphExecutionStatus: undefined, isGraphRunning: false, inputSchema: null, credentialsInputSchema: null, diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx index fce36acad2..0f3a7dadfc 100644 --- 
a/autogpt_platform/frontend/src/app/(platform)/chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx @@ -1,11 +1,11 @@ -import { useEffect, useRef } from "react"; +import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs"; import { Card } from "@/components/atoms/Card/Card"; import { Text } from "@/components/atoms/Text/Text"; -import { KeyIcon, CheckIcon, WarningIcon } from "@phosphor-icons/react"; -import { cn } from "@/lib/utils"; -import { useChatCredentialsSetup } from "./useChatCredentialsSetup"; -import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs"; import type { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api"; +import { cn } from "@/lib/utils"; +import { CheckIcon, KeyIcon, WarningIcon } from "@phosphor-icons/react"; +import { useEffect, useRef } from "react"; +import { useChatCredentialsSetup } from "./useChatCredentialsSetup"; export interface CredentialInfo { provider: string; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/AgentRunsView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx similarity index 78% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/AgentRunsView.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx index de01f6a35b..5bb3b953b2 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/AgentRunsView.tsx +++ 
b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx @@ -5,15 +5,15 @@ import { Breadcrumbs } from "@/components/molecules/Breadcrumbs/Breadcrumbs"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; import { PlusIcon } from "@phosphor-icons/react"; import { useEffect } from "react"; -import { AgentRunsLoading } from "./components/AgentRunsLoading"; -import { EmptyAgentRuns } from "./components/EmptyAgentRuns/EmptyAgentRuns"; -import { RunAgentModal } from "./components/RunAgentModal/RunAgentModal"; -import { RunsSidebar } from "./components/RunsSidebar/RunsSidebar"; -import { SelectedRunView } from "./components/SelectedRunView/SelectedRunView"; -import { SelectedScheduleView } from "./components/SelectedScheduleView/SelectedScheduleView"; -import { useAgentRunsView } from "./useAgentRunsView"; +import { RunAgentModal } from "./components/modals/RunAgentModal/RunAgentModal"; +import { AgentRunsLoading } from "./components/other/AgentRunsLoading"; +import { EmptyAgentRuns } from "./components/other/EmptyAgentRuns"; +import { SelectedRunView } from "./components/selected-views/SelectedRunView/SelectedRunView"; +import { SelectedScheduleView } from "./components/selected-views/SelectedScheduleView/SelectedScheduleView"; +import { AgentRunsLists } from "./components/sidebar/AgentRunsLists/AgentRunsLists"; +import { useNewAgentLibraryView } from "./useNewAgentLibraryView"; -export function AgentRunsView() { +export function NewAgentLibraryView() { const { agent, hasAnyItems, @@ -22,10 +22,11 @@ export function AgentRunsView() { error, agentId, selectedRun, + sidebarLoading, handleSelectRun, handleCountsChange, handleClearSelectedRun, - } = useAgentRunsView(); + } = useNewAgentLibraryView(); useEffect(() => { if (agent) { @@ -73,7 +74,7 @@ export function AgentRunsView() { /> - ) + ) : sidebarLoading ? 
( + // Show loading state while sidebar is loading to prevent flash of empty state +
Loading runs...
) : hasAnyItems ? (
Select a run to view its details diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/AgentInputsReadOnly/AgentInputsReadOnly.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/AgentInputsReadOnly/AgentInputsReadOnly.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/AgentInputsReadOnly/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/helpers.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/AgentInputsReadOnly/helpers.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/helpers.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/APIKeyCredentialsModal/APIKeyCredentialsModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/APIKeyCredentialsModal/APIKeyCredentialsModal.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/APIKeyCredentialsModal/APIKeyCredentialsModal.tsx rename to 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/APIKeyCredentialsModal/APIKeyCredentialsModal.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/APIKeyCredentialsModal/useAPIKeyCredentialsModal.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/APIKeyCredentialsModal/useAPIKeyCredentialsModal.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/APIKeyCredentialsModal/useAPIKeyCredentialsModal.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/APIKeyCredentialsModal/useAPIKeyCredentialsModal.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs.tsx similarity index 97% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs.tsx index 137013744e..c58b2f01a7 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs.tsx @@ -1,4 +1,3 @@ -import { Button } from "@/components/atoms/Button/Button"; import { IconKey, 
IconKeyPlus, @@ -12,6 +11,8 @@ import { SelectTrigger, SelectValue, } from "@/components/__legacy__/ui/select"; +import { Button } from "@/components/atoms/Button/Button"; +import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; import useCredentials from "@/hooks/useCredentials"; import { useBackendAPI } from "@/lib/autogpt-server-api/context"; import { @@ -31,11 +32,10 @@ import { FaMedium, FaTwitter, } from "react-icons/fa"; -import { APIKeyCredentialsModal } from "../APIKeyCredentialsModal/APIKeyCredentialsModal"; -import { HostScopedCredentialsModal } from "../HotScopedCredentialsModal/HotScopedCredentialsModal"; -import { OAuthFlowWaitingModal } from "../OAuthWaitingModal/OAuthWaitingModal"; -import { PasswordCredentialsModal } from "../PasswordCredentialsModal/PasswordCredentialsModal"; -import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; +import { APIKeyCredentialsModal } from "./APIKeyCredentialsModal/APIKeyCredentialsModal"; +import { HostScopedCredentialsModal } from "./HotScopedCredentialsModal/HotScopedCredentialsModal"; +import { OAuthFlowWaitingModal } from "./OAuthWaitingModal/OAuthWaitingModal"; +import { PasswordCredentialsModal } from "./PasswordCredentialsModal/PasswordCredentialsModal"; const fallbackIcon = FaKey; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/HotScopedCredentialsModal/HotScopedCredentialsModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/HotScopedCredentialsModal/HotScopedCredentialsModal.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/HotScopedCredentialsModal/HotScopedCredentialsModal.tsx rename to 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/HotScopedCredentialsModal/HotScopedCredentialsModal.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OAuthWaitingModal/OAuthWaitingModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/OAuthWaitingModal/OAuthWaitingModal.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OAuthWaitingModal/OAuthWaitingModal.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/OAuthWaitingModal/OAuthWaitingModal.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/PasswordCredentialsModal/PasswordCredentialsModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/PasswordCredentialsModal/PasswordCredentialsModal.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/PasswordCredentialsModal/PasswordCredentialsModal.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/PasswordCredentialsModal/PasswordCredentialsModal.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentInputs/RunAgentInputs.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs.tsx similarity index 94% rename from 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentInputs/RunAgentInputs.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs.tsx index dd2f328058..d98d3cb10d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentInputs/RunAgentInputs.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs.tsx @@ -1,10 +1,15 @@ -import React from "react"; import { format } from "date-fns"; +import React from "react"; import { Input as DSInput } from "@/components/atoms/Input/Input"; import { Select as DSSelect } from "@/components/atoms/Select/Select"; import { MultiToggle } from "@/components/molecules/MultiToggle/MultiToggle"; // Removed shadcn Select usage in favor of DS Select for time picker +import { Button } from "@/components/atoms/Button/Button"; +import { FileInput } from "@/components/atoms/FileInput/FileInput"; +import { Switch } from "@/components/atoms/Switch/Switch"; +import { GoogleDrivePickerInput } from "@/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput"; +import { TimePicker } from "@/components/molecules/TimePicker/TimePicker"; import { BlockIOObjectSubSchema, BlockIOSubSchema, @@ -13,12 +18,8 @@ import { determineDataType, TableRow, } from "@/lib/autogpt-server-api/types"; -import { TimePicker } from "@/components/molecules/TimePicker/TimePicker"; -import { FileInput } from "@/components/atoms/FileInput/FileInput"; -import { useRunAgentInputs } from "./useRunAgentInputs"; -import { Switch } from "@/components/atoms/Switch/Switch"; import { PlusIcon, XIcon } from "@phosphor-icons/react"; -import { Button } from "@/components/atoms/Button/Button"; +import { useRunAgentInputs } from "./useRunAgentInputs"; /** * A generic prop 
structure for the TypeBasedInput. @@ -90,6 +91,23 @@ export function RunAgentInputs({ ); break; + case DataType.GOOGLE_DRIVE_PICKER: { + const pickerSchema = schema as any; + const config: import("@/lib/autogpt-server-api/types").GoogleDrivePickerConfig = + pickerSchema.google_drive_picker_config || {}; + + innerInputElement = ( + + ); + break; + } + case DataType.BOOLEAN: innerInputElement = ( <> diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentInputs/useRunAgentInputs.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/useRunAgentInputs.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentInputs/useRunAgentInputs.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/useRunAgentInputs.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/RunAgentModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/RunAgentModal.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx index 4ef8886058..810dfc9e64 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/RunAgentModal.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx @@ -1,21 +1,21 @@ 
"use client"; -import { Dialog } from "@/components/molecules/Dialog/Dialog"; -import { Button } from "@/components/atoms/Button/Button"; -import { useState } from "react"; -import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import { useAgentRunModal } from "./useAgentRunModal"; -import { ModalHeader } from "./components/ModalHeader/ModalHeader"; -import { AgentCostSection } from "./components/AgentCostSection/AgentCostSection"; -import { AgentSectionHeader } from "./components/AgentSectionHeader/AgentSectionHeader"; -import { ModalRunSection } from "./components/ModalRunSection/ModalRunSection"; -import { RunAgentModalContextProvider } from "./context"; -import { AgentDetails } from "./components/AgentDetails/AgentDetails"; -import { RunActions } from "./components/RunActions/RunActions"; -import { ScheduleAgentModal } from "../ScheduleAgentModal/ScheduleAgentModal"; -import { AlarmIcon } from "@phosphor-icons/react"; -import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; +import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; +import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Button } from "@/components/atoms/Button/Button"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { AlarmIcon } from "@phosphor-icons/react"; +import { useState } from "react"; +import { ScheduleAgentModal } from "../ScheduleAgentModal/ScheduleAgentModal"; +import { AgentCostSection } from "./components/AgentCostSection/AgentCostSection"; +import { AgentDetails } from "./components/AgentDetails/AgentDetails"; +import { AgentSectionHeader } from "./components/AgentSectionHeader/AgentSectionHeader"; +import { ModalHeader } from "./components/ModalHeader/ModalHeader"; +import { ModalRunSection } from "./components/ModalRunSection/ModalRunSection"; 
+import { RunActions } from "./components/RunActions/RunActions"; +import { RunAgentModalContextProvider } from "./context"; +import { useAgentRunModal } from "./useAgentRunModal"; interface Props { triggerSlot: React.ReactNode; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/AgentCostSection/AgentCostSection.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/AgentCostSection/AgentCostSection.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/AgentCostSection/AgentCostSection.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/AgentCostSection/AgentCostSection.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/AgentDetails/AgentDetails.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/AgentDetails/AgentDetails.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/AgentDetails/AgentDetails.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/AgentDetails/AgentDetails.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/AgentSectionHeader/AgentSectionHeader.tsx 
b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/AgentSectionHeader/AgentSectionHeader.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/AgentSectionHeader/AgentSectionHeader.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/AgentSectionHeader/AgentSectionHeader.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/ModalHeader/ModalHeader.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalHeader/ModalHeader.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/ModalHeader/ModalHeader.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalHeader/ModalHeader.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx similarity index 98% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx rename to 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx index 8286bdf782..f5d63852bf 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/ModalRunSection.tsx @@ -1,13 +1,13 @@ -import { WebhookTriggerBanner } from "../WebhookTriggerBanner/WebhookTriggerBanner"; +import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs"; import { Input } from "@/components/atoms/Input/Input"; -import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs"; -import { useRunAgentModalContext } from "../../context"; -import { RunAgentInputs } from "../../../RunAgentInputs/RunAgentInputs"; -import { InfoIcon } from "@phosphor-icons/react"; import { Text } from "@/components/atoms/Text/Text"; -import { toDisplayName } from "@/providers/agent-credentials/helper"; -import { getCredentialTypeDisplayName } from "./helpers"; import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; +import { toDisplayName } from "@/providers/agent-credentials/helper"; +import { InfoIcon } from "@phosphor-icons/react"; +import { RunAgentInputs } from "../../../RunAgentInputs/RunAgentInputs"; +import { useRunAgentModalContext } from "../../context"; +import { WebhookTriggerBanner } from "../WebhookTriggerBanner/WebhookTriggerBanner"; +import { getCredentialTypeDisplayName } from "./helpers"; export function ModalRunSection() { const { diff --git 
a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/ModalRunSection/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/helpers.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/ModalRunSection/helpers.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/helpers.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/RunActions/RunActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/RunActions/RunActions.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/RunActions/RunActions.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/RunActions/RunActions.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/WebhookTriggerBanner/WebhookTriggerBanner.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/WebhookTriggerBanner/WebhookTriggerBanner.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/components/WebhookTriggerBanner/WebhookTriggerBanner.tsx rename to 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/WebhookTriggerBanner/WebhookTriggerBanner.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/context.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/context.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/context.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/context.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/helpers.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/helpers.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/helpers.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/useAgentRunModal.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentModal/useAgentRunModal.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/useAgentRunModal.ts diff 
--git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/ScheduleAgentModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/ScheduleAgentModal.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/ScheduleAgentModal.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/ScheduleAgentModal.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/CronScheduler.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/CronScheduler.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/CronScheduler.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/CronScheduler.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/CustomInterval.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/CustomInterval.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/CustomInterval.tsx rename to 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/CustomInterval.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/FrequencySelect.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/FrequencySelect.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/FrequencySelect.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/FrequencySelect.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/MonthlyPicker.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/MonthlyPicker.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/MonthlyPicker.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/MonthlyPicker.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/TimeAt.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/TimeAt.tsx 
similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/TimeAt.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/TimeAt.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/WeeklyPicker.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/WeeklyPicker.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/WeeklyPicker.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/WeeklyPicker.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/YearlyPicker.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/YearlyPicker.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/CronScheduler/YearlyPicker.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/CronScheduler/YearlyPicker.tsx diff --git 
a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/ModalScheduleSection/ModalScheduleSection.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/ModalScheduleSection/ModalScheduleSection.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/ModalScheduleSection/ModalScheduleSection.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/ModalScheduleSection/ModalScheduleSection.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/ModalScheduleSection/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/ModalScheduleSection/helpers.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/ModalScheduleSection/helpers.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/ModalScheduleSection/helpers.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/ModalScheduleSection/useModalScheduleSection.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/ModalScheduleSection/useModalScheduleSection.ts similarity index 100% rename from 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/ModalScheduleSection/useModalScheduleSection.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/ModalScheduleSection/useModalScheduleSection.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/TimezoneNotice/TimezoneNotice.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/TimezoneNotice/TimezoneNotice.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/components/TimezoneNotice/TimezoneNotice.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/components/TimezoneNotice/TimezoneNotice.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/useScheduleAgentModal.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/useScheduleAgentModal.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ScheduleAgentModal/useScheduleAgentModal.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/ScheduleAgentModal/useScheduleAgentModal.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/AgentRunsLoading.tsx 
b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/AgentRunsLoading.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/AgentRunsLoading.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/AgentRunsLoading.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/EmptyAgentRuns/EmptyAgentRuns.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyAgentRuns.tsx similarity index 93% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/EmptyAgentRuns/EmptyAgentRuns.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyAgentRuns.tsx index 3b0899e48d..7b7a6c0bb0 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/EmptyAgentRuns/EmptyAgentRuns.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyAgentRuns.tsx @@ -1,10 +1,10 @@ -import { ShowMoreText } from "@/components/molecules/ShowMoreText/ShowMoreText"; -import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; -import { Text } from "@/components/atoms/Text/Text"; -import { RunAgentModal } from "../RunAgentModal/RunAgentModal"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { ShowMoreText } from "@/components/molecules/ShowMoreText/ShowMoreText"; import { PlusIcon } from "@phosphor-icons/react"; +import { RunAgentModal } from 
"../modals/RunAgentModal/RunAgentModal"; +import { RunDetailCard } from "../selected-views/RunDetailCard/RunDetailCard"; type Props = { agentName: string; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/AgentActionsDropdown.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/AgentActionsDropdown.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/AgentActionsDropdown.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/AgentActionsDropdown.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/components/OutputActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/components/OutputActions.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/components/OutputActions.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/components/OutputActions.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/components/OutputItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/components/OutputItem.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/components/OutputItem.tsx rename to 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/components/OutputItem.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/index.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/index.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/index.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/index.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/renderers/CodeRenderer.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/CodeRenderer.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/renderers/CodeRenderer.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/CodeRenderer.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/renderers/ImageRenderer.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/ImageRenderer.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/renderers/ImageRenderer.tsx rename to 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/ImageRenderer.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/renderers/JSONRenderer.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/JSONRenderer.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/renderers/JSONRenderer.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/JSONRenderer.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/renderers/MarkdownRenderer.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/MarkdownRenderer.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/renderers/MarkdownRenderer.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/MarkdownRenderer.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/renderers/TextRenderer.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/TextRenderer.tsx similarity index 100% rename from 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/renderers/TextRenderer.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/TextRenderer.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/renderers/VideoRenderer.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/VideoRenderer.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/renderers/VideoRenderer.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/VideoRenderer.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/types.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/types.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/types.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/types.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/utils/copy.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/utils/copy.ts similarity index 100% rename from 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/utils/copy.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/utils/copy.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/utils/download.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/utils/download.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers/utils/download.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/utils/download.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunDetailCard/RunDetailCard.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailCard/RunDetailCard.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunDetailCard/RunDetailCard.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailCard/RunDetailCard.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunDetailHeader/RunDetailHeader.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx similarity index 99% rename from 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunDetailHeader/RunDetailHeader.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx index 9e54801860..af19d94cd5 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunDetailHeader/RunDetailHeader.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx @@ -1,21 +1,20 @@ -import React from "react"; -import { RunStatusBadge } from "../SelectedRunView/components/RunStatusBadge"; -import { Text } from "@/components/atoms/Text/Text"; -import { Button } from "@/components/atoms/Button/Button"; -import { - TrashIcon, - StopIcon, - PlayIcon, - ArrowSquareOutIcon, -} from "@phosphor-icons/react"; -import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import moment from "moment"; import { GraphExecution } from "@/app/api/__generated__/models/graphExecution"; -import { useRunDetailHeader } from "./useRunDetailHeader"; -import { AgentActionsDropdown } from "../AgentActionsDropdown"; +import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; import { Dialog } from "@/components/molecules/Dialog/Dialog"; import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; +import { + ArrowSquareOutIcon, + PlayIcon, + StopIcon, + TrashIcon, +} from "@phosphor-icons/react"; +import moment from "moment"; +import { AgentActionsDropdown } from "../AgentActionsDropdown"; +import { RunStatusBadge } from "../SelectedRunView/components/RunStatusBadge"; import { ShareRunButton } from "../ShareRunButton/ShareRunButton"; +import { 
useRunDetailHeader } from "./useRunDetailHeader"; type Props = { agent: LibraryAgent; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunDetailHeader/useRunDetailHeader.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/useRunDetailHeader.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunDetailHeader/useRunDetailHeader.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/useRunDetailHeader.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunDetailHeader/useScheduleDetailHeader.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/useScheduleDetailHeader.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunDetailHeader/useScheduleDetailHeader.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/useScheduleDetailHeader.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/SelectedRunView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx similarity index 58% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/SelectedRunView.tsx rename to 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx index 0ab027d99f..52122edf8d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/SelectedRunView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx @@ -1,20 +1,24 @@ "use client"; -import React from "react"; +import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Skeleton } from "@/components/__legacy__/ui/skeleton"; +import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; import { TabsLine, TabsLineContent, TabsLineList, TabsLineTrigger, } from "@/components/molecules/TabsLine/TabsLine"; -import { useSelectedRunView } from "./useSelectedRunView"; -import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader"; -import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; -import { Skeleton } from "@/components/__legacy__/ui/skeleton"; -import { AgentInputsReadOnly } from "../AgentInputsReadOnly/AgentInputsReadOnly"; +import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/PendingReviewsList"; +import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews"; +import { parseAsString, useQueryState } from "nuqs"; +import { useEffect } from "react"; +import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly"; import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; +import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader"; import { RunOutputs } from "./components/RunOutputs"; +import { 
useSelectedRunView } from "./useSelectedRunView"; interface Props { agent: LibraryAgent; @@ -34,6 +38,24 @@ export function SelectedRunView({ runId, ); + const { + pendingReviews, + isLoading: reviewsLoading, + refetch: refetchReviews, + } = usePendingReviewsForExecution(runId); + + // Tab state management + const [activeTab, setActiveTab] = useQueryState( + "tab", + parseAsString.withDefault("output"), + ); + + useEffect(() => { + if (run?.status === AgentExecutionStatus.REVIEW && runId) { + refetchReviews(); + } + }, [run?.status, runId, refetchReviews]); + if (responseError || httpError) { return ( {/* Content */} - + Output Your input + {run?.status === AgentExecutionStatus.REVIEW && ( + + Reviews ({pendingReviews.length}) + + )} @@ -92,6 +119,26 @@ export function SelectedRunView({ /> + + {run?.status === AgentExecutionStatus.REVIEW && ( + + + {reviewsLoading ? ( +
Loading reviews…
+ ) : pendingReviews.length > 0 ? ( + + ) : ( +
+ No pending reviews for this execution +
+ )} +
+
+ )}
); diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/components/RunOutputs.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunOutputs.tsx similarity index 92% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/components/RunOutputs.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunOutputs.tsx index f7e4298215..f165c4c964 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/components/RunOutputs.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunOutputs.tsx @@ -3,12 +3,12 @@ import type { OutputMetadata, OutputRenderer, -} from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers"; +} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers"; import { globalRegistry, OutputActions, OutputItem, -} from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/OutputRenderers"; +} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers"; import React, { useMemo } from "react"; type OutputsRecord = Record>; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/components/RunStatusBadge.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunStatusBadge.tsx similarity 
index 92% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/components/RunStatusBadge.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunStatusBadge.tsx index f9534541c6..cf92280c86 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/components/RunStatusBadge.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/RunStatusBadge.tsx @@ -2,6 +2,7 @@ import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecut import { CheckCircleIcon, ClockIcon, + EyeIcon, PauseCircleIcon, StopCircleIcon, WarningCircleIcon, @@ -36,6 +37,11 @@ const statusIconMap: Record = { bgColor: "bg-yellow-50", textColor: "!text-yellow-700", }, + REVIEW: { + icon: , + bgColor: "bg-orange-50", + textColor: "!text-orange-700", + }, COMPLETED: { icon: ( diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/useSelectedRunView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/useSelectedRunView.ts similarity index 90% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/useSelectedRunView.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/useSelectedRunView.ts index b63f39f189..276673d389 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedRunView/useSelectedRunView.ts +++ 
b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/useSelectedRunView.ts @@ -7,7 +7,7 @@ import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecut export function useSelectedRunView(graphId: string, runId: string) { const query = useGetV1GetExecutionDetails(graphId, runId, { query: { - refetchInterval: (q) => { + refetchInterval: (q: any) => { const isSuccess = q.state.data?.status === 200; if (!isSuccess) return false; @@ -19,7 +19,8 @@ export function useSelectedRunView(graphId: string, runId: string) { if ( status === AgentExecutionStatus.RUNNING || status === AgentExecutionStatus.QUEUED || - status === AgentExecutionStatus.INCOMPLETE + status === AgentExecutionStatus.INCOMPLETE || + status === AgentExecutionStatus.REVIEW ) return 1500; return false; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/SelectedScheduleView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx similarity index 98% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/SelectedScheduleView.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx index 558b3d573a..fb5a84a3b1 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/SelectedScheduleView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx @@ -1,24 +1,23 @@ "use client"; -import React from "react"; +import { 
useGetV1GetUserTimezone } from "@/app/api/__generated__/endpoints/auth/auth"; import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import { Skeleton } from "@/components/__legacy__/ui/skeleton"; import { Text } from "@/components/atoms/Text/Text"; +import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; import { TabsLine, TabsLineContent, TabsLineList, TabsLineTrigger, } from "@/components/molecules/TabsLine/TabsLine"; -import { useSelectedScheduleView } from "./useSelectedScheduleView"; +import { humanizeCronExpression } from "@/lib/cron-expression-utils"; +import { formatInTimezone, getTimezoneDisplayName } from "@/lib/timezone-utils"; +import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly"; import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader"; -import { humanizeCronExpression } from "@/lib/cron-expression-utils"; -import { useGetV1GetUserTimezone } from "@/app/api/__generated__/endpoints/auth/auth"; -import { formatInTimezone, getTimezoneDisplayName } from "@/lib/timezone-utils"; -import { Skeleton } from "@/components/__legacy__/ui/skeleton"; -import { AgentInputsReadOnly } from "../AgentInputsReadOnly/AgentInputsReadOnly"; import { ScheduleActions } from "./components/ScheduleActions"; +import { useSelectedScheduleView } from "./useSelectedScheduleView"; interface Props { agent: LibraryAgent; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/components/DeleteScheduleButton/DeleteScheduleButton.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/DeleteScheduleButton/DeleteScheduleButton.tsx similarity index 100% rename from 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/components/DeleteScheduleButton/DeleteScheduleButton.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/DeleteScheduleButton/DeleteScheduleButton.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/components/EditInputsModal/EditInputsModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditInputsModal/EditInputsModal.tsx similarity index 96% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/components/EditInputsModal/EditInputsModal.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditInputsModal/EditInputsModal.tsx index 26b0539248..05e55abd34 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/components/EditInputsModal/EditInputsModal.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditInputsModal/EditInputsModal.tsx @@ -1,14 +1,13 @@ "use client"; -import React from "react"; -import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { Button } from "@/components/atoms/Button/Button"; import { Text } from "@/components/atoms/Text/Text"; -import 
type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; -import { RunAgentInputs } from "../../../RunAgentInputs/RunAgentInputs"; -import { useEditInputsModal } from "./useEditInputsModal"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; import { PencilSimpleIcon } from "@phosphor-icons/react"; +import { RunAgentInputs } from "../../../../modals/RunAgentInputs/RunAgentInputs"; +import { useEditInputsModal } from "./useEditInputsModal"; type Props = { agent: LibraryAgent; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/components/EditInputsModal/useEditInputsModal.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditInputsModal/useEditInputsModal.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/components/EditInputsModal/useEditInputsModal.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditInputsModal/useEditInputsModal.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/components/EditScheduleModal/EditScheduleModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditScheduleModal/EditScheduleModal.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/components/EditScheduleModal/EditScheduleModal.tsx rename to 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditScheduleModal/EditScheduleModal.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/components/EditScheduleModal/useEditScheduleModal.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditScheduleModal/useEditScheduleModal.ts similarity index 97% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/components/EditScheduleModal/useEditScheduleModal.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditScheduleModal/useEditScheduleModal.ts index 29f439b977..b006e775f9 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/components/EditScheduleModal/useEditScheduleModal.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditScheduleModal/useEditScheduleModal.ts @@ -1,15 +1,15 @@ "use client"; -import { useMemo, useState } from "react"; -import { useMutation, useQueryClient } from "@tanstack/react-query"; -import { getGetV1ListExecutionSchedulesForAGraphQueryKey } from "@/app/api/__generated__/endpoints/schedules/schedules"; import { getGetV1ListGraphExecutionsInfiniteQueryOptions } from "@/app/api/__generated__/endpoints/graphs/graphs"; +import { getGetV1ListExecutionSchedulesForAGraphQueryKey } from "@/app/api/__generated__/endpoints/schedules/schedules"; +import type { GraphExecutionJobInfo } from 
"@/app/api/__generated__/models/graphExecutionJobInfo"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { useMutation, useQueryClient } from "@tanstack/react-query"; +import { useMemo, useState } from "react"; import { parseCronToForm, validateSchedule, -} from "../../../ScheduleAgentModal/components/ModalScheduleSection/helpers"; -import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; -import { useToast } from "@/components/molecules/Toast/use-toast"; +} from "../../../../modals/ScheduleAgentModal/components/ModalScheduleSection/helpers"; export function useEditScheduleModal( graphId: string, diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/components/ScheduleActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/ScheduleActions.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/components/ScheduleActions.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/ScheduleActions.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/useSelectedScheduleView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/useSelectedScheduleView.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/SelectedScheduleView/useSelectedScheduleView.ts rename to 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/useSelectedScheduleView.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ShareRunButton/ShareRunButton.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/ShareRunButton/ShareRunButton.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ShareRunButton/ShareRunButton.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/ShareRunButton/ShareRunButton.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ShareRunButton/useShareRunButton.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/ShareRunButton/useShareRunButton.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/ShareRunButton/useShareRunButton.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/ShareRunButton/useShareRunButton.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/RunsSidebar.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/AgentRunsLists.tsx similarity index 95% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/RunsSidebar.tsx rename to 
autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/AgentRunsLists.tsx index 9d2ba2f0b3..bfd10d98b8 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/RunsSidebar.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/AgentRunsLists.tsx @@ -1,22 +1,21 @@ "use client"; -import React from "react"; +import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; +import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Skeleton } from "@/components/__legacy__/ui/skeleton"; +import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import { InfiniteList } from "@/components/molecules/InfiniteList/InfiniteList"; import { TabsLine, + TabsLineContent, TabsLineList, TabsLineTrigger, - TabsLineContent, } from "@/components/molecules/TabsLine/TabsLine"; -import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import { useRunsSidebar } from "./useRunsSidebar"; import { RunListItem } from "./components/RunListItem"; import { ScheduleListItem } from "./components/ScheduleListItem"; -import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; -import { InfiniteList } from "@/components/molecules/InfiniteList/InfiniteList"; -import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; -import { Skeleton } from "@/components/__legacy__/ui/skeleton"; +import { useAgentRunsLists } from "./useAgentRunsLists"; -interface RunsSidebarProps { +interface Props { agent: LibraryAgent; selectedRunId?: string; onSelectRun: (id: string) => void; @@ -27,12 +26,12 @@ interface RunsSidebarProps { }) => void; } -export function RunsSidebar({ +export function AgentRunsLists({ agent, selectedRunId, onSelectRun, 
onCountsChange, -}: RunsSidebarProps) { +}: Props) { const { runs, schedules, @@ -45,7 +44,7 @@ export function RunsSidebar({ isFetchingMoreRuns, tabValue, setTabValue, - } = useRunsSidebar({ + } = useAgentRunsLists({ graphId: agent.graph_id, onSelectRun, onCountsChange, diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/components/RunIconWrapper.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/RunIconWrapper.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/components/RunIconWrapper.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/RunIconWrapper.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/components/RunListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/RunListItem.tsx similarity index 92% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/components/RunListItem.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/RunListItem.tsx index 39b91a8f64..89137cbaf7 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/components/RunListItem.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/RunListItem.tsx @@ -31,6 +31,11 @@ const statusIconMap: Record = { ), + REVIEW: ( 
+ + + + ), COMPLETED: ( diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/components/RunSidebarCard.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/RunSidebarCard.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/components/RunSidebarCard.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/RunSidebarCard.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/components/ScheduleListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/ScheduleListItem.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/components/ScheduleListItem.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/components/ScheduleListItem.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/helpers.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/helpers.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/helpers.ts diff --git 
a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/useRunsSidebar.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/useAgentRunsLists.ts similarity index 97% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/useRunsSidebar.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/useAgentRunsLists.ts index 2ba2dedd97..e6bd124006 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunsSidebar/useRunsSidebar.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/AgentRunsLists/useAgentRunsLists.ts @@ -5,13 +5,13 @@ import { useEffect, useMemo, useState } from "react"; import { useGetV1ListGraphExecutionsInfinite } from "@/app/api/__generated__/endpoints/graphs/graphs"; import { useGetV1ListExecutionSchedulesForAGraph } from "@/app/api/__generated__/endpoints/schedules/schedules"; import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; -import { useSearchParams } from "next/navigation"; import { okData } from "@/app/api/helpers"; +import { useSearchParams } from "next/navigation"; import { - getRunsPollingInterval, computeRunsCount, - getNextRunsPageParam, extractRunsFromPages, + getNextRunsPageParam, + getRunsPollingInterval, } from "./helpers"; type Args = { @@ -24,7 +24,11 @@ type Args = { }) => void; }; -export function useRunsSidebar({ graphId, onSelectRun, onCountsChange }: Args) { +export function useAgentRunsLists({ + graphId, + onSelectRun, + onCountsChange, +}: Args) { const params = useSearchParams(); const existingRunId = params.get("executionId") as string | undefined; const 
[tabValue, setTabValue] = useState<"runs" | "scheduled">("runs"); diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/useAgentRunsView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts similarity index 96% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/useAgentRunsView.ts rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts index c66d849691..4635321b5d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/AgentRunsView/useAgentRunsView.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts @@ -5,7 +5,7 @@ import { useParams } from "next/navigation"; import { parseAsString, useQueryState } from "nuqs"; import { useCallback, useMemo, useState } from "react"; -export function useAgentRunsView() { +export function useNewAgentLibraryView() { const { id } = useParams(); const agentId = id as string; const { @@ -70,6 +70,7 @@ export function useAgentRunsView() { hasAnyItems, showSidebarLayout, selectedRun, + sidebarLoading, handleClearSelectedRun, handleCountsChange, handleSelectRun, diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-details-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-details-view.tsx index b513ff39a7..47f009263b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-details-view.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-details-view.tsx @@ -1,6 +1,6 @@ "use client"; 
import moment from "moment"; -import React, { useCallback, useMemo } from "react"; +import React, { useCallback, useMemo, useEffect } from "react"; import { Graph, @@ -39,6 +39,8 @@ import useCredits from "@/hooks/useCredits"; import { AgentRunOutputView } from "./agent-run-output-view"; import { analytics } from "@/services/analytics"; import { useOnboarding } from "@/providers/onboarding/onboarding-provider"; +import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/PendingReviewsList"; +import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews"; export function AgentRunDetailsView({ agent, @@ -67,8 +69,21 @@ export function AgentRunDetailsView({ const { completeStep } = useOnboarding(); + const { + pendingReviews, + isLoading: reviewsLoading, + refetch: refetchReviews, + } = usePendingReviewsForExecution(run.id); + const toastOnFail = useToastOnFail(); + // Refetch pending reviews when execution status changes to REVIEW + useEffect(() => { + if (runStatus === "review" && run.id) { + refetchReviews(); + } + }, [runStatus, run.id, refetchReviews]); + const infoStats: { label: string; value: React.ReactNode }[] = useMemo(() => { if (!run) return []; return [ @@ -373,6 +388,32 @@ export function AgentRunDetailsView({ )} + {/* Pending Reviews Section */} + {runStatus === "review" && ( + + + + Pending Reviews ({pendingReviews.length}) + + + + {reviewsLoading ? ( + + ) : pendingReviews.length > 0 ? ( + + ) : ( +
+ No pending reviews for this execution +
+ )} +
+
+ )} + Input diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx index a037ca9645..1ad4d4df39 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx @@ -12,6 +12,9 @@ import { } from "@/lib/autogpt-server-api"; import { useBackendAPI } from "@/lib/autogpt-server-api/context"; +import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs"; +import { RunAgentInputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs"; +import { ScheduleTaskDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog"; import ActionButtonGroup from "@/components/__legacy__/action-button-group"; import type { ButtonAction } from "@/components/__legacy__/types"; import { @@ -25,25 +28,21 @@ import { IconPlay, IconSave, } from "@/components/__legacy__/ui/icons"; -import { CalendarClockIcon, Trash2Icon } from "lucide-react"; -import { ClockIcon, InfoIcon } from "@phosphor-icons/react"; -import { humanizeCronExpression } from "@/lib/cron-expression-utils"; -import { ScheduleTaskDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog"; -import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs"; -import { RunAgentInputs } from 
"@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/RunAgentInputs/RunAgentInputs"; -import { cn, isEmpty } from "@/lib/utils"; -import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; -import { CopyIcon } from "@phosphor-icons/react"; -import { Button } from "@/components/atoms/Button/Button"; import { Input } from "@/components/__legacy__/ui/input"; +import { Button } from "@/components/atoms/Button/Button"; +import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; import { useToast, useToastOnFail, } from "@/components/molecules/Toast/use-toast"; +import { humanizeCronExpression } from "@/lib/cron-expression-utils"; +import { cn, isEmpty } from "@/lib/utils"; +import { ClockIcon, CopyIcon, InfoIcon } from "@phosphor-icons/react"; +import { CalendarClockIcon, Trash2Icon } from "lucide-react"; -import { AgentStatus, AgentStatusChip } from "./agent-status-chip"; import { useOnboarding } from "@/providers/onboarding/onboarding-provider"; import { analytics } from "@/services/analytics"; +import { AgentStatus, AgentStatusChip } from "./agent-status-chip"; export function AgentRunDraftView({ graph, diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-output-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-output-view.tsx index 520917e1d7..e55914b4ea 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-output-view.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-output-view.tsx @@ -1,7 +1,7 @@ "use client"; -import React, { useMemo } from "react"; import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; +import React, { useMemo } from 
"react"; import { Card, @@ -11,12 +11,12 @@ import { } from "@/components/__legacy__/ui/card"; import LoadingBox from "@/components/__legacy__/ui/loading"; +import type { OutputMetadata } from "../../NewAgentLibraryView/components/selected-views/OutputRenderers"; import { globalRegistry, - OutputItem, OutputActions, -} from "../../AgentRunsView/components/OutputRenderers"; -import type { OutputMetadata } from "../../AgentRunsView/components/OutputRenderers"; + OutputItem, +} from "../../NewAgentLibraryView/components/selected-views/OutputRenderers"; export function AgentRunOutputView({ agentRunOutputs, diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-status-chip.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-status-chip.tsx index 46bd50d26c..24b2864359 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-status-chip.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-status-chip.tsx @@ -11,7 +11,8 @@ export type AgentRunStatus = | "running" | "stopped" | "scheduled" - | "draft"; + | "draft" + | "review"; export const agentRunStatusMap: Record< GraphExecutionMeta["status"], @@ -23,7 +24,7 @@ export const agentRunStatusMap: Record< QUEUED: "queued", RUNNING: "running", TERMINATED: "stopped", - // TODO: implement "draft" - https://github.com/Significant-Gravitas/AutoGPT/issues/9168 + REVIEW: "review", }; const statusData: Record< @@ -37,6 +38,7 @@ const statusData: Record< draft: { label: "Draft", variant: "secondary" }, stopped: { label: "Stopped", variant: "secondary" }, scheduled: { label: "Scheduled", variant: "secondary" }, + review: { label: "In Review", variant: "orange" }, }; const statusStyles = { @@ -45,6 +47,8 @@ const statusStyles = { destructive: 
"bg-red-100 text-red-800 hover:bg-red-100 hover:text-red-800", warning: "bg-yellow-100 text-yellow-800 hover:bg-yellow-100 hover:text-yellow-800", + orange: + "bg-orange-100 text-orange-800 hover:bg-orange-100 hover:text-orange-800", info: "bg-blue-100 text-blue-800 hover:bg-blue-100 hover:text-blue-800", secondary: "bg-slate-100 text-slate-800 hover:bg-slate-100 hover:text-slate-800", diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/loading.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/loading.tsx index a2d4360666..42f1e3330c 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/loading.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/loading.tsx @@ -1,3 +1,3 @@ -import { AgentRunsLoading } from "./components/AgentRunsView/components/AgentRunsLoading"; +import { AgentRunsLoading } from "./components/NewAgentLibraryView/components/other/AgentRunsLoading"; export default AgentRunsLoading; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/page.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/page.tsx index 8aa6cb31cd..9ada590dd8 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/page.tsx @@ -1,10 +1,14 @@ "use client"; import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; -import { AgentRunsView } from "./components/AgentRunsView/AgentRunsView"; +import { NewAgentLibraryView } from "./components/NewAgentLibraryView/NewAgentLibraryView"; import { OldAgentLibraryView } from "./components/OldAgentLibraryView/OldAgentLibraryView"; export default function AgentLibraryPage() { const isNewLibraryPageEnabled = useGetFlag(Flag.NEW_AGENT_RUNS); - return isNewLibraryPageEnabled ? : ; + return isNewLibraryPageEnabled ? 
( + + ) : ( + + ); } diff --git a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/ProfileLoading.tsx b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/ProfileLoading.tsx new file mode 100644 index 0000000000..13b67383eb --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/ProfileLoading.tsx @@ -0,0 +1,48 @@ +import { Skeleton } from "@/components/__legacy__/ui/skeleton"; +import { Separator } from "@radix-ui/react-separator"; + +export function ProfileLoading() { + return ( +
+
+ +
+
+ + +
+
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+ {[1, 2, 3, 4, 5].map((i) => ( +
+ + +
+ ))} +
+
+ +
+ +
+
+
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/integrations/page.tsx b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/integrations/page.tsx index a09c53ba20..c8b24b6d9f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/integrations/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/integrations/page.tsx @@ -1,5 +1,5 @@ "use client"; -import { providerIcons } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs"; +import { providerIcons } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs"; import { IconKey, IconUser } from "@/components/__legacy__/ui/icons"; import LoadingBox from "@/components/__legacy__/ui/loading"; import { diff --git a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx index 86190e62cc..260fbc0b52 100644 --- a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx @@ -1,23 +1,58 @@ -import React from "react"; -import { Metadata } from "next/types"; -import { redirect } from "next/navigation"; -import BackendAPI from "@/lib/autogpt-server-api"; +"use client"; + +import { useGetV2GetUserProfile } from "@/app/api/__generated__/endpoints/store/store"; import { ProfileInfoForm } from "@/components/__legacy__/ProfileInfoForm"; +import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import { ProfileDetails } from "@/lib/autogpt-server-api/types"; +import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; +import { ProfileLoading } from "./ProfileLoading"; -// Force dynamic rendering to avoid static generation issues with cookies -export const dynamic = "force-dynamic"; +export default function UserProfilePage() { + const { 
user } = useSupabase(); -export const metadata: Metadata = { title: "Profile - AutoGPT Platform" }; - -export default async function UserProfilePage(): Promise { - const api = new BackendAPI(); - const profile = await api.getStoreProfile().catch((error) => { - console.error("Error fetching profile:", error); - return null; + const { + data: profile, + isLoading, + isError, + error, + refetch, + } = useGetV2GetUserProfile({ + query: { + enabled: !!user, + select: (res) => { + if (res.status === 200) { + return { + ...res.data, + avatar_url: res.data.avatar_url ?? "", + }; + } + return null; + }, + }, }); - if (!profile) { - redirect("/login"); + if (isError) { + return ( +
+ { + void refetch(); + }} + /> +
+ ); + } + + if (isLoading || !user || !profile) { + return ; } return ( diff --git a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/settings/components/SettingsForm/SettingsForm.tsx b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/settings/components/SettingsForm/SettingsForm.tsx index 127b7ac94f..5a42db20c0 100644 --- a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/settings/components/SettingsForm/SettingsForm.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/settings/components/SettingsForm/SettingsForm.tsx @@ -1,7 +1,7 @@ "use client"; -import { Separator } from "@/components/__legacy__/ui/separator"; import { NotificationPreference } from "@/app/api/__generated__/models/notificationPreference"; +import { Separator } from "@/components/__legacy__/ui/separator"; import { User } from "@supabase/supabase-js"; import { EmailForm } from "./components/EmailForm/EmailForm"; import { NotificationForm } from "./components/NotificationForm/NotificationForm"; @@ -18,6 +18,8 @@ export function SettingsForm({ user, timezone, }: SettingsFormProps) { + if (!user || !preferences) return null; + return (
diff --git a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/settings/components/SettingsForm/components/TimezoneForm/TimezoneForm.tsx b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/settings/components/SettingsForm/components/TimezoneForm/TimezoneForm.tsx index 0456e901d0..f09dc11afc 100644 --- a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/settings/components/SettingsForm/components/TimezoneForm/TimezoneForm.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/settings/components/SettingsForm/components/TimezoneForm/TimezoneForm.tsx @@ -1,22 +1,11 @@ "use client"; -import * as React from "react"; -import { useTimezoneForm } from "./useTimezoneForm"; -import { User } from "@supabase/supabase-js"; import { Card, CardContent, CardHeader, CardTitle, } from "@/components/__legacy__/ui/card"; -import { Button } from "@/components/atoms/Button/Button"; -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, -} from "@/components/__legacy__/ui/select"; import { Form, FormControl, @@ -25,48 +14,26 @@ import { FormLabel, FormMessage, } from "@/components/__legacy__/ui/form"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/__legacy__/ui/select"; +import { Button } from "@/components/atoms/Button/Button"; +import { User } from "@supabase/supabase-js"; +import * as React from "react"; +import { TIMEZONES } from "./helpers"; +import { useTimezoneForm } from "./useTimezoneForm"; -type TimezoneFormProps = { +type Props = { user: User; currentTimezone?: string; }; -// Common timezones list - can be expanded later -const TIMEZONES = [ - { value: "UTC", label: "UTC (Coordinated Universal Time)" }, - { value: "America/New_York", label: "Eastern Time (US & Canada)" }, - { value: "America/Chicago", label: "Central Time (US & Canada)" }, - { value: "America/Denver", label: "Mountain Time (US & Canada)" }, - { value: "America/Los_Angeles", label: 
"Pacific Time (US & Canada)" }, - { value: "America/Phoenix", label: "Arizona (US)" }, - { value: "America/Anchorage", label: "Alaska (US)" }, - { value: "Pacific/Honolulu", label: "Hawaii (US)" }, - { value: "Europe/London", label: "London (UK)" }, - { value: "Europe/Paris", label: "Paris (France)" }, - { value: "Europe/Berlin", label: "Berlin (Germany)" }, - { value: "Europe/Moscow", label: "Moscow (Russia)" }, - { value: "Asia/Dubai", label: "Dubai (UAE)" }, - { value: "Asia/Kolkata", label: "India Standard Time" }, - { value: "Asia/Shanghai", label: "China Standard Time" }, - { value: "Asia/Tokyo", label: "Tokyo (Japan)" }, - { value: "Asia/Seoul", label: "Seoul (South Korea)" }, - { value: "Asia/Singapore", label: "Singapore" }, - { value: "Australia/Sydney", label: "Sydney (Australia)" }, - { value: "Australia/Melbourne", label: "Melbourne (Australia)" }, - { value: "Pacific/Auckland", label: "Auckland (New Zealand)" }, - { value: "America/Toronto", label: "Toronto (Canada)" }, - { value: "America/Vancouver", label: "Vancouver (Canada)" }, - { value: "America/Mexico_City", label: "Mexico City (Mexico)" }, - { value: "America/Sao_Paulo", label: "São Paulo (Brazil)" }, - { value: "America/Buenos_Aires", label: "Buenos Aires (Argentina)" }, - { value: "Africa/Cairo", label: "Cairo (Egypt)" }, - { value: "Africa/Johannesburg", label: "Johannesburg (South Africa)" }, -]; - -export function TimezoneForm({ - user, - currentTimezone = "not-set", -}: TimezoneFormProps) { +export function TimezoneForm({ user, currentTimezone = "not-set" }: Props) { + console.log("currentTimezone", currentTimezone); // If timezone is not set, try to detect it from the browser const effectiveTimezone = React.useMemo(() => { if (currentTimezone === "not-set") { @@ -120,7 +87,7 @@ export function TimezoneForm({ )} /> - diff --git a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/settings/components/SettingsForm/components/TimezoneForm/helpers.ts 
b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/settings/components/SettingsForm/components/TimezoneForm/helpers.ts new file mode 100644 index 0000000000..1b62a8ce45 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/settings/components/SettingsForm/components/TimezoneForm/helpers.ts @@ -0,0 +1,123 @@ +export const TIMEZONES = [ + { value: "UTC", label: "UTC (Coordinated Universal Time)" }, + { value: "America/Adak", label: "Adak (US - Aleutian Islands)" }, + { value: "America/Anchorage", label: "Anchorage (US - Alaska)" }, + { value: "America/Los_Angeles", label: "Los Angeles (US - Pacific)" }, + { value: "America/Tijuana", label: "Tijuana (Mexico - Pacific)" }, + { value: "America/Phoenix", label: "Phoenix (US - Arizona)" }, + { value: "America/Denver", label: "Denver (US - Mountain)" }, + { value: "America/Chicago", label: "Chicago (US - Central)" }, + { value: "America/New_York", label: "New York (US - Eastern)" }, + { value: "America/Toronto", label: "Toronto (Canada - Eastern)" }, + { value: "America/Halifax", label: "Halifax (Canada - Atlantic)" }, + { value: "America/St_Johns", label: "St. 
John's (Canada - Newfoundland)" }, + { value: "America/Caracas", label: "Caracas (Venezuela)" }, + { value: "America/Bogota", label: "Bogotá (Colombia)" }, + { value: "America/Lima", label: "Lima (Peru)" }, + { value: "America/Santiago", label: "Santiago (Chile)" }, + { value: "America/La_Paz", label: "La Paz (Bolivia)" }, + { value: "America/Asuncion", label: "Asunción (Paraguay)" }, + { value: "America/Montevideo", label: "Montevideo (Uruguay)" }, + { value: "America/Buenos_Aires", label: "Buenos Aires (Argentina)" }, + { value: "America/Sao_Paulo", label: "São Paulo (Brazil)" }, + { value: "America/Manaus", label: "Manaus (Brazil - Amazon)" }, + { value: "America/Fortaleza", label: "Fortaleza (Brazil - Northeast)" }, + { value: "America/Mexico_City", label: "Mexico City (Mexico)" }, + { value: "America/Guatemala", label: "Guatemala" }, + { value: "America/Costa_Rica", label: "Costa Rica" }, + { value: "America/Panama", label: "Panama" }, + { value: "America/Havana", label: "Havana (Cuba)" }, + { value: "America/Jamaica", label: "Jamaica" }, + { value: "America/Port-au-Prince", label: "Port-au-Prince (Haiti)" }, + { + value: "America/Santo_Domingo", + label: "Santo Domingo (Dominican Republic)", + }, + { value: "America/Puerto_Rico", label: "Puerto Rico" }, + { value: "Atlantic/Azores", label: "Azores (Portugal)" }, + { value: "Atlantic/Cape_Verde", label: "Cape Verde" }, + { value: "Europe/London", label: "London (UK)" }, + { value: "Europe/Dublin", label: "Dublin (Ireland)" }, + { value: "Europe/Lisbon", label: "Lisbon (Portugal)" }, + { value: "Europe/Madrid", label: "Madrid (Spain)" }, + { value: "Europe/Paris", label: "Paris (France)" }, + { value: "Europe/Brussels", label: "Brussels (Belgium)" }, + { value: "Europe/Amsterdam", label: "Amsterdam (Netherlands)" }, + { value: "Europe/Berlin", label: "Berlin (Germany)" }, + { value: "Europe/Rome", label: "Rome (Italy)" }, + { value: "Europe/Vienna", label: "Vienna (Austria)" }, + { value: "Europe/Zurich", 
label: "Zurich (Switzerland)" }, + { value: "Europe/Prague", label: "Prague (Czech Republic)" }, + { value: "Europe/Warsaw", label: "Warsaw (Poland)" }, + { value: "Europe/Stockholm", label: "Stockholm (Sweden)" }, + { value: "Europe/Oslo", label: "Oslo (Norway)" }, + { value: "Europe/Copenhagen", label: "Copenhagen (Denmark)" }, + { value: "Europe/Helsinki", label: "Helsinki (Finland)" }, + { value: "Europe/Athens", label: "Athens (Greece)" }, + { value: "Europe/Bucharest", label: "Bucharest (Romania)" }, + { value: "Europe/Sofia", label: "Sofia (Bulgaria)" }, + { value: "Europe/Budapest", label: "Budapest (Hungary)" }, + { value: "Europe/Belgrade", label: "Belgrade (Serbia)" }, + { value: "Europe/Zagreb", label: "Zagreb (Croatia)" }, + { value: "Europe/Moscow", label: "Moscow (Russia)" }, + { value: "Europe/Kiev", label: "Kyiv (Ukraine)" }, + { value: "Europe/Istanbul", label: "Istanbul (Turkey)" }, + { value: "Asia/Dubai", label: "Dubai (UAE)" }, + { value: "Asia/Muscat", label: "Muscat (Oman)" }, + { value: "Asia/Kuwait", label: "Kuwait" }, + { value: "Asia/Riyadh", label: "Riyadh (Saudi Arabia)" }, + { value: "Asia/Baghdad", label: "Baghdad (Iraq)" }, + { value: "Asia/Tehran", label: "Tehran (Iran)" }, + { value: "Asia/Kabul", label: "Kabul (Afghanistan)" }, + { value: "Asia/Karachi", label: "Karachi (Pakistan)" }, + { value: "Asia/Tashkent", label: "Tashkent (Uzbekistan)" }, + { value: "Asia/Dhaka", label: "Dhaka (Bangladesh)" }, + { value: "Asia/Kolkata", label: "Kolkata (India)" }, + { value: "Asia/Kathmandu", label: "Kathmandu (Nepal)" }, + { value: "Asia/Colombo", label: "Colombo (Sri Lanka)" }, + { value: "Asia/Yangon", label: "Yangon (Myanmar)" }, + { value: "Asia/Bangkok", label: "Bangkok (Thailand)" }, + { value: "Asia/Ho_Chi_Minh", label: "Ho Chi Minh City (Vietnam)" }, + { value: "Asia/Jakarta", label: "Jakarta (Indonesia - Western)" }, + { value: "Asia/Makassar", label: "Makassar (Indonesia - Central)" }, + { value: "Asia/Jayapura", label: 
"Jayapura (Indonesia - Eastern)" }, + { value: "Asia/Manila", label: "Manila (Philippines)" }, + { value: "Asia/Singapore", label: "Singapore" }, + { value: "Asia/Kuala_Lumpur", label: "Kuala Lumpur (Malaysia)" }, + { value: "Asia/Hong_Kong", label: "Hong Kong" }, + { value: "Asia/Shanghai", label: "Shanghai (China)" }, + { value: "Asia/Taipei", label: "Taipei (Taiwan)" }, + { value: "Asia/Seoul", label: "Seoul (South Korea)" }, + { value: "Asia/Tokyo", label: "Tokyo (Japan)" }, + { value: "Asia/Vladivostok", label: "Vladivostok (Russia)" }, + { value: "Asia/Yakutsk", label: "Yakutsk (Russia)" }, + { value: "Asia/Irkutsk", label: "Irkutsk (Russia)" }, + { value: "Asia/Yekaterinburg", label: "Yekaterinburg (Russia)" }, + { value: "Australia/Perth", label: "Perth (Australia - Western)" }, + { value: "Australia/Darwin", label: "Darwin (Australia - Northern)" }, + { value: "Australia/Adelaide", label: "Adelaide (Australia - Central)" }, + { value: "Australia/Brisbane", label: "Brisbane (Australia - Eastern)" }, + { value: "Australia/Sydney", label: "Sydney (Australia - Eastern)" }, + { value: "Australia/Melbourne", label: "Melbourne (Australia - Eastern)" }, + { value: "Australia/Hobart", label: "Hobart (Australia - Eastern)" }, + { value: "Pacific/Auckland", label: "Auckland (New Zealand)" }, + { value: "Pacific/Fiji", label: "Fiji" }, + { value: "Pacific/Guam", label: "Guam" }, + { value: "Pacific/Honolulu", label: "Honolulu (US - Hawaii)" }, + { value: "Pacific/Samoa", label: "Samoa" }, + { value: "Pacific/Tahiti", label: "Tahiti (French Polynesia)" }, + { value: "Africa/Cairo", label: "Cairo (Egypt)" }, + { value: "Africa/Johannesburg", label: "Johannesburg (South Africa)" }, + { value: "Africa/Lagos", label: "Lagos (Nigeria)" }, + { value: "Africa/Nairobi", label: "Nairobi (Kenya)" }, + { value: "Africa/Casablanca", label: "Casablanca (Morocco)" }, + { value: "Africa/Algiers", label: "Algiers (Algeria)" }, + { value: "Africa/Tunis", label: "Tunis (Tunisia)" }, + { 
value: "Africa/Addis_Ababa", label: "Addis Ababa (Ethiopia)" }, + { value: "Africa/Dar_es_Salaam", label: "Dar es Salaam (Tanzania)" }, + { value: "Africa/Kampala", label: "Kampala (Uganda)" }, + { value: "Africa/Khartoum", label: "Khartoum (Sudan)" }, + { value: "Africa/Accra", label: "Accra (Ghana)" }, + { value: "Africa/Abidjan", label: "Abidjan (Ivory Coast)" }, + { value: "Africa/Dakar", label: "Dakar (Senegal)" }, +]; diff --git a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/settings/page.tsx b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/settings/page.tsx index 679a145290..f0eb8a6b8c 100644 --- a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/settings/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/settings/page.tsx @@ -1,4 +1,5 @@ "use client"; + import { SettingsForm } from "@/app/(platform)/profile/(user)/settings/components/SettingsForm/SettingsForm"; import { useTimezoneDetection } from "@/app/(platform)/profile/(user)/settings/useTimezoneDetection"; import { @@ -6,49 +7,67 @@ import { useGetV1GetUserTimezone, } from "@/app/api/__generated__/endpoints/auth/auth"; import { Text } from "@/components/atoms/Text/Text"; +import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; -import { redirect } from "next/navigation"; import { useEffect } from "react"; import SettingsLoading from "./loading"; export default function SettingsPage() { + const { user } = useSupabase(); + const { data: preferences, isError: preferencesError, isLoading: preferencesLoading, + error: preferencesErrorData, + refetch: refetchPreferences, } = useGetV1GetNotificationPreferences({ - query: { select: (res) => (res.status === 200 ? res.data : null) }, + query: { + enabled: !!user, + select: (res) => (res.status === 200 ? 
res.data : null), + }, }); const { data: timezone, isLoading: timezoneLoading } = useGetV1GetUserTimezone({ query: { + enabled: !!user, select: (res) => { return res.status === 200 ? String(res.data.timezone) : "not-set"; }, }, }); - useTimezoneDetection(timezone); - - const { user, isUserLoading } = useSupabase(); + useTimezoneDetection(!!user ? timezone : undefined); useEffect(() => { document.title = "Settings – AutoGPT Platform"; }, []); - if (preferencesLoading || isUserLoading || timezoneLoading) { + if (preferencesError) { + return ( +
+ { + void refetchPreferences(); + }} + /> +
+ ); + } + + if (preferencesLoading || timezoneLoading || !user || !preferences) { return ; } - if (!user) { - redirect("/login"); - } - - if (preferencesError || !preferences || !preferences.preferences) { - return "Error..."; // TODO: Will use a Error reusable components from Block Menu redesign - } - return (
diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index 49ef8e607f..99db625c2f 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -3952,6 +3952,184 @@ "security": [{ "HTTPBearerJWT": [] }] } }, + "/api/review/pending": { + "get": { + "tags": [ + "v2", + "executions", + "review", + "executions", + "review", + "private" + ], + "summary": "Get Pending Reviews", + "description": "Get all pending reviews for the current user.\n\nRetrieves all reviews with status \"WAITING\" that belong to the authenticated user.\nResults are ordered by creation time (newest first).\n\nArgs:\n user_id: Authenticated user ID from security dependency\n\nReturns:\n List of pending review objects with status converted to typed literals\n\nRaises:\n HTTPException: If authentication fails or database error occurs\n\nNote:\n Reviews with invalid status values are logged as warnings but excluded\n from results rather than failing the entire request.", + "operationId": "getV2Get pending reviews", + "security": [{ "HTTPBearerJWT": [] }], + "parameters": [ + { + "name": "page", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "minimum": 1, + "description": "Page number (1-indexed)", + "default": 1, + "title": "Page" + }, + "description": "Page number (1-indexed)" + }, + { + "name": "page_size", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "maximum": 100, + "minimum": 1, + "description": "Number of reviews per page", + "default": 25, + "title": "Page Size" + }, + "description": "Number of reviews per page" + } + ], + "responses": { + "200": { + "description": "List of pending reviews", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PendingHumanReviewModel" + }, + "title": "Response Getv2Get Pending Reviews" + } + } + } + }, + "500": { + 
"description": "Server error", + "content": { "application/json": {} } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { "$ref": "#/components/schemas/HTTPValidationError" } + } + } + }, + "401": { + "$ref": "#/components/responses/HTTP401NotAuthenticatedError" + } + } + } + }, + "/api/review/execution/{graph_exec_id}": { + "get": { + "tags": [ + "v2", + "executions", + "review", + "executions", + "review", + "private" + ], + "summary": "Get Pending Reviews for Execution", + "description": "Get all pending reviews for a specific graph execution.\n\nRetrieves all reviews with status \"WAITING\" for the specified graph execution\nthat belong to the authenticated user. Results are ordered by creation time\n(oldest first) to preserve review order within the execution.\n\nArgs:\n graph_exec_id: ID of the graph execution to get reviews for\n user_id: Authenticated user ID from security dependency\n\nReturns:\n List of pending review objects for the specified execution\n\nRaises:\n HTTPException:\n - 403: If user doesn't own the graph execution\n - 500: If authentication fails or database error occurs\n\nNote:\n Only returns reviews owned by the authenticated user for security.\n Reviews with invalid status are excluded with warning logs.", + "operationId": "getV2Get pending reviews for execution", + "security": [{ "HTTPBearerJWT": [] }], + "parameters": [ + { + "name": "graph_exec_id", + "in": "path", + "required": true, + "schema": { "type": "string", "title": "Graph Exec Id" } + } + ], + "responses": { + "200": { + "description": "List of pending reviews for the execution", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PendingHumanReviewModel" + }, + "title": "Response Getv2Get Pending Reviews For Execution" + } + } + } + }, + "400": { "description": "Invalid graph execution ID" }, + "403": { "description": "Access denied to graph execution" }, 
+ "500": { + "description": "Server error", + "content": { "application/json": {} } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { "$ref": "#/components/schemas/HTTPValidationError" } + } + } + }, + "401": { + "$ref": "#/components/responses/HTTP401NotAuthenticatedError" + } + } + } + }, + "/api/review/action": { + "post": { + "tags": [ + "v2", + "executions", + "review", + "executions", + "review", + "private" + ], + "summary": "Process Review Action", + "description": "Process reviews with approve or reject actions.", + "operationId": "postV2ProcessReviewAction", + "requestBody": { + "content": { + "application/json": { + "schema": { "$ref": "#/components/schemas/ReviewRequest" } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { "$ref": "#/components/schemas/ReviewResponse" } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { "$ref": "#/components/schemas/HTTPValidationError" } + } + } + }, + "401": { + "$ref": "#/components/responses/HTTP401NotAuthenticatedError" + } + }, + "security": [{ "HTTPBearerJWT": [] }] + } + }, "/api/library/presets": { "get": { "tags": ["v2", "presets"], @@ -4324,7 +4502,7 @@ ], "responses": { "200": { - "description": "Successful Response", + "description": "List of library agents", "content": { "application/json": { "schema": { @@ -4622,7 +4800,7 @@ }, "/api/library/agents/marketplace/{store_listing_version_id}": { "get": { - "tags": ["v2", "library", "private", "store, library"], + "tags": ["v2", "library", "private", "store", "library"], "summary": "Get Agent By Store ID", "description": "Get Library Agent from Store Listing Version ID.", "operationId": "getV2Get agent by store id", @@ -4637,7 +4815,7 @@ ], "responses": { "200": { - "description": "Successful Response", + "description": "Library agent found", 
"content": { "application/json": { "schema": { @@ -4650,6 +4828,7 @@ } } }, + "404": { "description": "Agent not found" }, "422": { "description": "Validation Error", "content": { @@ -5137,7 +5316,8 @@ "RUNNING", "COMPLETED", "TERMINATED", - "FAILED" + "FAILED", + "REVIEW" ], "title": "AgentExecutionStatus" }, @@ -7356,6 +7536,114 @@ "required": ["total_items", "total_pages", "current_page", "page_size"], "title": "Pagination" }, + "PendingHumanReviewModel": { + "properties": { + "node_exec_id": { + "type": "string", + "title": "Node Exec Id", + "description": "Node execution ID (primary key)" + }, + "user_id": { + "type": "string", + "title": "User Id", + "description": "User ID associated with the review" + }, + "graph_exec_id": { + "type": "string", + "title": "Graph Exec Id", + "description": "Graph execution ID" + }, + "graph_id": { + "type": "string", + "title": "Graph Id", + "description": "Graph ID" + }, + "graph_version": { + "type": "integer", + "title": "Graph Version", + "description": "Graph version" + }, + "payload": { + "anyOf": [ + { "additionalProperties": true, "type": "object" }, + { "items": {}, "type": "array" }, + { "type": "string" }, + { "type": "integer" }, + { "type": "number" }, + { "type": "boolean" }, + { "type": "null" } + ], + "title": "Payload", + "description": "The actual data payload awaiting review" + }, + "instructions": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Instructions", + "description": "Instructions or message for the reviewer" + }, + "editable": { + "type": "boolean", + "title": "Editable", + "description": "Whether the reviewer can edit the data" + }, + "status": { + "$ref": "#/components/schemas/ReviewStatus", + "description": "Review status" + }, + "review_message": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Review Message", + "description": "Optional message from the reviewer" + }, + "was_edited": { + "anyOf": [{ "type": "boolean" }, { "type": "null" }], + 
"title": "Was Edited", + "description": "Whether the data was modified during review" + }, + "processed": { + "type": "boolean", + "title": "Processed", + "description": "Whether the review result has been processed by the execution engine", + "default": false + }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Created At", + "description": "When the review was created" + }, + "updated_at": { + "anyOf": [ + { "type": "string", "format": "date-time" }, + { "type": "null" } + ], + "title": "Updated At", + "description": "When the review was last updated" + }, + "reviewed_at": { + "anyOf": [ + { "type": "string", "format": "date-time" }, + { "type": "null" } + ], + "title": "Reviewed At", + "description": "When the review was completed" + } + }, + "type": "object", + "required": [ + "node_exec_id", + "user_id", + "graph_exec_id", + "graph_id", + "graph_version", + "payload", + "editable", + "status", + "created_at" + ], + "title": "PendingHumanReviewModel", + "description": "Response model for pending human review data.\n\nRepresents a human review request that is awaiting user action.\nContains all necessary information for a user to review and approve\nor reject data from a Human-in-the-Loop block execution.\n\nAttributes:\n id: Unique identifier for the review record\n user_id: ID of the user who must perform the review\n node_exec_id: ID of the node execution that created this review\n graph_exec_id: ID of the graph execution containing the node\n graph_id: ID of the graph template being executed\n graph_version: Version number of the graph template\n payload: The actual data payload awaiting review\n instructions: Instructions or message for the reviewer\n editable: Whether the reviewer can edit the data\n status: Current review status (WAITING, APPROVED, or REJECTED)\n review_message: Optional message from the reviewer\n created_at: Timestamp when review was created\n updated_at: Timestamp when review was last modified\n reviewed_at: 
Timestamp when review was completed (if applicable)" + }, "PostmarkBounceEnum": { "type": "integer", "enum": [ @@ -7825,6 +8113,92 @@ "required": ["credit_amount"], "title": "RequestTopUp" }, + "ReviewItem": { + "properties": { + "node_exec_id": { + "type": "string", + "title": "Node Exec Id", + "description": "Node execution ID to review" + }, + "approved": { + "type": "boolean", + "title": "Approved", + "description": "Whether this review is approved (True) or rejected (False)" + }, + "message": { + "anyOf": [ + { "type": "string", "maxLength": 2000 }, + { "type": "null" } + ], + "title": "Message", + "description": "Optional review message" + }, + "reviewed_data": { + "anyOf": [ + { "additionalProperties": true, "type": "object" }, + { "items": {}, "type": "array" }, + { "type": "string" }, + { "type": "integer" }, + { "type": "number" }, + { "type": "boolean" }, + { "type": "null" } + ], + "title": "Reviewed Data", + "description": "Optional edited data (ignored if approved=False)" + } + }, + "type": "object", + "required": ["node_exec_id", "approved"], + "title": "ReviewItem", + "description": "Single review item for processing." + }, + "ReviewRequest": { + "properties": { + "reviews": { + "items": { "$ref": "#/components/schemas/ReviewItem" }, + "type": "array", + "title": "Reviews", + "description": "All reviews with their approval status, data, and messages" + } + }, + "type": "object", + "required": ["reviews"], + "title": "ReviewRequest", + "description": "Request model for processing ALL pending reviews for an execution.\n\nThis request must include ALL pending reviews for a graph execution.\nEach review will be either approved (with optional data modifications)\nor rejected (data ignored). The execution will resume only after ALL reviews are processed." 
+ }, + "ReviewResponse": { + "properties": { + "approved_count": { + "type": "integer", + "title": "Approved Count", + "description": "Number of reviews successfully approved" + }, + "rejected_count": { + "type": "integer", + "title": "Rejected Count", + "description": "Number of reviews successfully rejected" + }, + "failed_count": { + "type": "integer", + "title": "Failed Count", + "description": "Number of reviews that failed processing" + }, + "error": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Error", + "description": "Error message if operation failed" + } + }, + "type": "object", + "required": ["approved_count", "rejected_count", "failed_count"], + "title": "ReviewResponse", + "description": "Response from review endpoint." + }, + "ReviewStatus": { + "type": "string", + "enum": ["WAITING", "APPROVED", "REJECTED"], + "title": "ReviewStatus" + }, "ReviewSubmissionRequest": { "properties": { "store_listing_version_id": { diff --git a/autogpt_platform/frontend/src/app/layout.tsx b/autogpt_platform/frontend/src/app/layout.tsx index dd73303b9e..ff2590dc61 100644 --- a/autogpt_platform/frontend/src/app/layout.tsx +++ b/autogpt_platform/frontend/src/app/layout.tsx @@ -6,6 +6,7 @@ import "./globals.css"; import { Providers } from "@/app/providers"; import { CookieConsentBanner } from "@/components/molecules/CookieConsentBanner/CookieConsentBanner"; +import { ErrorBoundary } from "@/components/molecules/ErrorBoundary/ErrorBoundary"; import TallyPopupSimple from "@/components/molecules/TallyPoup/TallyPopup"; import { Toaster } from "@/components/molecules/Toast/toaster"; import { SetupAnalytics } from "@/services/analytics"; @@ -54,29 +55,31 @@ export default async function RootLayout({ /> - -
- {children} - - + + +
+ {children} + + - {/* React Query DevTools is only available in development */} - {process.env.NEXT_PUBLIC_REACT_QUERY_DEVTOOL && ( - - )} -
- - -
+ {/* React Query DevTools is only available in development */} + {process.env.NEXT_PUBLIC_REACT_QUERY_DEVTOOL && ( + + )} +
+ + +
+ ); diff --git a/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePicker.tsx b/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePicker.tsx index 2554a1c4ab..0500d08549 100644 --- a/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePicker.tsx +++ b/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePicker.tsx @@ -1,8 +1,8 @@ "use client"; -import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/AgentRunsView/components/CredentialsInputs/CredentialsInputs"; +import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs"; import { Button } from "@/components/atoms/Button/Button"; -import { CircleNotchIcon } from "@phosphor-icons/react"; +import { CircleNotchIcon, FolderOpenIcon } from "@phosphor-icons/react"; import { Props, useGoogleDrivePicker } from "./useGoogleDrivePicker"; export function GoogleDrivePicker(props: Props) { @@ -12,28 +12,46 @@ export function GoogleDrivePicker(props: Props) { isAuthInProgress, isLoading, handleOpenPicker, + selectedCredential, + setSelectedCredential, } = useGoogleDrivePicker(props); if (!credentials || credentials.isLoading) { return ; } - if (!hasGoogleOAuth) + if (!hasGoogleOAuth) { return ( {}} + selectedCredentials={selectedCredential} + onSelectCredentials={setSelectedCredential} hideIfSingleCredentialAvailable /> ); + } + + const hasMultipleCredentials = + credentials.savedCredentials && credentials.savedCredentials.length > 1; return ( - +
+ {hasMultipleCredentials && ( + + )} + +
); } diff --git a/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput.tsx b/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput.tsx new file mode 100644 index 0000000000..506b5b87b8 --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput.tsx @@ -0,0 +1,137 @@ +import { Button } from "@/components/atoms/Button/Button"; +import { cn } from "@/lib/utils"; +import { Cross2Icon } from "@radix-ui/react-icons"; +import React, { useCallback } from "react"; +import { GoogleDrivePicker } from "./GoogleDrivePicker"; +import type { GoogleDrivePickerConfig } from "@/lib/autogpt-server-api/types"; + +export interface GoogleDrivePickerInputProps { + config: GoogleDrivePickerConfig; + value: any; + onChange: (value: any) => void; + error?: string; + className?: string; + showRemoveButton?: boolean; +} + +export function GoogleDrivePickerInput({ + config, + value, + onChange, + error, + className, + showRemoveButton = true, +}: GoogleDrivePickerInputProps) { + const [pickerError, setPickerError] = React.useState(null); + const isMultiSelect = config.multiselect || false; + const currentFiles = isMultiSelect + ? Array.isArray(value) + ? value + : [] + : value + ? [value] + : []; + + const handlePicked = useCallback( + (files: any[]) => { + // Clear any previous picker errors + setPickerError(null); + + // Convert to GoogleDriveFile format + const convertedFiles = files.map((f) => ({ + id: f.id, + name: f.name, + mimeType: f.mimeType, + url: f.url, + iconUrl: f.iconUrl, + isFolder: f.mimeType === "application/vnd.google-apps.folder", + })); + + // Store based on multiselect mode + const newValue = isMultiSelect ? 
convertedFiles : convertedFiles[0]; + onChange(newValue); + }, + [isMultiSelect, onChange], + ); + + const handleRemoveFile = useCallback( + (idx: number) => { + if (isMultiSelect) { + const newFiles = currentFiles.filter((_: any, i: number) => i !== idx); + onChange(newFiles); + } else { + onChange(null); + } + }, + [isMultiSelect, currentFiles, onChange], + ); + + const handleError = useCallback((error: any) => { + console.error("Google Drive Picker error:", error); + setPickerError(error instanceof Error ? error.message : String(error)); + }, []); + + return ( +
+ {/* Picker Button */} + { + // User canceled - no action needed + }} + onError={handleError} + /> + + {/* Display Selected Files */} + {currentFiles.length > 0 && ( +
+ {currentFiles.map((file: any, idx: number) => ( +
+
+ {file.iconUrl && ( + + )} + + {file.name || file.id} + +
+ + {showRemoveButton && ( + + )} +
+ ))} +
+ )} + + {/* Error Messages */} + {error && {error}} + {pickerError && ( + {pickerError} + )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/useGoogleDrivePicker.ts b/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/useGoogleDrivePicker.ts index 359d7e4596..323cd6d9d6 100644 --- a/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/useGoogleDrivePicker.ts +++ b/autogpt_platform/frontend/src/components/contextual/GoogleDrivePicker/useGoogleDrivePicker.ts @@ -1,6 +1,10 @@ +import { getGetV1GetSpecificCredentialByIdQueryOptions } from "@/app/api/__generated__/endpoints/integrations/integrations"; +import type { OAuth2Credentials } from "@/app/api/__generated__/models/oAuth2Credentials"; import { useToast } from "@/components/molecules/Toast/use-toast"; import useCredentials from "@/hooks/useCredentials"; -import { useMemo, useRef, useState } from "react"; +import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types"; +import { useQueryClient } from "@tanstack/react-query"; +import { useEffect, useMemo, useRef, useState } from "react"; import { getCredentialsSchema, GooglePickerView, @@ -54,10 +58,15 @@ export function useGoogleDrivePicker(options: Props) { const requestedScopes = options?.scopes || defaultScopes; const [isLoading, setIsLoading] = useState(false); const [isAuthInProgress, setIsAuthInProgress] = useState(false); + const [hasInsufficientScopes, setHasInsufficientScopes] = useState(false); + const [selectedCredential, setSelectedCredential] = useState< + CredentialsMetaInput | undefined + >(); const accessTokenRef = useRef(null); const tokenClientRef = useRef(null); const pickerReadyRef = useRef(false); const credentials = useCredentials(getCredentialsSchema(requestedScopes)); + const queryClient = useQueryClient(); const isReady = pickerReadyRef.current && !!tokenClientRef.current; const { toast } = useToast(); @@ -66,10 +75,109 @@ export function useGoogleDrivePicker(options: Props) { return credentials.savedCredentials?.length > 0; }, 
[credentials]); + useEffect(() => { + if ( + hasGoogleOAuth && + credentials && + !credentials.isLoading && + credentials.savedCredentials?.length > 0 + ) { + setHasInsufficientScopes(false); + } + }, [hasGoogleOAuth, credentials]); + + useEffect(() => { + if ( + credentials && + !credentials.isLoading && + credentials.savedCredentials?.length === 1 && + !selectedCredential + ) { + setSelectedCredential({ + id: credentials.savedCredentials[0].id, + type: credentials.savedCredentials[0].type, + provider: credentials.savedCredentials[0].provider, + title: credentials.savedCredentials[0].title, + }); + } + }, [credentials, selectedCredential]); + async function openPicker() { try { await ensureLoaded(); - console.log(accessTokenRef.current); + + if ( + hasGoogleOAuth && + credentials && + !credentials.isLoading && + credentials.savedCredentials?.length > 0 + ) { + const credentialId = + selectedCredential?.id || credentials.savedCredentials[0].id; + + try { + const queryOptions = getGetV1GetSpecificCredentialByIdQueryOptions( + "google", + credentialId, + ); + + const response = await queryClient.fetchQuery(queryOptions); + + if (response.status === 200 && response.data) { + const cred = response.data; + if (cred.type === "oauth2") { + const oauthCred = cred as OAuth2Credentials; + if (oauthCred.access_token) { + const credentialScopes = new Set(oauthCred.scopes || []); + const requiredScopesSet = new Set(requestedScopes); + const hasRequiredScopes = Array.from(requiredScopesSet).every( + (scope) => credentialScopes.has(scope), + ); + + if (!hasRequiredScopes) { + const error = new Error( + "The saved Google OAuth credentials do not have the required permissions. 
Please sign in again with the correct permissions.", + ); + toast({ + title: "Insufficient Permissions", + description: error.message, + variant: "destructive", + }); + setHasInsufficientScopes(true); + if (onError) onError(error); + return; + } + + accessTokenRef.current = oauthCred.access_token; + buildAndShowPicker(oauthCred.access_token); + return; + } + } + } + + const error = new Error( + "Failed to retrieve Google OAuth credentials. Please try signing in again.", + ); + if (onError) onError(error); + return; + } catch (err) { + const error = + err instanceof Error + ? err + : new Error("Failed to fetch Google OAuth credentials"); + + toast({ + title: "Authentication Error", + description: error.message, + variant: "destructive", + }); + + if (onError) onError(error); + + return; + } + } + const token = accessTokenRef.current || (await requestAccessToken()); buildAndShowPicker(token); } catch (e) { @@ -195,6 +303,9 @@ export function useGoogleDrivePicker(options: Props) { isAuthInProgress, handleOpenPicker: openPicker, credentials, - hasGoogleOAuth, + hasGoogleOAuth: hasInsufficientScopes ? 
false : hasGoogleOAuth, + accessToken: accessTokenRef.current, + selectedCredential, + setSelectedCredential, }; } diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/AccountMenu/AccountMenu.tsx b/autogpt_platform/frontend/src/components/layout/Navbar/components/AccountMenu/AccountMenu.tsx index 34081bf426..aeca460b38 100644 --- a/autogpt_platform/frontend/src/components/layout/Navbar/components/AccountMenu/AccountMenu.tsx +++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/AccountMenu/AccountMenu.tsx @@ -3,15 +3,16 @@ import { PopoverContent, PopoverTrigger, } from "@/components/__legacy__/ui/popover"; -import Link from "next/link"; -import * as React from "react"; -import { getAccountMenuOptionIcon, MenuItemGroup } from "../../helpers"; -import { AccountLogoutOption } from "./components/AccountLogoutOption"; -import { PublishAgentModal } from "@/components/contextual/PublishAgentModal/PublishAgentModal"; +import { Skeleton } from "@/components/__legacy__/ui/skeleton"; import Avatar, { AvatarFallback, AvatarImage, } from "@/components/atoms/Avatar/Avatar"; +import { PublishAgentModal } from "@/components/contextual/PublishAgentModal/PublishAgentModal"; +import Link from "next/link"; +import * as React from "react"; +import { getAccountMenuOptionIcon, MenuItemGroup } from "../../helpers"; +import { AccountLogoutOption } from "./components/AccountLogoutOption"; interface Props { userName?: string; @@ -19,6 +20,7 @@ interface Props { avatarSrc?: string; hideNavBarUsername?: boolean; menuItemGroups: MenuItemGroup[]; + isLoading?: boolean; } export function AccountMenu({ @@ -26,6 +28,7 @@ export function AccountMenu({ userEmail, avatarSrc, menuItemGroups, + isLoading = false, }: Props) { const popupId = React.useId(); @@ -63,15 +66,24 @@ export function AccountMenu({
-
- {userName} -
-
- {userEmail} -
+ {isLoading || !userName || !userEmail ? ( + <> + + + + ) : ( + <> +
+ {userName} +
+
+ {userEmail} +
+ + )}
diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/AccountMenu/components/AccountLogoutOption.tsx b/autogpt_platform/frontend/src/components/layout/Navbar/components/AccountMenu/components/AccountLogoutOption.tsx index ec6f65d0a6..71bc67613e 100644 --- a/autogpt_platform/frontend/src/components/layout/Navbar/components/AccountMenu/components/AccountLogoutOption.tsx +++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/AccountMenu/components/AccountLogoutOption.tsx @@ -5,17 +5,20 @@ import { useToast } from "@/components/molecules/Toast/use-toast"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { cn } from "@/lib/utils"; import * as Sentry from "@sentry/nextjs"; +import { useRouter } from "next/navigation"; import { useState } from "react"; export function AccountLogoutOption() { const [isLoggingOut, setIsLoggingOut] = useState(false); const supabase = useSupabase(); + const router = useRouter(); const { toast } = useToast(); async function handleLogout() { setIsLoggingOut(true); try { await supabase.logOut(); + router.push("/login"); } catch (e) { Sentry.captureException(e); toast({ @@ -25,7 +28,9 @@ export function AccountLogoutOption() { variant: "destructive", }); } finally { - setIsLoggingOut(false); + setTimeout(() => { + setIsLoggingOut(false); + }, 3000); } } diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityItem.tsx b/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityItem.tsx index 2d4e80a6b6..ff5983ab25 100644 --- a/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityItem.tsx +++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityItem.tsx @@ -4,11 +4,12 @@ import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecut import { Text } 
from "@/components/atoms/Text/Text"; import { CheckCircle, - CircleNotchIcon, + CircleNotch, Clock, - WarningOctagonIcon, + WarningOctagon, StopCircle, CircleDashed, + Eye, } from "@phosphor-icons/react"; import type { AgentExecutionWithInfo } from "../helpers"; import { getExecutionDuration } from "../helpers"; @@ -26,7 +27,7 @@ export function ActivityItem({ execution }: Props) { return ; case AgentExecutionStatus.RUNNING: return ( - ); case AgentExecutionStatus.FAILED: - return ; + return ; case AgentExecutionStatus.TERMINATED: return ( ); case AgentExecutionStatus.INCOMPLETE: return ; + case AgentExecutionStatus.REVIEW: + return ; default: return null; } @@ -52,12 +55,15 @@ export function ActivityItem({ execution }: Props) { function getTimeDisplay() { const isActiveStatus = execution.status === AgentExecutionStatus.RUNNING || - execution.status === AgentExecutionStatus.QUEUED; + execution.status === AgentExecutionStatus.QUEUED || + execution.status === AgentExecutionStatus.REVIEW; if (isActiveStatus) { const timeAgo = formatTimeAgo(execution.started_at.toString()); - const statusText = - execution.status === AgentExecutionStatus.QUEUED ? "queued" : "running"; + let statusText = "running"; + if (execution.status === AgentExecutionStatus.QUEUED) { + statusText = "queued"; + } return `Started ${timeAgo}, ${getExecutionDuration(execution)} ${statusText}`; } @@ -72,6 +78,8 @@ export function ActivityItem({ execution }: Props) { return `Stopped ${timeAgo}`; case AgentExecutionStatus.INCOMPLETE: return `Incomplete ${timeAgo}`; + case AgentExecutionStatus.REVIEW: + return `In review ${timeAgo}`; default: return `Ended ${timeAgo}`; } @@ -80,7 +88,10 @@ export function ActivityItem({ execution }: Props) { return "Unknown"; } - const linkUrl = `/library/agents/${execution.library_agent_id}?executionId=${execution.id}`; + // Determine the tab based on execution status + const tabParam = + execution.status === AgentExecutionStatus.REVIEW ? 
"&tab=reviews" : ""; + const linkUrl = `/library/agents/${execution.library_agent_id}?executionId=${execution.id}${tabParam}`; const withExecutionLink = execution.library_agent_id && execution.id; const content = ( diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/helpers.tsx b/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/helpers.tsx index 79834010f4..ab2d5a9d03 100644 --- a/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/helpers.tsx +++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/helpers.tsx @@ -114,7 +114,8 @@ export function isActiveExecution( const status = execution.status; return ( status === AgentExecutionStatus.RUNNING || - status === AgentExecutionStatus.QUEUED + status === AgentExecutionStatus.QUEUED || + status === AgentExecutionStatus.REVIEW ); } diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/NavbarView.tsx b/autogpt_platform/frontend/src/components/layout/Navbar/components/NavbarView.tsx index f1e5a25fe6..5cc13572b3 100644 --- a/autogpt_platform/frontend/src/components/layout/Navbar/components/NavbarView.tsx +++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/NavbarView.tsx @@ -26,12 +26,19 @@ export function NavbarView({ isLoggedIn, previewBranchName }: NavbarViewProps) { const dynamicMenuItems = getAccountMenuItems(user?.role); const isChatEnabled = useGetFlag(Flag.CHAT); - const { data: profile } = useGetV2GetUserProfile({ - query: { - select: (res) => (res.status === 200 ? res.data : null), - enabled: isLoggedIn, + const { data: profile, isLoading: isProfileLoading } = useGetV2GetUserProfile( + { + query: { + select: (res) => (res.status === 200 ? 
res.data : null), + enabled: isLoggedIn && !!user, + // Include user ID in query key to ensure cache invalidation when user changes + queryKey: ["/api/store/profile", user?.id], + }, }, - }); + ); + + const { isUserLoading } = useSupabase(); + const isLoadingProfile = isProfileLoading || isUserLoading; const linksWithChat = useMemo(() => { const chatLink = { name: "Chat", href: "/chat" }; @@ -84,6 +91,7 @@ export function NavbarView({ isLoggedIn, previewBranchName }: NavbarViewProps) { userEmail={profile?.name} avatarSrc={profile?.avatar_url ?? ""} menuItemGroups={dynamicMenuItems} + isLoading={isLoadingProfile} />
diff --git a/autogpt_platform/frontend/src/components/molecules/ErrorBoundary/ErrorBoundary.tsx b/autogpt_platform/frontend/src/components/molecules/ErrorBoundary/ErrorBoundary.tsx new file mode 100644 index 0000000000..879209f692 --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ErrorBoundary/ErrorBoundary.tsx @@ -0,0 +1,78 @@ +"use client"; + +import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import * as Sentry from "@sentry/nextjs"; +import { Component, type ReactNode } from "react"; + +interface ErrorBoundaryProps { + children: ReactNode; + fallback?: ReactNode; + context?: string; + onReset?: () => void; +} + +interface ErrorBoundaryState { + hasError: boolean; + error: Error | null; +} + +export class ErrorBoundary extends Component< + ErrorBoundaryProps, + ErrorBoundaryState +> { + constructor(props: ErrorBoundaryProps) { + super(props); + this.state = { hasError: false, error: null }; + } + + static getDerivedStateFromError(error: Error): ErrorBoundaryState { + return { hasError: true, error }; + } + + componentDidCatch(error: Error, errorInfo: React.ErrorInfo) { + Sentry.captureException(error, { + contexts: { + react: { + componentStack: errorInfo.componentStack, + }, + }, + tags: { + errorBoundary: "true", + context: this.props.context || "application", + }, + }); + } + + handleReset = () => { + this.setState({ hasError: false, error: null }); + if (this.props.onReset) { + this.props.onReset(); + } + }; + + render() { + if (this.state.hasError && this.state.error) { + if (this.props.fallback) { + return this.props.fallback; + } + + return ( +
+
+ +
+
+ ); + } + + return this.props.children; + } +} diff --git a/autogpt_platform/frontend/src/components/organisms/FloatingReviewsPanel/FloatingReviewsPanel.tsx b/autogpt_platform/frontend/src/components/organisms/FloatingReviewsPanel/FloatingReviewsPanel.tsx new file mode 100644 index 0000000000..feb4da96fe --- /dev/null +++ b/autogpt_platform/frontend/src/components/organisms/FloatingReviewsPanel/FloatingReviewsPanel.tsx @@ -0,0 +1,92 @@ +import { useState, useEffect } from "react"; +import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/PendingReviewsList"; +import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews"; +import { Button } from "@/components/atoms/Button/Button"; +import { ClockIcon, XIcon } from "@phosphor-icons/react"; +import { cn } from "@/lib/utils"; +import { Text } from "@/components/atoms/Text/Text"; +import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; +import { useGraphStore } from "@/app/(platform)/build/stores/graphStore"; + +interface FloatingReviewsPanelProps { + executionId?: string; + className?: string; +} + +export function FloatingReviewsPanel({ + executionId, + className, +}: FloatingReviewsPanelProps) { + const [isOpen, setIsOpen] = useState(false); + + const executionStatus = useGraphStore((state) => state.graphExecutionStatus); + + const { pendingReviews, isLoading, refetch } = usePendingReviewsForExecution( + executionId || "", + ); + + useEffect(() => { + if (executionStatus === AgentExecutionStatus.REVIEW && executionId) { + refetch(); + } + }, [executionStatus, executionId, refetch]); + + if ( + !executionId || + (!isLoading && pendingReviews.length === 0) || + executionStatus !== AgentExecutionStatus.REVIEW + ) { + return null; + } + + function handleReviewComplete() { + refetch(); + setIsOpen(false); + } + + return ( +
+ {!isOpen && pendingReviews.length > 0 && ( + + )} + + {isOpen && ( +
+
+
+ + Pending Reviews +
+ +
+ +
+ {isLoading ? ( +
+ + Loading reviews... + +
+ ) : ( + + )} +
+
+ )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/components/organisms/PendingReviewCard/PendingReviewCard.tsx b/autogpt_platform/frontend/src/components/organisms/PendingReviewCard/PendingReviewCard.tsx new file mode 100644 index 0000000000..b5094dd4cd --- /dev/null +++ b/autogpt_platform/frontend/src/components/organisms/PendingReviewCard/PendingReviewCard.tsx @@ -0,0 +1,210 @@ +import { PendingHumanReviewModel } from "@/app/api/__generated__/models/pendingHumanReviewModel"; +import { Text } from "@/components/atoms/Text/Text"; +import { Button } from "@/components/atoms/Button/Button"; +import { Input } from "@/components/atoms/Input/Input"; +import { Switch } from "@/components/atoms/Switch/Switch"; +import { TrashIcon, EyeSlashIcon } from "@phosphor-icons/react"; +import { useState } from "react"; + +interface StructuredReviewPayload { + data: unknown; + instructions?: string; +} + +function isStructuredReviewPayload( + payload: unknown, +): payload is StructuredReviewPayload { + return ( + payload !== null && + typeof payload === "object" && + "data" in payload && + (typeof (payload as any).instructions === "string" || + (payload as any).instructions === undefined) + ); +} + +function extractReviewData(payload: unknown): { + data: unknown; + instructions?: string; +} { + if (isStructuredReviewPayload(payload)) { + return { + data: payload.data, + instructions: payload.instructions, + }; + } + + return { data: payload }; +} + +interface PendingReviewCardProps { + review: PendingHumanReviewModel; + onReviewDataChange: (nodeExecId: string, data: string) => void; + reviewMessage: string; + onReviewMessageChange: (nodeExecId: string, message: string) => void; + isDisabled: boolean; + onToggleDisabled: (nodeExecId: string) => void; +} + +export function PendingReviewCard({ + review, + onReviewDataChange, + reviewMessage, + onReviewMessageChange, + isDisabled, + onToggleDisabled, +}: PendingReviewCardProps) { + const extractedData = 
extractReviewData(review.payload); + const isDataEditable = review.editable; + const instructions = extractedData.instructions || review.instructions; + const [currentData, setCurrentData] = useState(extractedData.data); + + const handleDataChange = (newValue: unknown) => { + setCurrentData(newValue); + onReviewDataChange(review.node_exec_id, JSON.stringify(newValue, null, 2)); + }; + + const handleMessageChange = (newMessage: string) => { + onReviewMessageChange(review.node_exec_id, newMessage); + }; + + const renderDataInput = () => { + const data = currentData; + + if (typeof data === "string") { + return ( + handleDataChange(e.target.value)} + placeholder="Enter text" + /> + ); + } else if (typeof data === "number") { + return ( + handleDataChange(Number(e.target.value))} + placeholder="Enter number" + /> + ); + } else if (typeof data === "boolean") { + return ( +
+ + {data ? "Enabled" : "Disabled"} + + handleDataChange(checked)} + /> +
+ ); + } else { + return ( + { + try { + const parsed = JSON.parse(e.target.value); + handleDataChange(parsed); + } catch {} + }} + placeholder="Edit JSON data" + className="font-mono text-sm" + /> + ); + } + }; + + return ( +
+
+
+ {isDisabled && ( + + This item will be rejected + + )} +
+ +
+ + {instructions && ( +
+ + Instructions: + + {instructions} +
+ )} + +
+ + Data to Review: + {!isDataEditable && ( + + (Read-only) + + )} + + {isDataEditable && !isDisabled ? ( + renderDataInput() + ) : ( +
+ + {JSON.stringify(currentData, null, 2)} + +
+ )} +
+ + {isDisabled && ( +
+ + Rejection Reason (Optional): + + handleMessageChange(e.target.value)} + placeholder="Add any notes about why you're rejecting this..." + /> +
+ )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/components/organisms/PendingReviewsList/PendingReviewsList.tsx b/autogpt_platform/frontend/src/components/organisms/PendingReviewsList/PendingReviewsList.tsx new file mode 100644 index 0000000000..320d84e91f --- /dev/null +++ b/autogpt_platform/frontend/src/components/organisms/PendingReviewsList/PendingReviewsList.tsx @@ -0,0 +1,248 @@ +import { useState } from "react"; +import { PendingHumanReviewModel } from "@/app/api/__generated__/models/pendingHumanReviewModel"; +import { PendingReviewCard } from "@/components/organisms/PendingReviewCard/PendingReviewCard"; +import { Text } from "@/components/atoms/Text/Text"; +import { Button } from "@/components/atoms/Button/Button"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { ClockIcon, PlayIcon, XIcon, CheckIcon } from "@phosphor-icons/react"; +import { usePostV2ProcessReviewAction } from "@/app/api/__generated__/endpoints/executions/executions"; + +interface PendingReviewsListProps { + reviews: PendingHumanReviewModel[]; + onReviewComplete?: () => void; + emptyMessage?: string; +} + +export function PendingReviewsList({ + reviews, + onReviewComplete, + emptyMessage = "No pending reviews", +}: PendingReviewsListProps) { + const [reviewDataMap, setReviewDataMap] = useState>( + () => { + const initialData: Record = {}; + reviews.forEach((review) => { + initialData[review.node_exec_id] = JSON.stringify( + review.payload, + null, + 2, + ); + }); + return initialData; + }, + ); + + const [reviewMessageMap, setReviewMessageMap] = useState< + Record + >({}); + const [disabledReviews, setDisabledReviews] = useState>( + new Set(), + ); + + const { toast } = useToast(); + + const reviewActionMutation = usePostV2ProcessReviewAction({ + mutation: { + onSuccess: (data: any) => { + if (data.status !== 200) { + toast({ + title: "Failed to process reviews", + description: "Unexpected response from server", + variant: "destructive", + }); + return; + } + 
+ const response = data.data; + + if (response.failed_count > 0) { + toast({ + title: "Reviews partially processed", + description: `${response.approved_count + response.rejected_count} succeeded, ${response.failed_count} failed. ${response.error || "Some reviews could not be processed."}`, + variant: "destructive", + }); + } else { + toast({ + title: "Reviews processed successfully", + description: `${response.approved_count} approved, ${response.rejected_count} rejected`, + variant: "default", + }); + } + + onReviewComplete?.(); + }, + onError: (error: Error) => { + toast({ + title: "Failed to process reviews", + description: error.message || "An error occurred", + variant: "destructive", + }); + }, + }, + }); + + function handleReviewDataChange(nodeExecId: string, data: string) { + setReviewDataMap((prev) => ({ ...prev, [nodeExecId]: data })); + } + + function handleReviewMessageChange(nodeExecId: string, message: string) { + setReviewMessageMap((prev) => ({ ...prev, [nodeExecId]: message })); + } + + function handleToggleDisabled(nodeExecId: string) { + setDisabledReviews((prev) => { + const newSet = new Set(prev); + if (newSet.has(nodeExecId)) { + newSet.delete(nodeExecId); + } else { + newSet.add(nodeExecId); + } + return newSet; + }); + } + + function handleApproveAll() { + setDisabledReviews(new Set()); + } + + function handleRejectAll() { + const allReviewIds = reviews.map((review) => review.node_exec_id); + setDisabledReviews(new Set(allReviewIds)); + } + + function handleContinue() { + if (reviews.length === 0) { + toast({ + title: "No reviews to process", + description: "No reviews found to process.", + variant: "destructive", + }); + return; + } + + const reviewItems = []; + + for (const review of reviews) { + const isApproved = !disabledReviews.has(review.node_exec_id); + const reviewData = reviewDataMap[review.node_exec_id]; + const reviewMessage = reviewMessageMap[review.node_exec_id]; + + let parsedData; + if (isApproved && review.editable && 
reviewData) { + try { + parsedData = JSON.parse(reviewData); + if (JSON.stringify(parsedData) === JSON.stringify(review.payload)) { + parsedData = undefined; + } + } catch (error) { + toast({ + title: "Invalid JSON", + description: `Please fix the JSON format in review for node ${review.node_exec_id}: ${error instanceof Error ? error.message : "Invalid syntax"}`, + variant: "destructive", + }); + return; + } + } + + reviewItems.push({ + node_exec_id: review.node_exec_id, + approved: isApproved, + reviewed_data: isApproved ? parsedData : undefined, + message: reviewMessage || undefined, + }); + } + + reviewActionMutation.mutate({ + data: { + reviews: reviewItems, + }, + }); + } + + if (reviews.length === 0) { + return ( +
+ + + {emptyMessage} + + + When agents have human-in-the-loop blocks, they will appear here for + your review and approval. + +
+ ); + } + + return ( +
+
+ {reviews.map((review) => ( + + ))} +
+ +
+
+ + +
+ +
+
+ + {disabledReviews.size > 0 ? ( + <> + Approve {reviews.length - disabledReviews.size}, reject{" "} + {disabledReviews.size} of {reviews.length} items + + ) : ( + <>Approve all {reviews.length} items + )} + +
+ +
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx b/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx index f3b6731aa6..f8c46e7d1a 100644 --- a/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx +++ b/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx @@ -62,6 +62,9 @@ export default function useAgentGraph( const [graphExecutionError, setGraphExecutionError] = useState( null, ); + const [graphExecutionStatus, setGraphExecutionStatus] = useState< + string | null + >(null); const [xyNodes, setXYNodes] = useState([]); const [xyEdges, setXYEdges] = useState([]); const { state, completeStep, incrementRuns } = useOnboarding(); @@ -358,11 +361,12 @@ export default function useAgentGraph( const statusRank = { RUNNING: 0, - QUEUED: 1, - INCOMPLETE: 2, - TERMINATED: 3, - COMPLETED: 4, - FAILED: 5, + REVIEW: 1, + QUEUED: 2, + INCOMPLETE: 3, + TERMINATED: 4, + COMPLETED: 5, + FAILED: 6, }; const status = executionResults .map((v) => v.status) @@ -476,7 +480,8 @@ export default function useAgentGraph( flowExecutionID, ); - // Set graph execution error from the initial fetch + // Set graph execution status and error from the initial fetch + setGraphExecutionStatus(execution.status); if (execution.status === "FAILED") { setGraphExecutionError( execution.stats?.error || @@ -545,10 +550,14 @@ export default function useAgentGraph( }); } } + // Update the execution status + setGraphExecutionStatus(graphExec.status); + if ( graphExec.status === "COMPLETED" || graphExec.status === "TERMINATED" || - graphExec.status === "FAILED" + graphExec.status === "FAILED" || + graphExec.status === "REVIEW" ) { cancelGraphExecListener(); setIsRunning(false); @@ -735,7 +744,6 @@ export default function useAgentGraph( ]); const saveAgent = useCallback(async () => { - console.log("saveAgent"); setIsSaving(true); try { await _saveAgent(); @@ -968,6 +976,7 @@ export default function useAgentGraph( isStopping, isScheduling, graphExecutionError, + graphExecutionStatus, 
nodes: xyNodes, setNodes: setXYNodes, edges: xyEdges, diff --git a/autogpt_platform/frontend/src/hooks/usePendingReviews.ts b/autogpt_platform/frontend/src/hooks/usePendingReviews.ts new file mode 100644 index 0000000000..111b50a491 --- /dev/null +++ b/autogpt_platform/frontend/src/hooks/usePendingReviews.ts @@ -0,0 +1,26 @@ +import { + useGetV2GetPendingReviews, + useGetV2GetPendingReviewsForExecution, +} from "@/app/api/__generated__/endpoints/executions/executions"; + +export function usePendingReviews() { + const query = useGetV2GetPendingReviews(); + + return { + pendingReviews: (query.data?.status === 200 ? query.data.data : []) || [], + isLoading: query.isLoading, + error: query.error, + refetch: query.refetch, + }; +} + +export function usePendingReviewsForExecution(graphExecId: string) { + const query = useGetV2GetPendingReviewsForExecution(graphExecId); + + return { + pendingReviews: (query.data?.status === 200 ? query.data.data : []) || [], + isLoading: query.isLoading, + error: query.error, + refetch: query.refetch, + }; +} diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index ea82e34d3e..aa657ff4a7 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -80,6 +80,7 @@ export enum DataType { KEY_VALUE = "key-value", ARRAY = "array", TABLE = "table", + GOOGLE_DRIVE_PICKER = "google-drive-picker", } export type BlockIOSubSchemaMeta = { @@ -116,6 +117,43 @@ export type BlockIOArraySubSchema = BlockIOSubSchemaMeta & { secret?: boolean; }; +export type GoogleDriveFile = { + id: string; + name?: string; + mimeType?: string; + url?: string; + iconUrl?: string; + isFolder?: boolean; +}; + +/** Valid view types for Google Drive Picker - matches backend AttachmentView */ +export type AttachmentView = + | "DOCS" + | "DOCUMENTS" + | "SPREADSHEETS" + | "PRESENTATIONS" + | "DOCS_IMAGES" + 
| "FOLDERS"; + +export type GoogleDrivePickerConfig = { + multiselect?: boolean; + allow_folder_selection?: boolean; + allowed_views?: AttachmentView[]; + allowed_mime_types?: string[]; + scopes?: string[]; +}; + +/** + * Schema for Google Drive Picker input fields. + * When multiselect=false: type="object" (single GoogleDriveFile) + * When multiselect=true: type="array" with items={ type="object" } (array of GoogleDriveFile) + */ +export type GoogleDrivePickerSchema = BlockIOSubSchemaMeta & { + type: "object" | "array"; + format: "google-drive-picker"; + google_drive_picker_config?: GoogleDrivePickerConfig; +}; + // Table cell values are typically primitives export type TableCellValue = string | number | boolean | null; @@ -277,7 +315,8 @@ export type GraphExecutionMeta = { | "COMPLETED" | "TERMINATED" | "FAILED" - | "INCOMPLETE"; + | "INCOMPLETE" + | "REVIEW"; started_at: Date; ended_at: Date; stats: { @@ -414,7 +453,8 @@ export type NodeExecutionResult = { | "RUNNING" | "COMPLETED" | "TERMINATED" - | "FAILED"; + | "FAILED" + | "REVIEW"; input_data: Record; output_data: Record>; add_time: Date; @@ -1151,6 +1191,13 @@ export function determineDataType(schema: BlockIOSubSchema): DataType { return DataType.CREDENTIALS; } + if ( + "google_drive_picker_config" in schema || + ("format" in schema && schema.format === "google-drive-picker") + ) { + return DataType.GOOGLE_DRIVE_PICKER; + } + // enum == SELECT if ("enum" in schema) { return DataType.SELECT; diff --git a/autogpt_platform/frontend/src/lib/supabase/hooks/helpers.ts b/autogpt_platform/frontend/src/lib/supabase/hooks/helpers.ts index aaa17db76e..cce4f7a769 100644 --- a/autogpt_platform/frontend/src/lib/supabase/hooks/helpers.ts +++ b/autogpt_platform/frontend/src/lib/supabase/hooks/helpers.ts @@ -51,9 +51,11 @@ export async function fetchUser(): Promise { const { user, error } = await getCurrentUser(); if (error || !user) { + // Only mark as loaded if we got an explicit error (not just no user) + // This allows 
retrying when cookies aren't ready yet after login return { user: null, - hasLoadedUser: true, + hasLoadedUser: !!error, // Only true if there was an error, not just no user isUserLoading: false, }; } @@ -68,7 +70,7 @@ export async function fetchUser(): Promise { console.error("Get user error:", error); return { user: null, - hasLoadedUser: true, + hasLoadedUser: true, // Error means we tried and failed, so mark as loaded isUserLoading: false, }; } diff --git a/autogpt_platform/frontend/src/lib/supabase/hooks/useSupabase.ts b/autogpt_platform/frontend/src/lib/supabase/hooks/useSupabase.ts index 81ad1961ea..41fdee25a2 100644 --- a/autogpt_platform/frontend/src/lib/supabase/hooks/useSupabase.ts +++ b/autogpt_platform/frontend/src/lib/supabase/hooks/useSupabase.ts @@ -44,7 +44,6 @@ export function useSupabase() { return logOut({ options, api, - router, }); } diff --git a/autogpt_platform/frontend/src/lib/supabase/hooks/useSupabaseStore.ts b/autogpt_platform/frontend/src/lib/supabase/hooks/useSupabaseStore.ts index 90536285a5..dcc6029668 100644 --- a/autogpt_platform/frontend/src/lib/supabase/hooks/useSupabaseStore.ts +++ b/autogpt_platform/frontend/src/lib/supabase/hooks/useSupabaseStore.ts @@ -72,10 +72,29 @@ export const useSupabaseStore = create((set, get) => { if (!initializationPromise) { initializationPromise = (async () => { - if (!get().hasLoadedUser) { + // Always fetch user if we haven't loaded it yet, or if user is null but hasLoadedUser is true + // This handles the case where hasLoadedUser might be stale after logout/login + if (!get().hasLoadedUser || !get().user) { set({ isUserLoading: true }); const result = await fetchUser(); set(result); + + // If fetchUser didn't return a user, validate the session to ensure we have the latest state + // This handles race conditions after login where cookies might not be immediately available + if (!result.user) { + const validationResult = await validateSessionHelper({ + pathname: params.pathname, + currentUser: 
null, + }); + + if (validationResult.user && validationResult.isValid) { + set({ + user: validationResult.user, + hasLoadedUser: true, + isUserLoading: false, + }); + } + } } else { set({ isUserLoading: false }); } @@ -104,7 +123,6 @@ export const useSupabaseStore = create((set, get) => { } async function logOut(params?: LogOutParams): Promise { - const router = params?.router ?? get().routerRef; const api = params?.api ?? get().apiRef; const options = params?.options ?? {}; @@ -122,17 +140,20 @@ export const useSupabaseStore = create((set, get) => { broadcastLogout(); + // Clear React Query cache to prevent stale data from old user + if (typeof window !== "undefined") { + const { getQueryClient } = await import("@/lib/react-query/queryClient"); + const queryClient = getQueryClient(); + queryClient.clear(); + } + set({ user: null, hasLoadedUser: false, isUserLoading: false, }); - const result = await serverLogout(options); - - if (result.success && router) { - router.push("/login"); - } + await serverLogout(options); } async function validateSessionInternal( diff --git a/autogpt_platform/frontend/src/lib/utils.ts b/autogpt_platform/frontend/src/lib/utils.ts index b05375bb83..b7be324f3f 100644 --- a/autogpt_platform/frontend/src/lib/utils.ts +++ b/autogpt_platform/frontend/src/lib/utils.ts @@ -2,12 +2,12 @@ import { type ClassValue, clsx } from "clsx"; import { isEmpty as _isEmpty } from "lodash"; import { twMerge } from "tailwind-merge"; +import { NodeDimension } from "@/app/(platform)/build/components/legacy-builder/Flow/Flow"; import { BlockIOObjectSubSchema, BlockIORootSchema, Category, } from "@/lib/autogpt-server-api/types"; -import { NodeDimension } from "@/app/(platform)/build/components/legacy-builder/Flow/Flow"; export function cn(...inputs: ClassValue[]) { return twMerge(clsx(inputs)); @@ -153,24 +153,29 @@ export function setNestedProperty(obj: any, path: string, value: any) { throw new Error("Path must be a non-empty string"); } - const keys = 
path.split(/[\/.]/); + // Split by both / and . to handle mixed separators, then filter empty strings + const keys = path.split(/[\/.]/).filter((key) => key.length > 0); + if (keys.length === 0) { + throw new Error("Path must be a non-empty string"); + } + + // Validate keys for prototype pollution protection for (const key of keys) { - if ( - !key || - key === "__proto__" || - key === "constructor" || - key === "prototype" - ) { + if (key === "__proto__" || key === "constructor" || key === "prototype") { throw new Error(`Invalid property name: ${key}`); } } + // Securely traverse and set nested properties + // Use Object.prototype.hasOwnProperty.call() to safely check properties let current = obj; for (let i = 0; i < keys.length - 1; i++) { const key = keys[i]; - if (!current.hasOwnProperty(key)) { + + // Use hasOwnProperty check to avoid prototype chain access + if (!Object.prototype.hasOwnProperty.call(current, key)) { current[key] = {}; } else if (typeof current[key] !== "object" || current[key] === null) { current[key] = {}; @@ -178,7 +183,10 @@ export function setNestedProperty(obj: any, path: string, value: any) { current = current[key]; } - current[keys[keys.length - 1]] = value; + // Set the final value using bracket notation with validated key + // Since we've validated all keys, this is safe from prototype pollution + const finalKey = keys[keys.length - 1]; + current[finalKey] = value; } export function pruneEmptyValues(