diff --git a/.github/workflows/repo-close-stale-issues.yml b/.github/workflows/repo-close-stale-issues.yml index a9f183d775..d58459daa1 100644 --- a/.github/workflows/repo-close-stale-issues.yml +++ b/.github/workflows/repo-close-stale-issues.yml @@ -11,7 +11,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v9 + - uses: actions/stale@v10 with: # operations-per-run: 5000 stale-issue-message: > diff --git a/.github/workflows/repo-pr-label.yml b/.github/workflows/repo-pr-label.yml index eef928ef16..97579c2784 100644 --- a/.github/workflows/repo-pr-label.yml +++ b/.github/workflows/repo-pr-label.yml @@ -61,6 +61,6 @@ jobs: pull-requests: write runs-on: ubuntu-latest steps: - - uses: actions/labeler@v5 + - uses: actions/labeler@v6 with: sync-labels: true diff --git a/autogpt_platform/backend/backend/blocks/github/example_payloads/discussion.created.json b/autogpt_platform/backend/backend/blocks/github/example_payloads/discussion.created.json new file mode 100644 index 0000000000..6b0d73dda3 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/example_payloads/discussion.created.json @@ -0,0 +1,108 @@ +{ + "action": "created", + "discussion": { + "repository_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT", + "category": { + "id": 12345678, + "node_id": "DIC_kwDOJKSTjM4CXXXX", + "repository_id": 614765452, + "emoji": ":pray:", + "name": "Q&A", + "description": "Ask the community for help", + "created_at": "2023-03-16T09:21:07Z", + "updated_at": "2023-03-16T09:21:07Z", + "slug": "q-a", + "is_answerable": true + }, + "answer_html_url": null, + "answer_chosen_at": null, + "answer_chosen_by": null, + "html_url": "https://github.com/Significant-Gravitas/AutoGPT/discussions/9999", + "id": 5000000001, + "node_id": "D_kwDOJKSTjM4AYYYY", + "number": 9999, + "title": "How do I configure custom blocks?", + "user": { + "login": "curious-user", + "id": 22222222, + "node_id": "MDQ6VXNlcjIyMjIyMjIy", + "avatar_url": "https://avatars.githubusercontent.com/u/22222222?v=4", + "url": "https://api.github.com/users/curious-user", + "html_url": "https://github.com/curious-user", + "type": "User", + "site_admin": false + }, + "state": "open", + "state_reason": null, + "locked": false, + "comments": 0, + "created_at": "2024-12-01T17:00:00Z", + "updated_at": "2024-12-01T17:00:00Z", + "author_association": "NONE", + "active_lock_reason": null, + "body": "## Question\n\nI'm trying to create a custom block for my specific use case. I've read the documentation but I'm not sure how to:\n\n1. Define the input/output schema\n2. Handle authentication\n3. 
Test my block locally\n\nCan someone point me to examples or provide guidance?\n\n## Environment\n\n- AutoGPT Platform version: latest\n- Python: 3.11", + "reactions": { + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/discussions/9999/reactions", + "total_count": 0, + "+1": 0, + "-1": 0, + "laugh": 0, + "hooray": 0, + "confused": 0, + "heart": 0, + "rocket": 0, + "eyes": 0 + }, + "timeline_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/discussions/9999/timeline" + }, + "repository": { + "id": 614765452, + "node_id": "R_kgDOJKSTjA", + "name": "AutoGPT", + "full_name": "Significant-Gravitas/AutoGPT", + "private": false, + "owner": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "url": "https://api.github.com/users/Significant-Gravitas", + "html_url": "https://github.com/Significant-Gravitas", + "type": "Organization", + "site_admin": false + }, + "html_url": "https://github.com/Significant-Gravitas/AutoGPT", + "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.", + "fork": false, + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT", + "created_at": "2023-03-16T09:21:07Z", + "updated_at": "2024-12-01T17:00:00Z", + "pushed_at": "2024-12-01T12:00:00Z", + "stargazers_count": 170000, + "watchers_count": 170000, + "language": "Python", + "has_discussions": true, + "forks_count": 45000, + "visibility": "public", + "default_branch": "master" + }, + "organization": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "url": "https://api.github.com/orgs/Significant-Gravitas", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "description": "" + }, + "sender": { + "login": "curious-user", + "id": 22222222, + "node_id": "MDQ6VXNlcjIyMjIyMjIy", + "avatar_url": "https://avatars.githubusercontent.com/u/22222222?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/curious-user", + "html_url": "https://github.com/curious-user", + "type": "User", + "site_admin": false + } +} diff --git a/autogpt_platform/backend/backend/blocks/github/example_payloads/issues.opened.json b/autogpt_platform/backend/backend/blocks/github/example_payloads/issues.opened.json new file mode 100644 index 0000000000..078d5da0be --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/example_payloads/issues.opened.json @@ -0,0 +1,112 @@ +{ + "action": "opened", + "issue": { + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345", + "repository_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT", + "labels_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/labels{/name}", + "comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/comments", + "events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/events", + "html_url": "https://github.com/Significant-Gravitas/AutoGPT/issues/12345", + "id": 2000000001, + "node_id": "I_kwDOJKSTjM5wXXXX", + "number": 12345, + "title": "Bug: Application crashes when processing large files", + "user": { + "login": "bug-reporter", + "id": 11111111, + "node_id": "MDQ6VXNlcjExMTExMTEx", + "avatar_url": "https://avatars.githubusercontent.com/u/11111111?v=4", + "url": "https://api.github.com/users/bug-reporter", + "html_url": "https://github.com/bug-reporter", + "type": "User", + "site_admin": false + 
}, + "labels": [ + { + "id": 5272676214, + "node_id": "LA_kwDOJKSTjM8AAAABOkandg", + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/bug", + "name": "bug", + "color": "d73a4a", + "default": true, + "description": "Something isn't working" + } + ], + "state": "open", + "locked": false, + "assignee": null, + "assignees": [], + "milestone": null, + "comments": 0, + "created_at": "2024-12-01T16:00:00Z", + "updated_at": "2024-12-01T16:00:00Z", + "closed_at": null, + "author_association": "NONE", + "active_lock_reason": null, + "body": "## Description\n\nWhen I try to process a file larger than 100MB, the application crashes with an out of memory error.\n\n## Steps to Reproduce\n\n1. Open the application\n2. Select a file larger than 100MB\n3. Click 'Process'\n4. Application crashes\n\n## Expected Behavior\n\nThe application should handle large files gracefully.\n\n## Environment\n\n- OS: Ubuntu 22.04\n- Python: 3.11\n- AutoGPT Version: 1.0.0", + "reactions": { + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/reactions", + "total_count": 0, + "+1": 0, + "-1": 0, + "laugh": 0, + "hooray": 0, + "confused": 0, + "heart": 0, + "rocket": 0, + "eyes": 0 + }, + "timeline_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/12345/timeline", + "state_reason": null + }, + "repository": { + "id": 614765452, + "node_id": "R_kgDOJKSTjA", + "name": "AutoGPT", + "full_name": "Significant-Gravitas/AutoGPT", + "private": false, + "owner": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "url": "https://api.github.com/users/Significant-Gravitas", + "html_url": "https://github.com/Significant-Gravitas", + "type": "Organization", + "site_admin": false + }, + "html_url": "https://github.com/Significant-Gravitas/AutoGPT", + "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.", + "fork": false, + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT", + "created_at": "2023-03-16T09:21:07Z", + "updated_at": "2024-12-01T16:00:00Z", + "pushed_at": "2024-12-01T12:00:00Z", + "stargazers_count": 170000, + "watchers_count": 170000, + "language": "Python", + "forks_count": 45000, + "open_issues_count": 190, + "visibility": "public", + "default_branch": "master" + }, + "organization": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "url": "https://api.github.com/orgs/Significant-Gravitas", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "description": "" + }, + "sender": { + "login": "bug-reporter", + "id": 11111111, + "node_id": "MDQ6VXNlcjExMTExMTEx", + "avatar_url": "https://avatars.githubusercontent.com/u/11111111?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/bug-reporter", + "html_url": "https://github.com/bug-reporter", + "type": "User", + "site_admin": false + } +} diff --git a/autogpt_platform/backend/backend/blocks/github/example_payloads/release.published.json b/autogpt_platform/backend/backend/blocks/github/example_payloads/release.published.json new file mode 100644 index 0000000000..eac8461e59 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/example_payloads/release.published.json @@ -0,0 +1,97 @@ +{ + "action": "published", + "release": { + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases/123456789", + "assets_url": 
"https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases/123456789/assets", + "upload_url": "https://uploads.github.com/repos/Significant-Gravitas/AutoGPT/releases/123456789/assets{?name,label}", + "html_url": "https://github.com/Significant-Gravitas/AutoGPT/releases/tag/v1.0.0", + "id": 123456789, + "author": { + "login": "ntindle", + "id": 12345678, + "node_id": "MDQ6VXNlcjEyMzQ1Njc4", + "avatar_url": "https://avatars.githubusercontent.com/u/12345678?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/ntindle", + "html_url": "https://github.com/ntindle", + "type": "User", + "site_admin": false + }, + "node_id": "RE_kwDOJKSTjM4HWwAA", + "tag_name": "v1.0.0", + "target_commitish": "master", + "name": "AutoGPT Platform v1.0.0", + "draft": false, + "prerelease": false, + "created_at": "2024-12-01T10:00:00Z", + "published_at": "2024-12-01T12:00:00Z", + "assets": [ + { + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases/assets/987654321", + "id": 987654321, + "node_id": "RA_kwDOJKSTjM4HWwBB", + "name": "autogpt-v1.0.0.zip", + "label": "Release Package", + "content_type": "application/zip", + "state": "uploaded", + "size": 52428800, + "download_count": 0, + "created_at": "2024-12-01T11:30:00Z", + "updated_at": "2024-12-01T11:35:00Z", + "browser_download_url": "https://github.com/Significant-Gravitas/AutoGPT/releases/download/v1.0.0/autogpt-v1.0.0.zip" + } + ], + "tarball_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/tarball/v1.0.0", + "zipball_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/zipball/v1.0.0", + "body": "## What's New\n\n- Feature 1: Amazing new capability\n- Feature 2: Performance improvements\n- Bug fixes and stability improvements\n\n## Breaking Changes\n\nNone\n\n## Contributors\n\nThanks to all our contributors!" 
+ }, + "repository": { + "id": 614765452, + "node_id": "R_kgDOJKSTjA", + "name": "AutoGPT", + "full_name": "Significant-Gravitas/AutoGPT", + "private": false, + "owner": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "url": "https://api.github.com/users/Significant-Gravitas", + "html_url": "https://github.com/Significant-Gravitas", + "type": "Organization", + "site_admin": false + }, + "html_url": "https://github.com/Significant-Gravitas/AutoGPT", + "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.", + "fork": false, + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT", + "created_at": "2023-03-16T09:21:07Z", + "updated_at": "2024-12-01T12:00:00Z", + "pushed_at": "2024-12-01T12:00:00Z", + "stargazers_count": 170000, + "watchers_count": 170000, + "language": "Python", + "forks_count": 45000, + "visibility": "public", + "default_branch": "master" + }, + "organization": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "url": "https://api.github.com/orgs/Significant-Gravitas", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "description": "" + }, + "sender": { + "login": "ntindle", + "id": 12345678, + "node_id": "MDQ6VXNlcjEyMzQ1Njc4", + "avatar_url": "https://avatars.githubusercontent.com/u/12345678?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/ntindle", + "html_url": "https://github.com/ntindle", + "type": "User", + "site_admin": false + } +} diff --git a/autogpt_platform/backend/backend/blocks/github/example_payloads/star.created.json b/autogpt_platform/backend/backend/blocks/github/example_payloads/star.created.json new file mode 100644 index 0000000000..cb2dfd7522 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/example_payloads/star.created.json @@ -0,0 +1,53 @@ +{ + "action": "created", + "starred_at": "2024-12-01T15:30:00Z", + "repository": { + "id": 614765452, + "node_id": "R_kgDOJKSTjA", + "name": "AutoGPT", + "full_name": "Significant-Gravitas/AutoGPT", + "private": false, + "owner": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "url": "https://api.github.com/users/Significant-Gravitas", + "html_url": "https://github.com/Significant-Gravitas", + "type": "Organization", + "site_admin": false + }, + "html_url": "https://github.com/Significant-Gravitas/AutoGPT", + "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on.", + "fork": false, + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT", + "created_at": "2023-03-16T09:21:07Z", + "updated_at": "2024-12-01T15:30:00Z", + "pushed_at": "2024-12-01T12:00:00Z", + "stargazers_count": 170001, + "watchers_count": 170001, + "language": "Python", + "forks_count": 45000, + "visibility": "public", + "default_branch": "master" + }, + "organization": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "url": "https://api.github.com/orgs/Significant-Gravitas", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "description": "" + }, + "sender": { + "login": "awesome-contributor", + "id": 98765432, + "node_id": "MDQ6VXNlcjk4NzY1NDMy", + "avatar_url": "https://avatars.githubusercontent.com/u/98765432?v=4", + "gravatar_id": "", + "url": 
"https://api.github.com/users/awesome-contributor", + "html_url": "https://github.com/awesome-contributor", + "type": "User", + "site_admin": false + } +} diff --git a/autogpt_platform/backend/backend/blocks/github/triggers.py b/autogpt_platform/backend/backend/blocks/github/triggers.py index f7215b8f8e..2fc568a468 100644 --- a/autogpt_platform/backend/backend/blocks/github/triggers.py +++ b/autogpt_platform/backend/backend/blocks/github/triggers.py @@ -159,3 +159,391 @@ class GithubPullRequestTriggerBlock(GitHubTriggerBase, Block): # --8<-- [end:GithubTriggerExample] + + +class GithubStarTriggerBlock(GitHubTriggerBase, Block): + """Trigger block for GitHub star events - useful for milestone celebrations.""" + + EXAMPLE_PAYLOAD_FILE = ( + Path(__file__).parent / "example_payloads" / "star.created.json" + ) + + class Input(GitHubTriggerBase.Input): + class EventsFilter(BaseModel): + """ + https://docs.github.com/en/webhooks/webhook-events-and-payloads#star + """ + + created: bool = False + deleted: bool = False + + events: EventsFilter = SchemaField( + title="Events", description="The star events to subscribe to" + ) + + class Output(GitHubTriggerBase.Output): + event: str = SchemaField( + description="The star event that triggered the webhook ('created' or 'deleted')" + ) + starred_at: str = SchemaField( + description="ISO timestamp when the repo was starred (empty if deleted)" + ) + stargazers_count: int = SchemaField( + description="Current number of stars on the repository" + ) + repository_name: str = SchemaField( + description="Full name of the repository (owner/repo)" + ) + repository_url: str = SchemaField(description="URL to the repository") + + def __init__(self): + from backend.integrations.webhooks.github import GithubWebhookType + + example_payload = json.loads( + self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8") + ) + + super().__init__( + id="551e0a35-100b-49b7-89b8-3031322239b6", + description="This block triggers on GitHub star events. 
" + "Useful for celebrating milestones (e.g., 1k, 10k stars) or tracking engagement.", + categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT}, + input_schema=GithubStarTriggerBlock.Input, + output_schema=GithubStarTriggerBlock.Output, + webhook_config=BlockWebhookConfig( + provider=ProviderName.GITHUB, + webhook_type=GithubWebhookType.REPO, + resource_format="{repo}", + event_filter_input="events", + event_format="star.{event}", + ), + test_input={ + "repo": "Significant-Gravitas/AutoGPT", + "events": {"created": True}, + "credentials": TEST_CREDENTIALS_INPUT, + "payload": example_payload, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("payload", example_payload), + ("triggered_by_user", example_payload["sender"]), + ("event", example_payload["action"]), + ("starred_at", example_payload.get("starred_at", "")), + ("stargazers_count", example_payload["repository"]["stargazers_count"]), + ("repository_name", example_payload["repository"]["full_name"]), + ("repository_url", example_payload["repository"]["html_url"]), + ], + ) + + async def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore + async for name, value in super().run(input_data, **kwargs): + yield name, value + yield "event", input_data.payload["action"] + yield "starred_at", input_data.payload.get("starred_at", "") + yield "stargazers_count", input_data.payload["repository"]["stargazers_count"] + yield "repository_name", input_data.payload["repository"]["full_name"] + yield "repository_url", input_data.payload["repository"]["html_url"] + + +class GithubReleaseTriggerBlock(GitHubTriggerBase, Block): + """Trigger block for GitHub release events - ideal for announcing new versions.""" + + EXAMPLE_PAYLOAD_FILE = ( + Path(__file__).parent / "example_payloads" / "release.published.json" + ) + + class Input(GitHubTriggerBase.Input): + class EventsFilter(BaseModel): + """ + https://docs.github.com/en/webhooks/webhook-events-and-payloads#release + """ + + published: bool = False + unpublished: bool = False + created: bool = False + edited: bool = False + deleted: bool = False + prereleased: bool = False + released: bool = False + + events: EventsFilter = SchemaField( + title="Events", description="The release events to subscribe to" + ) + + class Output(GitHubTriggerBase.Output): + event: str = SchemaField( + description="The release event that triggered the webhook (e.g., 'published')" + ) + release: dict = SchemaField(description="The full release object") + release_url: str = SchemaField(description="URL to the release page") + tag_name: str = SchemaField(description="The release tag name (e.g., 'v1.0.0')") + release_name: str = SchemaField(description="Human-readable release name") + body: str = SchemaField(description="Release notes/description") + prerelease: bool = SchemaField(description="Whether this is a prerelease") + draft: bool = SchemaField(description="Whether this is a draft release") + assets: list = SchemaField(description="List of release assets/files") + + def __init__(self): + from backend.integrations.webhooks.github import GithubWebhookType + + example_payload = json.loads( + self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8") + ) + + super().__init__( + id="2052dd1b-74e1-46ac-9c87-c7a0e057b60b", + description="This block triggers on GitHub release events. 
" + "Perfect for automating announcements to Discord, Twitter, or other platforms.", + categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT}, + input_schema=GithubReleaseTriggerBlock.Input, + output_schema=GithubReleaseTriggerBlock.Output, + webhook_config=BlockWebhookConfig( + provider=ProviderName.GITHUB, + webhook_type=GithubWebhookType.REPO, + resource_format="{repo}", + event_filter_input="events", + event_format="release.{event}", + ), + test_input={ + "repo": "Significant-Gravitas/AutoGPT", + "events": {"published": True}, + "credentials": TEST_CREDENTIALS_INPUT, + "payload": example_payload, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("payload", example_payload), + ("triggered_by_user", example_payload["sender"]), + ("event", example_payload["action"]), + ("release", example_payload["release"]), + ("release_url", example_payload["release"]["html_url"]), + ("tag_name", example_payload["release"]["tag_name"]), + ("release_name", example_payload["release"]["name"]), + ("body", example_payload["release"]["body"]), + ("prerelease", example_payload["release"]["prerelease"]), + ("draft", example_payload["release"]["draft"]), + ("assets", example_payload["release"]["assets"]), + ], + ) + + async def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore + async for name, value in super().run(input_data, **kwargs): + yield name, value + release = input_data.payload["release"] + yield "event", input_data.payload["action"] + yield "release", release + yield "release_url", release["html_url"] + yield "tag_name", release["tag_name"] + yield "release_name", release.get("name", "") + yield "body", release.get("body", "") + yield "prerelease", release["prerelease"] + yield "draft", release["draft"] + yield "assets", release["assets"] + + +class GithubIssuesTriggerBlock(GitHubTriggerBase, Block): + """Trigger block for GitHub issues events - great for triage and notifications.""" + + EXAMPLE_PAYLOAD_FILE = ( + Path(__file__).parent / "example_payloads" / "issues.opened.json" + ) + + class Input(GitHubTriggerBase.Input): + class EventsFilter(BaseModel): + """ + https://docs.github.com/en/webhooks/webhook-events-and-payloads#issues + """ + + opened: bool = False + edited: bool = False + deleted: bool = False + closed: bool = False + reopened: bool = False + assigned: bool = False + unassigned: bool = False + labeled: bool = False + unlabeled: bool = False + locked: bool = False + unlocked: bool = False + transferred: bool = False + milestoned: bool = False + demilestoned: bool = False + pinned: bool = False + unpinned: bool = False + + events: EventsFilter = SchemaField( + title="Events", description="The issue events to subscribe to" + ) + + class Output(GitHubTriggerBase.Output): + event: str = SchemaField( + description="The issue event that triggered the webhook (e.g., 'opened')" + ) + number: int = SchemaField(description="The issue number") + issue: dict = SchemaField(description="The full issue object") + issue_url: str = SchemaField(description="URL to the issue") + issue_title: str = SchemaField(description="The issue title") + issue_body: str = SchemaField(description="The issue body/description") + labels: list = SchemaField(description="List of labels on the issue") + assignees: list = SchemaField(description="List of assignees") + state: str = SchemaField(description="Issue state ('open' or 'closed')") + + def __init__(self): + from backend.integrations.webhooks.github import GithubWebhookType + + example_payload = json.loads( + 
self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8") + ) + + super().__init__( + id="b2605464-e486-4bf4-aad3-d8a213c8a48a", + description="This block triggers on GitHub issues events. " + "Useful for automated triage, notifications, and welcoming first-time contributors.", + categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT}, + input_schema=GithubIssuesTriggerBlock.Input, + output_schema=GithubIssuesTriggerBlock.Output, + webhook_config=BlockWebhookConfig( + provider=ProviderName.GITHUB, + webhook_type=GithubWebhookType.REPO, + resource_format="{repo}", + event_filter_input="events", + event_format="issues.{event}", + ), + test_input={ + "repo": "Significant-Gravitas/AutoGPT", + "events": {"opened": True}, + "credentials": TEST_CREDENTIALS_INPUT, + "payload": example_payload, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("payload", example_payload), + ("triggered_by_user", example_payload["sender"]), + ("event", example_payload["action"]), + ("number", example_payload["issue"]["number"]), + ("issue", example_payload["issue"]), + ("issue_url", example_payload["issue"]["html_url"]), + ("issue_title", example_payload["issue"]["title"]), + ("issue_body", example_payload["issue"]["body"]), + ("labels", example_payload["issue"]["labels"]), + ("assignees", example_payload["issue"]["assignees"]), + ("state", example_payload["issue"]["state"]), + ], + ) + + async def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore + async for name, value in super().run(input_data, **kwargs): + yield name, value + issue = input_data.payload["issue"] + yield "event", input_data.payload["action"] + yield "number", issue["number"] + yield "issue", issue + yield "issue_url", issue["html_url"] + yield "issue_title", issue["title"] + yield "issue_body", issue.get("body") or "" + yield "labels", issue["labels"] + yield "assignees", issue["assignees"] + yield "state", issue["state"] + + +class GithubDiscussionTriggerBlock(GitHubTriggerBase, Block): + """Trigger block for GitHub discussion events - perfect for community Q&A sync.""" + + EXAMPLE_PAYLOAD_FILE = ( + Path(__file__).parent / "example_payloads" / "discussion.created.json" + ) + + class Input(GitHubTriggerBase.Input): + class EventsFilter(BaseModel): + """ + https://docs.github.com/en/webhooks/webhook-events-and-payloads#discussion + """ + + created: bool = False + edited: bool = False + deleted: bool = False + answered: bool = False + unanswered: bool = False + labeled: bool = False + unlabeled: bool = False + locked: bool = False + unlocked: bool = False + category_changed: bool = False + transferred: bool = False + pinned: bool = False + unpinned: bool = False + + events: EventsFilter = SchemaField( + title="Events", description="The discussion events to subscribe to" + ) + + class Output(GitHubTriggerBase.Output): + event: str = SchemaField( + description="The discussion event that triggered the webhook" + ) + number: int = SchemaField(description="The discussion number") + discussion: dict = SchemaField(description="The full discussion object") + discussion_url: str = SchemaField(description="URL to the discussion") + title: str = SchemaField(description="The discussion title") + body: str = SchemaField(description="The discussion body") + category: dict = SchemaField(description="The discussion category object") + category_name: str = SchemaField(description="Name of the category") + state: str = SchemaField(description="Discussion state") + + def __init__(self): + from backend.integrations.webhooks.github 
import GithubWebhookType + + example_payload = json.loads( + self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8") + ) + + super().__init__( + id="87f847b3-d81a-424e-8e89-acadb5c9d52b", + description="This block triggers on GitHub Discussions events. " + "Great for syncing Q&A to Discord or auto-responding to common questions. " + "Note: Discussions must be enabled on the repository.", + categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT}, + input_schema=GithubDiscussionTriggerBlock.Input, + output_schema=GithubDiscussionTriggerBlock.Output, + webhook_config=BlockWebhookConfig( + provider=ProviderName.GITHUB, + webhook_type=GithubWebhookType.REPO, + resource_format="{repo}", + event_filter_input="events", + event_format="discussion.{event}", + ), + test_input={ + "repo": "Significant-Gravitas/AutoGPT", + "events": {"created": True}, + "credentials": TEST_CREDENTIALS_INPUT, + "payload": example_payload, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("payload", example_payload), + ("triggered_by_user", example_payload["sender"]), + ("event", example_payload["action"]), + ("number", example_payload["discussion"]["number"]), + ("discussion", example_payload["discussion"]), + ("discussion_url", example_payload["discussion"]["html_url"]), + ("title", example_payload["discussion"]["title"]), + ("body", example_payload["discussion"]["body"]), + ("category", example_payload["discussion"]["category"]), + ("category_name", example_payload["discussion"]["category"]["name"]), + ("state", example_payload["discussion"]["state"]), + ], + ) + + async def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore + async for name, value in super().run(input_data, **kwargs): + yield name, value + discussion = input_data.payload["discussion"] + yield "event", input_data.payload["action"] + yield "number", discussion["number"] + yield "discussion", discussion + yield "discussion_url", discussion["html_url"] + yield "title", discussion["title"] + yield "body", discussion.get("body") or "" + yield "category", discussion["category"] + yield "category_name", discussion["category"]["name"] + yield "state", discussion["state"] diff --git a/autogpt_platform/backend/backend/blocks/google/sheets.py b/autogpt_platform/backend/backend/blocks/google/sheets.py index fac4e2d1aa..7b9ba2161e 100644 --- a/autogpt_platform/backend/backend/blocks/google/sheets.py +++ b/autogpt_platform/backend/backend/blocks/google/sheets.py @@ -1,6 +1,8 @@ import asyncio +import csv +import io +import re from enum import Enum -from typing import Any from google.oauth2.credentials import Credentials from googleapiclient.discovery import build @@ -131,35 +133,6 @@ def sheet_id_by_name(service, spreadsheet_id: str, sheet_name: str) -> int | Non return None -def _convert_dicts_to_rows( - data: list[dict[str, Any]], headers: list[str] -) -> list[list[str]]: - """Convert list of dictionaries to list of rows using the specified header order. 
- - Args: - data: List of dictionaries to convert - headers: List of column headers to use for ordering - - Returns: - List of rows where each row is a list of string values in header order - """ - if not data: - return [] - - if not headers: - raise ValueError("Headers are required when using list[dict] format") - - rows = [] - for item in data: - row = [] - for header in headers: - value = item.get(header, "") - row.append(str(value) if value is not None else "") - rows.append(row) - - return rows - - def _build_sheets_service(credentials: GoogleCredentials): """Build Sheets service from platform credentials (with refresh token).""" settings = Settings() @@ -260,6 +233,17 @@ class BatchOperationType(str, Enum): CLEAR = "clear" +class PublicAccessRole(str, Enum): + READER = "reader" + COMMENTER = "commenter" + + +class ShareRole(str, Enum): + READER = "reader" + WRITER = "writer" + COMMENTER = "commenter" + + class BatchOperation(BlockSchemaInput): type: BatchOperationType = SchemaField( description="The type of operation to perform" @@ -531,7 +515,9 @@ class GoogleSheetsWriteBlock(Block): return result -class GoogleSheetsAppendBlock(Block): +class GoogleSheetsAppendRowBlock(Block): + """Append a single row to the end of a Google Sheet.""" + class Input(BlockSchemaInput): spreadsheet: GoogleDriveFile = GoogleDriveFileField( title="Spreadsheet", @@ -540,54 +526,33 @@ class GoogleSheetsAppendBlock(Block): allowed_views=["SPREADSHEETS"], allowed_mime_types=["application/vnd.google-apps.spreadsheet"], ) + row: list[str] = SchemaField( + description="Row values to append (e.g., ['Alice', 'alice@example.com', '25'])", + ) sheet_name: str = SchemaField( - description="Optional sheet to append to (defaults to first sheet)", + description="Sheet to append to (optional, defaults to first sheet)", default="", ) - values: list[list[str]] = SchemaField( - description="Rows to append as list of rows (list[list[str]])", - default=[], - ) - dict_values: list[dict[str, Any]] = SchemaField( - description="Rows to append as list of dictionaries (list[dict])", - default=[], - ) - headers: list[str] = SchemaField( - description="Column headers to use for ordering dict values (required when dict_values is provided)", - default=[], - ) - range: str = SchemaField( - description="Range to append to (e.g. 'A:A' for column A only, 'A:C' for columns A-C, or leave empty for unlimited columns). When empty, data will span as many columns as needed.", - default="", - advanced=True, - ) value_input_option: ValueInputOption = SchemaField( - description="How input data should be interpreted", + description="How values are interpreted. USER_ENTERED: parsed like typed input (e.g., '=SUM(A1:A5)' becomes a formula, '1/2/2024' becomes a date). 
RAW: stored as-is without parsing.", default=ValueInputOption.USER_ENTERED, advanced=True, ) - insert_data_option: InsertDataOption = SchemaField( - description="How new data should be inserted", - default=InsertDataOption.INSERT_ROWS, - advanced=True, - ) class Output(BlockSchemaOutput): result: dict = SchemaField(description="Append API response") spreadsheet: GoogleDriveFile = SchemaField( - description="The spreadsheet as a GoogleDriveFile (for chaining to other blocks)", - ) - error: str = SchemaField( - description="Error message if any", + description="The spreadsheet for chaining to other blocks", ) + error: str = SchemaField(description="Error message if any") def __init__(self): super().__init__( id="531d50c0-d6b9-4cf9-a013-7bf783d313c7", - description="Append data to a Google Sheet. Use 'values' for list of rows (list[list[str]]) or 'dict_values' with 'headers' for list of dictionaries (list[dict]). Data is added to the next empty row without overwriting existing content. Leave range empty for unlimited columns, or specify range like 'A:A' to constrain to specific columns.", + description="Append or Add a single row to the end of a Google Sheet. The row is added after the last row with data.", categories={BlockCategory.DATA}, - input_schema=GoogleSheetsAppendBlock.Input, - output_schema=GoogleSheetsAppendBlock.Output, + input_schema=GoogleSheetsAppendRowBlock.Input, + output_schema=GoogleSheetsAppendRowBlock.Output, disabled=GOOGLE_SHEETS_DISABLED, test_input={ "spreadsheet": { @@ -595,7 +560,7 @@ class GoogleSheetsAppendBlock(Block): "name": "Test Spreadsheet", "mimeType": "application/vnd.google-apps.spreadsheet", }, - "values": [["Charlie", "95"]], + "row": ["Charlie", "95"], }, test_credentials=TEST_CREDENTIALS, test_output=[ @@ -614,7 +579,7 @@ class GoogleSheetsAppendBlock(Block): ), ], test_mock={ - "_append_sheet": lambda *args, **kwargs: { + "_append_row": lambda *args, **kwargs: { "updatedCells": 2, "updatedColumns": 2, "updatedRows": 1, @@ -629,44 +594,26 @@ class GoogleSheetsAppendBlock(Block): yield "error", "No spreadsheet selected" return - # Check if the selected file is actually a Google Sheets spreadsheet validation_error = _validate_spreadsheet_file(input_data.spreadsheet) if validation_error: yield "error", validation_error return + + if not input_data.row: + yield "error", "Row data is required" + return + try: service = _build_sheets_service(credentials) - - # Determine which values to use and convert if needed - processed_values: list[list[str]] - - # Validate that only one format is provided - if input_data.values and input_data.dict_values: - raise ValueError("Provide either 'values' or 'dict_values', not both") - - if input_data.dict_values: - if not input_data.headers: - raise ValueError("Headers are required when using dict_values") - processed_values = _convert_dicts_to_rows( - input_data.dict_values, input_data.headers - ) - elif input_data.values: - processed_values = input_data.values - else: - raise ValueError("Either 'values' or 'dict_values' must be provided") - result = await asyncio.to_thread( - self._append_sheet, + self._append_row, service, input_data.spreadsheet.id, input_data.sheet_name, - processed_values, - input_data.range, + input_data.row, input_data.value_input_option, - input_data.insert_data_option, ) yield "result", result - # Output the GoogleDriveFile for chaining (preserves credentials_id) yield "spreadsheet", GoogleDriveFile( id=input_data.spreadsheet.id, name=input_data.spreadsheet.name, @@ -677,40 +624,37 @@ class 
GoogleSheetsAppendBlock(Block): _credentials_id=input_data.spreadsheet.credentials_id, ) except Exception as e: - yield "error", f"Failed to append to Google Sheet: {str(e)}" + yield "error", f"Failed to append row: {str(e)}" - def _append_sheet( + def _append_row( self, service, spreadsheet_id: str, sheet_name: str, - values: list[list[str]], - range: str, + row: list[str], value_input_option: ValueInputOption, - insert_data_option: InsertDataOption, ) -> dict: - target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name) + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) formatted_sheet = format_sheet_name(target_sheet) - # If no range specified, use A1 to let Google Sheets find the next empty row with unlimited columns - # If range specified, use it to constrain columns (e.g., A:A for column A only) - if range: - append_range = f"{formatted_sheet}!{range}" - else: - # Use A1 as starting point for unlimited columns - Google Sheets will find next empty row - append_range = f"{formatted_sheet}!A1" - body = {"values": values} - return ( + append_range = f"{formatted_sheet}!A1" + body = {"values": [row]} # Wrap single row in list for API + result = ( service.spreadsheets() .values() .append( spreadsheetId=spreadsheet_id, range=append_range, valueInputOption=value_input_option.value, - insertDataOption=insert_data_option.value, + insertDataOption="INSERT_ROWS", body=body, ) .execute() ) + return { + "updatedCells": result.get("updates", {}).get("updatedCells", 0), + "updatedRows": result.get("updates", {}).get("updatedRows", 0), + "updatedColumns": result.get("updates", {}).get("updatedColumns", 0), + } class GoogleSheetsClearBlock(Block): @@ -2206,3 +2150,4382 @@ class GoogleSheetsUpdateCellBlock(Block): "updatedRows": result.get("updatedRows", 0), "updatedColumns": result.get("updatedColumns", 0), } + + +class FilterOperator(str, Enum): + EQUALS = "equals" + NOT_EQUALS = "not_equals" + CONTAINS = "contains" + NOT_CONTAINS = "not_contains" + GREATER_THAN = "greater_than" + LESS_THAN = "less_than" + GREATER_THAN_OR_EQUAL = "greater_than_or_equal" + LESS_THAN_OR_EQUAL = "less_than_or_equal" + IS_EMPTY = "is_empty" + IS_NOT_EMPTY = "is_not_empty" + + +class SortOrder(str, Enum): + ASCENDING = "ascending" + DESCENDING = "descending" + + +def _column_letter_to_index(letter: str) -> int: + """Convert column letter (A, B, ..., Z, AA, AB, ...) 
to 0-based index.""" + result = 0 + for char in letter.upper(): + result = result * 26 + (ord(char) - ord("A") + 1) + return result - 1 + + +def _index_to_column_letter(index: int) -> str: + """Convert 0-based column index to column letter (A, B, ..., Z, AA, AB, ...).""" + result = "" + index += 1 # Convert to 1-based + while index > 0: + index, remainder = divmod(index - 1, 26) + result = chr(ord("A") + remainder) + result + return result + + +def _apply_filter( + cell_value: str, + filter_value: str, + operator: FilterOperator, + match_case: bool, +) -> bool: + """Apply a filter condition to a cell value.""" + if operator == FilterOperator.IS_EMPTY: + return cell_value.strip() == "" + if operator == FilterOperator.IS_NOT_EMPTY: + return cell_value.strip() != "" + + # For comparison operators, apply case sensitivity + compare_cell = cell_value if match_case else cell_value.lower() + compare_filter = filter_value if match_case else filter_value.lower() + + if operator == FilterOperator.EQUALS: + return compare_cell == compare_filter + elif operator == FilterOperator.NOT_EQUALS: + return compare_cell != compare_filter + elif operator == FilterOperator.CONTAINS: + return compare_filter in compare_cell + elif operator == FilterOperator.NOT_CONTAINS: + return compare_filter not in compare_cell + elif operator in ( + FilterOperator.GREATER_THAN, + FilterOperator.LESS_THAN, + FilterOperator.GREATER_THAN_OR_EQUAL, + FilterOperator.LESS_THAN_OR_EQUAL, + ): + # Try numeric comparison first + try: + num_cell = float(cell_value) + num_filter = float(filter_value) + if operator == FilterOperator.GREATER_THAN: + return num_cell > num_filter + elif operator == FilterOperator.LESS_THAN: + return num_cell < num_filter + elif operator == FilterOperator.GREATER_THAN_OR_EQUAL: + return num_cell >= num_filter + elif operator == FilterOperator.LESS_THAN_OR_EQUAL: + return num_cell <= num_filter + except ValueError: + # Fall back to string comparison + if operator == FilterOperator.GREATER_THAN: + return compare_cell > compare_filter + elif operator == FilterOperator.LESS_THAN: + return compare_cell < compare_filter + elif operator == FilterOperator.GREATER_THAN_OR_EQUAL: + return compare_cell >= compare_filter + elif operator == FilterOperator.LESS_THAN_OR_EQUAL: + return compare_cell <= compare_filter + + return False + + +class GoogleSheetsFilterRowsBlock(Block): + """Filter rows in a Google Sheet based on column conditions.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + filter_column: str = SchemaField( + description="Column to filter on (header name or column letter like 'A', 'B')", + placeholder="Status", + ) + filter_value: str = SchemaField( + description="Value to filter by (not used for is_empty/is_not_empty operators)", + default="", + ) + operator: FilterOperator = SchemaField( + description="Filter comparison operator", + default=FilterOperator.EQUALS, + ) + match_case: bool = SchemaField( + description="Whether to match case in comparisons", + default=False, + ) + include_header: bool = SchemaField( + description="Include header row in output", + default=True, + ) + + class Output(BlockSchemaOutput): + rows: list[list[str]] = 
SchemaField( + description="Filtered rows (including header if requested)", + ) + row_indices: list[int] = SchemaField( + description="Original 1-based row indices of matching rows (useful for deletion)", + ) + count: int = SchemaField( + description="Number of matching rows (excluding header)", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="582195c2-ccee-4fc2-b646-18f72eb9906c", + description="Filter rows in a Google Sheet based on a column condition. Returns matching rows and their indices.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsFilterRowsBlock.Input, + output_schema=GoogleSheetsFilterRowsBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "filter_column": "Status", + "filter_value": "Active", + "operator": FilterOperator.EQUALS, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "rows", + [ + ["Name", "Status", "Score"], + ["Alice", "Active", "85"], + ["Charlie", "Active", "92"], + ], + ), + ("row_indices", [2, 4]), + ("count", 2), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_filter_rows": lambda *args, **kwargs: { + "rows": [ + ["Name", "Status", "Score"], + ["Alice", "Active", "85"], + ["Charlie", "Active", "92"], + ], + "row_indices": [2, 4], + "count": 2, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._filter_rows, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.filter_column, + input_data.filter_value, + input_data.operator, + input_data.match_case, + input_data.include_header, + ) + yield "rows", result["rows"] + yield "row_indices", result["row_indices"] + yield "count", result["count"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to filter rows: {str(e)}" + + def _filter_rows( + self, + service, + spreadsheet_id: str, + sheet_name: str, + filter_column: str, + filter_value: str, + operator: FilterOperator, + match_case: bool, + include_header: bool, + ) -> dict: + # Resolve sheet name + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = 
format_sheet_name(target_sheet) + + # Read all data from the sheet + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=formatted_sheet) + .execute() + ) + all_rows = result.get("values", []) + + if not all_rows: + return {"rows": [], "row_indices": [], "count": 0} + + header = all_rows[0] + data_rows = all_rows[1:] + + # Determine filter column index + filter_col_idx = -1 + + # First, try to match against header names (handles "ID", "No", "To", etc.) + for idx, col_name in enumerate(header): + if (match_case and col_name == filter_column) or ( + not match_case and col_name.lower() == filter_column.lower() + ): + filter_col_idx = idx + break + + # If no header match and looks like a column letter (A, B, AA, etc.), try that + if filter_col_idx < 0 and filter_column.isalpha() and len(filter_column) <= 2: + filter_col_idx = _column_letter_to_index(filter_column) + # Validate column letter is within data range + if filter_col_idx >= len(header): + raise ValueError( + f"Column '{filter_column}' (index {filter_col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + + if filter_col_idx < 0: + raise ValueError( + f"Column '{filter_column}' not found. Available columns: {header}" + ) + + # Filter rows + filtered_rows = [] + row_indices = [] + + for row_idx, row in enumerate(data_rows): + # Get cell value (handle rows shorter than filter column) + cell_value = row[filter_col_idx] if filter_col_idx < len(row) else "" + + if _apply_filter(str(cell_value), filter_value, operator, match_case): + filtered_rows.append(row) + row_indices.append(row_idx + 2) # +2 for 1-based index and header + + # Prepare output + output_rows = [] + if include_header: + output_rows.append(header) + output_rows.extend(filtered_rows) + + return { + "rows": output_rows, + "row_indices": row_indices, + "count": len(filtered_rows), + } + + +class GoogleSheetsLookupRowBlock(Block): + """Look up a row by matching a value in a column (VLOOKUP-style).""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + lookup_column: str = SchemaField( + description="Column to search in (header name or column letter)", + placeholder="ID", + ) + lookup_value: str = SchemaField( + description="Value to search for", + ) + return_columns: list[str] = SchemaField( + description="Columns to return (header names or letters). 
Empty = all columns.", + default=[], + ) + match_case: bool = SchemaField( + description="Whether to match case", + default=False, + ) + + class Output(BlockSchemaOutput): + row: list[str] = SchemaField( + description="The matching row (all or selected columns)", + ) + row_dict: dict[str, str] = SchemaField( + description="The matching row as a dictionary (header: value)", + ) + row_index: int = SchemaField( + description="1-based row index of the match", + ) + found: bool = SchemaField( + description="Whether a match was found", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="e58c0bad-6597-400c-9548-d151ec428ffc", + description="Look up a row by finding a value in a specific column. Returns the first matching row and optionally specific columns.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsLookupRowBlock.Input, + output_schema=GoogleSheetsLookupRowBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "lookup_column": "ID", + "lookup_value": "123", + "return_columns": ["Name", "Email"], + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("row", ["Alice", "alice@example.com"]), + ("row_dict", {"Name": "Alice", "Email": "alice@example.com"}), + ("row_index", 2), + ("found", True), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_lookup_row": lambda *args, **kwargs: { + "row": ["Alice", "alice@example.com"], + "row_dict": {"Name": "Alice", "Email": "alice@example.com"}, + "row_index": 2, + "found": True, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._lookup_row, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.lookup_column, + input_data.lookup_value, + input_data.return_columns, + input_data.match_case, + ) + yield "row", result["row"] + yield "row_dict", result["row_dict"] + yield "row_index", result["row_index"] + yield "found", result["found"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to lookup row: {str(e)}" + + def _lookup_row( + self, + service, + spreadsheet_id: str, + sheet_name: str, + lookup_column: str, + lookup_value: 
str, + return_columns: list[str], + match_case: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=formatted_sheet) + .execute() + ) + all_rows = result.get("values", []) + + if not all_rows: + return {"row": [], "row_dict": {}, "row_index": 0, "found": False} + + header = all_rows[0] + data_rows = all_rows[1:] + + # Find lookup column index - first try header name match, then column letter + lookup_col_idx = -1 + for idx, col_name in enumerate(header): + if (match_case and col_name == lookup_column) or ( + not match_case and col_name.lower() == lookup_column.lower() + ): + lookup_col_idx = idx + break + + # If no header match and looks like a column letter, try that + if lookup_col_idx < 0 and lookup_column.isalpha() and len(lookup_column) <= 2: + lookup_col_idx = _column_letter_to_index(lookup_column) + # Validate column letter is within data range + if lookup_col_idx >= len(header): + raise ValueError( + f"Column '{lookup_column}' (index {lookup_col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + + if lookup_col_idx < 0: + raise ValueError( + f"Lookup column '{lookup_column}' not found. Available: {header}" + ) + + # Find return column indices - first try header name match, then column letter + return_col_indices = [] + return_col_headers = [] + if return_columns: + for ret_col in return_columns: + found = False + # First try header name match + for idx, col_name in enumerate(header): + if (match_case and col_name == ret_col) or ( + not match_case and col_name.lower() == ret_col.lower() + ): + return_col_indices.append(idx) + return_col_headers.append(col_name) + found = True + break + + # If no header match and looks like a column letter, try that + if not found and ret_col.isalpha() and len(ret_col) <= 2: + idx = _column_letter_to_index(ret_col) + # Validate column letter is within data range + if idx >= len(header): + raise ValueError( + f"Return column '{ret_col}' (index {idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + return_col_indices.append(idx) + return_col_headers.append(header[idx]) + found = True + + if not found: + raise ValueError( + f"Return column '{ret_col}' not found. 
Available: {header}" + ) + else: + return_col_indices = list(range(len(header))) + return_col_headers = header + + # Search for matching row + compare_value = lookup_value if match_case else lookup_value.lower() + + for row_idx, row in enumerate(data_rows): + cell_value = row[lookup_col_idx] if lookup_col_idx < len(row) else "" + compare_cell = str(cell_value) if match_case else str(cell_value).lower() + + if compare_cell == compare_value: + # Found a match - extract requested columns + result_row = [] + result_dict = {} + for i, col_idx in enumerate(return_col_indices): + value = row[col_idx] if col_idx < len(row) else "" + result_row.append(value) + result_dict[return_col_headers[i]] = value + + return { + "row": result_row, + "row_dict": result_dict, + "row_index": row_idx + 2, + "found": True, + } + + return {"row": [], "row_dict": {}, "row_index": 0, "found": False} + + +class GoogleSheetsDeleteRowsBlock(Block): + """Delete rows from a Google Sheet by row indices.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + row_indices: list[int] = SchemaField( + description="1-based row indices to delete (e.g., [2, 5, 7])", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the delete operation", + ) + deleted_count: int = SchemaField( + description="Number of rows deleted", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="24bcd490-b02d-44c6-847d-b62a2319f5eb", + description="Delete specific rows from a Google Sheet by their row indices. 
Works well with FilterRowsBlock output.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsDeleteRowsBlock.Input, + output_schema=GoogleSheetsDeleteRowsBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "row_indices": [2, 5], + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ("deleted_count", 2), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_delete_rows": lambda *args, **kwargs: { + "success": True, + "deleted_count": 2, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._delete_rows, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.row_indices, + ) + yield "result", {"success": True} + yield "deleted_count", result["deleted_count"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to delete rows: {str(e)}" + + def _delete_rows( + self, + service, + spreadsheet_id: str, + sheet_name: str, + row_indices: list[int], + ) -> dict: + if not row_indices: + return {"success": True, "deleted_count": 0} + + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Deduplicate and sort row indices in descending order to delete from bottom to top + # Deduplication prevents deleting wrong rows if same index appears multiple times + sorted_indices = sorted(set(row_indices), reverse=True) + + # Build delete requests + requests = [] + for row_idx in sorted_indices: + # Convert to 0-based index + start_idx = row_idx - 1 + requests.append( + { + "deleteDimension": { + "range": { + "sheetId": sheet_id, + "dimension": "ROWS", + "startIndex": start_idx, + "endIndex": start_idx + 1, + } + } + } + ) + + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": requests} + ).execute() + + return {"success": True, "deleted_count": len(sorted_indices)} + + +class GoogleSheetsGetColumnBlock(Block): + """Get all values from a specific column by header name.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a 
Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + column: str = SchemaField( + description="Column to extract (header name or column letter like 'A', 'B')", + placeholder="Email", + ) + include_header: bool = SchemaField( + description="Include the header in output", + default=False, + ) + skip_empty: bool = SchemaField( + description="Skip empty cells", + default=False, + ) + + class Output(BlockSchemaOutput): + values: list[str] = SchemaField( + description="List of values from the column", + ) + count: int = SchemaField( + description="Number of values (excluding header if not included)", + ) + column_index: int = SchemaField( + description="0-based column index", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="108d911f-e109-47fb-addc-2259792ee850", + description="Extract all values from a specific column. Useful for getting a list of emails, IDs, or any single field.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsGetColumnBlock.Input, + output_schema=GoogleSheetsGetColumnBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "column": "Email", + "include_header": False, + "skip_empty": True, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "values", + ["alice@example.com", "bob@example.com", "charlie@example.com"], + ), + ("count", 3), + ("column_index", 2), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_get_column": lambda *args, **kwargs: { + "values": [ + "alice@example.com", + "bob@example.com", + "charlie@example.com", + ], + "count": 3, + "column_index": 2, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._get_column, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.column, + input_data.include_header, + input_data.skip_empty, + ) + yield "values", result["values"] + yield "count", result["count"] + yield "column_index", result["column_index"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + 
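# A minimal sketch (sample data invented here) of the extraction loop that
# _get_column below applies once the column index is resolved: ragged rows
# are padded with "", the header row is skipped unless requested, and blank
# cells are optionally dropped.
def extract_column(rows: list[list[str]], col_idx: int,
                   include_header: bool = False,
                   skip_empty: bool = False) -> list[str]:
    out: list[str] = []
    for row in rows[0 if include_header else 1:]:
        value = row[col_idx] if col_idx < len(row) else ""  # ragged-row guard
        if skip_empty and not str(value).strip():
            continue
        out.append(str(value))
    return out

rows = [["Name", "Email"], ["Alice", "alice@example.com"], ["Bob", ""]]
assert extract_column(rows, 1, skip_empty=True) == ["alice@example.com"]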
_credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to get column: {str(e)}" + + def _get_column( + self, + service, + spreadsheet_id: str, + sheet_name: str, + column: str, + include_header: bool, + skip_empty: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=formatted_sheet) + .execute() + ) + all_rows = result.get("values", []) + + if not all_rows: + return {"values": [], "count": 0, "column_index": -1} + + header = all_rows[0] + + # Find column index - first try header name match, then column letter + col_idx = -1 + for idx, col_name in enumerate(header): + if col_name.lower() == column.lower(): + col_idx = idx + break + + # If no header match and looks like a column letter, try that + if col_idx < 0 and column.isalpha() and len(column) <= 2: + col_idx = _column_letter_to_index(column) + # Validate column letter is within data range + if col_idx >= len(header): + raise ValueError( + f"Column '{column}' (index {col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + + if col_idx < 0: + raise ValueError( + f"Column '{column}' not found. Available columns: {header}" + ) + + # Extract column values + values = [] + start_row = 0 if include_header else 1 + + for row in all_rows[start_row:]: + value = row[col_idx] if col_idx < len(row) else "" + if skip_empty and not str(value).strip(): + continue + values.append(str(value)) + + return {"values": values, "count": len(values), "column_index": col_idx} + + +class GoogleSheetsSortBlock(Block): + """Sort a Google Sheet by one or more columns.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + sort_column: str = SchemaField( + description="Primary column to sort by (header name or column letter)", + placeholder="Date", + ) + sort_order: SortOrder = SchemaField( + description="Sort order for primary column", + default=SortOrder.ASCENDING, + ) + secondary_column: str = SchemaField( + description="Secondary column to sort by (optional)", + default="", + ) + secondary_order: SortOrder = SchemaField( + description="Sort order for secondary column", + default=SortOrder.ASCENDING, + ) + has_header: bool = SchemaField( + description="Whether the data has a header row (header won't be sorted)", + default=True, + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the sort operation", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="a265bd84-c93b-459d-bbe0-94e6addaa38f", + description="Sort a Google Sheet by one or two columns. 
The sheet is sorted in-place.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsSortBlock.Input, + output_schema=GoogleSheetsSortBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "sort_column": "Score", + "sort_order": SortOrder.DESCENDING, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_sort_sheet": lambda *args, **kwargs: {"success": True}, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._sort_sheet, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.sort_column, + input_data.sort_order, + input_data.secondary_column, + input_data.secondary_order, + input_data.has_header, + ) + yield "result", result + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to sort sheet: {str(e)}" + + def _sort_sheet( + self, + service, + spreadsheet_id: str, + sheet_name: str, + sort_column: str, + sort_order: SortOrder, + secondary_column: str, + secondary_order: SortOrder, + has_header: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Get sheet metadata to find column indices and grid properties + meta = service.spreadsheets().get(spreadsheetId=spreadsheet_id).execute() + sheet_meta = None + for sheet in meta.get("sheets", []): + if sheet.get("properties", {}).get("sheetId") == sheet_id: + sheet_meta = sheet + break + + if not sheet_meta: + raise ValueError(f"Could not find metadata for sheet '{target_sheet}'") + + grid_props = sheet_meta.get("properties", {}).get("gridProperties", {}) + row_count = grid_props.get("rowCount", 1000) + col_count = grid_props.get("columnCount", 26) + + # Get header to resolve column names + formatted_sheet = format_sheet_name(target_sheet) + header_result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=f"{formatted_sheet}!1:1") + .execute() + ) + header = ( + header_result.get("values", [[]])[0] if header_result.get("values") else [] + ) + + # Find primary sort column index - 
first try header name match, then column letter + sort_col_idx = -1 + for idx, col_name in enumerate(header): + if col_name.lower() == sort_column.lower(): + sort_col_idx = idx + break + + # If no header match and looks like a column letter, try that + if sort_col_idx < 0 and sort_column.isalpha() and len(sort_column) <= 2: + sort_col_idx = _column_letter_to_index(sort_column) + # Validate column letter is within data range + if sort_col_idx >= len(header): + raise ValueError( + f"Sort column '{sort_column}' (index {sort_col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + + if sort_col_idx < 0: + raise ValueError( + f"Sort column '{sort_column}' not found. Available: {header}" + ) + + # Build sort specs + sort_specs = [ + { + "dimensionIndex": sort_col_idx, + "sortOrder": ( + "ASCENDING" if sort_order == SortOrder.ASCENDING else "DESCENDING" + ), + } + ] + + # Add secondary sort if specified + if secondary_column: + sec_col_idx = -1 + # First try header name match + for idx, col_name in enumerate(header): + if col_name.lower() == secondary_column.lower(): + sec_col_idx = idx + break + + # If no header match and looks like a column letter, try that + if ( + sec_col_idx < 0 + and secondary_column.isalpha() + and len(secondary_column) <= 2 + ): + sec_col_idx = _column_letter_to_index(secondary_column) + # Validate column letter is within data range + if sec_col_idx >= len(header): + raise ValueError( + f"Secondary sort column '{secondary_column}' (index {sec_col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + + if sec_col_idx < 0: + raise ValueError( + f"Secondary sort column '{secondary_column}' not found. Available: {header}" + ) + + sort_specs.append( + { + "dimensionIndex": sec_col_idx, + "sortOrder": ( + "ASCENDING" + if secondary_order == SortOrder.ASCENDING + else "DESCENDING" + ), + } + ) + + # Build sort range request + start_row = 1 if has_header else 0 # Skip header if present + + request = { + "sortRange": { + "range": { + "sheetId": sheet_id, + "startRowIndex": start_row, + "endRowIndex": row_count, + "startColumnIndex": 0, + "endColumnIndex": col_count, + }, + "sortSpecs": sort_specs, + } + } + + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": [request]} + ).execute() + + return {"success": True} + + +class GoogleSheetsGetUniqueValuesBlock(Block): + """Get unique values from a column.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + column: str = SchemaField( + description="Column to get unique values from (header name or column letter)", + placeholder="Category", + ) + include_count: bool = SchemaField( + description="Include count of each unique value", + default=False, + ) + sort_by_count: bool = SchemaField( + description="Sort results by count (most frequent first)", + default=False, + ) + + class Output(BlockSchemaOutput): + values: list[str] = SchemaField( + description="List of unique values", + ) + counts: dict[str, int] = SchemaField( + description="Count of each unique value (if include_count is True)", + ) + 
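# A hedged sketch of the batchUpdate payload that _sort_sheet above assembles;
# sheet_id, row_count, and col_count are placeholders, and build_sort_request
# is an illustrative helper, not part of this module.
def build_sort_request(sheet_id: int, row_count: int, col_count: int,
                       specs: list[tuple[int, bool]], has_header: bool) -> dict:
    return {
        "sortRange": {
            "range": {
                "sheetId": sheet_id,
                "startRowIndex": 1 if has_header else 0,  # keep header in place
                "endRowIndex": row_count,
                "startColumnIndex": 0,
                "endColumnIndex": col_count,
            },
            "sortSpecs": [
                {"dimensionIndex": idx,
                 "sortOrder": "ASCENDING" if ascending else "DESCENDING"}
                for idx, ascending in specs
            ],
        }
    }

# Primary sort: column 2 descending; tie-break: column 0 ascending.
request = build_sort_request(0, 1000, 26, [(2, False), (0, True)], has_header=True)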
total_unique: int = SchemaField( + description="Total number of unique values", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="0f296c0b-6b6e-4280-b96e-ae1459b98dff", + description="Get unique values from a column. Useful for building dropdown options or finding distinct categories.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsGetUniqueValuesBlock.Input, + output_schema=GoogleSheetsGetUniqueValuesBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "column": "Status", + "include_count": True, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("values", ["Active", "Inactive", "Pending"]), + ("counts", {"Active": 5, "Inactive": 3, "Pending": 2}), + ("total_unique", 3), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_get_unique_values": lambda *args, **kwargs: { + "values": ["Active", "Inactive", "Pending"], + "counts": {"Active": 5, "Inactive": 3, "Pending": 2}, + "total_unique": 3, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._get_unique_values, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.column, + input_data.include_count, + input_data.sort_by_count, + ) + yield "values", result["values"] + yield "counts", result["counts"] + yield "total_unique", result["total_unique"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to get unique values: {str(e)}" + + def _get_unique_values( + self, + service, + spreadsheet_id: str, + sheet_name: str, + column: str, + include_count: bool, + sort_by_count: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=formatted_sheet) + .execute() + ) + all_rows = result.get("values", []) + + if not all_rows: + return {"values": [], "counts": {}, "total_unique": 0} + + header = all_rows[0] + + # Find column index - first try header name match, then column letter + col_idx = -1 + for idx, col_name in 
enumerate(header): + if col_name.lower() == column.lower(): + col_idx = idx + break + + # If no header match and looks like a column letter, try that + if col_idx < 0 and column.isalpha() and len(column) <= 2: + col_idx = _column_letter_to_index(column) + # Validate column letter is within data range + if col_idx >= len(header): + raise ValueError( + f"Column '{column}' (index {col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + + if col_idx < 0: + raise ValueError( + f"Column '{column}' not found. Available columns: {header}" + ) + + # Count values + value_counts: dict[str, int] = {} + for row in all_rows[1:]: # Skip header + value = str(row[col_idx]) if col_idx < len(row) else "" + if value.strip(): # Skip empty values + value_counts[value] = value_counts.get(value, 0) + 1 + + # Sort values + if sort_by_count: + sorted_items = sorted(value_counts.items(), key=lambda x: -x[1]) + unique_values = [item[0] for item in sorted_items] + else: + unique_values = sorted(value_counts.keys()) + + return { + "values": unique_values, + "counts": value_counts if include_count else {}, + "total_unique": len(unique_values), + } + + +class GoogleSheetsInsertRowBlock(Block): + """Insert a single row at a specific position in a Google Sheet.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + row: list[str] = SchemaField( + description="Row values to insert (e.g., ['Alice', 'alice@example.com', '25'])", + ) + row_index: int = SchemaField( + description="1-based row index where to insert (existing rows shift down)", + placeholder="2", + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + value_input_option: ValueInputOption = SchemaField( + description="How values are interpreted. USER_ENTERED: parsed like typed input (e.g., '=SUM(A1:A5)' becomes a formula, '1/2/2024' becomes a date). RAW: stored as-is without parsing.", + default=ValueInputOption.USER_ENTERED, + advanced=True, + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField(description="Result of the insert operation") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="03eda5df-8080-4ed1-bfdf-212f543d657e", + description="Insert a single row at a specific position. 
Existing rows shift down.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsInsertRowBlock.Input, + output_schema=GoogleSheetsInsertRowBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "row": ["New", "Row", "Data"], + "row_index": 3, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_insert_row": lambda *args, **kwargs: {"success": True}, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + if not input_data.row: + yield "error", "Row data is required" + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._insert_row, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.row_index, + input_data.row, + input_data.value_input_option, + ) + yield "result", result + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to insert row: {str(e)}" + + def _insert_row( + self, + service, + spreadsheet_id: str, + sheet_name: str, + row_index: int, + row: list[str], + value_input_option: ValueInputOption, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + start_idx = row_index - 1 # Convert to 0-based + + # First, insert an empty row + insert_request = { + "insertDimension": { + "range": { + "sheetId": sheet_id, + "dimension": "ROWS", + "startIndex": start_idx, + "endIndex": start_idx + 1, + }, + "inheritFromBefore": start_idx > 0, + } + } + + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": [insert_request]} + ).execute() + + # Then, write the values + formatted_sheet = format_sheet_name(target_sheet) + write_range = f"{formatted_sheet}!A{row_index}" + + service.spreadsheets().values().update( + spreadsheetId=spreadsheet_id, + range=write_range, + valueInputOption=value_input_option.value, + body={"values": [row]}, # Wrap single row in list for API + ).execute() + + return {"success": True} + + +class GoogleSheetsAddColumnBlock(Block): + """Add a new column with a header to a Google Sheet.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = 
GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + header: str = SchemaField( + description="Header name for the new column", + placeholder="New Column", + ) + position: str = SchemaField( + description="Where to add: 'end' for last column, or column letter (e.g., 'C') to insert before", + default="end", + ) + default_value: str = SchemaField( + description="Default value to fill in all data rows (optional). Requires existing data rows.", + default="", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the operation", + ) + column_letter: str = SchemaField( + description="Letter of the new column (e.g., 'D')", + ) + column_index: int = SchemaField( + description="0-based index of the new column", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="cac51050-fc9e-4e63-987a-66c2ba2a127b", + description="Add a new column with a header. Can add at the end or insert at a specific position.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsAddColumnBlock.Input, + output_schema=GoogleSheetsAddColumnBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "header": "New Status", + "position": "end", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ("column_letter", "D"), + ("column_index", 3), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_add_column": lambda *args, **kwargs: { + "success": True, + "column_letter": "D", + "column_index": 3, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._add_column, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.header, + input_data.position, + input_data.default_value, + ) + yield "result", {"success": True} + yield "column_letter", result["column_letter"] + yield "column_index", result["column_index"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + 
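# _column_letter_to_index and _index_to_column_letter are defined earlier in
# this file (outside this hunk); the following is a plausible base-26 sketch
# of their behavior, under local names so as not to claim the exact
# implementation:
def letter_to_index(letters: str) -> int:
    idx = 0
    for ch in letters.upper():
        idx = idx * 26 + (ord(ch) - ord("A") + 1)
    return idx - 1  # 0-based: "A" -> 0, "Z" -> 25, "AA" -> 26

def index_to_letter(idx: int) -> str:
    out = ""
    idx += 1
    while idx:
        idx, rem = divmod(idx - 1, 26)
        out = chr(ord("A") + rem) + out
    return out

assert letter_to_index("AA") == 26
assert index_to_letter(3) == "D"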
_credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to add column: {str(e)}" + + def _add_column( + self, + service, + spreadsheet_id: str, + sheet_name: str, + header: str, + position: str, + default_value: str, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Get current data to determine column count and row count + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=formatted_sheet) + .execute() + ) + all_rows = result.get("values", []) + current_col_count = max(len(row) for row in all_rows) if all_rows else 0 + row_count = len(all_rows) + + # Determine target column index + if position.lower() == "end": + col_idx = current_col_count + elif position.isalpha() and len(position) <= 2: + col_idx = _column_letter_to_index(position) + # Insert a new column at this position + insert_request = { + "insertDimension": { + "range": { + "sheetId": sheet_id, + "dimension": "COLUMNS", + "startIndex": col_idx, + "endIndex": col_idx + 1, + }, + "inheritFromBefore": col_idx > 0, + } + } + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": [insert_request]} + ).execute() + else: + raise ValueError( + f"Invalid position: '{position}'. Use 'end' or a column letter." + ) + + col_letter = _index_to_column_letter(col_idx) + + # Write header + header_range = f"{formatted_sheet}!{col_letter}1" + service.spreadsheets().values().update( + spreadsheetId=spreadsheet_id, + range=header_range, + valueInputOption="USER_ENTERED", + body={"values": [[header]]}, + ).execute() + + # Fill default value if provided and there are data rows + if default_value and row_count > 1: + values_to_fill = [[default_value]] * (row_count - 1) + data_range = f"{formatted_sheet}!{col_letter}2:{col_letter}{row_count}" + service.spreadsheets().values().update( + spreadsheetId=spreadsheet_id, + range=data_range, + valueInputOption="USER_ENTERED", + body={"values": values_to_fill}, + ).execute() + + return { + "success": True, + "column_letter": col_letter, + "column_index": col_idx, + } + + +class GoogleSheetsGetRowCountBlock(Block): + """Get the number of rows in a Google Sheet.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + include_header: bool = SchemaField( + description="Include header row in count", + default=True, + ) + count_empty: bool = SchemaField( + description="Count rows with only empty cells", + default=False, + ) + + class Output(BlockSchemaOutput): + total_rows: int = SchemaField( + description="Total number of rows", + ) + data_rows: int = SchemaField( + description="Number of data rows (excluding header)", + ) + last_row: int = SchemaField( + description="1-based index of the last row with data", + ) + column_count: int = SchemaField( + description="Number of columns", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + 
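# A sketch of the fill-range arithmetic _add_column uses above: with N rows of
# existing data (row 1 being the header), the default value is written to rows
# 2..N of the new column, one single-cell row per data row. Names are invented
# and format_sheet_name quoting is elided.
def default_fill_range(sheet: str, col_letter: str, row_count: int) -> str:
    return f"{sheet}!{col_letter}2:{col_letter}{row_count}"

values_to_fill = [["n/a"]] * (101 - 1)  # 100 data rows below the header
assert default_fill_range("Sheet1", "D", 101) == "Sheet1!D2:D101"
assert len(values_to_fill) == 100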
error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="080cc84b-a94a-4fb4-90e3-dcc55ee783af", + description="Get row count and dimensions of a Google Sheet. Useful for knowing where data ends.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsGetRowCountBlock.Input, + output_schema=GoogleSheetsGetRowCountBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("total_rows", 101), + ("data_rows", 100), + ("last_row", 101), + ("column_count", 5), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_get_row_count": lambda *args, **kwargs: { + "total_rows": 101, + "data_rows": 100, + "last_row": 101, + "column_count": 5, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._get_row_count, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.include_header, + input_data.count_empty, + ) + yield "total_rows", result["total_rows"] + yield "data_rows", result["data_rows"] + yield "last_row", result["last_row"] + yield "column_count", result["column_count"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to get row count: {str(e)}" + + def _get_row_count( + self, + service, + spreadsheet_id: str, + sheet_name: str, + include_header: bool, + count_empty: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=formatted_sheet) + .execute() + ) + all_rows = result.get("values", []) + + if not all_rows: + return { + "total_rows": 0, + "data_rows": 0, + "last_row": 0, + "column_count": 0, + } + + # Count non-empty rows + if count_empty: + total_rows = len(all_rows) + last_row = total_rows + else: + # Find last row with actual data + last_row = 0 + for idx, row in enumerate(all_rows): + if any(str(cell).strip() for cell in row): + last_row = idx + 1 + total_rows = last_row + + data_rows = total_rows - 1 if total_rows > 0 else 0 + if not include_header: + total_rows = data_rows + + column_count = max(len(row) for row in all_rows) if all_rows 
else 0 + + return { + "total_rows": total_rows, + "data_rows": data_rows, + "last_row": last_row, + "column_count": column_count, + } + + +class GoogleSheetsRemoveDuplicatesBlock(Block): + """Remove duplicate rows from a Google Sheet based on specified columns.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + columns: list[str] = SchemaField( + description="Columns to check for duplicates (header names or letters). Empty = all columns.", + default=[], + ) + keep: str = SchemaField( + description="Which duplicate to keep: 'first' or 'last'", + default="first", + ) + match_case: bool = SchemaField( + description="Whether to match case when comparing", + default=False, + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the operation", + ) + removed_count: int = SchemaField( + description="Number of duplicate rows removed", + ) + remaining_rows: int = SchemaField( + description="Number of rows remaining", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="6eb50ff7-205b-400e-8ecc-1ce8d50075be", + description="Remove duplicate rows based on specified columns. Keeps either the first or last occurrence.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsRemoveDuplicatesBlock.Input, + output_schema=GoogleSheetsRemoveDuplicatesBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "columns": ["Email"], + "keep": "first", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ("removed_count", 5), + ("remaining_rows", 95), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_remove_duplicates": lambda *args, **kwargs: { + "success": True, + "removed_count": 5, + "remaining_rows": 95, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._remove_duplicates, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.columns, + input_data.keep, + input_data.match_case, + ) + yield "result", {"success": True} + yield "removed_count", result["removed_count"] + yield "remaining_rows", result["remaining_rows"] + yield "spreadsheet", 
GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to remove duplicates: {str(e)}" + + def _remove_duplicates( + self, + service, + spreadsheet_id: str, + sheet_name: str, + columns: list[str], + keep: str, + match_case: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Read all data + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=formatted_sheet) + .execute() + ) + all_rows = result.get("values", []) + + if len(all_rows) <= 1: # Only header or empty + return { + "success": True, + "removed_count": 0, + "remaining_rows": len(all_rows), + } + + header = all_rows[0] + data_rows = all_rows[1:] + + # Determine which column indices to use for comparison + # First try header name match, then column letter + if columns: + col_indices = [] + for col in columns: + found = False + # First try header name match + for idx, col_name in enumerate(header): + if col_name.lower() == col.lower(): + col_indices.append(idx) + found = True + break + + # If no header match and looks like a column letter, try that + if not found and col.isalpha() and len(col) <= 2: + col_idx = _column_letter_to_index(col) + # Validate column letter is within data range + if col_idx >= len(header): + raise ValueError( + f"Column '{col}' (index {col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + col_indices.append(col_idx) + found = True + + if not found: + raise ValueError( + f"Column '{col}' not found in sheet. 
" + f"Available columns: {', '.join(header)}" + ) + else: + col_indices = list(range(len(header))) + + # Find duplicates + seen: dict[tuple, int] = {} + rows_to_delete: list[int] = [] + + for row_idx, row in enumerate(data_rows): + # Build key from specified columns + key_parts = [] + for col_idx in col_indices: + value = str(row[col_idx]) if col_idx < len(row) else "" + if not match_case: + value = value.lower() + key_parts.append(value) + key = tuple(key_parts) + + if key in seen: + if keep == "first": + # Delete this row (keep the first one we saw) + rows_to_delete.append(row_idx + 2) # +2 for 1-based and header + else: + # Delete the previous row, then update seen to keep this one + prev_row = seen[key] + rows_to_delete.append(prev_row) + seen[key] = row_idx + 2 + else: + seen[key] = row_idx + 2 + + if not rows_to_delete: + return { + "success": True, + "removed_count": 0, + "remaining_rows": len(all_rows), + } + + # Sort in descending order to delete from bottom to top + rows_to_delete = sorted(set(rows_to_delete), reverse=True) + + # Delete rows + requests = [] + for row_idx in rows_to_delete: + start_idx = row_idx - 1 + requests.append( + { + "deleteDimension": { + "range": { + "sheetId": sheet_id, + "dimension": "ROWS", + "startIndex": start_idx, + "endIndex": start_idx + 1, + } + } + } + ) + + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": requests} + ).execute() + + remaining = len(all_rows) - len(rows_to_delete) + return { + "success": True, + "removed_count": len(rows_to_delete), + "remaining_rows": remaining, + } + + +class GoogleSheetsUpdateRowBlock(Block): + """Update a specific row by index with new values.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + row_index: int = SchemaField( + description="1-based row index to update", + ) + values: list[str] = SchemaField( + description="New values for the row (in column order)", + default=[], + ) + dict_values: dict[str, str] = SchemaField( + description="Values as dict with column headers as keys (alternative to values)", + default={}, + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the update operation", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="b8a934d5-fca0-4be3-9fc2-a99bf63bd385", + description="Update a specific row by its index. 
Can use list or dict format for values.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsUpdateRowBlock.Input, + output_schema=GoogleSheetsUpdateRowBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "row_index": 5, + "dict_values": {"Name": "Updated Name", "Status": "Active"}, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True, "updatedCells": 2}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_update_row": lambda *args, **kwargs: { + "success": True, + "updatedCells": 2, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + if not input_data.values and not input_data.dict_values: + yield "error", "Either values or dict_values must be provided" + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._update_row, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.row_index, + input_data.values, + input_data.dict_values, + ) + yield "result", result + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to update row: {str(e)}" + + def _update_row( + self, + service, + spreadsheet_id: str, + sheet_name: str, + row_index: int, + values: list[str], + dict_values: dict[str, str], + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + + if dict_values: + # Get header to map column names to indices + header_result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=f"{formatted_sheet}!1:1") + .execute() + ) + header = ( + header_result.get("values", [[]])[0] + if header_result.get("values") + else [] + ) + + # Get current row values + row_range = f"{formatted_sheet}!{row_index}:{row_index}" + current_result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=row_range) + .execute() + ) + current_row = ( + current_result.get("values", [[]])[0] + if current_result.get("values") + else [] + ) + + # Extend current row to match header length + while len(current_row) < len(header): + current_row.append("") + + # Update specific columns from dict - validate all column names first + for col_name in dict_values.keys(): + found = False + for h in header: + if h.lower() == 
col_name.lower(): + found = True + break + if not found: + raise ValueError( + f"Column '{col_name}' not found in sheet. " + f"Available columns: {', '.join(header)}" + ) + + # Now apply updates + updated_count = 0 + for col_name, value in dict_values.items(): + for idx, h in enumerate(header): + if h.lower() == col_name.lower(): + current_row[idx] = value + updated_count += 1 + break + + values = current_row + else: + updated_count = len(values) + + # Write the row + write_range = f"{formatted_sheet}!A{row_index}" + service.spreadsheets().values().update( + spreadsheetId=spreadsheet_id, + range=write_range, + valueInputOption="USER_ENTERED", + body={"values": [values]}, + ).execute() + + return {"success": True, "updatedCells": updated_count} + + +class GoogleSheetsGetRowBlock(Block): + """Get a specific row by its index.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + row_index: int = SchemaField( + description="1-based row index to retrieve", + ) + + class Output(BlockSchemaOutput): + row: list[str] = SchemaField( + description="The row values as a list", + ) + row_dict: dict[str, str] = SchemaField( + description="The row as a dictionary (header: value)", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="c4be9390-2431-4682-9769-7025b22a5fa7", + description="Get a specific row by its index. 
Returns both list and dict formats.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsGetRowBlock.Input, + output_schema=GoogleSheetsGetRowBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "row_index": 3, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("row", ["Alice", "Active", "85"]), + ("row_dict", {"Name": "Alice", "Status": "Active", "Score": "85"}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_get_row": lambda *args, **kwargs: { + "row": ["Alice", "Active", "85"], + "row_dict": {"Name": "Alice", "Status": "Active", "Score": "85"}, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._get_row, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.row_index, + ) + yield "row", result["row"] + yield "row_dict", result["row_dict"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to get row: {str(e)}" + + def _get_row( + self, + service, + spreadsheet_id: str, + sheet_name: str, + row_index: int, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + + # Get header + header_result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=f"{formatted_sheet}!1:1") + .execute() + ) + header = ( + header_result.get("values", [[]])[0] if header_result.get("values") else [] + ) + + # Get the row + row_range = f"{formatted_sheet}!{row_index}:{row_index}" + row_result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=row_range) + .execute() + ) + row = row_result.get("values", [[]])[0] if row_result.get("values") else [] + + # Build dictionary + row_dict = {} + for idx, h in enumerate(header): + row_dict[h] = row[idx] if idx < len(row) else "" + + return {"row": row, "row_dict": row_dict} + + +class GoogleSheetsDeleteColumnBlock(Block): + """Delete a column from a Google Sheet.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + 
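# A minimal sketch (sample values invented) of how _get_row above builds its
# dict output: the header is zipped with a possibly-shorter row, because the
# Sheets API omits trailing empty cells from a fetched row.
header = ["Name", "Status", "Score"]
row = ["Alice", "Active"]  # trailing empty cell omitted by the API
row_dict = {h: (row[i] if i < len(row) else "") for i, h in enumerate(header)}
assert row_dict == {"Name": "Alice", "Status": "Active", "Score": ""}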
allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + column: str = SchemaField( + description="Column to delete (header name or column letter like 'A', 'B')", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the delete operation", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="59b266b6-5cce-4661-a1d3-c417e64d68e9", + description="Delete a column by header name or column letter.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsDeleteColumnBlock.Input, + output_schema=GoogleSheetsDeleteColumnBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "column": "Status", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_delete_column": lambda *args, **kwargs: {"success": True}, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._delete_column, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.column, + ) + yield "result", result + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to delete column: {str(e)}" + + def _delete_column( + self, + service, + spreadsheet_id: str, + sheet_name: str, + column: str, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + formatted_sheet = format_sheet_name(target_sheet) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Get header to find column by name or validate column letter + header_result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=f"{formatted_sheet}!1:1") + .execute() + ) + header = ( + header_result.get("values", [[]])[0] if header_result.get("values") else [] + ) + + # Find column index - first try header name match, then column letter + col_idx = -1 + for idx, h in 
enumerate(header): + if h.lower() == column.lower(): + col_idx = idx + break + + # If no header match and looks like a column letter, try that + if col_idx < 0 and column.isalpha() and len(column) <= 2: + col_idx = _column_letter_to_index(column) + # Validate column letter is within data range + if col_idx >= len(header): + raise ValueError( + f"Column '{column}' (index {col_idx}) is out of range. " + f"Sheet only has {len(header)} columns (A-{_index_to_column_letter(len(header) - 1)})." + ) + + if col_idx < 0: + raise ValueError(f"Column '{column}' not found") + + # Delete the column + request = { + "deleteDimension": { + "range": { + "sheetId": sheet_id, + "dimension": "COLUMNS", + "startIndex": col_idx, + "endIndex": col_idx + 1, + } + } + } + + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": [request]} + ).execute() + + return {"success": True} + + +class GoogleSheetsCreateNamedRangeBlock(Block): + """Create a named range in a Google Sheet.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + name: str = SchemaField( + description="Name for the range (e.g., 'SalesData', 'CustomerList')", + placeholder="MyNamedRange", + ) + range: str = SchemaField( + description="Cell range in A1 notation (e.g., 'A1:D10', 'B2:B100')", + placeholder="A1:D10", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the operation", + ) + named_range_id: str = SchemaField( + description="ID of the created named range", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="a2707376-8016-494b-98c4-d0e2752ab9cb", + description="Create a named range to reference cells by name instead of A1 notation.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsCreateNamedRangeBlock.Input, + output_schema=GoogleSheetsCreateNamedRangeBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "name": "SalesData", + "range": "A1:D10", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ("named_range_id", "nr_12345"), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_create_named_range": lambda *args, **kwargs: { + "success": True, + "named_range_id": "nr_12345", + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = 
_validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._create_named_range, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.name, + input_data.range, + ) + yield "result", {"success": True} + yield "named_range_id", result["named_range_id"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to create named range: {str(e)}" + + def _create_named_range( + self, + service, + spreadsheet_id: str, + sheet_name: str, + name: str, + range_str: str, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Parse range to get grid coordinates + # Handle both "A1:D10" and "Sheet1!A1:D10" formats + if "!" in range_str: + range_str = range_str.split("!")[1] + + # Parse start and end cells + match = re.match(r"([A-Z]+)(\d+):([A-Z]+)(\d+)", range_str.upper()) + if not match: + raise ValueError(f"Invalid range format: {range_str}") + + start_col = _column_letter_to_index(match.group(1)) + start_row = int(match.group(2)) - 1 # 0-based + end_col = _column_letter_to_index(match.group(3)) + 1 # exclusive + end_row = int(match.group(4)) # exclusive (already 1-based becomes 0-based + 1) + + request = { + "addNamedRange": { + "namedRange": { + "name": name, + "range": { + "sheetId": sheet_id, + "startRowIndex": start_row, + "endRowIndex": end_row, + "startColumnIndex": start_col, + "endColumnIndex": end_col, + }, + } + } + } + + result = ( + service.spreadsheets() + .batchUpdate(spreadsheetId=spreadsheet_id, body={"requests": [request]}) + .execute() + ) + + # Extract the named range ID from the response + named_range_id = "" + replies = result.get("replies", []) + if replies and "addNamedRange" in replies[0]: + named_range_id = replies[0]["addNamedRange"]["namedRange"]["namedRangeId"] + + return {"success": True, "named_range_id": named_range_id} + + +class GoogleSheetsListNamedRangesBlock(Block): + """List all named ranges in a Google Sheet.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + + class Output(BlockSchemaOutput): + named_ranges: list[dict] = SchemaField( + description="List of named ranges with name, id, and range info", + ) + count: int = SchemaField( + description="Number of named ranges", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="b81a9d27-3997-4860-9303-cc68086db13a", + description="List all named ranges in a spreadsheet.", + categories={BlockCategory.DATA}, + 
input_schema=GoogleSheetsListNamedRangesBlock.Input, + output_schema=GoogleSheetsListNamedRangesBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "named_ranges", + [ + {"name": "SalesData", "id": "nr_1", "range": "Sheet1!A1:D10"}, + { + "name": "CustomerList", + "id": "nr_2", + "range": "Sheet1!E1:F50", + }, + ], + ), + ("count", 2), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_list_named_ranges": lambda *args, **kwargs: { + "named_ranges": [ + {"name": "SalesData", "id": "nr_1", "range": "Sheet1!A1:D10"}, + { + "name": "CustomerList", + "id": "nr_2", + "range": "Sheet1!E1:F50", + }, + ], + "count": 2, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._list_named_ranges, + service, + input_data.spreadsheet.id, + ) + yield "named_ranges", result["named_ranges"] + yield "count", result["count"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to list named ranges: {str(e)}" + + def _list_named_ranges( + self, + service, + spreadsheet_id: str, + ) -> dict: + # Get spreadsheet metadata including named ranges + meta = service.spreadsheets().get(spreadsheetId=spreadsheet_id).execute() + + named_ranges_list = [] + named_ranges = meta.get("namedRanges", []) + + # Get sheet names for reference + sheets = { + sheet["properties"]["sheetId"]: sheet["properties"]["title"] + for sheet in meta.get("sheets", []) + } + + for nr in named_ranges: + range_info = nr.get("range", {}) + sheet_id = range_info.get("sheetId", 0) + sheet_name = sheets.get(sheet_id, "Sheet1") + + # Convert grid range back to A1 notation + start_col = _index_to_column_letter(range_info.get("startColumnIndex", 0)) + end_col = _index_to_column_letter(range_info.get("endColumnIndex", 1) - 1) + start_row = range_info.get("startRowIndex", 0) + 1 + end_row = range_info.get("endRowIndex", 1) + + range_str = f"{sheet_name}!{start_col}{start_row}:{end_col}{end_row}" + + named_ranges_list.append( + { + "name": nr.get("name", ""), + "id": nr.get("namedRangeId", ""), + "range": range_str, + } + ) + + return {"named_ranges": named_ranges_list, "count": len(named_ranges_list)} + + +class GoogleSheetsAddDropdownBlock(Block): + """Add a dropdown 
(data validation) to cells.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + range: str = SchemaField( + description="Cell range to add dropdown to (e.g., 'B2:B100')", + placeholder="B2:B100", + ) + options: list[str] = SchemaField( + description="List of dropdown options", + ) + strict: bool = SchemaField( + description="Reject input not in the list", + default=True, + ) + show_dropdown: bool = SchemaField( + description="Show dropdown arrow in cells", + default=True, + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the operation", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="725431c9-71ba-4fce-b829-5a3e495a8a88", + description="Add a dropdown list (data validation) to cells. Useful for enforcing valid inputs.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsAddDropdownBlock.Input, + output_schema=GoogleSheetsAddDropdownBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "range": "B2:B100", + "options": ["Active", "Inactive", "Pending"], + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_add_dropdown": lambda *args, **kwargs: {"success": True}, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + if not input_data.options: + yield "error", "Options list cannot be empty" + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._add_dropdown, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.range, + input_data.options, + input_data.strict, + input_data.show_dropdown, + ) + yield "result", result + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to add dropdown: {str(e)}" + + def _add_dropdown( + 
self, + service, + spreadsheet_id: str, + sheet_name: str, + range_str: str, + options: list[str], + strict: bool, + show_dropdown: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Parse range + if "!" in range_str: + range_str = range_str.split("!")[1] + + match = re.match(r"([A-Z]+)(\d+):([A-Z]+)(\d+)", range_str.upper()) + if not match: + raise ValueError(f"Invalid range format: {range_str}") + + start_col = _column_letter_to_index(match.group(1)) + start_row = int(match.group(2)) - 1 + end_col = _column_letter_to_index(match.group(3)) + 1 + end_row = int(match.group(4)) + + # Build condition values + condition_values = [{"userEnteredValue": opt} for opt in options] + + request = { + "setDataValidation": { + "range": { + "sheetId": sheet_id, + "startRowIndex": start_row, + "endRowIndex": end_row, + "startColumnIndex": start_col, + "endColumnIndex": end_col, + }, + "rule": { + "condition": { + "type": "ONE_OF_LIST", + "values": condition_values, + }, + "strict": strict, + "showCustomUi": show_dropdown, + }, + } + } + + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": [request]} + ).execute() + + return {"success": True} + + +class GoogleSheetsCopyToSpreadsheetBlock(Block): + """Copy a sheet to another spreadsheet.""" + + class Input(BlockSchemaInput): + source_spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Source Spreadsheet", + description="Select the source spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + source_sheet_name: str = SchemaField( + description="Sheet to copy (optional, defaults to first sheet)", + default="", + ) + destination_spreadsheet_id: str = SchemaField( + description="ID of the destination spreadsheet", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the copy operation", + ) + new_sheet_id: int = SchemaField( + description="ID of the new sheet in the destination", + ) + new_sheet_name: str = SchemaField( + description="Name of the new sheet", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The source spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="740eec3f-2b51-4e95-b87f-22ce2acafdfa", + description="Copy a sheet from one spreadsheet to another.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsCopyToSpreadsheetBlock.Input, + output_schema=GoogleSheetsCopyToSpreadsheetBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "source_spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Source Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "destination_spreadsheet_id": "dest_spreadsheet_id_123", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ("new_sheet_id", 12345), + ("new_sheet_name", "Copy of Sheet1"), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Source Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + 
iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_copy_to_spreadsheet": lambda *args, **kwargs: { + "success": True, + "new_sheet_id": 12345, + "new_sheet_name": "Copy of Sheet1", + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.source_spreadsheet: + yield "error", "No source spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.source_spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._copy_to_spreadsheet, + service, + input_data.source_spreadsheet.id, + input_data.source_sheet_name, + input_data.destination_spreadsheet_id, + ) + yield "result", {"success": True} + yield "new_sheet_id", result["new_sheet_id"] + yield "new_sheet_name", result["new_sheet_name"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.source_spreadsheet.id, + name=input_data.source_spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.source_spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.source_spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to copy sheet: {str(e)}" + + def _copy_to_spreadsheet( + self, + service, + source_spreadsheet_id: str, + source_sheet_name: str, + destination_spreadsheet_id: str, + ) -> dict: + target_sheet = resolve_sheet_name( + service, source_spreadsheet_id, source_sheet_name or None + ) + sheet_id = sheet_id_by_name(service, source_spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + result = ( + service.spreadsheets() + .sheets() + .copyTo( + spreadsheetId=source_spreadsheet_id, + sheetId=sheet_id, + body={"destinationSpreadsheetId": destination_spreadsheet_id}, + ) + .execute() + ) + + return { + "success": True, + "new_sheet_id": result.get("sheetId", 0), + "new_sheet_name": result.get("title", ""), + } + + +class GoogleSheetsProtectRangeBlock(Block): + """Protect a range from editing.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="Select a Google Sheets spreadsheet", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + description="Sheet name (optional, defaults to first sheet)", + default="", + ) + range: str = SchemaField( + description="Cell range to protect (e.g., 'A1:D10'). 
Leave empty to protect entire sheet.", + default="", + ) + description: str = SchemaField( + description="Description for the protected range", + default="Protected by automation", + ) + warning_only: bool = SchemaField( + description="Show warning but allow editing (vs blocking completely)", + default=False, + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField( + description="Result of the operation", + ) + protection_id: int = SchemaField( + description="ID of the protection", + ) + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining", + ) + error: str = SchemaField(description="Error message if any") + + def __init__(self): + super().__init__( + id="d0e4f5d1-76e7-4082-9be8-e656ec1f432d", + description="Protect a cell range or entire sheet from editing.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsProtectRangeBlock.Input, + output_schema=GoogleSheetsProtectRangeBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "range": "A1:D10", + "description": "Header row protection", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ("protection_id", 12345), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_protect_range": lambda *args, **kwargs: { + "success": True, + "protection_id": 12345, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._protect_range, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.range, + input_data.description, + input_data.warning_only, + ) + yield "result", {"success": True} + yield "protection_id", result["protection_id"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to protect range: {str(e)}" + + def _protect_range( + self, + service, + spreadsheet_id: str, + sheet_name: str, + range_str: str, + description: str, + warning_only: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + sheet_id = sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + protected_range: dict = {"sheetId": sheet_id} + + if range_str: + # Parse specific range + if "!" 
in range_str: + range_str = range_str.split("!")[1] + + match = re.match(r"([A-Z]+)(\d+):([A-Z]+)(\d+)", range_str.upper()) + if not match: + raise ValueError(f"Invalid range format: {range_str}") + + protected_range["startRowIndex"] = int(match.group(2)) - 1 + protected_range["endRowIndex"] = int(match.group(4)) + protected_range["startColumnIndex"] = _column_letter_to_index( + match.group(1) + ) + protected_range["endColumnIndex"] = ( + _column_letter_to_index(match.group(3)) + 1 + ) + + request = { + "addProtectedRange": { + "protectedRange": { + "range": protected_range, + "description": description, + "warningOnly": warning_only, + } + } + } + + result = ( + service.spreadsheets() + .batchUpdate(spreadsheetId=spreadsheet_id, body={"requests": [request]}) + .execute() + ) + + protection_id = 0 + replies = result.get("replies", []) + if replies and "addProtectedRange" in replies[0]: + protection_id = replies[0]["addProtectedRange"]["protectedRange"][ + "protectedRangeId" + ] + + return {"success": True, "protection_id": protection_id} + + +class GoogleSheetsExportCsvBlock(Block): + """Export a sheet as CSV data.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="The spreadsheet to export from", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + sheet_name: str = SchemaField( + default="", + description="Name of the sheet to export. Defaults to first sheet.", + ) + include_headers: bool = SchemaField( + default=True, + description="Include the first row (headers) in the CSV output", + ) + + class Output(BlockSchemaOutput): + csv_data: str = SchemaField(description="The sheet data as CSV string") + row_count: int = SchemaField(description="Number of rows exported") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining" + ) + error: str = SchemaField(description="Error message if export failed") + + def __init__(self): + super().__init__( + id="2617e68a-43b3-441f-8b11-66bb041105b8", + description="Export a Google Sheet as CSV data", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsExportCsvBlock.Input, + output_schema=GoogleSheetsExportCsvBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("csv_data", "Name,Email,Status\nJohn,john@test.com,Active\n"), + ("row_count", 2), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_export_csv": lambda *args, **kwargs: { + "csv_data": "Name,Email,Status\nJohn,john@test.com,Active\n", + "row_count": 2, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", 
validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._export_csv, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.include_headers, + ) + yield "csv_data", result["csv_data"] + yield "row_count", result["row_count"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to export CSV: {str(e)}" + + def _export_csv( + self, + service, + spreadsheet_id: str, + sheet_name: str, + include_headers: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + range_name = f"'{target_sheet}'" + + result = ( + service.spreadsheets() + .values() + .get(spreadsheetId=spreadsheet_id, range=range_name) + .execute() + ) + + rows = result.get("values", []) + + # Skip header row if not including headers + if not include_headers and rows: + rows = rows[1:] + + output = io.StringIO() + writer = csv.writer(output) + for row in rows: + writer.writerow(row) + + csv_data = output.getvalue() + return {"csv_data": csv_data, "row_count": len(rows)} + + +class GoogleSheetsImportCsvBlock(Block): + """Import CSV data into a sheet.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="The spreadsheet to import into", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + csv_data: str = SchemaField(description="CSV data to import") + sheet_name: str = SchemaField( + default="", + description="Name of the sheet. 
Defaults to first sheet.", + ) + start_cell: str = SchemaField( + default="A1", + description="Cell to start importing at (e.g., A1, B2)", + ) + clear_existing: bool = SchemaField( + default=False, + description="Clear existing data before importing", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField(description="Import result") + rows_imported: int = SchemaField(description="Number of rows imported") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining" + ) + error: str = SchemaField(description="Error message if import failed") + + def __init__(self): + super().__init__( + id="cb992884-1ff2-450a-8f1b-7650d63e3aa0", + description="Import CSV data into a Google Sheet", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsImportCsvBlock.Input, + output_schema=GoogleSheetsImportCsvBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "csv_data": "Name,Email,Status\nJohn,john@test.com,Active\n", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ("rows_imported", 2), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_import_csv": lambda *args, **kwargs: { + "success": True, + "rows_imported": 2, + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._import_csv, + service, + input_data.spreadsheet.id, + input_data.csv_data, + input_data.sheet_name, + input_data.start_cell, + input_data.clear_existing, + ) + yield "result", {"success": True} + yield "rows_imported", result["rows_imported"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to import CSV: {str(e)}" + + def _import_csv( + self, + service, + spreadsheet_id: str, + csv_data: str, + sheet_name: str, + start_cell: str, + clear_existing: bool, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + + # Parse CSV data + reader = csv.reader(io.StringIO(csv_data)) + rows = list(reader) + + if not rows: + return {"success": True, "rows_imported": 0} + + # Clear existing data if requested + if clear_existing: + service.spreadsheets().values().clear( + spreadsheetId=spreadsheet_id, + range=f"'{target_sheet}'", + ).execute() + + # Write data + range_name = 
f"'{target_sheet}'!{start_cell}" + service.spreadsheets().values().update( + spreadsheetId=spreadsheet_id, + range=range_name, + valueInputOption="RAW", + body={"values": rows}, + ).execute() + + return {"success": True, "rows_imported": len(rows)} + + +class GoogleSheetsAddNoteBlock(Block): + """Add a note (comment) to a cell.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="The spreadsheet to add note to", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + cell: str = SchemaField( + description="Cell to add note to (e.g., A1, B2)", + ) + note: str = SchemaField(description="Note text to add") + sheet_name: str = SchemaField( + default="", + description="Name of the sheet. Defaults to first sheet.", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField(description="Result of the operation") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining" + ) + error: str = SchemaField(description="Error message if operation failed") + + def __init__(self): + super().__init__( + id="774ac529-74f9-41da-bbba-6a06a51a5d7e", + description="Add a note to a cell in a Google Sheet", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsAddNoteBlock.Input, + output_schema=GoogleSheetsAddNoteBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "cell": "A1", + "note": "This is a test note", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_add_note": lambda *args, **kwargs: {"success": True}, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + await asyncio.to_thread( + self._add_note, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.cell, + input_data.note, + ) + yield "result", {"success": True} + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to add note: {str(e)}" + + def _add_note( + self, + service, + spreadsheet_id: str, + sheet_name: str, + cell: str, + note: str, + ) -> dict: + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + sheet_id = 
sheet_id_by_name(service, spreadsheet_id, target_sheet) + + if sheet_id is None: + raise ValueError(f"Sheet '{target_sheet}' not found") + + # Parse cell reference + match = re.match(r"([A-Z]+)(\d+)", cell.upper()) + if not match: + raise ValueError(f"Invalid cell reference: {cell}") + + col_index = _column_letter_to_index(match.group(1)) + row_index = int(match.group(2)) - 1 + + request = { + "updateCells": { + "rows": [{"values": [{"note": note}]}], + "fields": "note", + "start": { + "sheetId": sheet_id, + "rowIndex": row_index, + "columnIndex": col_index, + }, + } + } + + service.spreadsheets().batchUpdate( + spreadsheetId=spreadsheet_id, body={"requests": [request]} + ).execute() + + return {"success": True} + + +class GoogleSheetsGetNotesBlock(Block): + """Get notes from cells in a range.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="The spreadsheet to get notes from", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + range: str = SchemaField( + default="A1:Z100", + description="Range to get notes from (e.g., A1:B10)", + ) + sheet_name: str = SchemaField( + default="", + description="Name of the sheet. Defaults to first sheet.", + ) + + class Output(BlockSchemaOutput): + notes: list[dict] = SchemaField(description="List of notes with cell and text") + count: int = SchemaField(description="Number of notes found") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining" + ) + error: str = SchemaField(description="Error message if operation failed") + + def __init__(self): + super().__init__( + id="fa16834f-fff4-4d7a-9f7f-531ced90492b", + description="Get notes from cells in a Google Sheet", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsGetNotesBlock.Input, + output_schema=GoogleSheetsGetNotesBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "notes", + [ + {"cell": "A1", "note": "Header note"}, + {"cell": "B2", "note": "Data note"}, + ], + ), + ("count", 2), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_get_notes": lambda *args, **kwargs: { + "notes": [ + {"cell": "A1", "note": "Header note"}, + {"cell": "B2", "note": "Data note"}, + ], + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_sheets_service(credentials) + result = await asyncio.to_thread( + self._get_notes, + service, + input_data.spreadsheet.id, + input_data.sheet_name, + input_data.range, + ) + notes = result["notes"] + yield "notes", notes + yield "count", len(notes) + 
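# Re-yield the source spreadsheet so downstream blocks can chain on the same file. +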
yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to get notes: {str(e)}" + + def _get_notes( + self, + service, + spreadsheet_id: str, + sheet_name: str, + range_str: str, + ) -> dict: + + target_sheet = resolve_sheet_name(service, spreadsheet_id, sheet_name or None) + full_range = f"'{target_sheet}'!{range_str}" + + # Get spreadsheet data including notes + result = ( + service.spreadsheets() + .get( + spreadsheetId=spreadsheet_id, + ranges=[full_range], + includeGridData=True, + ) + .execute() + ) + + notes = [] + sheets = result.get("sheets", []) + + for sheet in sheets: + data = sheet.get("data", []) + for grid_data in data: + start_row = grid_data.get("startRow", 0) + start_col = grid_data.get("startColumn", 0) + row_data = grid_data.get("rowData", []) + + for row_idx, row in enumerate(row_data): + values = row.get("values", []) + for col_idx, cell in enumerate(values): + note = cell.get("note") + if note: + col_letter = _index_to_column_letter(start_col + col_idx) + cell_ref = f"{col_letter}{start_row + row_idx + 1}" + notes.append({"cell": cell_ref, "note": note}) + + return {"notes": notes} + + +class GoogleSheetsShareSpreadsheetBlock(Block): + """Share a spreadsheet with specific users or make it accessible.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="The spreadsheet to share", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + email: str = SchemaField( + default="", + description="Email address to share with. 
Leave empty for link sharing.", + ) + role: ShareRole = SchemaField( + default=ShareRole.READER, + description="Permission role for the user", + ) + send_notification: bool = SchemaField( + default=True, + description="Send notification email to the user", + ) + message: str = SchemaField( + default="", + description="Optional message to include in notification email", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField(description="Result of the share operation") + share_link: str = SchemaField(description="Link to the spreadsheet") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining" + ) + error: str = SchemaField(description="Error message if share failed") + + def __init__(self): + super().__init__( + id="3e47e8ac-511a-4eb6-89c5-a6bcedc4236f", + description="Share a Google Spreadsheet with users or get shareable link", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsShareSpreadsheetBlock.Input, + output_schema=GoogleSheetsShareSpreadsheetBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "email": "test@example.com", + "role": "reader", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True}), + ( + "share_link", + "https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + ), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_share_spreadsheet": lambda *args, **kwargs: { + "success": True, + "share_link": "https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_drive_service(credentials) + result = await asyncio.to_thread( + self._share_spreadsheet, + service, + input_data.spreadsheet.id, + input_data.email, + input_data.role, + input_data.send_notification, + input_data.message, + ) + yield "result", {"success": True} + yield "share_link", result["share_link"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to share spreadsheet: {str(e)}" + + def _share_spreadsheet( + self, + service, + spreadsheet_id: str, + email: str, + role: ShareRole, + send_notification: bool, + message: str, + ) -> dict: + share_link = f"https://docs.google.com/spreadsheets/d/{spreadsheet_id}/edit" + + if email: + # Share with 
specific user + permission = {"type": "user", "role": role.value, "emailAddress": email} + + kwargs: dict = { + "fileId": spreadsheet_id, + "body": permission, + "sendNotificationEmail": send_notification, + } + if message: + kwargs["emailMessage"] = message + + service.permissions().create(**kwargs).execute() + else: + # Get shareable link - use reader or commenter only (writer not allowed for "anyone") + link_role = "reader" if role == ShareRole.WRITER else role.value + permission = {"type": "anyone", "role": link_role} + service.permissions().create( + fileId=spreadsheet_id, body=permission + ).execute() + share_link += "?usp=sharing" + + return {"success": True, "share_link": share_link} + + +class GoogleSheetsSetPublicAccessBlock(Block): + """Make a spreadsheet publicly accessible or private.""" + + class Input(BlockSchemaInput): + spreadsheet: GoogleDriveFile = GoogleDriveFileField( + title="Spreadsheet", + description="The spreadsheet to modify access for", + credentials_kwarg="credentials", + allowed_views=["SPREADSHEETS"], + allowed_mime_types=["application/vnd.google-apps.spreadsheet"], + ) + public: bool = SchemaField( + default=True, + description="True to make public, False to make private", + ) + role: PublicAccessRole = SchemaField( + default=PublicAccessRole.READER, + description="Permission role for public access", + ) + + class Output(BlockSchemaOutput): + result: dict = SchemaField(description="Result of the operation") + share_link: str = SchemaField(description="Link to the spreadsheet") + spreadsheet: GoogleDriveFile = SchemaField( + description="The spreadsheet for chaining" + ) + error: str = SchemaField(description="Error message if operation failed") + + def __init__(self): + super().__init__( + id="d08d46cd-088b-4ba7-a545-45050f33b889", + description="Make a Google Spreadsheet public or private", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsSetPublicAccessBlock.Input, + output_schema=GoogleSheetsSetPublicAccessBlock.Output, + disabled=GOOGLE_SHEETS_DISABLED, + test_input={ + "spreadsheet": { + "id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "name": "Test Spreadsheet", + "mimeType": "application/vnd.google-apps.spreadsheet", + }, + "public": True, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"success": True, "is_public": True}), + ( + "share_link", + "https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit?usp=sharing", + ), + ( + "spreadsheet", + GoogleDriveFile( + id="1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + name="Test Spreadsheet", + mimeType="application/vnd.google-apps.spreadsheet", + url="https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=None, + ), + ), + ], + test_mock={ + "_set_public_access": lambda *args, **kwargs: { + "success": True, + "is_public": True, + "share_link": "https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit?usp=sharing", + }, + }, + ) + + async def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + if not input_data.spreadsheet: + yield "error", "No spreadsheet selected" + return + + validation_error = _validate_spreadsheet_file(input_data.spreadsheet) + if validation_error: + yield "error", validation_error + return + + try: + service = _build_drive_service(credentials) + result = await asyncio.to_thread( + 
self._set_public_access, + service, + input_data.spreadsheet.id, + input_data.public, + input_data.role, + ) + yield "result", {"success": True, "is_public": result["is_public"]} + yield "share_link", result["share_link"] + yield "spreadsheet", GoogleDriveFile( + id=input_data.spreadsheet.id, + name=input_data.spreadsheet.name, + mimeType="application/vnd.google-apps.spreadsheet", + url=f"https://docs.google.com/spreadsheets/d/{input_data.spreadsheet.id}/edit", + iconUrl="https://www.gstatic.com/images/branding/product/1x/sheets_48dp.png", + isFolder=False, + _credentials_id=input_data.spreadsheet.credentials_id, + ) + except Exception as e: + yield "error", f"Failed to set public access: {str(e)}" + + def _set_public_access( + self, + service, + spreadsheet_id: str, + public: bool, + role: PublicAccessRole, + ) -> dict: + share_link = f"https://docs.google.com/spreadsheets/d/{spreadsheet_id}/edit" + + if public: + # Make public + permission = {"type": "anyone", "role": role.value} + service.permissions().create( + fileId=spreadsheet_id, body=permission + ).execute() + share_link += "?usp=sharing" + else: + # Make private - remove 'anyone' permissions + permissions = service.permissions().list(fileId=spreadsheet_id).execute() + for perm in permissions.get("permissions", []): + if perm.get("type") == "anyone": + service.permissions().delete( + fileId=spreadsheet_id, permissionId=perm["id"] + ).execute() + + return {"success": True, "is_public": public, "share_link": share_link} diff --git a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py index 42c98b5146..13c9fb31db 100644 --- a/autogpt_platform/backend/backend/blocks/human_in_the_loop.py +++ b/autogpt_platform/backend/backend/blocks/human_in_the_loop.py @@ -1,5 +1,5 @@ import logging -from typing import Any, Literal +from typing import Any from prisma.enums import ReviewStatus @@ -45,11 +45,11 @@ class HumanInTheLoopBlock(Block): ) class Output(BlockSchemaOutput): - reviewed_data: Any = SchemaField( - description="The data after human review (may be modified)" + approved_data: Any = SchemaField( + description="The data when approved (may be modified by reviewer)" ) - status: Literal["approved", "rejected"] = SchemaField( - description="Status of the review: 'approved' or 'rejected'" + rejected_data: Any = SchemaField( + description="The data when rejected (may be modified by reviewer)" ) review_message: str = SchemaField( description="Any message provided by the reviewer", default="" @@ -69,8 +69,7 @@ class HumanInTheLoopBlock(Block): "editable": True, }, test_output=[ - ("status", "approved"), - ("reviewed_data", {"name": "John Doe", "age": 30}), + ("approved_data", {"name": "John Doe", "age": 30}), ], test_mock={ "get_or_create_human_review": lambda *_args, **_kwargs: ReviewResult( @@ -116,8 +115,7 @@ class HumanInTheLoopBlock(Block): logger.info( f"HITL block skipping review for node {node_exec_id} - safe mode disabled" ) - yield "status", "approved" - yield "reviewed_data", input_data.data + yield "approved_data", input_data.data yield "review_message", "Auto-approved (safe mode disabled)" return @@ -158,12 +156,11 @@ class HumanInTheLoopBlock(Block): ) if result.status == ReviewStatus.APPROVED: - yield "status", "approved" - yield "reviewed_data", result.data + yield "approved_data", result.data if result.message: yield "review_message", result.message elif result.status == ReviewStatus.REJECTED: - yield "status", "rejected" + yield "rejected_data", 
result.data if result.message: yield "review_message", result.message diff --git a/autogpt_platform/backend/backend/blocks/smart_decision_maker.py b/autogpt_platform/backend/backend/blocks/smart_decision_maker.py index 5f9d562e60..e2e5cfa3e4 100644 --- a/autogpt_platform/backend/backend/blocks/smart_decision_maker.py +++ b/autogpt_platform/backend/backend/blocks/smart_decision_maker.py @@ -1,8 +1,11 @@ import logging import re from collections import Counter +from concurrent.futures import Future from typing import TYPE_CHECKING, Any +from pydantic import BaseModel + import backend.blocks.llm as llm from backend.blocks.agent import AgentExecutorBlock from backend.data.block import ( @@ -20,16 +23,41 @@ from backend.data.dynamic_fields import ( is_dynamic_field, is_tool_pin, ) +from backend.data.execution import ExecutionContext from backend.data.model import NodeExecutionStats, SchemaField from backend.util import json from backend.util.clients import get_database_manager_async_client +from backend.util.prompt import MAIN_OBJECTIVE_PREFIX if TYPE_CHECKING: from backend.data.graph import Link, Node + from backend.executor.manager import ExecutionProcessor logger = logging.getLogger(__name__) +class ToolInfo(BaseModel): + """Processed tool call information.""" + + tool_call: Any # The original tool call object from LLM response + tool_name: str # The function name + tool_def: dict[str, Any] # The tool definition from tool_functions + input_data: dict[str, Any] # Processed input data ready for tool execution + field_mapping: dict[str, str] # Field name mapping for the tool + + +class ExecutionParams(BaseModel): + """Tool execution parameters.""" + + user_id: str + graph_id: str + node_id: str + graph_version: int + graph_exec_id: str + node_exec_id: str + execution_context: "ExecutionContext" + + def _get_tool_requests(entry: dict[str, Any]) -> list[str]: """ Return a list of tool_call_ids if the entry is a tool request. @@ -105,6 +133,50 @@ def _create_tool_response(call_id: str, output: Any) -> dict[str, Any]: return {"role": "tool", "tool_call_id": call_id, "content": content} +def _combine_tool_responses(tool_outputs: list[dict[str, Any]]) -> list[dict[str, Any]]: + """ + Combine multiple Anthropic tool responses into a single user message. + For non-Anthropic formats, returns the original list unchanged. + """ + if len(tool_outputs) <= 1: + return tool_outputs + + # Anthropic responses have role="user", type="message", and content is a list with tool_result items + anthropic_responses = [ + output + for output in tool_outputs + if ( + output.get("role") == "user" + and output.get("type") == "message" + and isinstance(output.get("content"), list) + and any( + item.get("type") == "tool_result" + for item in output.get("content", []) + if isinstance(item, dict) + ) + ) + ] + + if len(anthropic_responses) > 1: + combined_content = [ + item for response in anthropic_responses for item in response["content"] + ] + + combined_response = { + "role": "user", + "type": "message", + "content": combined_content, + } + + non_anthropic_responses = [ + output for output in tool_outputs if output not in anthropic_responses + ] + + return [combined_response] + non_anthropic_responses + + return tool_outputs + + def _convert_raw_response_to_dict(raw_response: Any) -> dict[str, Any]: """ Safely convert raw_response to dictionary format for conversation history. 
@@ -204,6 +276,17 @@ class SmartDecisionMakerBlock(Block): default="localhost:11434", description="Ollama host for local models", ) + agent_mode_max_iterations: int = SchemaField( + title="Agent Mode Max Iterations", + description="Maximum iterations for agent mode. 0 = traditional mode (single LLM call, yield tool calls for external execution), -1 = infinite agent mode (loop until finished), 1+ = agent mode with max iterations limit.", + advanced=True, + default=0, + ) + conversation_compaction: bool = SchemaField( + default=True, + title="Context window auto-compaction", + description="Automatically compact the context window once it hits the limit", + ) @classmethod def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]: @@ -506,6 +589,7 @@ class SmartDecisionMakerBlock(Block): Returns the response if successful, raises ValueError if validation fails. """ resp = await llm.llm_call( + compress_prompt_to_fit=input_data.conversation_compaction, credentials=credentials, llm_model=input_data.model, prompt=current_prompt, @@ -593,6 +677,291 @@ class SmartDecisionMakerBlock(Block): return resp + def _process_tool_calls( + self, response, tool_functions: list[dict[str, Any]] + ) -> list[ToolInfo]: + """Process tool calls and extract tool definitions, arguments, and input data. + + Returns a list of tool info dicts with: + - tool_call: The original tool call object + - tool_name: The function name + - tool_def: The tool definition from tool_functions + - input_data: Processed input data dict (includes None values) + - field_mapping: Field name mapping for the tool + """ + if not response.tool_calls: + return [] + + processed_tools = [] + for tool_call in response.tool_calls: + tool_name = tool_call.function.name + tool_args = json.loads(tool_call.function.arguments) + + tool_def = next( + ( + tool + for tool in tool_functions + if tool["function"]["name"] == tool_name + ), + None, + ) + if not tool_def: + if len(tool_functions) == 1: + tool_def = tool_functions[0] + else: + continue + + # Build input data for the tool + input_data = {} + field_mapping = tool_def["function"].get("_field_mapping", {}) + if "function" in tool_def and "parameters" in tool_def["function"]: + expected_args = tool_def["function"]["parameters"].get("properties", {}) + for clean_arg_name in expected_args: + original_field_name = field_mapping.get( + clean_arg_name, clean_arg_name + ) + arg_value = tool_args.get(clean_arg_name) + # Include all expected parameters, even if None (for backward compatibility with tests) + input_data[original_field_name] = arg_value + + processed_tools.append( + ToolInfo( + tool_call=tool_call, + tool_name=tool_name, + tool_def=tool_def, + input_data=input_data, + field_mapping=field_mapping, + ) + ) + + return processed_tools + + def _update_conversation( + self, prompt: list[dict], response, tool_outputs: list | None = None + ): + """Update conversation history with response and tool outputs.""" + # Don't add separate reasoning message with tool calls (breaks Anthropic's tool_use->tool_result pairing) + assistant_message = _convert_raw_response_to_dict(response.raw_response) + has_tool_calls = isinstance(assistant_message.get("content"), list) and any( + item.get("type") == "tool_use" + for item in assistant_message.get("content", []) + ) + + if response.reasoning and not has_tool_calls: + prompt.append( + {"role": "assistant", "content": f"[Reasoning]: {response.reasoning}"} + ) + + prompt.append(assistant_message) + + if tool_outputs: + prompt.extend(tool_outputs) + 
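+ # A compressed view (pseudocode) of the control flow implemented by
+ # _execute_tools_agent_mode below, matching the agent_mode_max_iterations
+ # field above (0 = traditional single call, -1 = loop until the model stops
+ # requesting tools, N > 0 = bounded loop):
+ #
+ #     iteration = 0
+ #     while max_iterations < 0 or iteration < max_iterations:
+ #         iteration += 1
+ #         response = llm_call(...)  # one decision per iteration
+ #         if not tool_calls(response):
+ #             return  # yields "finished" with the model's final answer
+ #         execute_tools_and_extend_prompt(response)
+ 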
+ async def _execute_single_tool_with_manager( + self, + tool_info: ToolInfo, + execution_params: ExecutionParams, + execution_processor: "ExecutionProcessor", + ) -> dict: + """Execute a single tool using the execution manager for proper integration.""" + # Lazy imports to avoid circular dependencies + from backend.data.execution import NodeExecutionEntry + + tool_call = tool_info.tool_call + tool_def = tool_info.tool_def + raw_input_data = tool_info.input_data + + # Get sink node and field mapping + sink_node_id = tool_def["function"]["_sink_node_id"] + + # Use proper database operations for tool execution + db_client = get_database_manager_async_client() + + # Get target node + target_node = await db_client.get_node(sink_node_id) + if not target_node: + raise ValueError(f"Target node {sink_node_id} not found") + + # Create proper node execution using upsert_execution_input + node_exec_result = None + final_input_data = None + + # Add all inputs to the execution + if not raw_input_data: + raise ValueError(f"Tool call has no input data: {tool_call}") + + for input_name, input_value in raw_input_data.items(): + node_exec_result, final_input_data = await db_client.upsert_execution_input( + node_id=sink_node_id, + graph_exec_id=execution_params.graph_exec_id, + input_name=input_name, + input_data=input_value, + ) + + assert node_exec_result is not None, "node_exec_result should not be None" + + # Create NodeExecutionEntry for execution manager + node_exec_entry = NodeExecutionEntry( + user_id=execution_params.user_id, + graph_exec_id=execution_params.graph_exec_id, + graph_id=execution_params.graph_id, + graph_version=execution_params.graph_version, + node_exec_id=node_exec_result.node_exec_id, + node_id=sink_node_id, + block_id=target_node.block_id, + inputs=final_input_data or {}, + execution_context=execution_params.execution_context, + ) + + # Use the execution manager to execute the tool node + try: + # Get NodeExecutionProgress from the execution manager's running nodes + node_exec_progress = execution_processor.running_node_execution[ + sink_node_id + ] + + # Use the execution manager's own graph stats + graph_stats_pair = ( + execution_processor.execution_stats, + execution_processor.execution_stats_lock, + ) + + # Create a completed future for the task tracking system + node_exec_future = Future() + node_exec_progress.add_task( + node_exec_id=node_exec_result.node_exec_id, + task=node_exec_future, + ) + + # Execute the node directly since we're in the SmartDecisionMaker context + node_exec_future.set_result( + await execution_processor.on_node_execution( + node_exec=node_exec_entry, + node_exec_progress=node_exec_progress, + nodes_input_masks=None, + graph_stats_pair=graph_stats_pair, + ) + ) + + # Get outputs from database after execution completes using database manager client + node_outputs = await db_client.get_execution_outputs_by_node_exec_id( + node_exec_result.node_exec_id + ) + + # Create tool response + tool_response_content = ( + json.dumps(node_outputs) + if node_outputs + else "Tool executed successfully" + ) + return _create_tool_response(tool_call.id, tool_response_content) + + except Exception as e: + logger.error(f"Tool execution with manager failed: {e}") + # Return error response + return _create_tool_response( + tool_call.id, f"Tool execution failed: {str(e)}" + ) + + async def _execute_tools_agent_mode( + self, + input_data, + credentials, + tool_functions: list[dict[str, Any]], + prompt: list[dict], + graph_exec_id: str, + node_id: str, + node_exec_id: str, + 
user_id: str, + graph_id: str, + graph_version: int, + execution_context: ExecutionContext, + execution_processor: "ExecutionProcessor", + ): + """Execute tools in agent mode with a loop until finished.""" + max_iterations = input_data.agent_mode_max_iterations + iteration = 0 + + # Execution parameters for tool execution + execution_params = ExecutionParams( + user_id=user_id, + graph_id=graph_id, + node_id=node_id, + graph_version=graph_version, + graph_exec_id=graph_exec_id, + node_exec_id=node_exec_id, + execution_context=execution_context, + ) + + current_prompt = list(prompt) + + while max_iterations < 0 or iteration < max_iterations: + iteration += 1 + logger.debug(f"Agent mode iteration {iteration}") + + # Prepare prompt for this iteration + iteration_prompt = list(current_prompt) + + # On the last iteration, add a special system message to encourage completion + if max_iterations > 0 and iteration == max_iterations: + last_iteration_message = { + "role": "system", + "content": f"{MAIN_OBJECTIVE_PREFIX}This is your last iteration ({iteration}/{max_iterations}). " + "Try to complete the task with the information you have. If you cannot fully complete it, " + "provide a summary of what you've accomplished and what remains to be done. " + "Prefer finishing with a clear response rather than making additional tool calls.", + } + iteration_prompt.append(last_iteration_message) + + # Get LLM response + try: + response = await self._attempt_llm_call_with_validation( + credentials, input_data, iteration_prompt, tool_functions + ) + except Exception as e: + yield "error", f"LLM call failed in agent mode iteration {iteration}: {str(e)}" + return + + # Process tool calls + processed_tools = self._process_tool_calls(response, tool_functions) + + # If no tool calls, we're done + if not processed_tools: + yield "finished", response.response + self._update_conversation(current_prompt, response) + yield "conversations", current_prompt + return + + # Execute tools and collect responses + tool_outputs = [] + for tool_info in processed_tools: + try: + tool_response = await self._execute_single_tool_with_manager( + tool_info, execution_params, execution_processor + ) + tool_outputs.append(tool_response) + except Exception as e: + logger.error(f"Tool execution failed: {e}") + # Create error response for the tool + error_response = _create_tool_response( + tool_info.tool_call.id, f"Error: {str(e)}" + ) + tool_outputs.append(error_response) + + tool_outputs = _combine_tool_responses(tool_outputs) + + self._update_conversation(current_prompt, response, tool_outputs) + + # Yield intermediate conversation state + yield "conversations", current_prompt + + # If we reach max iterations, yield the current state + if max_iterations < 0: + yield "finished", f"Agent mode completed after {iteration} iterations" + else: + yield "finished", f"Agent mode completed after {max_iterations} iterations (limit reached)" + yield "conversations", current_prompt + async def run( self, input_data: Input, @@ -603,8 +972,12 @@ class SmartDecisionMakerBlock(Block): graph_exec_id: str, node_exec_id: str, user_id: str, + graph_version: int, + execution_context: ExecutionContext, + execution_processor: "ExecutionProcessor", **kwargs, ) -> BlockOutput: + tool_functions = await self._create_tool_node_signatures(node_id) yield "tool_functions", json.dumps(tool_functions) @@ -648,24 +1021,52 @@ class SmartDecisionMakerBlock(Block): input_data.prompt = llm.fmt.format_string(input_data.prompt, values) input_data.sys_prompt = 
llm.fmt.format_string(input_data.sys_prompt, values) - prefix = "[Main Objective Prompt]: " - if input_data.sys_prompt and not any( - p["role"] == "system" and p["content"].startswith(prefix) for p in prompt + p["role"] == "system" and p["content"].startswith(MAIN_OBJECTIVE_PREFIX) + for p in prompt ): - prompt.append({"role": "system", "content": prefix + input_data.sys_prompt}) + prompt.append( + { + "role": "system", + "content": MAIN_OBJECTIVE_PREFIX + input_data.sys_prompt, + } + ) if input_data.prompt and not any( - p["role"] == "user" and p["content"].startswith(prefix) for p in prompt + p["role"] == "user" and p["content"].startswith(MAIN_OBJECTIVE_PREFIX) + for p in prompt ): - prompt.append({"role": "user", "content": prefix + input_data.prompt}) + prompt.append( + {"role": "user", "content": MAIN_OBJECTIVE_PREFIX + input_data.prompt} + ) + # Execute tools based on the selected mode + if input_data.agent_mode_max_iterations != 0: + # In agent mode, execute tools directly in a loop until finished + async for result in self._execute_tools_agent_mode( + input_data=input_data, + credentials=credentials, + tool_functions=tool_functions, + prompt=prompt, + graph_exec_id=graph_exec_id, + node_id=node_id, + node_exec_id=node_exec_id, + user_id=user_id, + graph_id=graph_id, + graph_version=graph_version, + execution_context=execution_context, + execution_processor=execution_processor, + ): + yield result + return + + # One-off mode: single LLM call and yield tool calls for external execution current_prompt = list(prompt) max_attempts = max(1, int(input_data.retry)) response = None last_error = None - for attempt in range(max_attempts): + for _ in range(max_attempts): try: response = await self._attempt_llm_call_with_validation( credentials, input_data, current_prompt, tool_functions diff --git a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py index 9d649e36c5..29f572d0d6 100644 --- a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py +++ b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker.py @@ -1,7 +1,11 @@ import logging +import threading +from collections import defaultdict +from unittest.mock import AsyncMock, MagicMock, patch import pytest +from backend.data.execution import ExecutionContext from backend.data.model import ProviderName, User from backend.server.model import CreateGraph from backend.server.rest_api import AgentServer @@ -17,10 +21,10 @@ async def create_graph(s: SpinTestServer, g, u: User): async def create_credentials(s: SpinTestServer, u: User): - import backend.blocks.llm as llm + import backend.blocks.llm as llm_module provider = ProviderName.OPENAI - credentials = llm.TEST_CREDENTIALS + credentials = llm_module.TEST_CREDENTIALS return await s.agent_server.test_create_credentials(u.id, provider, credentials) @@ -196,8 +200,6 @@ async def test_smart_decision_maker_function_signature(server: SpinTestServer): @pytest.mark.asyncio async def test_smart_decision_maker_tracks_llm_stats(): """Test that SmartDecisionMakerBlock correctly tracks LLM usage stats.""" - from unittest.mock import MagicMock, patch - import backend.blocks.llm as llm_module from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock @@ -216,7 +218,6 @@ async def test_smart_decision_maker_tracks_llm_stats(): } # Mock the _create_tool_node_signatures method to avoid database calls - from unittest.mock import AsyncMock with patch( 
"backend.blocks.llm.llm_call", @@ -234,10 +235,19 @@ async def test_smart_decision_maker_tracks_llm_stats(): prompt="Should I continue with this task?", model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=0, ) # Execute the block outputs = {} + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + async for output_name, output_data in block.run( input_data, credentials=llm_module.TEST_CREDENTIALS, @@ -246,6 +256,9 @@ async def test_smart_decision_maker_tracks_llm_stats(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_data @@ -263,8 +276,6 @@ async def test_smart_decision_maker_tracks_llm_stats(): @pytest.mark.asyncio async def test_smart_decision_maker_parameter_validation(): """Test that SmartDecisionMakerBlock correctly validates tool call parameters.""" - from unittest.mock import MagicMock, patch - import backend.blocks.llm as llm_module from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock @@ -311,8 +322,6 @@ async def test_smart_decision_maker_parameter_validation(): mock_response_with_typo.reasoning = None mock_response_with_typo.raw_response = {"role": "assistant", "content": None} - from unittest.mock import AsyncMock - with patch( "backend.blocks.llm.llm_call", new_callable=AsyncMock, @@ -329,8 +338,17 @@ async def test_smart_decision_maker_parameter_validation(): model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore retry=2, # Set retry to 2 for testing + agent_mode_max_iterations=0, ) + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + # Should raise ValueError after retries due to typo'd parameter name with pytest.raises(ValueError) as exc_info: outputs = {} @@ -342,6 +360,9 @@ async def test_smart_decision_maker_parameter_validation(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_data @@ -368,8 +389,6 @@ async def test_smart_decision_maker_parameter_validation(): mock_response_missing_required.reasoning = None mock_response_missing_required.raw_response = {"role": "assistant", "content": None} - from unittest.mock import AsyncMock - with patch( "backend.blocks.llm.llm_call", new_callable=AsyncMock, @@ -385,8 +404,17 @@ async def test_smart_decision_maker_parameter_validation(): prompt="Search for keywords", model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=0, ) + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + # Should raise ValueError due to missing required parameter with pytest.raises(ValueError) as exc_info: outputs = {} @@ -398,6 +426,9 @@ async def test_smart_decision_maker_parameter_validation(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + 
execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_data @@ -418,8 +449,6 @@ async def test_smart_decision_maker_parameter_validation(): mock_response_valid.reasoning = None mock_response_valid.raw_response = {"role": "assistant", "content": None} - from unittest.mock import AsyncMock - with patch( "backend.blocks.llm.llm_call", new_callable=AsyncMock, @@ -435,10 +464,19 @@ async def test_smart_decision_maker_parameter_validation(): prompt="Search for keywords", model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=0, ) # Should succeed - optional parameter missing is OK outputs = {} + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + async for output_name, output_data in block.run( input_data, credentials=llm_module.TEST_CREDENTIALS, @@ -447,6 +485,9 @@ async def test_smart_decision_maker_parameter_validation(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_data @@ -472,8 +513,6 @@ async def test_smart_decision_maker_parameter_validation(): mock_response_all_params.reasoning = None mock_response_all_params.raw_response = {"role": "assistant", "content": None} - from unittest.mock import AsyncMock - with patch( "backend.blocks.llm.llm_call", new_callable=AsyncMock, @@ -489,10 +528,19 @@ async def test_smart_decision_maker_parameter_validation(): prompt="Search for keywords", model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=0, ) # Should succeed with all parameters outputs = {} + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + async for output_name, output_data in block.run( input_data, credentials=llm_module.TEST_CREDENTIALS, @@ -501,6 +549,9 @@ async def test_smart_decision_maker_parameter_validation(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_data @@ -513,8 +564,6 @@ async def test_smart_decision_maker_parameter_validation(): @pytest.mark.asyncio async def test_smart_decision_maker_raw_response_conversion(): """Test that SmartDecisionMaker correctly handles different raw_response types with retry mechanism.""" - from unittest.mock import MagicMock, patch - import backend.blocks.llm as llm_module from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock @@ -584,7 +633,6 @@ async def test_smart_decision_maker_raw_response_conversion(): ) # Mock llm_call to return different responses on different calls - from unittest.mock import AsyncMock with patch( "backend.blocks.llm.llm_call", new_callable=AsyncMock @@ -603,10 +651,19 @@ async def test_smart_decision_maker_raw_response_conversion(): model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore retry=2, + agent_mode_max_iterations=0, ) # Should succeed after retry, demonstrating our helper function works outputs = {} + # Create execution 
context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + async for output_name, output_data in block.run( input_data, credentials=llm_module.TEST_CREDENTIALS, @@ -615,6 +672,9 @@ async def test_smart_decision_maker_raw_response_conversion(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_data @@ -650,8 +710,6 @@ async def test_smart_decision_maker_raw_response_conversion(): "I'll help you with that." # Ollama returns string ) - from unittest.mock import AsyncMock - with patch( "backend.blocks.llm.llm_call", new_callable=AsyncMock, @@ -666,9 +724,18 @@ async def test_smart_decision_maker_raw_response_conversion(): prompt="Simple prompt", model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=0, ) outputs = {} + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + async for output_name, output_data in block.run( input_data, credentials=llm_module.TEST_CREDENTIALS, @@ -677,6 +744,9 @@ async def test_smart_decision_maker_raw_response_conversion(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_data @@ -696,8 +766,6 @@ async def test_smart_decision_maker_raw_response_conversion(): "content": "Test response", } # Dict format - from unittest.mock import AsyncMock - with patch( "backend.blocks.llm.llm_call", new_callable=AsyncMock, @@ -712,6 +780,160 @@ async def test_smart_decision_maker_raw_response_conversion(): prompt="Another test", model=llm_module.LlmModel.GPT4O, credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=0, + ) + + outputs = {} + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + + async for output_name, output_data in block.run( + input_data, + credentials=llm_module.TEST_CREDENTIALS, + graph_id="test-graph-id", + node_id="test-node-id", + graph_exec_id="test-exec-id", + node_exec_id="test-node-exec-id", + user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, + ): + outputs[output_name] = output_data + + assert "finished" in outputs + assert outputs["finished"] == "Test response" + + +@pytest.mark.asyncio +async def test_smart_decision_maker_agent_mode(): + """Test that agent mode executes tools directly and loops until finished.""" + import backend.blocks.llm as llm_module + from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock + + block = SmartDecisionMakerBlock() + + # Mock tool call that requires multiple iterations + mock_tool_call_1 = MagicMock() + mock_tool_call_1.id = "call_1" + mock_tool_call_1.function.name = "search_keywords" + mock_tool_call_1.function.arguments = ( + '{"query": "test", "max_keyword_difficulty": 50}' + ) + + mock_response_1 = MagicMock() + mock_response_1.response = None + mock_response_1.tool_calls = [mock_tool_call_1] 
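+    # One pending tool call: agent mode must execute it and loop again;
+    # mock_response_2 below, with no tool calls, is what ends the loop.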
+ mock_response_1.prompt_tokens = 50 + mock_response_1.completion_tokens = 25 + mock_response_1.reasoning = "Using search tool" + mock_response_1.raw_response = { + "role": "assistant", + "content": None, + "tool_calls": [{"id": "call_1", "type": "function"}], + } + + # Final response with no tool calls (finished) + mock_response_2 = MagicMock() + mock_response_2.response = "Task completed successfully" + mock_response_2.tool_calls = [] + mock_response_2.prompt_tokens = 30 + mock_response_2.completion_tokens = 15 + mock_response_2.reasoning = None + mock_response_2.raw_response = { + "role": "assistant", + "content": "Task completed successfully", + } + + # Mock the LLM call to return different responses on each iteration + llm_call_mock = AsyncMock() + llm_call_mock.side_effect = [mock_response_1, mock_response_2] + + # Mock tool node signatures + mock_tool_signatures = [ + { + "type": "function", + "function": { + "name": "search_keywords", + "_sink_node_id": "test-sink-node-id", + "_field_mapping": {}, + "parameters": { + "properties": { + "query": {"type": "string"}, + "max_keyword_difficulty": {"type": "integer"}, + }, + "required": ["query", "max_keyword_difficulty"], + }, + }, + } + ] + + # Mock database and execution components + mock_db_client = AsyncMock() + mock_node = MagicMock() + mock_node.block_id = "test-block-id" + mock_db_client.get_node.return_value = mock_node + + # Mock upsert_execution_input to return proper NodeExecutionResult and input data + mock_node_exec_result = MagicMock() + mock_node_exec_result.node_exec_id = "test-tool-exec-id" + mock_input_data = {"query": "test", "max_keyword_difficulty": 50} + mock_db_client.upsert_execution_input.return_value = ( + mock_node_exec_result, + mock_input_data, + ) + + # No longer need mock_execute_node since we use execution_processor.on_node_execution + + with patch("backend.blocks.llm.llm_call", llm_call_mock), patch.object( + block, "_create_tool_node_signatures", return_value=mock_tool_signatures + ), patch( + "backend.blocks.smart_decision_maker.get_database_manager_async_client", + return_value=mock_db_client, + ), patch( + "backend.executor.manager.async_update_node_execution_status", + new_callable=AsyncMock, + ), patch( + "backend.integrations.creds_manager.IntegrationCredentialsManager" + ): + + # Create a mock execution context + + mock_execution_context = ExecutionContext( + safe_mode=False, + ) + + # Create a mock execution processor for agent mode tests + + mock_execution_processor = AsyncMock() + # Configure the execution processor mock with required attributes + mock_execution_processor.running_node_execution = defaultdict(MagicMock) + mock_execution_processor.execution_stats = MagicMock() + mock_execution_processor.execution_stats_lock = threading.Lock() + + # Mock the on_node_execution method to return successful stats + mock_node_stats = MagicMock() + mock_node_stats.error = None # No error + mock_execution_processor.on_node_execution = AsyncMock( + return_value=mock_node_stats + ) + + # Mock the get_execution_outputs_by_node_exec_id method + mock_db_client.get_execution_outputs_by_node_exec_id.return_value = { + "result": {"status": "success", "data": "search completed"} + } + + # Test agent mode with max_iterations = 3 + input_data = SmartDecisionMakerBlock.Input( + prompt="Complete this task using tools", + model=llm_module.LlmModel.GPT4O, + credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=3, # Enable agent mode with 3 max iterations ) outputs = {} @@ -723,8 
+945,115 @@ async def test_smart_decision_maker_raw_response_conversion(): graph_exec_id="test-exec-id", node_exec_id="test-node-exec-id", user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_data + # Verify agent mode behavior + assert "tool_functions" in outputs # tool_functions is yielded in both modes assert "finished" in outputs - assert outputs["finished"] == "Test response" + assert outputs["finished"] == "Task completed successfully" + assert "conversations" in outputs + + # Verify the conversation includes tool responses + conversations = outputs["conversations"] + assert len(conversations) > 2 # Should have multiple conversation entries + + # Verify LLM was called twice (once for tool call, once for finish) + assert llm_call_mock.call_count == 2 + + # Verify tool was executed via execution processor + assert mock_execution_processor.on_node_execution.call_count == 1 + + +@pytest.mark.asyncio +async def test_smart_decision_maker_traditional_mode_default(): + """Test that default behavior (agent_mode_max_iterations=0) works as traditional mode.""" + import backend.blocks.llm as llm_module + from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock + + block = SmartDecisionMakerBlock() + + # Mock tool call + mock_tool_call = MagicMock() + mock_tool_call.function.name = "search_keywords" + mock_tool_call.function.arguments = ( + '{"query": "test", "max_keyword_difficulty": 50}' + ) + + mock_response = MagicMock() + mock_response.response = None + mock_response.tool_calls = [mock_tool_call] + mock_response.prompt_tokens = 50 + mock_response.completion_tokens = 25 + mock_response.reasoning = None + mock_response.raw_response = {"role": "assistant", "content": None} + + mock_tool_signatures = [ + { + "type": "function", + "function": { + "name": "search_keywords", + "_sink_node_id": "test-sink-node-id", + "_field_mapping": {}, + "parameters": { + "properties": { + "query": {"type": "string"}, + "max_keyword_difficulty": {"type": "integer"}, + }, + "required": ["query", "max_keyword_difficulty"], + }, + }, + } + ] + + with patch( + "backend.blocks.llm.llm_call", + new_callable=AsyncMock, + return_value=mock_response, + ), patch.object( + block, "_create_tool_node_signatures", return_value=mock_tool_signatures + ): + + # Test default behavior (traditional mode) + input_data = SmartDecisionMakerBlock.Input( + prompt="Test prompt", + model=llm_module.LlmModel.GPT4O, + credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore + agent_mode_max_iterations=0, # Traditional mode + ) + + # Create execution context + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a mock execution processor for tests + + mock_execution_processor = MagicMock() + + outputs = {} + async for output_name, output_data in block.run( + input_data, + credentials=llm_module.TEST_CREDENTIALS, + graph_id="test-graph-id", + node_id="test-node-id", + graph_exec_id="test-exec-id", + node_exec_id="test-node-exec-id", + user_id="test-user-id", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, + ): + outputs[output_name] = output_data + + # Verify traditional mode behavior + assert ( + "tool_functions" in outputs + ) # Should yield tool_functions in traditional mode + assert ( + "tools_^_test-sink-node-id_~_query" in outputs + ) # Should yield individual tool parameters + assert 
"tools_^_test-sink-node-id_~_max_keyword_difficulty" in outputs + assert "conversations" in outputs diff --git a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py index 55bcf4091e..d6a0c0fe39 100644 --- a/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py +++ b/autogpt_platform/backend/backend/blocks/test/test_smart_decision_maker_dynamic_fields.py @@ -1,7 +1,7 @@ """Comprehensive tests for SmartDecisionMakerBlock dynamic field handling.""" import json -from unittest.mock import AsyncMock, Mock, patch +from unittest.mock import AsyncMock, MagicMock, Mock, patch import pytest @@ -308,10 +308,47 @@ async def test_output_yielding_with_dynamic_fields(): ) as mock_llm: mock_llm.return_value = mock_response - # Mock the function signature creation - with patch.object( + # Mock the database manager to avoid HTTP calls during tool execution + with patch( + "backend.blocks.smart_decision_maker.get_database_manager_async_client" + ) as mock_db_manager, patch.object( block, "_create_tool_node_signatures", new_callable=AsyncMock ) as mock_sig: + # Set up the mock database manager + mock_db_client = AsyncMock() + mock_db_manager.return_value = mock_db_client + + # Mock the node retrieval + mock_target_node = Mock() + mock_target_node.id = "test-sink-node-id" + mock_target_node.block_id = "CreateDictionaryBlock" + mock_target_node.block = Mock() + mock_target_node.block.name = "Create Dictionary" + mock_db_client.get_node.return_value = mock_target_node + + # Mock the execution result creation + mock_node_exec_result = Mock() + mock_node_exec_result.node_exec_id = "mock-node-exec-id" + mock_final_input_data = { + "values_#_name": "Alice", + "values_#_age": 30, + "values_#_email": "alice@example.com", + } + mock_db_client.upsert_execution_input.return_value = ( + mock_node_exec_result, + mock_final_input_data, + ) + + # Mock the output retrieval + mock_outputs = { + "values_#_name": "Alice", + "values_#_age": 30, + "values_#_email": "alice@example.com", + } + mock_db_client.get_execution_outputs_by_node_exec_id.return_value = ( + mock_outputs + ) + mock_sig.return_value = [ { "type": "function", @@ -337,10 +374,16 @@ async def test_output_yielding_with_dynamic_fields(): prompt="Create a user dictionary", credentials=llm.TEST_CREDENTIALS_INPUT, model=llm.LlmModel.GPT4O, + agent_mode_max_iterations=0, # Use traditional mode to test output yielding ) # Run the block outputs = {} + from backend.data.execution import ExecutionContext + + mock_execution_context = ExecutionContext(safe_mode=False) + mock_execution_processor = MagicMock() + async for output_name, output_value in block.run( input_data, credentials=llm.TEST_CREDENTIALS, @@ -349,6 +392,9 @@ async def test_output_yielding_with_dynamic_fields(): graph_exec_id="test_exec", node_exec_id="test_node_exec", user_id="test_user", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, ): outputs[output_name] = output_value @@ -511,45 +557,108 @@ async def test_validation_errors_dont_pollute_conversation(): } ] - # Create input data - from backend.blocks import llm + # Mock the database manager to avoid HTTP calls during tool execution + with patch( + "backend.blocks.smart_decision_maker.get_database_manager_async_client" + ) as mock_db_manager: + # Set up the mock database manager for agent mode + mock_db_client = AsyncMock() + 
mock_db_manager.return_value = mock_db_client - input_data = block.input_schema( - prompt="Test prompt", - credentials=llm.TEST_CREDENTIALS_INPUT, - model=llm.LlmModel.GPT4O, - retry=3, # Allow retries - ) + # Mock the node retrieval + mock_target_node = Mock() + mock_target_node.id = "test-sink-node-id" + mock_target_node.block_id = "TestBlock" + mock_target_node.block = Mock() + mock_target_node.block.name = "Test Block" + mock_db_client.get_node.return_value = mock_target_node - # Run the block - outputs = {} - async for output_name, output_value in block.run( - input_data, - credentials=llm.TEST_CREDENTIALS, - graph_id="test_graph", - node_id="test_node", - graph_exec_id="test_exec", - node_exec_id="test_node_exec", - user_id="test_user", - ): - outputs[output_name] = output_value + # Mock the execution result creation + mock_node_exec_result = Mock() + mock_node_exec_result.node_exec_id = "mock-node-exec-id" + mock_final_input_data = {"correct_param": "value"} + mock_db_client.upsert_execution_input.return_value = ( + mock_node_exec_result, + mock_final_input_data, + ) - # Verify we had 2 LLM calls (initial + retry) - assert call_count == 2 + # Mock the output retrieval + mock_outputs = {"correct_param": "value"} + mock_db_client.get_execution_outputs_by_node_exec_id.return_value = ( + mock_outputs + ) - # Check the final conversation output - final_conversation = outputs.get("conversations", []) + # Create input data + from backend.blocks import llm - # The final conversation should NOT contain the validation error message - error_messages = [ - msg - for msg in final_conversation - if msg.get("role") == "user" - and "parameter errors" in msg.get("content", "") - ] - assert ( - len(error_messages) == 0 - ), "Validation error leaked into final conversation" + input_data = block.input_schema( + prompt="Test prompt", + credentials=llm.TEST_CREDENTIALS_INPUT, + model=llm.LlmModel.GPT4O, + retry=3, # Allow retries + agent_mode_max_iterations=1, + ) - # The final conversation should only have the successful response - assert final_conversation[-1]["content"] == "valid" + # Run the block + outputs = {} + from backend.data.execution import ExecutionContext + + mock_execution_context = ExecutionContext(safe_mode=False) + + # Create a proper mock execution processor for agent mode + from collections import defaultdict + + mock_execution_processor = AsyncMock() + mock_execution_processor.execution_stats = MagicMock() + mock_execution_processor.execution_stats_lock = MagicMock() + + # Create a mock NodeExecutionProgress for the sink node + mock_node_exec_progress = MagicMock() + mock_node_exec_progress.add_task = MagicMock() + mock_node_exec_progress.pop_output = MagicMock( + return_value=None + ) # No outputs to process + + # Set up running_node_execution as a defaultdict that returns our mock for any key + mock_execution_processor.running_node_execution = defaultdict( + lambda: mock_node_exec_progress + ) + + # Mock the on_node_execution method that gets called during tool execution + mock_node_stats = MagicMock() + mock_node_stats.error = None + mock_execution_processor.on_node_execution.return_value = ( + mock_node_stats + ) + + async for output_name, output_value in block.run( + input_data, + credentials=llm.TEST_CREDENTIALS, + graph_id="test_graph", + node_id="test_node", + graph_exec_id="test_exec", + node_exec_id="test_node_exec", + user_id="test_user", + graph_version=1, + execution_context=mock_execution_context, + execution_processor=mock_execution_processor, + ): + 
outputs[output_name] = output_value + + # Verify we had at least 1 LLM call + assert call_count >= 1 + + # Check the final conversation output + final_conversation = outputs.get("conversations", []) + + # The final conversation should NOT contain validation error messages + # Even if retries don't happen in agent mode, we should not leak errors + error_messages = [ + msg + for msg in final_conversation + if msg.get("role") == "user" + and "parameter errors" in msg.get("content", "") + ] + assert ( + len(error_messages) == 0 + ), "Validation error leaked into final conversation" diff --git a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py index d4b81bb1d3..020a5a1906 100644 --- a/autogpt_platform/backend/backend/data/execution.py +++ b/autogpt_platform/backend/backend/data/execution.py @@ -5,6 +5,7 @@ from enum import Enum from multiprocessing import Manager from queue import Empty from typing import ( + TYPE_CHECKING, Annotated, Any, AsyncGenerator, @@ -65,6 +66,9 @@ from .includes import ( ) from .model import CredentialsMetaInput, GraphExecutionStats, NodeExecutionStats +if TYPE_CHECKING: + pass + T = TypeVar("T") logger = logging.getLogger(__name__) @@ -836,6 +840,30 @@ async def upsert_execution_output( await AgentNodeExecutionInputOutput.prisma().create(data=data) +async def get_execution_outputs_by_node_exec_id( + node_exec_id: str, +) -> dict[str, Any]: + """ + Get all execution outputs for a specific node execution ID. + + Args: + node_exec_id: The node execution ID to get outputs for + + Returns: + Dictionary mapping output names to their data values + """ + outputs = await AgentNodeExecutionInputOutput.prisma().find_many( + where={"referencedByOutputExecId": node_exec_id} + ) + + result = {} + for output in outputs: + if output.data is not None: + result[output.name] = type_utils.convert(output.data, JsonValue) + + return result + + async def update_graph_execution_start_time( graph_exec_id: str, ) -> GraphExecution | None: diff --git a/autogpt_platform/backend/backend/data/human_review.py b/autogpt_platform/backend/backend/data/human_review.py index df0b4b21e8..11b87ec1dd 100644 --- a/autogpt_platform/backend/backend/data/human_review.py +++ b/autogpt_platform/backend/backend/data/human_review.py @@ -100,7 +100,7 @@ async def get_or_create_human_review( return None else: return ReviewResult( - data=review.payload if review.status == ReviewStatus.APPROVED else None, + data=review.payload, status=review.status, message=review.reviewMessage or "", processed=review.processed, diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py index 3d3ce3b791..9c2b3970c6 100644 --- a/autogpt_platform/backend/backend/executor/database.py +++ b/autogpt_platform/backend/backend/executor/database.py @@ -13,6 +13,7 @@ from backend.data.execution import ( get_block_error_stats, get_child_graph_executions, get_execution_kv_data, + get_execution_outputs_by_node_exec_id, get_frequently_executed_graphs, get_graph_execution_meta, get_graph_executions, @@ -147,6 +148,7 @@ class DatabaseManager(AppService): update_graph_execution_stats = _(update_graph_execution_stats) upsert_execution_input = _(upsert_execution_input) upsert_execution_output = _(upsert_execution_output) + get_execution_outputs_by_node_exec_id = _(get_execution_outputs_by_node_exec_id) get_execution_kv_data = _(get_execution_kv_data) set_execution_kv_data = _(set_execution_kv_data) get_block_error_stats = 
_(get_block_error_stats) @@ -277,6 +279,7 @@ class DatabaseManagerAsyncClient(AppServiceClient): get_user_integrations = d.get_user_integrations upsert_execution_input = d.upsert_execution_input upsert_execution_output = d.upsert_execution_output + get_execution_outputs_by_node_exec_id = d.get_execution_outputs_by_node_exec_id update_graph_execution_stats = d.update_graph_execution_stats update_node_execution_status = d.update_node_execution_status update_node_execution_status_batch = d.update_node_execution_status_batch diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index ee875bbf55..234f8127c8 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -133,9 +133,8 @@ def execute_graph( cluster_lock: ClusterLock, ): """Execute graph using thread-local ExecutionProcessor instance""" - return _tls.processor.on_graph_execution( - graph_exec_entry, cancel_event, cluster_lock - ) + processor: ExecutionProcessor = _tls.processor + return processor.on_graph_execution(graph_exec_entry, cancel_event, cluster_lock) T = TypeVar("T") @@ -143,8 +142,8 @@ T = TypeVar("T") async def execute_node( node: Node, - creds_manager: IntegrationCredentialsManager, data: NodeExecutionEntry, + execution_processor: "ExecutionProcessor", execution_stats: NodeExecutionStats | None = None, nodes_input_masks: Optional[NodesInputMasks] = None, ) -> BlockOutput: @@ -169,6 +168,7 @@ async def execute_node( node_id = data.node_id node_block = node.block execution_context = data.execution_context + creds_manager = execution_processor.creds_manager log_metadata = LogMetadata( logger=_logger, @@ -212,6 +212,7 @@ async def execute_node( "node_exec_id": node_exec_id, "user_id": user_id, "execution_context": execution_context, + "execution_processor": execution_processor, } # Last-minute fetch credentials + acquire a system-wide read-write lock to prevent @@ -608,8 +609,8 @@ class ExecutionProcessor: async for output_name, output_data in execute_node( node=node, - creds_manager=self.creds_manager, data=node_exec, + execution_processor=self, execution_stats=stats, nodes_input_masks=nodes_input_masks, ): @@ -860,12 +861,17 @@ class ExecutionProcessor: execution_stats_lock = threading.Lock() # State holders ---------------------------------------------------- - running_node_execution: dict[str, NodeExecutionProgress] = defaultdict( + self.running_node_execution: dict[str, NodeExecutionProgress] = defaultdict( NodeExecutionProgress ) - running_node_evaluation: dict[str, Future] = {} + self.running_node_evaluation: dict[str, Future] = {} + self.execution_stats = execution_stats + self.execution_stats_lock = execution_stats_lock execution_queue = ExecutionQueue[NodeExecutionEntry]() + running_node_execution = self.running_node_execution + running_node_evaluation = self.running_node_evaluation + try: if db_client.get_credits(graph_exec.user_id) <= 0: raise InsufficientBalanceError( diff --git a/autogpt_platform/backend/backend/server/v2/builder/db.py b/autogpt_platform/backend/backend/server/v2/builder/db.py index c3f6ac88ab..9856d53c0e 100644 --- a/autogpt_platform/backend/backend/server/v2/builder/db.py +++ b/autogpt_platform/backend/backend/server/v2/builder/db.py @@ -1,9 +1,16 @@ import logging +from dataclasses import dataclass from datetime import datetime, timedelta, timezone +from difflib import SequenceMatcher +from typing import Sequence import prisma import backend.data.block +import 
backend.server.v2.library.db as library_db +import backend.server.v2.library.model as library_model +import backend.server.v2.store.db as store_db +import backend.server.v2.store.model as store_model from backend.blocks import load_all_blocks from backend.blocks.llm import LlmModel from backend.data.block import AnyBlockSchema, BlockCategory, BlockInfo, BlockSchema @@ -14,17 +21,36 @@ from backend.server.v2.builder.model import ( BlockResponse, BlockType, CountResponse, + FilterType, Provider, ProviderResponse, - SearchBlocksResponse, + SearchEntry, ) from backend.util.cache import cached from backend.util.models import Pagination logger = logging.getLogger(__name__) llm_models = [name.name.lower().replace("_", " ") for name in LlmModel] -_static_counts_cache: dict | None = None -_suggested_blocks: list[BlockInfo] | None = None + +MAX_LIBRARY_AGENT_RESULTS = 100 +MAX_MARKETPLACE_AGENT_RESULTS = 100 +MIN_SCORE_FOR_FILTERED_RESULTS = 10.0 + +SearchResultItem = BlockInfo | library_model.LibraryAgent | store_model.StoreAgent + + +@dataclass +class _ScoredItem: + item: SearchResultItem + filter_type: FilterType + score: float + sort_key: str + + +@dataclass +class _SearchCacheEntry: + items: list[SearchResultItem] + total_items: dict[FilterType, int] def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse]: @@ -130,71 +156,244 @@ def get_block_by_id(block_id: str) -> BlockInfo | None: return None -def search_blocks( - include_blocks: bool = True, - include_integrations: bool = True, - query: str = "", - page: int = 1, - page_size: int = 50, -) -> SearchBlocksResponse: +async def update_search(user_id: str, search: SearchEntry) -> str: """ - Get blocks based on the filter and query. - `providers` only applies for `integrations` filter. + Upsert a search request for the user and return the search ID. """ - blocks: list[AnyBlockSchema] = [] - query = query.lower() + if search.search_id: + # Update existing search + await prisma.models.BuilderSearchHistory.prisma().update( + where={ + "id": search.search_id, + }, + data={ + "searchQuery": search.search_query or "", + "filter": search.filter or [], # type: ignore + "byCreator": search.by_creator or [], + }, + ) + return search.search_id + else: + # Create new search + new_search = await prisma.models.BuilderSearchHistory.prisma().create( + data={ + "userId": user_id, + "searchQuery": search.search_query or "", + "filter": search.filter or [], # type: ignore + "byCreator": search.by_creator or [], + } + ) + return new_search.id - total = 0 - skip = (page - 1) * page_size - take = page_size + +async def get_recent_searches(user_id: str, limit: int = 5) -> list[SearchEntry]: + """ + Get the user's most recent search requests. + """ + searches = await prisma.models.BuilderSearchHistory.prisma().find_many( + where={ + "userId": user_id, + }, + order={ + "updatedAt": "desc", + }, + take=limit, + ) + return [ + SearchEntry( + search_query=s.searchQuery, + filter=s.filter, # type: ignore + by_creator=s.byCreator, + search_id=s.id, + ) + for s in searches + ] + + +async def get_sorted_search_results( + *, + user_id: str, + search_query: str | None, + filters: Sequence[FilterType], + by_creator: Sequence[str] | None = None, +) -> _SearchCacheEntry: + normalized_filters: tuple[FilterType, ...] = tuple(sorted(set(filters or []))) + normalized_creators: tuple[str, ...] 
= tuple(sorted(set(by_creator or []))) + return await _build_cached_search_results( + user_id=user_id, + search_query=search_query or "", + filters=normalized_filters, + by_creator=normalized_creators, + ) + + +@cached(ttl_seconds=300, shared_cache=True) +async def _build_cached_search_results( + user_id: str, + search_query: str, + filters: tuple[FilterType, ...], + by_creator: tuple[str, ...], +) -> _SearchCacheEntry: + normalized_query = (search_query or "").strip().lower() + + include_blocks = "blocks" in filters + include_integrations = "integrations" in filters + include_library_agents = "my_agents" in filters + include_marketplace_agents = "marketplace_agents" in filters + + scored_items: list[_ScoredItem] = [] + total_items: dict[FilterType, int] = { + "blocks": 0, + "integrations": 0, + "marketplace_agents": 0, + "my_agents": 0, + } + + block_results, block_total, integration_total = _collect_block_results( + normalized_query=normalized_query, + include_blocks=include_blocks, + include_integrations=include_integrations, + ) + scored_items.extend(block_results) + total_items["blocks"] = block_total + total_items["integrations"] = integration_total + + if include_library_agents: + library_response = await library_db.list_library_agents( + user_id=user_id, + search_term=search_query or None, + page=1, + page_size=MAX_LIBRARY_AGENT_RESULTS, + ) + total_items["my_agents"] = library_response.pagination.total_items + scored_items.extend( + _build_library_items( + agents=library_response.agents, + normalized_query=normalized_query, + ) + ) + + if include_marketplace_agents: + marketplace_response = await store_db.get_store_agents( + creators=list(by_creator) or None, + search_query=search_query or None, + page=1, + page_size=MAX_MARKETPLACE_AGENT_RESULTS, + ) + total_items["marketplace_agents"] = marketplace_response.pagination.total_items + scored_items.extend( + _build_marketplace_items( + agents=marketplace_response.agents, + normalized_query=normalized_query, + ) + ) + + sorted_items = sorted( + scored_items, + key=lambda entry: (-entry.score, entry.sort_key, entry.filter_type), + ) + + return _SearchCacheEntry( + items=[entry.item for entry in sorted_items], + total_items=total_items, + ) + + +def _collect_block_results( + *, + normalized_query: str, + include_blocks: bool, + include_integrations: bool, +) -> tuple[list[_ScoredItem], int, int]: + results: list[_ScoredItem] = [] block_count = 0 integration_count = 0 + if not include_blocks and not include_integrations: + return results, block_count, integration_count + for block_type in load_all_blocks().values(): block: AnyBlockSchema = block_type() - # Skip disabled blocks if block.disabled: continue - # Skip blocks that don't match the query - if ( - query not in block.name.lower() - and query not in block.description.lower() - and not _matches_llm_model(block.input_schema, query) - ): - continue - keep = False + + block_info = block.get_info() credentials = list(block.input_schema.get_credentials_fields().values()) - if include_integrations and len(credentials) > 0: - keep = True + is_integration = len(credentials) > 0 + + if is_integration and not include_integrations: + continue + if not is_integration and not include_blocks: + continue + + score = _score_block(block, block_info, normalized_query) + if not _should_include_item(score, normalized_query): + continue + + filter_type: FilterType = "integrations" if is_integration else "blocks" + if is_integration: integration_count += 1 - if include_blocks and len(credentials) == 0: 
- keep = True + else: block_count += 1 - if not keep: + results.append( + _ScoredItem( + item=block_info, + filter_type=filter_type, + score=score, + sort_key=_get_item_name(block_info), + ) + ) + + return results, block_count, integration_count + + +def _build_library_items( + *, + agents: list[library_model.LibraryAgent], + normalized_query: str, +) -> list[_ScoredItem]: + results: list[_ScoredItem] = [] + + for agent in agents: + score = _score_library_agent(agent, normalized_query) + if not _should_include_item(score, normalized_query): continue - total += 1 - if skip > 0: - skip -= 1 - continue - if take > 0: - take -= 1 - blocks.append(block) + results.append( + _ScoredItem( + item=agent, + filter_type="my_agents", + score=score, + sort_key=_get_item_name(agent), + ) + ) - return SearchBlocksResponse( - blocks=BlockResponse( - blocks=[b.get_info() for b in blocks], - pagination=Pagination( - total_items=total, - total_pages=(total + page_size - 1) // page_size, - current_page=page, - page_size=page_size, - ), - ), - total_block_count=block_count, - total_integration_count=integration_count, - ) + return results + + +def _build_marketplace_items( + *, + agents: list[store_model.StoreAgent], + normalized_query: str, +) -> list[_ScoredItem]: + results: list[_ScoredItem] = [] + + for agent in agents: + score = _score_store_agent(agent, normalized_query) + if not _should_include_item(score, normalized_query): + continue + + results.append( + _ScoredItem( + item=agent, + filter_type="marketplace_agents", + score=score, + sort_key=_get_item_name(agent), + ) + ) + + return results def get_providers( @@ -251,16 +450,12 @@ async def get_counts(user_id: str) -> CountResponse: ) +@cached(ttl_seconds=3600) async def _get_static_counts(): """ Get counts of blocks, integrations, and marketplace agents. This is cached to avoid unnecessary database queries and calculations. - Can't use functools.cache here because the function is async. 
""" - global _static_counts_cache - if _static_counts_cache is not None: - return _static_counts_cache - all_blocks = 0 input_blocks = 0 action_blocks = 0 @@ -287,7 +482,7 @@ async def _get_static_counts(): marketplace_agents = await prisma.models.StoreAgent.prisma().count() - _static_counts_cache = { + return { "all_blocks": all_blocks, "input_blocks": input_blocks, "action_blocks": action_blocks, @@ -296,8 +491,6 @@ async def _get_static_counts(): "marketplace_agents": marketplace_agents, } - return _static_counts_cache - def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool: for field in schema_cls.model_fields.values(): @@ -308,6 +501,123 @@ def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool: return False +def _score_block( + block: AnyBlockSchema, + block_info: BlockInfo, + normalized_query: str, +) -> float: + if not normalized_query: + return 0.0 + + name = block_info.name.lower() + description = block_info.description.lower() + score = _score_primary_fields(name, description, normalized_query) + + category_text = " ".join( + category.get("category", "").lower() for category in block_info.categories + ) + score += _score_additional_field(category_text, normalized_query, 12, 6) + + credentials_info = block.input_schema.get_credentials_fields_info().values() + provider_names = [ + provider.value.lower() + for info in credentials_info + for provider in info.provider + ] + provider_text = " ".join(provider_names) + score += _score_additional_field(provider_text, normalized_query, 15, 6) + + if _matches_llm_model(block.input_schema, normalized_query): + score += 20 + + return score + + +def _score_library_agent( + agent: library_model.LibraryAgent, + normalized_query: str, +) -> float: + if not normalized_query: + return 0.0 + + name = agent.name.lower() + description = (agent.description or "").lower() + instructions = (agent.instructions or "").lower() + + score = _score_primary_fields(name, description, normalized_query) + score += _score_additional_field(instructions, normalized_query, 15, 6) + score += _score_additional_field( + agent.creator_name.lower(), normalized_query, 10, 5 + ) + + return score + + +def _score_store_agent( + agent: store_model.StoreAgent, + normalized_query: str, +) -> float: + if not normalized_query: + return 0.0 + + name = agent.agent_name.lower() + description = agent.description.lower() + sub_heading = agent.sub_heading.lower() + + score = _score_primary_fields(name, description, normalized_query) + score += _score_additional_field(sub_heading, normalized_query, 12, 6) + score += _score_additional_field(agent.creator.lower(), normalized_query, 10, 5) + + return score + + +def _score_primary_fields(name: str, description: str, query: str) -> float: + score = 0.0 + if name == query: + score += 120 + elif name.startswith(query): + score += 90 + elif query in name: + score += 60 + + score += SequenceMatcher(None, name, query).ratio() * 50 + if description: + if query in description: + score += 30 + score += SequenceMatcher(None, description, query).ratio() * 25 + return score + + +def _score_additional_field( + value: str, + query: str, + contains_weight: float, + similarity_weight: float, +) -> float: + if not value or not query: + return 0.0 + + score = 0.0 + if query in value: + score += contains_weight + score += SequenceMatcher(None, value, query).ratio() * similarity_weight + return score + + +def _should_include_item(score: float, normalized_query: str) -> bool: + if not normalized_query: + return True + 
return score >= MIN_SCORE_FOR_FILTERED_RESULTS + + +def _get_item_name(item: SearchResultItem) -> str: + if isinstance(item, BlockInfo): + return item.name.lower() + if isinstance(item, library_model.LibraryAgent): + return item.name.lower() + return item.agent_name.lower() + + @cached(ttl_seconds=3600) def _get_all_providers() -> dict[ProviderName, Provider]: providers: dict[ProviderName, Provider] = {} @@ -329,13 +639,9 @@ def _get_all_providers() -> dict[ProviderName, Provider]: return providers +@cached(ttl_seconds=3600) async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]: - global _suggested_blocks - - if _suggested_blocks is not None and len(_suggested_blocks) >= count: - return _suggested_blocks[:count] - - _suggested_blocks = [] + suggested_blocks = [] # Sum the number of executions for each block type # Prisma cannot group by nested relations, so we do a raw query # Calculate the cutoff timestamp @@ -376,7 +682,7 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]: # Sort blocks by execution count blocks.sort(key=lambda x: x[1], reverse=True) - _suggested_blocks = [block[0] for block in blocks] + suggested_blocks = [block[0] for block in blocks] # Return the top blocks - return _suggested_blocks[:count] + return suggested_blocks[:count] diff --git a/autogpt_platform/backend/backend/server/v2/builder/model.py b/autogpt_platform/backend/backend/server/v2/builder/model.py index e1a7e744fd..4a1de595d1 100644 --- a/autogpt_platform/backend/backend/server/v2/builder/model.py +++ b/autogpt_platform/backend/backend/server/v2/builder/model.py @@ -18,10 +18,17 @@ FilterType = Literal[ BlockType = Literal["all", "input", "action", "output"] +class SearchEntry(BaseModel): + search_query: str | None = None + filter: list[FilterType] | None = None + by_creator: list[str] | None = None + search_id: str | None = None + + # Suggestions class SuggestionsResponse(BaseModel): otto_suggestions: list[str] - recent_searches: list[str] + recent_searches: list[SearchEntry] providers: list[ProviderName] top_blocks: list[BlockInfo] @@ -32,7 +39,7 @@ class BlockCategoryResponse(BaseModel): total_blocks: int blocks: list[BlockInfo] - model_config = {"use_enum_values": False} # <== use enum names like "AI" + model_config = {"use_enum_values": False} # Use enum names like "AI" # Input/Action/Output and see all for block categories @@ -53,17 +60,11 @@ class ProviderResponse(BaseModel): pagination: Pagination -class SearchBlocksResponse(BaseModel): - blocks: BlockResponse - total_block_count: int - total_integration_count: int - - class SearchResponse(BaseModel): items: list[BlockInfo | library_model.LibraryAgent | store_model.StoreAgent] + search_id: str total_items: dict[FilterType, int] - page: int - more_pages: bool + pagination: Pagination class CountResponse(BaseModel): diff --git a/autogpt_platform/backend/backend/server/v2/builder/routes.py b/autogpt_platform/backend/backend/server/v2/builder/routes.py index ebc9fd5baf..b87bf8ca1a 100644 --- a/autogpt_platform/backend/backend/server/v2/builder/routes.py +++ b/autogpt_platform/backend/backend/server/v2/builder/routes.py @@ -6,10 +6,6 @@ from autogpt_libs.auth.dependencies import get_user_id, requires_user import backend.server.v2.builder.db as builder_db import backend.server.v2.builder.model as builder_model -import backend.server.v2.library.db as library_db -import backend.server.v2.library.model as library_model -import backend.server.v2.store.db as store_db -import backend.server.v2.store.model as store_model from 
backend.integrations.providers import ProviderName from backend.util.models import Pagination @@ -45,7 +41,9 @@ def sanitize_query(query: str | None) -> str | None: summary="Get Builder suggestions", response_model=builder_model.SuggestionsResponse, ) -async def get_suggestions() -> builder_model.SuggestionsResponse: +async def get_suggestions( + user_id: Annotated[str, fastapi.Security(get_user_id)], +) -> builder_model.SuggestionsResponse: """ Get all suggestions for the Blocks Menu. """ @@ -55,11 +53,7 @@ async def get_suggestions() -> builder_model.SuggestionsResponse: "Help me create a list", "Help me feed my data to Google Maps", ], - recent_searches=[ - "image generation", - "deepfake", - "competitor analysis", - ], + recent_searches=await builder_db.get_recent_searches(user_id), providers=[ ProviderName.TWITTER, ProviderName.GITHUB, @@ -147,7 +141,6 @@ async def get_providers( ) -# Not using post method because on frontend, orval doesn't support Infinite Query with POST method. @router.get( "/search", summary="Builder search", @@ -157,7 +150,7 @@ async def get_providers( async def search( user_id: Annotated[str, fastapi.Security(get_user_id)], search_query: Annotated[str | None, fastapi.Query()] = None, - filter: Annotated[list[str] | None, fastapi.Query()] = None, + filter: Annotated[list[builder_model.FilterType] | None, fastapi.Query()] = None, search_id: Annotated[str | None, fastapi.Query()] = None, by_creator: Annotated[list[str] | None, fastapi.Query()] = None, page: Annotated[int, fastapi.Query()] = 1, @@ -176,69 +169,43 @@ async def search( ] search_query = sanitize_query(search_query) - # Blocks&Integrations - blocks = builder_model.SearchBlocksResponse( - blocks=builder_model.BlockResponse( - blocks=[], - pagination=Pagination.empty(), - ), - total_block_count=0, - total_integration_count=0, + # Get all possible results + cached_results = await builder_db.get_sorted_search_results( + user_id=user_id, + search_query=search_query, + filters=filter, + by_creator=by_creator, ) - if "blocks" in filter or "integrations" in filter: - blocks = builder_db.search_blocks( - include_blocks="blocks" in filter, - include_integrations="integrations" in filter, - query=search_query or "", - page=page, - page_size=page_size, - ) - # Library Agents - my_agents = library_model.LibraryAgentResponse( - agents=[], - pagination=Pagination.empty(), + # Paginate results + total_combined_items = len(cached_results.items) + pagination = Pagination( + total_items=total_combined_items, + total_pages=(total_combined_items + page_size - 1) // page_size, + current_page=page, + page_size=page_size, ) - if "my_agents" in filter: - my_agents = await library_db.list_library_agents( - user_id=user_id, - search_term=search_query, - page=page, - page_size=page_size, - ) - # Marketplace Agents - marketplace_agents = store_model.StoreAgentsResponse( - agents=[], - pagination=Pagination.empty(), - ) - if "marketplace_agents" in filter: - marketplace_agents = await store_db.get_store_agents( - creators=by_creator, + start_idx = (page - 1) * page_size + end_idx = start_idx + page_size + paginated_items = cached_results.items[start_idx:end_idx] + + # Update the search entry by id + search_id = await builder_db.update_search( + user_id, + builder_model.SearchEntry( search_query=search_query, - page=page, - page_size=page_size, - ) - - more_pages = False - if ( - blocks.blocks.pagination.current_page < blocks.blocks.pagination.total_pages - or my_agents.pagination.current_page < my_agents.pagination.total_pages - or 
marketplace_agents.pagination.current_page - < marketplace_agents.pagination.total_pages - ): - more_pages = True + filter=filter, + by_creator=by_creator, + search_id=search_id, + ), + ) return builder_model.SearchResponse( - items=blocks.blocks.blocks + my_agents.agents + marketplace_agents.agents, - total_items={ - "blocks": blocks.total_block_count, - "integrations": blocks.total_integration_count, - "marketplace_agents": marketplace_agents.pagination.total_items, - "my_agents": my_agents.pagination.total_items, - }, - page=page, - more_pages=more_pages, + items=paginated_items, + search_id=search_id, + total_items=cached_results.total_items, + pagination=pagination, ) diff --git a/autogpt_platform/backend/backend/server/v2/executions/review/routes.py b/autogpt_platform/backend/backend/server/v2/executions/review/routes.py index febfe40213..14fb435457 100644 --- a/autogpt_platform/backend/backend/server/v2/executions/review/routes.py +++ b/autogpt_platform/backend/backend/server/v2/executions/review/routes.py @@ -134,18 +134,14 @@ async def process_review_action( # Build review decisions map review_decisions = {} for review in request.reviews: - if review.approved: - review_decisions[review.node_exec_id] = ( - ReviewStatus.APPROVED, - review.reviewed_data, - review.message, - ) - else: - review_decisions[review.node_exec_id] = ( - ReviewStatus.REJECTED, - None, - review.message, - ) + review_status = ( + ReviewStatus.APPROVED if review.approved else ReviewStatus.REJECTED + ) + review_decisions[review.node_exec_id] = ( + review_status, + review.reviewed_data, + review.message, + ) # Process all reviews updated_reviews = await process_all_reviews_for_execution( diff --git a/autogpt_platform/backend/backend/util/prompt.py b/autogpt_platform/backend/backend/util/prompt.py index a39f0367dd..775d1c932b 100644 --- a/autogpt_platform/backend/backend/util/prompt.py +++ b/autogpt_platform/backend/backend/util/prompt.py @@ -5,6 +5,13 @@ from tiktoken import encoding_for_model from backend.util import json +# ---------------------------------------------------------------------------# +# CONSTANTS # +# ---------------------------------------------------------------------------# + +# Message prefixes for important system messages that should be protected during compression +MAIN_OBJECTIVE_PREFIX = "[Main Objective Prompt]: " + # ---------------------------------------------------------------------------# # INTERNAL UTILITIES # # ---------------------------------------------------------------------------# @@ -63,6 +70,55 @@ def _msg_tokens(msg: dict, enc) -> int: return WRAPPER + content_tokens + tool_call_tokens +def _is_tool_message(msg: dict) -> bool: + """Check if a message contains tool calls or results that should be protected.""" + content = msg.get("content") + + # Check for Anthropic-style tool messages + if isinstance(content, list) and any( + isinstance(item, dict) and item.get("type") in ("tool_use", "tool_result") + for item in content + ): + return True + + # Check for OpenAI-style tool calls in the message + if "tool_calls" in msg or msg.get("role") == "tool": + return True + + return False + + +def _is_objective_message(msg: dict) -> bool: + """Check if a message contains objective/system prompts that should be absolutely protected.""" + content = msg.get("content", "") + if isinstance(content, str): + # Protect any message with the main objective prefix + return content.startswith(MAIN_OBJECTIVE_PREFIX) + return False + + +def _truncate_tool_message_content(msg: dict, enc, 
max_tokens: int) -> None: + """ + Carefully truncate tool message content while preserving tool structure. + Only truncates tool_result content, leaves tool_use intact. + """ + content = msg.get("content") + if not isinstance(content, list): + return + + for item in content: + # Only process tool_result items, leave tool_use blocks completely intact + if not (isinstance(item, dict) and item.get("type") == "tool_result"): + continue + + result_content = item.get("content", "") + if ( + isinstance(result_content, str) + and _tok_len(result_content, enc) > max_tokens + ): + item["content"] = _truncate_middle_tokens(result_content, enc, max_tokens) + + def _truncate_middle_tokens(text: str, enc, max_tok: int) -> str: """ Return *text* shortened to ≈max_tok tokens by keeping the head & tail @@ -140,13 +196,21 @@ def compress_prompt( return sum(_msg_tokens(m, enc) for m in msgs) original_token_count = total_tokens() + if original_token_count + reserve <= target_tokens: return msgs # ---- STEP 0 : normalise content -------------------------------------- # Convert non-string payloads to strings so token counting is coherent. - for m in msgs[1:-1]: # keep the first & last intact + for i, m in enumerate(msgs): if not isinstance(m.get("content"), str) and m.get("content") is not None: + if _is_tool_message(m): + continue + + # Keep first and last messages intact (unless they're tool messages) + if i == 0 or i == len(msgs) - 1: + continue + # Reasonable 20k-char ceiling prevents pathological blobs content_str = json.dumps(m["content"], separators=(",", ":")) if len(content_str) > 20_000: @@ -157,34 +221,45 @@ def compress_prompt( cap = start_cap while total_tokens() + reserve > target_tokens and cap >= floor_cap: for m in msgs[1:-1]: # keep first & last intact - if _tok_len(m.get("content") or "", enc) > cap: - m["content"] = _truncate_middle_tokens(m["content"], enc, cap) + if _is_tool_message(m): + # For tool messages, only truncate tool result content, preserve structure + _truncate_tool_message_content(m, enc, cap) + continue + + if _is_objective_message(m): + # Never truncate objective messages - they contain the core task + continue + + content = m.get("content") or "" + if _tok_len(content, enc) > cap: + m["content"] = _truncate_middle_tokens(content, enc, cap) cap //= 2 # tighten the screw # ---- STEP 2 : middle-out deletion ----------------------------------- while total_tokens() + reserve > target_tokens and len(msgs) > 2: + # Identify all deletable messages (not first/last, not tool messages, not objective messages) + deletable_indices = [] + for i in range(1, len(msgs) - 1): # Skip first and last + if not _is_tool_message(msgs[i]) and not _is_objective_message(msgs[i]): + deletable_indices.append(i) + + if not deletable_indices: + break # nothing more we can drop + + # Delete from center outward - find the index closest to center centre = len(msgs) // 2 - # Build a symmetrical centre-out index walk: centre, centre+1, centre-1, ... 
- order = [centre] + [ - i - for pair in zip(range(centre + 1, len(msgs) - 1), range(centre - 1, 0, -1)) - for i in pair - ] - removed = False - for i in order: - msg = msgs[i] - if "tool_calls" in msg or msg.get("role") == "tool": - continue # protect tool shells - del msgs[i] - removed = True - break - if not removed: # nothing more we can drop - break + to_delete = min(deletable_indices, key=lambda i: abs(i - centre)) + del msgs[to_delete] # ---- STEP 3 : final safety-net trim on first & last ------------------ cap = start_cap while total_tokens() + reserve > target_tokens and cap >= floor_cap: for idx in (0, -1): # first and last + if _is_tool_message(msgs[idx]): + # For tool messages at first/last position, truncate tool result content only + _truncate_tool_message_content(msgs[idx], enc, cap) + continue + text = msgs[idx].get("content") or "" if _tok_len(text, enc) > cap: msgs[idx]["content"] = _truncate_middle_tokens(text, enc, cap) diff --git a/autogpt_platform/backend/migrations/20251209182537_add_builder_search/migration.sql b/autogpt_platform/backend/migrations/20251209182537_add_builder_search/migration.sql new file mode 100644 index 0000000000..8b9786e47c --- /dev/null +++ b/autogpt_platform/backend/migrations/20251209182537_add_builder_search/migration.sql @@ -0,0 +1,15 @@ +-- Create BuilderSearchHistory table +CREATE TABLE "BuilderSearchHistory" ( + "id" TEXT NOT NULL, + "userId" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "searchQuery" TEXT NOT NULL, + "filter" TEXT[] DEFAULT ARRAY[]::TEXT[], + "byCreator" TEXT[] DEFAULT ARRAY[]::TEXT[], + + CONSTRAINT "BuilderSearchHistory_pkey" PRIMARY KEY ("id") +); + +-- Define User foreign relation +ALTER TABLE "BuilderSearchHistory" ADD CONSTRAINT "BuilderSearchHistory_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma index 5dbec459cf..4550a06150 100644 --- a/autogpt_platform/backend/schema.prisma +++ b/autogpt_platform/backend/schema.prisma @@ -53,6 +53,7 @@ model User { Profile Profile[] UserOnboarding UserOnboarding? + BuilderSearchHistory BuilderSearchHistory[] StoreListings StoreListing[] StoreListingReviews StoreListingReview[] StoreVersionsReviewed StoreListingVersion[] @@ -114,6 +115,19 @@ model UserOnboarding { User User @relation(fields: [userId], references: [id], onDelete: Cascade) } +model BuilderSearchHistory { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + updatedAt DateTime @default(now()) @updatedAt + + searchQuery String + filter String[] @default([]) + byCreator String[] @default([]) + + userId String + User User @relation(fields: [userId], references: [id], onDelete: Cascade) +} + // This model describes the Agent Graph/Flow (Multi Agent System). 
model AgentGraph { id String @default(uuid()) diff --git a/autogpt_platform/frontend/next.config.mjs b/autogpt_platform/frontend/next.config.mjs index d4595990a2..e4e4cdf544 100644 --- a/autogpt_platform/frontend/next.config.mjs +++ b/autogpt_platform/frontend/next.config.mjs @@ -3,6 +3,14 @@ import { withSentryConfig } from "@sentry/nextjs"; /** @type {import('next').NextConfig} */ const nextConfig = { productionBrowserSourceMaps: true, + experimental: { + serverActions: { + bodySizeLimit: "256mb", + }, + // Increase body size limit for API routes (file uploads) - 256MB to match backend limit + proxyClientMaxBodySize: "256mb", + middlewareClientMaxBodySize: "256mb", + }, images: { domains: [ // We dont need to maintain alphabetical order here diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json index 556b733e96..4cbd867cd8 100644 --- a/autogpt_platform/frontend/package.json +++ b/autogpt_platform/frontend/package.json @@ -82,7 +82,7 @@ "lodash": "4.17.21", "lucide-react": "0.552.0", "moment": "2.30.1", - "next": "15.4.8", + "next": "15.4.10", "next-themes": "0.4.6", "nuqs": "2.7.2", "party-js": "2.2.0", @@ -137,9 +137,8 @@ "concurrently": "9.2.1", "cross-env": "10.1.0", "eslint": "8.57.1", - "eslint-config-next": "15.5.2", + "eslint-config-next": "15.5.7", "eslint-plugin-storybook": "9.1.5", - "import-in-the-middle": "1.14.2", "msw": "2.11.6", "msw-storybook-addon": "2.0.6", "orval": "7.13.0", diff --git a/autogpt_platform/frontend/pnpm-lock.yaml b/autogpt_platform/frontend/pnpm-lock.yaml index 8e699cd907..54843fc589 100644 --- a/autogpt_platform/frontend/pnpm-lock.yaml +++ b/autogpt_platform/frontend/pnpm-lock.yaml @@ -16,7 +16,7 @@ importers: version: 5.2.2(react-hook-form@7.66.0(react@18.3.1)) '@next/third-parties': specifier: 15.4.6 - version: 15.4.6(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) + version: 15.4.6(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) '@phosphor-icons/react': specifier: 2.1.10 version: 2.1.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -88,7 +88,7 @@ importers: version: 5.24.13(@rjsf/utils@5.24.13(react@18.3.1)) '@sentry/nextjs': specifier: 10.27.0 - version: 10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9)) + version: 10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9)) '@supabase/ssr': specifier: 0.7.0 version: 0.7.0(@supabase/supabase-js@2.78.0) @@ -106,10 +106,10 @@ importers: version: 0.2.4 '@vercel/analytics': specifier: 1.5.0 - version: 1.5.0(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) + version: 
1.5.0(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) '@vercel/speed-insights': specifier: 1.2.0 - version: 1.2.0(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) + version: 1.2.0(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) '@xyflow/react': specifier: 12.9.2 version: 12.9.2(@types/react@18.3.17)(immer@10.1.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -148,7 +148,7 @@ importers: version: 12.23.24(@emotion/is-prop-valid@1.2.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) geist: specifier: 1.5.1 - version: 1.5.1(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)) + version: 1.5.1(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)) highlight.js: specifier: 11.11.1 version: 11.11.1 @@ -171,14 +171,14 @@ importers: specifier: 2.30.1 version: 2.30.1 next: - specifier: 15.4.8 - version: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + specifier: 15.4.10 + version: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) next-themes: specifier: 0.4.6 version: 0.4.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1) nuqs: specifier: 2.7.2 - version: 2.7.2(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) + version: 2.7.2(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) party-js: specifier: 2.2.0 version: 2.2.0 @@ -284,7 +284,7 @@ importers: version: 9.1.5(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2)) '@storybook/nextjs': specifier: 9.1.5 - version: 9.1.5(esbuild@0.25.9)(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.101.3(esbuild@0.25.9)) + version: 9.1.5(esbuild@0.25.9)(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.101.3(esbuild@0.25.9)) '@tanstack/eslint-plugin-query': specifier: 5.91.2 version: 5.91.2(eslint@8.57.1)(typescript@5.9.3) @@ -331,14 +331,11 @@ importers: specifier: 8.57.1 version: 8.57.1 eslint-config-next: - specifier: 15.5.2 - version: 15.5.2(eslint@8.57.1)(typescript@5.9.3) + specifier: 15.5.7 + version: 15.5.7(eslint@8.57.1)(typescript@5.9.3) eslint-plugin-storybook: specifier: 9.1.5 version: 
9.1.5(eslint@8.57.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(typescript@5.9.3) - import-in-the-middle: - specifier: 1.14.2 - version: 1.14.2 msw: specifier: 2.11.6 version: 2.11.6(@types/node@24.10.0)(typescript@5.9.3) @@ -986,12 +983,15 @@ packages: '@date-fns/tz@1.4.1': resolution: {integrity: sha512-P5LUNhtbj6YfI3iJjw5EL9eUAG6OitD0W3fWQcpQjDRc/QIsL0tRNuO1PcDvPccWL1fSTXXdE1ds+l95DV/OFA==} - '@emnapi/core@1.5.0': - resolution: {integrity: sha512-sbP8GzB1WDzacS8fgNPpHlp6C9VZe+SJP3F90W9rLemaQj2PzIuTEl1qDOYQf58YIpyjViI24y9aPWCjEzY2cg==} + '@emnapi/core@1.7.1': + resolution: {integrity: sha512-o1uhUASyo921r2XtHYOHy7gdkGLge8ghBEQHMWmyJFoXlpU58kIrhhN3w26lpQb6dspetweapMn2CSNwQ8I4wg==} '@emnapi/runtime@1.5.0': resolution: {integrity: sha512-97/BJ3iXHww3djw6hYIfErCZFee7qCtrneuLa20UXFCOTCfBM2cvQHjWJ2EG0s0MtdNwInarqCTz35i4wWXHsQ==} + '@emnapi/runtime@1.7.1': + resolution: {integrity: sha512-PVtJr5CmLwYAU9PZDMITZoR5iAOShYREoR45EyyLrbntV50mdePTgUn4AmOw90Ifcj+x2kRjdzr1HP3RrNiHGA==} + '@emnapi/wasi-threads@1.1.0': resolution: {integrity: sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==} @@ -1329,6 +1329,10 @@ packages: resolution: {integrity: sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==} engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + '@eslint-community/regexpp@4.12.2': + resolution: {integrity: sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + '@eslint/eslintrc@2.1.4': resolution: {integrity: sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} @@ -1602,11 +1606,11 @@ packages: '@neoconfetti/react@1.0.0': resolution: {integrity: sha512-klcSooChXXOzIm+SE5IISIAn3bYzYfPjbX7D7HoqZL84oAfgREeSg5vSIaSFH+DaGzzvImTyWe1OyrJ67vik4A==} - '@next/env@15.4.8': - resolution: {integrity: sha512-LydLa2MDI1NMrOFSkO54mTc8iIHSttj6R6dthITky9ylXV2gCGi0bHQjVCtLGRshdRPjyh2kXbxJukDtBWQZtQ==} + '@next/env@15.4.10': + resolution: {integrity: sha512-knhmoJ0Vv7VRf6pZEPSnciUG1S4bIhWx+qTYBW/AjxEtlzsiNORPk8sFDCEvqLfmKuey56UB9FL1UdHEV3uBrg==} - '@next/eslint-plugin-next@15.5.2': - resolution: {integrity: sha512-lkLrRVxcftuOsJNhWatf1P2hNVfh98k/omQHrCEPPriUypR6RcS13IvLdIrEvkm9AH2Nu2YpR5vLqBuy6twH3Q==} + '@next/eslint-plugin-next@15.5.7': + resolution: {integrity: sha512-DtRU2N7BkGr8r+pExfuWHwMEPX5SD57FeA6pxdgCHODo+b/UgIgjE+rgWKtJAbEbGhVZ2jtHn4g3wNhWFoNBQQ==} '@next/swc-darwin-arm64@15.4.8': resolution: {integrity: sha512-Pf6zXp7yyQEn7sqMxur6+kYcywx5up1J849psyET7/8pG2gQTVMjU3NzgIt8SeEP5to3If/SaWmaA6H6ysBr1A==} @@ -2622,8 +2626,8 @@ packages: '@rtsao/scc@1.1.0': resolution: {integrity: sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==} - '@rushstack/eslint-patch@1.12.0': - resolution: {integrity: sha512-5EwMtOqvJMMa3HbmxLlF74e+3/HhwBTMcvt3nqVJgGCozO6hzIPOBlwm8mGVNR9SN2IJpxSnlxczyDjcn7qIyw==} + '@rushstack/eslint-patch@1.15.0': + resolution: {integrity: sha512-ojSshQPKwVvSMR8yT2L/QtUkV5SXi/IfDiJ4/8d6UbTPjiHVmxZzUAzGD8Tzks1b9+qQkZa0isUOvYObedITaw==} '@scarf/scarf@1.4.0': resolution: {integrity: sha512-xxeapPiUXdZAE3che6f3xogoJPeZgig6omHEy1rIY5WVsB3H2BHNnZH+gHG6x91SCWyQCzWGsuL2Hh3ClO5/qQ==} @@ -3097,8 +3101,8 @@ packages: peerDependencies: '@testing-library/dom': '>=7.21.4' - '@tybys/wasm-util@0.10.0': - resolution: {integrity: 
sha512-VyyPYFlOMNylG45GoAe0xDoLwWuowvf92F9kySqzYh8vmYm7D2u4iUJKa1tOUpS70Ku13ASrOkS4ScXFsTaCNQ==} + '@tybys/wasm-util@0.10.1': + resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==} '@types/aria-query@5.0.4': resolution: {integrity: sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==} @@ -3288,16 +3292,16 @@ packages: '@types/ws@8.18.1': resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} - '@typescript-eslint/eslint-plugin@8.43.0': - resolution: {integrity: sha512-8tg+gt7ENL7KewsKMKDHXR1vm8tt9eMxjJBYINf6swonlWgkYn5NwyIgXpbbDxTNU5DgpDFfj95prcTq2clIQQ==} + '@typescript-eslint/eslint-plugin@8.48.1': + resolution: {integrity: sha512-X63hI1bxl5ohelzr0LY5coufyl0LJNthld+abwxpCoo6Gq+hSqhKwci7MUWkXo67mzgUK6YFByhmaHmUcuBJmA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: - '@typescript-eslint/parser': ^8.43.0 + '@typescript-eslint/parser': ^8.48.1 eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/parser@8.43.0': - resolution: {integrity: sha512-B7RIQiTsCBBmY+yW4+ILd6mF5h1FUwJsVvpqkrgpszYifetQ2Ke+Z4u6aZh0CblkUGIdR59iYVyXqqZGkZ3aBw==} + '@typescript-eslint/parser@8.48.1': + resolution: {integrity: sha512-PC0PDZfJg8sP7cmKe6L3QIL8GZwU5aRvUFedqSIpw3B+QjRSUZeeITC2M5XKeMXEzL6wccN196iy3JLwKNvDVA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 @@ -3315,6 +3319,12 @@ packages: peerDependencies: typescript: '>=4.8.4 <6.0.0' + '@typescript-eslint/project-service@8.48.1': + resolution: {integrity: sha512-HQWSicah4s9z2/HifRPQ6b6R7G+SBx64JlFQpgSSHWPKdvCZX57XCbszg/bapbRsOEv42q5tayTYcEFpACcX1w==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + '@typescript-eslint/scope-manager@8.43.0': resolution: {integrity: sha512-daSWlQ87ZhsjrbMLvpuuMAt3y4ba57AuvadcR7f3nl8eS3BjRc8L9VLxFLk92RL5xdXOg6IQ+qKjjqNEimGuAg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3323,6 +3333,10 @@ packages: resolution: {integrity: sha512-LF4b/NmGvdWEHD2H4MsHD8ny6JpiVNDzrSZr3CsckEgCbAGZbYM4Cqxvi9L+WqDMT+51Ozy7lt2M+d0JLEuBqA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/scope-manager@8.48.1': + resolution: {integrity: sha512-rj4vWQsytQbLxC5Bf4XwZ0/CKd362DkWMUkviT7DCS057SK64D5lH74sSGzhI6PDD2HCEq02xAP9cX68dYyg1w==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/tsconfig-utils@8.43.0': resolution: {integrity: sha512-ALC2prjZcj2YqqL5X/bwWQmHA2em6/94GcbB/KKu5SX3EBDOsqztmmX1kMkvAJHzxk7TazKzJfFiEIagNV3qEA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3335,8 +3349,14 @@ packages: peerDependencies: typescript: '>=4.8.4 <6.0.0' - '@typescript-eslint/type-utils@8.43.0': - resolution: {integrity: sha512-qaH1uLBpBuBBuRf8c1mLJ6swOfzCXryhKND04Igr4pckzSEW9JX5Aw9AgW00kwfjWJF0kk0ps9ExKTfvXfw4Qg==} + '@typescript-eslint/tsconfig-utils@8.48.1': + resolution: {integrity: sha512-k0Jhs4CpEffIBm6wPaCXBAD7jxBtrHjrSgtfCjUvPp9AZ78lXKdTR8fxyZO5y4vWNlOvYXRtngSZNSn+H53Jkw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/type-utils@8.48.1': + resolution: {integrity: sha512-1jEop81a3LrJQLTf/1VfPQdhIY4PlGDBc/i67EVWObrtvcziysbLN3oReexHOM6N3jyXgCrkBsZpqwH0hiDOQg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 @@ -3350,6 +3370,10 @@ packages: resolution: 
{integrity: sha512-lNCWCbq7rpg7qDsQrd3D6NyWYu+gkTENkG5IKYhUIcxSb59SQC/hEQ+MrG4sTgBVghTonNWq42bA/d4yYumldQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/types@8.48.1': + resolution: {integrity: sha512-+fZ3LZNeiELGmimrujsDCT4CRIbq5oXdHe7chLiW8qzqyPMnn1puNstCrMNVAqwcl2FdIxkuJ4tOs/RFDBVc/Q==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/typescript-estree@8.43.0': resolution: {integrity: sha512-7Vv6zlAhPb+cvEpP06WXXy/ZByph9iL6BQRBDj4kmBsW98AqEeQHlj/13X+sZOrKSo9/rNKH4Ul4f6EICREFdw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3362,6 +3386,12 @@ packages: peerDependencies: typescript: '>=4.8.4 <6.0.0' + '@typescript-eslint/typescript-estree@8.48.1': + resolution: {integrity: sha512-/9wQ4PqaefTK6POVTjJaYS0bynCgzh6ClJHGSBj06XEHjkfylzB+A3qvyaXnErEZSaxhIo4YdyBgq6j4RysxDg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + '@typescript-eslint/utils@8.43.0': resolution: {integrity: sha512-S1/tEmkUeeswxd0GGcnwuVQPFWo8NzZTOMxCvw8BX7OMxnNae+i8Tm7REQen/SwUIPoPqfKn7EaZ+YLpiB3k9g==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3376,6 +3406,13 @@ packages: eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <6.0.0' + '@typescript-eslint/utils@8.48.1': + resolution: {integrity: sha512-fAnhLrDjiVfey5wwFRwrweyRlCmdz5ZxXz2G/4cLn0YDLjTapmN4gcCsTBR1N2rWnZSDeWpYtgLDsJt+FpmcwA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <6.0.0' + '@typescript-eslint/visitor-keys@8.43.0': resolution: {integrity: sha512-T+S1KqRD4sg/bHfLwrpF/K3gQLBM1n7Rp7OjjikjTEssI2YJzQpi5WXoynOaQ93ERIuq3O8RBTOUYDKszUCEHw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3384,6 +3421,10 @@ packages: resolution: {integrity: sha512-tUFMXI4gxzzMXt4xpGJEsBsTox0XbNQ1y94EwlD/CuZwFcQP79xfQqMhau9HsRc/J0cAPA/HZt1dZPtGn9V/7w==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/visitor-keys@8.48.1': + resolution: {integrity: sha512-BmxxndzEWhE4TIEEMBs8lP3MBWN3jFPs/p6gPm/wkv02o41hI6cq9AuSmGAaTTHPtA1FTi2jBre4A9rm5ZmX+Q==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@ungap/structured-clone@1.3.0': resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==} @@ -4585,8 +4626,8 @@ packages: resolution: {integrity: sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==} engines: {node: '>=12'} - eslint-config-next@15.5.2: - resolution: {integrity: sha512-3hPZghsLupMxxZ2ggjIIrat/bPniM2yRpsVPVM40rp8ZMzKWOJp2CGWn7+EzoV2ddkUr5fxNfHpF+wU1hGt/3g==} + eslint-config-next@15.5.7: + resolution: {integrity: sha512-nU/TRGHHeG81NeLW5DeQT5t6BDUqbpsNQTvef1ld/tqHT+/zTx60/TIhKnmPISTTe++DVo+DLxDmk4rnwHaZVw==} peerDependencies: eslint: ^7.23.0 || ^8.0.0 || ^9.0.0 typescript: '>=3.3.1' @@ -4918,6 +4959,10 @@ packages: peerDependencies: next: '>=13.2.0' + generator-function@2.0.1: + resolution: {integrity: sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==} + engines: {node: '>= 0.4'} + gensync@1.0.0-beta.2: resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} engines: {node: '>=6.9.0'} @@ -4946,8 +4991,8 @@ packages: resolution: {integrity: sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==} engines: {node: '>= 0.4'} - get-tsconfig@4.10.1: - resolution: {integrity: 
sha512-auHyJ4AgMz7vgS8Hp3N6HXSmlMdUyhSUrfBF16w153rxtLIEOE+HGqaBppczZvnHLqQJfiHotCYpNhl0lUROFQ==} + get-tsconfig@4.13.0: + resolution: {integrity: sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ==} github-slugger@2.0.0: resolution: {integrity: sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==} @@ -5168,9 +5213,6 @@ packages: resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==} engines: {node: '>=6'} - import-in-the-middle@1.14.2: - resolution: {integrity: sha512-5tCuY9BV8ujfOpwtAGgsTx9CGUapcFMEEyByLv1B+v2+6DhAcw+Zr0nhQT7uwaZ7DiourxFEscghOR8e1aPLQw==} - import-in-the-middle@2.0.0: resolution: {integrity: sha512-yNZhyQYqXpkT0AKq3F3KLasUSK4fHvebNH5hOsKQw2dhGSALvQ4U0BqUc5suziKvydO5u5hgN2hy1RJaho8U5A==} @@ -5282,6 +5324,10 @@ packages: resolution: {integrity: sha512-nPUB5km40q9e8UfN/Zc24eLlzdSf9OfKByBw9CIdw4H1giPMeA0OIJvbchsCu4npfI2QcMVBsGEBHKZ7wLTWmQ==} engines: {node: '>= 0.4'} + is-generator-function@1.1.2: + resolution: {integrity: sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA==} + engines: {node: '>= 0.4'} + is-glob@4.0.3: resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} engines: {node: '>=0.10.0'} @@ -5903,8 +5949,8 @@ packages: engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} hasBin: true - napi-postinstall@0.3.3: - resolution: {integrity: sha512-uTp172LLXSxuSYHv/kou+f6KW3SMppU9ivthaVTXian9sOt3XM/zHYHpRZiLgQoxeWfYUnslNWQHF1+G71xcow==} + napi-postinstall@0.3.4: + resolution: {integrity: sha512-PHI5f1O0EP5xJ9gQmFGMS6IZcrVvTjpXjz7Na41gTE7eE2hK11lg04CECCYEEjdc17EV4DO+fkGEtt7TpTaTiQ==} engines: {node: ^12.20.0 || ^14.18.0 || >=16.0.0} hasBin: true @@ -5920,8 +5966,8 @@ packages: react: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc react-dom: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc - next@15.4.8: - resolution: {integrity: sha512-jwOXTz/bo0Pvlf20FSb6VXVeWRssA2vbvq9SdrOPEg9x8E1B27C2rQtvriAn600o9hH61kjrVRexEffv3JybuA==} + next@15.4.10: + resolution: {integrity: sha512-itVlc79QjpKMFMRhP+kbGKaSG/gZM6RCvwhEbwmCNF06CdDiNaoHcbeg0PqkEa2GOcn8KJ0nnc7+yL7EjoYLHQ==} engines: {node: ^18.18.0 || ^19.8.0 || >= 20.0.0} hasBin: true peerDependencies: @@ -6769,6 +6815,11 @@ packages: engines: {node: '>= 0.4'} hasBin: true + resolve@1.22.11: + resolution: {integrity: sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==} + engines: {node: '>= 0.4'} + hasBin: true + resolve@1.22.8: resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==} hasBin: true @@ -7858,7 +7909,7 @@ snapshots: '@babel/helper-plugin-utils': 7.27.1 debug: 4.4.3 lodash.debounce: 4.0.8 - resolve: 1.22.10 + resolve: 1.22.11 transitivePeerDependencies: - supports-color @@ -8550,7 +8601,7 @@ snapshots: '@date-fns/tz@1.4.1': {} - '@emnapi/core@1.5.0': + '@emnapi/core@1.7.1': dependencies: '@emnapi/wasi-threads': 1.1.0 tslib: 2.8.1 @@ -8561,6 +8612,11 @@ snapshots: tslib: 2.8.1 optional: true + '@emnapi/runtime@1.7.1': + dependencies: + tslib: 2.8.1 + optional: true + '@emnapi/wasi-threads@1.1.0': dependencies: tslib: 2.8.1 @@ -8739,6 +8795,8 @@ snapshots: '@eslint-community/regexpp@4.12.1': {} + '@eslint-community/regexpp@4.12.2': {} + '@eslint/eslintrc@2.1.4': dependencies: ajv: 6.12.6 @@ -8996,16 +9054,16 @@ snapshots: '@napi-rs/wasm-runtime@0.2.12': dependencies: - 
'@emnapi/core': 1.5.0 - '@emnapi/runtime': 1.5.0 - '@tybys/wasm-util': 0.10.0 + '@emnapi/core': 1.7.1 + '@emnapi/runtime': 1.7.1 + '@tybys/wasm-util': 0.10.1 optional: true '@neoconfetti/react@1.0.0': {} - '@next/env@15.4.8': {} + '@next/env@15.4.10': {} - '@next/eslint-plugin-next@15.5.2': + '@next/eslint-plugin-next@15.5.7': dependencies: fast-glob: 3.3.1 @@ -9033,9 +9091,9 @@ snapshots: '@next/swc-win32-x64-msvc@15.4.8': optional: true - '@next/third-parties@15.4.6(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)': + '@next/third-parties@15.4.6(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)': dependencies: - next: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 third-party-capital: 1.0.20 @@ -10115,7 +10173,7 @@ snapshots: '@rtsao/scc@1.1.0': {} - '@rushstack/eslint-patch@1.12.0': {} + '@rushstack/eslint-patch@1.15.0': {} '@scarf/scarf@1.4.0': {} @@ -10267,7 +10325,7 @@ snapshots: '@sentry/core@10.27.0': {} - '@sentry/nextjs@10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9))': + '@sentry/nextjs@10.27.0(@opentelemetry/context-async-hooks@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.2.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)(webpack@5.101.3(esbuild@0.25.9))': dependencies: '@opentelemetry/api': 1.9.0 '@opentelemetry/semantic-conventions': 1.37.0 @@ -10280,7 +10338,7 @@ snapshots: '@sentry/react': 10.27.0(react@18.3.1) '@sentry/vercel-edge': 10.27.0 '@sentry/webpack-plugin': 4.3.0(webpack@5.101.3(esbuild@0.25.9)) - next: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) resolve: 1.22.8 rollup: 4.52.2 stacktrace-parser: 0.1.11 @@ -10642,7 +10700,7 @@ snapshots: react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@storybook/nextjs@9.1.5(esbuild@0.25.9)(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.101.3(esbuild@0.25.9))': + 
'@storybook/nextjs@9.1.5(esbuild@0.25.9)(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(storybook@9.1.5(@testing-library/dom@10.4.1)(msw@2.11.6(@types/node@24.10.0)(typescript@5.9.3))(prettier@3.6.2))(type-fest@4.41.0)(typescript@5.9.3)(webpack-hot-middleware@2.26.1)(webpack@5.101.3(esbuild@0.25.9))': dependencies: '@babel/core': 7.28.4 '@babel/plugin-syntax-bigint': 7.8.3(@babel/core@7.28.4) @@ -10666,7 +10724,7 @@ snapshots: css-loader: 6.11.0(webpack@5.101.3(esbuild@0.25.9)) image-size: 2.0.2 loader-utils: 3.3.1 - next: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) node-polyfill-webpack-plugin: 2.0.1(webpack@5.101.3(esbuild@0.25.9)) postcss: 8.5.6 postcss-loader: 8.2.0(postcss@8.5.6)(typescript@5.9.3)(webpack@5.101.3(esbuild@0.25.9)) @@ -10867,7 +10925,7 @@ snapshots: dependencies: '@testing-library/dom': 10.4.1 - '@tybys/wasm-util@0.10.0': + '@tybys/wasm-util@0.10.1': dependencies: tslib: 2.8.1 optional: true @@ -11065,14 +11123,14 @@ snapshots: dependencies: '@types/node': 24.10.0 - '@typescript-eslint/eslint-plugin@8.43.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3)': + '@typescript-eslint/eslint-plugin@8.48.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3)': dependencies: - '@eslint-community/regexpp': 4.12.1 - '@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3) - '@typescript-eslint/scope-manager': 8.43.0 - '@typescript-eslint/type-utils': 8.43.0(eslint@8.57.1)(typescript@5.9.3) - '@typescript-eslint/utils': 8.43.0(eslint@8.57.1)(typescript@5.9.3) - '@typescript-eslint/visitor-keys': 8.43.0 + '@eslint-community/regexpp': 4.12.2 + '@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3) + '@typescript-eslint/scope-manager': 8.48.1 + '@typescript-eslint/type-utils': 8.48.1(eslint@8.57.1)(typescript@5.9.3) + '@typescript-eslint/utils': 8.48.1(eslint@8.57.1)(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.48.1 eslint: 8.57.1 graphemer: 1.4.0 ignore: 7.0.5 @@ -11082,12 +11140,12 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3)': + '@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3)': dependencies: - '@typescript-eslint/scope-manager': 8.43.0 - '@typescript-eslint/types': 8.43.0 - '@typescript-eslint/typescript-estree': 8.43.0(typescript@5.9.3) - '@typescript-eslint/visitor-keys': 8.43.0 + '@typescript-eslint/scope-manager': 8.48.1 + '@typescript-eslint/types': 8.48.1 + '@typescript-eslint/typescript-estree': 8.48.1(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.48.1 debug: 4.4.3 eslint: 8.57.1 typescript: 5.9.3 @@ -11097,7 +11155,7 @@ snapshots: '@typescript-eslint/project-service@8.43.0(typescript@5.9.3)': dependencies: '@typescript-eslint/tsconfig-utils': 8.43.0(typescript@5.9.3) - '@typescript-eslint/types': 8.43.0 + '@typescript-eslint/types': 8.48.1 debug: 4.4.3 typescript: 5.9.3 transitivePeerDependencies: @@ -11106,7 +11164,16 @@ snapshots: '@typescript-eslint/project-service@8.46.2(typescript@5.9.3)': dependencies: '@typescript-eslint/tsconfig-utils': 8.46.2(typescript@5.9.3) - 
'@typescript-eslint/types': 8.46.2 + '@typescript-eslint/types': 8.48.1 + debug: 4.4.3 + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/project-service@8.48.1(typescript@5.9.3)': + dependencies: + '@typescript-eslint/tsconfig-utils': 8.48.1(typescript@5.9.3) + '@typescript-eslint/types': 8.48.1 debug: 4.4.3 typescript: 5.9.3 transitivePeerDependencies: @@ -11122,6 +11189,11 @@ snapshots: '@typescript-eslint/types': 8.46.2 '@typescript-eslint/visitor-keys': 8.46.2 + '@typescript-eslint/scope-manager@8.48.1': + dependencies: + '@typescript-eslint/types': 8.48.1 + '@typescript-eslint/visitor-keys': 8.48.1 + '@typescript-eslint/tsconfig-utils@8.43.0(typescript@5.9.3)': dependencies: typescript: 5.9.3 @@ -11130,11 +11202,15 @@ snapshots: dependencies: typescript: 5.9.3 - '@typescript-eslint/type-utils@8.43.0(eslint@8.57.1)(typescript@5.9.3)': + '@typescript-eslint/tsconfig-utils@8.48.1(typescript@5.9.3)': dependencies: - '@typescript-eslint/types': 8.43.0 - '@typescript-eslint/typescript-estree': 8.43.0(typescript@5.9.3) - '@typescript-eslint/utils': 8.43.0(eslint@8.57.1)(typescript@5.9.3) + typescript: 5.9.3 + + '@typescript-eslint/type-utils@8.48.1(eslint@8.57.1)(typescript@5.9.3)': + dependencies: + '@typescript-eslint/types': 8.48.1 + '@typescript-eslint/typescript-estree': 8.48.1(typescript@5.9.3) + '@typescript-eslint/utils': 8.48.1(eslint@8.57.1)(typescript@5.9.3) debug: 4.4.3 eslint: 8.57.1 ts-api-utils: 2.1.0(typescript@5.9.3) @@ -11146,6 +11222,8 @@ snapshots: '@typescript-eslint/types@8.46.2': {} + '@typescript-eslint/types@8.48.1': {} + '@typescript-eslint/typescript-estree@8.43.0(typescript@5.9.3)': dependencies: '@typescript-eslint/project-service': 8.43.0(typescript@5.9.3) @@ -11156,7 +11234,7 @@ snapshots: fast-glob: 3.3.3 is-glob: 4.0.3 minimatch: 9.0.5 - semver: 7.7.2 + semver: 7.7.3 ts-api-utils: 2.1.0(typescript@5.9.3) typescript: 5.9.3 transitivePeerDependencies: @@ -11178,6 +11256,21 @@ snapshots: transitivePeerDependencies: - supports-color + '@typescript-eslint/typescript-estree@8.48.1(typescript@5.9.3)': + dependencies: + '@typescript-eslint/project-service': 8.48.1(typescript@5.9.3) + '@typescript-eslint/tsconfig-utils': 8.48.1(typescript@5.9.3) + '@typescript-eslint/types': 8.48.1 + '@typescript-eslint/visitor-keys': 8.48.1 + debug: 4.4.3 + minimatch: 9.0.5 + semver: 7.7.3 + tinyglobby: 0.2.15 + ts-api-utils: 2.1.0(typescript@5.9.3) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + '@typescript-eslint/utils@8.43.0(eslint@8.57.1)(typescript@5.9.3)': dependencies: '@eslint-community/eslint-utils': 4.9.0(eslint@8.57.1) @@ -11200,6 +11293,17 @@ snapshots: transitivePeerDependencies: - supports-color + '@typescript-eslint/utils@8.48.1(eslint@8.57.1)(typescript@5.9.3)': + dependencies: + '@eslint-community/eslint-utils': 4.9.0(eslint@8.57.1) + '@typescript-eslint/scope-manager': 8.48.1 + '@typescript-eslint/types': 8.48.1 + '@typescript-eslint/typescript-estree': 8.48.1(typescript@5.9.3) + eslint: 8.57.1 + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + '@typescript-eslint/visitor-keys@8.43.0': dependencies: '@typescript-eslint/types': 8.43.0 @@ -11210,6 +11314,11 @@ snapshots: '@typescript-eslint/types': 8.46.2 eslint-visitor-keys: 4.2.1 + '@typescript-eslint/visitor-keys@8.48.1': + dependencies: + '@typescript-eslint/types': 8.48.1 + eslint-visitor-keys: 4.2.1 + '@ungap/structured-clone@1.3.0': {} '@unrs/resolver-binding-android-arm-eabi@1.11.1': @@ -11271,14 +11380,14 @@ snapshots: 
'@unrs/resolver-binding-win32-x64-msvc@1.11.1': optional: true - '@vercel/analytics@1.5.0(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)': + '@vercel/analytics@1.5.0(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)': optionalDependencies: - next: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 - '@vercel/speed-insights@1.2.0(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)': + '@vercel/speed-insights@1.2.0(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)': optionalDependencies: - next: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 '@vitest/expect@3.2.4': @@ -12532,16 +12641,16 @@ snapshots: escape-string-regexp@5.0.0: {} - eslint-config-next@15.5.2(eslint@8.57.1)(typescript@5.9.3): + eslint-config-next@15.5.7(eslint@8.57.1)(typescript@5.9.3): dependencies: - '@next/eslint-plugin-next': 15.5.2 - '@rushstack/eslint-patch': 1.12.0 - '@typescript-eslint/eslint-plugin': 8.43.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3) - '@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3) + '@next/eslint-plugin-next': 15.5.7 + '@rushstack/eslint-patch': 1.15.0 + '@typescript-eslint/eslint-plugin': 8.48.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint@8.57.1)(typescript@5.9.3) + '@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3) eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@8.57.1) - eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) + eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) eslint-plugin-jsx-a11y: 6.10.2(eslint@8.57.1) eslint-plugin-react: 7.37.5(eslint@8.57.1) eslint-plugin-react-hooks: 5.2.0(eslint@8.57.1) @@ -12556,7 +12665,7 @@ snapshots: dependencies: debug: 3.2.7 is-core-module: 2.16.1 - resolve: 1.22.10 + resolve: 1.22.11 transitivePeerDependencies: - supports-color @@ -12565,28 +12674,28 @@ snapshots: '@nolyfill/is-core-module': 1.0.39 debug: 4.4.3 eslint: 8.57.1 - get-tsconfig: 4.10.1 + get-tsconfig: 4.13.0 is-bun-module: 2.0.0 stable-hash: 0.0.5 tinyglobby: 0.2.15 unrs-resolver: 1.11.1 optionalDependencies: - eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) + eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) transitivePeerDependencies: - supports-color - 
eslint-module-utils@2.12.1(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1): + eslint-module-utils@2.12.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1): dependencies: debug: 3.2.7 optionalDependencies: - '@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3) + '@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3) eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@8.57.1) transitivePeerDependencies: - supports-color - eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1): + eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1): dependencies: '@rtsao/scc': 1.1.0 array-includes: 3.1.9 @@ -12597,7 +12706,7 @@ snapshots: doctrine: 2.1.0 eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.43.0(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) + eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.48.1(eslint@8.57.1)(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@8.57.1) hasown: 2.0.2 is-core-module: 2.16.1 is-glob: 4.0.3 @@ -12609,7 +12718,7 @@ snapshots: string.prototype.trimend: 1.0.9 tsconfig-paths: 3.15.0 optionalDependencies: - '@typescript-eslint/parser': 8.43.0(eslint@8.57.1)(typescript@5.9.3) + '@typescript-eslint/parser': 8.48.1(eslint@8.57.1)(typescript@5.9.3) transitivePeerDependencies: - eslint-import-resolver-typescript - eslint-import-resolver-webpack @@ -12954,9 +13063,11 @@ snapshots: functions-have-names@1.2.3: {} - geist@1.5.1(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)): + geist@1.5.1(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)): dependencies: - next: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + + generator-function@2.0.1: {} gensync@1.0.0-beta.2: {} @@ -12990,7 +13101,7 @@ snapshots: es-errors: 1.3.0 get-intrinsic: 1.3.0 - get-tsconfig@4.10.1: + get-tsconfig@4.13.0: dependencies: resolve-pkg-maps: 1.0.0 @@ -13274,13 +13385,6 @@ snapshots: parent-module: 1.0.1 resolve-from: 4.0.0 - import-in-the-middle@1.14.2: - dependencies: - acorn: 8.15.0 - acorn-import-attributes: 1.9.5(acorn@8.15.0) - cjs-module-lexer: 1.4.3 - module-details-from-path: 1.0.4 - import-in-the-middle@2.0.0: dependencies: acorn: 8.15.0 @@ -13357,7 +13461,7 @@ snapshots: is-bun-module@2.0.0: dependencies: - semver: 7.7.2 + semver: 7.7.3 is-callable@1.2.7: {} @@ -13395,6 +13499,14 @@ snapshots: has-tostringtag: 1.0.2 safe-regex-test: 1.1.0 + is-generator-function@1.1.2: + dependencies: + call-bound: 1.0.4 + generator-function: 2.0.1 + get-proto: 1.0.1 + has-tostringtag: 1.0.2 + safe-regex-test: 1.1.0 + is-glob@4.0.3: dependencies: 
is-extglob: 2.1.1 @@ -14215,7 +14327,7 @@ snapshots: nanoid@3.3.11: {} - napi-postinstall@0.3.3: {} + napi-postinstall@0.3.4: {} natural-compare@1.4.0: {} @@ -14226,9 +14338,9 @@ snapshots: react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: - '@next/env': 15.4.8 + '@next/env': 15.4.10 '@swc/helpers': 0.5.15 caniuse-lite: 1.0.30001741 postcss: 8.4.31 @@ -14321,12 +14433,12 @@ snapshots: dependencies: boolbase: 1.0.0 - nuqs@2.7.2(next@15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1): + nuqs@2.7.2(next@15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1): dependencies: '@standard-schema/spec': 1.0.0 react: 18.3.1 optionalDependencies: - next: 15.4.8(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 15.4.10(@babel/core@7.28.4)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) oas-kit-common@1.0.8: dependencies: @@ -15185,6 +15297,12 @@ snapshots: path-parse: 1.0.7 supports-preserve-symlinks-flag: 1.0.0 + resolve@1.22.11: + dependencies: + is-core-module: 2.16.1 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + resolve@1.22.8: dependencies: is-core-module: 2.16.1 @@ -15996,7 +16114,7 @@ snapshots: unrs-resolver@1.11.1: dependencies: - napi-postinstall: 0.3.3 + napi-postinstall: 0.3.4 optionalDependencies: '@unrs/resolver-binding-android-arm-eabi': 1.11.1 '@unrs/resolver-binding-android-arm64': 1.11.1 @@ -16224,7 +16342,7 @@ snapshots: is-async-function: 2.1.1 is-date-object: 1.1.0 is-finalizationregistry: 1.1.1 - is-generator-function: 1.1.0 + is-generator-function: 1.1.2 is-regex: 1.2.1 is-weakref: 1.1.1 isarray: 2.0.5 diff --git a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx index 4b6abacbff..58960a0cf6 100644 --- a/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx +++ b/autogpt_platform/frontend/src/app/(no-navbar)/onboarding/5-run/page.tsx @@ -8,7 +8,6 @@ import { CardTitle, } from "@/components/__legacy__/ui/card"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; -import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; import { CircleNotchIcon } from "@phosphor-icons/react/dist/ssr"; import { Play } from "lucide-react"; import OnboardingButton from "../components/OnboardingButton"; @@ -79,20 +78,13 @@ export default function Page() { {Object.entries(agent?.input_schema.properties || {}).map( ([key, inputSubSchema]) => ( -
-
-              handleSetAgentInput(key, value)}
-            />
-
+ handleSetAgentInput(key, value)} + /> ), )} { - // Prevent copy/paste if any modal is open or if the focus is on an input element const activeElement = document.activeElement; const isInputField = activeElement?.tagName === "INPUT" || @@ -28,7 +29,6 @@ export function useCopyPaste() { if (isInputField) return; if (event.ctrlKey || event.metaKey) { - // COPY: Ctrl+C or Cmd+C if (event.key === "c" || event.key === "C") { const { nodes } = useNodeStore.getState(); const { edges } = useEdgeStore.getState(); @@ -53,81 +53,102 @@ export function useCopyPaste() { edges: selectedEdges, }; - storage.set(Key.COPIED_FLOW_DATA, JSON.stringify(copiedData)); + const clipboardText = `${CLIPBOARD_PREFIX}${JSON.stringify(copiedData)}`; + navigator.clipboard + .writeText(clipboardText) + .then(() => { + toast({ + title: "Copied successfully", + description: `${selectedNodes.length} node(s) copied to clipboard`, + }); + }) + .catch((error) => { + console.error("Failed to copy to clipboard:", error); + }); } - // PASTE: Ctrl+V or Cmd+V if (event.key === "v" || event.key === "V") { - const copiedDataString = storage.get(Key.COPIED_FLOW_DATA); - if (copiedDataString) { - const copiedData = JSON.parse(copiedDataString) as CopyableData; - const oldToNewIdMap: Record = {}; + navigator.clipboard + .readText() + .then((clipboardText) => { + if (!clipboardText.startsWith(CLIPBOARD_PREFIX)) { + return; // Not our data, ignore + } - // Get fresh viewport values at paste time to ensure correct positioning - const { x, y, zoom } = getViewport(); - const viewportCenter = { - x: (window.innerWidth / 2 - x) / zoom, - y: (window.innerHeight / 2 - y) / zoom, - }; + const jsonString = clipboardText.slice(CLIPBOARD_PREFIX.length); + const copiedData = JSON.parse(jsonString) as CopyableData; + const oldToNewIdMap: Record = {}; - let minX = Infinity, - minY = Infinity, - maxX = -Infinity, - maxY = -Infinity; - copiedData.nodes.forEach((node) => { - minX = Math.min(minX, node.position.x); - minY = Math.min(minY, node.position.y); - maxX = Math.max(maxX, node.position.x); - maxY = Math.max(maxY, node.position.y); - }); - - const offsetX = viewportCenter.x - (minX + maxX) / 2; - const offsetY = viewportCenter.y - (minY + maxY) / 2; - - // Deselect existing nodes first - useNodeStore.setState((state) => ({ - nodes: state.nodes.map((node) => ({ ...node, selected: false })), - })); - - // Create and add new nodes with UNIQUE IDs using UUID - copiedData.nodes.forEach((node) => { - const newNodeId = uuidv4(); - oldToNewIdMap[node.id] = newNodeId; - - const newNode: CustomNode = { - ...node, - id: newNodeId, - selected: true, - position: { - x: node.position.x + offsetX, - y: node.position.y + offsetY, - }, + const { x, y, zoom } = getViewport(); + const viewportCenter = { + x: (window.innerWidth / 2 - x) / zoom, + y: (window.innerHeight / 2 - y) / zoom, }; - useNodeStore.getState().addNode(newNode); - }); - - // Add edges with updated source/target IDs - const { addEdge } = useEdgeStore.getState(); - copiedData.edges.forEach((edge) => { - const newSourceId = oldToNewIdMap[edge.source] ?? edge.source; - const newTargetId = oldToNewIdMap[edge.target] ?? edge.target; - - addEdge({ - source: newSourceId, - target: newTargetId, - sourceHandle: edge.sourceHandle ?? "", - targetHandle: edge.targetHandle ?? 
"", - data: { - ...edge.data, - }, + let minX = Infinity, + minY = Infinity, + maxX = -Infinity, + maxY = -Infinity; + copiedData.nodes.forEach((node) => { + minX = Math.min(minX, node.position.x); + minY = Math.min(minY, node.position.y); + maxX = Math.max(maxX, node.position.x); + maxY = Math.max(maxY, node.position.y); }); + + const offsetX = viewportCenter.x - (minX + maxX) / 2; + const offsetY = viewportCenter.y - (minY + maxY) / 2; + + // Deselect existing nodes first + useNodeStore.setState((state) => ({ + nodes: state.nodes.map((node) => ({ + ...node, + selected: false, + })), + })); + + // Create and add new nodes with UNIQUE IDs using UUID + copiedData.nodes.forEach((node) => { + const newNodeId = uuidv4(); + oldToNewIdMap[node.id] = newNodeId; + + const newNode: CustomNode = { + ...node, + id: newNodeId, + selected: true, + position: { + x: node.position.x + offsetX, + y: node.position.y + offsetY, + }, + }; + + useNodeStore.getState().addNode(newNode); + }); + + // Add edges with updated source/target IDs + const { addEdge } = useEdgeStore.getState(); + copiedData.edges.forEach((edge) => { + const newSourceId = oldToNewIdMap[edge.source] ?? edge.source; + const newTargetId = oldToNewIdMap[edge.target] ?? edge.target; + + addEdge({ + source: newSourceId, + target: newTargetId, + sourceHandle: edge.sourceHandle ?? "", + targetHandle: edge.targetHandle ?? "", + data: { + ...edge.data, + }, + }); + }); + }) + .catch((error) => { + console.error("Failed to read from clipboard:", error); }); - } } } }, - [getViewport], + [getViewport, toast], ); return handleCopyPaste; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts index 64f00871d8..bf0ebf0a97 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/Flow/useFlow.ts @@ -42,11 +42,12 @@ export const useFlow = () => { const setBlockMenuOpen = useControlPanelStore( useShallow((state) => state.setBlockMenuOpen), ); - const [{ flowID, flowVersion, flowExecutionID }] = useQueryStates({ - flowID: parseAsString, - flowVersion: parseAsInteger, - flowExecutionID: parseAsString, - }); + const [{ flowID, flowVersion, flowExecutionID }, setQueryStates] = + useQueryStates({ + flowID: parseAsString, + flowVersion: parseAsInteger, + flowExecutionID: parseAsString, + }); const { data: executionDetails } = useGetV1GetExecutionDetails( flowID || "", @@ -102,6 +103,9 @@ export const useFlow = () => { // load graph schemas useEffect(() => { if (graph) { + setQueryStates({ + flowVersion: graph.version ?? 
1, + }); setGraphSchemas( graph.input_schema as Record | null, graph.credentials_input_schema as Record | null, diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx index 974cbe3754..52068f3acb 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/CustomNode/CustomNode.tsx @@ -106,7 +106,11 @@ export const CustomNode: React.FC> = React.memo( /> {data.uiType != BlockUIType.OUTPUT && ( - + )} diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/FormCreator.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/FormCreator.tsx index 315a52f553..cfee0bf89f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/FormCreator.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/FormCreator.tsx @@ -20,17 +20,32 @@ export const FormCreator = React.memo( className?: string; }) => { const updateNodeData = useNodeStore((state) => state.updateNodeData); + const getHardCodedValues = useNodeStore( (state) => state.getHardCodedValues, ); + const handleChange = ({ formData }: any) => { if ("credentials" in formData && !formData.credentials?.id) { delete formData.credentials; } - updateNodeData(nodeId, { hardcodedValues: formData }); + + const updatedValues = + uiType === BlockUIType.AGENT + ? { + ...getHardCodedValues(nodeId), + inputs: formData, + } + : formData; + + updateNodeData(nodeId, { hardcodedValues: updatedValues }); }; - const initialValues = getHardCodedValues(nodeId); + const hardcodedValues = getHardCodedValues(nodeId); + const initialValues = + uiType === BlockUIType.AGENT + ? (hardcodedValues.inputs ?? {}) + : hardcodedValues; return (
diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/OutputHandler.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/OutputHandler.tsx index 9c032ac20f..ab3b648ba9 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/OutputHandler.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/OutputHandler.tsx @@ -14,13 +14,16 @@ import { import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore"; import { getTypeDisplayInfo } from "./helpers"; import { generateHandleId } from "../handlers/helpers"; +import { BlockUIType } from "../../types"; export const OutputHandler = ({ outputSchema, nodeId, + uiType, }: { outputSchema: RJSFSchema; nodeId: string; + uiType: BlockUIType; }) => { const { isOutputConnected } = useEdgeStore(); const properties = outputSchema?.properties || {}; @@ -79,7 +82,9 @@ export const OutputHandler = ({ diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearch/useBlockMenuSearch.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearch/useBlockMenuSearch.ts index bff61f2d85..3eb14d3ca9 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearch/useBlockMenuSearch.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearch/useBlockMenuSearch.ts @@ -1,24 +1,36 @@ import { useBlockMenuStore } from "../../../../stores/blockMenuStore"; import { useGetV2BuilderSearchInfinite } from "@/app/api/__generated__/endpoints/store/store"; import { SearchResponse } from "@/app/api/__generated__/models/searchResponse"; -import { useState } from "react"; +import { useCallback, useEffect, useState } from "react"; import { useAddAgentToBuilder } from "../hooks/useAddAgentToBuilder"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { getV2GetSpecificAgent } from "@/app/api/__generated__/endpoints/store/store"; import { getGetV2ListLibraryAgentsQueryKey, + getV2GetLibraryAgent, usePostV2AddMarketplaceAgent, } from "@/app/api/__generated__/endpoints/library/library"; -import { getGetV2GetBuilderItemCountsQueryKey } from "@/app/api/__generated__/endpoints/default/default"; +import { + getGetV2GetBuilderItemCountsQueryKey, + getGetV2GetBuilderSuggestionsQueryKey, +} from "@/app/api/__generated__/endpoints/default/default"; import { getQueryClient } from "@/lib/react-query/queryClient"; import { useToast } from "@/components/molecules/Toast/use-toast"; import * as Sentry from "@sentry/nextjs"; export const useBlockMenuSearch = () => { - const { searchQuery } = useBlockMenuStore(); + const { searchQuery, searchId, setSearchId } = useBlockMenuStore(); const { toast } = useToast(); const { addAgentToBuilder, addLibraryAgentToBuilder } = useAddAgentToBuilder(); + const queryClient = getQueryClient(); + + const resetSearchSession = useCallback(() => { + setSearchId(undefined); + queryClient.invalidateQueries({ + queryKey: getGetV2GetBuilderSuggestionsQueryKey(), + }); + }, [queryClient, setSearchId]); const [addingLibraryAgentId, setAddingLibraryAgentId] = useState< string | null @@ -38,13 +50,19 @@ export const useBlockMenuSearch = () => { page: 1, page_size: 8, search_query: searchQuery, + search_id: searchId, }, { query: { - getNextPageParam: (lastPage, allPages) => { - const 
pagination = lastPage.data as SearchResponse; - const isMore = pagination.more_pages; - return isMore ? allPages.length + 1 : undefined; + getNextPageParam: (lastPage) => { + const response = lastPage.data as SearchResponse; + const { pagination } = response; + if (!pagination) { + return undefined; + } + + const { current_page, total_pages } = pagination; + return current_page < total_pages ? current_page + 1 : undefined; }, }, }, @@ -53,7 +71,6 @@ export const useBlockMenuSearch = () => { const { mutateAsync: addMarketplaceAgent } = usePostV2AddMarketplaceAgent({ mutation: { onSuccess: () => { - const queryClient = getQueryClient(); queryClient.invalidateQueries({ queryKey: getGetV2ListLibraryAgentsQueryKey(), }); @@ -75,6 +92,24 @@ export const useBlockMenuSearch = () => { }, }); + useEffect(() => { + if (!searchData?.pages?.length) { + return; + } + + const latestPage = searchData.pages[searchData.pages.length - 1]; + const response = latestPage?.data as SearchResponse; + if (response?.search_id && response.search_id !== searchId) { + setSearchId(response.search_id); + } + }, [searchData, searchId, setSearchId]); + + useEffect(() => { + if (searchId && !searchQuery) { + resetSearchSession(); + } + }, [resetSearchSession, searchId, searchQuery]); + const allSearchData = searchData?.pages?.flatMap((page) => { const response = page.data as SearchResponse; @@ -117,7 +152,12 @@ export const useBlockMenuSearch = () => { }); const libraryAgent = response.data as LibraryAgent; - addAgentToBuilder(libraryAgent); + + const { data: libraryAgentDetails } = await getV2GetLibraryAgent( + libraryAgent.id, + ); + + addAgentToBuilder(libraryAgentDetails as LibraryAgent); toast({ title: "Agent Added", diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearchBar/useBlockMenuSearchBar.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearchBar/useBlockMenuSearchBar.ts index b55a638e08..ab1af16584 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearchBar/useBlockMenuSearchBar.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/BlockMenuSearchBar/useBlockMenuSearchBar.ts @@ -1,30 +1,32 @@ import { debounce } from "lodash"; import { useCallback, useEffect, useRef, useState } from "react"; import { useBlockMenuStore } from "../../../../stores/blockMenuStore"; +import { getQueryClient } from "@/lib/react-query/queryClient"; +import { getGetV2GetBuilderSuggestionsQueryKey } from "@/app/api/__generated__/endpoints/default/default"; const SEARCH_DEBOUNCE_MS = 300; export const useBlockMenuSearchBar = () => { const inputRef = useRef(null); const [localQuery, setLocalQuery] = useState(""); - const { setSearchQuery, setSearchId, searchId, searchQuery } = - useBlockMenuStore(); + const { setSearchQuery, setSearchId, searchQuery } = useBlockMenuStore(); + const queryClient = getQueryClient(); - const searchIdRef = useRef(searchId); - useEffect(() => { - searchIdRef.current = searchId; - }, [searchId]); + const clearSearchSession = useCallback(() => { + setSearchId(undefined); + queryClient.invalidateQueries({ + queryKey: getGetV2GetBuilderSuggestionsQueryKey(), + }); + }, [queryClient, setSearchId]); const debouncedSetSearchQuery = useCallback( debounce((value: string) => { setSearchQuery(value); if (value.length === 0) { - setSearchId(undefined); - } else if (!searchIdRef.current) { - 
setSearchId(crypto.randomUUID()); + clearSearchSession(); } }, SEARCH_DEBOUNCE_MS), - [setSearchQuery, setSearchId], + [clearSearchSession, setSearchQuery], ); useEffect(() => { @@ -36,13 +38,13 @@ export const useBlockMenuSearchBar = () => { const handleClear = () => { setLocalQuery(""); setSearchQuery(""); - setSearchId(undefined); + clearSearchSession(); debouncedSetSearchQuery.cancel(); }; useEffect(() => { setLocalQuery(searchQuery); - }, []); + }, [searchQuery]); return { handleClear, diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/HorizontalScroll.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/HorizontalScroll.tsx new file mode 100644 index 0000000000..0f953394e6 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/HorizontalScroll.tsx @@ -0,0 +1,109 @@ +import React, { useEffect, useRef, useState } from "react"; +import { ArrowLeftIcon, ArrowRightIcon } from "@phosphor-icons/react"; +import { cn } from "@/lib/utils"; + +interface HorizontalScrollAreaProps { + children: React.ReactNode; + wrapperClassName?: string; + scrollContainerClassName?: string; + scrollAmount?: number; + dependencyList?: React.DependencyList; +} + +const defaultDependencies: React.DependencyList = []; +const baseScrollClasses = + "flex gap-2 overflow-x-auto px-8 [scrollbar-width:none] [-ms-overflow-style:'none'] [&::-webkit-scrollbar]:hidden"; + +export const HorizontalScroll: React.FC = ({ + children, + wrapperClassName, + scrollContainerClassName, + scrollAmount = 300, + dependencyList = defaultDependencies, +}) => { + const scrollRef = useRef(null); + const [canScrollLeft, setCanScrollLeft] = useState(false); + const [canScrollRight, setCanScrollRight] = useState(false); + + const scrollByDelta = (delta: number) => { + if (!scrollRef.current) { + return; + } + scrollRef.current.scrollBy({ left: delta, behavior: "smooth" }); + }; + + const updateScrollState = () => { + const element = scrollRef.current; + if (!element) { + setCanScrollLeft(false); + setCanScrollRight(false); + return; + } + setCanScrollLeft(element.scrollLeft > 0); + setCanScrollRight( + Math.ceil(element.scrollLeft + element.clientWidth) < element.scrollWidth, + ); + }; + + useEffect(() => { + updateScrollState(); + const element = scrollRef.current; + if (!element) { + return; + } + const handleScroll = () => updateScrollState(); + element.addEventListener("scroll", handleScroll); + window.addEventListener("resize", handleScroll); + return () => { + element.removeEventListener("scroll", handleScroll); + window.removeEventListener("resize", handleScroll); + }; + }, dependencyList); + + return ( +
+
+
+ {children} +
+ {canScrollLeft && ( +
+ )} + {canScrollRight && ( +
+ )} + {canScrollLeft && ( + + )} + {canScrollRight && ( + + )} +
+
+ ); +}; diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/MarketplaceAgentsContent/useMarketplaceAgentsContent.ts b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/MarketplaceAgentsContent/useMarketplaceAgentsContent.ts index 8ca3fe30f5..ff9b70b79a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/MarketplaceAgentsContent/useMarketplaceAgentsContent.ts +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/MarketplaceAgentsContent/useMarketplaceAgentsContent.ts @@ -1,6 +1,7 @@ import { getGetV2GetBuilderItemCountsQueryKey } from "@/app/api/__generated__/endpoints/default/default"; import { getGetV2ListLibraryAgentsQueryKey, + getV2GetLibraryAgent, usePostV2AddMarketplaceAgent, } from "@/app/api/__generated__/endpoints/library/library"; import { @@ -105,8 +106,16 @@ export const useMarketplaceAgentsContent = () => { }, }); + // Here, libraryAgent has empty input and output schemas. + // Not updating the endpoint because this endpoint is used elsewhere. + // TODO: Create a new endpoint for builder specific to marketplace agents. const libraryAgent = response.data as LibraryAgent; - addAgentToBuilder(libraryAgent); + + const { data: libraryAgentDetails } = await getV2GetLibraryAgent( + libraryAgent.id, + ); + + addAgentToBuilder(libraryAgentDetails as LibraryAgent); toast({ title: "Agent Added", diff --git a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/SuggestionContent/SuggestionContent.tsx b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/SuggestionContent/SuggestionContent.tsx index 94efe063a6..b00714f4ca 100644 --- a/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/SuggestionContent/SuggestionContent.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/build/components/NewControlPanel/NewBlockMenu/SuggestionContent/SuggestionContent.tsx @@ -6,10 +6,15 @@ import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; import { blockMenuContainerStyle } from "../style"; import { useBlockMenuStore } from "../../../../stores/blockMenuStore"; import { DefaultStateType } from "../types"; +import { SearchHistoryChip } from "../SearchHistoryChip"; +import { HorizontalScroll } from "../HorizontalScroll"; export const SuggestionContent = () => { - const { setIntegration, setDefaultState } = useBlockMenuStore(); + const { setIntegration, setDefaultState, setSearchQuery, setSearchId } = + useBlockMenuStore(); const { data, isLoading, isError, error, refetch } = useSuggestionContent(); + const suggestions = data?.suggestions; + const hasRecentSearches = (suggestions?.recent_searches?.length ?? 0) > 0; if (isError) { return ( @@ -29,11 +34,45 @@ export const SuggestionContent = () => { ); } - const suggestions = data?.suggestions; - return (
+ {/* Recent searches */} + {hasRecentSearches && ( +
+

+ Recent searches +

+ + {!isLoading && suggestions + ? suggestions.recent_searches.map((entry, index) => ( + { + setSearchQuery(entry.search_query || ""); + setSearchId(entry.search_id || undefined); + }} + /> + )) + : Array(3) + .fill(0) + .map((_, index) => ( + + ))} + +
+ )} + {/* Integrations */}

diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx index 7a7470a391..2831d6cdba 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/NewAgentLibraryView.tsx @@ -10,10 +10,13 @@ import { AgentRunsLoading } from "./components/other/AgentRunsLoading"; import { EmptySchedules } from "./components/other/EmptySchedules"; import { EmptyTasks } from "./components/other/EmptyTasks"; import { EmptyTemplates } from "./components/other/EmptyTemplates"; +import { EmptyTriggers } from "./components/other/EmptyTriggers"; import { SectionWrap } from "./components/other/SectionWrap"; import { LoadingSelectedContent } from "./components/selected-views/LoadingSelectedContent"; import { SelectedRunView } from "./components/selected-views/SelectedRunView/SelectedRunView"; import { SelectedScheduleView } from "./components/selected-views/SelectedScheduleView/SelectedScheduleView"; +import { SelectedTemplateView } from "./components/selected-views/SelectedTemplateView/SelectedTemplateView"; +import { SelectedTriggerView } from "./components/selected-views/SelectedTriggerView/SelectedTriggerView"; import { SelectedViewLayout } from "./components/selected-views/SelectedViewLayout"; import { SidebarRunsList } from "./components/sidebar/SidebarRunsList/SidebarRunsList"; import { AGENT_LIBRARY_SECTION_PADDING_X } from "./helpers"; @@ -21,11 +24,13 @@ import { useNewAgentLibraryView } from "./useNewAgentLibraryView"; export function NewAgentLibraryView() { const { - agent, - hasAnyItems, - ready, - error, agentId, + agent, + ready, + activeTemplate, + isTemplateLoading, + error, + hasAnyItems, activeItem, sidebarLoading, activeTab, @@ -33,6 +38,9 @@ export function NewAgentLibraryView() { handleSelectRun, handleCountsChange, handleClearSelectedRun, + onRunInitiated, + onTriggerSetup, + onScheduleCreated, } = useNewAgentLibraryView(); if (error) { @@ -62,14 +70,19 @@ export function NewAgentLibraryView() { />

- +
); } return ( -
+
+ } agent={agent} - agentId={agent.id.toString()} - onRunCreated={(execution) => handleSelectRun(execution.id, "runs")} - onScheduleCreated={(schedule) => - handleSelectRun(schedule.id, "scheduled") - } + onRunCreated={onRunInitiated} + onScheduleCreated={onScheduleCreated} + onTriggerSetup={onTriggerSetup} + initialInputValues={activeTemplate?.inputs} + initialInputCredentials={activeTemplate?.credentials} />
@@ -109,6 +127,21 @@ export function NewAgentLibraryView() { scheduleId={activeItem} onClearSelectedRun={handleClearSelectedRun} /> + ) : activeTab === "templates" ? ( + handleSelectRun(execution.id, "runs")} + onSwitchToRunsTab={() => setActiveTab("runs")} + /> + ) : activeTab === "triggers" ? ( + setActiveTab("runs")} + /> ) : ( + ) : activeTab === "triggers" ? ( + + + ) : ( - + )}
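The NewAgentLibraryView hunk above fans activeTab out to dedicated selected views (SelectedTemplateView, SelectedTriggerView) and funnels selections back through handleSelectRun. A reduced sketch of that wiring, with names taken from the diff and types trimmed for illustration:

type LibraryTab = "runs" | "scheduled" | "templates" | "triggers";

function makeSelectRun(
  setActiveItem: (id: string) => void,
  setActiveTab: (tab: LibraryTab) => void,
) {
  // selecting an item from any tab records it and surfaces the owning tab
  return function handleSelectRun(id: string, tab: LibraryTab): void {
    setActiveItem(id);
    setActiveTab(tab);
  };
}

// usage, mirroring the SelectedTemplateView props above:
// onRun={(execution) => handleSelectRun(execution.id, "runs")}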
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx index d46a6bfc9e..bc9918c2bb 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/AgentInputsReadOnly.tsx @@ -1,13 +1,11 @@ "use client"; import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Text } from "@/components/atoms/Text/Text"; import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types"; import { CredentialsInput } from "../CredentialsInputs/CredentialsInputs"; -import { - getAgentCredentialsFields, - getAgentInputFields, - renderValue, -} from "./helpers"; +import { RunAgentInputs } from "../RunAgentInputs/RunAgentInputs"; +import { getAgentCredentialsFields, getAgentInputFields } from "./helpers"; type Props = { agent: LibraryAgent; @@ -20,16 +18,28 @@ export function AgentInputsReadOnly({ inputs, credentialInputs, }: Props) { - const fields = getAgentInputFields(agent); - const credentialFields = getAgentCredentialsFields(agent); - const inputEntries = Object.entries(fields); - const credentialEntries = Object.entries(credentialFields); + const inputFields = getAgentInputFields(agent); + const credentialFieldEntries = Object.entries( + getAgentCredentialsFields(agent), + ); - const hasInputs = inputs && inputEntries.length > 0; - const hasCredentials = credentialInputs && credentialEntries.length > 0; + const inputEntries = + inputs && + Object.entries(inputs).map(([key, value]) => ({ + key, + schema: inputFields[key], + value, + })); + + const hasInputs = inputEntries && inputEntries.length > 0; + const hasCredentials = credentialInputs && credentialFieldEntries.length > 0; if (!hasInputs && !hasCredentials) { - return
No input for this run.
; + return ( + + No input for this run. + + ); } return ( @@ -37,14 +47,20 @@ export function AgentInputsReadOnly({ {/* Regular inputs */} {hasInputs && (
- {inputEntries.map(([key, sub]) => ( -
- -

- {renderValue((inputs as Record)[key])} -

-
- ))} + {inputEntries.map(({ key, schema, value }) => { + if (!schema) return null; + + return ( + {}} + readOnly={true} + /> + ); + })}
)} @@ -52,7 +68,7 @@ export function AgentInputsReadOnly({ {hasCredentials && (
{hasInputs &&
} - {credentialEntries.map(([key, inputSubSchema]) => { + {credentialFieldEntries.map(([key, inputSubSchema]) => { const credential = credentialInputs![key]; if (!credential) return null; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/helpers.ts index 5fd8ff4fe7..95069b1d30 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/helpers.ts @@ -13,7 +13,8 @@ export function getCredentialTypeDisplayName(type: string): string { } export function getAgentInputFields(agent: LibraryAgent): Record { - const schema = agent.input_schema as unknown as { + const schema = (agent.trigger_setup_info?.config_schema ?? + agent.input_schema) as unknown as { properties?: Record; } | null; if (!schema || !schema.properties) return {}; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialRow/CredentialRow.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialRow/CredentialRow.tsx index 251b4cfbb4..34c066e90d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialRow/CredentialRow.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialRow/CredentialRow.tsx @@ -62,12 +62,15 @@ export function CredentialRow({
- + {getCredentialDisplayName(credential, displayName)} {"*".repeat(MASKED_KEY_LENGTH)} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialsSelect/CredentialsSelect.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialsSelect/CredentialsSelect.tsx index 29f9b09a22..7adfa5772b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialsSelect/CredentialsSelect.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/components/CredentialsSelect/CredentialsSelect.tsx @@ -48,8 +48,8 @@ export function CredentialsSelect({ onValueChange={(value) => onSelectCredential(value)} > - - {selectedCredentials ? ( + {selectedCredentials ? ( + - ) : ( - Select credential - )} - + + ) : ( + + )} {credentials.map((credential) => ( diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/helpers.ts index 9e6f374437..4cca825747 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/helpers.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/helpers.ts @@ -75,11 +75,11 @@ export function getActionButtonText( hasExistingCredentials: boolean, ): string { if (hasExistingCredentials) { - if (supportsOAuth2) return "Connect a different account"; - if (supportsApiKey) return "Use a different API key"; - if (supportsUserPassword) return "Use a different username and password"; - if (supportsHostScoped) return "Use different headers"; - return "Add credentials"; + if (supportsOAuth2) return "Connect another account"; + if (supportsApiKey) return "Use a new API key"; + if (supportsUserPassword) return "Add a new username and password"; + if (supportsHostScoped) return "Add new headers"; + return "Add new credentials"; } else { if (supportsOAuth2) return "Add account"; if (supportsApiKey) return "Add API key"; diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs.tsx index d98d3cb10d..ea372193c5 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs.tsx @@ -9,6 +9,7 @@ import { Button } from "@/components/atoms/Button/Button"; import { FileInput } from "@/components/atoms/FileInput/FileInput"; import { Switch } from "@/components/atoms/Switch/Switch"; import { GoogleDrivePickerInput } from "@/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput"; +import { InformationTooltip } from 
"@/components/molecules/InformationTooltip/InformationTooltip"; import { TimePicker } from "@/components/molecules/TimePicker/TimePicker"; import { BlockIOObjectSubSchema, @@ -32,6 +33,7 @@ interface Props { value?: any; placeholder?: string; onChange: (value: any) => void; + readOnly?: boolean; } /** @@ -44,6 +46,7 @@ export function RunAgentInputs({ value, placeholder, onChange, + readOnly = false, ...props }: Props & React.HTMLAttributes) { const { handleUploadFile, uploadProgress } = useRunAgentInputs(); @@ -62,7 +65,6 @@ export function RunAgentInputs({ id={`${baseId}-number`} label={schema.title ?? placeholder ?? "Number"} hideLabel - size="small" type="number" value={value ?? ""} placeholder={placeholder || "Enter number"} @@ -80,7 +82,6 @@ export function RunAgentInputs({ id={`${baseId}-textarea`} label={schema.title ?? placeholder ?? "Text"} hideLabel - size="small" type="textarea" rows={3} value={value ?? ""} @@ -130,7 +131,6 @@ export function RunAgentInputs({ id={`${baseId}-date`} label={schema.title ?? placeholder ?? "Date"} hideLabel - size="small" type="date" value={value ? format(value as Date, "yyyy-MM-dd") : ""} onChange={(e) => { @@ -159,7 +159,6 @@ export function RunAgentInputs({ id={`${baseId}-datetime`} label={schema.title ?? placeholder ?? "Date time"} hideLabel - size="small" type="datetime-local" value={value ?? ""} onChange={(e) => onChange((e.target as HTMLInputElement).value)} @@ -194,7 +193,6 @@ export function RunAgentInputs({ label={schema.title ?? placeholder ?? "Select"} hideLabel value={value ?? ""} - size="small" onValueChange={(val: string) => onChange(val)} placeholder={placeholder || "Select an option"} options={schema.enum @@ -217,7 +215,6 @@ export function RunAgentInputs({ items={allKeys.map((key) => ({ value: key, label: _schema.properties[key]?.title ?? key, - size: "small", }))} selectedValues={selectedValues} onChange={(values: string[]) => @@ -336,7 +333,6 @@ export function RunAgentInputs({ id={`${baseId}-text`} label={schema.title ?? placeholder ?? "Text"} hideLabel - size="small" type="text" value={value ?? ""} onChange={(e) => onChange((e.target as HTMLInputElement).value)} @@ -347,6 +343,17 @@ export function RunAgentInputs({ } return ( -
{innerInputElement}
+
+ +
+ {innerInputElement} +
+
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx index eff83cf824..e53f31a349 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx @@ -3,7 +3,14 @@ import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; import { Button } from "@/components/atoms/Button/Button"; +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from "@/components/atoms/Tooltip/BaseTooltip"; import { Dialog } from "@/components/molecules/Dialog/Dialog"; import { useState } from "react"; import { ScheduleAgentModal } from "../ScheduleAgentModal/ScheduleAgentModal"; @@ -16,16 +23,20 @@ import { useAgentRunModal } from "./useAgentRunModal"; interface Props { triggerSlot: React.ReactNode; agent: LibraryAgent; - agentId: string; - agentVersion?: number; + initialInputValues?: Record; + initialInputCredentials?: Record; onRunCreated?: (execution: GraphExecutionMeta) => void; + onTriggerSetup?: (preset: LibraryAgentPreset) => void; onScheduleCreated?: (schedule: GraphExecutionJobInfo) => void; } export function RunAgentModal({ triggerSlot, agent, + initialInputValues, + initialInputCredentials, onRunCreated, + onTriggerSetup, onScheduleCreated, }: Props) { const { @@ -65,6 +76,9 @@ export function RunAgentModal({ handleRun, } = useAgentRunModal(agent, { onRun: onRunCreated, + onSetupTrigger: onTriggerSetup, + initialInputValues, + initialInputCredentials, }); const [isScheduleModalOpen, setIsScheduleModalOpen] = useState(false); @@ -73,6 +87,8 @@ export function RunAgentModal({ Object.keys(agentInputFields || {}).length > 0 || Object.keys(agentCredentialsInputFields || {}).length > 0; + const isTriggerRunType = defaultRunType.includes("trigger"); + function handleInputChange(key: string, value: string) { setInputValues((prev) => ({ ...prev, @@ -147,15 +163,45 @@ export function RunAgentModal({
- + {isTriggerRunType ? null : !allRequiredInputsAreSet ? ( + + + + + + + + +

+ Please set up all required inputs and credentials before + scheduling +

+
+
+
+ ) : ( + + )} - {defaultRunType === "automatic-trigger" ? ( + {defaultRunType === "automatic-trigger" || + defaultRunType === "manual-trigger" ? ( 0 ? ( - {/* Regular inputs */} {inputFields.map(([key, inputSubSchema]) => ( -
- - - setInputValue(key, value)} - data-testid={`agent-input-${key}`} - /> -
+ setInputValue(key, value)} + data-testid={`agent-input-${key}`} + /> ))}
) : null} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/RunActions/RunActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/RunActions/RunActions.tsx index 3eb9514cc4..f5f66b30c7 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/RunActions/RunActions.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/RunActions/RunActions.tsx @@ -24,7 +24,8 @@ export function RunActions({ disabled={!isRunReady || isExecuting || isSettingUpTrigger} loading={isExecuting || isSettingUpTrigger} > - {defaultRunType === "automatic-trigger" + {defaultRunType === "automatic-trigger" || + defaultRunType === "manual-trigger" ? "Set up Trigger" : "Start Task"} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/WebhookTriggerBanner/WebhookTriggerBanner.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/WebhookTriggerBanner/WebhookTriggerBanner.tsx index bc69a5f633..23834cbd9d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/WebhookTriggerBanner/WebhookTriggerBanner.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/WebhookTriggerBanner/WebhookTriggerBanner.tsx @@ -1,6 +1,6 @@ export function WebhookTriggerBanner() { return ( -
+
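The useAgentRunModal rework further below derives the run variant from the agent's trigger metadata instead of a single boolean: agents without trigger_setup_info stay manual, and triggered agents split on whether the trigger declares a credentials input. A reduced sketch of that selection (field names follow the diff; the RunVariant union is abbreviated):

type RunVariant = "manual" | "manual-trigger" | "automatic-trigger";

interface TriggerSetupInfo {
  credentials_input_name?: string | null;
}

function getDefaultRunType(info?: TriggerSetupInfo | null): RunVariant {
  if (!info) return "manual"; // plain agents run on demand
  // a trigger that declares a credentials input is treated as automatic;
  // otherwise it is set up and fired as a manual trigger
  return info.credentials_input_name ? "automatic-trigger" : "manual-trigger";
}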
void; - onCreateSchedule?: (schedule: GraphExecutionJobInfo) => void; onSetupTrigger?: (preset: LibraryAgentPreset) => void; + initialInputValues?: Record; + initialInputCredentials?: Record; } export function useAgentRunModal( @@ -38,31 +36,28 @@ export function useAgentRunModal( const { toast } = useToast(); const queryClient = useQueryClient(); const [isOpen, setIsOpen] = useState(false); - const [showScheduleView, setShowScheduleView] = useState(false); - const [inputValues, setInputValues] = useState>({}); + const [inputValues, setInputValues] = useState>( + callbacks?.initialInputValues || {}, + ); const [inputCredentials, setInputCredentials] = useState>( - {}, + callbacks?.initialInputCredentials || {}, ); const [presetName, setPresetName] = useState(""); const [presetDescription, setPresetDescription] = useState(""); - const defaultScheduleName = useMemo(() => `Run ${agent.name}`, [agent.name]); - const [scheduleName, setScheduleName] = useState(defaultScheduleName); - const [cronExpression, setCronExpression] = useState( - agent.recommended_schedule_cron || "0 9 * * 1", - ); - - // Get user timezone for scheduling - const { data: userTimezone } = useGetV1GetUserTimezone({ - query: { - select: (res) => (res.status === 200 ? res.data.timezone : undefined), - }, - }); // Determine the default run type based on agent capabilities - const defaultRunType: RunVariant = agent.has_external_trigger - ? "automatic-trigger" + const defaultRunType: RunVariant = agent.trigger_setup_info + ? agent.trigger_setup_info.credentials_input_name + ? "automatic-trigger" + : "manual-trigger" : "manual"; + // Update input values/credentials if template is selected/unselected + useEffect(() => { + setInputValues(callbacks?.initialInputValues || {}); + setInputCredentials(callbacks?.initialInputCredentials || {}); + }, [callbacks?.initialInputValues, callbacks?.initialInputCredentials]); + // API mutations const executeGraphMutation = usePostV1ExecuteGraphAgent({ mutation: { @@ -71,13 +66,11 @@ export function useAgentRunModal( toast({ title: "Agent execution started", }); - callbacks?.onRun?.(response.data as unknown as GraphExecutionMeta); // Invalidate runs list for this graph queryClient.invalidateQueries({ - queryKey: getGetV1ListGraphExecutionsInfiniteQueryOptions( - agent.graph_id, - ).queryKey, + queryKey: getGetV1ListGraphExecutionsQueryKey(agent.graph_id), }); + callbacks?.onRun?.(response.data); analytics.sendDatafastEvent("run_agent", { name: agent.name, id: agent.graph_id, @@ -94,45 +87,16 @@ export function useAgentRunModal( }, }); - const createScheduleMutation = useCreateSchedule({ + const setupTriggerMutation = usePostV2SetupTrigger({ mutation: { onSuccess: (response) => { - if (response.status === 200) { - toast({ - title: "Schedule created", - }); - callbacks?.onCreateSchedule?.(response.data); - // Invalidate schedules list for this graph - queryClient.invalidateQueries({ - queryKey: getGetV1ListExecutionSchedulesForAGraphQueryKey( - agent.graph_id, - ), - }); - analytics.sendDatafastEvent("schedule_agent", { - name: agent.name, - id: agent.graph_id, - cronExpression: cronExpression, - }); - setIsOpen(false); - } - }, - onError: (error: any) => { - toast({ - title: "❌ Failed to create schedule", - description: error.message || "An unexpected error occurred.", - variant: "destructive", - }); - }, - }, - }); - - const setupTriggerMutation = usePostV2SetupTrigger({ - mutation: { - onSuccess: (response: any) => { if (response.status === 200) { toast({ title: "Trigger setup complete", }); + 
queryClient.invalidateQueries({ + queryKey: getGetV2ListPresetsQueryKey({ graph_id: agent.graph_id }), + }); callbacks?.onSetupTrigger?.(response.data); setIsOpen(false); } @@ -147,11 +111,13 @@ export function useAgentRunModal( }, }); - // Input schema validation - const agentInputSchema = useMemo( - () => agent.input_schema || { properties: {}, required: [] }, - [agent.input_schema], - ); + // Input schema validation (use trigger schema for triggered agents) + const agentInputSchema = useMemo(() => { + if (agent.trigger_setup_info?.config_schema) { + return agent.trigger_setup_info.config_schema; + } + return agent.input_schema || { properties: {}, required: [] }; + }, [agent.input_schema, agent.trigger_setup_info]); const agentInputFields = useMemo(() => { if ( @@ -220,33 +186,25 @@ export function useAgentRunModal( [allRequiredInputsAreSetRaw, credentialsRequired, allCredentialsAreSet], ); - const notifyMissingRequirements = useCallback( - (needScheduleName: boolean = false) => { - const allMissingFields = ( - needScheduleName && !scheduleName ? ["schedule_name"] : [] - ) - .concat(missingInputs) - .concat( - credentialsRequired && !allCredentialsAreSet - ? missingCredentials.map((k) => `credentials:${k}`) - : [], - ); + const notifyMissingRequirements = useCallback(() => { + const allMissingFields = missingInputs.concat( + credentialsRequired && !allCredentialsAreSet + ? missingCredentials.map((k) => `credentials:${k}`) + : [], + ); - toast({ - title: "⚠️ Missing required inputs", - description: `Please provide: ${allMissingFields.map((k) => `"${k}"`).join(", ")}`, - variant: "destructive", - }); - }, - [ - missingInputs, - scheduleName, - toast, - credentialsRequired, - allCredentialsAreSet, - missingCredentials, - ], - ); + toast({ + title: "⚠️ Missing required inputs", + description: `Please provide: ${allMissingFields.map((k) => `"${k}"`).join(", ")}`, + variant: "destructive", + }); + }, [ + missingInputs, + toast, + credentialsRequired, + allCredentialsAreSet, + missingCredentials, + ]); // Action handlers const handleRun = useCallback(() => { @@ -255,9 +213,12 @@ export function useAgentRunModal( return; } - if (defaultRunType === "automatic-trigger") { + if ( + defaultRunType === "automatic-trigger" || + defaultRunType === "manual-trigger" + ) { // Setup trigger - if (!scheduleName.trim()) { + if (!presetName.trim()) { toast({ title: "⚠️ Trigger name required", description: "Please provide a name for your trigger.", @@ -268,7 +229,7 @@ export function useAgentRunModal( setupTriggerMutation.mutate({ data: { - name: presetName || scheduleName, + name: presetName, description: presetDescription || `Trigger for ${agent.name}`, graph_id: agent.graph_id, graph_version: agent.graph_version, @@ -291,7 +252,6 @@ export function useAgentRunModal( }, [ allRequiredInputsAreSet, defaultRunType, - scheduleName, inputValues, inputCredentials, agent, @@ -303,70 +263,6 @@ export function useAgentRunModal( toast, ]); - const handleSchedule = useCallback(() => { - if (!allRequiredInputsAreSet) { - notifyMissingRequirements(true); - return; - } - - if (!scheduleName.trim()) { - toast({ - title: "⚠️ Schedule name required", - description: "Please provide a name for your schedule.", - variant: "destructive", - }); - return; - } - - createScheduleMutation.mutate({ - graphId: agent.graph_id, - data: { - name: presetName || scheduleName, - cron: cronExpression, - inputs: inputValues, - graph_version: agent.graph_version, - credentials: inputCredentials, - timezone: - userTimezone && userTimezone !== 
"not-set" ? userTimezone : undefined, - }, - }); - }, [ - allRequiredInputsAreSet, - scheduleName, - cronExpression, - inputValues, - inputCredentials, - agent, - notifyMissingRequirements, - createScheduleMutation, - toast, - userTimezone, - ]); - - function handleShowSchedule() { - // Initialize with sensible defaults when entering schedule view - setScheduleName((prev) => prev || defaultScheduleName); - setCronExpression( - (prev) => prev || agent.recommended_schedule_cron || "0 9 * * 1", - ); - setShowScheduleView(true); - } - - function handleGoBack() { - setShowScheduleView(false); - // Reset schedule fields on exit - setScheduleName(defaultScheduleName); - setCronExpression(agent.recommended_schedule_cron || "0 9 * * 1"); - } - - function handleSetScheduleName(name: string) { - setScheduleName(name); - } - - function handleSetCronExpression(expression: string) { - setCronExpression(expression); - } - const hasInputFields = useMemo(() => { return Object.keys(agentInputFields).length > 0; }, [agentInputFields]); @@ -375,10 +271,9 @@ export function useAgentRunModal( // UI state isOpen, setIsOpen, - showScheduleView, // Run mode - defaultRunType, + defaultRunType: defaultRunType as RunVariant, // Form: regular inputs inputValues, @@ -394,10 +289,6 @@ export function useAgentRunModal( setPresetName, setPresetDescription, - // Scheduling - scheduleName, - cronExpression, - // Validation/readiness allRequiredInputsAreSet, missingInputs, @@ -409,15 +300,9 @@ export function useAgentRunModal( // Async states isExecuting: executeGraphMutation.isPending, - isCreatingSchedule: createScheduleMutation.isPending, isSettingUpTrigger: setupTriggerMutation.isPending, // Actions handleRun, - handleSchedule, - handleShowSchedule, - handleGoBack, - handleSetScheduleName, - handleSetCronExpression, }; } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx index c0c2c900a1..3446611827 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTasks.tsx @@ -1,17 +1,58 @@ +"use client"; + +import { getV1GetGraphVersion } from "@/app/api/__generated__/endpoints/graphs/graphs"; +import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; +import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; import { Button } from "@/components/atoms/Button/Button"; import { Text } from "@/components/atoms/Text/Text"; import { ShowMoreText } from "@/components/molecules/ShowMoreText/ShowMoreText"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { exportAsJSONFile } from "@/lib/utils"; import { formatDate } from "@/lib/utils/time"; +import Link from "next/link"; import { RunAgentModal } from "../modals/RunAgentModal/RunAgentModal"; import { RunDetailCard } from "../selected-views/RunDetailCard/RunDetailCard"; import { EmptyTasksIllustration } from "./EmptyTasksIllustration"; type Props = { agent: LibraryAgent; + onRun?: (run: GraphExecutionMeta) => void; 
+ onTriggerSetup?: (preset: LibraryAgentPreset) => void; + onScheduleCreated?: (schedule: GraphExecutionJobInfo) => void; }; -export function EmptyTasks({ agent }: Props) { +export function EmptyTasks({ + agent, + onRun, + onTriggerSetup, + onScheduleCreated, +}: Props) { + const { toast } = useToast(); + + async function handleExport() { + try { + const res = await getV1GetGraphVersion( + agent.graph_id, + agent.graph_version, + { for_export: true }, + ); + if (res.status === 200) { + const filename = `${agent.name}_v${agent.graph_version}.json`; + exportAsJSONFile(res.data as any, filename); + toast({ title: "Agent exported" }); + } else { + toast({ title: "Failed to export agent", variant: "destructive" }); + } + } catch (e: any) { + toast({ + title: "Failed to export agent", + description: e?.message, + variant: "destructive", + }); + } + } const isPublished = Boolean(agent.marketplace_listing); const createdAt = formatDate(agent.created_at); const updatedAt = formatDate(agent.updated_at); @@ -45,7 +86,9 @@ export function EmptyTasks({ agent }: Props) { } agent={agent} - agentId={agent.id.toString()} + onRunCreated={onRun} + onTriggerSetup={onTriggerSetup} + onScheduleCreated={onScheduleCreated} />
@@ -93,10 +136,15 @@ export function EmptyTasks({ agent }: Props) { ) : null}
- -
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTriggers.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTriggers.tsx new file mode 100644 index 0000000000..0d9dc47fff --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/other/EmptyTriggers.tsx @@ -0,0 +1,323 @@ +import { Text } from "@/components/atoms/Text/Text"; + +export function EmptyTriggers() { + return ( +
+
+ {/* decorative empty-state illustration */}
+ <table>
+
+ + No triggers yet + + + Set up automatic triggers for your agent to run tasks automatically — + they'll show up here. + +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/AnchorLinksWrap.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/AnchorLinksWrap.tsx new file mode 100644 index 0000000000..6dae969142 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/AnchorLinksWrap.tsx @@ -0,0 +1,14 @@ +import { cn } from "@/lib/utils"; +import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../helpers"; + +type Props = { + children: React.ReactNode; +}; + +export function AnchorLinksWrap({ children }: Props) { + return ( +
+ +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/MarkdownRenderer.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/MarkdownRenderer.tsx index 97ff97d46c..d94966c6c8 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/MarkdownRenderer.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers/renderers/MarkdownRenderer.tsx @@ -166,7 +166,7 @@ function renderMarkdown( className="prose prose-sm dark:prose-invert max-w-none" remarkPlugins={[ remarkGfm, // GitHub Flavored Markdown (tables, task lists, strikethrough) - remarkMath, // Math support for LaTeX + [remarkMath, { singleDollarTextMath: false }], // Math support for LaTeX ]} rehypePlugins={[ rehypeKatex, // Render math with KaTeX diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx index 7c70b5e6aa..3d04234bb3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/RunDetailHeader/RunDetailHeader.tsx @@ -1,6 +1,7 @@ import { GraphExecution } from "@/app/api/__generated__/models/graphExecution"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { Text } from "@/components/atoms/Text/Text"; +import { ClockClockwiseIcon } from "@phosphor-icons/react"; import moment from "moment"; import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../../helpers"; import { RunStatusBadge } from "../SelectedRunView/components/RunStatusBadge"; @@ -20,7 +21,20 @@ export function RunDetailHeader({ agent, run, scheduleRecurrence }: Props) {
- {run?.status ? : null} + {run?.status ? ( + + ) : scheduleRecurrence ? ( +
+ + + Scheduled + +
+ ) : null} {agent.name} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedActionsWrap.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedActionsWrap.tsx new file mode 100644 index 0000000000..da7985e3e2 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedActionsWrap.tsx @@ -0,0 +1,11 @@ +type Props = { + children: React.ReactNode; +}; + +export function SelectedActionsWrap({ children }: Props) { + return ( +
+ {children} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx index 7cce125e7c..ff9a4e5809 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/SelectedRunView.tsx @@ -4,18 +4,18 @@ import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecut import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; import { Text } from "@/components/atoms/Text/Text"; -import { - Tooltip, - TooltipContent, - TooltipProvider, - TooltipTrigger, -} from "@/components/atoms/Tooltip/BaseTooltip"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip"; +import { + ScrollableTabs, + ScrollableTabsContent, + ScrollableTabsList, + ScrollableTabsTrigger, +} from "@/components/molecules/ScrollableTabs/ScrollableTabs"; import { PendingReviewsList } from "@/components/organisms/PendingReviewsList/PendingReviewsList"; import { usePendingReviewsForExecution } from "@/hooks/usePendingReviews"; -import { InfoIcon } from "@phosphor-icons/react"; +import { isLargeScreen, useBreakpoint } from "@/lib/hooks/useBreakpoint"; import { useEffect } from "react"; -import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../../helpers"; import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly"; import { LoadingSelectedContent } from "../LoadingSelectedContent"; import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; @@ -24,11 +24,9 @@ import { SelectedViewLayout } from "../SelectedViewLayout"; import { RunOutputs } from "./components/RunOutputs"; import { RunSummary } from "./components/RunSummary"; import { SelectedRunActions } from "./components/SelectedRunActions/SelectedRunActions"; +import { WebhookTriggerSection } from "./components/WebhookTriggerSection"; import { useSelectedRunView } from "./useSelectedRunView"; -const anchorStyles = - "border-b-2 border-transparent pb-1 text-sm font-medium text-slate-600 transition-colors hover:text-slate-900 hover:border-slate-900"; - interface Props { agent: LibraryAgent; runId: string; @@ -42,10 +40,11 @@ export function SelectedRunView({ onSelectRun, onClearSelectedRun, }: Props) { - const { run, isLoading, responseError, httpError } = useSelectedRunView( - agent.graph_id, - runId, - ); + const { run, preset, isLoading, responseError, httpError } = + useSelectedRunView(agent.graph_id, runId); + + const breakpoint = useBreakpoint(); + const isLgScreenUp = isLargeScreen(breakpoint); const { pendingReviews, @@ -62,13 +61,6 @@ export function SelectedRunView({ const withSummary = run?.stats?.activity_status; const withReviews = run?.status === AgentExecutionStatus.REVIEW; - function scrollToSection(id: string) { - const element = document.getElementById(id); - if (element) { - element.scrollIntoView({ behavior: "smooth", block: "start" }); - } - } - if (responseError || httpError) { return ( 
- {/* Navigation Links */} -
- -
+ {!isLgScreenUp ? ( + + ) : null} - {/* Summary Section */} - {withSummary && ( -
- - Summary - - - - - - -

- This AI-generated summary describes how the agent - handled your task. It's an experimental - feature and may occasionally be inaccurate. -

-
-
-
-
- } - > - - -
- )} - - {/* Output Section */} -
- - {isLoading ? ( -
- -
- ) : run && "outputs" in run ? ( - - ) : ( - - No output from this run. - - )} -
-
- - {/* Input Section */} -
- - - -
+ )} - {/* Reviews Section */} - {withReviews && ( -
- - {reviewsLoading ? ( -
Loading reviews…
- ) : pendingReviews.length > 0 ? ( - - ) : ( -
- No pending reviews for this execution + + + {withSummary && ( + + Summary + + )} + + Output + + + Your input + + {withReviews && ( + + Reviews ({pendingReviews.length}) + + )} + +
+ {/* Summary Section */} + {withSummary && ( + +
+ + Summary + +
+ } + > + +
- )} - + + )} + + {/* Output Section */} + +
+ + {isLoading ? ( +
+ +
+ ) : run && "outputs" in run ? ( + + ) : ( + + No output from this run. + + )} +
+
+
+ + {/* Input Section */} + +
+ + Your input + +
+ } + > + + +
+ + + {/* Reviews Section */} + {withReviews && ( + +
+ + {reviewsLoading ? ( + + ) : pendingReviews.length > 0 ? ( + + ) : ( + + No pending reviews for this execution + + )} + +
+
+ )}
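+ {/*
+ The panels above replace the old anchor-link layout: each section
+ now lives in its own ScrollableTabsContent panel, which is why the
+ scrollToSection helper and the anchorStyles constant above are
+ removed.
+ */}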
- )} +
-
- -
+ {isLgScreenUp ? ( +
+ +
+ ) : null}
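+ {/*
+ Breakpoint gating: this block and the !isLgScreenUp block near the
+ top of the return are mutually exclusive, so whatever they wrap is
+ rendered exactly once — above the tabs on small screens and after
+ the content on large screens (per useBreakpoint/isLargeScreen).
+ */}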
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/CreateTemplateModal/CreateTemplateModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/CreateTemplateModal/CreateTemplateModal.tsx new file mode 100644 index 0000000000..4126d40a2a --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/CreateTemplateModal/CreateTemplateModal.tsx @@ -0,0 +1,98 @@ +"use client"; + +import { GraphExecution } from "@/app/api/__generated__/models/graphExecution"; +import { Button } from "@/components/atoms/Button/Button"; +import { Input } from "@/components/atoms/Input/Input"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { useState } from "react"; + +interface Props { + isOpen: boolean; + onClose: () => void; + onCreate: (name: string, description: string) => Promise; + run?: GraphExecution; +} + +export function CreateTemplateModal({ isOpen, onClose, onCreate }: Props) { + const [name, setName] = useState(""); + const [description, setDescription] = useState(""); + const [isCreating, setIsCreating] = useState(false); + + async function handleSubmit() { + if (!name.trim()) return; + + setIsCreating(true); + try { + await onCreate(name.trim(), description.trim()); + setName(""); + setDescription(""); + onClose(); + } finally { + setIsCreating(false); + } + } + + function handleCancel() { + setName(""); + setDescription(""); + onClose(); + } + + return ( + onClose() }} + styling={{ maxWidth: "500px" }} + > + +
+
+ + Create Template + + + Save this task as a template to reuse later with the same inputs + and credentials. + +
+ +
+ setName(e.target.value)} + autoFocus + /> + setDescription(e.target.value)} + rows={3} + /> +
+
+ + +
+ + +
+
+
+
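+ {/*
+ handleSubmit ignores an empty name, trims both fields before calling
+ onCreate, and resets the fields and closes only after the promise
+ resolves; the finally block clears isCreating either way. A minimal
+ usage sketch (handler name hypothetical; the real wiring lives in
+ SelectedRunActions below):
+
+ const [open, setOpen] = useState(false);
+ <CreateTemplateModal
+ isOpen={open}
+ onClose={() => setOpen(false)}
+ onCreate={saveRunAsPreset}
+ run={run}
+ />
+ */}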
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx index 4d3bea526e..92db3e0b37 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/SelectedRunActions.tsx @@ -7,44 +7,55 @@ import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; import { ArrowBendLeftUpIcon, ArrowBendRightDownIcon, + CardsThreeIcon, EyeIcon, StopIcon, } from "@phosphor-icons/react"; import { AgentActionsDropdown } from "../../../AgentActionsDropdown"; +import { SelectedActionsWrap } from "../../../SelectedActionsWrap"; import { ShareRunButton } from "../../../ShareRunButton/ShareRunButton"; +import { CreateTemplateModal } from "../CreateTemplateModal/CreateTemplateModal"; import { useSelectedRunActions } from "./useSelectedRunActions"; type Props = { agent: LibraryAgent; run: GraphExecution | undefined; - scheduleRecurrence?: string; onSelectRun?: (id: string) => void; onClearSelectedRun?: () => void; }; -export function SelectedRunActions(props: Props) { +export function SelectedRunActions({ + agent, + run, + onSelectRun, + onClearSelectedRun, +}: Props) { const { + canRunManually, handleRunAgain, handleStopRun, isRunningAgain, canStop, isStopping, openInBuilderHref, + handleCreateTemplate, + isCreateTemplateModalOpen, + setIsCreateTemplateModalOpen, } = useSelectedRunActions({ - agentGraphId: props.agent.graph_id, - run: props.run, - onSelectRun: props.onSelectRun, - onClearSelectedRun: props.onClearSelectedRun, + agentGraphId: agent.graph_id, + run: run, + agent: agent, + onSelectRun: onSelectRun, }); const shareExecutionResultsEnabled = useGetFlag(Flag.SHARE_EXECUTION_RESULTS); - const isRunning = props.run?.status === "RUNNING"; + const isRunning = run?.status === "RUNNING"; - if (!props.run || !props.agent) return null; + if (!run || !agent) return null; return ( -
- {!isRunning ? ( + + {canRunManually && !isRunning ? ( + setIsCreateTemplateModalOpen(false)} + onCreate={handleCreateTemplate} + run={run} + /> + + )} -
+ ); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts index e88a4d6ea7..03fc0b4ae8 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/SelectedRunActions/useSelectedRunActions.ts @@ -5,26 +5,39 @@ import { usePostV1ExecuteGraphAgent, usePostV1StopGraphExecution, } from "@/app/api/__generated__/endpoints/graphs/graphs"; +import { + getGetV2ListPresetsQueryKey, + usePostV2CreateANewPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; import type { GraphExecution } from "@/app/api/__generated__/models/graphExecution"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { useToast } from "@/components/molecules/Toast/use-toast"; import { useQueryClient } from "@tanstack/react-query"; import { useState } from "react"; -interface Args { +interface Params { agentGraphId: string; run?: GraphExecution; + agent?: LibraryAgent; onSelectRun?: (id: string) => void; - onClearSelectedRun?: () => void; } -export function useSelectedRunActions(args: Args) { +export function useSelectedRunActions({ + agentGraphId, + run, + agent, + onSelectRun, +}: Params) { const queryClient = useQueryClient(); const { toast } = useToast(); const [showDeleteDialog, setShowDeleteDialog] = useState(false); + const [isCreateTemplateModalOpen, setIsCreateTemplateModalOpen] = + useState(false); - const canStop = - args.run?.status === "RUNNING" || args.run?.status === "QUEUED"; + const canStop = run?.status === "RUNNING" || run?.status === "QUEUED"; + + const canRunManually = !agent?.trigger_setup_info; const { mutateAsync: stopRun, isPending: isStopping } = usePostV1StopGraphExecution(); @@ -32,19 +45,22 @@ export function useSelectedRunActions(args: Args) { const { mutateAsync: executeRun, isPending: isRunningAgain } = usePostV1ExecuteGraphAgent(); + const { mutateAsync: createPreset, isPending: isCreatingTemplate } = + usePostV2CreateANewPreset(); + async function handleStopRun() { try { await stopRun({ - graphId: args.run?.graph_id ?? "", - graphExecId: args.run?.id ?? "", + graphId: run?.graph_id ?? "", + graphExecId: run?.id ?? 
"", }); toast({ title: "Run stopped" }); await queryClient.invalidateQueries({ - queryKey: getGetV1ListGraphExecutionsInfiniteQueryOptions( - args.agentGraphId, - ).queryKey, + queryKey: + getGetV1ListGraphExecutionsInfiniteQueryOptions(agentGraphId) + .queryKey, }); } catch (error: unknown) { toast({ @@ -59,7 +75,7 @@ export function useSelectedRunActions(args: Args) { } async function handleRunAgain() { - if (!args.run) { + if (!run) { toast({ title: "Run not found", description: "Run not found", @@ -72,11 +88,11 @@ export function useSelectedRunActions(args: Args) { toast({ title: "Run started" }); const res = await executeRun({ - graphId: args.run.graph_id, - graphVersion: args.run.graph_version, + graphId: run.graph_id, + graphVersion: run.graph_version, data: { - inputs: args.run.inputs || {}, - credentials_inputs: args.run.credential_inputs || {}, + inputs: run.inputs || {}, + credentials_inputs: run.credential_inputs || {}, source: "library", }, }); @@ -84,12 +100,12 @@ export function useSelectedRunActions(args: Args) { const newRunId = res?.status === 200 ? (res?.data?.id ?? "") : ""; await queryClient.invalidateQueries({ - queryKey: getGetV1ListGraphExecutionsInfiniteQueryOptions( - args.agentGraphId, - ).queryKey, + queryKey: + getGetV1ListGraphExecutionsInfiniteQueryOptions(agentGraphId) + .queryKey, }); - if (newRunId && args.onSelectRun) args.onSelectRun(newRunId); + if (newRunId && onSelectRun) onSelectRun(newRunId); } catch (error: unknown) { toast({ title: "Failed to start run", @@ -106,9 +122,55 @@ export function useSelectedRunActions(args: Args) { setShowDeleteDialog(open); } + async function handleCreateTemplate(name: string, description: string) { + if (!run) { + toast({ + title: "Run not found", + description: "Cannot create template from missing run", + variant: "destructive", + }); + return; + } + + try { + const res = await createPreset({ + data: { + name, + description, + graph_execution_id: run.id, + }, + }); + + if (res.status === 200) { + toast({ + title: "Template created", + }); + + if (agent) { + queryClient.invalidateQueries({ + queryKey: getGetV2ListPresetsQueryKey({ + graph_id: agent.graph_id, + }), + }); + } + + setIsCreateTemplateModalOpen(false); + } + } catch (error: unknown) { + toast({ + title: "Failed to create template", + description: + error instanceof Error + ? error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } + } + // Open in builder URL helper - const openInBuilderHref = args.run - ? `/build?flowID=${args.run.graph_id}&flowVersion=${args.run.graph_version}&flowExecutionID=${args.run.id}` + const openInBuilderHref = run + ? 
`/build?flowID=${run.graph_id}&flowVersion=${run.graph_version}&flowExecutionID=${run.id}` : undefined; return { @@ -116,9 +178,14 @@ export function useSelectedRunActions(args: Args) { showDeleteDialog, canStop, isStopping, + canRunManually, isRunningAgain, handleShowDeleteDialog, handleStopRun, handleRunAgain, + handleCreateTemplate, + isCreatingTemplate, + isCreateTemplateModalOpen, + setIsCreateTemplateModalOpen, } as const; } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/WebhookTriggerSection.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/WebhookTriggerSection.tsx new file mode 100644 index 0000000000..0b24f38731 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/components/WebhookTriggerSection.tsx @@ -0,0 +1,92 @@ +"use client"; + +import { GraphTriggerInfo } from "@/app/api/__generated__/models/graphTriggerInfo"; +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { CopyIcon } from "@phosphor-icons/react"; +import { RunDetailCard } from "../../RunDetailCard/RunDetailCard"; + +interface Props { + preset: LibraryAgentPreset; + triggerSetupInfo: GraphTriggerInfo; +} + +function getTriggerStatus( + preset: LibraryAgentPreset, +): "active" | "inactive" | "broken" { + if (!preset.webhook_id || !preset.webhook) return "broken"; + return preset.is_active ? "active" : "inactive"; +} + +export function WebhookTriggerSection({ preset, triggerSetupInfo }: Props) { + const status = getTriggerStatus(preset); + const webhook = preset.webhook; + + function handleCopyWebhookUrl() { + if (webhook?.url) { + navigator.clipboard.writeText(webhook.url); + } + } + + return ( + +
+
+ Status + + {status === "active" + ? "Active" + : status === "inactive" + ? "Inactive" + : "Broken"} + +
+ + {!preset.webhook_id ? ( + + This trigger is not attached to a webhook. Use "Set up + trigger" to fix this. + + ) : !triggerSetupInfo.credentials_input_name && webhook ? ( +
+ + This trigger is ready to be used. Use the Webhook URL below to set + up the trigger connection with the service of your choosing. + +
+ Webhook URL: +
+ {webhook.url} + +
+
+
+ ) : ( + + This agent trigger is{" "} + {preset.is_active + ? "ready. When a trigger is received, it will run with the provided settings." + : "disabled. It will not respond to triggers until you enable it."} + + )} +
+
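+ {/*
+ getTriggerStatus above collapses the preset into three states:
+ "broken" when webhook_id/webhook is missing, otherwise "active" or
+ "inactive" from preset.is_active; handleCopyWebhookUrl writes
+ webhook.url to the clipboard via navigator.clipboard.writeText.
+ */}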
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/useSelectedRunView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/useSelectedRunView.ts index 276673d389..342241ef89 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/useSelectedRunView.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedRunView/useSelectedRunView.ts @@ -1,8 +1,11 @@ "use client"; import { useGetV1GetExecutionDetails } from "@/app/api/__generated__/endpoints/graphs/graphs"; -import type { GetV1GetExecutionDetails200 } from "@/app/api/__generated__/models/getV1GetExecutionDetails200"; +import { useGetV2GetASpecificPreset } from "@/app/api/__generated__/endpoints/presets/presets"; import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; +import type { GetV1GetExecutionDetails200 } from "@/app/api/__generated__/models/getV1GetExecutionDetails200"; +import type { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import { okData } from "@/app/api/helpers"; export function useSelectedRunView(graphId: string, runId: string) { const query = useGetV1GetExecutionDetails(graphId, runId, { @@ -37,6 +40,18 @@ export function useSelectedRunView(graphId: string, runId: string) { ? (query.data?.data as GetV1GetExecutionDetails200) : undefined; + const presetId = + run && "preset_id" in run && run.preset_id + ? (run.preset_id as string) + : undefined; + + const presetQuery = useGetV2GetASpecificPreset(presetId || "", { + query: { + enabled: !!presetId, + select: (res) => okData(res), + }, + }); + const httpError = status && status !== 200 ? 
{ status, statusText: `Request failed: ${status}` } @@ -44,8 +59,9 @@ export function useSelectedRunView(graphId: string, runId: string) { return { run, - isLoading: query.isLoading, - responseError: query.error, + preset: presetQuery.data, + isLoading: query.isLoading || presetQuery.isLoading, + responseError: query.error || presetQuery.error, httpError, } as const; } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx index 6eda578f87..7f67963aa3 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/SelectedScheduleView.tsx @@ -6,8 +6,8 @@ import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner import { Text } from "@/components/atoms/Text/Text"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; import { humanizeCronExpression } from "@/lib/cron-expression-utils"; +import { isLargeScreen, useBreakpoint } from "@/lib/hooks/useBreakpoint"; import { formatInTimezone, getTimezoneDisplayName } from "@/lib/timezone-utils"; -import { AGENT_LIBRARY_SECTION_PADDING_X } from "../../../helpers"; import { AgentInputsReadOnly } from "../../modals/AgentInputsReadOnly/AgentInputsReadOnly"; import { LoadingSelectedContent } from "../LoadingSelectedContent"; import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; @@ -16,9 +16,6 @@ import { SelectedViewLayout } from "../SelectedViewLayout"; import { SelectedScheduleActions } from "./components/SelectedScheduleActions"; import { useSelectedScheduleView } from "./useSelectedScheduleView"; -const anchorStyles = - "border-b-2 border-transparent pb-1 text-sm font-medium text-slate-600 transition-colors hover:text-slate-900 hover:border-slate-900"; - interface Props { agent: LibraryAgent; scheduleId: string; @@ -41,12 +38,8 @@ export function SelectedScheduleView({ }, }); - function scrollToSection(id: string) { - const element = document.getElementById(id); - if (element) { - element.scrollIntoView({ behavior: "smooth", block: "start" }); - } - } + const breakpoint = useBreakpoint(); + const isLgScreenUp = isLargeScreen(breakpoint); if (error) { return ( @@ -83,38 +76,25 @@ export function SelectedScheduleView({
-
-
-
- + + {schedule && !isLgScreenUp ? ( +
+
-
-
- - {/* Navigation Links */} -
- + ) : null}
{/* Schedule Section */} @@ -174,10 +154,6 @@ export function SelectedScheduleView({
- {/* {// TODO: re-enable edit inputs modal once the API supports it */} - {/* {schedule && Object.keys(schedule.input_data).length > 0 && ( - - )} */}
- {schedule ? ( -
+ {schedule && isLgScreenUp ? ( +
- - - - -
- Edit inputs -
- {Object.entries(inputFields).map(([key, fieldSchema]) => ( -
- - setValues((prev) => ({ ...prev, [key]: v }))} - /> -
- ))} -
-
- -
- - -
-
-
- - ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditInputsModal/useEditInputsModal.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditInputsModal/useEditInputsModal.ts deleted file mode 100644 index 1c061ea7b7..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/EditInputsModal/useEditInputsModal.ts +++ /dev/null @@ -1,78 +0,0 @@ -"use client"; - -import { useMemo, useState } from "react"; -import { useQueryClient } from "@tanstack/react-query"; -import { getGetV1ListExecutionSchedulesForAGraphQueryKey } from "@/app/api/__generated__/endpoints/schedules/schedules"; -import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; -import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; -import { useToast } from "@/components/molecules/Toast/use-toast"; - -function getAgentInputFields(agent: LibraryAgent): Record { - const schema = agent.input_schema as unknown as { - properties?: Record; - } | null; - if (!schema || !schema.properties) return {}; - const properties = schema.properties as Record; - const visibleEntries = Object.entries(properties).filter( - ([, sub]) => !sub?.hidden, - ); - return Object.fromEntries(visibleEntries); -} - -export function useEditInputsModal( - agent: LibraryAgent, - schedule: GraphExecutionJobInfo, -) { - const queryClient = useQueryClient(); - const { toast } = useToast(); - const [isOpen, setIsOpen] = useState(false); - const [isSaving, setIsSaving] = useState(false); - const inputFields = useMemo(() => getAgentInputFields(agent), [agent]); - const [values, setValues] = useState>({ - ...(schedule.input_data as Record), - }); - - async function handleSave() { - setIsSaving(true); - try { - const res = await fetch(`/api/schedules/${schedule.id}`, { - method: "PATCH", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ inputs: values }), - }); - if (!res.ok) { - let message = "Failed to update schedule inputs"; - const data = await res.json(); - message = data?.message || data?.detail || message; - throw new Error(message); - } - - await queryClient.invalidateQueries({ - queryKey: getGetV1ListExecutionSchedulesForAGraphQueryKey( - schedule.graph_id, - ), - }); - toast({ - title: "Schedule inputs updated", - }); - setIsOpen(false); - } catch (error: any) { - toast({ - title: "Failed to update schedule inputs", - description: error?.message || "An unexpected error occurred.", - variant: "destructive", - }); - } - setIsSaving(false); - } - - return { - isOpen, - setIsOpen, - inputFields, - values, - setValues, - handleSave, - isSaving, - } as const; -} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/SelectedScheduleActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/SelectedScheduleActions.tsx index 16e08e48ba..0fd34851fd 100644 --- 
a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/SelectedScheduleActions.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedScheduleView/components/SelectedScheduleActions.tsx @@ -3,6 +3,7 @@ import { Button } from "@/components/atoms/Button/Button"; import { EyeIcon } from "@phosphor-icons/react"; import { AgentActionsDropdown } from "../../AgentActionsDropdown"; import { useScheduleDetailHeader } from "../../RunDetailHeader/useScheduleDetailHeader"; +import { SelectedActionsWrap } from "../../SelectedActionsWrap"; type Props = { agent: LibraryAgent; @@ -19,20 +20,21 @@ export function SelectedScheduleActions({ agent, scheduleId }: Props) { return ( <> -
+ {openInBuilderHref && ( )} -
+ ); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/SelectedTemplateView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/SelectedTemplateView.tsx new file mode 100644 index 0000000000..ead985457e --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/SelectedTemplateView.tsx @@ -0,0 +1,192 @@ +"use client"; + +import type { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Input } from "@/components/atoms/Input/Input"; +import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import { + getAgentCredentialsFields, + getAgentInputFields, +} from "../../modals/AgentInputsReadOnly/helpers"; +import { CredentialsInput } from "../../modals/CredentialsInputs/CredentialsInputs"; +import { RunAgentInputs } from "../../modals/RunAgentInputs/RunAgentInputs"; +import { LoadingSelectedContent } from "../LoadingSelectedContent"; +import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; +import { RunDetailHeader } from "../RunDetailHeader/RunDetailHeader"; +import { SelectedViewLayout } from "../SelectedViewLayout"; +import { SelectedTemplateActions } from "./components/SelectedTemplateActions"; +import { WebhookTriggerCard } from "./components/WebhookTriggerCard"; +import { useSelectedTemplateView } from "./useSelectedTemplateView"; + +interface Props { + agent: LibraryAgent; + templateId: string; + onClearSelectedRun?: () => void; + onRunCreated?: (execution: GraphExecutionMeta) => void; + onSwitchToRunsTab?: () => void; +} + +export function SelectedTemplateView({ + agent, + templateId, + onClearSelectedRun, + onRunCreated, + onSwitchToRunsTab, +}: Props) { + const { + template, + isLoading, + error, + name, + setName, + description, + setDescription, + inputs, + setInputValue, + credentials, + setCredentialValue, + handleSaveChanges, + handleStartTask, + isSaving, + isStarting, + } = useSelectedTemplateView({ + templateId, + graphId: agent.graph_id, + onRunCreated, + }); + + const agentInputFields = getAgentInputFields(agent); + const agentCredentialsFields = getAgentCredentialsFields(agent); + const inputFields = Object.entries(agentInputFields); + const credentialFields = Object.entries(agentCredentialsFields); + + if (error) { + return ( + + ); + } + + if (isLoading && !template) { + return ; + } + + if (!template) { + return null; + } + + const templateOrTrigger = agent.trigger_setup_info ? "Trigger" : "Template"; + const hasWebhook = !!template.webhook_id && template.webhook; + + return ( +
+
+ +
+ + + {hasWebhook && agent.trigger_setup_info && ( + + )} + + +
+ setName(e.target.value)} + placeholder={`Enter ${templateOrTrigger.toLowerCase()} name`} + /> + + setDescription(e.target.value)} + placeholder={`Enter ${templateOrTrigger.toLowerCase()} description`} + /> +
+
+ + {inputFields.length > 0 && ( + +
+ {inputFields.map(([key, inputSubSchema]) => ( + setInputValue(key, value)} + /> + ))} +
+
+ )} + + {credentialFields.length > 0 && ( + +
+ {credentialFields.map(([key, inputSubSchema]) => ( + + setCredentialValue(key, value!) + } + siblingInputs={inputs} + /> + ))} +
+
+ )} +
+
+
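+ {/*
+ Editing semantics (see useSelectedTemplateView): "save changes"
+ PATCHes only the fields that differ from the fetched preset, while
+ "start task" passes inputs/credentials to the execute call only
+ when they differ from the persisted values, so unsaved edits still
+ apply to that single run.
+ */}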
+ {template ? ( +
+ +
+ ) : null} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/SelectedTemplateActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/SelectedTemplateActions.tsx new file mode 100644 index 0000000000..1d50ec7c85 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/SelectedTemplateActions.tsx @@ -0,0 +1,174 @@ +"use client"; + +import { + getGetV2ListPresetsQueryKey, + useDeleteV2DeleteAPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import type { LibraryAgentPresetResponse } from "@/app/api/__generated__/models/libraryAgentPresetResponse"; +import { okData } from "@/app/api/helpers"; +import { Button } from "@/components/atoms/Button/Button"; +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { FloppyDiskIcon, PlayIcon, TrashIcon } from "@phosphor-icons/react"; +import { useQueryClient } from "@tanstack/react-query"; +import { useState } from "react"; +import { AgentActionsDropdown } from "../../AgentActionsDropdown"; + +interface Props { + agent: LibraryAgent; + templateId: string; + onDeleted?: () => void; + onSaveChanges?: () => void; + onStartTask?: () => void; + isSaving?: boolean; + isStarting?: boolean; + onSwitchToRunsTab?: () => void; +} + +export function SelectedTemplateActions({ + agent, + templateId, + onDeleted, + onSaveChanges, + onStartTask, + isSaving, + isStarting, + onSwitchToRunsTab, +}: Props) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + const [showDeleteDialog, setShowDeleteDialog] = useState(false); + + const deleteMutation = useDeleteV2DeleteAPreset({ + mutation: { + onSuccess: async () => { + toast({ + title: "Template deleted", + }); + const queryKey = getGetV2ListPresetsQueryKey({ + graph_id: agent.graph_id, + }); + + queryClient.invalidateQueries({ + queryKey, + }); + + const queryData = queryClient.getQueryData<{ + data: LibraryAgentPresetResponse; + }>(queryKey); + + const presets = + okData(queryData)?.presets ?? []; + const templates = presets.filter( + (preset) => !preset.webhook_id || !preset.webhook, + ); + + setShowDeleteDialog(false); + onDeleted?.(); + + if (templates.length === 0 && onSwitchToRunsTab) { + onSwitchToRunsTab(); + } + }, + onError: (error: any) => { + toast({ + title: "Failed to delete template", + description: error.message || "An unexpected error occurred.", + variant: "destructive", + }); + }, + }, + }); + + function handleDelete() { + deleteMutation.mutate({ presetId: templateId }); + } + + return ( + <> +
+ + {onStartTask && ( + + )} + + +
+ + + + + Are you sure you want to delete this template? This action cannot be + undone. + + + + + + + + + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/WebhookTriggerCard.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/WebhookTriggerCard.tsx new file mode 100644 index 0000000000..d8a54f0474 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/components/WebhookTriggerCard.tsx @@ -0,0 +1,92 @@ +"use client"; + +import { GraphTriggerInfo } from "@/app/api/__generated__/models/graphTriggerInfo"; +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { CopyIcon } from "@phosphor-icons/react"; +import { RunDetailCard } from "../../RunDetailCard/RunDetailCard"; + +interface Props { + template: LibraryAgentPreset; + triggerSetupInfo: GraphTriggerInfo; +} + +function getTriggerStatus( + template: LibraryAgentPreset, +): "active" | "inactive" | "broken" { + if (!template.webhook_id || !template.webhook) return "broken"; + return template.is_active ? "active" : "inactive"; +} + +export function WebhookTriggerCard({ template, triggerSetupInfo }: Props) { + const status = getTriggerStatus(template); + const webhook = template.webhook; + + function handleCopyWebhookUrl() { + if (webhook?.url) { + navigator.clipboard.writeText(webhook.url); + } + } + + return ( + +
+
+ Status + + {status === "active" + ? "Active" + : status === "inactive" + ? "Inactive" + : "Broken"} + +
+ + {!template.webhook_id ? ( + + This trigger is not attached to a webhook. Use "Set up + trigger" to fix this. + + ) : !triggerSetupInfo.credentials_input_name && webhook ? ( +
+ + This trigger is ready to be used. Use the Webhook URL below to set + up the trigger connection with the service of your choosing. + +
+ Webhook URL: +
+ {webhook.url} + +
+
+
+ ) : ( + + This agent trigger is{" "} + {template.is_active + ? "ready. When a trigger is received, it will run with the provided settings." + : "disabled. It will not respond to triggers until you enable it."} + + )} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/useSelectedTemplateView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/useSelectedTemplateView.ts new file mode 100644 index 0000000000..a0f34f54a2 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTemplateView/useSelectedTemplateView.ts @@ -0,0 +1,199 @@ +"use client"; + +import { getGetV1ListGraphExecutionsInfiniteQueryOptions } from "@/app/api/__generated__/endpoints/graphs/graphs"; +import { + getGetV2GetASpecificPresetQueryKey, + getGetV2ListPresetsQueryKey, + useGetV2GetASpecificPreset, + usePatchV2UpdateAnExistingPreset, + usePostV2ExecuteAPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; +import type { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; +import type { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import type { LibraryAgentPresetUpdatable } from "@/app/api/__generated__/models/libraryAgentPresetUpdatable"; +import { okData } from "@/app/api/helpers"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types"; +import { useQueryClient } from "@tanstack/react-query"; +import { useEffect, useState } from "react"; + +type Args = { + templateId: string; + graphId: string; + onRunCreated?: (execution: GraphExecutionMeta) => void; +}; + +export function useSelectedTemplateView({ + templateId, + graphId, + onRunCreated, +}: Args) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + + const query = useGetV2GetASpecificPreset(templateId, { + query: { + enabled: !!templateId, + select: (res) => okData(res), + }, + }); + + const [name, setName] = useState(""); + const [description, setDescription] = useState(""); + const [inputs, setInputs] = useState>({}); + const [credentials, setCredentials] = useState< + Record + >({}); + + useEffect(() => { + if (query.data) { + setName(query.data.name || ""); + setDescription(query.data.description || ""); + setInputs(query.data.inputs || {}); + setCredentials(query.data.credentials || {}); + } + }, [query.data]); + + const updateMutation = usePatchV2UpdateAnExistingPreset({ + mutation: { + onSuccess: (response) => { + if (response.status === 200) { + toast({ + title: "Template updated", + }); + queryClient.invalidateQueries({ + queryKey: getGetV2GetASpecificPresetQueryKey(templateId), + }); + queryClient.invalidateQueries({ + queryKey: getGetV2ListPresetsQueryKey({ graph_id: graphId }), + }); + } + }, + onError: (error: any) => { + toast({ + title: "Failed to update template", + description: error.message || "An unexpected error occurred.", + variant: "destructive", + }); + }, + }, + }); + + const executeMutation = usePostV2ExecuteAPreset({ + mutation: { + onSuccess: (response) => { + if (response.status === 200) { + const execution = okData(response); + if (execution) { + toast({ + title: "Task started", + }); + queryClient.invalidateQueries({ + queryKey: + getGetV1ListGraphExecutionsInfiniteQueryOptions(graphId) + .queryKey, + }); + onRunCreated?.(execution); + } + } + }, + onError: (error: any) => { + toast({ + title: "Failed to start task", + description: error.message || "An unexpected 
error occurred.", + variant: "destructive", + }); + }, + }, + }); + + function handleSaveChanges() { + if (!query.data) return; + + const updateData: LibraryAgentPresetUpdatable = {}; + if (name !== (query.data.name || "")) { + updateData.name = name; + } + + if (description !== (query.data.description || "")) { + updateData.description = description; + } + + const inputsChanged = + JSON.stringify(inputs) !== JSON.stringify(query.data.inputs || {}); + + const credentialsChanged = + JSON.stringify(credentials) !== + JSON.stringify(query.data.credentials || {}); + + if (inputsChanged || credentialsChanged) { + updateData.inputs = inputs; + updateData.credentials = credentials; + } + + updateMutation.mutate({ + presetId: templateId, + data: updateData, + }); + } + + function handleStartTask() { + if (!query.data) return; + + const inputsChanged = + JSON.stringify(inputs) !== JSON.stringify(query.data.inputs || {}); + + const credentialsChanged = + JSON.stringify(credentials) !== + JSON.stringify(query.data.credentials || {}); + + // Use changed unpersisted inputs if applicable + executeMutation.mutate({ + presetId: templateId, + data: { + inputs: inputsChanged ? inputs : undefined, + credential_inputs: credentialsChanged ? credentials : undefined, + }, + }); + } + + function setInputValue(key: string, value: any) { + setInputs((prev) => ({ ...prev, [key]: value })); + } + + function setCredentialValue(key: string, value: CredentialsMetaInput) { + setCredentials((prev) => ({ ...prev, [key]: value })); + } + + const httpError = + query.isSuccess && !query.data + ? { status: 404, statusText: "Not found" } + : undefined; + + useEffect(() => { + if (updateMutation.isSuccess && query.data) { + setName(query.data.name || ""); + setDescription(query.data.description || ""); + setInputs(query.data.inputs || {}); + setCredentials(query.data.credentials || {}); + } + }, [updateMutation.isSuccess, query.data]); + + return { + template: query.data, + isLoading: query.isLoading, + error: query.error || httpError, + name, + setName, + description, + setDescription, + inputs, + setInputValue, + credentials, + setCredentialValue, + handleSaveChanges, + handleStartTask, + isSaving: updateMutation.isPending, + isStarting: executeMutation.isPending, + } as const; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/SelectedTriggerView.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/SelectedTriggerView.tsx new file mode 100644 index 0000000000..64d4430e78 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/SelectedTriggerView.tsx @@ -0,0 +1,183 @@ +"use client"; + +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Input } from "@/components/atoms/Input/Input"; +import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import { + getAgentCredentialsFields, + getAgentInputFields, +} from "../../modals/AgentInputsReadOnly/helpers"; +import { CredentialsInput } from "../../modals/CredentialsInputs/CredentialsInputs"; +import { RunAgentInputs } from "../../modals/RunAgentInputs/RunAgentInputs"; +import { LoadingSelectedContent } from "../LoadingSelectedContent"; +import { RunDetailCard } from "../RunDetailCard/RunDetailCard"; +import { RunDetailHeader } 
from "../RunDetailHeader/RunDetailHeader"; +import { WebhookTriggerCard } from "../SelectedTemplateView/components/WebhookTriggerCard"; +import { SelectedViewLayout } from "../SelectedViewLayout"; +import { SelectedTriggerActions } from "./components/SelectedTriggerActions"; +import { useSelectedTriggerView } from "./useSelectedTriggerView"; + +interface Props { + agent: LibraryAgent; + triggerId: string; + onClearSelectedRun?: () => void; + onSwitchToRunsTab?: () => void; +} + +export function SelectedTriggerView({ + agent, + triggerId, + onClearSelectedRun, + onSwitchToRunsTab, +}: Props) { + const { + trigger, + isLoading, + error, + name, + setName, + description, + setDescription, + inputs, + setInputValue, + credentials, + setCredentialValue, + handleSaveChanges, + isSaving, + } = useSelectedTriggerView({ + triggerId, + graphId: agent.graph_id, + }); + + const agentInputFields = getAgentInputFields(agent); + const agentCredentialsFields = getAgentCredentialsFields(agent); + const inputFields = Object.entries(agentInputFields); + const credentialFields = Object.entries(agentCredentialsFields); + + if (error) { + return ( + + ); + } + + if (isLoading && !trigger) { + return ; + } + + if (!trigger) { + return null; + } + + const hasWebhook = !!trigger.webhook_id && trigger.webhook; + + return ( +
+
+ +
+ + + +
+ setName(e.target.value)} + placeholder="Enter trigger name" + /> + + setDescription(e.target.value)} + placeholder="Enter trigger description" + /> +
+
+ + {hasWebhook && agent.trigger_setup_info && ( + + )} + + {inputFields.length > 0 && ( + +
+ {inputFields.map(([key, inputSubSchema]) => ( + setInputValue(key, value)} + /> + ))} +
+
+ )} + + {credentialFields.length > 0 && ( + +
+ {credentialFields.map(([key, inputSubSchema]) => ( + + setCredentialValue(key, value!) + } + siblingInputs={inputs} + /> + ))} +
+
+ )} +
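+ {/*
+ Unlike the template view there is no start-task path here: trigger
+ presets run when their webhook fires, so SelectedTriggerActions and
+ useSelectedTriggerView below only expose save and delete (no
+ execute mutation).
+ */}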
+
+
+ {trigger ? ( +
+ +
+ ) : null} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/components/SelectedTriggerActions.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/components/SelectedTriggerActions.tsx new file mode 100644 index 0000000000..0746027f37 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/components/SelectedTriggerActions.tsx @@ -0,0 +1,151 @@ +"use client"; + +import { + getGetV2ListPresetsQueryKey, + useDeleteV2DeleteAPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import type { LibraryAgentPresetResponse } from "@/app/api/__generated__/models/libraryAgentPresetResponse"; +import { okData } from "@/app/api/helpers"; +import { Button } from "@/components/atoms/Button/Button"; +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { FloppyDiskIcon, TrashIcon } from "@phosphor-icons/react"; +import { useQueryClient } from "@tanstack/react-query"; +import { useState } from "react"; +import { AgentActionsDropdown } from "../../AgentActionsDropdown"; + +interface Props { + agent: LibraryAgent; + triggerId: string; + onDeleted?: () => void; + onSaveChanges?: () => void; + isSaving?: boolean; + onSwitchToRunsTab?: () => void; +} + +export function SelectedTriggerActions({ + agent, + triggerId, + onDeleted, + onSaveChanges, + isSaving, + onSwitchToRunsTab, +}: Props) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + const [showDeleteDialog, setShowDeleteDialog] = useState(false); + + const deleteMutation = useDeleteV2DeleteAPreset({ + mutation: { + onSuccess: async () => { + toast({ + title: "Trigger deleted", + }); + const queryKey = getGetV2ListPresetsQueryKey({ + graph_id: agent.graph_id, + }); + + queryClient.invalidateQueries({ + queryKey, + }); + + const queryData = queryClient.getQueryData<{ + data: LibraryAgentPresetResponse; + }>(queryKey); + + const presets = + okData(queryData)?.presets ?? []; + const triggers = presets.filter( + (preset) => preset.webhook_id && preset.webhook, + ); + + setShowDeleteDialog(false); + onDeleted?.(); + + if (triggers.length === 0 && onSwitchToRunsTab) { + onSwitchToRunsTab(); + } + }, + onError: (error: any) => { + toast({ + title: "Failed to delete trigger", + description: error.message || "An unexpected error occurred.", + variant: "destructive", + }); + }, + }, + }); + + function handleDelete() { + deleteMutation.mutate({ presetId: triggerId }); + } + + return ( + <> +
+ + + +
+ + + + + Are you sure you want to delete this trigger? This action cannot be + undone. + + + + + + + + + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/useSelectedTriggerView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/useSelectedTriggerView.ts new file mode 100644 index 0000000000..4669d850b2 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedTriggerView/useSelectedTriggerView.ts @@ -0,0 +1,141 @@ +"use client"; + +import { + getGetV2GetASpecificPresetQueryKey, + getGetV2ListPresetsQueryKey, + useGetV2GetASpecificPreset, + usePatchV2UpdateAnExistingPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; +import type { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import type { LibraryAgentPresetUpdatable } from "@/app/api/__generated__/models/libraryAgentPresetUpdatable"; +import { okData } from "@/app/api/helpers"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types"; +import { useQueryClient } from "@tanstack/react-query"; +import { useEffect, useState } from "react"; + +type Args = { + triggerId: string; + graphId: string; +}; + +export function useSelectedTriggerView({ triggerId, graphId }: Args) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + + const query = useGetV2GetASpecificPreset(triggerId, { + query: { + enabled: !!triggerId, + select: (res) => okData(res), + }, + }); + + const [name, setName] = useState(""); + const [description, setDescription] = useState(""); + const [inputs, setInputs] = useState>({}); + const [credentials, setCredentials] = useState< + Record + >({}); + + useEffect(() => { + if (query.data) { + setName(query.data.name || ""); + setDescription(query.data.description || ""); + setInputs(query.data.inputs || {}); + setCredentials(query.data.credentials || {}); + } + }, [query.data]); + + const updateMutation = usePatchV2UpdateAnExistingPreset({ + mutation: { + onSuccess: (response) => { + if (response.status === 200) { + toast({ + title: "Trigger updated", + }); + queryClient.invalidateQueries({ + queryKey: getGetV2GetASpecificPresetQueryKey(triggerId), + }); + queryClient.invalidateQueries({ + queryKey: getGetV2ListPresetsQueryKey({ graph_id: graphId }), + }); + } + }, + onError: (error: any) => { + toast({ + title: "Failed to update trigger", + description: error.message || "An unexpected error occurred.", + variant: "destructive", + }); + }, + }, + }); + + function handleSaveChanges() { + if (!query.data) return; + + const updateData: LibraryAgentPresetUpdatable = {}; + if (name !== (query.data.name || "")) { + updateData.name = name; + } + + if (description !== (query.data.description || "")) { + updateData.description = description; + } + + const inputsChanged = + JSON.stringify(inputs) !== JSON.stringify(query.data.inputs || {}); + + const credentialsChanged = + JSON.stringify(credentials) !== + JSON.stringify(query.data.credentials || {}); + + if (inputsChanged || credentialsChanged) { + updateData.inputs = inputs; + updateData.credentials = credentials; + } + + updateMutation.mutate({ + presetId: triggerId, + data: updateData, + }); + } + + function setInputValue(key: string, 
value: any) { + setInputs((prev) => ({ ...prev, [key]: value })); + } + + function setCredentialValue(key: string, value: CredentialsMetaInput) { + setCredentials((prev) => ({ ...prev, [key]: value })); + } + + const httpError = + query.isSuccess && !query.data + ? { status: 404, statusText: "Not found" } + : undefined; + + useEffect(() => { + if (updateMutation.isSuccess && query.data) { + setName(query.data.name || ""); + setDescription(query.data.description || ""); + setInputs(query.data.inputs || {}); + setCredentials(query.data.credentials || {}); + } + }, [updateMutation.isSuccess, query.data]); + + return { + trigger: query.data, + isLoading: query.isLoading, + error: query.error || httpError, + name, + setName, + description, + setDescription, + inputs, + setInputValue, + credentials, + setCredentialValue, + handleSaveChanges, + isSaving: updateMutation.isPending, + } as const; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedViewLayout.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedViewLayout.tsx index 7c1153374b..242430ba6a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedViewLayout.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/SelectedViewLayout.tsx @@ -12,7 +12,7 @@ export function SelectedViewLayout(props: Props) { return (
void; + onSelectRun: ( + id: string, + tab?: "runs" | "scheduled" | "templates" | "triggers", + ) => void; onClearSelectedRun?: () => void; - onTabChange?: (tab: "runs" | "scheduled" | "templates") => void; + onTabChange?: (tab: "runs" | "scheduled" | "templates" | "triggers") => void; onCountsChange?: (info: { runsCount: number; schedulesCount: number; + templatesCount: number; + triggersCount: number; loading?: boolean; }) => void; } @@ -42,8 +49,12 @@ export function SidebarRunsList({ const { runs, schedules, + templates, + triggers, runsCount, schedulesCount, + templatesCount, + triggersCount, error, loading, fetchMoreRuns, @@ -79,7 +90,7 @@ export function SidebarRunsList({ { - const value = v as "runs" | "scheduled" | "templates"; + const value = v as "runs" | "scheduled" | "templates" | "triggers"; onTabChange?.(value); if (value === "runs") { if (runs && runs.length) { @@ -95,21 +106,38 @@ export function SidebarRunsList({ } } else if (value === "templates") { onClearSelectedRun?.(); + } else if (value === "triggers") { + onClearSelectedRun?.(); } }} className="flex min-h-0 flex-col overflow-hidden" > - - - Tasks {runsCount} - - - Scheduled {schedulesCount} - - - Templates 0 - - +
+
+
+ + + Tasks {runsCount} + + + Scheduled{" "} + {schedulesCount} + + {triggersCount > 0 && ( + + Triggers{" "} + {triggersCount} + + )} + + Templates{" "} + {templatesCount} + + +
+
<> (
- onSelectRun && onSelectRun(run.id, "runs")} /> @@ -151,6 +180,7 @@ export function SidebarRunsList({
onSelectRun(s.id, "scheduled")} /> @@ -165,6 +195,36 @@ export function SidebarRunsList({ )}
+ {triggersCount > 0 && ( + +
+ {triggers.length > 0 ? ( + triggers.map((trigger) => ( +
+ onSelectRun(trigger.id, "triggers")} + /> +
+ )) + ) : ( +
+ + No triggers set up + +
+ )} +
+
+ )}
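+ {/*
+ The Triggers tab is opt-in: the tab trigger above and this content
+ panel both render only while triggersCount > 0, so agents without
+ webhook presets keep the original three-tab layout.
+ */}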
-
- - No templates saved - -
+ {templates.length > 0 ? ( + templates.map((template) => ( +
+ onSelectRun(template.id, "templates")} + /> +
+ )) + ) : ( +
+ + No templates saved + +
+ )}
diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunIconWrapper.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/IconWrapper.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunIconWrapper.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/IconWrapper.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleActionsDropdown.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleActionsDropdown.tsx new file mode 100644 index 0000000000..d85d3ddfaf --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleActionsDropdown.tsx @@ -0,0 +1,123 @@ +"use client"; + +import { + getGetV1ListExecutionSchedulesForAGraphQueryOptions, + useDeleteV1DeleteExecutionSchedule, +} from "@/app/api/__generated__/endpoints/schedules/schedules"; +import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "@/components/molecules/DropdownMenu/DropdownMenu"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { DotsThreeVertical } from "@phosphor-icons/react"; +import { useQueryClient } from "@tanstack/react-query"; +import { useState } from "react"; + +interface Props { + agent: LibraryAgent; + schedule: GraphExecutionJobInfo; + onDeleted?: () => void; +} + +export function ScheduleActionsDropdown({ agent, schedule, onDeleted }: Props) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + const [showDeleteDialog, setShowDeleteDialog] = useState(false); + + const { mutateAsync: deleteSchedule, isPending: isDeleting } = + useDeleteV1DeleteExecutionSchedule(); + + async function handleDelete() { + try { + await deleteSchedule({ scheduleId: schedule.id }); + + toast({ title: "Schedule deleted" }); + + queryClient.invalidateQueries({ + queryKey: getGetV1ListExecutionSchedulesForAGraphQueryOptions( + agent.graph_id, + ).queryKey, + }); + + setShowDeleteDialog(false); + onDeleted?.(); + } catch (error: unknown) { + toast({ + title: "Failed to delete schedule", + description: + error instanceof Error + ? error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } + } + + return ( + <> + + + + + + { + e.stopPropagation(); + setShowDeleteDialog(true); + }} + className="flex items-center gap-2" + > + Delete schedule + + + + + + +
+ + Are you sure you want to delete this schedule? This action cannot + be undone. + + + + + +
+
+
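+ {/*
+ Deletion goes through useDeleteV1DeleteExecutionSchedule, then
+ invalidates the schedules list for agent.graph_id so the sidebar
+ refetches, closes the dialog, and notifies the parent through the
+ optional onDeleted callback.
+ */}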
+ + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx index b06b67647d..a389fb4fc8 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/ScheduleListItem.tsx @@ -1,38 +1,50 @@ "use client"; -import React from "react"; import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; -import moment from "moment"; -import { RunSidebarCard } from "./RunSidebarCard"; -import { IconWrapper } from "./RunIconWrapper"; +import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { ClockClockwiseIcon } from "@phosphor-icons/react"; +import moment from "moment"; +import { IconWrapper } from "./IconWrapper"; +import { ScheduleActionsDropdown } from "./ScheduleActionsDropdown"; +import { SidebarItemCard } from "./SidebarItemCard"; -interface ScheduleListItemProps { +interface Props { schedule: GraphExecutionJobInfo; + agent: LibraryAgent; selected?: boolean; onClick?: () => void; + onDeleted?: () => void; } export function ScheduleListItem({ schedule, + agent, selected, onClick, -}: ScheduleListItemProps) { + onDeleted, +}: Props) { return ( - + } + actions={ + + } /> ); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunSidebarCard.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/SidebarItemCard.tsx similarity index 68% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunSidebarCard.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/SidebarItemCard.tsx index eb163f7337..4f4e9962ce 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunSidebarCard.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/SidebarItemCard.tsx @@ -4,25 +4,27 @@ import { Text } from "@/components/atoms/Text/Text"; import { cn } from "@/lib/utils"; import React from "react"; -interface RunListItemProps { +interface Props { title: string; description?: string; icon?: React.ReactNode; selected?: boolean; onClick?: () => void; + actions?: React.ReactNode; } -export function RunSidebarCard({ +export function SidebarItemCard({ title, description, icon, selected, onClick, -}: RunListItemProps) { + actions, +}: Props) { return ( -
+        {actions ? (
+          <div onClick={(e) => e.stopPropagation()}>{actions}</div>
+        ) : null}
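The actions slot here is rendered inside a card whose root is itself clickable, so the wrapper swallows clicks before they bubble up; otherwise opening an item's menu would also select the item. A minimal sketch of the pattern, using a hypothetical ClickableCard rather than the actual SidebarItemCard code:

import React from "react";

interface CardProps {
  title: string;
  onClick?: () => void; // selects the card
  actions?: React.ReactNode; // e.g. a dropdown menu trigger
}

// Hypothetical component illustrating the slot-plus-stopPropagation pattern.
function ClickableCard({ title, onClick, actions }: CardProps) {
  return (
    <div role="button" tabIndex={0} onClick={onClick}>
      <span>{title}</span>
      {actions ? (
        // Without stopPropagation, a click on the menu would bubble to the
        // card's onClick and select the card as a side effect.
        <div onClick={(e) => e.stopPropagation()}>{actions}</div>
      ) : null}
    </div>
  );
}

ScheduleListItem, TaskListItem, TemplateListItem, and TriggerListItem all fill this slot with their respective actions dropdowns.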
- +
); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskActionsDropdown.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskActionsDropdown.tsx new file mode 100644 index 0000000000..95cc7740f8 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskActionsDropdown.tsx @@ -0,0 +1,185 @@ +"use client"; + +import { + getGetV1ListGraphExecutionsInfiniteQueryOptions, + useDeleteV1DeleteGraphExecution, +} from "@/app/api/__generated__/endpoints/graphs/graphs"; +import { + getGetV2ListPresetsQueryKey, + usePostV2CreateANewPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; +import type { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuSeparator, + DropdownMenuTrigger, +} from "@/components/molecules/DropdownMenu/DropdownMenu"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { DotsThreeVertical } from "@phosphor-icons/react"; +import { useQueryClient } from "@tanstack/react-query"; +import { useState } from "react"; +import { CreateTemplateModal } from "../../../selected-views/SelectedRunView/components/CreateTemplateModal/CreateTemplateModal"; + +interface Props { + agent: LibraryAgent; + run: GraphExecutionMeta; + onDeleted?: () => void; +} + +export function TaskActionsDropdown({ agent, run, onDeleted }: Props) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + const [showDeleteDialog, setShowDeleteDialog] = useState(false); + const [isCreateTemplateModalOpen, setIsCreateTemplateModalOpen] = + useState(false); + + const { mutateAsync: deleteRun, isPending: isDeletingRun } = + useDeleteV1DeleteGraphExecution(); + + const { mutateAsync: createPreset } = usePostV2CreateANewPreset(); + + async function handleDeleteRun() { + try { + await deleteRun({ graphExecId: run.id }); + + toast({ title: "Task deleted" }); + + await queryClient.refetchQueries({ + queryKey: getGetV1ListGraphExecutionsInfiniteQueryOptions( + agent.graph_id, + ).queryKey, + }); + + setShowDeleteDialog(false); + onDeleted?.(); + } catch (error: unknown) { + toast({ + title: "Failed to delete task", + description: + error instanceof Error + ? error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } + } + + async function handleCreateTemplate(name: string, description: string) { + try { + const res = await createPreset({ + data: { + name, + description, + graph_execution_id: run.id, + }, + }); + + if (res.status === 200) { + toast({ + title: "Template created", + }); + + queryClient.invalidateQueries({ + queryKey: getGetV2ListPresetsQueryKey({ + graph_id: agent.graph_id, + }), + }); + + setIsCreateTemplateModalOpen(false); + } + } catch (error: unknown) { + toast({ + title: "Failed to create template", + description: + error instanceof Error + ? 
error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } + } + + return ( + <> + + + + + + { + e.stopPropagation(); + setIsCreateTemplateModalOpen(true); + }} + className="flex items-center gap-2" + > + Save as template + + + { + e.stopPropagation(); + setShowDeleteDialog(true); + }} + className="flex items-center gap-2" + > + Delete task + + + + + + +
+ + Are you sure you want to delete this task? This action cannot be + undone. + + + + + +
+
+
+ + setIsCreateTemplateModalOpen(false)} + onCreate={handleCreateTemplate} + run={run as any} + /> + + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskListItem.tsx similarity index 80% rename from autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunListItem.tsx rename to autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskListItem.tsx index c038217f72..22adc54e4f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/RunListItem.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TaskListItem.tsx @@ -2,6 +2,7 @@ import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus"; import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; +import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { CheckCircleIcon, ClockIcon, @@ -12,8 +13,9 @@ import { } from "@phosphor-icons/react"; import moment from "moment"; import React from "react"; -import { IconWrapper } from "./RunIconWrapper"; -import { RunSidebarCard } from "./RunSidebarCard"; +import { IconWrapper } from "./IconWrapper"; +import { SidebarItemCard } from "./SidebarItemCard"; +import { TaskActionsDropdown } from "./TaskActionsDropdown"; const statusIconMap: Record = { INCOMPLETE: ( @@ -53,26 +55,33 @@ const statusIconMap: Record = { ), }; -interface RunListItemProps { +interface Props { run: GraphExecutionMeta; title: string; + agent: LibraryAgent; selected?: boolean; onClick?: () => void; + onDeleted?: () => void; } -export function RunListItem({ +export function TaskListItem({ run, title, + agent, selected, onClick, -}: RunListItemProps) { + onDeleted, +}: Props) { return ( - + } /> ); } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateActionsDropdown.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateActionsDropdown.tsx new file mode 100644 index 0000000000..b65e0fd44a --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateActionsDropdown.tsx @@ -0,0 +1,125 @@ +"use client"; + +import { + getGetV2ListPresetsQueryKey, + useDeleteV2DeleteAPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import type { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from 
"@/components/molecules/DropdownMenu/DropdownMenu"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { DotsThreeVertical } from "@phosphor-icons/react"; +import { useQueryClient } from "@tanstack/react-query"; +import { useState } from "react"; + +interface Props { + agent: LibraryAgent; + template: LibraryAgentPreset; + onDeleted?: () => void; +} + +export function TemplateActionsDropdown({ agent, template, onDeleted }: Props) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + const [showDeleteDialog, setShowDeleteDialog] = useState(false); + + const { mutateAsync: deletePreset, isPending: isDeleting } = + useDeleteV2DeleteAPreset(); + + async function handleDelete() { + try { + await deletePreset({ presetId: template.id }); + + toast({ + title: "Template deleted", + }); + + queryClient.invalidateQueries({ + queryKey: getGetV2ListPresetsQueryKey({ + graph_id: agent.graph_id, + }), + }); + + setShowDeleteDialog(false); + onDeleted?.(); + } catch (error: unknown) { + toast({ + title: "Failed to delete template", + description: + error instanceof Error + ? error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } + } + + return ( + <> + + + + + + { + e.stopPropagation(); + setShowDeleteDialog(true); + }} + className="flex items-center gap-2" + > + Delete template + + + + + + +
+ + Are you sure you want to delete this template? This action cannot + be undone. + + + + + +
+
+
+ + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateListItem.tsx new file mode 100644 index 0000000000..c970cd1522 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TemplateListItem.tsx @@ -0,0 +1,46 @@ +"use client"; + +import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import { FileTextIcon } from "@phosphor-icons/react"; +import moment from "moment"; +import { IconWrapper } from "./IconWrapper"; +import { SidebarItemCard } from "./SidebarItemCard"; +import { TemplateActionsDropdown } from "./TemplateActionsDropdown"; + +interface Props { + template: LibraryAgentPreset; + agent: LibraryAgent; + selected?: boolean; + onClick?: () => void; + onDeleted?: () => void; +} + +export function TemplateListItem({ + template, + agent, + selected, + onClick, + onDeleted, +}: Props) { + return ( + + + + } + title={template.name} + description={moment(template.updated_at).fromNow()} + onClick={onClick} + selected={selected} + actions={ + + } + /> + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerActionsDropdown.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerActionsDropdown.tsx new file mode 100644 index 0000000000..35296948c1 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerActionsDropdown.tsx @@ -0,0 +1,125 @@ +"use client"; + +import { + getGetV2ListPresetsQueryKey, + useDeleteV2DeleteAPreset, +} from "@/app/api/__generated__/endpoints/presets/presets"; +import type { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import type { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "@/components/molecules/DropdownMenu/DropdownMenu"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { DotsThreeVertical } from "@phosphor-icons/react"; +import { useQueryClient } from "@tanstack/react-query"; +import { useState } from "react"; + +interface Props { + agent: LibraryAgent; + trigger: LibraryAgentPreset; + onDeleted?: () => void; +} + +export function TriggerActionsDropdown({ agent, trigger, onDeleted }: Props) { + const { toast } = useToast(); + const queryClient = useQueryClient(); + const [showDeleteDialog, setShowDeleteDialog] = useState(false); + + const { mutateAsync: deletePreset, isPending: isDeleting } = + useDeleteV2DeleteAPreset(); + + async function handleDelete() { + try { + await deletePreset({ presetId: trigger.id }); + + toast({ + title: "Trigger deleted", + }); + + queryClient.invalidateQueries({ + 
queryKey: getGetV2ListPresetsQueryKey({ + graph_id: agent.graph_id, + }), + }); + + setShowDeleteDialog(false); + onDeleted?.(); + } catch (error: unknown) { + toast({ + title: "Failed to delete trigger", + description: + error instanceof Error + ? error.message + : "An unexpected error occurred.", + variant: "destructive", + }); + } + } + + return ( + <> + + + + + + { + e.stopPropagation(); + setShowDeleteDialog(true); + }} + className="flex items-center gap-2" + > + Delete trigger + + + + + + +
+ + Are you sure you want to delete this trigger? This action cannot + be undone. + + + + + +
+
+
+ + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerListItem.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerListItem.tsx new file mode 100644 index 0000000000..4c399e640a --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/components/TriggerListItem.tsx @@ -0,0 +1,46 @@ +"use client"; + +import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; +import { LightningIcon } from "@phosphor-icons/react"; +import moment from "moment"; +import { IconWrapper } from "./IconWrapper"; +import { SidebarItemCard } from "./SidebarItemCard"; +import { TriggerActionsDropdown } from "./TriggerActionsDropdown"; + +interface Props { + trigger: LibraryAgentPreset; + agent: LibraryAgent; + selected?: boolean; + onClick?: () => void; + onDeleted?: () => void; +} + +export function TriggerListItem({ + trigger, + agent, + selected, + onClick, + onDeleted, +}: Props) { + return ( + + + + } + title={trigger.name} + description={moment(trigger.updated_at).fromNow()} + onClick={onClick} + selected={selected} + actions={ + + } + /> + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts index eecada463a..7f7155bbdf 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/sidebar/SidebarRunsList/useSidebarRunsList.ts @@ -3,8 +3,10 @@ import { useEffect, useMemo } from "react"; import { useGetV1ListGraphExecutionsInfinite } from "@/app/api/__generated__/endpoints/graphs/graphs"; +import { useGetV2ListPresets } from "@/app/api/__generated__/endpoints/presets/presets"; import { useGetV1ListExecutionSchedulesForAGraph } from "@/app/api/__generated__/endpoints/schedules/schedules"; import type { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; +import type { LibraryAgentPresetResponse } from "@/app/api/__generated__/models/libraryAgentPresetResponse"; import { okData } from "@/app/api/helpers"; import { useExecutionEvents } from "@/hooks/useExecutionEvents"; import { useQueryClient } from "@tanstack/react-query"; @@ -15,19 +17,31 @@ import { getNextRunsPageParam, } from "./helpers"; -function parseTab(value: string | null): "runs" | "scheduled" | "templates" { - if (value === "runs" || value === "scheduled" || value === "templates") { +function parseTab( + value: string | null, +): "runs" | "scheduled" | "templates" | "triggers" { + if ( + value === "runs" || + value === "scheduled" || + value === "templates" || + value === "triggers" + ) { return value; } return "runs"; } type Args = { - graphId?: string; - onSelectRun: (runId: string, tab?: "runs" | "scheduled") => void; + graphId: string; + onSelectRun: ( + runId: string, + tab?: "runs" | "scheduled" | "templates" | 
"triggers", + ) => void; onCountsChange?: (info: { runsCount: number; schedulesCount: number; + templatesCount: number; + triggersCount: number; loading?: boolean; }) => void; }; @@ -46,7 +60,7 @@ export function useSidebarRunsList({ const queryClient = useQueryClient(); const runsQuery = useGetV1ListGraphExecutionsInfinite( - graphId || "", + graphId, { page: 1, page_size: 20 }, { query: { @@ -57,12 +71,19 @@ export function useSidebarRunsList({ }, ); - const schedulesQuery = useGetV1ListExecutionSchedulesForAGraph( - graphId || "", + const schedulesQuery = useGetV1ListExecutionSchedulesForAGraph(graphId, { + query: { + enabled: !!graphId, + select: (r) => okData(r), + }, + }); + + const presetsQuery = useGetV2ListPresets( + { graph_id: graphId, page: 1, page_size: 100 }, { query: { enabled: !!graphId, - select: (r) => okData(r) ?? [], + select: (r) => okData(r)?.presets, }, }, ); @@ -73,10 +94,26 @@ export function useSidebarRunsList({ ); const schedules = schedulesQuery.data || []; + const allPresets = presetsQuery.data || []; + const triggers = useMemo( + () => allPresets.filter((preset) => preset.webhook_id), + [allPresets], + ); + const templates = useMemo( + () => allPresets.filter((preset) => !preset.webhook_id), + [allPresets], + ); const runsCount = computeRunsCount(runsQuery.data, runs.length); const schedulesCount = schedules.length; - const loading = !schedulesQuery.isSuccess || !runsQuery.isSuccess; + const templatesCount = templates.length; + const triggersCount = triggers.length; + const loading = + !runsQuery.isSuccess || + !schedulesQuery.isSuccess || + !presetsQuery.isSuccess; + const stale = + runsQuery.isStale || schedulesQuery.isStale || presetsQuery.isStale; // Update query cache when execution events arrive via websocket useExecutionEvents({ @@ -93,10 +130,24 @@ export function useSidebarRunsList({ // Notify parent about counts and loading state useEffect(() => { - if (onCountsChange) { - onCountsChange({ runsCount, schedulesCount, loading }); + if (onCountsChange && !stale) { + onCountsChange({ + runsCount, + schedulesCount, + templatesCount, + triggersCount, + loading, + }); } - }, [runsCount, schedulesCount, loading, onCountsChange]); + }, [ + onCountsChange, + runsCount, + schedulesCount, + templatesCount, + triggersCount, + loading, + stale, + ]); useEffect(() => { if (runs.length > 0 && tabValue === "runs" && !activeItem) { @@ -111,15 +162,31 @@ export function useSidebarRunsList({ } }, [activeItem, runs.length, schedules, onSelectRun]); + useEffect(() => { + if (templates.length > 0 && tabValue === "templates" && !activeItem) { + onSelectRun(templates[0].id, "templates"); + } + }, [templates, activeItem, tabValue, onSelectRun]); + + useEffect(() => { + if (triggers.length > 0 && tabValue === "triggers" && !activeItem) { + onSelectRun(triggers[0].id, "triggers"); + } + }, [triggers, activeItem, tabValue, onSelectRun]); + return { runs, schedules, - error: schedulesQuery.error || runsQuery.error, + templates, + triggers, + error: schedulesQuery.error || runsQuery.error || presetsQuery.error, loading, runsQuery, tabValue, runsCount, schedulesCount, + templatesCount, + triggersCount, fetchMoreRuns: runsQuery.fetchNextPage, hasMoreRuns: runsQuery.hasNextPage, isFetchingMoreRuns: runsQuery.isFetchingNextPage, diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts 
index 46b9c9abc7..b280400401 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/useNewAgentLibraryView.ts @@ -1,12 +1,23 @@ import { useGetV2GetLibraryAgent } from "@/app/api/__generated__/endpoints/library/library"; +import { useGetV2GetASpecificPreset } from "@/app/api/__generated__/endpoints/presets/presets"; +import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; +import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; +import { LibraryAgentPreset } from "@/app/api/__generated__/models/libraryAgentPreset"; import { okData } from "@/app/api/helpers"; import { useParams } from "next/navigation"; import { parseAsString, useQueryStates } from "nuqs"; import { useCallback, useEffect, useMemo, useState } from "react"; -function parseTab(value: string | null): "runs" | "scheduled" | "templates" { - if (value === "runs" || value === "scheduled" || value === "templates") { +function parseTab( + value: string | null, +): "runs" | "scheduled" | "templates" | "triggers" { + if ( + value === "runs" || + value === "scheduled" || + value === "templates" || + value === "triggers" + ) { return value; } return "runs"; @@ -17,7 +28,7 @@ export function useNewAgentLibraryView() { const agentId = id as string; const { - data: response, + data: agent, isSuccess, error, } = useGetV2GetLibraryAgent(agentId, { @@ -34,6 +45,24 @@ export function useNewAgentLibraryView() { const activeTab = useMemo(() => parseTab(activeTabRaw), [activeTabRaw]); + const { + data: _template, + isSuccess: isTemplateLoaded, + isLoading: isTemplateLoading, + error: templateError, + } = useGetV2GetASpecificPreset(activeItem ?? "", { + query: { + enabled: Boolean(activeTab === "templates" && activeItem), + select: okData, + }, + }); + const activeTemplate = + isTemplateLoaded && + activeTab === "templates" && + _template?.id === activeItem + ? _template + : null; + useEffect(() => { if (!activeTabRaw && !activeItem) { setQueryStates({ @@ -45,6 +74,8 @@ export function useNewAgentLibraryView() { const [sidebarCounts, setSidebarCounts] = useState({ runsCount: 0, schedulesCount: 0, + templatesCount: 0, + triggersCount: 0, }); const [sidebarLoading, setSidebarLoading] = useState(true); @@ -52,7 +83,9 @@ export function useNewAgentLibraryView() { const hasAnyItems = useMemo( () => (sidebarCounts.runsCount ?? 0) > 0 || - (sidebarCounts.schedulesCount ?? 0) > 0, + (sidebarCounts.schedulesCount ?? 0) > 0 || + (sidebarCounts.templatesCount ?? 0) > 0 || + (sidebarCounts.triggersCount ?? 
0) > 0, [sidebarCounts], ); @@ -60,12 +93,27 @@ export function useNewAgentLibraryView() { const showSidebarLayout = sidebarLoading || hasAnyItems; useEffect(() => { - if (response) { - document.title = `${response.name} - Library - AutoGPT Platform`; + if (agent) { + document.title = `${agent.name} - Library - AutoGPT Platform`; } - }, [response]); + }, [agent]); - function handleSelectRun(id: string, tab?: "runs" | "scheduled") { + useEffect(() => { + if ( + activeTab === "triggers" && + sidebarCounts.triggersCount === 0 && + !sidebarLoading + ) { + setQueryStates({ + activeTab: "runs", + }); + } + }, [activeTab, sidebarCounts.triggersCount, sidebarLoading, setQueryStates]); + + function handleSelectRun( + id: string, + tab?: "runs" | "scheduled" | "templates" | "triggers", + ) { setQueryStates({ activeItem: id, activeTab: tab ?? "runs", @@ -78,7 +126,9 @@ export function useNewAgentLibraryView() { }); } - function handleSetActiveTab(tab: "runs" | "scheduled" | "templates") { + function handleSetActiveTab( + tab: "runs" | "scheduled" | "templates" | "triggers", + ) { setQueryStates({ activeTab: tab, }); @@ -88,11 +138,15 @@ export function useNewAgentLibraryView() { (counts: { runsCount: number; schedulesCount: number; + templatesCount: number; + triggersCount: number; loading?: boolean; }) => { setSidebarCounts({ runsCount: counts.runsCount, schedulesCount: counts.schedulesCount, + templatesCount: counts.templatesCount, + triggersCount: counts.triggersCount, }); if (counts.loading !== undefined) { setSidebarLoading(counts.loading); @@ -101,11 +155,46 @@ export function useNewAgentLibraryView() { [], ); + function onItemCreated( + createEvent: + | { type: "runs"; item: GraphExecutionMeta } + | { type: "triggers"; item: LibraryAgentPreset } + | { type: "scheduled"; item: GraphExecutionJobInfo }, + ) { + if (!hasAnyItems) { + // Manually increment item count to flip hasAnyItems and showSidebarLayout + const counts = { + runsCount: createEvent.type === "runs" ? 1 : 0, + triggersCount: createEvent.type === "triggers" ? 1 : 0, + schedulesCount: createEvent.type === "scheduled" ? 
1 : 0, + templatesCount: 0, + }; + handleCountsChange(counts); + } + } + + function onRunInitiated(newRun: GraphExecutionMeta) { + if (!agent) return; + onItemCreated({ item: newRun, type: "runs" }); + } + + function onTriggerSetup(newTrigger: LibraryAgentPreset) { + if (!agent) return; + onItemCreated({ item: newTrigger, type: "triggers" }); + } + + function onScheduleCreated(newSchedule: GraphExecutionJobInfo) { + if (!agent) return; + onItemCreated({ item: newSchedule, type: "scheduled" }); + } + return { agentId: id, + agent, ready: isSuccess, - error, - agent: response, + activeTemplate, + isTemplateLoading, + error: error || templateError, hasAnyItems, showSidebarLayout, activeItem, @@ -115,5 +204,8 @@ export function useNewAgentLibraryView() { handleClearSelectedRun, handleCountsChange, handleSelectRun, + onRunInitiated, + onTriggerSetup, + onScheduleCreated, }; } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx index 0289bbdb5f..5f57032618 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view.tsx @@ -680,28 +680,20 @@ export function AgentRunDraftView({ {/* Regular inputs */} {Object.entries(agentInputFields).map(([key, inputSubSchema]) => ( -
- - - { - setInputValues((obj) => ({ - ...obj, - [key]: value, - })); - setChangedPresetAttributes((prev) => prev.add("inputs")); - }} - data-testid={`agent-input-${key}`} - /> -
+ { + setInputValues((obj) => ({ + ...obj, + [key]: value, + })); + setChangedPresetAttributes((prev) => prev.add("inputs")); + }} + data-testid={`agent-input-${key}`} + /> ))} diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index 9a35a7b465..f8c5563476 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -3662,7 +3662,18 @@ "required": false, "schema": { "anyOf": [ - { "type": "array", "items": { "type": "string" } }, + { + "type": "array", + "items": { + "enum": [ + "blocks", + "integrations", + "marketplace_agents", + "my_agents" + ], + "type": "string" + } + }, { "type": "null" } ], "title": "Filter" @@ -8612,6 +8623,45 @@ "required": ["name", "cron", "inputs"], "title": "ScheduleCreationRequest" }, + "SearchEntry": { + "properties": { + "search_query": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Search Query" + }, + "filter": { + "anyOf": [ + { + "items": { + "type": "string", + "enum": [ + "blocks", + "integrations", + "marketplace_agents", + "my_agents" + ] + }, + "type": "array" + }, + { "type": "null" } + ], + "title": "Filter" + }, + "by_creator": { + "anyOf": [ + { "items": { "type": "string" }, "type": "array" }, + { "type": "null" } + ], + "title": "By Creator" + }, + "search_id": { + "anyOf": [{ "type": "string" }, { "type": "null" }], + "title": "Search Id" + } + }, + "type": "object", + "title": "SearchEntry" + }, "SearchResponse": { "properties": { "items": { @@ -8625,6 +8675,7 @@ "type": "array", "title": "Items" }, + "search_id": { "type": "string", "title": "Search Id" }, "total_items": { "additionalProperties": { "type": "integer" }, "propertyNames": { @@ -8638,11 +8689,10 @@ "type": "object", "title": "Total Items" }, - "page": { "type": "integer", "title": "Page" }, - "more_pages": { "type": "boolean", "title": "More Pages" } + "pagination": { "$ref": "#/components/schemas/Pagination" } }, "type": "object", - "required": ["items", "total_items", "page", "more_pages"], + "required": ["items", "search_id", "total_items", "pagination"], "title": "SearchResponse" }, "SessionDetailResponse": { @@ -9199,7 +9249,7 @@ "title": "Otto Suggestions" }, "recent_searches": { - "items": { "type": "string" }, + "items": { "$ref": "#/components/schemas/SearchEntry" }, "type": "array", "title": "Recent Searches" }, diff --git a/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts b/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts index 09235f9c3b..293c406373 100644 --- a/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts +++ b/autogpt_platform/frontend/src/app/api/proxy/[...path]/route.ts @@ -6,6 +6,10 @@ import { import { environment } from "@/services/environment"; import { NextRequest, NextResponse } from "next/server"; +// Increase body size limit to 256MB to match backend file upload limit +export const maxDuration = 300; // 5 minutes timeout for large uploads +export const dynamic = "force-dynamic"; + function buildBackendUrl(path: string[], queryString: string): string { const backendPath = path.join("/"); return `${environment.getAGPTServerBaseUrl()}/${backendPath}${queryString}`; diff --git a/autogpt_platform/frontend/src/app/providers.tsx b/autogpt_platform/frontend/src/app/providers.tsx index c798f6e487..8ea199abc8 100644 --- a/autogpt_platform/frontend/src/app/providers.tsx +++ b/autogpt_platform/frontend/src/app/providers.tsx @@ -1,36 +1,33 @@ "use client"; -import { 
LaunchDarklyProvider } from "@/services/feature-flags/feature-flag-provider"; -import OnboardingProvider from "@/providers/onboarding/onboarding-provider"; +import { TooltipProvider } from "@/components/atoms/Tooltip/BaseTooltip"; +import { SentryUserTracker } from "@/components/monitor/SentryUserTracker"; import { BackendAPIProvider } from "@/lib/autogpt-server-api/context"; import { getQueryClient } from "@/lib/react-query/queryClient"; -import { QueryClientProvider } from "@tanstack/react-query"; -import { - ThemeProvider as NextThemesProvider, - ThemeProviderProps, -} from "next-themes"; -import { NuqsAdapter } from "nuqs/adapters/next/app"; -import { TooltipProvider } from "@/components/atoms/Tooltip/BaseTooltip"; import CredentialsProvider from "@/providers/agent-credentials/credentials-provider"; -import { SentryUserTracker } from "@/components/monitor/SentryUserTracker"; +import OnboardingProvider from "@/providers/onboarding/onboarding-provider"; +import { LaunchDarklyProvider } from "@/services/feature-flags/feature-flag-provider"; +import { QueryClientProvider } from "@tanstack/react-query"; +import { ThemeProvider, ThemeProviderProps } from "next-themes"; +import { NuqsAdapter } from "nuqs/adapters/next/app"; export function Providers({ children, ...props }: ThemeProviderProps) { const queryClient = getQueryClient(); return ( - - - - - - + + + + + + {children} - - - - - + + + + + ); diff --git a/autogpt_platform/frontend/src/components/__legacy__/ui/skeleton.tsx b/autogpt_platform/frontend/src/components/__legacy__/ui/skeleton.tsx index 869263d5f0..805fedddcc 100644 --- a/autogpt_platform/frontend/src/components/__legacy__/ui/skeleton.tsx +++ b/autogpt_platform/frontend/src/components/__legacy__/ui/skeleton.tsx @@ -6,7 +6,7 @@ function Skeleton({ }: React.HTMLAttributes) { return (
); diff --git a/autogpt_platform/frontend/src/components/atoms/Button/helpers.ts b/autogpt_platform/frontend/src/components/atoms/Button/helpers.ts index db7938661c..4d9706838c 100644 --- a/autogpt_platform/frontend/src/components/atoms/Button/helpers.ts +++ b/autogpt_platform/frontend/src/components/atoms/Button/helpers.ts @@ -16,7 +16,7 @@ export const extendedButtonVariants = cva( primary: "bg-zinc-800 border-zinc-800 text-white hover:bg-zinc-900 hover:border-zinc-900 rounded-full disabled:text-white disabled:bg-zinc-200 disabled:border-zinc-200 disabled:opacity-1", secondary: - "bg-zinc-100 border-zinc-100 text-black hover:bg-zinc-300 hover:border-zinc-300 rounded-full disabled:text-zinc-300 disabled:bg-zinc-50 disabled:border-zinc-50 disabled:opacity-1", + "bg-zinc-200 border-zinc-200 text-black hover:bg-zinc-300 hover:border-zinc-300 rounded-full disabled:text-zinc-300 disabled:bg-zinc-50 disabled:border-zinc-50 disabled:opacity-1", destructive: "bg-red-500 border-red-500 text-white hover:bg-red-600 hover:border-red-600 rounded-full disabled:text-white disabled:bg-zinc-200 disabled:border-zinc-200 disabled:opacity-1", outline: diff --git a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx index 8f855ad47d..d43063b411 100644 --- a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx +++ b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx @@ -266,6 +266,7 @@ export function FileInput(props: Props) { size="small" className="h-7 w-7 min-w-0 flex-shrink-0 border-zinc-300 p-0 text-gray-500 hover:text-red-600 dark:text-gray-400 dark:hover:text-red-500" onClick={handleClear} + type="button" > @@ -278,6 +279,7 @@ export function FileInput(props: Props) { onClick={() => inputRef.current?.click()} className="flex-1 border-zinc-300 text-xs" disabled={isUploading} + type="button" > {`Upload ${displayName}`} @@ -367,6 +369,7 @@ export function FileInput(props: Props) { diff --git a/autogpt_platform/frontend/src/components/molecules/Dialog/components/DrawerWrap.tsx b/autogpt_platform/frontend/src/components/molecules/Dialog/components/DrawerWrap.tsx index b9d3b2e118..d00817bf59 100644 --- a/autogpt_platform/frontend/src/components/molecules/Dialog/components/DrawerWrap.tsx +++ b/autogpt_platform/frontend/src/components/molecules/Dialog/components/DrawerWrap.tsx @@ -1,4 +1,6 @@ import { Button } from "@/components/__legacy__/ui/button"; +import { scrollbarStyles } from "@/components/styles/scrollbars"; +import { cn } from "@/lib/utils"; import { X } from "@phosphor-icons/react"; import { PropsWithChildren } from "react"; import { Drawer } from "vaul"; @@ -41,7 +43,7 @@ export function DrawerWrap({ onInteractOutside={handleClose} >
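The type="button" additions in the FileInput diff above guard against an HTML default: a button inside a form is type="submit" unless told otherwise, so the clear and upload buttons would otherwise submit any enclosing form. A minimal sketch of the failure mode, with a hypothetical UploadRow standing in for the actual FileInput markup:

import React from "react";

// Inside a <form>, <button> defaults to type="submit".
function UploadRow({ onClear }: { onClear: () => void }) {
  return (
    <form onSubmit={(e) => e.preventDefault()}>
      <input name="file" type="file" />
      {/* The explicit type keeps this button from submitting the form. */}
      <button type="button" onClick={onClear}>
        Clear
      </button>
      <button type="submit">Save</button>
    </form>
  );
}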
@@ -61,7 +63,16 @@ export function DrawerWrap({ ) ) : null}
-
{children}
+
+
+ {children} +
+
); diff --git a/autogpt_platform/frontend/src/components/molecules/Dialog/components/styles.ts b/autogpt_platform/frontend/src/components/molecules/Dialog/components/styles.ts index b04dcdd193..3b7d12e8e9 100644 --- a/autogpt_platform/frontend/src/components/molecules/Dialog/components/styles.ts +++ b/autogpt_platform/frontend/src/components/molecules/Dialog/components/styles.ts @@ -19,5 +19,5 @@ export const modalStyles = { // Drawer specific styles export const drawerStyles = { ...commonStyles, - content: `${commonStyles.content} max-h-[90vh] w-full bottom-0 rounded-br-none rounded-bl-none`, + content: `${commonStyles.content} max-h-[90vh] w-full bottom-0 rounded-br-none rounded-bl-none min-h-0`, }; diff --git a/autogpt_platform/frontend/src/components/molecules/InformationTooltip/InformationTooltip.tsx b/autogpt_platform/frontend/src/components/molecules/InformationTooltip/InformationTooltip.tsx index 4bf9a76b94..36184f08c4 100644 --- a/autogpt_platform/frontend/src/components/molecules/InformationTooltip/InformationTooltip.tsx +++ b/autogpt_platform/frontend/src/components/molecules/InformationTooltip/InformationTooltip.tsx @@ -9,16 +9,20 @@ import ReactMarkdown from "react-markdown"; type Props = { description?: string; + iconSize?: number; }; -export function InformationTooltip({ description }: Props) { +export function InformationTooltip({ description, iconSize = 24 }: Props) { if (!description) return null; return ( - + ; + +export default meta; +type Story = StoryObj; + +function ScrollableTabsDemo() { + return ( +
+

ScrollableTabs Examples

+ +
+
+

+ Short Content (Tabs Hidden) +

+
+ + + + Account + + + Password + + + Settings + + + +
+ Make changes to your account here. Click save when you're + done. +
+
+ +
+ Change your password here. After saving, you'll be logged + out. +
+
+ +
+ Update your preferences and settings here. +
+
+
+
+
+ +
+

+ Long Content (Tabs Visible) +

+
+ + + + Account + + + Password + + + Settings + + + +
+

+ Account Settings +

+

+ Make changes to your account here. Click save when + you're done. +

+

+ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed + do eiusmod tempor incididunt ut labore et dolore magna + aliqua. Ut enim ad minim veniam, quis nostrud exercitation + ullamco laboris. +

+

+ Duis aute irure dolor in reprehenderit in voluptate velit + esse cillum dolore eu fugiat nulla pariatur. Excepteur sint + occaecat cupidatat non proident. +

+

+ Sed ut perspiciatis unde omnis iste natus error sit + voluptatem accusantium doloremque laudantium, totam rem + aperiam. +

+
+
+ +
+

+ Password Settings +

+

+ Change your password here. After saving, you'll be + logged out. +

+

+ At vero eos et accusamus et iusto odio dignissimos ducimus + qui blanditiis praesentium voluptatum deleniti atque + corrupti quos dolores et quas molestias excepturi sint + occaecati cupiditate. +

+

+ Et harum quidem rerum facilis est et expedita distinctio. + Nam libero tempore, cum soluta nobis est eligendi optio + cumque nihil impedit quo minus. +

+

+ Temporibus autem quibusdam et aut officiis debitis aut rerum + necessitatibus saepe eveniet ut et voluptates repudiandae + sint. +

+
+
+ +
+

+ General Settings +

+

+ Update your preferences and settings here. +

+

+ Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut + odit aut fugit, sed quia consequuntur magni dolores eos qui + ratione voluptatem sequi nesciunt. +

+

+ Neque porro quisquam est, qui dolorem ipsum quia dolor sit + amet, consectetur, adipisci velit, sed quia non numquam eius + modi tempora incidunt ut labore et dolore magnam aliquam + quaerat voluptatem. +

+

+ Ut enim ad minima veniam, quis nostrum exercitationem ullam + corporis suscipit laboriosam, nisi ut aliquid ex ea commodi + consequatur. +

+
+
+
+
+
+ +
+

Many Tabs

+
+ + + + Overview + + + Analytics + + + Reports + + + Notifications + + + Integrations + + + Billing + + + +
+

+ Dashboard Overview +

+

+ Dashboard overview with key metrics and recent activity. +

+

+ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed + do eiusmod tempor incididunt ut labore et dolore magna + aliqua. +

+

+ Ut enim ad minim veniam, quis nostrud exercitation ullamco + laboris nisi ut aliquip ex ea commodo consequat. +

+
+
+ +
+

Analytics

+

+ Detailed analytics and performance metrics. +

+

+ Duis aute irure dolor in reprehenderit in voluptate velit + esse cillum dolore eu fugiat nulla pariatur. +

+

+ Excepteur sint occaecat cupidatat non proident, sunt in + culpa qui officia deserunt mollit anim id est laborum. +

+
+
+ +
+

Reports

+

+ Generate and view reports for your account. +

+

+ Sed ut perspiciatis unde omnis iste natus error sit + voluptatem accusantium doloremque laudantium. +

+

+ Totam rem aperiam, eaque ipsa quae ab illo inventore + veritatis et quasi architecto beatae vitae dicta sunt + explicabo. +

+
+
+ +
+

Notifications

+

Manage your notification preferences.

+

+ Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut + odit aut fugit. +

+

+ Sed quia consequuntur magni dolores eos qui ratione + voluptatem sequi nesciunt. +

+
+
+ +
+

Integrations

+

+ Connect and manage third-party integrations. +

+

+ Neque porro quisquam est, qui dolorem ipsum quia dolor sit + amet. +

+

+ Consectetur, adipisci velit, sed quia non numquam eius modi + tempora incidunt. +

+
+
+ +
+

Billing

+

+ View and manage your billing information. +

+

+ Ut enim ad minima veniam, quis nostrum exercitationem ullam + corporis suscipit laboriosam. +

+

+ Nisi ut aliquid ex ea commodi consequatur? Quis autem vel + eum iure reprehenderit qui in ea voluptate velit esse. +

+
+
+
+
+
+
+
+ ); +} + +export const Default = { + render: () => , +} satisfies Story; + +export const ShortContent = { + render: () => ( +
+
+ + + + Account + + + Password + + + +
+ Make changes to your account here. Click save when you're + done. +
+
+ +
+ Change your password here. After saving, you'll be logged + out. +
+
+
+
+
+ ), +} satisfies Story; + +export const LongContent = { + render: () => ( +
+
+ + + Account + Password + Settings + + +
+

Account Settings

+

+ Make changes to your account here. Click save when you're + done. +

+

+ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do + eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut + enim ad minim veniam, quis nostrud exercitation ullamco laboris + nisi ut aliquip ex ea commodo consequat. +

+

+ Duis aute irure dolor in reprehenderit in voluptate velit esse + cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat + cupidatat non proident, sunt in culpa qui officia deserunt + mollit anim id est laborum. +

+

+ Sed ut perspiciatis unde omnis iste natus error sit voluptatem + accusantium doloremque laudantium, totam rem aperiam, eaque ipsa + quae ab illo inventore veritatis et quasi architecto beatae + vitae dicta sunt explicabo. +

+

+ Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit + aut fugit, sed quia consequuntur magni dolores eos qui ratione + voluptatem sequi nesciunt. +

+
+
+ +
+

Password Settings

+

+ Change your password here. After saving, you'll be logged + out. +

+

+ At vero eos et accusamus et iusto odio dignissimos ducimus qui + blanditiis praesentium voluptatum deleniti atque corrupti quos + dolores et quas molestias excepturi sint occaecati cupiditate + non provident. +

+

+ Similique sunt in culpa qui officia deserunt mollitia animi, id + est laborum et dolorum fuga. Et harum quidem rerum facilis est + et expedita distinctio. +

+

+ Nam libero tempore, cum soluta nobis est eligendi optio cumque + nihil impedit quo minus id quod maxime placeat facere possimus, + omnis voluptas assumenda est, omnis dolor repellendus. +

+

+ Temporibus autem quibusdam et aut officiis debitis aut rerum + necessitatibus saepe eveniet ut et voluptates repudiandae sint + et molestiae non recusandae. +

+
+
+ +
+

General Settings

+

Update your preferences and settings here.

+

+ Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, + consectetur, adipisci velit, sed quia non numquam eius modi + tempora incidunt ut labore et dolore magnam aliquam quaerat + voluptatem. +

+

+ Ut enim ad minima veniam, quis nostrum exercitationem ullam + corporis suscipit laboriosam, nisi ut aliquid ex ea commodi + consequatur? Quis autem vel eum iure reprehenderit qui in ea + voluptate velit esse quam nihil molestiae consequatur. +

+

+ Vel illum qui dolorem eum fugiat quo voluptas nulla pariatur? At + vero eos et accusamus et iusto odio dignissimos ducimus qui + blanditiis praesentium voluptatum deleniti atque corrupti quos + dolores. +

+

+ Et quas molestias excepturi sint occaecati cupiditate non + provident, similique sunt in culpa qui officia deserunt mollitia + animi, id est laborum et dolorum fuga. +

+
+
+
+
+
+ ), +} satisfies Story; diff --git a/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/ScrollableTabs.tsx b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/ScrollableTabs.tsx new file mode 100644 index 0000000000..bdbfa3cd4f --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/ScrollableTabs.tsx @@ -0,0 +1,59 @@ +"use client"; + +import { cn } from "@/lib/utils"; +import { Children } from "react"; +import { ScrollableTabsContent } from "./components/ScrollableTabsContent"; +import { ScrollableTabsList } from "./components/ScrollableTabsList"; +import { ScrollableTabsTrigger } from "./components/ScrollableTabsTrigger"; +import { ScrollableTabsContext } from "./context"; +import { findContentElements, findListElement } from "./helpers"; +import { useScrollableTabsInternal } from "./useScrollableTabs"; + +interface Props { + children?: React.ReactNode; + className?: string; + defaultValue?: string; +} + +export function ScrollableTabs({ children, className, defaultValue }: Props) { + const { + activeValue, + setActiveValue, + registerContent, + scrollToSection, + scrollContainer, + contentContainerRef, + } = useScrollableTabsInternal({ defaultValue }); + + const childrenArray = Children.toArray(children); + const listElement = findListElement(childrenArray); + const contentElements = findContentElements(childrenArray); + + return ( + +
+      {listElement}
+      <div
+        ref={(node) => {
+          if (contentContainerRef) {
+            contentContainerRef.current = node;
+          }
+        }}
+        className="max-h-[64rem] overflow-y-auto scrollbar-thin scrollbar-track-transparent scrollbar-thumb-zinc-300 dark:scrollbar-thumb-zinc-700"
+      >
+        {contentElements}
+      </div>
+    </div>
+  </ScrollableTabsContext.Provider>
+ ); +} + +export { ScrollableTabsContent, ScrollableTabsList, ScrollableTabsTrigger }; diff --git a/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsContent.tsx b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsContent.tsx new file mode 100644 index 0000000000..4027e87cfa --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsContent.tsx @@ -0,0 +1,48 @@ +"use client"; + +import { cn } from "@/lib/utils"; +import * as React from "react"; +import { useScrollableTabs } from "../context"; + +interface Props extends React.HTMLAttributes { + value: string; +} + +export const ScrollableTabsContent = React.forwardRef( + function ScrollableTabsContent( + { className, value, children, ...props }, + ref, + ) { + const { registerContent } = useScrollableTabs(); + const contentRef = React.useRef(null); + + React.useEffect(() => { + if (contentRef.current) { + registerContent(value, contentRef.current); + } + return () => { + registerContent(value, null); + }; + }, [value, registerContent]); + + return ( +
{ + if (typeof ref === "function") ref(node); + else if (ref) ref.current = node; + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + contentRef.current = node; + }} + data-scrollable-tab-content + data-value={value} + className={cn("focus-visible:outline-none", className)} + {...props} + > + {children} +
+ ); + }, +); + +ScrollableTabsContent.displayName = "ScrollableTabsContent"; diff --git a/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsList.tsx b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsList.tsx new file mode 100644 index 0000000000..496a91ec5a --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsList.tsx @@ -0,0 +1,52 @@ +"use client"; + +import { cn } from "@/lib/utils"; +import * as React from "react"; +import { useScrollableTabs } from "../context"; + +export const ScrollableTabsList = React.forwardRef< + HTMLDivElement, + React.HTMLAttributes +>(function ScrollableTabsList({ className, children, ...props }, ref) { + const { activeValue } = useScrollableTabs(); + const [activeTabElement, setActiveTabElement] = + React.useState(null); + + React.useEffect(() => { + const activeButton = Array.from( + document.querySelectorAll( + '[data-scrollable-tab-trigger][data-value="' + activeValue + '"]', + ), + )[0]; + + if (activeButton) { + setActiveTabElement(activeButton); + } + }, [activeValue]); + + return ( +
+
+ {children} +
+ {activeTabElement && ( +
+ )} +
+ ); +}); + +ScrollableTabsList.displayName = "ScrollableTabsList"; diff --git a/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsTrigger.tsx b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsTrigger.tsx new file mode 100644 index 0000000000..41367264d8 --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/components/ScrollableTabsTrigger.tsx @@ -0,0 +1,53 @@ +"use client"; + +import { cn } from "@/lib/utils"; +import * as React from "react"; +import { useScrollableTabs } from "../context"; + +interface Props extends React.ButtonHTMLAttributes { + value: string; +} + +export const ScrollableTabsTrigger = React.forwardRef( + function ScrollableTabsTrigger( + { className, value, children, ...props }, + ref, + ) { + const { activeValue, scrollToSection } = useScrollableTabs(); + const elementRef = React.useRef(null); + const isActive = activeValue === value; + + function handleClick(e: React.MouseEvent) { + e.preventDefault(); + e.stopPropagation(); + scrollToSection(value); + props.onClick?.(e); + } + + return ( + + ); + }, +); + +ScrollableTabsTrigger.displayName = "ScrollableTabsTrigger"; diff --git a/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/context.ts b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/context.ts new file mode 100644 index 0000000000..080ae3702c --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/context.ts @@ -0,0 +1,22 @@ +import * as React from "react"; +import { createContext, useContext } from "react"; + +interface ScrollableTabsContextValue { + activeValue: string | null; + setActiveValue: React.Dispatch>; + registerContent: (value: string, element: HTMLElement | null) => void; + scrollToSection: (value: string) => void; + scrollContainer: HTMLElement | null; +} + +export const ScrollableTabsContext = createContext< + ScrollableTabsContextValue | undefined +>(undefined); + +export function useScrollableTabs() { + const context = useContext(ScrollableTabsContext); + if (!context) { + throw new Error("useScrollableTabs must be used within a ScrollableTabs"); + } + return context; +} diff --git a/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/helpers.ts b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/helpers.ts new file mode 100644 index 0000000000..651e4c84fd --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/helpers.ts @@ -0,0 +1,48 @@ +import * as React from "react"; + +const HEADER_OFFSET = 100; + +export function calculateScrollPosition( + elementRect: DOMRect, + containerRect: DOMRect, + currentScrollTop: number, +): number { + const elementTopRelativeToContainer = + elementRect.top - containerRect.top + currentScrollTop - HEADER_OFFSET; + + return Math.max(0, elementTopRelativeToContainer); +} + +function hasDisplayName( + type: unknown, + displayName: string, +): type is { displayName: string } { + return ( + typeof type === "object" && + type !== null && + "displayName" in type && + (type as { displayName: unknown }).displayName === displayName + ); +} + +export function findListElement( + children: React.ReactNode[], +): React.ReactElement | undefined { + return children.find( + (child) => + React.isValidElement(child) && + hasDisplayName(child.type, "ScrollableTabsList"), + ) as React.ReactElement | undefined; +} + +export function findContentElements( + children: React.ReactNode[], +): 
React.ReactNode[] { + return children.filter( + (child) => + !( + React.isValidElement(child) && + hasDisplayName(child.type, "ScrollableTabsList") + ), + ); +} diff --git a/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/useScrollableTabs.ts b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/useScrollableTabs.ts new file mode 100644 index 0000000000..5043f1047e --- /dev/null +++ b/autogpt_platform/frontend/src/components/molecules/ScrollableTabs/useScrollableTabs.ts @@ -0,0 +1,60 @@ +import { useCallback, useRef, useState } from "react"; +import { calculateScrollPosition } from "./helpers"; + +interface Args { + defaultValue?: string; +} + +export function useScrollableTabsInternal({ defaultValue }: Args) { + const [activeValue, setActiveValue] = useState( + defaultValue || null, + ); + const contentRefs = useRef>(new Map()); + const contentContainerRef = useRef(null); + + function registerContent(value: string, element: HTMLElement | null) { + if (element) { + contentRefs.current.set(value, element); + } else { + contentRefs.current.delete(value); + } + } + + function scrollToSection(value: string) { + const element = contentRefs.current.get(value); + const scrollContainer = contentContainerRef.current; + if (!element || !scrollContainer) return; + + setActiveValue(value); + + const containerRect = scrollContainer.getBoundingClientRect(); + const elementRect = element.getBoundingClientRect(); + const currentScrollTop = scrollContainer.scrollTop; + const scrollTop = calculateScrollPosition( + elementRect, + containerRect, + currentScrollTop, + ); + + const maxScrollTop = + scrollContainer.scrollHeight - scrollContainer.clientHeight; + const clampedScrollTop = Math.min(Math.max(0, scrollTop), maxScrollTop); + + scrollContainer.scrollTo({ + top: clampedScrollTop, + behavior: "smooth", + }); + } + + const memoizedRegisterContent = useCallback(registerContent, []); + const memoizedScrollToSection = useCallback(scrollToSection, []); + + return { + activeValue, + setActiveValue, + registerContent: memoizedRegisterContent, + scrollToSection: memoizedScrollToSection, + scrollContainer: contentContainerRef.current, + contentContainerRef, + }; +} diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/AnyOfField/AnyOfField.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/AnyOfField/AnyOfField.tsx index 7fb3d9c938..79fa15304d 100644 --- a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/AnyOfField/AnyOfField.tsx +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/AnyOfField/AnyOfField.tsx @@ -23,6 +23,7 @@ import { TooltipTrigger, } from "@/components/atoms/Tooltip/BaseTooltip"; import { cn } from "@/lib/utils"; +import { BlockUIType } from "@/app/(platform)/build/components/types"; type TypeOption = { type: string; @@ -47,7 +48,14 @@ export const AnyOfField = ({ onBlur, onFocus, }: FieldProps) => { - const handleId = generateHandleId(idSchema.$id ?? ""); + const handleId = + formContext.uiType === BlockUIType.AGENT + ? (idSchema.$id ?? "") + .split("_") + .filter((p) => p !== "root" && p !== "properties" && p.length > 0) + .join("_") || "" + : generateHandleId(idSchema.$id ?? 
""); + const updatedFormContexrt = { ...formContext, fromAnyOf: true }; const { nodeId, showHandles = true } = updatedFormContexrt; diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/SelectCredential.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/SelectCredential.tsx index 602c9b0e2d..55081332aa 100644 --- a/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/SelectCredential.tsx +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/fields/CredentialField/SelectCredential.tsx @@ -84,7 +84,7 @@ export const SelectCredential: React.FC = ({ rel="noopener noreferrer" variant="outline" size="icon" - className="h-8 w-8 border-zinc-300 p-0" + className="h-8 w-8 !min-w-8 border-zinc-300 p-0" > diff --git a/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx b/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx index a056782939..ebc8a1f038 100644 --- a/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx +++ b/autogpt_platform/frontend/src/components/renderers/input-renderer/templates/FieldTemplate.tsx @@ -58,7 +58,15 @@ const FieldTemplate: React.FC = ({ let handleId = null; if (!isArrayItem) { - handleId = generateHandleId(fieldId); + if (uiType === BlockUIType.AGENT) { + const parts = fieldId.split("_"); + const filtered = parts.filter( + (p) => p !== "root" && p !== "properties" && p.length > 0, + ); + handleId = filtered.join("_") || ""; + } else { + handleId = generateHandleId(fieldId); + } } else { handleId = arrayFieldHandleId; } diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts index 3b0666bf62..682fc14108 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts @@ -910,7 +910,37 @@ export default class BackendAPI { reject(new Error("Invalid JSON response")); } } else { - reject(new Error(`HTTP ${xhr.status}: ${xhr.statusText}`)); + // Handle file size errors with user-friendly message + if (xhr.status === 413) { + reject(new Error("File is too large — max size is 256MB")); + return; + } + + // Try to parse error response for better messages + let errorMessage = `Upload failed (${xhr.status})`; + try { + const errorData = JSON.parse(xhr.responseText); + if (errorData.detail) { + if ( + typeof errorData.detail === "string" && + errorData.detail.includes("exceeds the maximum") + ) { + const match = errorData.detail.match( + /maximum allowed size of (\d+)MB/, + ); + const maxSize = match ? 
match[1] : "256"; + errorMessage = `File is too large — max size is ${maxSize}MB`; + } else if (typeof errorData.detail === "string") { + errorMessage = errorData.detail; + } + } else if (errorData.error) { + errorMessage = errorData.error; + } + } catch { + // Keep default message if parsing fails + } + + reject(new Error(errorMessage)); } }); diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/helpers.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/helpers.ts index 7e20783042..4cb24df77d 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/helpers.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/helpers.ts @@ -184,6 +184,11 @@ export function serializeRequestBody( } export async function parseApiError(response: Response): Promise { + // Handle 413 Payload Too Large with user-friendly message + if (response.status === 413) { + return "File is too large — max size is 256MB"; + } + try { const errorData = await response.clone().json(); @@ -205,6 +210,16 @@ export async function parseApiError(response: Response): Promise { return response.statusText; // Fallback to status text if no message } + // Check for file size error from backend + if ( + typeof errorData.detail === "string" && + errorData.detail.includes("exceeds the maximum") + ) { + const match = errorData.detail.match(/maximum allowed size of (\d+)MB/); + const maxSize = match ? match[1] : "256"; + return `File is too large — max size is ${maxSize}MB`; + } + return errorData.detail || errorData.error || response.statusText; } catch { return response.statusText; diff --git a/autogpt_platform/frontend/src/tests/pages/build.page.ts b/autogpt_platform/frontend/src/tests/pages/build.page.ts index 709b0ef3ed..8acc9a8f40 100644 --- a/autogpt_platform/frontend/src/tests/pages/build.page.ts +++ b/autogpt_platform/frontend/src/tests/pages/build.page.ts @@ -472,14 +472,44 @@ export class BuildPage extends BasePage { ); } - async getGithubTriggerBlockDetails(): Promise { - return { - id: "6c60ec01-8128-419e-988f-96a063ee2fea", - name: "Github Trigger", - description: - "This block triggers on pull request events and outputs the event type and payload.", - type: "Standard", - }; + async getGithubTriggerBlockDetails(): Promise { + return [ + { + id: "6c60ec01-8128-419e-988f-96a063ee2fea", + name: "Github Trigger", + description: + "This block triggers on pull request events and outputs the event type and payload.", + type: "Standard", + }, + { + id: "551e0a35-100b-49b7-89b8-3031322239b6", + name: "Github Star Trigger", + description: + "This block triggers on star events and outputs the event type and payload.", + type: "Standard", + }, + { + id: "2052dd1b-74e1-46ac-9c87-c7a0e057b60b", + name: "Github Release Trigger", + description: + "This block triggers on release events and outputs the event type and payload.", + type: "Standard", + }, + { + id: "b2605464-e486-4bf4-aad3-d8a213c8a48a", + name: "Github Issue Trigger", + description: + "This block triggers on issue events and outputs the event type and payload.", + type: "Standard", + }, + { + id: "87f847b3-d81a-424e-8e89-acadb5c9d52b", + name: "Github Discussion Trigger", + description: + "This block triggers on discussion events and outputs the event type and payload.", + type: "Standard", + }, + ]; } async nextTutorialStep(): Promise { @@ -488,7 +518,9 @@ export class BuildPage extends BasePage { } async getBlocksToSkip(): Promise { - return [(await this.getGithubTriggerBlockDetails()).id]; + return [ + (await 
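The 413 mapping and the "exceeds the maximum" regex are now duplicated between client.ts and helpers.ts. A sketch of how that shared piece could be factored out; this function is not in the diff, and the backend message format is assumed from the regex above:

```ts
// Hypothetical consolidation (not part of the diff) of the file-size error
// mapping that client.ts and helpers.ts each implement inline. Returns null
// when the detail string is not a file-size error.
export function friendlyFileSizeError(detail: unknown): string | null {
  if (typeof detail !== "string" || !detail.includes("exceeds the maximum")) {
    return null;
  }
  // Assumes backend messages like "... exceeds the maximum allowed size of 256MB"
  const match = detail.match(/maximum allowed size of (\d+)MB/);
  const maxSize = match ? match[1] : "256";
  return `File is too large — max size is ${maxSize}MB`;
}

// friendlyFileSizeError("File exceeds the maximum allowed size of 512MB")
//   === "File is too large — max size is 512MB"
```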
diff --git a/autogpt_platform/frontend/src/tests/pages/build.page.ts b/autogpt_platform/frontend/src/tests/pages/build.page.ts
index 709b0ef3ed..8acc9a8f40 100644
--- a/autogpt_platform/frontend/src/tests/pages/build.page.ts
+++ b/autogpt_platform/frontend/src/tests/pages/build.page.ts
@@ -472,14 +472,44 @@ export class BuildPage extends BasePage {
     );
   }
 
-  async getGithubTriggerBlockDetails(): Promise<Block> {
-    return {
-      id: "6c60ec01-8128-419e-988f-96a063ee2fea",
-      name: "Github Trigger",
-      description:
-        "This block triggers on pull request events and outputs the event type and payload.",
-      type: "Standard",
-    };
+  async getGithubTriggerBlockDetails(): Promise<Block[]> {
+    return [
+      {
+        id: "6c60ec01-8128-419e-988f-96a063ee2fea",
+        name: "Github Trigger",
+        description:
+          "This block triggers on pull request events and outputs the event type and payload.",
+        type: "Standard",
+      },
+      {
+        id: "551e0a35-100b-49b7-89b8-3031322239b6",
+        name: "Github Star Trigger",
+        description:
+          "This block triggers on star events and outputs the event type and payload.",
+        type: "Standard",
+      },
+      {
+        id: "2052dd1b-74e1-46ac-9c87-c7a0e057b60b",
+        name: "Github Release Trigger",
+        description:
+          "This block triggers on release events and outputs the event type and payload.",
+        type: "Standard",
+      },
+      {
+        id: "b2605464-e486-4bf4-aad3-d8a213c8a48a",
+        name: "Github Issue Trigger",
+        description:
+          "This block triggers on issue events and outputs the event type and payload.",
+        type: "Standard",
+      },
+      {
+        id: "87f847b3-d81a-424e-8e89-acadb5c9d52b",
+        name: "Github Discussion Trigger",
+        description:
+          "This block triggers on discussion events and outputs the event type and payload.",
+        type: "Standard",
+      },
+    ];
   }
 
   async nextTutorialStep(): Promise<void> {
@@ -488,7 +518,9 @@ export class BuildPage extends BasePage {
   async getBlocksToSkip(): Promise<string[]> {
-    return [(await this.getGithubTriggerBlockDetails()).id];
+    return [
+      (await this.getGithubTriggerBlockDetails()).map((b) => b.id),
+    ].flat();
   }
 
   async createDummyAgent() {
diff --git a/autogpt_platform/frontend/tailwind.config.ts b/autogpt_platform/frontend/tailwind.config.ts
index 2a0d039b1a..ab3ea9bc74 100644
--- a/autogpt_platform/frontend/tailwind.config.ts
+++ b/autogpt_platform/frontend/tailwind.config.ts
@@ -1,10 +1,10 @@
+import scrollbar from "tailwind-scrollbar";
 import type { Config } from "tailwindcss";
 import tailwindcssAnimate from "tailwindcss-animate";
-import scrollbar from "tailwind-scrollbar";
 import { colors } from "./src/components/styles/colors";
 
 const config = {
-  darkMode: ["class"],
+  darkMode: ["class", ".dark-mode"], // ignore dark: prefix classes for now until we fully support dark mode
   content: ["./src/**/*.{ts,tsx}"],
   prefix: "",
   theme: {
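One way to read the darkMode change: with `["class", ".dark-mode"]`, Tailwind's selector strategy only activates `dark:` variants inside an element carrying the custom `.dark-mode` class instead of the default `.dark`, which is why `dark:`-prefixed classes stay inert until dark mode is fully supported. A minimal sketch of the toggle that would eventually enable it; this function is hypothetical and not part of the diff:

```ts
// Hypothetical toggle (not in the diff): dark: variants remain inert until
// something adds the custom .dark-mode class that tailwind.config.ts now expects.
export function setDarkMode(enabled: boolean): void {
  document.documentElement.classList.toggle("dark-mode", enabled);
}
```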