diff --git a/.claude/commands/add-integration.md b/.claude/commands/add-integration.md index 12350ccd5..c3221b2e9 100644 --- a/.claude/commands/add-integration.md +++ b/.claude/commands/add-integration.md @@ -206,10 +206,15 @@ export const {Service}Block: BlockConfig = { } ``` -**Critical:** -- `canonicalParamId` must NOT match any other subblock's `id`, must be unique per block, and should only be used to link basic/advanced alternatives for the same parameter. -- `mode` only controls UI visibility, NOT serialization. Without `canonicalParamId`, both basic and advanced field values would be sent. -- Every subblock `id` must be unique within the block. Duplicate IDs cause conflicts even with different conditions. +**Critical Canonical Param Rules:** +- `canonicalParamId` must NOT match any subblock's `id` in the block +- `canonicalParamId` must be unique per operation/condition context +- Only use `canonicalParamId` to link basic/advanced alternatives for the same logical parameter +- `mode` only controls UI visibility, NOT serialization. Without `canonicalParamId`, both basic and advanced field values would be sent +- Every subblock `id` must be unique within the block. 
Duplicate IDs cause conflicts even with different conditions +- **Required consistency:** If one subblock in a canonical group has `required: true`, ALL subblocks in that group must have `required: true` (prevents bypassing validation by switching modes) +- **Inputs section:** Must list canonical param IDs (e.g., `fileId`), NOT raw subblock IDs (e.g., `fileSelector`, `manualFileId`) +- **Params function:** Must use canonical param IDs, NOT raw subblock IDs (raw IDs are deleted after canonical transformation) ## Step 4: Add Icon diff --git a/.claude/rules/sim-integrations.md b/.claude/rules/sim-integrations.md index 825acce5d..7a1c70017 100644 --- a/.claude/rules/sim-integrations.md +++ b/.claude/rules/sim-integrations.md @@ -157,6 +157,36 @@ dependsOn: { all: ['authMethod'], any: ['credential', 'botToken'] } - `'both'` - Show in both modes (default) - `'trigger'` - Only when block is used as trigger +### `canonicalParamId` - Link basic/advanced alternatives + +Use to map multiple UI inputs to a single logical parameter: + +```typescript +// Basic mode: Visual selector +{ + id: 'fileSelector', + type: 'file-selector', + mode: 'basic', + canonicalParamId: 'fileId', + required: true, +}, +// Advanced mode: Manual input +{ + id: 'manualFileId', + type: 'short-input', + mode: 'advanced', + canonicalParamId: 'fileId', + required: true, +}, +``` + +**Critical Rules:** +- `canonicalParamId` must NOT match any subblock's `id` +- `canonicalParamId` must be unique per operation/condition context +- **Required consistency:** All subblocks in a canonical group must have the same `required` status +- **Inputs section:** Must list canonical param IDs (e.g., `fileId`), NOT raw subblock IDs +- **Params function:** Must use canonical param IDs (raw IDs are deleted after canonical transformation) + **Register in `blocks/registry.ts`:** ```typescript diff --git a/.cursor/rules/sim-integrations.mdc b/.cursor/rules/sim-integrations.mdc index 20edc82e1..309cd6a4e 100644 --- 
a/.cursor/rules/sim-integrations.mdc +++ b/.cursor/rules/sim-integrations.mdc @@ -155,6 +155,36 @@ dependsOn: { all: ['authMethod'], any: ['credential', 'botToken'] } - `'both'` - Show in both modes (default) - `'trigger'` - Only when block is used as trigger +### `canonicalParamId` - Link basic/advanced alternatives + +Use to map multiple UI inputs to a single logical parameter: + +```typescript +// Basic mode: Visual selector +{ + id: 'fileSelector', + type: 'file-selector', + mode: 'basic', + canonicalParamId: 'fileId', + required: true, +}, +// Advanced mode: Manual input +{ + id: 'manualFileId', + type: 'short-input', + mode: 'advanced', + canonicalParamId: 'fileId', + required: true, +}, +``` + +**Critical Rules:** +- `canonicalParamId` must NOT match any subblock's `id` +- `canonicalParamId` must be unique per operation/condition context +- **Required consistency:** All subblocks in a canonical group must have the same `required` status +- **Inputs section:** Must list canonical param IDs (e.g., `fileId`), NOT raw subblock IDs +- **Params function:** Must use canonical param IDs (raw IDs are deleted after canonical transformation) + **Register in `blocks/registry.ts`:** ```typescript diff --git a/apps/docs/components/ui/icon-mapping.ts b/apps/docs/components/ui/icon-mapping.ts index 308902d64..c7a766f6c 100644 --- a/apps/docs/components/ui/icon-mapping.ts +++ b/apps/docs/components/ui/icon-mapping.ts @@ -163,9 +163,9 @@ export const blockTypeToIconMap: Record = { elevenlabs: ElevenLabsIcon, enrich: EnrichSoIcon, exa: ExaAIIcon, - file_v2: DocumentIcon, + file_v3: DocumentIcon, firecrawl: FirecrawlIcon, - fireflies: FirefliesIcon, + fireflies_v2: FirefliesIcon, github_v2: GithubIcon, gitlab: GitLabIcon, gmail_v2: GmailIcon, @@ -177,7 +177,7 @@ export const blockTypeToIconMap: Record = { google_maps: GoogleMapsIcon, google_search: GoogleIcon, google_sheets_v2: GoogleSheetsIcon, - google_slides: GoogleSlidesIcon, + google_slides_v2: GoogleSlidesIcon, 
google_vault: GoogleVaultIcon, grafana: GrafanaIcon, grain: GrainIcon, @@ -206,7 +206,7 @@ export const blockTypeToIconMap: Record = { microsoft_excel_v2: MicrosoftExcelIcon, microsoft_planner: MicrosoftPlannerIcon, microsoft_teams: MicrosoftTeamsIcon, - mistral_parse_v2: MistralIcon, + mistral_parse_v3: MistralIcon, mongodb: MongoDBIcon, mysql: MySQLIcon, neo4j: Neo4jIcon, @@ -221,11 +221,11 @@ export const blockTypeToIconMap: Record = { polymarket: PolymarketIcon, postgresql: PostgresIcon, posthog: PosthogIcon, - pulse: PulseIcon, + pulse_v2: PulseIcon, qdrant: QdrantIcon, rds: RDSIcon, reddit: RedditIcon, - reducto: ReductoIcon, + reducto_v2: ReductoIcon, resend: ResendIcon, s3: S3Icon, salesforce: SalesforceIcon, @@ -244,11 +244,11 @@ export const blockTypeToIconMap: Record = { ssh: SshIcon, stagehand: StagehandIcon, stripe: StripeIcon, - stt: STTIcon, + stt_v2: STTIcon, supabase: SupabaseIcon, tavily: TavilyIcon, telegram: TelegramIcon, - textract: TextractIcon, + textract_v2: TextractIcon, tinybird: TinybirdIcon, translate: TranslateIcon, trello: TrelloIcon, @@ -257,7 +257,7 @@ export const blockTypeToIconMap: Record = { twilio_voice: TwilioIcon, typeform: TypeformIcon, video_generator_v2: VideoIcon, - vision: EyeIcon, + vision_v2: EyeIcon, wealthbox: WealthboxIcon, webflow: WebflowIcon, whatsapp: WhatsAppIcon, diff --git a/apps/docs/content/docs/de/tools/file.mdx b/apps/docs/content/docs/de/tools/file.mdx index 08542aa01..3bea49629 100644 --- a/apps/docs/content/docs/de/tools/file.mdx +++ b/apps/docs/content/docs/de/tools/file.mdx @@ -6,7 +6,7 @@ description: Mehrere Dateien lesen und parsen import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/docs/content/docs/de/tools/fireflies.mdx b/apps/docs/content/docs/de/tools/fireflies.mdx index 4856b2df7..ab7dce902 100644 --- a/apps/docs/content/docs/de/tools/fireflies.mdx +++ b/apps/docs/content/docs/de/tools/fireflies.mdx @@ -6,7 +6,7 @@ description: Interagieren Sie mit 
Fireflies.ai-Besprechungstranskripten und -auf import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/docs/content/docs/de/tools/mistral_parse.mdx b/apps/docs/content/docs/de/tools/mistral_parse.mdx index feff01fe6..2191e1c91 100644 --- a/apps/docs/content/docs/de/tools/mistral_parse.mdx +++ b/apps/docs/content/docs/de/tools/mistral_parse.mdx @@ -6,7 +6,7 @@ description: Text aus PDF-Dokumenten extrahieren import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/docs/content/docs/en/tools/confluence.mdx b/apps/docs/content/docs/en/tools/confluence.mdx index 6b1f531f4..b8173f135 100644 --- a/apps/docs/content/docs/en/tools/confluence.mdx +++ b/apps/docs/content/docs/en/tools/confluence.mdx @@ -49,10 +49,25 @@ Retrieve content from Confluence pages using the Confluence API. | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of retrieval | +| `ts` | string | ISO 8601 timestamp of the operation | | `pageId` | string | Confluence page ID | -| `content` | string | Page content with HTML tags stripped | | `title` | string | Page title | +| `content` | string | Page content with HTML tags stripped | +| `status` | string | Page status \(current, archived, trashed, draft\) | +| `spaceId` | string | ID of the space containing the page | +| `parentId` | string | ID of the parent page | +| `authorId` | string | Account ID of the page author | +| `createdAt` | string | ISO 8601 timestamp when the page was created | +| `url` | string | URL to view the page in Confluence | +| `body` | object | Raw page body content in storage format | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| `version` | object | Page version information | +| ↳ `number` | number | Version number | +| ↳ `message` | string | Version message | +| ↳ `minorEdit` | boolean | Whether this is a minor edit | +| ↳ `authorId` | string | 
Account ID of the version author | +| ↳ `createdAt` | string | ISO 8601 timestamp of version creation | ### `confluence_update` @@ -76,6 +91,25 @@ Update a Confluence page using the Confluence API. | `ts` | string | Timestamp of update | | `pageId` | string | Confluence page ID | | `title` | string | Updated page title | +| `status` | string | Page status | +| `spaceId` | string | Space ID | +| `body` | object | Page body content in storage format | +| ↳ `storage` | object | Body in storage format \(Confluence markup\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| ↳ `view` | object | Body in view format \(rendered HTML\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| ↳ `atlas_doc_format` | object | Body in Atlassian Document Format \(ADF\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| `version` | object | Page version information | +| ↳ `number` | number | Version number | +| ↳ `message` | string | Version message | +| ↳ `minorEdit` | boolean | Whether this is a minor edit | +| ↳ `authorId` | string | Account ID of the version author | +| ↳ `createdAt` | string | ISO 8601 timestamp of version creation | +| `url` | string | URL to view the page in Confluence | | `success` | boolean | Update operation success status | ### `confluence_create_page` @@ -100,11 +134,30 @@ Create a new page in a Confluence space. 
| `ts` | string | Timestamp of creation | | `pageId` | string | Created page ID | | `title` | string | Page title | +| `status` | string | Page status | +| `spaceId` | string | Space ID | +| `parentId` | string | Parent page ID | +| `body` | object | Page body content | +| ↳ `storage` | object | Body in storage format \(Confluence markup\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| ↳ `view` | object | Body in view format \(rendered HTML\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| ↳ `atlas_doc_format` | object | Body in Atlassian Document Format \(ADF\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| `version` | object | Page version information | +| ↳ `number` | number | Version number | +| ↳ `message` | string | Version message | +| ↳ `minorEdit` | boolean | Whether this is a minor edit | +| ↳ `authorId` | string | Account ID of the version author | +| ↳ `createdAt` | string | ISO 8601 timestamp of version creation | | `url` | string | Page URL | ### `confluence_delete_page` -Delete a Confluence page (moves it to trash where it can be restored). +Delete a Confluence page. By default moves to trash; use purge=true to permanently delete. #### Input @@ -112,6 +165,7 @@ Delete a Confluence page (moves it to trash where it can be restored). | --------- | ---- | -------- | ----------- | | `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | | `pageId` | string | Yes | Confluence page ID to delete | +| `purge` | boolean | No | If true, permanently deletes the page instead of moving to trash \(default: false\) | | `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. 
| #### Output @@ -122,6 +176,229 @@ Delete a Confluence page (moves it to trash where it can be restored). | `pageId` | string | Deleted page ID | | `deleted` | boolean | Deletion status | +### `confluence_list_pages_in_space` + +List all pages within a specific Confluence space. Supports pagination and filtering by status. + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | +| `spaceId` | string | Yes | The ID of the Confluence space to list pages from | +| `limit` | number | No | Maximum number of pages to return \(default: 50, max: 250\) | +| `status` | string | No | Filter pages by status: current, archived, trashed, or draft | +| `bodyFormat` | string | No | Format for page body content: storage, atlas_doc_format, or view. If not specified, body is not included. | +| `cursor` | string | No | Pagination cursor from previous response to get the next page of results | +| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. 
| + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `ts` | string | ISO 8601 timestamp of the operation | +| `pages` | array | Array of pages in the space | +| ↳ `id` | string | Unique page identifier | +| ↳ `title` | string | Page title | +| ↳ `status` | string | Page status \(e.g., current, archived, trashed, draft\) | +| ↳ `spaceId` | string | ID of the space containing the page | +| ↳ `parentId` | string | ID of the parent page \(null if top-level\) | +| ↳ `authorId` | string | Account ID of the page author | +| ↳ `createdAt` | string | ISO 8601 timestamp when the page was created | +| ↳ `version` | object | Page version information | +| ↳ `number` | number | Version number | +| ↳ `message` | string | Version message | +| ↳ `minorEdit` | boolean | Whether this is a minor edit | +| ↳ `authorId` | string | Account ID of the version author | +| ↳ `createdAt` | string | ISO 8601 timestamp of version creation | +| ↳ `body` | object | Page body content \(if bodyFormat was specified\) | +| ↳ `storage` | object | Body in storage format \(Confluence markup\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| ↳ `view` | object | Body in view format \(rendered HTML\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| ↳ `atlas_doc_format` | object | Body in Atlassian Document Format \(ADF\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| ↳ `webUrl` | string | URL to view the page in Confluence | +| `nextCursor` | string | Cursor for fetching the next page of results | + +### `confluence_get_page_children` + +Get all child pages of a specific Confluence page. Useful for navigating page hierarchies. 
+ +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | +| `pageId` | string | Yes | The ID of the parent page to get children from | +| `limit` | number | No | Maximum number of child pages to return \(default: 50, max: 250\) | +| `cursor` | string | No | Pagination cursor from previous response to get the next page of results | +| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `ts` | string | ISO 8601 timestamp of the operation | +| `parentId` | string | ID of the parent page | +| `children` | array | Array of child pages | +| ↳ `id` | string | Child page ID | +| ↳ `title` | string | Child page title | +| ↳ `status` | string | Page status | +| ↳ `spaceId` | string | Space ID | +| ↳ `childPosition` | number | Position among siblings | +| ↳ `webUrl` | string | URL to view the page | +| `nextCursor` | string | Cursor for fetching the next page of results | + +### `confluence_get_page_ancestors` + +Get the ancestor (parent) pages of a specific Confluence page. Returns the full hierarchy from the page up to the root. + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | +| `pageId` | string | Yes | The ID of the page to get ancestors for | +| `limit` | number | No | Maximum number of ancestors to return \(default: 25, max: 250\) | +| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. 
| + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `ts` | string | ISO 8601 timestamp of the operation | +| `pageId` | string | ID of the page whose ancestors were retrieved | +| `ancestors` | array | Array of ancestor pages, ordered from direct parent to root | +| ↳ `id` | string | Ancestor page ID | +| ↳ `title` | string | Ancestor page title | +| ↳ `status` | string | Page status | +| ↳ `spaceId` | string | Space ID | +| ↳ `webUrl` | string | URL to view the page | + +### `confluence_list_page_versions` + +List all versions (revision history) of a Confluence page. + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | +| `pageId` | string | Yes | The ID of the page to get versions for | +| `limit` | number | No | Maximum number of versions to return \(default: 50, max: 250\) | +| `cursor` | string | No | Pagination cursor from previous response | +| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `ts` | string | ISO 8601 timestamp of the operation | +| `pageId` | string | ID of the page | +| `versions` | array | Array of page versions | +| ↳ `number` | number | Version number | +| ↳ `message` | string | Version message | +| ↳ `minorEdit` | boolean | Whether this is a minor edit | +| ↳ `authorId` | string | Account ID of the version author | +| ↳ `createdAt` | string | ISO 8601 timestamp of version creation | +| `nextCursor` | string | Cursor for fetching the next page of results | + +### `confluence_get_page_version` + +Get details about a specific version of a Confluence page. 
+ +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | +| `pageId` | string | Yes | The ID of the page | +| `versionNumber` | number | Yes | The version number to retrieve \(e.g., 1, 2, 3\) | +| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `ts` | string | ISO 8601 timestamp of the operation | +| `pageId` | string | ID of the page | +| `version` | object | Detailed version information | +| ↳ `number` | number | Version number | +| ↳ `message` | string | Version message | +| ↳ `minorEdit` | boolean | Whether this is a minor edit | +| ↳ `authorId` | string | Account ID of the version author | +| ↳ `createdAt` | string | ISO 8601 timestamp of version creation | +| ↳ `contentTypeModified` | boolean | Whether the content type was modified in this version | +| ↳ `collaborators` | array | List of collaborator account IDs for this version | +| ↳ `prevVersion` | number | Previous version number | +| ↳ `nextVersion` | number | Next version number | + +### `confluence_list_page_properties` + +List all custom properties (metadata) attached to a Confluence page. + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | +| `pageId` | string | Yes | The ID of the page to list properties from | +| `limit` | number | No | Maximum number of properties to return \(default: 50, max: 250\) | +| `cursor` | string | No | Pagination cursor from previous response | +| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. 
| + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `ts` | string | ISO 8601 timestamp of the operation | +| `pageId` | string | ID of the page | +| `properties` | array | Array of content properties | +| ↳ `id` | string | Property ID | +| ↳ `key` | string | Property key | +| ↳ `value` | json | Property value \(can be any JSON\) | +| ↳ `version` | object | Version information | +| ↳ `number` | number | Version number | +| ↳ `message` | string | Version message | +| ↳ `minorEdit` | boolean | Whether this is a minor edit | +| ↳ `authorId` | string | Account ID of the version author | +| ↳ `createdAt` | string | ISO 8601 timestamp of version creation | +| `nextCursor` | string | Cursor for fetching the next page of results | + +### `confluence_create_page_property` + +Create a new custom property (metadata) on a Confluence page. + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | +| `pageId` | string | Yes | The ID of the page to add the property to | +| `key` | string | Yes | The key/name for the property | +| `value` | json | Yes | The value for the property \(can be any JSON value\) | +| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. 
| + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `ts` | string | ISO 8601 timestamp of the operation | +| `pageId` | string | ID of the page | +| `propertyId` | string | ID of the created property | +| `key` | string | Property key | +| `value` | json | Property value | +| `version` | object | Version information | +| ↳ `number` | number | Version number | +| ↳ `message` | string | Version message | +| ↳ `minorEdit` | boolean | Whether this is a minor edit | +| ↳ `authorId` | string | Account ID of the version author | +| ↳ `createdAt` | string | ISO 8601 timestamp of version creation | + ### `confluence_search` Search for content across Confluence pages, blog posts, and other content. @@ -155,6 +432,211 @@ Search for content across Confluence pages, blog posts, and other content. | ↳ `lastModified` | string | ISO 8601 timestamp of last modification | | ↳ `entityType` | string | Entity type identifier \(e.g., content, space\) | +### `confluence_search_in_space` + +Search for content within a specific Confluence space. Optionally filter by text query and content type. + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | +| `spaceKey` | string | Yes | The key of the Confluence space to search in \(e.g., "ENG", "HR"\) | +| `query` | string | No | Text search query. If not provided, returns all content in the space. | +| `contentType` | string | No | Filter by content type: page, blogpost, attachment, or comment | +| `limit` | number | No | Maximum number of results to return \(default: 25, max: 250\) | +| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. 
| + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `ts` | string | ISO 8601 timestamp of the operation | +| `spaceKey` | string | The space key that was searched | +| `totalSize` | number | Total number of matching results | +| `results` | array | Array of search results | +| ↳ `id` | string | Unique content identifier | +| ↳ `title` | string | Content title | +| ↳ `type` | string | Content type \(e.g., page, blogpost, attachment, comment\) | +| ↳ `status` | string | Content status \(e.g., current\) | +| ↳ `url` | string | URL to view the content in Confluence | +| ↳ `excerpt` | string | Text excerpt matching the search query | +| ↳ `spaceKey` | string | Key of the space containing the content | +| ↳ `space` | object | Space information for the content | +| ↳ `id` | string | Space identifier | +| ↳ `key` | string | Space key | +| ↳ `name` | string | Space name | +| ↳ `lastModified` | string | ISO 8601 timestamp of last modification | +| ↳ `entityType` | string | Entity type identifier \(e.g., content, space\) | + +### `confluence_list_blogposts` + +List all blog posts across all accessible Confluence spaces. + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | +| `limit` | number | No | Maximum number of blog posts to return \(default: 25, max: 250\) | +| `status` | string | No | Filter by status: current, archived, trashed, or draft | +| `sort` | string | No | Sort order: created-date, -created-date, modified-date, -modified-date, title, -title | +| `cursor` | string | No | Pagination cursor from previous response | +| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. 
| + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `ts` | string | ISO 8601 timestamp of the operation | +| `blogPosts` | array | Array of blog posts | +| ↳ `id` | string | Blog post ID | +| ↳ `title` | string | Blog post title | +| ↳ `status` | string | Blog post status | +| ↳ `spaceId` | string | Space ID | +| ↳ `authorId` | string | Author account ID | +| ↳ `createdAt` | string | Creation timestamp | +| ↳ `version` | object | Version information | +| ↳ `number` | number | Version number | +| ↳ `message` | string | Version message | +| ↳ `minorEdit` | boolean | Whether this is a minor edit | +| ↳ `authorId` | string | Account ID of the version author | +| ↳ `createdAt` | string | ISO 8601 timestamp of version creation | +| ↳ `webUrl` | string | URL to view the blog post | +| `nextCursor` | string | Cursor for fetching the next page of results | + +### `confluence_get_blogpost` + +Get a specific Confluence blog post by ID, including its content. + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | +| `blogPostId` | string | Yes | The ID of the blog post to retrieve | +| `bodyFormat` | string | No | Format for blog post body: storage, atlas_doc_format, or view | +| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. 
| + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `ts` | string | ISO 8601 timestamp of the operation | +| `id` | string | Blog post ID | +| `title` | string | Blog post title | +| `status` | string | Blog post status | +| `spaceId` | string | Space ID | +| `authorId` | string | Author account ID | +| `createdAt` | string | Creation timestamp | +| `version` | object | Version information | +| ↳ `number` | number | Version number | +| ↳ `message` | string | Version message | +| ↳ `minorEdit` | boolean | Whether this is a minor edit | +| ↳ `authorId` | string | Account ID of the version author | +| ↳ `createdAt` | string | ISO 8601 timestamp of version creation | +| `body` | object | Blog post body content in requested format\(s\) | +| ↳ `storage` | object | Body in storage format \(Confluence markup\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| ↳ `view` | object | Body in view format \(rendered HTML\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| ↳ `atlas_doc_format` | object | Body in Atlassian Document Format \(ADF\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| `webUrl` | string | URL to view the blog post | + +### `confluence_create_blogpost` + +Create a new blog post in a Confluence space. 
+ +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | +| `spaceId` | string | Yes | The ID of the space to create the blog post in | +| `title` | string | Yes | Title of the blog post | +| `content` | string | Yes | Blog post content in Confluence storage format \(HTML\) | +| `status` | string | No | Blog post status: current \(default\) or draft | +| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `ts` | string | ISO 8601 timestamp of the operation | +| `id` | string | Created blog post ID | +| `title` | string | Blog post title | +| `status` | string | Blog post status | +| `spaceId` | string | Space ID | +| `authorId` | string | Author account ID | +| `body` | object | Blog post body content | +| ↳ `storage` | object | Body in storage format \(Confluence markup\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| ↳ `view` | object | Body in view format \(rendered HTML\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| ↳ `atlas_doc_format` | object | Body in Atlassian Document Format \(ADF\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| `version` | object | Blog post version information | +| ↳ `number` | number | Version number | +| ↳ `message` | string | Version message | +| ↳ `minorEdit` | boolean | Whether this is a minor edit | +| ↳ `authorId` | string | Account ID of the version author | +| ↳ `createdAt` | string | ISO 8601 timestamp of version creation | +| `webUrl` | string | URL to view the 
blog post | + +### `confluence_list_blogposts_in_space` + +List all blog posts within a specific Confluence space. + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | +| `spaceId` | string | Yes | The ID of the Confluence space to list blog posts from | +| `limit` | number | No | Maximum number of blog posts to return \(default: 25, max: 250\) | +| `status` | string | No | Filter by status: current, archived, trashed, or draft | +| `bodyFormat` | string | No | Format for blog post body: storage, atlas_doc_format, or view | +| `cursor` | string | No | Pagination cursor from previous response | +| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `ts` | string | ISO 8601 timestamp of the operation | +| `blogPosts` | array | Array of blog posts in the space | +| ↳ `id` | string | Blog post ID | +| ↳ `title` | string | Blog post title | +| ↳ `status` | string | Blog post status | +| ↳ `spaceId` | string | Space ID | +| ↳ `authorId` | string | Author account ID | +| ↳ `createdAt` | string | Creation timestamp | +| ↳ `version` | object | Version information | +| ↳ `number` | number | Version number | +| ↳ `message` | string | Version message | +| ↳ `minorEdit` | boolean | Whether this is a minor edit | +| ↳ `authorId` | string | Account ID of the version author | +| ↳ `createdAt` | string | ISO 8601 timestamp of version creation | +| ↳ `body` | object | Blog post body content | +| ↳ `storage` | object | Body in storage format \(Confluence markup\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| ↳ `view` | object | Body in view format \(rendered HTML\) | +| ↳ `value` | string | The 
content value in the specified format | +| ↳ `representation` | string | Content representation type | +| ↳ `atlas_doc_format` | object | Body in Atlassian Document Format \(ADF\) | +| ↳ `value` | string | The content value in the specified format | +| ↳ `representation` | string | Content representation type | +| ↳ `webUrl` | string | URL to view the blog post | +| `nextCursor` | string | Cursor for fetching the next page of results | + ### `confluence_create_comment` Add a comment to a Confluence page. @@ -187,6 +669,8 @@ List all comments on a Confluence page. | `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | | `pageId` | string | Yes | Confluence page ID to list comments from | | `limit` | number | No | Maximum number of comments to return \(default: 25\) | +| `bodyFormat` | string | No | Format for the comment body: storage, atlas_doc_format, view, or export_view \(default: storage\) | +| `cursor` | string | No | Pagination cursor from previous response | | `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. | #### Output @@ -212,6 +696,7 @@ List all comments on a Confluence page. | ↳ `minorEdit` | boolean | Whether this is a minor edit | | ↳ `authorId` | string | Account ID of the version author | | ↳ `createdAt` | string | ISO 8601 timestamp of version creation | +| `nextCursor` | string | Cursor for fetching the next page of results | ### `confluence_update_comment` @@ -291,7 +776,8 @@ List all attachments on a Confluence page. 
| --------- | ---- | -------- | ----------- | | `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | | `pageId` | string | Yes | Confluence page ID to list attachments from | -| `limit` | number | No | Maximum number of attachments to return \(default: 25\) | +| `limit` | number | No | Maximum number of attachments to return \(default: 50, max: 250\) | +| `cursor` | string | No | Pagination cursor from previous response | | `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. | #### Output @@ -316,6 +802,7 @@ List all attachments on a Confluence page. | ↳ `minorEdit` | boolean | Whether this is a minor edit | | ↳ `authorId` | string | Account ID of the version author | | ↳ `createdAt` | string | ISO 8601 timestamp of version creation | +| `nextCursor` | string | Cursor for fetching the next page of results | ### `confluence_delete_attachment` @@ -347,6 +834,8 @@ List all labels on a Confluence page. | --------- | ---- | -------- | ----------- | | `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | | `pageId` | string | Yes | Confluence page ID to list labels from | +| `limit` | number | No | Maximum number of labels to return \(default: 25, max: 250\) | +| `cursor` | string | No | Pagination cursor from previous response | | `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. | #### Output @@ -358,6 +847,30 @@ List all labels on a Confluence page. | ↳ `id` | string | Unique label identifier | | ↳ `name` | string | Label name | | ↳ `prefix` | string | Label prefix/type \(e.g., global, my, team\) | +| `nextCursor` | string | Cursor for fetching the next page of results | + +### `confluence_add_label` + +Add a label to a Confluence page for organization and categorization. 
+ +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | +| `pageId` | string | Yes | Confluence page ID to add the label to | +| `labelName` | string | Yes | Name of the label to add | +| `prefix` | string | No | Label prefix: global \(default\), my, team, or system | +| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `ts` | string | ISO 8601 timestamp of the operation | +| `pageId` | string | Page ID that the label was added to | +| `labelName` | string | Name of the added label | +| `labelId` | string | ID of the added label | ### `confluence_get_space` @@ -375,13 +888,19 @@ Get details about a specific Confluence space. | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of retrieval | +| `ts` | string | ISO 8601 timestamp of the operation | | `spaceId` | string | Space ID | | `name` | string | Space name | | `key` | string | Space key | -| `type` | string | Space type | -| `status` | string | Space status | -| `url` | string | Space URL | +| `type` | string | Space type \(global, personal\) | +| `status` | string | Space status \(current, archived\) | +| `url` | string | URL to view the space in Confluence | +| `authorId` | string | Account ID of the space creator | +| `createdAt` | string | ISO 8601 timestamp when the space was created | +| `homepageId` | string | ID of the space homepage | +| `description` | object | Space description content | +| ↳ `value` | string | Description text content | +| ↳ `representation` | string | Content representation format \(e.g., plain, view, storage\) | ### `confluence_list_spaces` @@ -392,7 +911,8 @@ List all Confluence spaces accessible to the user. 
| Parameter | Type | Required | Description | | --------- | ---- | -------- | ----------- | | `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) | -| `limit` | number | No | Maximum number of spaces to return \(default: 25\) | +| `limit` | number | No | Maximum number of spaces to return \(default: 25, max: 250\) | +| `cursor` | string | No | Pagination cursor from previous response | | `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. | #### Output @@ -412,5 +932,6 @@ List all Confluence spaces accessible to the user. | ↳ `description` | object | Space description | | ↳ `value` | string | Description text content | | ↳ `representation` | string | Content representation format \(e.g., plain, view, storage\) | +| `nextCursor` | string | Cursor for fetching the next page of results | diff --git a/apps/docs/content/docs/en/tools/discord.mdx b/apps/docs/content/docs/en/tools/discord.mdx index 84dd318f8..ad3195599 100644 --- a/apps/docs/content/docs/en/tools/discord.mdx +++ b/apps/docs/content/docs/en/tools/discord.mdx @@ -63,6 +63,7 @@ Send a message to a Discord channel | Parameter | Type | Description | | --------- | ---- | ----------- | | `message` | string | Success or error message | +| `files` | file[] | Files attached to the message | | `data` | object | Discord message data | | ↳ `id` | string | Message ID | | ↳ `content` | string | Message content | diff --git a/apps/docs/content/docs/en/tools/dropbox.mdx b/apps/docs/content/docs/en/tools/dropbox.mdx index 54cf3013d..aceae4da0 100644 --- a/apps/docs/content/docs/en/tools/dropbox.mdx +++ b/apps/docs/content/docs/en/tools/dropbox.mdx @@ -43,7 +43,8 @@ Upload a file to Dropbox | Parameter | Type | Required | Description | | --------- | ---- | -------- | ----------- | | `path` | string | Yes | The path in Dropbox where the file should be saved \(e.g., /folder/document.pdf\) | -| `fileContent` | string | Yes | The 
base64 encoded content of the file to upload | +| `file` | file | No | The file to upload \(UserFile object\) | +| `fileContent` | string | No | Legacy: base64 encoded file content | | `fileName` | string | No | Optional filename \(used if path is a folder\) | | `mode` | string | No | Write mode: add \(default\) or overwrite | | `autorename` | boolean | No | If true, rename the file if there is a conflict | @@ -66,7 +67,7 @@ Upload a file to Dropbox ### `dropbox_download` -Download a file from Dropbox and get a temporary link +Download a file from Dropbox with metadata and content #### Input @@ -78,11 +79,8 @@ Download a file from Dropbox and get a temporary link | Parameter | Type | Description | | --------- | ---- | ----------- | -| `file` | object | The file metadata | -| ↳ `id` | string | Unique identifier for the file | -| ↳ `name` | string | Name of the file | -| ↳ `path_display` | string | Display path of the file | -| ↳ `size` | number | Size of the file in bytes | +| `file` | file | Downloaded file stored in execution files | +| `metadata` | json | The file metadata | | `temporaryLink` | string | Temporary link to download the file \(valid for ~4 hours\) | | `content` | string | Base64 encoded file content \(if fetched\) | diff --git a/apps/docs/content/docs/en/tools/file.mdx b/apps/docs/content/docs/en/tools/file.mdx index 2a0cc1b87..ddc31bd60 100644 --- a/apps/docs/content/docs/en/tools/file.mdx +++ b/apps/docs/content/docs/en/tools/file.mdx @@ -6,7 +6,7 @@ description: Read and parse multiple files import { BlockInfoCard } from "@/components/ui/block-info-card" @@ -27,7 +27,7 @@ The File Parser tool is particularly useful for scenarios where your agents need ## Usage Instructions -Integrate File into the workflow. Can upload a file manually or insert a file url. +Upload files directly or import from external URLs to get UserFile objects for use in other blocks. 
@@ -41,14 +41,15 @@ Parse one or more uploaded files or files from URLs (text, PDF, CSV, images, etc | Parameter | Type | Required | Description | | --------- | ---- | -------- | ----------- | -| `filePath` | string | Yes | Path to the file\(s\). Can be a single path, URL, or an array of paths. | +| `filePath` | string | No | Path to the file\(s\). Can be a single path, URL, or an array of paths. | +| `file` | file | No | Uploaded file\(s\) to parse | | `fileType` | string | No | Type of file to parse \(auto-detected if not specified\) | #### Output | Parameter | Type | Description | | --------- | ---- | ----------- | -| `files` | array | Array of parsed files with content, metadata, and file properties | -| `combinedContent` | string | All file contents merged into a single text string | +| `files` | file[] | Parsed files as UserFile objects | +| `combinedContent` | string | Combined content of all parsed files | diff --git a/apps/docs/content/docs/en/tools/fireflies.mdx b/apps/docs/content/docs/en/tools/fireflies.mdx index 2445a2167..90c591605 100644 --- a/apps/docs/content/docs/en/tools/fireflies.mdx +++ b/apps/docs/content/docs/en/tools/fireflies.mdx @@ -6,7 +6,7 @@ description: Interact with Fireflies.ai meeting transcripts and recordings import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/docs/content/docs/en/tools/github.mdx b/apps/docs/content/docs/en/tools/github.mdx index 37b07dc7a..9ea94cb08 100644 --- a/apps/docs/content/docs/en/tools/github.mdx +++ b/apps/docs/content/docs/en/tools/github.mdx @@ -692,6 +692,7 @@ Get the content of a file from a GitHub repository. Supports files up to 1MB. 
Co | `download_url` | string | Direct download URL | | `git_url` | string | Git blob API URL | | `_links` | json | Related links | +| `file` | file | Downloaded file stored in execution files | ### `github_create_file` diff --git a/apps/docs/content/docs/en/tools/google_drive.mdx b/apps/docs/content/docs/en/tools/google_drive.mdx index f6a5fcb17..08298a9d0 100644 --- a/apps/docs/content/docs/en/tools/google_drive.mdx +++ b/apps/docs/content/docs/en/tools/google_drive.mdx @@ -291,11 +291,7 @@ Download a file from Google Drive with complete metadata (exports Google Workspa | Parameter | Type | Description | | --------- | ---- | ----------- | -| `file` | object | Downloaded file data | -| ↳ `name` | string | File name | -| ↳ `mimeType` | string | MIME type of the file | -| ↳ `data` | string | File content as base64-encoded string | -| ↳ `size` | number | File size in bytes | +| `file` | file | Downloaded file stored in execution files | | `metadata` | object | Complete file metadata from Google Drive | | ↳ `id` | string | Google Drive file ID | | ↳ `kind` | string | Resource type identifier | diff --git a/apps/docs/content/docs/en/tools/google_slides.mdx b/apps/docs/content/docs/en/tools/google_slides.mdx index 2aa03f7e8..30d6a17d4 100644 --- a/apps/docs/content/docs/en/tools/google_slides.mdx +++ b/apps/docs/content/docs/en/tools/google_slides.mdx @@ -6,7 +6,7 @@ description: Read, write, and create presentations import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/docs/content/docs/en/tools/jira.mdx b/apps/docs/content/docs/en/tools/jira.mdx index 921ae9cd9..812752057 100644 --- a/apps/docs/content/docs/en/tools/jira.mdx +++ b/apps/docs/content/docs/en/tools/jira.mdx @@ -333,6 +333,28 @@ Get all attachments from a Jira issue | `issueKey` | string | Issue key | | `attachments` | array | Array of attachments with id, filename, size, mimeType, created, author | +### `jira_add_attachment` + +Add attachments to a Jira issue + +#### Input + +| 
Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) | +| `issueKey` | string | Yes | Jira issue key to add attachments to \(e.g., PROJ-123\) | +| `files` | file[] | Yes | Files to attach to the Jira issue | +| `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `ts` | string | Timestamp of the operation | +| `issueKey` | string | Issue key | +| `attachmentIds` | json | IDs of uploaded attachments | +| `files` | file[] | Uploaded attachment files | + ### `jira_delete_attachment` Delete an attachment from a Jira issue diff --git a/apps/docs/content/docs/en/tools/linear.mdx b/apps/docs/content/docs/en/tools/linear.mdx index ec1d24698..b64d9a915 100644 --- a/apps/docs/content/docs/en/tools/linear.mdx +++ b/apps/docs/content/docs/en/tools/linear.mdx @@ -1022,7 +1022,8 @@ Add an attachment to an issue in Linear | Parameter | Type | Required | Description | | --------- | ---- | -------- | ----------- | | `issueId` | string | Yes | Issue ID to attach to | -| `url` | string | Yes | URL of the attachment | +| `url` | string | No | URL of the attachment | +| `file` | file | No | File to attach | | `title` | string | Yes | Attachment title | | `subtitle` | string | No | Attachment subtitle/description | diff --git a/apps/docs/content/docs/en/tools/microsoft_teams.mdx b/apps/docs/content/docs/en/tools/microsoft_teams.mdx index f4964250e..0d86d9aaa 100644 --- a/apps/docs/content/docs/en/tools/microsoft_teams.mdx +++ b/apps/docs/content/docs/en/tools/microsoft_teams.mdx @@ -81,6 +81,7 @@ Write or update content in a Microsoft Teams chat | `createdTime` | string | Timestamp when message was created | | `url` | string | Web URL to the message | | `updatedContent` | boolean | Whether content was successfully 
updated | +| `files` | file[] | Files attached to the message | ### `microsoft_teams_read_channel` @@ -132,6 +133,7 @@ Write or send a message to a Microsoft Teams channel | `createdTime` | string | Timestamp when message was created | | `url` | string | Web URL to the message | | `updatedContent` | boolean | Whether content was successfully updated | +| `files` | file[] | Files attached to the message | ### `microsoft_teams_update_chat_message` diff --git a/apps/docs/content/docs/en/tools/mistral_parse.mdx b/apps/docs/content/docs/en/tools/mistral_parse.mdx index 711f74391..046620f39 100644 --- a/apps/docs/content/docs/en/tools/mistral_parse.mdx +++ b/apps/docs/content/docs/en/tools/mistral_parse.mdx @@ -6,7 +6,7 @@ description: Extract text from PDF documents import { BlockInfoCard } from "@/components/ui/block-info-card" @@ -35,13 +35,12 @@ Integrate Mistral Parse into the workflow. Can extract text from uploaded PDF do ### `mistral_parser` -Parse PDF documents using Mistral OCR API - #### Input | Parameter | Type | Required | Description | | --------- | ---- | -------- | ----------- | -| `filePath` | string | Yes | URL to a PDF document to be processed | +| `filePath` | string | No | URL to a PDF document to be processed | +| `file` | file | No | Document file to be processed | | `fileUpload` | object | No | File upload data from file-upload component | | `resultType` | string | No | Type of parsed result \(markdown, text, or json\). Defaults to markdown. 
| | `includeImageBase64` | boolean | No | Include base64-encoded images in the response | @@ -55,27 +54,8 @@ Parse PDF documents using Mistral OCR API | Parameter | Type | Description | | --------- | ---- | ----------- | | `pages` | array | Array of page objects from Mistral OCR | -| ↳ `index` | number | Page index \(zero-based\) | -| ↳ `markdown` | string | Extracted markdown content | -| ↳ `images` | array | Images extracted from this page with bounding boxes | -| ↳ `id` | string | Image identifier \(e.g., img-0.jpeg\) | -| ↳ `top_left_x` | number | Top-left X coordinate in pixels | -| ↳ `top_left_y` | number | Top-left Y coordinate in pixels | -| ↳ `bottom_right_x` | number | Bottom-right X coordinate in pixels | -| ↳ `bottom_right_y` | number | Bottom-right Y coordinate in pixels | -| ↳ `image_base64` | string | Base64-encoded image data \(when include_image_base64=true\) | -| ↳ `dimensions` | object | Page dimensions | -| ↳ `dpi` | number | Dots per inch | -| ↳ `height` | number | Page height in pixels | -| ↳ `width` | number | Page width in pixels | -| ↳ `tables` | array | Extracted tables as HTML/markdown \(when table_format is set\). 
Referenced via placeholders like \[tbl-0.html\] | -| ↳ `hyperlinks` | array | Array of URL strings detected in the page \(e.g., \["https://...", "mailto:..."\]\) | -| ↳ `header` | string | Page header content \(when extract_header=true\) | -| ↳ `footer` | string | Page footer content \(when extract_footer=true\) | -| `model` | string | Mistral OCR model identifier \(e.g., mistral-ocr-latest\) | -| `usage_info` | object | Usage and processing statistics | -| ↳ `pages_processed` | number | Total number of pages processed | -| ↳ `doc_size_bytes` | number | Document file size in bytes | -| `document_annotation` | string | Structured annotation data as JSON string \(when applicable\) | +| `model` | string | Mistral OCR model identifier | +| `usage_info` | json | Usage statistics from the API | +| `document_annotation` | string | Structured annotation data | diff --git a/apps/docs/content/docs/en/tools/notion.mdx b/apps/docs/content/docs/en/tools/notion.mdx index 37a663af5..9a7ac9715 100644 --- a/apps/docs/content/docs/en/tools/notion.mdx +++ b/apps/docs/content/docs/en/tools/notion.mdx @@ -113,6 +113,26 @@ Create a new page in Notion | `last_edited_time` | string | ISO 8601 last edit timestamp | | `title` | string | Page title | +### `notion_update_page` + +Update properties of a Notion page + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `pageId` | string | Yes | The UUID of the Notion page to update | +| `properties` | json | Yes | JSON object of properties to update | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `id` | string | Page UUID | +| `url` | string | Notion page URL | +| `last_edited_time` | string | ISO 8601 last edit timestamp | +| `title` | string | Page title | + ### `notion_query_database` Query and filter Notion database entries with advanced filtering diff --git a/apps/docs/content/docs/en/tools/pipedrive.mdx 
b/apps/docs/content/docs/en/tools/pipedrive.mdx index fcaf58cef..28b8f723b 100644 --- a/apps/docs/content/docs/en/tools/pipedrive.mdx +++ b/apps/docs/content/docs/en/tools/pipedrive.mdx @@ -152,6 +152,7 @@ Retrieve files from Pipedrive with optional filters | `person_id` | string | No | Filter files by person ID \(e.g., "456"\) | | `org_id` | string | No | Filter files by organization ID \(e.g., "789"\) | | `limit` | string | No | Number of results to return \(e.g., "50", default: 100, max: 500\) | +| `downloadFiles` | boolean | No | Download file contents into file outputs | #### Output @@ -168,6 +169,7 @@ Retrieve files from Pipedrive with optional filters | ↳ `person_id` | number | Associated person ID | | ↳ `org_id` | number | Associated organization ID | | ↳ `url` | string | File download URL | +| `downloadedFiles` | file[] | Downloaded files from Pipedrive | | `total_items` | number | Total number of files returned | | `success` | boolean | Operation success status | diff --git a/apps/docs/content/docs/en/tools/pulse.mdx b/apps/docs/content/docs/en/tools/pulse.mdx index a804d9952..19fd85f4a 100644 --- a/apps/docs/content/docs/en/tools/pulse.mdx +++ b/apps/docs/content/docs/en/tools/pulse.mdx @@ -6,7 +6,7 @@ description: Extract text from documents using Pulse OCR import { BlockInfoCard } from "@/components/ui/block-info-card" @@ -31,7 +31,7 @@ If you need accurate, scalable, and developer-friendly document parsing capabili ## Usage Instructions -Integrate Pulse into the workflow. Extract text from PDF documents, images, and Office files via URL or upload. +Integrate Pulse into the workflow. Extract text from PDF documents, images, and Office files via upload or file references. @@ -39,13 +39,12 @@ Integrate Pulse into the workflow. 
Extract text from PDF documents, images, and ### `pulse_parser` -Parse documents (PDF, images, Office docs) using Pulse OCR API - #### Input | Parameter | Type | Required | Description | | --------- | ---- | -------- | ----------- | -| `filePath` | string | Yes | URL to a document to be processed | +| `filePath` | string | No | URL to a document to be processed | +| `file` | file | No | Document file to be processed | | `fileUpload` | object | No | File upload data from file-upload component | | `pages` | string | No | Page range to process \(1-indexed, e.g., "1-2,5"\) | | `extractFigure` | boolean | No | Enable figure extraction from the document | @@ -57,16 +56,6 @@ Parse documents (PDF, images, Office docs) using Pulse OCR API #### Output -| Parameter | Type | Description | -| --------- | ---- | ----------- | -| `markdown` | string | Extracted content in markdown format | -| `page_count` | number | Number of pages in the document | -| `job_id` | string | Unique job identifier | -| `bounding_boxes` | json | Bounding box layout information | -| `extraction_url` | string | URL for extraction results \(for large documents\) | -| `html` | string | HTML content if requested | -| `structured_output` | json | Structured output if schema was provided | -| `chunks` | json | Chunked content if chunking was enabled | -| `figures` | json | Extracted figures if figure extraction was enabled | +This tool does not produce any outputs. diff --git a/apps/docs/content/docs/en/tools/reducto.mdx b/apps/docs/content/docs/en/tools/reducto.mdx index ef004cf89..11af5add1 100644 --- a/apps/docs/content/docs/en/tools/reducto.mdx +++ b/apps/docs/content/docs/en/tools/reducto.mdx @@ -6,7 +6,7 @@ description: Extract text from PDF documents import { BlockInfoCard } from "@/components/ui/block-info-card" @@ -29,7 +29,7 @@ Looking for reliable and scalable PDF parsing? Reducto is optimized for develope ## Usage Instructions -Integrate Reducto Parse into the workflow. 
Can extract text from uploaded PDF documents, or from a URL. +Integrate Reducto Parse into the workflow. Can extract text from uploaded PDF documents or file references. @@ -37,13 +37,12 @@ Integrate Reducto Parse into the workflow. Can extract text from uploaded PDF do ### `reducto_parser` -Parse PDF documents using Reducto OCR API - #### Input | Parameter | Type | Required | Description | | --------- | ---- | -------- | ----------- | -| `filePath` | string | Yes | URL to a PDF document to be processed | +| `filePath` | string | No | URL to a PDF document to be processed | +| `file` | file | No | Document file to be processed | | `fileUpload` | object | No | File upload data from file-upload component | | `pages` | array | No | Specific pages to process \(1-indexed page numbers\) | | `tableOutputFormat` | string | No | Table output format \(html or markdown\). Defaults to markdown. | @@ -51,13 +50,6 @@ Parse PDF documents using Reducto OCR API #### Output -| Parameter | Type | Description | -| --------- | ---- | ----------- | -| `job_id` | string | Unique identifier for the processing job | -| `duration` | number | Processing time in seconds | -| `usage` | json | Resource consumption data | -| `result` | json | Parsed document content with chunks and blocks | -| `pdf_url` | string | Storage URL of converted PDF | -| `studio_link` | string | Link to Reducto studio interface | +This tool does not produce any outputs. 
diff --git a/apps/docs/content/docs/en/tools/s3.mdx b/apps/docs/content/docs/en/tools/s3.mdx index b4cf8ffa6..95715f0f1 100644 --- a/apps/docs/content/docs/en/tools/s3.mdx +++ b/apps/docs/content/docs/en/tools/s3.mdx @@ -78,6 +78,7 @@ Retrieve an object from an AWS S3 bucket | Parameter | Type | Description | | --------- | ---- | ----------- | | `url` | string | Pre-signed URL for downloading the S3 object | +| `file` | file | Downloaded file stored in execution files | | `metadata` | object | File metadata including type, size, name, and last modified date | ### `s3_list_objects` diff --git a/apps/docs/content/docs/en/tools/sendgrid.mdx b/apps/docs/content/docs/en/tools/sendgrid.mdx index a8e25a288..d5b9ea999 100644 --- a/apps/docs/content/docs/en/tools/sendgrid.mdx +++ b/apps/docs/content/docs/en/tools/sendgrid.mdx @@ -62,7 +62,7 @@ Send an email using SendGrid API | `bcc` | string | No | BCC email address | | `replyTo` | string | No | Reply-to email address | | `replyToName` | string | No | Reply-to name | -| `attachments` | file[] | No | Files to attach to the email as an array of attachment objects | +| `attachments` | file[] | No | Files to attach to the email \(UserFile objects\) | | `templateId` | string | No | SendGrid template ID to use | | `dynamicTemplateData` | json | No | JSON object of dynamic template data | diff --git a/apps/docs/content/docs/en/tools/sftp.mdx b/apps/docs/content/docs/en/tools/sftp.mdx index 3cb8d0876..fd0ef6aa7 100644 --- a/apps/docs/content/docs/en/tools/sftp.mdx +++ b/apps/docs/content/docs/en/tools/sftp.mdx @@ -97,6 +97,7 @@ Download a file from a remote SFTP server | Parameter | Type | Description | | --------- | ---- | ----------- | | `success` | boolean | Whether the download was successful | +| `file` | file | Downloaded file stored in execution files | | `fileName` | string | Name of the downloaded file | | `content` | string | File content \(text or base64 encoded\) | | `size` | number | File size in bytes | diff --git 
a/apps/docs/content/docs/en/tools/slack.mdx b/apps/docs/content/docs/en/tools/slack.mdx index 1471c8800..35562a17e 100644 --- a/apps/docs/content/docs/en/tools/slack.mdx +++ b/apps/docs/content/docs/en/tools/slack.mdx @@ -144,6 +144,7 @@ Send messages to Slack channels or direct messages. Supports Slack mrkdwn format | `ts` | string | Message timestamp | | `channel` | string | Channel ID where message was sent | | `fileCount` | number | Number of files uploaded \(when files are attached\) | +| `files` | file[] | Files attached to the message | ### `slack_canvas` diff --git a/apps/docs/content/docs/en/tools/ssh.mdx b/apps/docs/content/docs/en/tools/ssh.mdx index 2b0efdd23..a8bd98093 100644 --- a/apps/docs/content/docs/en/tools/ssh.mdx +++ b/apps/docs/content/docs/en/tools/ssh.mdx @@ -170,6 +170,7 @@ Download a file from a remote SSH server | Parameter | Type | Description | | --------- | ---- | ----------- | | `downloaded` | boolean | Whether the file was downloaded successfully | +| `file` | file | Downloaded file stored in execution files | | `fileContent` | string | File content \(base64 encoded for binary files\) | | `fileName` | string | Name of the downloaded file | | `remotePath` | string | Source path on the remote server | diff --git a/apps/docs/content/docs/en/tools/stt.mdx b/apps/docs/content/docs/en/tools/stt.mdx index 3026810a3..956724329 100644 --- a/apps/docs/content/docs/en/tools/stt.mdx +++ b/apps/docs/content/docs/en/tools/stt.mdx @@ -6,7 +6,7 @@ description: Convert speech to text using AI import { BlockInfoCard } from "@/components/ui/block-info-card" @@ -50,8 +50,6 @@ Transcribe audio and video files to text using leading AI providers. 
Supports mu ### `stt_whisper` -Transcribe audio to text using OpenAI Whisper - #### Input | Parameter | Type | Required | Description | @@ -71,22 +69,10 @@ Transcribe audio to text using OpenAI Whisper #### Output -| Parameter | Type | Description | -| --------- | ---- | ----------- | -| `transcript` | string | Full transcribed text | -| `segments` | array | Timestamped segments | -| ↳ `text` | string | Transcribed text for this segment | -| ↳ `start` | number | Start time in seconds | -| ↳ `end` | number | End time in seconds | -| ↳ `speaker` | string | Speaker identifier \(if diarization enabled\) | -| ↳ `confidence` | number | Confidence score \(0-1\) | -| `language` | string | Detected or specified language | -| `duration` | number | Audio duration in seconds | +This tool does not produce any outputs. ### `stt_deepgram` -Transcribe audio to text using Deepgram - #### Input | Parameter | Type | Required | Description | @@ -103,23 +89,10 @@ Transcribe audio to text using Deepgram #### Output -| Parameter | Type | Description | -| --------- | ---- | ----------- | -| `transcript` | string | Full transcribed text | -| `segments` | array | Timestamped segments with speaker labels | -| ↳ `text` | string | Transcribed text for this segment | -| ↳ `start` | number | Start time in seconds | -| ↳ `end` | number | End time in seconds | -| ↳ `speaker` | string | Speaker identifier \(if diarization enabled\) | -| ↳ `confidence` | number | Confidence score \(0-1\) | -| `language` | string | Detected or specified language | -| `duration` | number | Audio duration in seconds | -| `confidence` | number | Overall confidence score | +This tool does not produce any outputs. 
### `stt_elevenlabs` -Transcribe audio to text using ElevenLabs - #### Input | Parameter | Type | Required | Description | @@ -135,18 +108,10 @@ Transcribe audio to text using ElevenLabs #### Output -| Parameter | Type | Description | -| --------- | ---- | ----------- | -| `transcript` | string | Full transcribed text | -| `segments` | array | Timestamped segments | -| `language` | string | Detected or specified language | -| `duration` | number | Audio duration in seconds | -| `confidence` | number | Overall confidence score | +This tool does not produce any outputs. ### `stt_assemblyai` -Transcribe audio to text using AssemblyAI with advanced NLP features - #### Input | Parameter | Type | Required | Description | @@ -167,35 +132,10 @@ Transcribe audio to text using AssemblyAI with advanced NLP features #### Output -| Parameter | Type | Description | -| --------- | ---- | ----------- | -| `transcript` | string | Full transcribed text | -| `segments` | array | Timestamped segments with speaker labels | -| ↳ `text` | string | Transcribed text for this segment | -| ↳ `start` | number | Start time in seconds | -| ↳ `end` | number | End time in seconds | -| ↳ `speaker` | string | Speaker identifier \(if diarization enabled\) | -| ↳ `confidence` | number | Confidence score \(0-1\) | -| `language` | string | Detected or specified language | -| `duration` | number | Audio duration in seconds | -| `confidence` | number | Overall confidence score | -| `sentiment` | array | Sentiment analysis results | -| ↳ `text` | string | Text that was analyzed | -| ↳ `sentiment` | string | Sentiment \(POSITIVE, NEGATIVE, NEUTRAL\) | -| ↳ `confidence` | number | Confidence score | -| ↳ `start` | number | Start time in milliseconds | -| ↳ `end` | number | End time in milliseconds | -| `entities` | array | Detected entities | -| ↳ `entity_type` | string | Entity type \(e.g., person_name, location, organization\) | -| ↳ `text` | string | Entity text | -| ↳ `start` | number | Start time in 
milliseconds | -| ↳ `end` | number | End time in milliseconds | -| `summary` | string | Auto-generated summary | +This tool does not produce any outputs. ### `stt_gemini` -Transcribe audio to text using Google Gemini with multimodal capabilities - #### Input | Parameter | Type | Required | Description | @@ -211,12 +151,6 @@ Transcribe audio to text using Google Gemini with multimodal capabilities #### Output -| Parameter | Type | Description | -| --------- | ---- | ----------- | -| `transcript` | string | Full transcribed text | -| `segments` | array | Timestamped segments | -| `language` | string | Detected or specified language | -| `duration` | number | Audio duration in seconds | -| `confidence` | number | Overall confidence score | +This tool does not produce any outputs. diff --git a/apps/docs/content/docs/en/tools/telegram.mdx b/apps/docs/content/docs/en/tools/telegram.mdx index 0a0301789..26f7ae7e7 100644 --- a/apps/docs/content/docs/en/tools/telegram.mdx +++ b/apps/docs/content/docs/en/tools/telegram.mdx @@ -354,6 +354,7 @@ Send documents (PDF, ZIP, DOC, etc.) 
to Telegram channels or users through the T | Parameter | Type | Description | | --------- | ---- | ----------- | | `message` | string | Success or error message | +| `files` | file[] | Files attached to the message | | `data` | object | Telegram message data including document | | ↳ `message_id` | number | Unique Telegram message identifier | | ↳ `from` | object | Information about the sender | diff --git a/apps/docs/content/docs/en/tools/textract.mdx b/apps/docs/content/docs/en/tools/textract.mdx index 624e4e4fe..41a14abfd 100644 --- a/apps/docs/content/docs/en/tools/textract.mdx +++ b/apps/docs/content/docs/en/tools/textract.mdx @@ -6,7 +6,7 @@ description: Extract text, tables, and forms from documents import { BlockInfoCard } from "@/components/ui/block-info-card" @@ -35,8 +35,6 @@ Integrate AWS Textract into your workflow to extract text, tables, forms, and ke ### `textract_parser` -Parse documents using AWS Textract OCR and document analysis - #### Input | Parameter | Type | Required | Description | @@ -46,8 +44,8 @@ Parse documents using AWS Textract OCR and document analysis | `region` | string | Yes | AWS region for Textract service \(e.g., us-east-1\) | | `processingMode` | string | No | Document type: single-page or multi-page. Defaults to single-page. | | `filePath` | string | No | URL to a document to be processed \(JPEG, PNG, or single-page PDF\). | +| `file` | file | No | Document file to be processed \(JPEG, PNG, or single-page PDF\). | | `s3Uri` | string | No | S3 URI for multi-page processing \(s3://bucket/key\). | -| `fileUpload` | object | No | File upload data from file-upload component | | `featureTypes` | array | No | Feature types to detect: TABLES, FORMS, QUERIES, SIGNATURES, LAYOUT. If not specified, only text detection is performed. | | `items` | string | No | Feature type | | `queries` | array | No | Custom queries to extract specific information. Only used when featureTypes includes QUERIES. 
| @@ -58,39 +56,6 @@ Parse documents using AWS Textract OCR and document analysis #### Output -| Parameter | Type | Description | -| --------- | ---- | ----------- | -| `blocks` | array | Array of Block objects containing detected text, tables, forms, and other elements | -| ↳ `BlockType` | string | Type of block \(PAGE, LINE, WORD, TABLE, CELL, KEY_VALUE_SET, etc.\) | -| ↳ `Id` | string | Unique identifier for the block | -| ↳ `Text` | string | The text content \(for LINE and WORD blocks\) | -| ↳ `TextType` | string | Type of text \(PRINTED or HANDWRITING\) | -| ↳ `Confidence` | number | Confidence score \(0-100\) | -| ↳ `Page` | number | Page number | -| ↳ `Geometry` | object | Location and bounding box information | -| ↳ `BoundingBox` | object | Height as ratio of document height | -| ↳ `Height` | number | Height as ratio of document height | -| ↳ `Left` | number | Left position as ratio of document width | -| ↳ `Top` | number | Top position as ratio of document height | -| ↳ `Width` | number | Width as ratio of document width | -| ↳ `Polygon` | array | Polygon coordinates | -| ↳ `X` | number | X coordinate | -| ↳ `Y` | number | Y coordinate | -| ↳ `Relationships` | array | Relationships to other blocks | -| ↳ `Type` | string | Relationship type \(CHILD, VALUE, ANSWER, etc.\) | -| ↳ `Ids` | array | IDs of related blocks | -| ↳ `EntityTypes` | array | Entity types for KEY_VALUE_SET \(KEY or VALUE\) | -| ↳ `SelectionStatus` | string | For checkboxes: SELECTED or NOT_SELECTED | -| ↳ `RowIndex` | number | Row index for table cells | -| ↳ `ColumnIndex` | number | Column index for table cells | -| ↳ `RowSpan` | number | Row span for merged cells | -| ↳ `ColumnSpan` | number | Column span for merged cells | -| ↳ `Query` | object | Query information for QUERY blocks | -| ↳ `Text` | string | Query text | -| ↳ `Alias` | string | Query alias | -| ↳ `Pages` | array | Pages to search | -| `documentMetadata` | object | Metadata about the analyzed document | -| ↳ `pages` | 
number | Number of pages in the document | -| `modelVersion` | string | Version of the Textract model used for processing | +This tool does not produce any outputs. diff --git a/apps/docs/content/docs/en/tools/twilio_voice.mdx b/apps/docs/content/docs/en/tools/twilio_voice.mdx index 6eb2e716f..35b70da38 100644 --- a/apps/docs/content/docs/en/tools/twilio_voice.mdx +++ b/apps/docs/content/docs/en/tools/twilio_voice.mdx @@ -122,6 +122,7 @@ Retrieve call recording information and transcription (if enabled via TwiML). | `channels` | number | Number of channels \(1 for mono, 2 for dual\) | | `source` | string | How the recording was created | | `mediaUrl` | string | URL to download the recording media file | +| `file` | file | Downloaded recording media file | | `price` | string | Cost of the recording | | `priceUnit` | string | Currency of the price | | `uri` | string | Relative URI of the recording resource | diff --git a/apps/docs/content/docs/en/tools/typeform.mdx b/apps/docs/content/docs/en/tools/typeform.mdx index 99998842d..dc364b260 100644 --- a/apps/docs/content/docs/en/tools/typeform.mdx +++ b/apps/docs/content/docs/en/tools/typeform.mdx @@ -75,6 +75,7 @@ Download files uploaded in Typeform responses | Parameter | Type | Description | | --------- | ---- | ----------- | | `fileUrl` | string | Direct download URL for the uploaded file | +| `file` | file | Downloaded file stored in execution files | | `contentType` | string | MIME type of the uploaded file | | `filename` | string | Original filename of the uploaded file | diff --git a/apps/docs/content/docs/en/tools/video_generator.mdx b/apps/docs/content/docs/en/tools/video_generator.mdx index 437bb2dd6..a33492c95 100644 --- a/apps/docs/content/docs/en/tools/video_generator.mdx +++ b/apps/docs/content/docs/en/tools/video_generator.mdx @@ -57,14 +57,14 @@ Generate videos using Runway Gen-4 with world consistency and visual references | `duration` | number | No | Video duration in seconds \(5 or 10, default: 5\) | 
| `aspectRatio` | string | No | Aspect ratio: 16:9 \(landscape\), 9:16 \(portrait\), or 1:1 \(square\) | | `resolution` | string | No | Video resolution \(720p output\). Note: Gen-4 Turbo outputs at 720p natively | -| `visualReference` | json | Yes | Reference image REQUIRED for Gen-4 \(UserFile object\). Gen-4 only supports image-to-video, not text-only generation | +| `visualReference` | file | Yes | Reference image REQUIRED for Gen-4 \(UserFile object\). Gen-4 only supports image-to-video, not text-only generation | #### Output | Parameter | Type | Description | | --------- | ---- | ----------- | | `videoUrl` | string | Generated video URL | -| `videoFile` | json | Video file object with metadata | +| `videoFile` | file | Video file object with metadata | | `duration` | number | Video duration in seconds | | `width` | number | Video width in pixels | | `height` | number | Video height in pixels | @@ -93,7 +93,7 @@ Generate videos using Google Veo 3/3.1 with native audio generation | Parameter | Type | Description | | --------- | ---- | ----------- | | `videoUrl` | string | Generated video URL | -| `videoFile` | json | Video file object with metadata | +| `videoFile` | file | Video file object with metadata | | `duration` | number | Video duration in seconds | | `width` | number | Video width in pixels | | `height` | number | Video height in pixels | @@ -123,7 +123,7 @@ Generate videos using Luma Dream Machine with advanced camera controls | Parameter | Type | Description | | --------- | ---- | ----------- | | `videoUrl` | string | Generated video URL | -| `videoFile` | json | Video file object with metadata | +| `videoFile` | file | Video file object with metadata | | `duration` | number | Video duration in seconds | | `width` | number | Video width in pixels | | `height` | number | Video height in pixels | @@ -151,7 +151,7 @@ Generate videos using MiniMax Hailuo through MiniMax Platform API with advanced | Parameter | Type | Description | | --------- | ---- | 
----------- | | `videoUrl` | string | Generated video URL | -| `videoFile` | json | Video file object with metadata | +| `videoFile` | file | Video file object with metadata | | `duration` | number | Video duration in seconds | | `width` | number | Video width in pixels | | `height` | number | Video height in pixels | @@ -181,7 +181,7 @@ Generate videos using Fal.ai platform with access to multiple models including V | Parameter | Type | Description | | --------- | ---- | ----------- | | `videoUrl` | string | Generated video URL | -| `videoFile` | json | Video file object with metadata | +| `videoFile` | file | Video file object with metadata | | `duration` | number | Video duration in seconds | | `width` | number | Video width in pixels | | `height` | number | Video height in pixels | diff --git a/apps/docs/content/docs/en/tools/vision.mdx b/apps/docs/content/docs/en/tools/vision.mdx index 9ba14b5c4..af3b052f4 100644 --- a/apps/docs/content/docs/en/tools/vision.mdx +++ b/apps/docs/content/docs/en/tools/vision.mdx @@ -6,7 +6,7 @@ description: Analyze images with vision models import { BlockInfoCard } from "@/components/ui/block-info-card" @@ -35,8 +35,6 @@ Integrate Vision into the workflow. Can analyze images with vision models. ### `vision_tool` -Process and analyze images using advanced vision models. Capable of understanding image content, extracting text, identifying objects, and providing detailed visual descriptions. - #### Input | Parameter | Type | Required | Description | @@ -49,14 +47,6 @@ Process and analyze images using advanced vision models. 
Capable of understandin #### Output -| Parameter | Type | Description | -| --------- | ---- | ----------- | -| `content` | string | The analyzed content and description of the image | -| `model` | string | The vision model that was used for analysis | -| `tokens` | number | Total tokens used for the analysis | -| `usage` | object | Detailed token usage breakdown | -| ↳ `input_tokens` | number | Tokens used for input processing | -| ↳ `output_tokens` | number | Tokens used for response generation | -| ↳ `total_tokens` | number | Total tokens consumed | +This tool does not produce any outputs. diff --git a/apps/docs/content/docs/en/tools/zoom.mdx b/apps/docs/content/docs/en/tools/zoom.mdx index 8e5a4c238..4926fd249 100644 --- a/apps/docs/content/docs/en/tools/zoom.mdx +++ b/apps/docs/content/docs/en/tools/zoom.mdx @@ -335,6 +335,7 @@ Get all recordings for a specific Zoom meeting | `meetingId` | string | Yes | The meeting ID or meeting UUID \(e.g., "1234567890" or "4444AAABBBccccc12345=="\) | | `includeFolderItems` | boolean | No | Include items within a folder | | `ttl` | number | No | Time to live for download URLs in seconds \(max 604800\) | +| `downloadFiles` | boolean | No | Download recording files into file outputs | #### Output @@ -364,6 +365,7 @@ Get all recordings for a specific Zoom meeting | ↳ `download_url` | string | URL to download the recording | | ↳ `status` | string | Recording status | | ↳ `recording_type` | string | Type of recording \(shared_screen, audio_only, etc.\) | +| `files` | file[] | Downloaded recording files | ### `zoom_delete_recording` diff --git a/apps/docs/content/docs/es/tools/file.mdx b/apps/docs/content/docs/es/tools/file.mdx index f6c066645..094f34455 100644 --- a/apps/docs/content/docs/es/tools/file.mdx +++ b/apps/docs/content/docs/es/tools/file.mdx @@ -6,7 +6,7 @@ description: Leer y analizar múltiples archivos import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git 
a/apps/docs/content/docs/es/tools/fireflies.mdx b/apps/docs/content/docs/es/tools/fireflies.mdx index 01aa1fe1d..bbfe70c8e 100644 --- a/apps/docs/content/docs/es/tools/fireflies.mdx +++ b/apps/docs/content/docs/es/tools/fireflies.mdx @@ -6,7 +6,7 @@ description: Interactúa con transcripciones y grabaciones de reuniones de Firef import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/docs/content/docs/es/tools/mistral_parse.mdx b/apps/docs/content/docs/es/tools/mistral_parse.mdx index 62c0922f2..f0c3ff445 100644 --- a/apps/docs/content/docs/es/tools/mistral_parse.mdx +++ b/apps/docs/content/docs/es/tools/mistral_parse.mdx @@ -6,7 +6,7 @@ description: Extraer texto de documentos PDF import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/docs/content/docs/fr/tools/file.mdx b/apps/docs/content/docs/fr/tools/file.mdx index f08cac941..eadc453f2 100644 --- a/apps/docs/content/docs/fr/tools/file.mdx +++ b/apps/docs/content/docs/fr/tools/file.mdx @@ -6,7 +6,7 @@ description: Lire et analyser plusieurs fichiers import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/docs/content/docs/fr/tools/fireflies.mdx b/apps/docs/content/docs/fr/tools/fireflies.mdx index 4b92f437d..72655a1c4 100644 --- a/apps/docs/content/docs/fr/tools/fireflies.mdx +++ b/apps/docs/content/docs/fr/tools/fireflies.mdx @@ -7,7 +7,7 @@ description: Interagissez avec les transcriptions et enregistrements de réunion import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/docs/content/docs/fr/tools/mistral_parse.mdx b/apps/docs/content/docs/fr/tools/mistral_parse.mdx index f89f36f64..107e68177 100644 --- a/apps/docs/content/docs/fr/tools/mistral_parse.mdx +++ b/apps/docs/content/docs/fr/tools/mistral_parse.mdx @@ -6,7 +6,7 @@ description: Extraire du texte à partir de documents PDF import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/docs/content/docs/ja/tools/file.mdx 
b/apps/docs/content/docs/ja/tools/file.mdx index 931d5a28b..0afffa948 100644 --- a/apps/docs/content/docs/ja/tools/file.mdx +++ b/apps/docs/content/docs/ja/tools/file.mdx @@ -6,7 +6,7 @@ description: 複数のファイルを読み込んで解析する import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/docs/content/docs/ja/tools/fireflies.mdx b/apps/docs/content/docs/ja/tools/fireflies.mdx index 03cadf5e2..5fd999bd1 100644 --- a/apps/docs/content/docs/ja/tools/fireflies.mdx +++ b/apps/docs/content/docs/ja/tools/fireflies.mdx @@ -6,7 +6,7 @@ description: Fireflies.aiの会議文字起こしと録画を操作 import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/docs/content/docs/ja/tools/mistral_parse.mdx b/apps/docs/content/docs/ja/tools/mistral_parse.mdx index c18152932..04f90f12f 100644 --- a/apps/docs/content/docs/ja/tools/mistral_parse.mdx +++ b/apps/docs/content/docs/ja/tools/mistral_parse.mdx @@ -6,7 +6,7 @@ description: PDFドキュメントからテキストを抽出する import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/docs/content/docs/zh/tools/file.mdx b/apps/docs/content/docs/zh/tools/file.mdx index 479a8ea88..153c0d31e 100644 --- a/apps/docs/content/docs/zh/tools/file.mdx +++ b/apps/docs/content/docs/zh/tools/file.mdx @@ -6,7 +6,7 @@ description: 读取并解析多个文件 import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/docs/content/docs/zh/tools/fireflies.mdx b/apps/docs/content/docs/zh/tools/fireflies.mdx index d8a8b6436..8f94b3e83 100644 --- a/apps/docs/content/docs/zh/tools/fireflies.mdx +++ b/apps/docs/content/docs/zh/tools/fireflies.mdx @@ -6,7 +6,7 @@ description: 与 Fireflies.ai 会议转录和录音进行交互 import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/docs/content/docs/zh/tools/mistral_parse.mdx b/apps/docs/content/docs/zh/tools/mistral_parse.mdx index 0f4aaa272..0821e4c09 100644 --- a/apps/docs/content/docs/zh/tools/mistral_parse.mdx +++ b/apps/docs/content/docs/zh/tools/mistral_parse.mdx @@ -6,7 +6,7 @@ 
description: 从 PDF 文档中提取文本 import { BlockInfoCard } from "@/components/ui/block-info-card" diff --git a/apps/sim/app/(auth)/components/support-footer.tsx b/apps/sim/app/(auth)/components/support-footer.tsx index 057334ee5..46614070b 100644 --- a/apps/sim/app/(auth)/components/support-footer.tsx +++ b/apps/sim/app/(auth)/components/support-footer.tsx @@ -1,7 +1,7 @@ 'use client' -import { useBrandConfig } from '@/lib/branding/branding' import { inter } from '@/app/_styles/fonts/inter/inter' +import { useBrandConfig } from '@/ee/whitelabeling' export interface SupportFooterProps { /** Position style - 'fixed' for pages without AuthLayout, 'absolute' for pages with AuthLayout */ diff --git a/apps/sim/app/(landing)/components/nav/nav.tsx b/apps/sim/app/(landing)/components/nav/nav.tsx index 0a72d5b49..5b60d773d 100644 --- a/apps/sim/app/(landing)/components/nav/nav.tsx +++ b/apps/sim/app/(landing)/components/nav/nav.tsx @@ -7,10 +7,10 @@ import Image from 'next/image' import Link from 'next/link' import { useRouter } from 'next/navigation' import { GithubIcon } from '@/components/icons' -import { useBrandConfig } from '@/lib/branding/branding' import { isHosted } from '@/lib/core/config/feature-flags' import { soehne } from '@/app/_styles/fonts/soehne/soehne' import { getFormattedGitHubStars } from '@/app/(landing)/actions/github' +import { useBrandConfig } from '@/ee/whitelabeling' import { useBrandedButtonClass } from '@/hooks/use-branded-button-class' const logger = createLogger('nav') diff --git a/apps/sim/app/api/a2a/serve/[agentId]/route.ts b/apps/sim/app/api/a2a/serve/[agentId]/route.ts index f8acda5a8..cfc9f06ff 100644 --- a/apps/sim/app/api/a2a/serve/[agentId]/route.ts +++ b/apps/sim/app/api/a2a/serve/[agentId]/route.ts @@ -14,7 +14,6 @@ import { parseWorkflowSSEChunk, } from '@/lib/a2a/utils' import { checkHybridAuth } from '@/lib/auth/hybrid' -import { getBrandConfig } from '@/lib/branding/branding' import { acquireLock, getRedisClient, releaseLock } from 
'@/lib/core/config/redis' import { validateUrlWithDNS } from '@/lib/core/security/input-validation.server' import { SSE_HEADERS } from '@/lib/core/utils/sse' @@ -35,6 +34,7 @@ import { type PushNotificationSetParams, type TaskIdParams, } from '@/app/api/a2a/serve/[agentId]/utils' +import { getBrandConfig } from '@/ee/whitelabeling' const logger = createLogger('A2AServeAPI') diff --git a/apps/sim/app/api/tools/confluence/attachments/route.ts b/apps/sim/app/api/tools/confluence/attachments/route.ts index cba790a10..4d25840bd 100644 --- a/apps/sim/app/api/tools/confluence/attachments/route.ts +++ b/apps/sim/app/api/tools/confluence/attachments/route.ts @@ -21,7 +21,8 @@ export async function GET(request: NextRequest) { const accessToken = searchParams.get('accessToken') const pageId = searchParams.get('pageId') const providedCloudId = searchParams.get('cloudId') - const limit = searchParams.get('limit') || '25' + const limit = searchParams.get('limit') || '50' + const cursor = searchParams.get('cursor') if (!domain) { return NextResponse.json({ error: 'Domain is required' }, { status: 400 }) @@ -47,7 +48,12 @@ export async function GET(request: NextRequest) { return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) } - const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}/attachments?limit=${limit}` + const queryParams = new URLSearchParams() + queryParams.append('limit', String(Math.min(Number(limit), 250))) + if (cursor) { + queryParams.append('cursor', cursor) + } + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}/attachments?${queryParams.toString()}` const response = await fetch(url, { method: 'GET', @@ -77,9 +83,20 @@ export async function GET(request: NextRequest) { fileSize: attachment.fileSize || 0, mediaType: attachment.mediaType || '', downloadUrl: attachment.downloadLink || attachment._links?.download || '', + status: attachment.status ?? 
null, + webuiUrl: attachment._links?.webui ?? null, + pageId: attachment.pageId ?? null, + blogPostId: attachment.blogPostId ?? null, + comment: attachment.comment ?? null, + version: attachment.version ?? null, })) - return NextResponse.json({ attachments }) + return NextResponse.json({ + attachments, + nextCursor: data._links?.next + ? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor') + : null, + }) } catch (error) { logger.error('Error listing Confluence attachments:', error) return NextResponse.json( diff --git a/apps/sim/app/api/tools/confluence/blogposts/route.ts b/apps/sim/app/api/tools/confluence/blogposts/route.ts new file mode 100644 index 000000000..c186d5ca5 --- /dev/null +++ b/apps/sim/app/api/tools/confluence/blogposts/route.ts @@ -0,0 +1,285 @@ +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' +import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation' +import { getConfluenceCloudId } from '@/tools/confluence/utils' + +const logger = createLogger('ConfluenceBlogPostsAPI') + +export const dynamic = 'force-dynamic' + +const getBlogPostSchema = z + .object({ + domain: z.string().min(1, 'Domain is required'), + accessToken: z.string().min(1, 'Access token is required'), + cloudId: z.string().optional(), + blogPostId: z.string().min(1, 'Blog post ID is required'), + bodyFormat: z.string().optional(), + }) + .refine( + (data) => { + const validation = validateAlphanumericId(data.blogPostId, 'blogPostId', 255) + return validation.isValid + }, + (data) => { + const validation = validateAlphanumericId(data.blogPostId, 'blogPostId', 255) + return { message: validation.error || 'Invalid blog post ID', path: ['blogPostId'] } + } + ) + +const createBlogPostSchema = z.object({ + domain: z.string().min(1, 'Domain is required'), + accessToken: 
z.string().min(1, 'Access token is required'), + cloudId: z.string().optional(), + spaceId: z.string().min(1, 'Space ID is required'), + title: z.string().min(1, 'Title is required'), + content: z.string().min(1, 'Content is required'), + status: z.enum(['current', 'draft']).optional(), +}) + +/** + * List blog posts (retrieving a single blog post by ID is handled by the POST handler) + */ +export async function GET(request: NextRequest) { + try { + const auth = await checkSessionOrInternalAuth(request) + if (!auth.success || !auth.userId) { + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + const { searchParams } = new URL(request.url) + const domain = searchParams.get('domain') + const accessToken = searchParams.get('accessToken') + const providedCloudId = searchParams.get('cloudId') + const limit = searchParams.get('limit') || '25' + const status = searchParams.get('status') + const sortOrder = searchParams.get('sort') + const cursor = searchParams.get('cursor') + + if (!domain) { + return NextResponse.json({ error: 'Domain is required' }, { status: 400 }) + } + + if (!accessToken) { + return NextResponse.json({ error: 'Access token is required' }, { status: 400 }) + } + + const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken)) + + const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId') + if (!cloudIdValidation.isValid) { + return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) + } + + const queryParams = new URLSearchParams() + queryParams.append('limit', String(Math.min(Number(limit), 250))) + + if (status) { + queryParams.append('status', status) + } + + if (sortOrder) { + queryParams.append('sort', sortOrder) + } + + if (cursor) { + queryParams.append('cursor', cursor) + } + + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/blogposts?${queryParams.toString()}` + + const response = await fetch(url, { + method: 'GET', + headers: { + Accept: 'application/json', +
Authorization: `Bearer ${accessToken}`, + }, + }) + + if (!response.ok) { + const errorData = await response.json().catch(() => null) + logger.error('Confluence API error response:', { + status: response.status, + statusText: response.statusText, + error: JSON.stringify(errorData, null, 2), + }) + const errorMessage = errorData?.message || `Failed to list blog posts (${response.status})` + return NextResponse.json({ error: errorMessage }, { status: response.status }) + } + + const data = await response.json() + + const blogPosts = (data.results || []).map((post: any) => ({ + id: post.id, + title: post.title, + status: post.status ?? null, + spaceId: post.spaceId ?? null, + authorId: post.authorId ?? null, + createdAt: post.createdAt ?? null, + version: post.version ?? null, + webUrl: post._links?.webui ?? null, + })) + + return NextResponse.json({ + blogPosts, + nextCursor: data._links?.next + ? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor') + : null, + }) + } catch (error) { + logger.error('Error listing blog posts:', error) + return NextResponse.json( + { error: (error as Error).message || 'Internal server error' }, + { status: 500 } + ) + } +} + +/** + * Create a blog post, or get a specific blog post by ID (dispatched on request body shape) + */ +export async function POST(request: NextRequest) { + try { + const auth = await checkSessionOrInternalAuth(request) + if (!auth.success || !auth.userId) { + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + const body = await request.json() + + // Check if this is a create or get request + if (body.title && body.content && body.spaceId) { + // Create blog post + const validation = createBlogPostSchema.safeParse(body) + if (!validation.success) { + const firstError = validation.error.errors[0] + return NextResponse.json({ error: firstError.message }, { status: 400 }) + } + + const { + domain, + accessToken, + cloudId: providedCloudId, + spaceId, + title, + content, + status, + } = validation.data + + const
cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken)) + + const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId') + if (!cloudIdValidation.isValid) { + return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) + } + + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/blogposts` + + const createBody = { + spaceId, + status: status || 'current', + title, + body: { + representation: 'storage', + value: content, + }, + } + + const response = await fetch(url, { + method: 'POST', + headers: { + Accept: 'application/json', + 'Content-Type': 'application/json', + Authorization: `Bearer ${accessToken}`, + }, + body: JSON.stringify(createBody), + }) + + if (!response.ok) { + const errorData = await response.json().catch(() => null) + logger.error('Confluence API error response:', { + status: response.status, + statusText: response.statusText, + error: JSON.stringify(errorData, null, 2), + }) + const errorMessage = errorData?.message || `Failed to create blog post (${response.status})` + return NextResponse.json({ error: errorMessage }, { status: response.status }) + } + + const data = await response.json() + return NextResponse.json({ + id: data.id, + title: data.title, + spaceId: data.spaceId, + webUrl: data._links?.webui ?? 
null, + }) + } + // Get blog post by ID + const validation = getBlogPostSchema.safeParse(body) + if (!validation.success) { + const firstError = validation.error.errors[0] + return NextResponse.json({ error: firstError.message }, { status: 400 }) + } + + const { + domain, + accessToken, + cloudId: providedCloudId, + blogPostId, + bodyFormat, + } = validation.data + + const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken)) + + const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId') + if (!cloudIdValidation.isValid) { + return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) + } + + const queryParams = new URLSearchParams() + if (bodyFormat) { + queryParams.append('body-format', bodyFormat) + } + + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/blogposts/${blogPostId}${queryParams.toString() ? `?${queryParams.toString()}` : ''}` + + const response = await fetch(url, { + method: 'GET', + headers: { + Accept: 'application/json', + Authorization: `Bearer ${accessToken}`, + }, + }) + + if (!response.ok) { + const errorData = await response.json().catch(() => null) + logger.error('Confluence API error response:', { + status: response.status, + statusText: response.statusText, + error: JSON.stringify(errorData, null, 2), + }) + const errorMessage = errorData?.message || `Failed to get blog post (${response.status})` + return NextResponse.json({ error: errorMessage }, { status: response.status }) + } + + const data = await response.json() + return NextResponse.json({ + id: data.id, + title: data.title, + status: data.status ?? null, + spaceId: data.spaceId ?? null, + authorId: data.authorId ?? null, + createdAt: data.createdAt ?? null, + version: data.version ?? null, + body: data.body ?? null, + webUrl: data._links?.webui ?? 
null, + }) + } catch (error) { + logger.error('Error with blog post operation:', error) + return NextResponse.json( + { error: (error as Error).message || 'Internal server error' }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/tools/confluence/comments/route.ts b/apps/sim/app/api/tools/confluence/comments/route.ts index 1c50aa0c0..8c7b03ac0 100644 --- a/apps/sim/app/api/tools/confluence/comments/route.ts +++ b/apps/sim/app/api/tools/confluence/comments/route.ts @@ -105,6 +105,8 @@ export async function GET(request: NextRequest) { const pageId = searchParams.get('pageId') const providedCloudId = searchParams.get('cloudId') const limit = searchParams.get('limit') || '25' + const bodyFormat = searchParams.get('bodyFormat') || 'storage' + const cursor = searchParams.get('cursor') if (!domain) { return NextResponse.json({ error: 'Domain is required' }, { status: 400 }) @@ -130,7 +132,13 @@ export async function GET(request: NextRequest) { return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) } - const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}/footer-comments?limit=${limit}` + const queryParams = new URLSearchParams() + queryParams.append('limit', String(Math.min(Number(limit), 250))) + queryParams.append('body-format', bodyFormat) + if (cursor) { + queryParams.append('cursor', cursor) + } + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}/footer-comments?${queryParams.toString()}` const response = await fetch(url, { method: 'GET', @@ -154,14 +162,31 @@ export async function GET(request: NextRequest) { const data = await response.json() - const comments = (data.results || []).map((comment: any) => ({ - id: comment.id, - body: comment.body?.storage?.value || comment.body?.view?.value || '', - createdAt: comment.createdAt || '', - authorId: comment.authorId || '', - })) + const comments = (data.results || []).map((comment: any) => { + const bodyValue 
= comment.body?.storage?.value || comment.body?.view?.value || '' + return { + id: comment.id, + body: { + value: bodyValue, + representation: bodyFormat, + }, + createdAt: comment.createdAt || '', + authorId: comment.authorId || '', + status: comment.status ?? null, + title: comment.title ?? null, + pageId: comment.pageId ?? null, + blogPostId: comment.blogPostId ?? null, + parentCommentId: comment.parentCommentId ?? null, + version: comment.version ?? null, + } + }) - return NextResponse.json({ comments }) + return NextResponse.json({ + comments, + nextCursor: data._links?.next + ? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor') + : null, + }) } catch (error) { logger.error('Error listing Confluence comments:', error) return NextResponse.json( diff --git a/apps/sim/app/api/tools/confluence/labels/route.ts b/apps/sim/app/api/tools/confluence/labels/route.ts index d008c3d55..ac5eb176a 100644 --- a/apps/sim/app/api/tools/confluence/labels/route.ts +++ b/apps/sim/app/api/tools/confluence/labels/route.ts @@ -22,6 +22,7 @@ export async function POST(request: NextRequest) { cloudId: providedCloudId, pageId, labelName, + prefix: labelPrefix, } = await request.json() if (!domain) { @@ -52,12 +53,14 @@ export async function POST(request: NextRequest) { return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) } - const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}/labels` + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/rest/api/content/${pageId}/label` - const body = { - prefix: 'global', - name: labelName, - } + const body = [ + { + prefix: labelPrefix || 'global', + name: labelName, + }, + ] const response = await fetch(url, { method: 'POST', @@ -82,7 +85,14 @@ export async function POST(request: NextRequest) { } const data = await response.json() - return NextResponse.json({ ...data, pageId, labelName }) + const addedLabel = data.results?.[0] || data[0] || data 
+ return NextResponse.json({ + id: addedLabel.id ?? '', + name: addedLabel.name ?? labelName, + prefix: addedLabel.prefix ?? labelPrefix ?? 'global', + pageId, + labelName, + }) } catch (error) { logger.error('Error adding Confluence label:', error) return NextResponse.json( @@ -105,6 +115,8 @@ export async function GET(request: NextRequest) { const accessToken = searchParams.get('accessToken') const pageId = searchParams.get('pageId') const providedCloudId = searchParams.get('cloudId') + const limit = searchParams.get('limit') || '25' + const cursor = searchParams.get('cursor') if (!domain) { return NextResponse.json({ error: 'Domain is required' }, { status: 400 }) @@ -130,7 +142,12 @@ export async function GET(request: NextRequest) { return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) } - const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}/labels` + const queryParams = new URLSearchParams() + queryParams.append('limit', String(Math.min(Number(limit), 250))) + if (cursor) { + queryParams.append('cursor', cursor) + } + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}/labels?${queryParams.toString()}` const response = await fetch(url, { method: 'GET', @@ -160,7 +177,12 @@ export async function GET(request: NextRequest) { prefix: label.prefix || 'global', })) - return NextResponse.json({ labels }) + return NextResponse.json({ + labels, + nextCursor: data._links?.next + ? 
new URL(data._links.next, 'https://placeholder').searchParams.get('cursor') + : null, + }) } catch (error) { logger.error('Error listing Confluence labels:', error) return NextResponse.json( diff --git a/apps/sim/app/api/tools/confluence/page-ancestors/route.ts b/apps/sim/app/api/tools/confluence/page-ancestors/route.ts new file mode 100644 index 000000000..743cce75a --- /dev/null +++ b/apps/sim/app/api/tools/confluence/page-ancestors/route.ts @@ -0,0 +1,96 @@ +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' +import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation' +import { getConfluenceCloudId } from '@/tools/confluence/utils' + +const logger = createLogger('ConfluencePageAncestorsAPI') + +export const dynamic = 'force-dynamic' + +/** + * Get ancestors (parent pages) of a specific Confluence page. + * Uses GET /wiki/api/v2/pages/{id}/ancestors + */ +export async function POST(request: NextRequest) { + try { + const auth = await checkSessionOrInternalAuth(request) + if (!auth.success || !auth.userId) { + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + const body = await request.json() + const { domain, accessToken, pageId, cloudId: providedCloudId, limit = 25 } = body + + if (!domain) { + return NextResponse.json({ error: 'Domain is required' }, { status: 400 }) + } + + if (!accessToken) { + return NextResponse.json({ error: 'Access token is required' }, { status: 400 }) + } + + if (!pageId) { + return NextResponse.json({ error: 'Page ID is required' }, { status: 400 }) + } + + const pageIdValidation = validateAlphanumericId(pageId, 'pageId', 255) + if (!pageIdValidation.isValid) { + return NextResponse.json({ error: pageIdValidation.error }, { status: 400 }) + } + + const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken)) + + const 
cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId') + if (!cloudIdValidation.isValid) { + return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) + } + + const queryParams = new URLSearchParams() + queryParams.append('limit', String(Math.min(limit, 250))) + + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}/ancestors?${queryParams.toString()}` + + logger.info(`Fetching ancestors for page ${pageId}`) + + const response = await fetch(url, { + method: 'GET', + headers: { + Accept: 'application/json', + Authorization: `Bearer ${accessToken}`, + }, + }) + + if (!response.ok) { + const errorData = await response.json().catch(() => null) + logger.error('Confluence API error response:', { + status: response.status, + statusText: response.statusText, + error: JSON.stringify(errorData, null, 2), + }) + const errorMessage = errorData?.message || `Failed to get page ancestors (${response.status})` + return NextResponse.json({ error: errorMessage }, { status: response.status }) + } + + const data = await response.json() + + const ancestors = (data.results || []).map((page: any) => ({ + id: page.id, + title: page.title, + status: page.status ?? null, + spaceId: page.spaceId ?? null, + webUrl: page._links?.webui ?? 
null, + })) + + return NextResponse.json({ + ancestors, + pageId, + }) + } catch (error) { + logger.error('Error getting page ancestors:', error) + return NextResponse.json( + { error: (error as Error).message || 'Internal server error' }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/tools/confluence/page-children/route.ts b/apps/sim/app/api/tools/confluence/page-children/route.ts new file mode 100644 index 000000000..7cd7a41bd --- /dev/null +++ b/apps/sim/app/api/tools/confluence/page-children/route.ts @@ -0,0 +1,104 @@ +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' +import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation' +import { getConfluenceCloudId } from '@/tools/confluence/utils' + +const logger = createLogger('ConfluencePageChildrenAPI') + +export const dynamic = 'force-dynamic' + +/** + * Get child pages of a specific Confluence page. 
+ * Uses GET /wiki/api/v2/pages/{id}/children + */ +export async function POST(request: NextRequest) { + try { + const auth = await checkSessionOrInternalAuth(request) + if (!auth.success || !auth.userId) { + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + const body = await request.json() + const { domain, accessToken, pageId, cloudId: providedCloudId, limit = 50, cursor } = body + + if (!domain) { + return NextResponse.json({ error: 'Domain is required' }, { status: 400 }) + } + + if (!accessToken) { + return NextResponse.json({ error: 'Access token is required' }, { status: 400 }) + } + + if (!pageId) { + return NextResponse.json({ error: 'Page ID is required' }, { status: 400 }) + } + + const pageIdValidation = validateAlphanumericId(pageId, 'pageId', 255) + if (!pageIdValidation.isValid) { + return NextResponse.json({ error: pageIdValidation.error }, { status: 400 }) + } + + const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken)) + + const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId') + if (!cloudIdValidation.isValid) { + return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) + } + + const queryParams = new URLSearchParams() + queryParams.append('limit', String(Math.min(limit, 250))) + + if (cursor) { + queryParams.append('cursor', cursor) + } + + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}/children?${queryParams.toString()}` + + logger.info(`Fetching child pages for page ${pageId}`) + + const response = await fetch(url, { + method: 'GET', + headers: { + Accept: 'application/json', + Authorization: `Bearer ${accessToken}`, + }, + }) + + if (!response.ok) { + const errorData = await response.json().catch(() => null) + logger.error('Confluence API error response:', { + status: response.status, + statusText: response.statusText, + error: JSON.stringify(errorData, null, 2), + }) + const errorMessage = 
errorData?.message || `Failed to get child pages (${response.status})` + return NextResponse.json({ error: errorMessage }, { status: response.status }) + } + + const data = await response.json() + + const children = (data.results || []).map((page: any) => ({ + id: page.id, + title: page.title, + status: page.status ?? null, + spaceId: page.spaceId ?? null, + childPosition: page.childPosition ?? null, + webUrl: page._links?.webui ?? null, + })) + + return NextResponse.json({ + children, + parentId: pageId, + nextCursor: data._links?.next + ? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor') + : null, + }) + } catch (error) { + logger.error('Error getting child pages:', error) + return NextResponse.json( + { error: (error as Error).message || 'Internal server error' }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/tools/confluence/page-properties/route.ts b/apps/sim/app/api/tools/confluence/page-properties/route.ts new file mode 100644 index 000000000..f8c3ce0ee --- /dev/null +++ b/apps/sim/app/api/tools/confluence/page-properties/route.ts @@ -0,0 +1,365 @@ +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' +import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation' +import { getConfluenceCloudId } from '@/tools/confluence/utils' + +const logger = createLogger('ConfluencePagePropertiesAPI') + +export const dynamic = 'force-dynamic' + +const createPropertySchema = z.object({ + domain: z.string().min(1, 'Domain is required'), + accessToken: z.string().min(1, 'Access token is required'), + cloudId: z.string().optional(), + pageId: z.string().min(1, 'Page ID is required'), + key: z.string().min(1, 'Property key is required'), + value: z.any(), +}) + +const updatePropertySchema = z.object({ + domain: z.string().min(1, 'Domain is required'), + 
accessToken: z.string().min(1, 'Access token is required'), + cloudId: z.string().optional(), + pageId: z.string().min(1, 'Page ID is required'), + propertyId: z.string().min(1, 'Property ID is required'), + key: z.string().min(1, 'Property key is required'), + value: z.any(), + versionNumber: z.number().min(1, 'Version number is required'), +}) + +const deletePropertySchema = z.object({ + domain: z.string().min(1, 'Domain is required'), + accessToken: z.string().min(1, 'Access token is required'), + cloudId: z.string().optional(), + pageId: z.string().min(1, 'Page ID is required'), + propertyId: z.string().min(1, 'Property ID is required'), +}) + +/** + * List all content properties on a page. + */ +export async function GET(request: NextRequest) { + try { + const auth = await checkSessionOrInternalAuth(request) + if (!auth.success || !auth.userId) { + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + const { searchParams } = new URL(request.url) + const domain = searchParams.get('domain') + const accessToken = searchParams.get('accessToken') + const pageId = searchParams.get('pageId') + const providedCloudId = searchParams.get('cloudId') + const limit = searchParams.get('limit') || '50' + const cursor = searchParams.get('cursor') + + if (!domain) { + return NextResponse.json({ error: 'Domain is required' }, { status: 400 }) + } + + if (!accessToken) { + return NextResponse.json({ error: 'Access token is required' }, { status: 400 }) + } + + if (!pageId) { + return NextResponse.json({ error: 'Page ID is required' }, { status: 400 }) + } + + const pageIdValidation = validateAlphanumericId(pageId, 'pageId', 255) + if (!pageIdValidation.isValid) { + return NextResponse.json({ error: pageIdValidation.error }, { status: 400 }) + } + + const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken)) + + const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId') + if (!cloudIdValidation.isValid) { + 
return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) + } + + const queryParams = new URLSearchParams() + queryParams.append('limit', String(Math.min(Number(limit), 250))) + if (cursor) { + queryParams.append('cursor', cursor) + } + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}/properties?${queryParams.toString()}` + + const response = await fetch(url, { + method: 'GET', + headers: { + Accept: 'application/json', + Authorization: `Bearer ${accessToken}`, + }, + }) + + if (!response.ok) { + const errorData = await response.json().catch(() => null) + logger.error('Confluence API error response:', { + status: response.status, + statusText: response.statusText, + error: JSON.stringify(errorData, null, 2), + }) + const errorMessage = + errorData?.message || `Failed to list page properties (${response.status})` + return NextResponse.json({ error: errorMessage }, { status: response.status }) + } + + const data = await response.json() + + const properties = (data.results || []).map((prop: any) => ({ + id: prop.id, + key: prop.key, + value: prop.value ?? null, + version: prop.version ?? null, + })) + + return NextResponse.json({ + properties, + pageId, + nextCursor: data._links?.next + ? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor') + : null, + }) + } catch (error) { + logger.error('Error listing page properties:', error) + return NextResponse.json( + { error: (error as Error).message || 'Internal server error' }, + { status: 500 } + ) + } +} + +/** + * Create a new content property on a page. 
+ */ +export async function POST(request: NextRequest) { + try { + const auth = await checkSessionOrInternalAuth(request) + if (!auth.success || !auth.userId) { + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + const body = await request.json() + + const validation = createPropertySchema.safeParse(body) + if (!validation.success) { + const firstError = validation.error.errors[0] + return NextResponse.json({ error: firstError.message }, { status: 400 }) + } + + const { domain, accessToken, cloudId: providedCloudId, pageId, key, value } = validation.data + + const pageIdValidation = validateAlphanumericId(pageId, 'pageId', 255) + if (!pageIdValidation.isValid) { + return NextResponse.json({ error: pageIdValidation.error }, { status: 400 }) + } + + const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken)) + + const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId') + if (!cloudIdValidation.isValid) { + return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) + } + + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}/properties` + + const response = await fetch(url, { + method: 'POST', + headers: { + Accept: 'application/json', + 'Content-Type': 'application/json', + Authorization: `Bearer ${accessToken}`, + }, + body: JSON.stringify({ key, value }), + }) + + if (!response.ok) { + const errorData = await response.json().catch(() => null) + logger.error('Confluence API error response:', { + status: response.status, + statusText: response.statusText, + error: JSON.stringify(errorData, null, 2), + }) + const errorMessage = + errorData?.message || `Failed to create page property (${response.status})` + return NextResponse.json({ error: errorMessage }, { status: response.status }) + } + + const data = await response.json() + return NextResponse.json({ + id: data.id, + key: data.key, + value: data.value, + version: data.version, + 
pageId, + }) + } catch (error) { + logger.error('Error creating page property:', error) + return NextResponse.json( + { error: (error as Error).message || 'Internal server error' }, + { status: 500 } + ) + } +} + +/** + * Update a content property on a page. + */ +export async function PUT(request: NextRequest) { + try { + const auth = await checkSessionOrInternalAuth(request) + if (!auth.success || !auth.userId) { + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + const body = await request.json() + + const validation = updatePropertySchema.safeParse(body) + if (!validation.success) { + const firstError = validation.error.errors[0] + return NextResponse.json({ error: firstError.message }, { status: 400 }) + } + + const { + domain, + accessToken, + cloudId: providedCloudId, + pageId, + propertyId, + key, + value, + versionNumber, + } = validation.data + + const pageIdValidation = validateAlphanumericId(pageId, 'pageId', 255) + if (!pageIdValidation.isValid) { + return NextResponse.json({ error: pageIdValidation.error }, { status: 400 }) + } + + const propertyIdValidation = validateAlphanumericId(propertyId, 'propertyId', 255) + if (!propertyIdValidation.isValid) { + return NextResponse.json({ error: propertyIdValidation.error }, { status: 400 }) + } + + const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken)) + + const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId') + if (!cloudIdValidation.isValid) { + return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) + } + + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}/properties/${propertyId}` + + const response = await fetch(url, { + method: 'PUT', + headers: { + Accept: 'application/json', + 'Content-Type': 'application/json', + Authorization: `Bearer ${accessToken}`, + }, + body: JSON.stringify({ + key, + value, + version: { number: versionNumber }, + }), + }) + + if 
(!response.ok) { + const errorData = await response.json().catch(() => null) + logger.error('Confluence API error response:', { + status: response.status, + statusText: response.statusText, + error: JSON.stringify(errorData, null, 2), + }) + const errorMessage = + errorData?.message || `Failed to update page property (${response.status})` + return NextResponse.json({ error: errorMessage }, { status: response.status }) + } + + const data = await response.json() + return NextResponse.json({ + id: data.id, + key: data.key, + value: data.value, + version: data.version, + pageId, + }) + } catch (error) { + logger.error('Error updating page property:', error) + return NextResponse.json( + { error: (error as Error).message || 'Internal server error' }, + { status: 500 } + ) + } +} + +/** + * Delete a content property from a page. + */ +export async function DELETE(request: NextRequest) { + try { + const auth = await checkSessionOrInternalAuth(request) + if (!auth.success || !auth.userId) { + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + const body = await request.json() + + const validation = deletePropertySchema.safeParse(body) + if (!validation.success) { + const firstError = validation.error.errors[0] + return NextResponse.json({ error: firstError.message }, { status: 400 }) + } + + const { domain, accessToken, cloudId: providedCloudId, pageId, propertyId } = validation.data + + const pageIdValidation = validateAlphanumericId(pageId, 'pageId', 255) + if (!pageIdValidation.isValid) { + return NextResponse.json({ error: pageIdValidation.error }, { status: 400 }) + } + + const propertyIdValidation = validateAlphanumericId(propertyId, 'propertyId', 255) + if (!propertyIdValidation.isValid) { + return NextResponse.json({ error: propertyIdValidation.error }, { status: 400 }) + } + + const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken)) + + const cloudIdValidation = validateJiraCloudId(cloudId, 
'cloudId') + if (!cloudIdValidation.isValid) { + return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) + } + + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}/properties/${propertyId}` + + const response = await fetch(url, { + method: 'DELETE', + headers: { + Accept: 'application/json', + Authorization: `Bearer ${accessToken}`, + }, + }) + + if (!response.ok) { + const errorData = await response.json().catch(() => null) + logger.error('Confluence API error response:', { + status: response.status, + statusText: response.statusText, + error: JSON.stringify(errorData, null, 2), + }) + const errorMessage = + errorData?.message || `Failed to delete page property (${response.status})` + return NextResponse.json({ error: errorMessage }, { status: response.status }) + } + + return NextResponse.json({ propertyId, pageId, deleted: true }) + } catch (error) { + logger.error('Error deleting page property:', error) + return NextResponse.json( + { error: (error as Error).message || 'Internal server error' }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/tools/confluence/page-versions/route.ts b/apps/sim/app/api/tools/confluence/page-versions/route.ts new file mode 100644 index 000000000..9d7c16206 --- /dev/null +++ b/apps/sim/app/api/tools/confluence/page-versions/route.ts @@ -0,0 +1,151 @@ +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' +import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation' +import { getConfluenceCloudId } from '@/tools/confluence/utils' + +const logger = createLogger('ConfluencePageVersionsAPI') + +export const dynamic = 'force-dynamic' + +/** + * List all versions of a page or get a specific version. 
+ * Uses GET /wiki/api/v2/pages/{id}/versions + * and GET /wiki/api/v2/pages/{page-id}/versions/{version-number} + */ +export async function POST(request: NextRequest) { + try { + const auth = await checkSessionOrInternalAuth(request) + if (!auth.success || !auth.userId) { + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + const body = await request.json() + const { + domain, + accessToken, + pageId, + versionNumber, + cloudId: providedCloudId, + limit = 50, + cursor, + } = body + + if (!domain) { + return NextResponse.json({ error: 'Domain is required' }, { status: 400 }) + } + + if (!accessToken) { + return NextResponse.json({ error: 'Access token is required' }, { status: 400 }) + } + + if (!pageId) { + return NextResponse.json({ error: 'Page ID is required' }, { status: 400 }) + } + + const pageIdValidation = validateAlphanumericId(pageId, 'pageId', 255) + if (!pageIdValidation.isValid) { + return NextResponse.json({ error: pageIdValidation.error }, { status: 400 }) + } + + const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken)) + + const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId') + if (!cloudIdValidation.isValid) { + return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) + } + + // If versionNumber is provided, get specific version + if (versionNumber !== undefined && versionNumber !== null) { + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}/versions/${versionNumber}` + + logger.info(`Fetching version ${versionNumber} for page ${pageId}`) + + const response = await fetch(url, { + method: 'GET', + headers: { + Accept: 'application/json', + Authorization: `Bearer ${accessToken}`, + }, + }) + + if (!response.ok) { + const errorData = await response.json().catch(() => null) + logger.error('Confluence API error response:', { + status: response.status, + statusText: response.statusText, + error: 
JSON.stringify(errorData, null, 2), + }) + const errorMessage = errorData?.message || `Failed to get page version (${response.status})` + return NextResponse.json({ error: errorMessage }, { status: response.status }) + } + + const data = await response.json() + + return NextResponse.json({ + version: { + number: data.number, + message: data.message ?? null, + minorEdit: data.minorEdit ?? false, + authorId: data.authorId ?? null, + createdAt: data.createdAt ?? null, + }, + pageId, + }) + } + // List all versions + const queryParams = new URLSearchParams() + queryParams.append('limit', String(Math.min(limit, 250))) + + if (cursor) { + queryParams.append('cursor', cursor) + } + + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}/versions?${queryParams.toString()}` + + logger.info(`Fetching versions for page ${pageId}`) + + const response = await fetch(url, { + method: 'GET', + headers: { + Accept: 'application/json', + Authorization: `Bearer ${accessToken}`, + }, + }) + + if (!response.ok) { + const errorData = await response.json().catch(() => null) + logger.error('Confluence API error response:', { + status: response.status, + statusText: response.statusText, + error: JSON.stringify(errorData, null, 2), + }) + const errorMessage = errorData?.message || `Failed to list page versions (${response.status})` + return NextResponse.json({ error: errorMessage }, { status: response.status }) + } + + const data = await response.json() + + const versions = (data.results || []).map((version: any) => ({ + number: version.number, + message: version.message ?? null, + minorEdit: version.minorEdit ?? false, + authorId: version.authorId ?? null, + createdAt: version.createdAt ?? null, + })) + + return NextResponse.json({ + versions, + pageId, + nextCursor: data._links?.next + ? 
new URL(data._links.next, 'https://placeholder').searchParams.get('cursor') + : null, + }) + } catch (error) { + logger.error('Error with page versions:', error) + return NextResponse.json( + { error: (error as Error).message || 'Internal server error' }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/tools/confluence/page/route.ts b/apps/sim/app/api/tools/confluence/page/route.ts index 9cc83914e..232e453a9 100644 --- a/apps/sim/app/api/tools/confluence/page/route.ts +++ b/apps/sim/app/api/tools/confluence/page/route.ts @@ -62,6 +62,7 @@ const deletePageSchema = z accessToken: z.string().min(1, 'Access token is required'), cloudId: z.string().optional(), pageId: z.string().min(1, 'Page ID is required'), + purge: z.boolean().optional(), }) .refine( (data) => { @@ -98,7 +99,7 @@ export async function POST(request: NextRequest) { return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) } - const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}?expand=body.storage,body.view,body.atlas_doc_format` + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}?body-format=storage` const response = await fetch(url, { method: 'GET', @@ -130,16 +131,18 @@ export async function POST(request: NextRequest) { id: data.id, title: data.title, body: { - view: { - value: - data.body?.storage?.value || - data.body?.view?.value || - data.body?.atlas_doc_format?.value || - data.content || // try alternative fields - data.description || - `Content for page ${data.title}`, // fallback content + storage: { + value: data.body?.storage?.value ?? null, + representation: 'storage', }, }, + status: data.status ?? null, + spaceId: data.spaceId ?? null, + parentId: data.parentId ?? null, + authorId: data.authorId ?? null, + createdAt: data.createdAt ?? null, + version: data.version ?? null, + _links: data._links ?? 
null, }) } catch (error) { logger.error('Error fetching Confluence page:', error) @@ -274,7 +277,7 @@ export async function DELETE(request: NextRequest) { return NextResponse.json({ error: firstError.message }, { status: 400 }) } - const { domain, accessToken, cloudId: providedCloudId, pageId } = validation.data + const { domain, accessToken, cloudId: providedCloudId, pageId, purge } = validation.data const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken)) @@ -283,7 +286,12 @@ export async function DELETE(request: NextRequest) { return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) } - const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}` + const queryParams = new URLSearchParams() + if (purge) { + queryParams.append('purge', 'true') + } + const queryString = queryParams.toString() + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}${queryString ? `?${queryString}` : ''}` const response = await fetch(url, { method: 'DELETE', diff --git a/apps/sim/app/api/tools/confluence/pages/route.ts b/apps/sim/app/api/tools/confluence/pages/route.ts index e225bf34e..739dc0659 100644 --- a/apps/sim/app/api/tools/confluence/pages/route.ts +++ b/apps/sim/app/api/tools/confluence/pages/route.ts @@ -32,7 +32,6 @@ export async function POST(request: NextRequest) { return NextResponse.json({ error: 'Access token is required' }, { status: 400 }) } - // Use provided cloudId or fetch it if not provided const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken)) const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId') @@ -40,7 +39,6 @@ export async function POST(request: NextRequest) { return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) } - // Build the URL with query parameters const baseUrl = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages` const queryParams = new 
URLSearchParams() @@ -57,7 +55,6 @@ export async function POST(request: NextRequest) { logger.info(`Fetching Confluence pages from: ${url}`) - // Make the request to Confluence API with OAuth Bearer token const response = await fetch(url, { method: 'GET', headers: { @@ -79,7 +76,6 @@ export async function POST(request: NextRequest) { } catch (e) { logger.error('Could not parse error response as JSON:', e) - // Try to get the response text for more context try { const text = await response.text() logger.error('Response text:', text) diff --git a/apps/sim/app/api/tools/confluence/search-in-space/route.ts b/apps/sim/app/api/tools/confluence/search-in-space/route.ts new file mode 100644 index 000000000..8a3dcf1a1 --- /dev/null +++ b/apps/sim/app/api/tools/confluence/search-in-space/route.ts @@ -0,0 +1,120 @@ +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' +import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation' +import { getConfluenceCloudId } from '@/tools/confluence/utils' + +const logger = createLogger('ConfluenceSearchInSpaceAPI') + +export const dynamic = 'force-dynamic' + +/** + * Search for content within a specific Confluence space using CQL. 
+ */ +export async function POST(request: NextRequest) { + try { + const auth = await checkSessionOrInternalAuth(request) + if (!auth.success || !auth.userId) { + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + const body = await request.json() + const { + domain, + accessToken, + spaceKey, + query, + cloudId: providedCloudId, + limit = 25, + contentType, + } = body + + if (!domain) { + return NextResponse.json({ error: 'Domain is required' }, { status: 400 }) + } + + if (!accessToken) { + return NextResponse.json({ error: 'Access token is required' }, { status: 400 }) + } + + if (!spaceKey) { + return NextResponse.json({ error: 'Space key is required' }, { status: 400 }) + } + + const spaceKeyValidation = validateAlphanumericId(spaceKey, 'spaceKey', 255) + if (!spaceKeyValidation.isValid) { + return NextResponse.json({ error: spaceKeyValidation.error }, { status: 400 }) + } + + const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken)) + + const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId') + if (!cloudIdValidation.isValid) { + return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) + } + + const escapeCqlValue = (value: string) => value.replace(/\\/g, '\\\\').replace(/"/g, '\\"') + + let cql = `space = "${escapeCqlValue(spaceKey)}"` + + if (query) { + cql += ` AND text ~ "${escapeCqlValue(query)}"` + } + + if (contentType) { + cql += ` AND type = "${escapeCqlValue(contentType)}"` + } + + const searchParams = new URLSearchParams({ + cql, + limit: String(Math.min(limit, 250)), + }) + + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/rest/api/search?${searchParams.toString()}` + + logger.info(`Searching in space ${spaceKey} with CQL: ${cql}`) + + const response = await fetch(url, { + method: 'GET', + headers: { + Accept: 'application/json', + Authorization: `Bearer ${accessToken}`, + }, + }) + + if (!response.ok) { + const errorData = await 
response.json().catch(() => null) + logger.error('Confluence API error response:', { + status: response.status, + statusText: response.statusText, + error: JSON.stringify(errorData, null, 2), + }) + const errorMessage = errorData?.message || `Failed to search in space (${response.status})` + return NextResponse.json({ error: errorMessage }, { status: response.status }) + } + + const data = await response.json() + + const results = (data.results || []).map((result: any) => ({ + id: result.content?.id ?? result.id, + title: result.content?.title ?? result.title, + type: result.content?.type ?? result.type, + status: result.content?.status ?? null, + url: result.url ?? result._links?.webui ?? '', + excerpt: result.excerpt ?? '', + lastModified: result.lastModified ?? null, + })) + + return NextResponse.json({ + results, + spaceKey, + totalSize: data.totalSize ?? results.length, + }) + } catch (error) { + logger.error('Error searching in space:', error) + return NextResponse.json( + { error: (error as Error).message || 'Internal server error' }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/tools/confluence/search/route.ts b/apps/sim/app/api/tools/confluence/search/route.ts index adeedb392..f31f99279 100644 --- a/apps/sim/app/api/tools/confluence/search/route.ts +++ b/apps/sim/app/api/tools/confluence/search/route.ts @@ -42,8 +42,10 @@ export async function POST(request: NextRequest) { return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) } + const escapeCqlValue = (value: string) => value.replace(/\\/g, '\\\\').replace(/"/g, '\\"') + const searchParams = new URLSearchParams({ - cql: `text ~ "${query}"`, + cql: `text ~ "${escapeCqlValue(query)}"`, limit: limit.toString(), }) @@ -70,13 +72,27 @@ export async function POST(request: NextRequest) { const data = await response.json() - const results = (data.results || []).map((result: any) => ({ - id: result.content?.id || result.id, - title: result.content?.title || result.title, - 
type: result.content?.type || result.type, - url: result.url || result._links?.webui || '', - excerpt: result.excerpt || '', - })) + const results = (data.results || []).map((result: any) => { + const spaceData = result.resultGlobalContainer || result.content?.space + return { + id: result.content?.id || result.id, + title: result.content?.title || result.title, + type: result.content?.type || result.type, + url: result.url || result._links?.webui || '', + excerpt: result.excerpt || '', + status: result.content?.status ?? null, + spaceKey: result.resultGlobalContainer?.key ?? result.content?.space?.key ?? null, + space: spaceData + ? { + id: spaceData.id ?? null, + key: spaceData.key ?? null, + name: spaceData.name ?? spaceData.title ?? null, + } + : null, + lastModified: result.lastModified ?? result.content?.history?.lastUpdated?.when ?? null, + entityType: result.entityType ?? null, + } + }) return NextResponse.json({ results }) } catch (error) { diff --git a/apps/sim/app/api/tools/confluence/space-blogposts/route.ts b/apps/sim/app/api/tools/confluence/space-blogposts/route.ts new file mode 100644 index 000000000..4607f9f57 --- /dev/null +++ b/apps/sim/app/api/tools/confluence/space-blogposts/route.ts @@ -0,0 +1,124 @@ +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' +import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation' +import { getConfluenceCloudId } from '@/tools/confluence/utils' + +const logger = createLogger('ConfluenceSpaceBlogPostsAPI') + +export const dynamic = 'force-dynamic' + +/** + * List all blog posts in a specific Confluence space. 
+ * Uses GET /wiki/api/v2/spaces/{id}/blogposts + */ +export async function POST(request: NextRequest) { + try { + const auth = await checkSessionOrInternalAuth(request) + if (!auth.success || !auth.userId) { + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + const body = await request.json() + const { + domain, + accessToken, + spaceId, + cloudId: providedCloudId, + limit = 25, + status, + bodyFormat, + cursor, + } = body + + if (!domain) { + return NextResponse.json({ error: 'Domain is required' }, { status: 400 }) + } + + if (!accessToken) { + return NextResponse.json({ error: 'Access token is required' }, { status: 400 }) + } + + if (!spaceId) { + return NextResponse.json({ error: 'Space ID is required' }, { status: 400 }) + } + + const spaceIdValidation = validateAlphanumericId(spaceId, 'spaceId', 255) + if (!spaceIdValidation.isValid) { + return NextResponse.json({ error: spaceIdValidation.error }, { status: 400 }) + } + + const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken)) + + const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId') + if (!cloudIdValidation.isValid) { + return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) + } + + const queryParams = new URLSearchParams() + queryParams.append('limit', String(Math.min(limit, 250))) + + if (status) { + queryParams.append('status', status) + } + + if (bodyFormat) { + queryParams.append('body-format', bodyFormat) + } + + if (cursor) { + queryParams.append('cursor', cursor) + } + + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/spaces/${spaceId}/blogposts?${queryParams.toString()}` + + logger.info(`Fetching blog posts in space ${spaceId}`) + + const response = await fetch(url, { + method: 'GET', + headers: { + Accept: 'application/json', + Authorization: `Bearer ${accessToken}`, + }, + }) + + if (!response.ok) { + const errorData = await response.json().catch(() => null) + 
logger.error('Confluence API error response:', { + status: response.status, + statusText: response.statusText, + error: JSON.stringify(errorData, null, 2), + }) + const errorMessage = + errorData?.message || `Failed to list blog posts in space (${response.status})` + return NextResponse.json({ error: errorMessage }, { status: response.status }) + } + + const data = await response.json() + + const blogPosts = (data.results || []).map((post: any) => ({ + id: post.id, + title: post.title, + status: post.status ?? null, + spaceId: post.spaceId ?? null, + authorId: post.authorId ?? null, + createdAt: post.createdAt ?? null, + version: post.version ?? null, + body: post.body ?? null, + webUrl: post._links?.webui ?? null, + })) + + return NextResponse.json({ + blogPosts, + nextCursor: data._links?.next + ? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor') + : null, + }) + } catch (error) { + logger.error('Error listing blog posts in space:', error) + return NextResponse.json( + { error: (error as Error).message || 'Internal server error' }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/tools/confluence/space-pages/route.ts b/apps/sim/app/api/tools/confluence/space-pages/route.ts new file mode 100644 index 000000000..fcf17efa0 --- /dev/null +++ b/apps/sim/app/api/tools/confluence/space-pages/route.ts @@ -0,0 +1,125 @@ +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' +import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation' +import { getConfluenceCloudId } from '@/tools/confluence/utils' + +const logger = createLogger('ConfluenceSpacePagesAPI') + +export const dynamic = 'force-dynamic' + +/** + * List all pages in a specific Confluence space. 
+ * Uses GET /wiki/api/v2/spaces/{id}/pages + */ +export async function POST(request: NextRequest) { + try { + const auth = await checkSessionOrInternalAuth(request) + if (!auth.success || !auth.userId) { + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + const body = await request.json() + const { + domain, + accessToken, + spaceId, + cloudId: providedCloudId, + limit = 50, + status, + bodyFormat, + cursor, + } = body + + if (!domain) { + return NextResponse.json({ error: 'Domain is required' }, { status: 400 }) + } + + if (!accessToken) { + return NextResponse.json({ error: 'Access token is required' }, { status: 400 }) + } + + if (!spaceId) { + return NextResponse.json({ error: 'Space ID is required' }, { status: 400 }) + } + + const spaceIdValidation = validateAlphanumericId(spaceId, 'spaceId', 255) + if (!spaceIdValidation.isValid) { + return NextResponse.json({ error: spaceIdValidation.error }, { status: 400 }) + } + + const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken)) + + const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId') + if (!cloudIdValidation.isValid) { + return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) + } + + const queryParams = new URLSearchParams() + queryParams.append('limit', String(Math.min(limit, 250))) + + if (status) { + queryParams.append('status', status) + } + + if (bodyFormat) { + queryParams.append('body-format', bodyFormat) + } + + if (cursor) { + queryParams.append('cursor', cursor) + } + + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/spaces/${spaceId}/pages?${queryParams.toString()}` + + logger.info(`Fetching pages in space ${spaceId}`) + + const response = await fetch(url, { + method: 'GET', + headers: { + Accept: 'application/json', + Authorization: `Bearer ${accessToken}`, + }, + }) + + if (!response.ok) { + const errorData = await response.json().catch(() => null) + 
logger.error('Confluence API error response:', { + status: response.status, + statusText: response.statusText, + error: JSON.stringify(errorData, null, 2), + }) + const errorMessage = + errorData?.message || `Failed to list pages in space (${response.status})` + return NextResponse.json({ error: errorMessage }, { status: response.status }) + } + + const data = await response.json() + + const pages = (data.results || []).map((page: any) => ({ + id: page.id, + title: page.title, + status: page.status ?? null, + spaceId: page.spaceId ?? null, + parentId: page.parentId ?? null, + authorId: page.authorId ?? null, + createdAt: page.createdAt ?? null, + version: page.version ?? null, + body: page.body ?? null, + webUrl: page._links?.webui ?? null, + })) + + return NextResponse.json({ + pages, + nextCursor: data._links?.next + ? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor') + : null, + }) + } catch (error) { + logger.error('Error listing pages in space:', error) + return NextResponse.json( + { error: (error as Error).message || 'Internal server error' }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/tools/confluence/spaces/route.ts b/apps/sim/app/api/tools/confluence/spaces/route.ts index 53daafe0e..0ce8dd0ee 100644 --- a/apps/sim/app/api/tools/confluence/spaces/route.ts +++ b/apps/sim/app/api/tools/confluence/spaces/route.ts @@ -21,6 +21,7 @@ export async function GET(request: NextRequest) { const accessToken = searchParams.get('accessToken') const providedCloudId = searchParams.get('cloudId') const limit = searchParams.get('limit') || '25' + const cursor = searchParams.get('cursor') if (!domain) { return NextResponse.json({ error: 'Domain is required' }, { status: 400 }) @@ -37,7 +38,12 @@ export async function GET(request: NextRequest) { return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) } - const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/spaces?limit=${limit}` + const 
queryParams = new URLSearchParams() + queryParams.append('limit', String(Math.min(Number(limit), 250))) + if (cursor) { + queryParams.append('cursor', cursor) + } + const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/spaces?${queryParams.toString()}` const response = await fetch(url, { method: 'GET', @@ -67,9 +73,18 @@ export async function GET(request: NextRequest) { key: space.key, type: space.type, status: space.status, + authorId: space.authorId ?? null, + createdAt: space.createdAt ?? null, + homepageId: space.homepageId ?? null, + description: space.description ?? null, })) - return NextResponse.json({ spaces }) + return NextResponse.json({ + spaces, + nextCursor: data._links?.next + ? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor') + : null, + }) } catch (error) { logger.error('Error listing Confluence spaces:', error) return NextResponse.json( diff --git a/apps/sim/app/chat/components/header/header.tsx b/apps/sim/app/chat/components/header/header.tsx index 55fa058b9..68411916f 100644 --- a/apps/sim/app/chat/components/header/header.tsx +++ b/apps/sim/app/chat/components/header/header.tsx @@ -3,8 +3,8 @@ import Image from 'next/image' import Link from 'next/link' import { GithubIcon } from '@/components/icons' -import { useBrandConfig } from '@/lib/branding/branding' import { inter } from '@/app/_styles/fonts/inter/inter' +import { useBrandConfig } from '@/ee/whitelabeling' interface ChatHeaderProps { chatConfig: { diff --git a/apps/sim/app/form/[identifier]/components/powered-by-sim.tsx b/apps/sim/app/form/[identifier]/components/powered-by-sim.tsx index d882fbcf9..a647776bc 100644 --- a/apps/sim/app/form/[identifier]/components/powered-by-sim.tsx +++ b/apps/sim/app/form/[identifier]/components/powered-by-sim.tsx @@ -1,8 +1,8 @@ 'use client' import Image from 'next/image' -import { useBrandConfig } from '@/lib/branding/branding' import { inter } from '@/app/_styles/fonts/inter/inter' +import { 
useBrandConfig } from '@/ee/whitelabeling' export function PoweredBySim() { const brandConfig = useBrandConfig() diff --git a/apps/sim/app/layout.tsx b/apps/sim/app/layout.tsx index 6ab3aae35..11d1b3036 100644 --- a/apps/sim/app/layout.tsx +++ b/apps/sim/app/layout.tsx @@ -2,9 +2,12 @@ import type { Metadata, Viewport } from 'next' import Script from 'next/script' import { PublicEnvScript } from 'next-runtime-env' import { BrandedLayout } from '@/components/branded-layout' -import { generateThemeCSS } from '@/lib/branding/inject-theme' -import { generateBrandedMetadata, generateStructuredData } from '@/lib/branding/metadata' import { PostHogProvider } from '@/app/_shell/providers/posthog-provider' +import { + generateBrandedMetadata, + generateStructuredData, + generateThemeCSS, +} from '@/ee/whitelabeling' import '@/app/_styles/globals.css' import { OneDollarStats } from '@/components/analytics/onedollarstats' import { isReactGrabEnabled, isReactScanEnabled } from '@/lib/core/config/feature-flags' diff --git a/apps/sim/app/llms-full.txt/route.ts b/apps/sim/app/llms-full.txt/route.ts index ad4acb0dd..c7efe0d24 100644 --- a/apps/sim/app/llms-full.txt/route.ts +++ b/apps/sim/app/llms-full.txt/route.ts @@ -56,7 +56,7 @@ An execution is a single run of a workflow. 
It includes: ### LLM Orchestration Sim supports all major LLM providers: - OpenAI (GPT-5.2, GPT-5.1, GPT-5, GPT-4o, GPT-4.1) -- Anthropic (Claude Opus 4.5, Claude Opus 4.1, Claude Sonnet 4.5, Claude Haiku 4.5) +- Anthropic (Claude Opus 4.6, Claude Opus 4.5, Claude Sonnet 4.5, Claude Haiku 4.5) - Google (Gemini Pro 3, Gemini Pro 3 Preview, Gemini 2.5 Pro, Gemini 2.5 Flash) - Mistral (Mistral Large, Mistral Medium) - xAI (Grok) diff --git a/apps/sim/app/manifest.ts b/apps/sim/app/manifest.ts index 2ec404966..9ea650743 100644 --- a/apps/sim/app/manifest.ts +++ b/apps/sim/app/manifest.ts @@ -1,5 +1,5 @@ import type { MetadataRoute } from 'next' -import { getBrandConfig } from '@/lib/branding/branding' +import { getBrandConfig } from '@/ee/whitelabeling' export default function manifest(): MetadataRoute.Manifest { const brand = getBrandConfig() diff --git a/apps/sim/app/resume/[workflowId]/[executionId]/resume-page-client.tsx b/apps/sim/app/resume/[workflowId]/[executionId]/resume-page-client.tsx index 3edd059aa..5b1ace2ea 100644 --- a/apps/sim/app/resume/[workflowId]/[executionId]/resume-page-client.tsx +++ b/apps/sim/app/resume/[workflowId]/[executionId]/resume-page-client.tsx @@ -24,8 +24,8 @@ import { SelectTrigger, SelectValue, } from '@/components/ui/select' -import { useBrandConfig } from '@/lib/branding/branding' import Nav from '@/app/(landing)/components/nav/nav' +import { useBrandConfig } from '@/ee/whitelabeling' import type { ResumeStatus } from '@/executor/types' interface ResumeLinks { diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/credential-selector/components/oauth-required-modal.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/credential-selector/components/oauth-required-modal.tsx index b6e7aa4cb..4888a9684 100644 --- 
a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/credential-selector/components/oauth-required-modal.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/credential-selector/components/oauth-required-modal.tsx @@ -74,6 +74,12 @@ const SCOPE_DESCRIPTIONS: Record = { 'write:label:confluence': 'Add and remove labels', 'search:confluence': 'Search Confluence content', 'readonly:content.attachment:confluence': 'View attachments', + 'read:blogpost:confluence': 'View Confluence blog posts', + 'write:blogpost:confluence': 'Create and update Confluence blog posts', + 'read:content.property:confluence': 'View properties on Confluence content', + 'write:content.property:confluence': 'Create and manage content properties', + 'read:hierarchical-content:confluence': 'View page hierarchy (children and ancestors)', + 'read:content.metadata:confluence': 'View content metadata (required for ancestors)', 'read:me': 'Read profile information', 'database.read': 'Read database', 'database.write': 'Write to database', @@ -358,6 +364,7 @@ export function OAuthRequiredModal({ logger.info('Linking OAuth2:', { providerId, requiredScopes, + hasNewScopes: newScopes.length > 0, }) if (providerId === 'trello') { diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/file-upload/file-upload.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/file-upload/file-upload.tsx index 39ad880d9..b46467aa3 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/file-upload/file-upload.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/file-upload/file-upload.tsx 
@@ -34,6 +34,103 @@ interface UploadedFile { type: string } +interface SingleFileSelectorProps { + file: UploadedFile + options: Array<{ label: string; value: string; disabled?: boolean }> + selectedValue: string + inputValue: string + onInputChange: (value: string) => void + onClear: (e: React.MouseEvent) => void + onOpenChange: (open: boolean) => void + disabled: boolean + isLoading: boolean + formatFileSize: (bytes: number) => string + truncateMiddle: (text: string, start?: number, end?: number) => string + isDeleting: boolean +} + +/** + * Single file selector component that shows the selected file with both + * a clear button (X) and a chevron to change the selection. + * Follows the same pattern as SelectorCombobox for consistency. + */ +function SingleFileSelector({ + file, + options, + selectedValue, + inputValue, + onInputChange, + onClear, + onOpenChange, + disabled, + isLoading, + formatFileSize, + truncateMiddle, + isDeleting, +}: SingleFileSelectorProps) { + const displayLabel = `${truncateMiddle(file.name, 20, 12)} (${formatFileSize(file.size)})` + const [localInputValue, setLocalInputValue] = useState(displayLabel) + const [isEditing, setIsEditing] = useState(false) + + // Sync display label when file changes + useEffect(() => { + if (!isEditing) { + setLocalInputValue(displayLabel) + } + }, [displayLabel, isEditing]) + + return ( +
+ { + // Check if user selected an option + const matched = options.find((opt) => opt.value === newValue || opt.label === newValue) + if (matched) { + setIsEditing(false) + setLocalInputValue(displayLabel) + onInputChange(matched.value) + return + } + // User is typing to search + setIsEditing(true) + setLocalInputValue(newValue) + }} + onOpenChange={(open) => { + if (!open) { + setIsEditing(false) + setLocalInputValue(displayLabel) + } + onOpenChange(open) + }} + placeholder={isLoading ? 'Loading files...' : 'Select or upload file'} + disabled={disabled || isDeleting} + editable={true} + filterOptions={isEditing} + isLoading={isLoading} + inputProps={{ + className: 'pr-[60px]', + }} + /> + +
+ ) +} + interface UploadingFile { id: string name: string @@ -500,6 +597,7 @@ export function FileUpload({ const hasFiles = filesArray.length > 0 const isUploading = uploadingFiles.length > 0 + // Options for multiple file mode (filters out already selected files) const comboboxOptions = useMemo( () => [ { label: 'Upload New File', value: '__upload_new__' }, @@ -516,10 +614,43 @@ export function FileUpload({ [availableWorkspaceFiles, acceptedTypes] ) + // Options for single file mode (includes all files, selected one will be highlighted) + const singleFileOptions = useMemo( + () => [ + { label: 'Upload New File', value: '__upload_new__' }, + ...workspaceFiles.map((file) => { + const isAccepted = + !acceptedTypes || acceptedTypes === '*' || isFileTypeAccepted(file.type, acceptedTypes) + return { + label: file.name, + value: file.id, + disabled: !isAccepted, + } + }), + ], + [workspaceFiles, acceptedTypes] + ) + + // Find the selected file's workspace ID for highlighting in single file mode + const selectedFileId = useMemo(() => { + if (!hasFiles || multiple) return '' + const currentFile = filesArray[0] + if (!currentFile) return '' + // Match by key or path + const matchedWorkspaceFile = workspaceFiles.find( + (wf) => + wf.key === currentFile.key || + wf.name === currentFile.name || + currentFile.path?.includes(wf.key) + ) + return matchedWorkspaceFile?.id || '' + }, [filesArray, workspaceFiles, hasFiles, multiple]) + const handleComboboxChange = (value: string) => { setInputValue(value) - const selectedFile = availableWorkspaceFiles.find((file) => file.id === value) + // Look in full workspaceFiles list (not filtered) to allow re-selecting same file in single mode + const selectedFile = workspaceFiles.find((file) => file.id === value) const isAcceptedType = selectedFile && (!acceptedTypes || @@ -559,16 +690,17 @@ export function FileUpload({ {/* Error message */} {uploadError &&
{uploadError}
} - {/* File list with consistent spacing */} - {(hasFiles || isUploading) && ( + {/* File list with consistent spacing - only show for multiple mode or when uploading */} + {((hasFiles && multiple) || isUploading) && (
- {/* Only show files that aren't currently uploading */} - {filesArray.map((file) => { - const isCurrentlyUploading = uploadingFiles.some( - (uploadingFile) => uploadingFile.name === file.name - ) - return !isCurrentlyUploading && renderFileItem(file) - })} + {/* Only show files that aren't currently uploading (for multiple mode only) */} + {multiple && + filesArray.map((file) => { + const isCurrentlyUploading = uploadingFiles.some( + (uploadingFile) => uploadingFile.name === file.name + ) + return !isCurrentlyUploading && renderFileItem(file) + })} {isUploading && ( <> {uploadingFiles.map(renderUploadingItem)} @@ -604,6 +736,26 @@ export function FileUpload({ /> )} + {/* Single file mode with file selected: show combobox-style UI with X and chevron */} + {hasFiles && !multiple && !isUploading && ( + handleRemoveFile(filesArray[0], e)} + onOpenChange={(open) => { + if (open) void loadWorkspaceFiles() + }} + disabled={disabled} + isLoading={loadingWorkspaceFiles} + formatFileSize={formatFileSize} + truncateMiddle={truncateMiddle} + isDeleting={deletingFiles[filesArray[0]?.path || '']} + /> + )} + {/* Show dropdown selector if no files and not uploading */} {!hasFiles && !isUploading && ( { + e.preventDefault() + e.stopPropagation() + if (readOnly || disabled) return + setStoreValue(null) + setInputValue('') + onOptionChange?.('') + }, + [setStoreValue, onOptionChange, readOnly, disabled] + ) + + const showClearButton = Boolean(activeValue) && !disabled && !readOnly + return (
{({ ref, onDrop, onDragOver }) => ( - { - const matched = optionMap.get(newValue) - if (matched) { - setInputValue(matched.label) - setIsEditing(false) - handleSelection(matched.id) - return - } - if (allowSearch) { - setInputValue(newValue) - setIsEditing(true) - setSearchTerm(newValue) - } - }} - placeholder={placeholder || subBlock.placeholder || 'Select an option'} - disabled={disabled || readOnly} - editable={allowSearch} - filterOptions={allowSearch} - inputRef={ref as React.RefObject} - inputProps={{ - onDrop: onDrop as (e: React.DragEvent) => void, - onDragOver: onDragOver as (e: React.DragEvent) => void, - }} - isLoading={isLoading} - error={error instanceof Error ? error.message : null} - /> +
+ { + const matched = optionMap.get(newValue) + if (matched) { + setInputValue(matched.label) + setIsEditing(false) + handleSelection(matched.id) + return + } + if (allowSearch) { + setInputValue(newValue) + setIsEditing(true) + setSearchTerm(newValue) + } + }} + placeholder={placeholder || subBlock.placeholder || 'Select an option'} + disabled={disabled || readOnly} + editable={allowSearch} + filterOptions={allowSearch} + inputRef={ref as React.RefObject} + inputProps={{ + onDrop: onDrop as (e: React.DragEvent) => void, + onDragOver: onDragOver as (e: React.DragEvent) => void, + className: showClearButton ? 'pr-[60px]' : undefined, + }} + isLoading={isLoading} + error={error instanceof Error ? error.message : null} + /> + {showClearButton && ( + + )} +
)}
diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/terminal.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/terminal.tsx index 540f97bba..12f4cb510 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/terminal.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/terminal.tsx @@ -100,7 +100,7 @@ const BlockRow = memo(function BlockRow({ >
{BlockIcon && } @@ -276,7 +276,7 @@ const SubflowNodeRow = memo(function SubflowNodeRow({ >
{BlockIcon && } diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts index 1b514dccd..0b4916a2f 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts @@ -1288,6 +1288,13 @@ export function useWorkflowExecution() { onBlockCompleteCallback: onBlockComplete, }) + const clientWorkflowState = executionWorkflowState || { + blocks: filteredStates, + edges: workflowEdges, + loops: latestWorkflowState.loops, + parallels: latestWorkflowState.parallels, + } + await executionStream.execute({ workflowId: activeWorkflowId, input: finalWorkflowInput, @@ -1297,14 +1304,12 @@ export function useWorkflowExecution() { useDraftState: true, isClientSession: true, stopAfterBlockId, - workflowStateOverride: executionWorkflowState - ? { - blocks: executionWorkflowState.blocks, - edges: executionWorkflowState.edges, - loops: executionWorkflowState.loops, - parallels: executionWorkflowState.parallels, - } - : undefined, + workflowStateOverride: { + blocks: clientWorkflowState.blocks, + edges: clientWorkflowState.edges, + loops: clientWorkflowState.loops, + parallels: clientWorkflowState.parallels, + }, callbacks: { onExecutionStarted: (data) => { logger.info('Server execution started:', data) diff --git a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/general/general.tsx b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/general/general.tsx index 7ab1d0737..2893557be 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/general/general.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/general/general.tsx @@ -19,11 +19,11 @@ import { 
import { Input, Skeleton } from '@/components/ui' import { signOut, useSession } from '@/lib/auth/auth-client' import { ANONYMOUS_USER_ID } from '@/lib/auth/constants' -import { useBrandConfig } from '@/lib/branding/branding' import { getEnv, isTruthy } from '@/lib/core/config/env' import { isHosted } from '@/lib/core/config/feature-flags' import { getBaseUrl } from '@/lib/core/utils/urls' import { useProfilePictureUpload } from '@/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/hooks/use-profile-picture-upload' +import { useBrandConfig } from '@/ee/whitelabeling' import { useGeneralSettings, useUpdateGeneralSetting } from '@/hooks/queries/general-settings' import { useUpdateUserProfile, useUserProfile } from '@/hooks/queries/user-profile' import { clearUserData } from '@/stores' diff --git a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/usage-indicator/usage-indicator.tsx b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/usage-indicator/usage-indicator.tsx index 71ef8060c..742865fb7 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/usage-indicator/usage-indicator.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/usage-indicator/usage-indicator.tsx @@ -397,7 +397,7 @@ export function UsageIndicator({ onClick }: UsageIndicatorProps) { return () => window.clearInterval(interval) }, [isHovered, pillCount, startAnimationIndex]) - if (isLoading) { + if (isLoading && !subscriptionData) { return (
diff --git a/apps/sim/blocks/blocks.test.ts b/apps/sim/blocks/blocks.test.ts index 5790ca6d0..47bf66097 100644 --- a/apps/sim/blocks/blocks.test.ts +++ b/apps/sim/blocks/blocks.test.ts @@ -649,4 +649,394 @@ describe('Blocks Module', () => { } }) }) + + describe('Canonical Param Validation', () => { + /** + * Helper to serialize a condition for comparison + */ + function serializeCondition(condition: unknown): string { + if (!condition) return '' + return JSON.stringify(condition) + } + + it('should not have canonicalParamId that matches any subBlock id within the same block', () => { + const blocks = getAllBlocks() + const errors: string[] = [] + + for (const block of blocks) { + const allSubBlockIds = new Set(block.subBlocks.map((sb) => sb.id)) + const canonicalParamIds = new Set( + block.subBlocks.filter((sb) => sb.canonicalParamId).map((sb) => sb.canonicalParamId) + ) + + for (const canonicalId of canonicalParamIds) { + if (allSubBlockIds.has(canonicalId!)) { + // Check if the matching subBlock also has a canonicalParamId pointing to itself + const matchingSubBlock = block.subBlocks.find( + (sb) => sb.id === canonicalId && !sb.canonicalParamId + ) + if (matchingSubBlock) { + errors.push( + `Block "${block.type}": canonicalParamId "${canonicalId}" clashes with subBlock id "${canonicalId}"` + ) + } + } + } + } + + if (errors.length > 0) { + throw new Error(`Canonical param ID clashes detected:\n${errors.join('\n')}`) + } + }) + + it('should have unique subBlock IDs within the same condition context', () => { + const blocks = getAllBlocks() + const errors: string[] = [] + + for (const block of blocks) { + // Group subBlocks by their condition (only for static/JSON conditions, not functions) + const subBlocksByCondition = new Map< + string, + Array<{ id: string; mode?: string; hasCanonical: boolean }> + >() + + for (const subBlock of block.subBlocks) { + // Skip subBlocks with function conditions - we can't evaluate them statically + // These are valid when the 
function returns different conditions at runtime + if (typeof subBlock.condition === 'function') { + continue + } + + const conditionKey = serializeCondition(subBlock.condition) + if (!subBlocksByCondition.has(conditionKey)) { + subBlocksByCondition.set(conditionKey, []) + } + subBlocksByCondition.get(conditionKey)!.push({ + id: subBlock.id, + mode: subBlock.mode, + hasCanonical: Boolean(subBlock.canonicalParamId), + }) + } + + // Check for duplicate IDs within the same condition (excluding canonical pairs and mode swaps) + for (const [conditionKey, subBlocks] of subBlocksByCondition) { + const idCounts = new Map() + for (const sb of subBlocks) { + idCounts.set(sb.id, (idCounts.get(sb.id) || 0) + 1) + } + + for (const [id, count] of idCounts) { + if (count > 1) { + const duplicates = subBlocks.filter((sb) => sb.id === id) + + // Categorize modes + const basicModes = duplicates.filter( + (sb) => !sb.mode || sb.mode === 'basic' || sb.mode === 'both' + ) + const advancedModes = duplicates.filter((sb) => sb.mode === 'advanced') + const triggerModes = duplicates.filter((sb) => sb.mode === 'trigger') + + // Valid pattern 1: basic/advanced mode swap (with or without canonicalParamId) + if ( + basicModes.length === 1 && + advancedModes.length === 1 && + triggerModes.length === 0 + ) { + continue // This is a valid basic/advanced mode swap pair + } + + // Valid pattern 2: basic/trigger mode separation (trigger version for trigger mode) + // One basic/both + one or more trigger versions is valid + if ( + basicModes.length <= 1 && + advancedModes.length === 0 && + triggerModes.length >= 1 + ) { + continue // This is a valid pattern where trigger mode has its own subBlock + } + + // Valid pattern 3: All duplicates have canonicalParamId (they form a canonical group) + const allHaveCanonical = duplicates.every((sb) => sb.hasCanonical) + if (allHaveCanonical) { + continue // Validated separately by canonical pair tests + } + + // Invalid: duplicates without proper pairing + const 
condition = conditionKey || '(no condition)' + const modeBreakdown = duplicates.map((d) => d.mode || 'basic/both').join(', ') + errors.push( + `Block "${block.type}": Duplicate subBlock id "${id}" with condition ${condition} (count: ${count}, modes: ${modeBreakdown})` + ) + } + } + } + } + + if (errors.length > 0) { + throw new Error(`Duplicate subBlock IDs detected:\n${errors.join('\n')}`) + } + }) + + it('should have properly formed canonical pairs (matching conditions)', () => { + const blocks = getAllBlocks() + const errors: string[] = [] + + for (const block of blocks) { + // Group subBlocks by canonicalParamId + const canonicalGroups = new Map< + string, + Array<{ id: string; mode?: string; condition: unknown; isStaticCondition: boolean }> + >() + + for (const subBlock of block.subBlocks) { + if (subBlock.canonicalParamId) { + if (!canonicalGroups.has(subBlock.canonicalParamId)) { + canonicalGroups.set(subBlock.canonicalParamId, []) + } + canonicalGroups.get(subBlock.canonicalParamId)!.push({ + id: subBlock.id, + mode: subBlock.mode, + condition: subBlock.condition, + isStaticCondition: typeof subBlock.condition !== 'function', + }) + } + } + + // Validate each canonical group + for (const [canonicalId, members] of canonicalGroups) { + // Only validate condition matching for static conditions + const staticMembers = members.filter((m) => m.isStaticCondition) + if (staticMembers.length > 1) { + const conditions = staticMembers.map((m) => serializeCondition(m.condition)) + const uniqueConditions = new Set(conditions) + + if (uniqueConditions.size > 1) { + errors.push( + `Block "${block.type}": Canonical param "${canonicalId}" has members with different conditions: ${[...uniqueConditions].join(' vs ')}` + ) + } + } + + // Check for proper basic/advanced pairing + const basicMembers = members.filter((m) => !m.mode || m.mode === 'basic') + const advancedMembers = members.filter((m) => m.mode === 'advanced') + + if (basicMembers.length > 1) { + errors.push( + 
`Block "${block.type}": Canonical param "${canonicalId}" has ${basicMembers.length} basic mode members (should have at most 1)` + ) + } + + if (basicMembers.length === 0 && advancedMembers.length === 0) { + errors.push( + `Block "${block.type}": Canonical param "${canonicalId}" has no basic or advanced mode members` + ) + } + } + } + + if (errors.length > 0) { + throw new Error(`Canonical pair validation errors:\n${errors.join('\n')}`) + } + }) + + it('should have unique canonicalParamIds per operation/condition context', () => { + const blocks = getAllBlocks() + const errors: string[] = [] + + for (const block of blocks) { + // Group by condition + canonicalParamId to detect same canonical used for different operations + const canonicalByCondition = new Map>() + + for (const subBlock of block.subBlocks) { + if (subBlock.canonicalParamId) { + // Skip function conditions - we can't evaluate them statically + if (typeof subBlock.condition === 'function') { + continue + } + const conditionKey = serializeCondition(subBlock.condition) + if (!canonicalByCondition.has(subBlock.canonicalParamId)) { + canonicalByCondition.set(subBlock.canonicalParamId, new Set()) + } + canonicalByCondition.get(subBlock.canonicalParamId)!.add(conditionKey) + } + } + + // Check that each canonicalParamId is only used for one condition + for (const [canonicalId, conditions] of canonicalByCondition) { + if (conditions.size > 1) { + errors.push( + `Block "${block.type}": Canonical param "${canonicalId}" is used across ${conditions.size} different conditions. 
Each operation should have its own unique canonicalParamId.` + ) + } + } + } + + if (errors.length > 0) { + throw new Error(`Canonical param reuse across conditions:\n${errors.join('\n')}`) + } + }) + + it('should have inputs containing canonical param IDs instead of raw subBlock IDs', () => { + const blocks = getAllBlocks() + const errors: string[] = [] + + for (const block of blocks) { + if (!block.inputs) continue + + // Find all canonical groups (subBlocks with canonicalParamId) + const canonicalGroups = new Map() + for (const subBlock of block.subBlocks) { + if (subBlock.canonicalParamId) { + if (!canonicalGroups.has(subBlock.canonicalParamId)) { + canonicalGroups.set(subBlock.canonicalParamId, []) + } + canonicalGroups.get(subBlock.canonicalParamId)!.push(subBlock.id) + } + } + + const inputKeys = Object.keys(block.inputs) + + for (const [canonicalId, rawSubBlockIds] of canonicalGroups) { + // Check that the canonical param ID is in inputs + if (!inputKeys.includes(canonicalId)) { + errors.push( + `Block "${block.type}": inputs section is missing canonical param "${canonicalId}"` + ) + } + + // Check that raw subBlock IDs are NOT in inputs (they get deleted after transformation) + for (const rawId of rawSubBlockIds) { + if (rawId !== canonicalId && inputKeys.includes(rawId)) { + errors.push( + `Block "${block.type}": inputs section contains raw subBlock id "${rawId}" which should be replaced by canonical param "${canonicalId}"` + ) + } + } + } + } + + if (errors.length > 0) { + throw new Error(`Inputs section validation errors:\n${errors.join('\n')}`) + } + }) + + it('should have params function using canonical IDs instead of raw subBlock IDs', () => { + const blocks = getAllBlocks() + const errors: string[] = [] + + for (const block of blocks) { + // Check if block has a params function + const paramsFunc = block.tools?.config?.params + if (!paramsFunc || typeof paramsFunc !== 'function') continue + + // Get the function source code, stripping comments to 
avoid false positives + const rawFuncSource = paramsFunc.toString() + // Remove single-line comments (// ...) and multi-line comments (/* ... */) + const funcSource = rawFuncSource + .replace(/\/\/[^\n]*/g, '') // Remove single-line comments + .replace(/\/\*[\s\S]*?\*\//g, '') // Remove multi-line comments + + // Find all canonical groups (subBlocks with canonicalParamId) + const canonicalGroups = new Map() + for (const subBlock of block.subBlocks) { + if (subBlock.canonicalParamId) { + if (!canonicalGroups.has(subBlock.canonicalParamId)) { + canonicalGroups.set(subBlock.canonicalParamId, []) + } + canonicalGroups.get(subBlock.canonicalParamId)!.push(subBlock.id) + } + } + + // Check for raw subBlock IDs being used in the params function + for (const [canonicalId, rawSubBlockIds] of canonicalGroups) { + for (const rawId of rawSubBlockIds) { + // Skip if the rawId is the same as the canonicalId (self-referential, which is allowed in some cases) + if (rawId === canonicalId) continue + + // Check if the params function references the raw subBlock ID + // Look for patterns like: params.rawId, { rawId }, destructuring rawId + const patterns = [ + new RegExp(`params\\.${rawId}\\b`), // params.rawId + new RegExp(`\\{[^}]*\\b${rawId}\\b[^}]*\\}\\s*=\\s*params`), // { rawId } = params + new RegExp(`\\b${rawId}\\s*[,}]`), // rawId in destructuring + ] + + for (const pattern of patterns) { + if (pattern.test(funcSource)) { + errors.push( + `Block "${block.type}": params function references raw subBlock id "${rawId}" which is deleted after canonical transformation. 
Use canonical param "${canonicalId}" instead.` + ) + break + } + } + } + } + } + + if (errors.length > 0) { + throw new Error(`Params function validation errors:\n${errors.join('\n')}`) + } + }) + + it('should have consistent required status across canonical param groups', () => { + const blocks = getAllBlocks() + const errors: string[] = [] + + for (const block of blocks) { + // Find all canonical groups (subBlocks with canonicalParamId) + const canonicalGroups = new Map() + for (const subBlock of block.subBlocks) { + if (subBlock.canonicalParamId) { + if (!canonicalGroups.has(subBlock.canonicalParamId)) { + canonicalGroups.set(subBlock.canonicalParamId, []) + } + canonicalGroups.get(subBlock.canonicalParamId)!.push(subBlock) + } + } + + // For each canonical group, check that required status is consistent + for (const [canonicalId, subBlocks] of canonicalGroups) { + if (subBlocks.length < 2) continue // Single subblock, no consistency check needed + + // Get required status for each subblock (handling both boolean and condition object) + const requiredStatuses = subBlocks.map((sb) => { + // If required is a condition object or function, we can't statically determine it + // so we skip those cases + if (typeof sb.required === 'object' || typeof sb.required === 'function') { + return 'dynamic' + } + return sb.required === true ? 
'required' : 'optional' + }) + + // Filter out dynamic cases + const staticStatuses = requiredStatuses.filter((s) => s !== 'dynamic') + if (staticStatuses.length < 2) continue // Not enough static statuses to compare + + // Check if all static statuses are the same + const hasRequired = staticStatuses.includes('required') + const hasOptional = staticStatuses.includes('optional') + + if (hasRequired && hasOptional) { + const requiredSubBlocks = subBlocks + .filter((sb, i) => requiredStatuses[i] === 'required') + .map((sb) => `${sb.id} (${sb.mode || 'both'})`) + const optionalSubBlocks = subBlocks + .filter((sb, i) => requiredStatuses[i] === 'optional') + .map((sb) => `${sb.id} (${sb.mode || 'both'})`) + + errors.push( + `Block "${block.type}": canonical param "${canonicalId}" has inconsistent required status. ` + + `Required: [${requiredSubBlocks.join(', ')}], Optional: [${optionalSubBlocks.join(', ')}]. ` + + `All subBlocks in a canonical group should have the same required status.` + ) + } + } + } + + if (errors.length > 0) { + throw new Error(`Required status consistency errors:\n${errors.join('\n')}`) + } + }) + }) }) diff --git a/apps/sim/blocks/blocks/a2a.ts b/apps/sim/blocks/blocks/a2a.ts index 7426ea917..b12905b4b 100644 --- a/apps/sim/blocks/blocks/a2a.ts +++ b/apps/sim/blocks/blocks/a2a.ts @@ -216,8 +216,8 @@ export const A2ABlock: BlockConfig = { config: { tool: (params) => params.operation as string, params: (params) => { - const { fileUpload, fileReference, ...rest } = params - const normalizedFiles = normalizeFileInput(fileUpload || fileReference || params.files) + const { files, ...rest } = params + const normalizedFiles = normalizeFileInput(files) return { ...rest, ...(normalizedFiles && { files: normalizedFiles }), @@ -252,15 +252,7 @@ export const A2ABlock: BlockConfig = { }, files: { type: 'array', - description: 'Files to include with the message', - }, - fileUpload: { - type: 'array', - description: 'Uploaded files (basic mode)', - }, - 
fileReference: { - type: 'json', - description: 'File reference from previous blocks (advanced mode)', + description: 'Files to include with the message (canonical param)', }, historyLength: { type: 'number', diff --git a/apps/sim/blocks/blocks/agent.ts b/apps/sim/blocks/blocks/agent.ts index 3c923bda6..230ec1645 100644 --- a/apps/sim/blocks/blocks/agent.ts +++ b/apps/sim/blocks/blocks/agent.ts @@ -274,6 +274,7 @@ Return ONLY the JSON array.`, { label: 'low', id: 'low' }, { label: 'medium', id: 'medium' }, { label: 'high', id: 'high' }, + { label: 'max', id: 'max' }, ], dependsOn: ['model'], fetchOptions: async (blockId: string) => { @@ -318,14 +319,14 @@ Return ONLY the JSON array.`, { id: 'azureEndpoint', - title: 'Azure OpenAI Endpoint', + title: 'Azure Endpoint', type: 'short-input', password: true, - placeholder: 'https://your-resource.openai.azure.com', + placeholder: 'https://your-resource.services.ai.azure.com', connectionDroppable: false, condition: { field: 'model', - value: providers['azure-openai'].models, + value: [...providers['azure-openai'].models, ...providers['azure-anthropic'].models], }, }, { @@ -763,7 +764,10 @@ Example 3 (Array Input): maxTokens: { type: 'number', description: 'Maximum number of tokens in the response' }, reasoningEffort: { type: 'string', description: 'Reasoning effort level for GPT-5 models' }, verbosity: { type: 'string', description: 'Verbosity level for GPT-5 models' }, - thinkingLevel: { type: 'string', description: 'Thinking level for Gemini 3 models' }, + thinkingLevel: { + type: 'string', + description: 'Thinking level for models with extended thinking (Anthropic Claude, Gemini 3)', + }, tools: { type: 'json', description: 'Available tools configuration' }, }, outputs: { diff --git a/apps/sim/blocks/blocks/confluence.ts b/apps/sim/blocks/blocks/confluence.ts index 5bdb21e5e..4c8790928 100644 --- a/apps/sim/blocks/blocks/confluence.ts +++ b/apps/sim/blocks/blocks/confluence.ts @@ -75,6 +75,12 @@ export const 
ConfluenceBlock: BlockConfig = { 'search:confluence', 'read:me', 'offline_access', + 'read:blogpost:confluence', + 'write:blogpost:confluence', + 'read:content.property:confluence', + 'write:content.property:confluence', + 'read:hierarchical-content:confluence', + 'read:content.metadata:confluence', ], placeholder: 'Select Confluence account', required: true, @@ -88,6 +94,19 @@ export const ConfluenceBlock: BlockConfig = { placeholder: 'Select Confluence page', dependsOn: ['credential', 'domain'], mode: 'basic', + required: { + field: 'operation', + value: [ + 'read', + 'update', + 'delete', + 'create_comment', + 'list_comments', + 'list_attachments', + 'list_labels', + 'upload_attachment', + ], + }, }, { id: 'manualPageId', @@ -96,14 +115,26 @@ export const ConfluenceBlock: BlockConfig = { canonicalParamId: 'pageId', placeholder: 'Enter Confluence page ID', mode: 'advanced', + required: { + field: 'operation', + value: [ + 'read', + 'update', + 'delete', + 'create_comment', + 'list_comments', + 'list_attachments', + 'list_labels', + 'upload_attachment', + ], + }, }, { id: 'spaceId', title: 'Space ID', type: 'short-input', placeholder: 'Enter Confluence space ID', - required: true, - condition: { field: 'operation', value: ['create', 'get_space'] }, + required: { field: 'operation', value: ['create', 'get_space'] }, }, { id: 'title', @@ -258,7 +289,6 @@ export const ConfluenceBlock: BlockConfig = { const { credential, pageId, - manualPageId, operation, attachmentFile, attachmentFileName, @@ -266,28 +296,7 @@ export const ConfluenceBlock: BlockConfig = { ...rest } = params - const effectivePageId = (pageId || manualPageId || '').trim() - - const requiresPageId = [ - 'read', - 'update', - 'delete', - 'create_comment', - 'list_comments', - 'list_attachments', - 'list_labels', - 'upload_attachment', - ] - - const requiresSpaceId = ['create', 'get_space'] - - if (requiresPageId.includes(operation) && !effectivePageId) { - throw new Error('Page ID is required. 
Please select a page or enter a page ID manually.') - } - - if (requiresSpaceId.includes(operation) && !rest.spaceId) { - throw new Error('Space ID is required for this operation.') - } + const effectivePageId = pageId ? String(pageId).trim() : '' if (operation === 'upload_attachment') { return { @@ -314,8 +323,7 @@ export const ConfluenceBlock: BlockConfig = { operation: { type: 'string', description: 'Operation to perform' }, domain: { type: 'string', description: 'Confluence domain' }, credential: { type: 'string', description: 'Confluence access token' }, - pageId: { type: 'string', description: 'Page identifier' }, - manualPageId: { type: 'string', description: 'Manual page identifier' }, + pageId: { type: 'string', description: 'Page identifier (canonical param)' }, spaceId: { type: 'string', description: 'Space identifier' }, title: { type: 'string', description: 'Page title' }, content: { type: 'string', description: 'Page content' }, @@ -324,7 +332,7 @@ export const ConfluenceBlock: BlockConfig = { comment: { type: 'string', description: 'Comment text' }, commentId: { type: 'string', description: 'Comment identifier' }, attachmentId: { type: 'string', description: 'Attachment identifier' }, - attachmentFile: { type: 'json', description: 'File to upload as attachment' }, + attachmentFile: { type: 'json', description: 'File to upload as attachment (canonical param)' }, attachmentFileName: { type: 'string', description: 'Custom file name for attachment' }, attachmentComment: { type: 'string', description: 'Comment for the attachment' }, labelName: { type: 'string', description: 'Label name' }, @@ -334,6 +342,7 @@ export const ConfluenceBlock: BlockConfig = { ts: { type: 'string', description: 'Timestamp' }, pageId: { type: 'string', description: 'Page identifier' }, content: { type: 'string', description: 'Page content' }, + body: { type: 'json', description: 'Page body with storage format' }, title: { type: 'string', description: 'Page title' }, url: { type: 
'string', description: 'Page or resource URL' }, success: { type: 'boolean', description: 'Operation success status' }, @@ -371,31 +380,46 @@ export const ConfluenceV2Block: BlockConfig = { title: 'Operation', type: 'dropdown', options: [ + // Page Operations { label: 'Read Page', id: 'read' }, { label: 'Create Page', id: 'create' }, { label: 'Update Page', id: 'update' }, { label: 'Delete Page', id: 'delete' }, + { label: 'List Pages in Space', id: 'list_pages_in_space' }, + { label: 'Get Page Children', id: 'get_page_children' }, + { label: 'Get Page Ancestors', id: 'get_page_ancestors' }, + // Version Operations + { label: 'List Page Versions', id: 'list_page_versions' }, + { label: 'Get Page Version', id: 'get_page_version' }, + // Page Property Operations + { label: 'List Page Properties', id: 'list_page_properties' }, + { label: 'Create Page Property', id: 'create_page_property' }, + // Search Operations { label: 'Search Content', id: 'search' }, + { label: 'Search in Space', id: 'search_in_space' }, + // Blog Post Operations + { label: 'List Blog Posts', id: 'list_blogposts' }, + { label: 'Get Blog Post', id: 'get_blogpost' }, + { label: 'Create Blog Post', id: 'create_blogpost' }, + { label: 'List Blog Posts in Space', id: 'list_blogposts_in_space' }, + // Comment Operations { label: 'Create Comment', id: 'create_comment' }, { label: 'List Comments', id: 'list_comments' }, { label: 'Update Comment', id: 'update_comment' }, { label: 'Delete Comment', id: 'delete_comment' }, + // Attachment Operations { label: 'Upload Attachment', id: 'upload_attachment' }, { label: 'List Attachments', id: 'list_attachments' }, { label: 'Delete Attachment', id: 'delete_attachment' }, + // Label Operations { label: 'List Labels', id: 'list_labels' }, + { label: 'Add Label', id: 'add_label' }, + // Space Operations { label: 'Get Space', id: 'get_space' }, { label: 'List Spaces', id: 'list_spaces' }, ], value: () => 'read', }, - { - id: 'domain', - title: 'Domain', - type: 
'short-input', - placeholder: 'Enter Confluence domain (e.g., simstudio.atlassian.net)', - required: true, - }, { id: 'credential', title: 'Confluence Account', @@ -424,10 +448,23 @@ export const ConfluenceV2Block: BlockConfig = { 'search:confluence', 'read:me', 'offline_access', + 'read:blogpost:confluence', + 'write:blogpost:confluence', + 'read:content.property:confluence', + 'write:content.property:confluence', + 'read:hierarchical-content:confluence', + 'read:content.metadata:confluence', ], placeholder: 'Select Confluence account', required: true, }, + { + id: 'domain', + title: 'Domain', + type: 'short-input', + placeholder: 'Enter Confluence domain (e.g., simstudio.atlassian.net)', + required: true, + }, { id: 'pageId', title: 'Select Page', @@ -437,6 +474,40 @@ export const ConfluenceV2Block: BlockConfig = { placeholder: 'Select Confluence page', dependsOn: ['credential', 'domain'], mode: 'basic', + condition: { + field: 'operation', + value: [ + 'list_pages_in_space', + 'list_blogposts', + 'get_blogpost', + 'list_blogposts_in_space', + 'search', + 'search_in_space', + 'get_space', + 'list_spaces', + ], + not: true, + }, + required: { + field: 'operation', + value: [ + 'read', + 'update', + 'delete', + 'create_comment', + 'list_comments', + 'list_attachments', + 'list_labels', + 'upload_attachment', + 'add_label', + 'get_page_children', + 'get_page_ancestors', + 'list_page_versions', + 'get_page_version', + 'list_page_properties', + 'create_page_property', + ], + }, }, { id: 'manualPageId', @@ -445,6 +516,40 @@ export const ConfluenceV2Block: BlockConfig = { canonicalParamId: 'pageId', placeholder: 'Enter Confluence page ID', mode: 'advanced', + condition: { + field: 'operation', + value: [ + 'list_pages_in_space', + 'list_blogposts', + 'get_blogpost', + 'list_blogposts_in_space', + 'search', + 'search_in_space', + 'get_space', + 'list_spaces', + ], + not: true, + }, + required: { + field: 'operation', + value: [ + 'read', + 'update', + 'delete', + 
'create_comment', + 'list_comments', + 'list_attachments', + 'list_labels', + 'upload_attachment', + 'add_label', + 'get_page_children', + 'get_page_ancestors', + 'list_page_versions', + 'get_page_version', + 'list_page_properties', + 'create_page_property', + ], + }, }, { id: 'spaceId', @@ -452,21 +557,63 @@ export const ConfluenceV2Block: BlockConfig = { type: 'short-input', placeholder: 'Enter Confluence space ID', required: true, - condition: { field: 'operation', value: ['create', 'get_space'] }, + condition: { + field: 'operation', + value: [ + 'create', + 'get_space', + 'list_pages_in_space', + 'search_in_space', + 'create_blogpost', + 'list_blogposts_in_space', + ], + }, + }, + { + id: 'blogPostId', + title: 'Blog Post ID', + type: 'short-input', + placeholder: 'Enter blog post ID', + required: true, + condition: { field: 'operation', value: 'get_blogpost' }, + }, + { + id: 'versionNumber', + title: 'Version Number', + type: 'short-input', + placeholder: 'Enter version number', + required: true, + condition: { field: 'operation', value: 'get_page_version' }, + }, + { + id: 'propertyKey', + title: 'Property Key', + type: 'short-input', + placeholder: 'Enter property key/name', + required: true, + condition: { field: 'operation', value: 'create_page_property' }, + }, + { + id: 'propertyValue', + title: 'Property Value', + type: 'long-input', + placeholder: 'Enter property value (JSON supported)', + required: true, + condition: { field: 'operation', value: 'create_page_property' }, }, { id: 'title', title: 'Title', type: 'short-input', - placeholder: 'Enter title for the page', - condition: { field: 'operation', value: ['create', 'update'] }, + placeholder: 'Enter title', + condition: { field: 'operation', value: ['create', 'update', 'create_blogpost'] }, }, { id: 'content', title: 'Content', type: 'long-input', - placeholder: 'Enter content for the page', - condition: { field: 'operation', value: ['create', 'update'] }, + placeholder: 'Enter content', + 
condition: { field: 'operation', value: ['create', 'update', 'create_blogpost'] }, }, { id: 'parentId', @@ -481,7 +628,7 @@ export const ConfluenceV2Block: BlockConfig = { type: 'short-input', placeholder: 'Enter search query', required: true, - condition: { field: 'operation', value: 'search' }, + condition: { field: 'operation', value: ['search', 'search_in_space'] }, }, { id: 'comment', @@ -515,6 +662,7 @@ export const ConfluenceV2Block: BlockConfig = { placeholder: 'Select file to upload', condition: { field: 'operation', value: 'upload_attachment' }, mode: 'basic', + required: { field: 'operation', value: 'upload_attachment' }, }, { id: 'attachmentFileReference', @@ -524,6 +672,7 @@ export const ConfluenceV2Block: BlockConfig = { placeholder: 'Reference file from previous blocks', condition: { field: 'operation', value: 'upload_attachment' }, mode: 'advanced', + required: { field: 'operation', value: 'upload_attachment' }, }, { id: 'attachmentFileName', @@ -545,40 +694,140 @@ export const ConfluenceV2Block: BlockConfig = { type: 'short-input', placeholder: 'Enter label name', required: true, - condition: { field: 'operation', value: ['add_label', 'remove_label'] }, + condition: { field: 'operation', value: 'add_label' }, + }, + { + id: 'labelPrefix', + title: 'Label Prefix', + type: 'dropdown', + options: [ + { label: 'Global (default)', id: 'global' }, + { label: 'My', id: 'my' }, + { label: 'Team', id: 'team' }, + { label: 'System', id: 'system' }, + ], + value: () => 'global', + condition: { field: 'operation', value: 'add_label' }, + }, + { + id: 'blogPostStatus', + title: 'Status', + type: 'dropdown', + options: [ + { label: 'Published (current)', id: 'current' }, + { label: 'Draft', id: 'draft' }, + ], + value: () => 'current', + condition: { field: 'operation', value: 'create_blogpost' }, + }, + { + id: 'purge', + title: 'Permanently Delete', + type: 'switch', + condition: { field: 'operation', value: 'delete' }, + }, + { + id: 'bodyFormat', + title: 
'Body Format', + type: 'dropdown', + options: [ + { label: 'Storage (default)', id: 'storage' }, + { label: 'Atlas Doc Format', id: 'atlas_doc_format' }, + { label: 'View', id: 'view' }, + { label: 'Export View', id: 'export_view' }, + ], + value: () => 'storage', + condition: { field: 'operation', value: 'list_comments' }, }, { id: 'limit', title: 'Limit', type: 'short-input', - placeholder: 'Enter maximum number of results (default: 25)', + placeholder: 'Enter maximum number of results (default: 50, max: 250)', condition: { field: 'operation', - value: ['search', 'list_comments', 'list_attachments', 'list_spaces'], + value: [ + 'search', + 'search_in_space', + 'list_comments', + 'list_attachments', + 'list_spaces', + 'list_pages_in_space', + 'list_blogposts', + 'list_blogposts_in_space', + 'get_page_children', + 'list_page_versions', + 'list_page_properties', + 'list_labels', + ], + }, + }, + { + id: 'cursor', + title: 'Pagination Cursor', + type: 'short-input', + placeholder: 'Enter cursor from previous response (optional)', + condition: { + field: 'operation', + value: [ + 'list_comments', + 'list_attachments', + 'list_spaces', + 'list_pages_in_space', + 'list_blogposts', + 'list_blogposts_in_space', + 'get_page_children', + 'list_page_versions', + 'list_page_properties', + 'list_labels', + ], }, }, ], tools: { access: [ + // Page Tools 'confluence_retrieve', 'confluence_update', 'confluence_create_page', 'confluence_delete_page', + 'confluence_list_pages_in_space', + 'confluence_get_page_children', + 'confluence_get_page_ancestors', + // Version Tools + 'confluence_list_page_versions', + 'confluence_get_page_version', + // Property Tools + 'confluence_list_page_properties', + 'confluence_create_page_property', + // Search Tools 'confluence_search', + 'confluence_search_in_space', + // Blog Post Tools + 'confluence_list_blogposts', + 'confluence_get_blogpost', + 'confluence_create_blogpost', + 'confluence_list_blogposts_in_space', + // Comment Tools 
'confluence_create_comment', 'confluence_list_comments', 'confluence_update_comment', 'confluence_delete_comment', + // Attachment Tools 'confluence_upload_attachment', 'confluence_list_attachments', 'confluence_delete_attachment', + // Label Tools 'confluence_list_labels', + 'confluence_add_label', + // Space Tools 'confluence_get_space', 'confluence_list_spaces', ], config: { tool: (params) => { switch (params.operation) { + // Page Operations case 'read': return 'confluence_retrieve' case 'create': @@ -587,8 +836,37 @@ export const ConfluenceV2Block: BlockConfig = { return 'confluence_update' case 'delete': return 'confluence_delete_page' + case 'list_pages_in_space': + return 'confluence_list_pages_in_space' + case 'get_page_children': + return 'confluence_get_page_children' + case 'get_page_ancestors': + return 'confluence_get_page_ancestors' + // Version Operations + case 'list_page_versions': + return 'confluence_list_page_versions' + case 'get_page_version': + return 'confluence_get_page_version' + // Property Operations + case 'list_page_properties': + return 'confluence_list_page_properties' + case 'create_page_property': + return 'confluence_create_page_property' + // Search Operations case 'search': return 'confluence_search' + case 'search_in_space': + return 'confluence_search_in_space' + // Blog Post Operations + case 'list_blogposts': + return 'confluence_list_blogposts' + case 'get_blogpost': + return 'confluence_get_blogpost' + case 'create_blogpost': + return 'confluence_create_blogpost' + case 'list_blogposts_in_space': + return 'confluence_list_blogposts_in_space' + // Comment Operations case 'create_comment': return 'confluence_create_comment' case 'list_comments': @@ -597,14 +875,19 @@ export const ConfluenceV2Block: BlockConfig = { return 'confluence_update_comment' case 'delete_comment': return 'confluence_delete_comment' + // Attachment Operations case 'upload_attachment': return 'confluence_upload_attachment' case 'list_attachments': 
return 'confluence_list_attachments' case 'delete_attachment': return 'confluence_delete_attachment' + // Label Operations case 'list_labels': return 'confluence_list_labels' + case 'add_label': + return 'confluence_add_label' + // Space Operations case 'get_space': return 'confluence_get_space' case 'list_spaces': @@ -617,42 +900,104 @@ export const ConfluenceV2Block: BlockConfig = { const { credential, pageId, - manualPageId, operation, - attachmentFileUpload, - attachmentFileReference, attachmentFile, attachmentFileName, attachmentComment, + blogPostId, + versionNumber, + propertyKey, + propertyValue, + labelPrefix, + blogPostStatus, + purge, + bodyFormat, + cursor, ...rest } = params - const effectivePageId = (pageId || manualPageId || '').trim() + // Use canonical param (serializer already handles basic/advanced mode) + const effectivePageId = pageId ? String(pageId).trim() : '' - const requiresPageId = [ - 'read', - 'update', - 'delete', - 'create_comment', - 'list_comments', - 'list_attachments', - 'list_labels', - 'upload_attachment', - ] - - const requiresSpaceId = ['create', 'get_space'] - - if (requiresPageId.includes(operation) && !effectivePageId) { - throw new Error('Page ID is required. 
Please select a page or enter a page ID manually.') + if (operation === 'add_label') { + return { + credential, + pageId: effectivePageId, + operation, + prefix: labelPrefix || 'global', + ...rest, + } } - if (requiresSpaceId.includes(operation) && !rest.spaceId) { - throw new Error('Space ID is required for this operation.') + if (operation === 'create_blogpost') { + return { + credential, + operation, + status: blogPostStatus || 'current', + ...rest, + } + } + + if (operation === 'delete') { + return { + credential, + pageId: effectivePageId, + operation, + purge: purge || false, + ...rest, + } + } + + if (operation === 'list_comments') { + return { + credential, + pageId: effectivePageId, + operation, + bodyFormat: bodyFormat || 'storage', + cursor: cursor || undefined, + ...rest, + } + } + + // Operations that support cursor pagination + const supportsCursor = [ + 'list_attachments', + 'list_spaces', + 'list_pages_in_space', + 'list_blogposts', + 'list_blogposts_in_space', + 'get_page_children', + 'list_page_versions', + 'list_page_properties', + 'list_labels', + ] + + if (supportsCursor.includes(operation) && cursor) { + return { + credential, + pageId: effectivePageId || undefined, + operation, + cursor, + ...rest, + } + } + + if (operation === 'create_page_property') { + if (!propertyKey) { + throw new Error('Property key is required for this operation.') + } + return { + credential, + pageId: effectivePageId, + operation, + key: propertyKey, + value: propertyValue, + ...rest, + } } if (operation === 'upload_attachment') { - const fileInput = attachmentFileUpload || attachmentFileReference || attachmentFile - const normalizedFile = normalizeFileInput(fileInput, { single: true }) + const normalizedFile = normalizeFileInput(attachmentFile, { single: true }) if (!normalizedFile) { throw new Error('File is required for upload attachment operation.') } @@ -670,6 +1015,8 @@ export const ConfluenceV2Block: BlockConfig = { return { credential, pageId: 
effectivePageId || undefined, + blogPostId: blogPostId || undefined, + versionNumber: versionNumber ? Number.parseInt(String(versionNumber), 10) : undefined, operation, ...rest, } @@ -680,22 +1027,79 @@ export const ConfluenceV2Block: BlockConfig = { operation: { type: 'string', description: 'Operation to perform' }, domain: { type: 'string', description: 'Confluence domain' }, credential: { type: 'string', description: 'Confluence access token' }, - pageId: { type: 'string', description: 'Page identifier' }, - manualPageId: { type: 'string', description: 'Manual page identifier' }, + pageId: { type: 'string', description: 'Page identifier (canonical param)' }, spaceId: { type: 'string', description: 'Space identifier' }, - title: { type: 'string', description: 'Page title' }, - content: { type: 'string', description: 'Page content' }, + blogPostId: { type: 'string', description: 'Blog post identifier' }, + versionNumber: { type: 'number', description: 'Page version number' }, + propertyKey: { type: 'string', description: 'Property key/name' }, + propertyValue: { type: 'json', description: 'Property value (JSON)' }, + title: { type: 'string', description: 'Page or blog post title' }, + content: { type: 'string', description: 'Page or blog post content' }, parentId: { type: 'string', description: 'Parent page identifier' }, query: { type: 'string', description: 'Search query' }, comment: { type: 'string', description: 'Comment text' }, commentId: { type: 'string', description: 'Comment identifier' }, attachmentId: { type: 'string', description: 'Attachment identifier' }, - attachmentFile: { type: 'json', description: 'File to upload as attachment' }, - attachmentFileUpload: { type: 'json', description: 'Uploaded file (basic mode)' }, - attachmentFileReference: { type: 'json', description: 'File reference (advanced mode)' }, + attachmentFile: { type: 'json', description: 'File to upload as attachment (canonical param)' }, attachmentFileName: { type: 'string', 
description: 'Custom file name for attachment' }, attachmentComment: { type: 'string', description: 'Comment for the attachment' }, labelName: { type: 'string', description: 'Label name' }, + labelPrefix: { type: 'string', description: 'Label prefix (global, my, team, system)' }, + blogPostStatus: { type: 'string', description: 'Blog post status (current or draft)' }, + purge: { type: 'boolean', description: 'Permanently delete instead of moving to trash' }, + bodyFormat: { type: 'string', description: 'Body format for comments' }, limit: { type: 'number', description: 'Maximum number of results' }, + cursor: { type: 'string', description: 'Pagination cursor from previous response' }, + }, + outputs: { + ts: { type: 'string', description: 'Timestamp' }, + pageId: { type: 'string', description: 'Page identifier' }, + content: { type: 'string', description: 'Page content' }, + body: { type: 'json', description: 'Page body with storage format' }, + title: { type: 'string', description: 'Page title' }, + url: { type: 'string', description: 'Page or resource URL' }, + success: { type: 'boolean', description: 'Operation success status' }, + deleted: { type: 'boolean', description: 'Deletion status' }, + added: { type: 'boolean', description: 'Addition status' }, + removed: { type: 'boolean', description: 'Removal status' }, + updated: { type: 'boolean', description: 'Update status' }, + // Search & List Results + results: { type: 'array', description: 'Search results' }, + pages: { type: 'array', description: 'List of pages' }, + children: { type: 'array', description: 'List of child pages' }, + ancestors: { type: 'array', description: 'List of ancestor pages' }, + // Comment Results + comments: { type: 'array', description: 'List of comments' }, + commentId: { type: 'string', description: 'Comment identifier' }, + // Attachment Results + attachments: { type: 'array', description: 'List of attachments' }, + attachmentId: { type: 'string', description: 'Attachment 
identifier' }, + fileSize: { type: 'number', description: 'Attachment file size in bytes' }, + mediaType: { type: 'string', description: 'Attachment MIME type' }, + downloadUrl: { type: 'string', description: 'Attachment download URL' }, + // Label Results + labels: { type: 'array', description: 'List of labels' }, + labelName: { type: 'string', description: 'Label name' }, + // Space Results + spaces: { type: 'array', description: 'List of spaces' }, + spaceId: { type: 'string', description: 'Space identifier' }, + name: { type: 'string', description: 'Space name' }, + key: { type: 'string', description: 'Space key' }, + type: { type: 'string', description: 'Space or content type' }, + status: { type: 'string', description: 'Space status' }, + // Blog Post Results + blogPosts: { type: 'array', description: 'List of blog posts' }, + blogPostId: { type: 'string', description: 'Blog post identifier' }, + // Version Results + versions: { type: 'array', description: 'List of page versions' }, + version: { type: 'json', description: 'Version information' }, + versionNumber: { type: 'number', description: 'Version number' }, + // Property Results + properties: { type: 'array', description: 'List of page properties' }, + propertyId: { type: 'string', description: 'Property identifier' }, + propertyKey: { type: 'string', description: 'Property key' }, + propertyValue: { type: 'json', description: 'Property value' }, + // Pagination + nextCursor: { type: 'string', description: 'Cursor for fetching next page of results' }, }, } diff --git a/apps/sim/blocks/blocks/discord.ts b/apps/sim/blocks/blocks/discord.ts index 79331eaac..0e245c9e8 100644 --- a/apps/sim/blocks/blocks/discord.ts +++ b/apps/sim/blocks/blocks/discord.ts @@ -584,7 +584,7 @@ export const DiscordBlock: BlockConfig = { ...commonParams, channelId: params.channelId, content: params.content, - files: normalizeFileInput(params.attachmentFiles || params.files), + files: normalizeFileInput(params.files), } } case 
'discord_get_messages': @@ -773,8 +773,7 @@ export const DiscordBlock: BlockConfig = { nick: { type: 'string', description: 'Member nickname' }, reason: { type: 'string', description: 'Reason for moderation action' }, archived: { type: 'string', description: 'Archive status (true/false)' }, - attachmentFiles: { type: 'json', description: 'Files to attach (UI upload)' }, - files: { type: 'array', description: 'Files to attach (UserFile array)' }, + files: { type: 'array', description: 'Files to attach (canonical param)' }, limit: { type: 'number', description: 'Message limit' }, autoArchiveDuration: { type: 'number', description: 'Thread auto-archive duration in minutes' }, channelType: { type: 'number', description: 'Discord channel type (0=text, 2=voice, etc.)' }, diff --git a/apps/sim/blocks/blocks/dropbox.ts b/apps/sim/blocks/blocks/dropbox.ts index e7127c118..90be6e74f 100644 --- a/apps/sim/blocks/blocks/dropbox.ts +++ b/apps/sim/blocks/blocks/dropbox.ts @@ -317,12 +317,8 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, params.maxResults = Number(params.maxResults) } - // Normalize file input for upload operation - // Check all possible field IDs: uploadFile (basic), fileRef (advanced), fileContent (legacy) - const normalizedFile = normalizeFileInput( - params.uploadFile || params.fileRef || params.fileContent, - { single: true } - ) + // Normalize file input for upload operation - use canonical 'file' param + const normalizedFile = normalizeFileInput(params.file, { single: true }) if (normalizedFile) { params.file = normalizedFile } @@ -361,10 +357,7 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, path: { type: 'string', description: 'Path in Dropbox' }, autorename: { type: 'boolean', description: 'Auto-rename on conflict' }, // Upload inputs - uploadFile: { type: 'json', description: 'Uploaded file (UserFile)' }, - file: { type: 'json', description: 'File to upload (UserFile object)' }, - 
fileRef: { type: 'json', description: 'File reference from previous block' }, - fileContent: { type: 'string', description: 'Legacy: base64 encoded file content' }, + file: { type: 'json', description: 'File to upload (canonical param)' }, fileName: { type: 'string', description: 'Optional filename' }, mode: { type: 'string', description: 'Write mode: add or overwrite' }, mute: { type: 'boolean', description: 'Mute notifications' }, diff --git a/apps/sim/blocks/blocks/file.ts b/apps/sim/blocks/blocks/file.ts index 3db0c2d47..f9b3058f2 100644 --- a/apps/sim/blocks/blocks/file.ts +++ b/apps/sim/blocks/blocks/file.ts @@ -194,7 +194,8 @@ export const FileV2Block: BlockConfig = { fallbackToolId: 'file_parser_v2', }), params: (params) => { - const fileInput = params.file || params.filePath || params.fileInput + // Use canonical 'fileInput' param directly + const fileInput = params.fileInput if (!fileInput) { logger.error('No file input provided') throw new Error('File is required') @@ -228,9 +229,7 @@ export const FileV2Block: BlockConfig = { }, }, inputs: { - fileInput: { type: 'json', description: 'File input (upload or URL reference)' }, - filePath: { type: 'string', description: 'File URL (advanced mode)' }, - file: { type: 'json', description: 'Uploaded file data (basic mode)' }, + fileInput: { type: 'json', description: 'File input (canonical param)' }, fileType: { type: 'string', description: 'File type' }, }, outputs: { @@ -283,7 +282,8 @@ export const FileV3Block: BlockConfig = { config: { tool: () => 'file_parser_v3', params: (params) => { - const fileInput = params.fileInput ?? params.file ?? params.fileUrl ?? 
params.filePath + // Use canonical 'fileInput' param directly + const fileInput = params.fileInput if (!fileInput) { logger.error('No file input provided') throw new Error('File input is required') @@ -321,9 +321,7 @@ export const FileV3Block: BlockConfig = { }, }, inputs: { - fileInput: { type: 'json', description: 'File input (upload or URL)' }, - fileUrl: { type: 'string', description: 'External file URL (advanced mode)' }, - file: { type: 'json', description: 'Uploaded file data (basic mode)' }, + fileInput: { type: 'json', description: 'File input (canonical param)' }, fileType: { type: 'string', description: 'File type' }, }, outputs: { diff --git a/apps/sim/blocks/blocks/fireflies.ts b/apps/sim/blocks/blocks/fireflies.ts index 568cda788..308704f6e 100644 --- a/apps/sim/blocks/blocks/fireflies.ts +++ b/apps/sim/blocks/blocks/fireflies.ts @@ -461,12 +461,11 @@ Return ONLY the summary text - no quotes, no labels.`, return baseParams case 'fireflies_upload_audio': { - // Support both file upload and URL + // Support both file upload and URL - use canonical 'audioFile' param const audioUrl = params.audioUrl?.trim() const audioFile = params.audioFile - const audioFileReference = params.audioFileReference - if (!audioUrl && !audioFile && !audioFileReference) { + if (!audioUrl && !audioFile) { throw new Error('Either audio file or audio URL is required.') } @@ -474,7 +473,6 @@ Return ONLY the summary text - no quotes, no labels.`, ...baseParams, audioUrl: audioUrl || undefined, audioFile: audioFile || undefined, - audioFileReference: audioFileReference || undefined, title: params.title?.trim() || undefined, language: params.language?.trim() || undefined, attendees: params.attendees?.trim() || undefined, @@ -548,8 +546,7 @@ Return ONLY the summary text - no quotes, no labels.`, hostEmail: { type: 'string', description: 'Filter by host email' }, participants: { type: 'string', description: 'Filter by participants (comma-separated)' }, limit: { type: 'number', 
description: 'Maximum results to return' }, - audioFile: { type: 'json', description: 'Audio/video file (UserFile)' }, - audioFileReference: { type: 'json', description: 'Audio/video file reference' }, + audioFile: { type: 'json', description: 'Audio/video file (canonical param)' }, audioUrl: { type: 'string', description: 'Public URL to audio file' }, title: { type: 'string', description: 'Meeting title' }, language: { type: 'string', description: 'Language code for transcription' }, @@ -620,9 +617,8 @@ export const FirefliesV2Block: BlockConfig = { } if (params.operation === 'fireflies_upload_audio') { - const audioFile = normalizeFileInput(params.audioFile || params.audioFileReference, { - single: true, - }) + // Use canonical 'audioFile' param directly + const audioFile = normalizeFileInput(params.audioFile, { single: true }) if (!audioFile) { throw new Error('Audio file is required.') } @@ -635,7 +631,6 @@ export const FirefliesV2Block: BlockConfig = { ...params, audioUrl, audioFile: undefined, - audioFileReference: undefined, }) } @@ -643,8 +638,5 @@ export const FirefliesV2Block: BlockConfig = { }, }, }, - inputs: { - ...firefliesV2Inputs, - audioFileReference: { type: 'json', description: 'Audio/video file reference' }, - }, + inputs: firefliesV2Inputs, } diff --git a/apps/sim/blocks/blocks/gmail.ts b/apps/sim/blocks/blocks/gmail.ts index 5f8ac25e1..4a6d66e37 100644 --- a/apps/sim/blocks/blocks/gmail.ts +++ b/apps/sim/blocks/blocks/gmail.ts @@ -362,10 +362,10 @@ Return ONLY the search query - no explanations, no extra text.`, }, // Add/Remove Label - Label selector (basic mode) { - id: 'labelManagement', + id: 'labelSelector', title: 'Label', type: 'folder-selector', - canonicalParamId: 'labelIds', + canonicalParamId: 'manageLabelId', serviceId: 'gmail', requiredScopes: ['https://www.googleapis.com/auth/gmail.labels'], placeholder: 'Select label', @@ -376,10 +376,10 @@ Return ONLY the search query - no explanations, no extra text.`, }, // Add/Remove Label - 
Manual label input (advanced mode) { - id: 'manualLabelManagement', + id: 'manualLabelId', title: 'Label', type: 'short-input', - canonicalParamId: 'labelIds', + canonicalParamId: 'manageLabelId', placeholder: 'Enter label ID (e.g., INBOX, Label_123)', mode: 'advanced', condition: { field: 'operation', value: ['add_label_gmail', 'remove_label_gmail'] }, @@ -408,38 +408,33 @@ Return ONLY the search query - no explanations, no extra text.`, const { credential, folder, - manualFolder, - destinationLabel, - manualDestinationLabel, - sourceLabel, - manualSourceLabel, + addLabelIds, + removeLabelIds, moveMessageId, actionMessageId, labelActionMessageId, - labelManagement, - manualLabelManagement, - attachmentFiles, + manageLabelId, attachments, ...rest } = params - // Handle both selector and manual folder input - const effectiveFolder = (folder || manualFolder || '').trim() + // Use canonical 'folder' param directly + const effectiveFolder = folder ? String(folder).trim() : '' if (rest.operation === 'read_gmail') { rest.folder = effectiveFolder || 'INBOX' } - // Handle move operation + // Handle move operation - use canonical params addLabelIds and removeLabelIds if (rest.operation === 'move_gmail') { if (moveMessageId) { rest.messageId = moveMessageId } - if (!rest.addLabelIds) { - rest.addLabelIds = (destinationLabel || manualDestinationLabel || '').trim() + if (addLabelIds) { + rest.addLabelIds = String(addLabelIds).trim() } - if (!rest.removeLabelIds) { - rest.removeLabelIds = (sourceLabel || manualSourceLabel || '').trim() + if (removeLabelIds) { + rest.removeLabelIds = String(removeLabelIds).trim() } } @@ -462,13 +457,13 @@ Return ONLY the search query - no explanations, no extra text.`, if (labelActionMessageId) { rest.messageId = labelActionMessageId } - if (!rest.labelIds) { - rest.labelIds = (labelManagement || manualLabelManagement || '').trim() + if (manageLabelId) { + rest.labelIds = String(manageLabelId).trim() } } - // Normalize attachments for send/draft 
operations - const normalizedAttachments = normalizeFileInput(attachmentFiles || attachments) + // Normalize attachments for send/draft operations - use canonical 'attachments' param + const normalizedAttachments = normalizeFileInput(attachments) return { ...rest, @@ -493,10 +488,9 @@ Return ONLY the search query - no explanations, no extra text.`, }, cc: { type: 'string', description: 'CC recipients (comma-separated)' }, bcc: { type: 'string', description: 'BCC recipients (comma-separated)' }, - attachments: { type: 'array', description: 'Files to attach (UserFile array)' }, + attachments: { type: 'array', description: 'Files to attach (canonical param)' }, // Read operation inputs - folder: { type: 'string', description: 'Gmail folder' }, - manualFolder: { type: 'string', description: 'Manual folder name' }, + folder: { type: 'string', description: 'Gmail folder (canonical param)' }, readMessageId: { type: 'string', description: 'Message identifier for reading specific email' }, unreadOnly: { type: 'boolean', description: 'Unread messages only' }, includeAttachments: { type: 'boolean', description: 'Include email attachments' }, @@ -505,18 +499,16 @@ Return ONLY the search query - no explanations, no extra text.`, maxResults: { type: 'number', description: 'Maximum results' }, // Move operation inputs moveMessageId: { type: 'string', description: 'Message ID to move' }, - destinationLabel: { type: 'string', description: 'Destination label ID' }, - manualDestinationLabel: { type: 'string', description: 'Manual destination label ID' }, - sourceLabel: { type: 'string', description: 'Source label ID to remove' }, - manualSourceLabel: { type: 'string', description: 'Manual source label ID' }, - addLabelIds: { type: 'string', description: 'Label IDs to add' }, - removeLabelIds: { type: 'string', description: 'Label IDs to remove' }, + addLabelIds: { type: 'string', description: 'Label IDs to add (canonical param)' }, + removeLabelIds: { type: 'string', description: 
'Label IDs to remove (canonical param)' }, // Action operation inputs actionMessageId: { type: 'string', description: 'Message ID for actions' }, labelActionMessageId: { type: 'string', description: 'Message ID for label actions' }, - labelManagement: { type: 'string', description: 'Label ID for management' }, - manualLabelManagement: { type: 'string', description: 'Manual label ID' }, - labelIds: { type: 'string', description: 'Label IDs for add/remove operations' }, + manageLabelId: { + type: 'string', + description: 'Label ID for add/remove operations (canonical param)', + }, + labelIds: { type: 'string', description: 'Label IDs to monitor (trigger)' }, }, outputs: { // Tool outputs diff --git a/apps/sim/blocks/blocks/google_calendar.ts b/apps/sim/blocks/blocks/google_calendar.ts index db010d696..2c28ebeba 100644 --- a/apps/sim/blocks/blocks/google_calendar.ts +++ b/apps/sim/blocks/blocks/google_calendar.ts @@ -517,21 +517,17 @@ Return ONLY the natural language event text - no explanations.`, attendees, replaceExisting, calendarId, - manualCalendarId, - destinationCalendar, - manualDestinationCalendarId, + destinationCalendarId, ...rest } = params - // Handle calendar ID (selector or manual) - const effectiveCalendarId = (calendarId || manualCalendarId || '').trim() + // Use canonical 'calendarId' param directly + const effectiveCalendarId = calendarId ? String(calendarId).trim() : '' - // Handle destination calendar ID for move operation (selector or manual) - const effectiveDestinationCalendarId = ( - destinationCalendar || - manualDestinationCalendarId || - '' - ).trim() + // Use canonical 'destinationCalendarId' param directly + const effectiveDestinationCalendarId = destinationCalendarId + ? 
String(destinationCalendarId).trim() + : '' const processedParams: Record = { ...rest, @@ -589,8 +585,7 @@ Return ONLY the natural language event text - no explanations.`, inputs: { operation: { type: 'string', description: 'Operation to perform' }, credential: { type: 'string', description: 'Google Calendar access token' }, - calendarId: { type: 'string', description: 'Calendar identifier' }, - manualCalendarId: { type: 'string', description: 'Manual calendar identifier' }, + calendarId: { type: 'string', description: 'Calendar identifier (canonical param)' }, // Create/Update operation inputs summary: { type: 'string', description: 'Event title' }, @@ -609,8 +604,10 @@ Return ONLY the natural language event text - no explanations.`, eventId: { type: 'string', description: 'Event identifier' }, // Move operation inputs - destinationCalendar: { type: 'string', description: 'Destination calendar selector' }, - manualDestinationCalendarId: { type: 'string', description: 'Manual destination calendar ID' }, + destinationCalendarId: { + type: 'string', + description: 'Destination calendar ID (canonical param)', + }, // List Calendars operation inputs minAccessRole: { type: 'string', description: 'Minimum access role filter' }, diff --git a/apps/sim/blocks/blocks/google_docs.ts b/apps/sim/blocks/blocks/google_docs.ts index 14e919e90..2a780fd78 100644 --- a/apps/sim/blocks/blocks/google_docs.ts +++ b/apps/sim/blocks/blocks/google_docs.ts @@ -157,11 +157,10 @@ Return ONLY the document content - no explanations, no extra text.`, } }, params: (params) => { - const { credential, documentId, manualDocumentId, folderSelector, folderId, ...rest } = - params + const { credential, documentId, folderId, ...rest } = params - const effectiveDocumentId = (documentId || manualDocumentId || '').trim() - const effectiveFolderId = (folderSelector || folderId || '').trim() + const effectiveDocumentId = documentId ? String(documentId).trim() : '' + const effectiveFolderId = folderId ? 
String(folderId).trim() : '' return { ...rest, @@ -175,11 +174,9 @@ Return ONLY the document content - no explanations, no extra text.`, inputs: { operation: { type: 'string', description: 'Operation to perform' }, credential: { type: 'string', description: 'Google Docs access token' }, - documentId: { type: 'string', description: 'Document identifier' }, - manualDocumentId: { type: 'string', description: 'Manual document identifier' }, + documentId: { type: 'string', description: 'Document identifier (canonical param)' }, title: { type: 'string', description: 'Document title' }, - folderSelector: { type: 'string', description: 'Selected folder' }, - folderId: { type: 'string', description: 'Folder identifier' }, + folderId: { type: 'string', description: 'Parent folder identifier (canonical param)' }, content: { type: 'string', description: 'Document content' }, }, outputs: { diff --git a/apps/sim/blocks/blocks/google_drive.ts b/apps/sim/blocks/blocks/google_drive.ts index d14168d5a..3c44d8092 100644 --- a/apps/sim/blocks/blocks/google_drive.ts +++ b/apps/sim/blocks/blocks/google_drive.ts @@ -121,10 +121,10 @@ Return ONLY the file content - no explanations, no markdown code blocks, no extr required: false, }, { - id: 'folderSelector', + id: 'uploadFolderSelector', title: 'Select Parent Folder', type: 'file-selector', - canonicalParamId: 'folderId', + canonicalParamId: 'uploadFolderId', serviceId: 'google-drive', requiredScopes: [ 'https://www.googleapis.com/auth/drive.file', @@ -137,10 +137,10 @@ Return ONLY the file content - no explanations, no markdown code blocks, no extr condition: { field: 'operation', value: ['create_file', 'upload'] }, }, { - id: 'manualFolderId', + id: 'uploadManualFolderId', title: 'Parent Folder ID', type: 'short-input', - canonicalParamId: 'folderId', + canonicalParamId: 'uploadFolderId', placeholder: 'Enter parent folder ID (leave empty for root folder)', mode: 'advanced', condition: { field: 'operation', value: ['create_file', 
'upload'] }, @@ -193,10 +193,10 @@ Return ONLY the file content - no explanations, no markdown code blocks, no extr required: true, }, { - id: 'folderSelector', + id: 'createFolderParentSelector', title: 'Select Parent Folder', type: 'file-selector', - canonicalParamId: 'folderId', + canonicalParamId: 'createFolderParentId', serviceId: 'google-drive', requiredScopes: [ 'https://www.googleapis.com/auth/drive.file', @@ -210,20 +210,20 @@ Return ONLY the file content - no explanations, no markdown code blocks, no extr }, // Manual Folder ID input (advanced mode) { - id: 'manualFolderId', + id: 'createFolderManualParentId', title: 'Parent Folder ID', type: 'short-input', - canonicalParamId: 'folderId', + canonicalParamId: 'createFolderParentId', placeholder: 'Enter parent folder ID (leave empty for root folder)', mode: 'advanced', condition: { field: 'operation', value: 'create_folder' }, }, // List Fields - Folder Selector (basic mode) { - id: 'folderSelector', + id: 'listFolderSelector', title: 'Select Folder', type: 'file-selector', - canonicalParamId: 'folderId', + canonicalParamId: 'listFolderId', serviceId: 'google-drive', requiredScopes: [ 'https://www.googleapis.com/auth/drive.file', @@ -237,10 +237,10 @@ Return ONLY the file content - no explanations, no markdown code blocks, no extr }, // Manual Folder ID input (advanced mode) { - id: 'manualFolderId', + id: 'listManualFolderId', title: 'Folder ID', type: 'short-input', - canonicalParamId: 'folderId', + canonicalParamId: 'listFolderId', placeholder: 'Enter folder ID (leave empty for root folder)', mode: 'advanced', condition: { field: 'operation', value: 'list' }, @@ -279,10 +279,10 @@ Return ONLY the query string - no explanations, no quotes around the whole thing }, // Download File Fields - File Selector (basic mode) { - id: 'fileSelector', + id: 'downloadFileSelector', title: 'Select File', type: 'file-selector', - canonicalParamId: 'fileId', + canonicalParamId: 'downloadFileId', serviceId: 
'google-drive', requiredScopes: [ 'https://www.googleapis.com/auth/drive.file', @@ -292,13 +292,14 @@ Return ONLY the query string - no explanations, no quotes around the whole thing mode: 'basic', dependsOn: ['credential'], condition: { field: 'operation', value: 'download' }, + required: true, }, // Manual File ID input (advanced mode) { - id: 'manualFileId', + id: 'downloadManualFileId', title: 'File ID', type: 'short-input', - canonicalParamId: 'fileId', + canonicalParamId: 'downloadFileId', placeholder: 'Enter file ID', mode: 'advanced', condition: { field: 'operation', value: 'download' }, @@ -339,10 +340,10 @@ Return ONLY the query string - no explanations, no quotes around the whole thing }, // Get File Info Fields { - id: 'fileSelector', + id: 'getFileSelector', title: 'Select File', type: 'file-selector', - canonicalParamId: 'fileId', + canonicalParamId: 'getFileId', serviceId: 'google-drive', requiredScopes: [ 'https://www.googleapis.com/auth/drive.file', @@ -352,12 +353,13 @@ Return ONLY the query string - no explanations, no quotes around the whole thing mode: 'basic', dependsOn: ['credential'], condition: { field: 'operation', value: 'get_file' }, + required: true, }, { - id: 'manualFileId', + id: 'getManualFileId', title: 'File ID', type: 'short-input', - canonicalParamId: 'fileId', + canonicalParamId: 'getFileId', placeholder: 'Enter file ID', mode: 'advanced', condition: { field: 'operation', value: 'get_file' }, @@ -365,10 +367,10 @@ Return ONLY the query string - no explanations, no quotes around the whole thing }, // Copy File Fields { - id: 'fileSelector', + id: 'copyFileSelector', title: 'Select File to Copy', type: 'file-selector', - canonicalParamId: 'fileId', + canonicalParamId: 'copyFileId', serviceId: 'google-drive', requiredScopes: [ 'https://www.googleapis.com/auth/drive.file', @@ -378,12 +380,13 @@ Return ONLY the query string - no explanations, no quotes around the whole thing mode: 'basic', dependsOn: ['credential'], condition: { 
field: 'operation', value: 'copy' }, + required: true, }, { - id: 'manualFileId', + id: 'copyManualFileId', title: 'File ID', type: 'short-input', - canonicalParamId: 'fileId', + canonicalParamId: 'copyFileId', placeholder: 'Enter file ID to copy', mode: 'advanced', condition: { field: 'operation', value: 'copy' }, @@ -397,10 +400,10 @@ Return ONLY the query string - no explanations, no quotes around the whole thing condition: { field: 'operation', value: 'copy' }, }, { - id: 'folderSelector', + id: 'copyDestFolderSelector', title: 'Destination Folder', type: 'file-selector', - canonicalParamId: 'destinationFolderId', + canonicalParamId: 'copyDestFolderId', serviceId: 'google-drive', requiredScopes: [ 'https://www.googleapis.com/auth/drive.file', @@ -413,20 +416,20 @@ Return ONLY the query string - no explanations, no quotes around the whole thing condition: { field: 'operation', value: 'copy' }, }, { - id: 'manualDestinationFolderId', + id: 'copyManualDestFolderId', title: 'Destination Folder ID', type: 'short-input', - canonicalParamId: 'destinationFolderId', + canonicalParamId: 'copyDestFolderId', placeholder: 'Enter destination folder ID (optional)', mode: 'advanced', condition: { field: 'operation', value: 'copy' }, }, // Update File Fields { - id: 'fileSelector', + id: 'updateFileSelector', title: 'Select File to Update', type: 'file-selector', - canonicalParamId: 'fileId', + canonicalParamId: 'updateFileId', serviceId: 'google-drive', requiredScopes: [ 'https://www.googleapis.com/auth/drive.file', @@ -436,12 +439,13 @@ Return ONLY the query string - no explanations, no quotes around the whole thing mode: 'basic', dependsOn: ['credential'], condition: { field: 'operation', value: 'update' }, + required: true, }, { - id: 'manualFileId', + id: 'updateManualFileId', title: 'File ID', type: 'short-input', - canonicalParamId: 'fileId', + canonicalParamId: 'updateFileId', placeholder: 'Enter file ID to update', mode: 'advanced', condition: { field: 'operation', 
value: 'update' }, @@ -500,10 +504,10 @@ Return ONLY the description text - no explanations, no quotes, no extra text.`, }, // Trash File Fields { - id: 'fileSelector', + id: 'trashFileSelector', title: 'Select File to Trash', type: 'file-selector', - canonicalParamId: 'fileId', + canonicalParamId: 'trashFileId', serviceId: 'google-drive', requiredScopes: [ 'https://www.googleapis.com/auth/drive.file', @@ -513,12 +517,13 @@ Return ONLY the description text - no explanations, no quotes, no extra text.`, mode: 'basic', dependsOn: ['credential'], condition: { field: 'operation', value: 'trash' }, + required: true, }, { - id: 'manualFileId', + id: 'trashManualFileId', title: 'File ID', type: 'short-input', - canonicalParamId: 'fileId', + canonicalParamId: 'trashFileId', placeholder: 'Enter file ID to trash', mode: 'advanced', condition: { field: 'operation', value: 'trash' }, @@ -526,10 +531,10 @@ Return ONLY the description text - no explanations, no quotes, no extra text.`, }, // Delete File Fields { - id: 'fileSelector', + id: 'deleteFileSelector', title: 'Select File to Delete', type: 'file-selector', - canonicalParamId: 'fileId', + canonicalParamId: 'deleteFileId', serviceId: 'google-drive', requiredScopes: [ 'https://www.googleapis.com/auth/drive.file', @@ -539,12 +544,13 @@ Return ONLY the description text - no explanations, no quotes, no extra text.`, mode: 'basic', dependsOn: ['credential'], condition: { field: 'operation', value: 'delete' }, + required: true, }, { - id: 'manualFileId', + id: 'deleteManualFileId', title: 'File ID', type: 'short-input', - canonicalParamId: 'fileId', + canonicalParamId: 'deleteFileId', placeholder: 'Enter file ID to permanently delete', mode: 'advanced', condition: { field: 'operation', value: 'delete' }, @@ -552,10 +558,10 @@ Return ONLY the description text - no explanations, no quotes, no extra text.`, }, // Share File Fields { - id: 'fileSelector', + id: 'shareFileSelector', title: 'Select File to Share', type: 
'file-selector', - canonicalParamId: 'fileId', + canonicalParamId: 'shareFileId', serviceId: 'google-drive', requiredScopes: [ 'https://www.googleapis.com/auth/drive.file', @@ -565,12 +571,13 @@ Return ONLY the description text - no explanations, no quotes, no extra text.`, mode: 'basic', dependsOn: ['credential'], condition: { field: 'operation', value: 'share' }, + required: true, }, { - id: 'manualFileId', + id: 'shareManualFileId', title: 'File ID', type: 'short-input', - canonicalParamId: 'fileId', + canonicalParamId: 'shareFileId', placeholder: 'Enter file ID to share', mode: 'advanced', condition: { field: 'operation', value: 'share' }, @@ -665,10 +672,10 @@ Return ONLY the message text - no subject line, no greetings/signatures, no extr }, // Unshare (Remove Permission) Fields { - id: 'fileSelector', + id: 'unshareFileSelector', title: 'Select File', type: 'file-selector', - canonicalParamId: 'fileId', + canonicalParamId: 'unshareFileId', serviceId: 'google-drive', requiredScopes: [ 'https://www.googleapis.com/auth/drive.file', @@ -678,12 +685,13 @@ Return ONLY the message text - no subject line, no greetings/signatures, no extr mode: 'basic', dependsOn: ['credential'], condition: { field: 'operation', value: 'unshare' }, + required: true, }, { - id: 'manualFileId', + id: 'unshareManualFileId', title: 'File ID', type: 'short-input', - canonicalParamId: 'fileId', + canonicalParamId: 'unshareFileId', placeholder: 'Enter file ID', mode: 'advanced', condition: { field: 'operation', value: 'unshare' }, @@ -699,10 +707,10 @@ Return ONLY the message text - no subject line, no greetings/signatures, no extr }, // List Permissions Fields { - id: 'fileSelector', + id: 'listPermissionsFileSelector', title: 'Select File', type: 'file-selector', - canonicalParamId: 'fileId', + canonicalParamId: 'listPermissionsFileId', serviceId: 'google-drive', requiredScopes: [ 'https://www.googleapis.com/auth/drive.file', @@ -712,12 +720,13 @@ Return ONLY the message text - no subject 
line, no greetings/signatures, no extr mode: 'basic', dependsOn: ['credential'], condition: { field: 'operation', value: 'list_permissions' }, + required: true, }, { - id: 'manualFileId', + id: 'listPermissionsManualFileId', title: 'File ID', type: 'short-input', - canonicalParamId: 'fileId', + canonicalParamId: 'listPermissionsFileId', placeholder: 'Enter file ID', mode: 'advanced', condition: { field: 'operation', value: 'list_permissions' }, @@ -778,13 +787,23 @@ Return ONLY the message text - no subject line, no greetings/signatures, no extr params: (params) => { const { credential, - folderSelector, - manualFolderId, - manualDestinationFolderId, - fileSelector, - manualFileId, + // Folder canonical params (per-operation) + uploadFolderId, + createFolderParentId, + listFolderId, + copyDestFolderId, + // File canonical params (per-operation) + downloadFileId, + getFileId, + copyFileId, + updateFileId, + trashFileId, + deleteFileId, + shareFileId, + unshareFileId, + listPermissionsFileId, + // File upload file, - fileUpload, mimeType, shareType, starred, @@ -793,19 +812,58 @@ Return ONLY the message text - no subject line, no greetings/signatures, no extr } = params // Normalize file input - handles both basic (file-upload) and advanced (short-input) modes - const normalizedFile = normalizeFileInput(file ?? 
fileUpload, { single: true }) + const normalizedFile = normalizeFileInput(file, { single: true }) - // Use folderSelector if provided, otherwise use manualFolderId - const effectiveFolderId = (folderSelector || manualFolderId || '').trim() + // Resolve folderId based on operation + let effectiveFolderId: string | undefined + switch (params.operation) { + case 'create_file': + case 'upload': + effectiveFolderId = uploadFolderId?.trim() || undefined + break + case 'create_folder': + effectiveFolderId = createFolderParentId?.trim() || undefined + break + case 'list': + effectiveFolderId = listFolderId?.trim() || undefined + break + } - // Use fileSelector if provided, otherwise use manualFileId - const effectiveFileId = (fileSelector || manualFileId || '').trim() + // Resolve fileId based on operation + let effectiveFileId: string | undefined + switch (params.operation) { + case 'download': + effectiveFileId = downloadFileId?.trim() || undefined + break + case 'get_file': + effectiveFileId = getFileId?.trim() || undefined + break + case 'copy': + effectiveFileId = copyFileId?.trim() || undefined + break + case 'update': + effectiveFileId = updateFileId?.trim() || undefined + break + case 'trash': + effectiveFileId = trashFileId?.trim() || undefined + break + case 'delete': + effectiveFileId = deleteFileId?.trim() || undefined + break + case 'share': + effectiveFileId = shareFileId?.trim() || undefined + break + case 'unshare': + effectiveFileId = unshareFileId?.trim() || undefined + break + case 'list_permissions': + effectiveFileId = listPermissionsFileId?.trim() || undefined + break + } - // Use folderSelector for destination or manualDestinationFolderId for copy operation + // Resolve destinationFolderId for copy operation const effectiveDestinationFolderId = - params.operation === 'copy' - ? (folderSelector || manualDestinationFolderId || '').trim() - : undefined + params.operation === 'copy' ? 
copyDestFolderId?.trim() || undefined : undefined // Convert starred dropdown to boolean const starredValue = starred === 'true' ? true : starred === 'false' ? false : undefined @@ -816,9 +874,9 @@ Return ONLY the message text - no subject line, no greetings/signatures, no extr return { credential, - folderId: effectiveFolderId || undefined, - fileId: effectiveFileId || undefined, - destinationFolderId: effectiveDestinationFolderId || undefined, + folderId: effectiveFolderId, + fileId: effectiveFileId, + destinationFolderId: effectiveDestinationFolderId, file: normalizedFile, pageSize: rest.pageSize ? Number.parseInt(rest.pageSize as string, 10) : undefined, mimeType: mimeType, @@ -834,13 +892,21 @@ Return ONLY the message text - no subject line, no greetings/signatures, no extr inputs: { operation: { type: 'string', description: 'Operation to perform' }, credential: { type: 'string', description: 'Google Drive access token' }, - // File selection inputs - fileSelector: { type: 'string', description: 'Selected file' }, - manualFileId: { type: 'string', description: 'Manual file identifier' }, - // Folder selection inputs - folderSelector: { type: 'string', description: 'Selected folder' }, - manualFolderId: { type: 'string', description: 'Manual folder identifier' }, - manualDestinationFolderId: { type: 'string', description: 'Destination folder for copy' }, + // Folder canonical params (per-operation) + uploadFolderId: { type: 'string', description: 'Parent folder for upload/create' }, + createFolderParentId: { type: 'string', description: 'Parent folder for create folder' }, + listFolderId: { type: 'string', description: 'Folder to list files from' }, + copyDestFolderId: { type: 'string', description: 'Destination folder for copy' }, + // File canonical params (per-operation) + downloadFileId: { type: 'string', description: 'File to download' }, + getFileId: { type: 'string', description: 'File to get info for' }, + copyFileId: { type: 'string', description: 
'File to copy' }, + updateFileId: { type: 'string', description: 'File to update' }, + trashFileId: { type: 'string', description: 'File to trash' }, + deleteFileId: { type: 'string', description: 'File to delete' }, + shareFileId: { type: 'string', description: 'File to share' }, + unshareFileId: { type: 'string', description: 'File to unshare' }, + listPermissionsFileId: { type: 'string', description: 'File to list permissions for' }, // Upload and Create inputs fileName: { type: 'string', description: 'File or folder name' }, file: { type: 'json', description: 'File to upload (UserFile object)' }, diff --git a/apps/sim/blocks/blocks/google_forms.ts b/apps/sim/blocks/blocks/google_forms.ts index 0dee68977..435259c57 100644 --- a/apps/sim/blocks/blocks/google_forms.ts +++ b/apps/sim/blocks/blocks/google_forms.ts @@ -47,10 +47,11 @@ export const GoogleFormsBlock: BlockConfig = { }, // Form selector (basic mode) { - id: 'formId', + id: 'formSelector', title: 'Select Form', type: 'file-selector', canonicalParamId: 'formId', + required: true, serviceId: 'google-forms', requiredScopes: [], mimeType: 'application/vnd.google-apps.form', @@ -234,8 +235,7 @@ Example for "Add a required multiple choice question about favorite color": const { credential, operation, - formId, - manualFormId, + formId, // Canonical param from formSelector (basic) or manualFormId (advanced) responseId, pageSize, title, @@ -252,11 +252,10 @@ Example for "Add a required multiple choice question about favorite color": } = params const baseParams = { ...rest, credential } - const effectiveFormId = (formId || manualFormId || '').toString().trim() || undefined + const effectiveFormId = formId ? 
String(formId).trim() : undefined switch (operation) { case 'get_responses': - if (!effectiveFormId) throw new Error('Form ID is required.') return { ...baseParams, formId: effectiveFormId, @@ -265,10 +264,8 @@ Example for "Add a required multiple choice question about favorite color": } case 'get_form': case 'list_watches': - if (!effectiveFormId) throw new Error('Form ID is required.') return { ...baseParams, formId: effectiveFormId } case 'create_form': - if (!title) throw new Error('Form title is required.') return { ...baseParams, title: String(title).trim(), @@ -276,8 +273,6 @@ Example for "Add a required multiple choice question about favorite color": unpublished: unpublished ?? false, } case 'batch_update': - if (!effectiveFormId) throw new Error('Form ID is required.') - if (!requests) throw new Error('Update requests are required.') return { ...baseParams, formId: effectiveFormId, @@ -285,7 +280,6 @@ Example for "Add a required multiple choice question about favorite color": includeFormInResponse: includeFormInResponse ?? 
false, } case 'set_publish_settings': - if (!effectiveFormId) throw new Error('Form ID is required.') return { ...baseParams, formId: effectiveFormId, @@ -293,9 +287,6 @@ Example for "Add a required multiple choice question about favorite color": isAcceptingResponses: isAcceptingResponses, } case 'create_watch': - if (!effectiveFormId) throw new Error('Form ID is required.') - if (!eventType) throw new Error('Event type is required.') - if (!topicName) throw new Error('Pub/Sub topic is required.') return { ...baseParams, formId: effectiveFormId, @@ -305,8 +296,6 @@ Example for "Add a required multiple choice question about favorite color": } case 'delete_watch': case 'renew_watch': - if (!effectiveFormId) throw new Error('Form ID is required.') - if (!watchId) throw new Error('Watch ID is required.') return { ...baseParams, formId: effectiveFormId, @@ -321,8 +310,7 @@ Example for "Add a required multiple choice question about favorite color": inputs: { operation: { type: 'string', description: 'Operation to perform' }, credential: { type: 'string', description: 'Google OAuth credential' }, - formId: { type: 'string', description: 'Google Form ID (from selector)' }, - manualFormId: { type: 'string', description: 'Google Form ID (manual entry)' }, + formId: { type: 'string', description: 'Google Form ID' }, responseId: { type: 'string', description: 'Specific response ID' }, pageSize: { type: 'string', description: 'Max responses to retrieve' }, title: { type: 'string', description: 'Form title for creation' }, diff --git a/apps/sim/blocks/blocks/google_sheets.ts b/apps/sim/blocks/blocks/google_sheets.ts index a849b718c..3294f0036 100644 --- a/apps/sim/blocks/blocks/google_sheets.ts +++ b/apps/sim/blocks/blocks/google_sheets.ts @@ -246,11 +246,11 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, } }, params: (params) => { - const { credential, values, spreadsheetId, manualSpreadsheetId, ...rest } = params + const { credential, values, 
spreadsheetId, ...rest } = params const parsedValues = values ? JSON.parse(values as string) : undefined - const effectiveSpreadsheetId = (spreadsheetId || manualSpreadsheetId || '').trim() + const effectiveSpreadsheetId = spreadsheetId ? String(spreadsheetId).trim() : '' if (!effectiveSpreadsheetId) { throw new Error('Spreadsheet ID is required.') @@ -268,8 +268,7 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, inputs: { operation: { type: 'string', description: 'Operation to perform' }, credential: { type: 'string', description: 'Google Sheets access token' }, - spreadsheetId: { type: 'string', description: 'Spreadsheet identifier' }, - manualSpreadsheetId: { type: 'string', description: 'Manual spreadsheet identifier' }, + spreadsheetId: { type: 'string', description: 'Spreadsheet identifier (canonical param)' }, range: { type: 'string', description: 'Cell range' }, values: { type: 'string', description: 'Cell values data' }, valueInputOption: { type: 'string', description: 'Value input option' }, @@ -719,9 +718,7 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, credential, values, spreadsheetId, - manualSpreadsheetId, sheetName, - manualSheetName, cellRange, title, sheetTitles, @@ -746,9 +743,7 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, } } - const effectiveSpreadsheetId = ( - (spreadsheetId || manualSpreadsheetId || '') as string - ).trim() + const effectiveSpreadsheetId = spreadsheetId ? String(spreadsheetId).trim() : '' if (!effectiveSpreadsheetId) { throw new Error('Spreadsheet ID is required.') @@ -804,7 +799,7 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, } // Handle read/write/update/append/clear operations (require sheet name) - const effectiveSheetName = ((sheetName || manualSheetName || '') as string).trim() + const effectiveSheetName = sheetName ? 
String(sheetName).trim() : '' if (!effectiveSheetName) { throw new Error('Sheet name is required. Please select or enter a sheet name.') @@ -826,10 +821,8 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, inputs: { operation: { type: 'string', description: 'Operation to perform' }, credential: { type: 'string', description: 'Google Sheets access token' }, - spreadsheetId: { type: 'string', description: 'Spreadsheet identifier' }, - manualSpreadsheetId: { type: 'string', description: 'Manual spreadsheet identifier' }, - sheetName: { type: 'string', description: 'Name of the sheet/tab' }, - manualSheetName: { type: 'string', description: 'Manual sheet name entry' }, + spreadsheetId: { type: 'string', description: 'Spreadsheet identifier (canonical param)' }, + sheetName: { type: 'string', description: 'Name of the sheet/tab (canonical param)' }, cellRange: { type: 'string', description: 'Cell range (e.g., A1:D10)' }, values: { type: 'string', description: 'Cell values data' }, valueInputOption: { type: 'string', description: 'Value input option' }, diff --git a/apps/sim/blocks/blocks/google_slides.ts b/apps/sim/blocks/blocks/google_slides.ts index 784fc73fc..baa890a2a 100644 --- a/apps/sim/blocks/blocks/google_slides.ts +++ b/apps/sim/blocks/blocks/google_slides.ts @@ -664,8 +664,6 @@ Return ONLY the text content - no explanations, no markdown formatting markers, const { credential, presentationId, - manualPresentationId, - folderSelector, folderId, slideIndex, createContent, @@ -675,8 +673,8 @@ Return ONLY the text content - no explanations, no markdown formatting markers, ...rest } = params - const effectivePresentationId = (presentationId || manualPresentationId || '').trim() - const effectiveFolderId = (folderSelector || folderId || '').trim() + const effectivePresentationId = presentationId ? String(presentationId).trim() : '' + const effectiveFolderId = folderId ? 
String(folderId).trim() : '' const result: Record = { ...rest, @@ -802,15 +800,13 @@ Return ONLY the text content - no explanations, no markdown formatting markers, inputs: { operation: { type: 'string', description: 'Operation to perform' }, credential: { type: 'string', description: 'Google Slides access token' }, - presentationId: { type: 'string', description: 'Presentation identifier' }, - manualPresentationId: { type: 'string', description: 'Manual presentation identifier' }, + presentationId: { type: 'string', description: 'Presentation identifier (canonical param)' }, // Write operation slideIndex: { type: 'number', description: 'Slide index to write to' }, content: { type: 'string', description: 'Slide content' }, // Create operation title: { type: 'string', description: 'Presentation title' }, - folderSelector: { type: 'string', description: 'Selected folder' }, - folderId: { type: 'string', description: 'Folder identifier' }, + folderId: { type: 'string', description: 'Parent folder identifier (canonical param)' }, createContent: { type: 'string', description: 'Initial slide content' }, // Replace all text operation findText: { type: 'string', description: 'Text to find' }, @@ -826,8 +822,6 @@ Return ONLY the text content - no explanations, no markdown formatting markers, placeholderIdMappings: { type: 'string', description: 'JSON array of placeholder ID mappings' }, // Add image operation pageObjectId: { type: 'string', description: 'Slide object ID for image' }, - imageFile: { type: 'json', description: 'Uploaded image (UserFile)' }, - imageUrl: { type: 'string', description: 'Image URL or reference' }, imageSource: { type: 'json', description: 'Image source (file or URL)' }, imageWidth: { type: 'number', description: 'Image width in points' }, imageHeight: { type: 'number', description: 'Image height in points' }, @@ -936,11 +930,12 @@ const googleSlidesV2SubBlocks = (GoogleSlidesBlock.subBlocks || []).flatMap((sub }) const googleSlidesV2Inputs = 
GoogleSlidesBlock.inputs - ? Object.fromEntries( - Object.entries(GoogleSlidesBlock.inputs).filter( - ([key]) => key !== 'imageUrl' && key !== 'imageSource' - ) - ) + ? { + ...Object.fromEntries( + Object.entries(GoogleSlidesBlock.inputs).filter(([key]) => key !== 'imageSource') + ), + imageFile: { type: 'json', description: 'Image source (file or URL)' }, + } : {} export const GoogleSlidesV2Block: BlockConfig = { @@ -961,8 +956,7 @@ export const GoogleSlidesV2Block: BlockConfig = { } if (params.operation === 'add_image') { - const imageInput = params.imageFile || params.imageFileReference || params.imageSource - const fileObject = normalizeFileInput(imageInput, { single: true }) + const fileObject = normalizeFileInput(params.imageFile, { single: true }) if (!fileObject) { throw new Error('Image file is required.') } @@ -974,8 +968,6 @@ export const GoogleSlidesV2Block: BlockConfig = { return baseParams({ ...params, imageUrl, - imageFileReference: undefined, - imageSource: undefined, }) } @@ -983,8 +975,5 @@ export const GoogleSlidesV2Block: BlockConfig = { }, }, }, - inputs: { - ...googleSlidesV2Inputs, - imageFileReference: { type: 'json', description: 'Image file reference' }, - }, + inputs: googleSlidesV2Inputs, } diff --git a/apps/sim/blocks/blocks/jira.ts b/apps/sim/blocks/blocks/jira.ts index 3d67b3902..263e5e363 100644 --- a/apps/sim/blocks/blocks/jira.ts +++ b/apps/sim/blocks/blocks/jira.ts @@ -106,6 +106,7 @@ export const JiraBlock: BlockConfig = { placeholder: 'Select Jira project', dependsOn: ['credential', 'domain'], mode: 'basic', + required: { field: 'operation', value: ['write', 'update', 'read-bulk'] }, }, // Manual project ID input (advanced mode) { @@ -116,6 +117,7 @@ export const JiraBlock: BlockConfig = { placeholder: 'Enter Jira project ID', dependsOn: ['credential', 'domain'], mode: 'advanced', + required: { field: 'operation', value: ['write', 'update', 'read-bulk'] }, }, // Issue selector (basic mode) { @@ -148,6 +150,28 @@ export const 
JiraBlock: BlockConfig = { 'remove_watcher', ], }, + required: { + field: 'operation', + value: [ + 'read', + 'update', + 'delete', + 'assign', + 'transition', + 'add_comment', + 'get_comments', + 'update_comment', + 'delete_comment', + 'get_attachments', + 'add_attachment', + 'add_worklog', + 'get_worklogs', + 'update_worklog', + 'delete_worklog', + 'add_watcher', + 'remove_watcher', + ], + }, mode: 'basic', }, // Manual issue key input (advanced mode) @@ -180,6 +204,28 @@ export const JiraBlock: BlockConfig = { 'remove_watcher', ], }, + required: { + field: 'operation', + value: [ + 'read', + 'update', + 'delete', + 'assign', + 'transition', + 'add_comment', + 'get_comments', + 'update_comment', + 'delete_comment', + 'get_attachments', + 'add_attachment', + 'add_worklog', + 'get_worklogs', + 'update_worklog', + 'delete_worklog', + 'add_watcher', + 'remove_watcher', + ], + }, mode: 'advanced', }, { @@ -615,8 +661,9 @@ Return ONLY the comment text - no explanations.`, ], config: { tool: (params) => { - const effectiveProjectId = (params.projectId || params.manualProjectId || '').trim() - const effectiveIssueKey = (params.issueKey || params.manualIssueKey || '').trim() + // Use canonical param IDs (raw subBlock IDs are deleted after serialization) + const effectiveProjectId = params.projectId ? String(params.projectId).trim() : '' + const effectiveIssueKey = params.issueKey ? 
String(params.issueKey).trim() : '' switch (params.operation) { case 'read': @@ -676,11 +723,11 @@ Return ONLY the comment text - no explanations.`, } }, params: (params) => { - const { credential, projectId, manualProjectId, issueKey, manualIssueKey, ...rest } = params + const { credential, projectId, issueKey, ...rest } = params - // Use the selected IDs or the manually entered ones - const effectiveProjectId = (projectId || manualProjectId || '').trim() - const effectiveIssueKey = (issueKey || manualIssueKey || '').trim() + // Use canonical param IDs (raw subBlock IDs are deleted after serialization) + const effectiveProjectId = projectId ? String(projectId).trim() : '' + const effectiveIssueKey = issueKey ? String(issueKey).trim() : '' const baseParams = { credential, @@ -689,11 +736,6 @@ Return ONLY the comment text - no explanations.`, switch (params.operation) { case 'write': { - if (!effectiveProjectId) { - throw new Error( - 'Project ID is required. Please select a project or enter a project ID manually.' - ) - } // Parse comma-separated strings into arrays const parseCommaSeparated = (value: string | undefined): string[] | undefined => { if (!value || value.trim() === '') return undefined @@ -726,16 +768,6 @@ Return ONLY the comment text - no explanations.`, } } case 'update': { - if (!effectiveProjectId) { - throw new Error( - 'Project ID is required. Please select a project or enter a project ID manually.' - ) - } - if (!effectiveIssueKey) { - throw new Error( - 'Issue Key is required. Please select an issue or enter an issue key manually.' 
- ) - } const updateParams = { projectId: effectiveProjectId, issueKey: effectiveIssueKey, @@ -748,40 +780,20 @@ Return ONLY the comment text - no explanations.`, } } case 'read': { - // Check for project ID from either source - const projectForRead = (params.projectId || params.manualProjectId || '').trim() - const issueForRead = (params.issueKey || params.manualIssueKey || '').trim() - - if (!issueForRead) { - throw new Error( - 'Select a project to read issues, or provide an issue key to read a single issue.' - ) - } return { ...baseParams, - issueKey: issueForRead, + issueKey: effectiveIssueKey, // Include projectId if available for context - ...(projectForRead && { projectId: projectForRead }), + ...(effectiveProjectId && { projectId: effectiveProjectId }), } } case 'read-bulk': { - // Check both projectId and manualProjectId directly from params - const finalProjectId = params.projectId || params.manualProjectId || '' - - if (!finalProjectId) { - throw new Error( - 'Project ID is required. Please select a project or enter a project ID manually.' 
- ) - } return { ...baseParams, - projectId: finalProjectId.trim(), + projectId: effectiveProjectId.trim(), } } case 'delete': { - if (!effectiveIssueKey) { - throw new Error('Issue Key is required to delete an issue.') - } return { ...baseParams, issueKey: effectiveIssueKey, @@ -789,9 +801,6 @@ Return ONLY the comment text - no explanations.`, } } case 'assign': { - if (!effectiveIssueKey) { - throw new Error('Issue Key is required to assign an issue.') - } return { ...baseParams, issueKey: effectiveIssueKey, @@ -799,9 +808,6 @@ Return ONLY the comment text - no explanations.`, } } case 'transition': { - if (!effectiveIssueKey) { - throw new Error('Issue Key is required to transition an issue.') - } return { ...baseParams, issueKey: effectiveIssueKey, @@ -817,9 +823,6 @@ Return ONLY the comment text - no explanations.`, } } case 'add_comment': { - if (!effectiveIssueKey) { - throw new Error('Issue Key is required to add a comment.') - } return { ...baseParams, issueKey: effectiveIssueKey, @@ -827,9 +830,6 @@ Return ONLY the comment text - no explanations.`, } } case 'get_comments': { - if (!effectiveIssueKey) { - throw new Error('Issue Key is required to get comments.') - } return { ...baseParams, issueKey: effectiveIssueKey, @@ -837,9 +837,6 @@ Return ONLY the comment text - no explanations.`, } } case 'update_comment': { - if (!effectiveIssueKey) { - throw new Error('Issue Key is required to update a comment.') - } return { ...baseParams, issueKey: effectiveIssueKey, @@ -848,9 +845,6 @@ Return ONLY the comment text - no explanations.`, } } case 'delete_comment': { - if (!effectiveIssueKey) { - throw new Error('Issue Key is required to delete a comment.') - } return { ...baseParams, issueKey: effectiveIssueKey, @@ -858,19 +852,13 @@ Return ONLY the comment text - no explanations.`, } } case 'get_attachments': { - if (!effectiveIssueKey) { - throw new Error('Issue Key is required to get attachments.') - } return { ...baseParams, issueKey: effectiveIssueKey, } } 
case 'add_attachment': { - if (!effectiveIssueKey) { - throw new Error('Issue Key is required to add attachments.') - } - const normalizedFiles = normalizeFileInput(params.attachmentFiles || params.files) + const normalizedFiles = normalizeFileInput(params.files) if (!normalizedFiles || normalizedFiles.length === 0) { throw new Error('At least one attachment file is required.') } @@ -887,9 +875,6 @@ Return ONLY the comment text - no explanations.`, } } case 'add_worklog': { - if (!effectiveIssueKey) { - throw new Error('Issue Key is required to add a worklog.') - } return { ...baseParams, issueKey: effectiveIssueKey, @@ -901,9 +886,6 @@ Return ONLY the comment text - no explanations.`, } } case 'get_worklogs': { - if (!effectiveIssueKey) { - throw new Error('Issue Key is required to get worklogs.') - } return { ...baseParams, issueKey: effectiveIssueKey, @@ -911,9 +893,6 @@ Return ONLY the comment text - no explanations.`, } } case 'update_worklog': { - if (!effectiveIssueKey) { - throw new Error('Issue Key is required to update a worklog.') - } return { ...baseParams, issueKey: effectiveIssueKey, @@ -926,9 +905,6 @@ Return ONLY the comment text - no explanations.`, } } case 'delete_worklog': { - if (!effectiveIssueKey) { - throw new Error('Issue Key is required to delete a worklog.') - } return { ...baseParams, issueKey: effectiveIssueKey, @@ -951,9 +927,6 @@ Return ONLY the comment text - no explanations.`, } } case 'add_watcher': { - if (!effectiveIssueKey) { - throw new Error('Issue Key is required to add a watcher.') - } return { ...baseParams, issueKey: effectiveIssueKey, @@ -961,9 +934,6 @@ Return ONLY the comment text - no explanations.`, } } case 'remove_watcher': { - if (!effectiveIssueKey) { - throw new Error('Issue Key is required to remove a watcher.') - } return { ...baseParams, issueKey: effectiveIssueKey, @@ -990,10 +960,8 @@ Return ONLY the comment text - no explanations.`, operation: { type: 'string', description: 'Operation to perform' }, domain: 
{ type: 'string', description: 'Jira domain' }, credential: { type: 'string', description: 'Jira access token' }, - issueKey: { type: 'string', description: 'Issue key identifier' }, - projectId: { type: 'string', description: 'Project identifier' }, - manualProjectId: { type: 'string', description: 'Manual project identifier' }, - manualIssueKey: { type: 'string', description: 'Manual issue key' }, + issueKey: { type: 'string', description: 'Issue key identifier (canonical param)' }, + projectId: { type: 'string', description: 'Project identifier (canonical param)' }, // Update/Write operation inputs summary: { type: 'string', description: 'Issue summary' }, description: { type: 'string', description: 'Issue description' }, @@ -1024,8 +992,7 @@ Return ONLY the comment text - no explanations.`, commentBody: { type: 'string', description: 'Text content for comment operations' }, commentId: { type: 'string', description: 'Comment ID for update/delete operations' }, // Attachment operation inputs - attachmentFiles: { type: 'json', description: 'Files to attach (UI upload)' }, - files: { type: 'array', description: 'Files to attach (UserFile array)' }, + files: { type: 'array', description: 'Files to attach (canonical param)' }, attachmentId: { type: 'string', description: 'Attachment ID for delete operation' }, // Worklog operation inputs timeSpentSeconds: { diff --git a/apps/sim/blocks/blocks/linear.ts b/apps/sim/blocks/blocks/linear.ts index 2b8e43587..4774f7fe1 100644 --- a/apps/sim/blocks/blocks/linear.ts +++ b/apps/sim/blocks/blocks/linear.ts @@ -1476,9 +1476,9 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n return params.operation || 'linear_read_issues' }, params: (params) => { - // Handle both selector and manual inputs - const effectiveTeamId = (params.teamId || params.manualTeamId || '').trim() - const effectiveProjectId = (params.projectId || params.manualProjectId || '').trim() + // Use canonical param IDs (raw subBlock 
IDs are deleted after serialization) + const effectiveTeamId = params.teamId ? String(params.teamId).trim() : '' + const effectiveProjectId = params.projectId ? String(params.projectId).trim() : '' // Base params that most operations need const baseParams: Record = { @@ -1774,16 +1774,11 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n if (!params.issueId?.trim()) { throw new Error('Issue ID is required.') } - // Normalize file inputs - handles JSON stringified values from advanced mode - const attachmentFile = - normalizeFileInput(params.attachmentFileUpload, { - single: true, - errorMessage: 'Attachment file must be a single file.', - }) || - normalizeFileInput(params.file, { - single: true, - errorMessage: 'Attachment file must be a single file.', - }) + // Normalize file input - use canonical param 'file' (raw subBlock IDs are deleted after serialization) + const attachmentFile = normalizeFileInput(params.file, { + single: true, + errorMessage: 'Attachment file must be a single file.', + }) const attachmentUrl = params.url?.trim() || (attachmentFile ? 
(attachmentFile as { url?: string }).url : undefined) @@ -2261,10 +2256,8 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n inputs: { operation: { type: 'string', description: 'Operation to perform' }, credential: { type: 'string', description: 'Linear access token' }, - teamId: { type: 'string', description: 'Linear team identifier' }, - projectId: { type: 'string', description: 'Linear project identifier' }, - manualTeamId: { type: 'string', description: 'Manual team identifier' }, - manualProjectId: { type: 'string', description: 'Manual project identifier' }, + teamId: { type: 'string', description: 'Linear team identifier (canonical param)' }, + projectId: { type: 'string', description: 'Linear project identifier (canonical param)' }, issueId: { type: 'string', description: 'Issue identifier' }, title: { type: 'string', description: 'Title' }, description: { type: 'string', description: 'Description' }, @@ -2294,8 +2287,7 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n endDate: { type: 'string', description: 'End date' }, targetDate: { type: 'string', description: 'Target date' }, url: { type: 'string', description: 'URL' }, - attachmentFileUpload: { type: 'json', description: 'File to attach (UI upload)' }, - file: { type: 'json', description: 'File to attach (UserFile)' }, + file: { type: 'json', description: 'File to attach (canonical param)' }, attachmentTitle: { type: 'string', description: 'Attachment title' }, attachmentId: { type: 'string', description: 'Attachment identifier' }, relationType: { type: 'string', description: 'Relation type' }, diff --git a/apps/sim/blocks/blocks/microsoft_excel.ts b/apps/sim/blocks/blocks/microsoft_excel.ts index 3438c5bdc..990912424 100644 --- a/apps/sim/blocks/blocks/microsoft_excel.ts +++ b/apps/sim/blocks/blocks/microsoft_excel.ts @@ -241,17 +241,10 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, } }, params: 
(params) => { - const { - credential, - values, - spreadsheetId, - manualSpreadsheetId, - tableName, - worksheetName, - ...rest - } = params + const { credential, values, spreadsheetId, tableName, worksheetName, ...rest } = params - const effectiveSpreadsheetId = (spreadsheetId || manualSpreadsheetId || '').trim() + // Use canonical param ID (raw subBlock IDs are deleted after serialization) + const effectiveSpreadsheetId = spreadsheetId ? String(spreadsheetId).trim() : '' let parsedValues try { @@ -300,8 +293,7 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, inputs: { operation: { type: 'string', description: 'Operation to perform' }, credential: { type: 'string', description: 'Microsoft Excel access token' }, - spreadsheetId: { type: 'string', description: 'Spreadsheet identifier' }, - manualSpreadsheetId: { type: 'string', description: 'Manual spreadsheet identifier' }, + spreadsheetId: { type: 'string', description: 'Spreadsheet identifier (canonical param)' }, range: { type: 'string', description: 'Cell range' }, tableName: { type: 'string', description: 'Table name' }, worksheetName: { type: 'string', description: 'Worksheet name' }, @@ -505,21 +497,13 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, fallbackToolId: 'microsoft_excel_read_v2', }), params: (params) => { - const { - credential, - values, - spreadsheetId, - manualSpreadsheetId, - sheetName, - manualSheetName, - cellRange, - ...rest - } = params + const { credential, values, spreadsheetId, sheetName, cellRange, ...rest } = params const parsedValues = values ? JSON.parse(values as string) : undefined - const effectiveSpreadsheetId = (spreadsheetId || manualSpreadsheetId || '').trim() - const effectiveSheetName = ((sheetName || manualSheetName || '') as string).trim() + // Use canonical param IDs (raw subBlock IDs are deleted after serialization) + const effectiveSpreadsheetId = spreadsheetId ? 
String(spreadsheetId).trim() : '' + const effectiveSheetName = sheetName ? String(sheetName).trim() : '' if (!effectiveSpreadsheetId) { throw new Error('Spreadsheet ID is required.') @@ -543,10 +527,8 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, inputs: { operation: { type: 'string', description: 'Operation to perform' }, credential: { type: 'string', description: 'Microsoft Excel access token' }, - spreadsheetId: { type: 'string', description: 'Spreadsheet identifier' }, - manualSpreadsheetId: { type: 'string', description: 'Manual spreadsheet identifier' }, - sheetName: { type: 'string', description: 'Name of the sheet/tab' }, - manualSheetName: { type: 'string', description: 'Manual sheet name entry' }, + spreadsheetId: { type: 'string', description: 'Spreadsheet identifier (canonical param)' }, + sheetName: { type: 'string', description: 'Name of the sheet/tab (canonical param)' }, cellRange: { type: 'string', description: 'Cell range (e.g., A1:D10)' }, values: { type: 'string', description: 'Cell values data' }, valueInputOption: { type: 'string', description: 'Value input option' }, diff --git a/apps/sim/blocks/blocks/microsoft_planner.ts b/apps/sim/blocks/blocks/microsoft_planner.ts index 035832def..b49f8c284 100644 --- a/apps/sim/blocks/blocks/microsoft_planner.ts +++ b/apps/sim/blocks/blocks/microsoft_planner.ts @@ -84,12 +84,16 @@ export const MicrosoftPlannerBlock: BlockConfig = { field: 'operation', value: ['create_task', 'read_task', 'read_plan', 'list_buckets', 'create_bucket'], }, + required: { + field: 'operation', + value: ['read_plan', 'list_buckets', 'create_bucket', 'create_task'], + }, dependsOn: ['credential'], }, - // Task ID selector - for read_task + // Task ID selector - for read_task (basic mode) { - id: 'taskId', + id: 'taskSelector', title: 'Task ID', type: 'file-selector', placeholder: 'Select a task', @@ -97,24 +101,24 @@ export const MicrosoftPlannerBlock: BlockConfig = { condition: { field: 
'operation', value: ['read_task'] }, dependsOn: ['credential', 'planId'], mode: 'basic', - canonicalParamId: 'taskId', + canonicalParamId: 'readTaskId', }, - // Manual Task ID - for read_task advanced mode + // Manual Task ID - for read_task (advanced mode) { - id: 'manualTaskId', + id: 'manualReadTaskId', title: 'Manual Task ID', type: 'short-input', placeholder: 'Enter the task ID', condition: { field: 'operation', value: ['read_task'] }, dependsOn: ['credential', 'planId'], mode: 'advanced', - canonicalParamId: 'taskId', + canonicalParamId: 'readTaskId', }, - // Task ID for update/delete operations + // Task ID for update/delete operations (no basic/advanced split, just one input) { - id: 'taskIdForUpdate', + id: 'updateTaskId', title: 'Task ID', type: 'short-input', placeholder: 'Enter the task ID', @@ -122,8 +126,8 @@ export const MicrosoftPlannerBlock: BlockConfig = { field: 'operation', value: ['update_task', 'delete_task', 'get_task_details', 'update_task_details'], }, + required: true, dependsOn: ['credential'], - canonicalParamId: 'taskId', }, // Bucket ID for bucket operations @@ -133,6 +137,7 @@ export const MicrosoftPlannerBlock: BlockConfig = { type: 'short-input', placeholder: 'Enter the bucket ID', condition: { field: 'operation', value: ['read_bucket', 'update_bucket', 'delete_bucket'] }, + required: true, dependsOn: ['credential'], }, @@ -163,6 +168,7 @@ export const MicrosoftPlannerBlock: BlockConfig = { type: 'short-input', placeholder: 'Enter the task title', condition: { field: 'operation', value: ['create_task', 'update_task'] }, + required: { field: 'operation', value: 'create_task' }, }, // Name for bucket operations @@ -172,6 +178,7 @@ export const MicrosoftPlannerBlock: BlockConfig = { type: 'short-input', placeholder: 'Enter the bucket name', condition: { field: 'operation', value: ['create_bucket', 'update_bucket'] }, + required: { field: 'operation', value: 'create_bucket' }, }, // Description for task details @@ -347,9 +354,8 @@ 
Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, operation, groupId, planId, - taskId, - manualTaskId, - taskIdForUpdate, + readTaskId, // Canonical param from taskSelector (basic) or manualReadTaskId (advanced) for read_task + updateTaskId, // Task ID for update/delete operations bucketId, bucketIdForRead, title, @@ -372,8 +378,9 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, credential, } - // Handle different task ID fields - const effectiveTaskId = (taskIdForUpdate || taskId || manualTaskId || '').trim() + // Handle different task ID fields based on operation + const effectiveReadTaskId = readTaskId ? String(readTaskId).trim() : '' + const effectiveUpdateTaskId = updateTaskId ? String(updateTaskId).trim() : '' const effectiveBucketId = (bucketIdForRead || bucketId || '').trim() // List Plans @@ -383,31 +390,22 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, // Read Plan if (operation === 'read_plan') { - if (!planId?.trim()) { - throw new Error('Plan ID is required to read a plan.') - } return { ...baseParams, - planId: planId.trim(), + planId: planId?.trim(), } } // List Buckets if (operation === 'list_buckets') { - if (!planId?.trim()) { - throw new Error('Plan ID is required to list buckets.') - } return { ...baseParams, - planId: planId.trim(), + planId: planId?.trim(), } } // Read Bucket if (operation === 'read_bucket') { - if (!effectiveBucketId) { - throw new Error('Bucket ID is required to read a bucket.') - } return { ...baseParams, bucketId: effectiveBucketId, @@ -416,31 +414,19 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, // Create Bucket if (operation === 'create_bucket') { - if (!planId?.trim()) { - throw new Error('Plan ID is required to create a bucket.') - } - if (!name?.trim()) { - throw new Error('Bucket name is required to create a bucket.') - } return { ...baseParams, - planId: planId.trim(), - name: 
name.trim(), + planId: planId?.trim(), + name: name?.trim(), } } // Update Bucket if (operation === 'update_bucket') { - if (!effectiveBucketId) { - throw new Error('Bucket ID is required to update a bucket.') - } - if (!etag?.trim()) { - throw new Error('ETag is required to update a bucket.') - } const updateBucketParams: MicrosoftPlannerBlockParams = { ...baseParams, bucketId: effectiveBucketId, - etag: etag.trim(), + etag: etag?.trim(), } if (name?.trim()) { updateBucketParams.name = name.trim() @@ -450,26 +436,19 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, // Delete Bucket if (operation === 'delete_bucket') { - if (!effectiveBucketId) { - throw new Error('Bucket ID is required to delete a bucket.') - } - if (!etag?.trim()) { - throw new Error('ETag is required to delete a bucket.') - } return { ...baseParams, bucketId: effectiveBucketId, - etag: etag.trim(), + etag: etag?.trim(), } } // Read Task if (operation === 'read_task') { const readParams: MicrosoftPlannerBlockParams = { ...baseParams } - const readTaskId = (taskId || manualTaskId || '').trim() - if (readTaskId) { - readParams.taskId = readTaskId + if (effectiveReadTaskId) { + readParams.taskId = effectiveReadTaskId } else if (planId?.trim()) { readParams.planId = planId.trim() } @@ -479,17 +458,10 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, // Create Task if (operation === 'create_task') { - if (!planId?.trim()) { - throw new Error('Plan ID is required to create a task.') - } - if (!title?.trim()) { - throw new Error('Task title is required to create a task.') - } - const createParams: MicrosoftPlannerBlockParams = { ...baseParams, - planId: planId.trim(), - title: title.trim(), + planId: planId?.trim(), + title: title?.trim(), } if (description?.trim()) { @@ -510,17 +482,10 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, // Update Task if (operation === 'update_task') { - if 
(!effectiveTaskId) { - throw new Error('Task ID is required to update a task.') - } - if (!etag?.trim()) { - throw new Error('ETag is required to update a task.') - } - const updateParams: MicrosoftPlannerBlockParams = { ...baseParams, - taskId: effectiveTaskId, - etag: etag.trim(), + taskId: effectiveUpdateTaskId, + etag: etag?.trim(), } if (title?.trim()) { @@ -550,43 +515,27 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, // Delete Task if (operation === 'delete_task') { - if (!effectiveTaskId) { - throw new Error('Task ID is required to delete a task.') - } - if (!etag?.trim()) { - throw new Error('ETag is required to delete a task.') - } return { ...baseParams, - taskId: effectiveTaskId, - etag: etag.trim(), + taskId: effectiveUpdateTaskId, + etag: etag?.trim(), } } // Get Task Details if (operation === 'get_task_details') { - if (!effectiveTaskId) { - throw new Error('Task ID is required to get task details.') - } return { ...baseParams, - taskId: effectiveTaskId, + taskId: effectiveUpdateTaskId, } } // Update Task Details if (operation === 'update_task_details') { - if (!effectiveTaskId) { - throw new Error('Task ID is required to update task details.') - } - if (!etag?.trim()) { - throw new Error('ETag is required to update task details.') - } - const updateDetailsParams: MicrosoftPlannerBlockParams = { ...baseParams, - taskId: effectiveTaskId, - etag: etag.trim(), + taskId: effectiveUpdateTaskId, + etag: etag?.trim(), } if (description?.trim()) { @@ -614,9 +563,8 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, credential: { type: 'string', description: 'Microsoft account credential' }, groupId: { type: 'string', description: 'Microsoft 365 group ID' }, planId: { type: 'string', description: 'Plan ID' }, - taskId: { type: 'string', description: 'Task ID' }, - manualTaskId: { type: 'string', description: 'Manual Task ID' }, - taskIdForUpdate: { type: 'string', description: 'Task ID for 
update operations' }, + readTaskId: { type: 'string', description: 'Task ID for read operation' }, + updateTaskId: { type: 'string', description: 'Task ID for update/delete operations' }, bucketId: { type: 'string', description: 'Bucket ID' }, bucketIdForRead: { type: 'string', description: 'Bucket ID for read operations' }, title: { type: 'string', description: 'Task title' }, diff --git a/apps/sim/blocks/blocks/microsoft_teams.ts b/apps/sim/blocks/blocks/microsoft_teams.ts index 44324e426..9cf21df99 100644 --- a/apps/sim/blocks/blocks/microsoft_teams.ts +++ b/apps/sim/blocks/blocks/microsoft_teams.ts @@ -71,7 +71,7 @@ export const MicrosoftTeamsBlock: BlockConfig = { required: true, }, { - id: 'teamId', + id: 'teamSelector', title: 'Select Team', type: 'file-selector', canonicalParamId: 'teamId', @@ -92,6 +92,7 @@ export const MicrosoftTeamsBlock: BlockConfig = { 'list_channel_members', ], }, + required: true, }, { id: 'manualTeamId', @@ -112,9 +113,10 @@ export const MicrosoftTeamsBlock: BlockConfig = { 'list_channel_members', ], }, + required: true, }, { - id: 'chatId', + id: 'chatSelector', title: 'Select Chat', type: 'file-selector', canonicalParamId: 'chatId', @@ -127,6 +129,7 @@ export const MicrosoftTeamsBlock: BlockConfig = { field: 'operation', value: ['read_chat', 'write_chat', 'update_chat_message', 'delete_chat_message'], }, + required: true, }, { id: 'manualChatId', @@ -139,16 +142,17 @@ export const MicrosoftTeamsBlock: BlockConfig = { field: 'operation', value: ['read_chat', 'write_chat', 'update_chat_message', 'delete_chat_message'], }, + required: true, }, { - id: 'channelId', + id: 'channelSelector', title: 'Select Channel', type: 'file-selector', canonicalParamId: 'channelId', serviceId: 'microsoft-teams', requiredScopes: [], placeholder: 'Select a channel', - dependsOn: ['credential', 'teamId'], + dependsOn: ['credential', 'teamSelector'], mode: 'basic', condition: { field: 'operation', @@ -161,6 +165,7 @@ export const MicrosoftTeamsBlock: 
BlockConfig = { 'list_channel_members', ], }, + required: true, }, { id: 'manualChannelId', @@ -180,6 +185,7 @@ export const MicrosoftTeamsBlock: BlockConfig = { 'list_channel_members', ], }, + required: true, }, { id: 'messageId', @@ -249,7 +255,7 @@ export const MicrosoftTeamsBlock: BlockConfig = { }, // Variable reference (advanced mode) { - id: 'files', + id: 'fileReferences', title: 'File Attachments', type: 'short-input', canonicalParamId: 'files', @@ -317,23 +323,19 @@ export const MicrosoftTeamsBlock: BlockConfig = { const { credential, operation, - teamId, - manualTeamId, - chatId, - manualChatId, - channelId, - manualChannelId, - attachmentFiles, - files, + teamId, // Canonical param from teamSelector (basic) or manualTeamId (advanced) + chatId, // Canonical param from chatSelector (basic) or manualChatId (advanced) + channelId, // Canonical param from channelSelector (basic) or manualChannelId (advanced) + files, // Canonical param from attachmentFiles (basic) or fileReferences (advanced) messageId, reactionType, includeAttachments, ...rest } = params - const effectiveTeamId = (teamId || manualTeamId || '').trim() - const effectiveChatId = (chatId || manualChatId || '').trim() - const effectiveChannelId = (channelId || manualChannelId || '').trim() + const effectiveTeamId = teamId ? String(teamId).trim() : '' + const effectiveChatId = chatId ? String(chatId).trim() : '' + const effectiveChannelId = channelId ? 
String(channelId).trim() : '' const baseParams: Record = { ...rest, @@ -344,9 +346,9 @@ export const MicrosoftTeamsBlock: BlockConfig = { baseParams.includeAttachments = true } - // Add files if provided + // Add files if provided (canonical param from attachmentFiles or fileReferences) if (operation === 'write_chat' || operation === 'write_channel') { - const normalizedFiles = normalizeFileInput(attachmentFiles || files) + const normalizedFiles = normalizeFileInput(files) if (normalizedFiles) { baseParams.files = normalizedFiles } @@ -369,9 +371,6 @@ export const MicrosoftTeamsBlock: BlockConfig = { operation === 'update_chat_message' || operation === 'delete_chat_message' ) { - if (!effectiveChatId) { - throw new Error('Chat ID is required. Please select a chat or enter a chat ID.') - } return { ...baseParams, chatId: effectiveChatId } } @@ -383,31 +382,16 @@ export const MicrosoftTeamsBlock: BlockConfig = { operation === 'delete_channel_message' || operation === 'reply_to_message' ) { - if (!effectiveTeamId) { - throw new Error('Team ID is required for channel operations.') - } - if (!effectiveChannelId) { - throw new Error('Channel ID is required for channel operations.') - } return { ...baseParams, teamId: effectiveTeamId, channelId: effectiveChannelId } } // Team member operations if (operation === 'list_team_members') { - if (!effectiveTeamId) { - throw new Error('Team ID is required for team member operations.') - } return { ...baseParams, teamId: effectiveTeamId } } // Channel member operations if (operation === 'list_channel_members') { - if (!effectiveTeamId) { - throw new Error('Team ID is required for channel member operations.') - } - if (!effectiveChannelId) { - throw new Error('Channel ID is required for channel member operations.') - } return { ...baseParams, teamId: effectiveTeamId, channelId: effectiveChannelId } } @@ -440,12 +424,11 @@ export const MicrosoftTeamsBlock: BlockConfig = { type: 'string', description: 'Message identifier for 
update/delete/reply/reaction operations', }, - chatId: { type: 'string', description: 'Chat identifier' }, - manualChatId: { type: 'string', description: 'Manual chat identifier' }, - channelId: { type: 'string', description: 'Channel identifier' }, - manualChannelId: { type: 'string', description: 'Manual channel identifier' }, + // Canonical params (used by params function) teamId: { type: 'string', description: 'Team identifier' }, - manualTeamId: { type: 'string', description: 'Manual team identifier' }, + chatId: { type: 'string', description: 'Chat identifier' }, + channelId: { type: 'string', description: 'Channel identifier' }, + files: { type: 'array', description: 'Files to attach' }, content: { type: 'string', description: 'Message content. Mention users with userName', @@ -455,8 +438,6 @@ export const MicrosoftTeamsBlock: BlockConfig = { type: 'boolean', description: 'Download and include message attachments', }, - attachmentFiles: { type: 'json', description: 'Files to attach (UI upload)' }, - files: { type: 'array', description: 'Files to attach (UserFile array)' }, }, outputs: { content: { type: 'string', description: 'Formatted message content from chat/channel' }, diff --git a/apps/sim/blocks/blocks/mistral_parse.ts b/apps/sim/blocks/blocks/mistral_parse.ts index b78630ed9..dca0faf33 100644 --- a/apps/sim/blocks/blocks/mistral_parse.ts +++ b/apps/sim/blocks/blocks/mistral_parse.ts @@ -215,8 +215,8 @@ export const MistralParseV2Block: BlockConfig = { resultType: params.resultType || 'markdown', } - // Original V2 pattern: fileUpload (basic) or filePath (advanced) or document (wired) - const documentInput = params.fileUpload || params.filePath || params.document + // Use canonical document param directly + const documentInput = params.document if (!documentInput) { throw new Error('PDF document is required') } @@ -261,8 +261,6 @@ export const MistralParseV2Block: BlockConfig = { }, inputs: { document: { type: 'json', description: 'Document input 
(file upload or URL reference)' }, - filePath: { type: 'string', description: 'PDF document URL (advanced mode)' }, - fileUpload: { type: 'json', description: 'Uploaded PDF file (basic mode)' }, apiKey: { type: 'string', description: 'Mistral API key' }, resultType: { type: 'string', description: 'Output format type' }, pages: { type: 'string', description: 'Page selection' }, @@ -345,11 +343,8 @@ export const MistralParseV3Block: BlockConfig = { resultType: params.resultType || 'markdown', } - // V3 pattern: normalize file inputs from basic/advanced modes - const documentInput = normalizeFileInput( - params.fileUpload || params.fileReference || params.document, - { single: true } - ) + // V3 pattern: use canonical document param directly + const documentInput = normalizeFileInput(params.document, { single: true }) if (!documentInput) { throw new Error('PDF document is required') } @@ -389,8 +384,6 @@ export const MistralParseV3Block: BlockConfig = { }, inputs: { document: { type: 'json', description: 'Document input (file upload or file reference)' }, - fileReference: { type: 'json', description: 'File reference (advanced mode)' }, - fileUpload: { type: 'json', description: 'Uploaded PDF file (basic mode)' }, apiKey: { type: 'string', description: 'Mistral API key' }, resultType: { type: 'string', description: 'Output format type' }, pages: { type: 'string', description: 'Page selection' }, diff --git a/apps/sim/blocks/blocks/onedrive.ts b/apps/sim/blocks/blocks/onedrive.ts index e2e3545fb..47a15d0cc 100644 --- a/apps/sim/blocks/blocks/onedrive.ts +++ b/apps/sim/blocks/blocks/onedrive.ts @@ -140,10 +140,10 @@ export const OneDriveBlock: BlockConfig = { }, { - id: 'folderSelector', + id: 'uploadFolderSelector', title: 'Select Parent Folder', type: 'file-selector', - canonicalParamId: 'folderId', + canonicalParamId: 'uploadFolderId', serviceId: 'onedrive', requiredScopes: [ 'openid', @@ -160,10 +160,10 @@ export const OneDriveBlock: BlockConfig = { condition: { 
field: 'operation', value: ['create_file', 'upload'] }, }, { - id: 'manualFolderId', + id: 'uploadManualFolderId', title: 'Parent Folder ID', type: 'short-input', - canonicalParamId: 'folderId', + canonicalParamId: 'uploadFolderId', placeholder: 'Enter parent folder ID (leave empty for root folder)', dependsOn: ['credential'], mode: 'advanced', @@ -177,10 +177,10 @@ export const OneDriveBlock: BlockConfig = { condition: { field: 'operation', value: 'create_folder' }, }, { - id: 'folderSelector', + id: 'createFolderParentSelector', title: 'Select Parent Folder', type: 'file-selector', - canonicalParamId: 'folderId', + canonicalParamId: 'createFolderParentId', serviceId: 'onedrive', requiredScopes: [ 'openid', @@ -198,10 +198,10 @@ export const OneDriveBlock: BlockConfig = { }, // Manual Folder ID input (advanced mode) { - id: 'manualFolderId', + id: 'createFolderManualParentId', title: 'Parent Folder ID', type: 'short-input', - canonicalParamId: 'folderId', + canonicalParamId: 'createFolderParentId', placeholder: 'Enter parent folder ID (leave empty for root folder)', dependsOn: ['credential'], mode: 'advanced', @@ -209,10 +209,10 @@ export const OneDriveBlock: BlockConfig = { }, // List Fields - Folder Selector (basic mode) { - id: 'folderSelector', + id: 'listFolderSelector', title: 'Select Folder', type: 'file-selector', - canonicalParamId: 'folderId', + canonicalParamId: 'listFolderId', serviceId: 'onedrive', requiredScopes: [ 'openid', @@ -230,10 +230,10 @@ export const OneDriveBlock: BlockConfig = { }, // Manual Folder ID input (advanced mode) { - id: 'manualFolderId', + id: 'listManualFolderId', title: 'Folder ID', type: 'short-input', - canonicalParamId: 'folderId', + canonicalParamId: 'listFolderId', placeholder: 'Enter folder ID (leave empty for root folder)', dependsOn: ['credential'], mode: 'advanced', @@ -255,10 +255,10 @@ export const OneDriveBlock: BlockConfig = { }, // Download File Fields - File Selector (basic mode) { - id: 'fileSelector', + id: 
'downloadFileSelector', title: 'Select File', type: 'file-selector', - canonicalParamId: 'fileId', + canonicalParamId: 'downloadFileId', serviceId: 'onedrive', requiredScopes: [ 'openid', @@ -273,13 +273,14 @@ export const OneDriveBlock: BlockConfig = { mode: 'basic', dependsOn: ['credential'], condition: { field: 'operation', value: 'download' }, + required: true, }, // Manual File ID input (advanced mode) { - id: 'manualFileId', + id: 'downloadManualFileId', title: 'File ID', type: 'short-input', - canonicalParamId: 'fileId', + canonicalParamId: 'downloadFileId', placeholder: 'Enter file ID', mode: 'advanced', condition: { field: 'operation', value: 'download' }, @@ -294,10 +295,10 @@ export const OneDriveBlock: BlockConfig = { }, // Delete File Fields - File Selector (basic mode) { - id: 'fileSelector', + id: 'deleteFileSelector', title: 'Select File to Delete', type: 'file-selector', - canonicalParamId: 'fileId', + canonicalParamId: 'deleteFileId', serviceId: 'onedrive', requiredScopes: [ 'openid', @@ -316,10 +317,10 @@ export const OneDriveBlock: BlockConfig = { }, // Manual File ID input (advanced mode) { - id: 'manualFileId', + id: 'deleteManualFileId', title: 'File ID', type: 'short-input', - canonicalParamId: 'fileId', + canonicalParamId: 'deleteFileId', placeholder: 'Enter file or folder ID to delete', mode: 'advanced', condition: { field: 'operation', value: 'delete' }, @@ -355,13 +356,17 @@ export const OneDriveBlock: BlockConfig = { params: (params) => { const { credential, - folderId, - fileId, + // Folder canonical params (per-operation) + uploadFolderId, + createFolderParentId, + listFolderId, + // File canonical params (per-operation) + downloadFileId, + deleteFileId, mimeType, values, downloadFileName, file, - fileReference, ...rest } = params @@ -370,16 +375,42 @@ export const OneDriveBlock: BlockConfig = { normalizedValues = normalizeExcelValuesForToolParams(values) } - // Normalize file input from both basic (file-upload) and advanced 
(short-input) modes - const normalizedFile = normalizeFileInput(file || fileReference, { single: true }) + // Normalize file input from the canonical param + const normalizedFile = normalizeFileInput(file, { single: true }) + + // Resolve folderId based on operation + let resolvedFolderId: string | undefined + switch (params.operation) { + case 'create_file': + case 'upload': + resolvedFolderId = uploadFolderId?.trim() || undefined + break + case 'create_folder': + resolvedFolderId = createFolderParentId?.trim() || undefined + break + case 'list': + resolvedFolderId = listFolderId?.trim() || undefined + break + } + + // Resolve fileId based on operation + let resolvedFileId: string | undefined + switch (params.operation) { + case 'download': + resolvedFileId = downloadFileId?.trim() || undefined + break + case 'delete': + resolvedFileId = deleteFileId?.trim() || undefined + break + } return { credential, ...rest, values: normalizedValues, file: normalizedFile, - folderId: folderId || undefined, - fileId: fileId || undefined, + folderId: resolvedFolderId, + fileId: resolvedFileId, pageSize: rest.pageSize ? 
Number.parseInt(rest.pageSize as string, 10) : undefined, mimeType: mimeType, ...(downloadFileName && { fileName: downloadFileName }), @@ -390,16 +421,22 @@ export const OneDriveBlock: BlockConfig = { inputs: { operation: { type: 'string', description: 'Operation to perform' }, credential: { type: 'string', description: 'Microsoft account credential' }, - // Upload and Create Folder operation inputs + // Upload and Create operation inputs fileName: { type: 'string', description: 'File name' }, file: { type: 'json', description: 'File to upload (UserFile object)' }, - fileReference: { type: 'json', description: 'File reference from previous block' }, content: { type: 'string', description: 'Text content to upload' }, mimeType: { type: 'string', description: 'MIME type of file to create' }, values: { type: 'json', description: 'Cell values for new Excel as JSON' }, - fileId: { type: 'string', description: 'File ID to download' }, + // Folder canonical params (per-operation) + uploadFolderId: { type: 'string', description: 'Parent folder for upload/create file' }, + createFolderParentId: { type: 'string', description: 'Parent folder for create folder' }, + listFolderId: { type: 'string', description: 'Folder to list files from' }, + // File canonical params (per-operation) + downloadFileId: { type: 'string', description: 'File to download' }, + deleteFileId: { type: 'string', description: 'File to delete' }, downloadFileName: { type: 'string', description: 'File name override for download' }, - folderId: { type: 'string', description: 'Folder ID' }, + folderName: { type: 'string', description: 'Folder name for create_folder' }, + // List operation inputs query: { type: 'string', description: 'Search query' }, pageSize: { type: 'number', description: 'Results per page' }, }, diff --git a/apps/sim/blocks/blocks/outlook.ts b/apps/sim/blocks/blocks/outlook.ts index b626c20a4..a22d4195c 100644 --- a/apps/sim/blocks/blocks/outlook.ts +++ b/apps/sim/blocks/blocks/outlook.ts 
@@ -122,7 +122,7 @@ export const OutlookBlock: BlockConfig = { }, // Variable reference (advanced mode) { - id: 'attachments', + id: 'attachmentReference', title: 'Attachments', type: 'short-input', canonicalParamId: 'attachments', @@ -171,7 +171,7 @@ export const OutlookBlock: BlockConfig = { }, // Read Email Fields - Add folder selector (basic mode) { - id: 'folder', + id: 'folderSelector', title: 'Folder', type: 'folder-selector', canonicalParamId: 'folder', @@ -328,24 +328,20 @@ export const OutlookBlock: BlockConfig = { const { credential, folder, - manualFolder, - destinationFolder, - manualDestinationFolder, + destinationId, + copyDestinationId, + attachments, moveMessageId, actionMessageId, copyMessageId, - copyDestinationFolder, - manualCopyDestinationFolder, - attachmentFiles, - attachments, ...rest } = params - // Handle both selector and manual folder input - const effectiveFolder = (folder || manualFolder || '').trim() + // folder is already the canonical param - use it directly + const effectiveFolder = folder ? String(folder).trim() : '' - // Normalize file attachments from either basic (file-upload) or advanced (short-input) mode - const normalizedAttachments = normalizeFileInput(attachmentFiles || attachments) + // Normalize file attachments from the canonical attachments param + const normalizedAttachments = normalizeFileInput(attachments) if (normalizedAttachments) { rest.attachments = normalizedAttachments } @@ -359,8 +355,10 @@ export const OutlookBlock: BlockConfig = { if (moveMessageId) { rest.messageId = moveMessageId } - if (!rest.destinationId) { - rest.destinationId = (destinationFolder || manualDestinationFolder || '').trim() + // destinationId is already the canonical param + const effectiveDestinationId = destinationId ? 
String(destinationId).trim() : '' + if (effectiveDestinationId) { + rest.destinationId = effectiveDestinationId } } @@ -376,12 +374,12 @@ export const OutlookBlock: BlockConfig = { if (copyMessageId) { rest.messageId = copyMessageId } - // Handle copyDestinationId (from UI canonical param) or destinationId (from trigger) - if (rest.copyDestinationId) { - rest.destinationId = rest.copyDestinationId - rest.copyDestinationId = undefined - } else if (!rest.destinationId) { - rest.destinationId = (copyDestinationFolder || manualCopyDestinationFolder || '').trim() + // copyDestinationId is the canonical param - map it to destinationId for the tool + const effectiveCopyDestinationId = copyDestinationId + ? String(copyDestinationId).trim() + : '' + if (effectiveCopyDestinationId) { + rest.destinationId = effectiveCopyDestinationId } } @@ -400,30 +398,24 @@ export const OutlookBlock: BlockConfig = { subject: { type: 'string', description: 'Email subject' }, body: { type: 'string', description: 'Email content' }, contentType: { type: 'string', description: 'Content type (Text or HTML)' }, - attachmentFiles: { type: 'json', description: 'Files to attach (UI upload)' }, - attachments: { type: 'array', description: 'Files to attach (UserFile array)' }, + attachments: { type: 'array', description: 'Files to attach (canonical param)' }, // Forward operation inputs messageId: { type: 'string', description: 'Message ID to forward' }, comment: { type: 'string', description: 'Optional comment for forwarding' }, // Read operation inputs - folder: { type: 'string', description: 'Email folder' }, - manualFolder: { type: 'string', description: 'Manual folder name' }, + folder: { type: 'string', description: 'Email folder (canonical param)' }, maxResults: { type: 'number', description: 'Maximum emails' }, includeAttachments: { type: 'boolean', description: 'Include email attachments' }, // Move operation inputs moveMessageId: { type: 'string', description: 'Message ID to move' }, - 
destinationFolder: { type: 'string', description: 'Destination folder ID' }, - manualDestinationFolder: { type: 'string', description: 'Manual destination folder ID' }, - destinationId: { type: 'string', description: 'Destination folder ID for move' }, + destinationId: { type: 'string', description: 'Destination folder ID (canonical param)' }, // Action operation inputs actionMessageId: { type: 'string', description: 'Message ID for actions' }, copyMessageId: { type: 'string', description: 'Message ID to copy' }, - copyDestinationFolder: { type: 'string', description: 'Copy destination folder ID' }, - manualCopyDestinationFolder: { + copyDestinationId: { type: 'string', - description: 'Manual copy destination folder ID', + description: 'Destination folder ID for copy (canonical param)', }, - copyDestinationId: { type: 'string', description: 'Destination folder ID for copy' }, }, outputs: { // Common outputs diff --git a/apps/sim/blocks/blocks/pulse.ts b/apps/sim/blocks/blocks/pulse.ts index c61f11070..fda29aa16 100644 --- a/apps/sim/blocks/blocks/pulse.ts +++ b/apps/sim/blocks/blocks/pulse.ts @@ -25,6 +25,7 @@ export const PulseBlock: BlockConfig = { placeholder: 'Upload a document', mode: 'basic', maxSize: 50, + required: true, }, { id: 'filePath', @@ -33,6 +34,7 @@ export const PulseBlock: BlockConfig = { canonicalParamId: 'document', placeholder: 'Document URL', mode: 'advanced', + required: true, }, { id: 'pages', @@ -66,18 +68,12 @@ export const PulseBlock: BlockConfig = { config: { tool: () => 'pulse_parser', params: (params) => { - if (!params || !params.apiKey || params.apiKey.trim() === '') { - throw new Error('Pulse API key is required') - } - const parameters: Record = { apiKey: params.apiKey.trim(), } - const documentInput = params.fileUpload || params.filePath || params.document - if (!documentInput) { - throw new Error('Document is required') - } + // document is the canonical param from fileUpload (basic) or filePath (advanced) + const documentInput 
= params.document if (typeof documentInput === 'object') { parameters.file = documentInput } else if (typeof documentInput === 'string') { @@ -104,9 +100,10 @@ export const PulseBlock: BlockConfig = { }, }, inputs: { - document: { type: 'json', description: 'Document input (file upload or URL reference)' }, - filePath: { type: 'string', description: 'Document URL (advanced mode)' }, - fileUpload: { type: 'json', description: 'Uploaded document file (basic mode)' }, + document: { + type: 'json', + description: 'Document input (canonical param for file upload or URL)', + }, apiKey: { type: 'string', description: 'Pulse API key' }, pages: { type: 'string', description: 'Page range selection' }, chunking: { @@ -129,14 +126,8 @@ export const PulseBlock: BlockConfig = { }, } +// PulseV2Block uses the same canonical param 'document' for both basic and advanced modes const pulseV2Inputs = PulseBlock.inputs - ? { - ...Object.fromEntries( - Object.entries(PulseBlock.inputs).filter(([key]) => key !== 'filePath') - ), - fileReference: { type: 'json', description: 'File reference (advanced mode)' }, - } - : {} const pulseV2SubBlocks = (PulseBlock.subBlocks || []).flatMap((subBlock) => { if (subBlock.id === 'filePath') { return [] // Remove the old filePath subblock @@ -152,6 +143,7 @@ const pulseV2SubBlocks = (PulseBlock.subBlocks || []).flatMap((subBlock) => { canonicalParamId: 'document', placeholder: 'File reference', mode: 'advanced' as const, + required: true, }, ] } @@ -175,18 +167,12 @@ export const PulseV2Block: BlockConfig = { fallbackToolId: 'pulse_parser_v2', }), params: (params) => { - if (!params || !params.apiKey || params.apiKey.trim() === '') { - throw new Error('Pulse API key is required') - } - const parameters: Record = { apiKey: params.apiKey.trim(), } - const normalizedFile = normalizeFileInput( - params.fileUpload || params.fileReference || params.document, - { single: true } - ) + // document is the canonical param from fileUpload (basic) or fileReference 
(advanced) + const normalizedFile = normalizeFileInput(params.document, { single: true }) if (!normalizedFile) { throw new Error('Document file is required') } diff --git a/apps/sim/blocks/blocks/reducto.ts b/apps/sim/blocks/blocks/reducto.ts index fb9d39370..d0c6ed7ce 100644 --- a/apps/sim/blocks/blocks/reducto.ts +++ b/apps/sim/blocks/blocks/reducto.ts @@ -24,6 +24,7 @@ export const ReductoBlock: BlockConfig = { placeholder: 'Upload a PDF document', mode: 'basic', maxSize: 50, + required: true, }, { id: 'filePath', @@ -32,6 +33,7 @@ export const ReductoBlock: BlockConfig = { canonicalParamId: 'document', placeholder: 'Document URL', mode: 'advanced', + required: true, }, { id: 'pages', @@ -62,18 +64,12 @@ export const ReductoBlock: BlockConfig = { config: { tool: () => 'reducto_parser', params: (params) => { - if (!params || !params.apiKey || params.apiKey.trim() === '') { - throw new Error('Reducto API key is required') - } - const parameters: Record = { apiKey: params.apiKey.trim(), } - const documentInput = params.fileUpload || params.filePath || params.document - if (!documentInput) { - throw new Error('PDF document is required') - } + // document is the canonical param from fileUpload (basic) or filePath (advanced) + const documentInput = params.document if (typeof documentInput === 'object') { parameters.file = documentInput @@ -118,9 +114,10 @@ export const ReductoBlock: BlockConfig = { }, }, inputs: { - document: { type: 'json', description: 'Document input (file upload or URL reference)' }, - filePath: { type: 'string', description: 'PDF document URL (advanced mode)' }, - fileUpload: { type: 'json', description: 'Uploaded PDF file (basic mode)' }, + document: { + type: 'json', + description: 'Document input (canonical param for file upload or URL)', + }, apiKey: { type: 'string', description: 'Reducto API key' }, pages: { type: 'string', description: 'Page selection' }, tableOutputFormat: { type: 'string', description: 'Table output format' }, @@ -135,14 
+132,8 @@ export const ReductoBlock: BlockConfig = { }, } +// ReductoV2Block uses the same canonical param 'document' for both basic and advanced modes const reductoV2Inputs = ReductoBlock.inputs - ? { - ...Object.fromEntries( - Object.entries(ReductoBlock.inputs).filter(([key]) => key !== 'filePath') - ), - fileReference: { type: 'json', description: 'File reference (advanced mode)' }, - } - : {} const reductoV2SubBlocks = (ReductoBlock.subBlocks || []).flatMap((subBlock) => { if (subBlock.id === 'filePath') { return [] @@ -157,6 +148,7 @@ const reductoV2SubBlocks = (ReductoBlock.subBlocks || []).flatMap((subBlock) => canonicalParamId: 'document', placeholder: 'File reference', mode: 'advanced' as const, + required: true, }, ] } @@ -179,18 +171,12 @@ export const ReductoV2Block: BlockConfig = { fallbackToolId: 'reducto_parser_v2', }), params: (params) => { - if (!params || !params.apiKey || params.apiKey.trim() === '') { - throw new Error('Reducto API key is required') - } - const parameters: Record = { apiKey: params.apiKey.trim(), } - const documentInput = normalizeFileInput( - params.fileUpload || params.fileReference || params.document, - { single: true } - ) + // document is the canonical param from fileUpload (basic) or fileReference (advanced) + const documentInput = normalizeFileInput(params.document, { single: true }) if (!documentInput) { throw new Error('PDF document file is required') } diff --git a/apps/sim/blocks/blocks/s3.ts b/apps/sim/blocks/blocks/s3.ts index 9c8c537a1..10491a078 100644 --- a/apps/sim/blocks/blocks/s3.ts +++ b/apps/sim/blocks/blocks/s3.ts @@ -87,7 +87,7 @@ export const S3Block: BlockConfig = { multiple: false, }, { - id: 'file', + id: 'fileReference', title: 'File Reference', type: 'short-input', canonicalParamId: 'file', @@ -216,7 +216,6 @@ export const S3Block: BlockConfig = { placeholder: 'Select ACL for copied object (default: private)', condition: { field: 'operation', value: 'copy_object' }, mode: 'advanced', - 
canonicalParamId: 'acl', }, ], tools: { @@ -271,9 +270,9 @@ export const S3Block: BlockConfig = { if (!params.objectKey) { throw new Error('Object Key is required for upload') } - // Use file from uploadFile if in basic mode, otherwise use file reference + // file is the canonical param from uploadFile (basic) or fileReference (advanced) // normalizeFileInput handles JSON stringified values from advanced mode - const fileParam = normalizeFileInput(params.uploadFile || params.file, { single: true }) + const fileParam = normalizeFileInput(params.file, { single: true }) return { accessKeyId: params.accessKeyId, @@ -396,8 +395,7 @@ export const S3Block: BlockConfig = { bucketName: { type: 'string', description: 'S3 bucket name' }, // Upload inputs objectKey: { type: 'string', description: 'Object key/path in S3' }, - uploadFile: { type: 'json', description: 'File to upload (UI)' }, - file: { type: 'json', description: 'File to upload (reference)' }, + file: { type: 'json', description: 'File to upload (canonical param)' }, content: { type: 'string', description: 'Text content to upload' }, contentType: { type: 'string', description: 'Content-Type header' }, acl: { type: 'string', description: 'Access control list' }, diff --git a/apps/sim/blocks/blocks/sendgrid.ts b/apps/sim/blocks/blocks/sendgrid.ts index c55513026..016d11f98 100644 --- a/apps/sim/blocks/blocks/sendgrid.ts +++ b/apps/sim/blocks/blocks/sendgrid.ts @@ -562,13 +562,12 @@ Return ONLY the HTML content.`, templateGenerations, listPageSize, templatePageSize, - attachmentFiles, attachments, ...rest } = params // Normalize attachments for send_mail operation - const normalizedAttachments = normalizeFileInput(attachmentFiles || attachments) + const normalizedAttachments = normalizeFileInput(attachments) // Map renamed fields back to tool parameter names return { @@ -606,8 +605,7 @@ Return ONLY the HTML content.`, replyToName: { type: 'string', description: 'Reply-to name' }, mailTemplateId: { type: 'string', 
description: 'Template ID for sending mail' }, dynamicTemplateData: { type: 'json', description: 'Dynamic template data' }, - attachmentFiles: { type: 'json', description: 'Files to attach (UI upload)' }, - attachments: { type: 'array', description: 'Files to attach (UserFile array)' }, + attachments: { type: 'array', description: 'Files to attach (canonical param)' }, // Contact inputs email: { type: 'string', description: 'Contact email' }, firstName: { type: 'string', description: 'Contact first name' }, diff --git a/apps/sim/blocks/blocks/sftp.ts b/apps/sim/blocks/blocks/sftp.ts index c7afdb534..0a868644d 100644 --- a/apps/sim/blocks/blocks/sftp.ts +++ b/apps/sim/blocks/blocks/sftp.ts @@ -223,7 +223,8 @@ export const SftpBlock: BlockConfig = { return { ...connectionConfig, remotePath: params.remotePath, - files: normalizeFileInput(params.uploadFiles || params.files), + // files is the canonical param from uploadFiles (basic) or files (advanced) + files: normalizeFileInput(params.files), overwrite: params.overwrite !== false, permissions: params.permissions, } diff --git a/apps/sim/blocks/blocks/sharepoint.ts b/apps/sim/blocks/blocks/sharepoint.ts index e1a6aac2a..f10c1d5d9 100644 --- a/apps/sim/blocks/blocks/sharepoint.ts +++ b/apps/sim/blocks/blocks/sharepoint.ts @@ -252,7 +252,19 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`, placeholder: 'Enter site ID (leave empty for root site)', dependsOn: ['credential'], mode: 'advanced', - condition: { field: 'operation', value: 'create_page' }, + condition: { + field: 'operation', + value: [ + 'create_page', + 'read_page', + 'list_sites', + 'create_list', + 'read_list', + 'update_list', + 'add_list_items', + 'upload_file', + ], + }, }, { @@ -391,18 +403,17 @@ Return ONLY the JSON object - no explanations, no markdown, no extra text.`, } }, params: (params) => { - const { credential, siteSelector, manualSiteId, mimeType, ...rest } = params + const { credential, siteId, mimeType, ...rest } 
= params - const effectiveSiteId = (siteSelector || manualSiteId || '').trim() + // siteId is the canonical param from siteSelector (basic) or manualSiteId (advanced) + const effectiveSiteId = siteId ? String(siteId).trim() : '' const { - itemId: providedItemId, - listItemId, - listItemFields, + itemId, // canonical param from listItemId + listItemFields, // canonical param includeColumns, includeItems, - uploadFiles, - files, + files, // canonical param from uploadFiles (basic) or files (advanced) columnDefinitions, ...others } = rest as any @@ -421,11 +432,9 @@ Return ONLY the JSON object - no explanations, no markdown, no extra text.`, parsedItemFields = undefined } - const rawItemId = providedItemId ?? listItemId + // itemId is the canonical param from listItemId const sanitizedItemId = - rawItemId === undefined || rawItemId === null - ? undefined - : String(rawItemId).trim() || undefined + itemId === undefined || itemId === null ? undefined : String(itemId).trim() || undefined const coerceBoolean = (value: any) => { if (typeof value === 'boolean') return value @@ -449,8 +458,8 @@ Return ONLY the JSON object - no explanations, no markdown, no extra text.`, } catch {} } - // Handle file upload files parameter - const normalizedFiles = normalizeFileInput(uploadFiles || files) + // Handle file upload files parameter using canonical param + const normalizedFiles = normalizeFileInput(files) const baseParams: Record = { credential, siteId: effectiveSiteId || undefined, @@ -486,8 +495,7 @@ Return ONLY the JSON object - no explanations, no markdown, no extra text.`, }, pageTitle: { type: 'string', description: 'Page title' }, pageId: { type: 'string', description: 'Page ID' }, - siteSelector: { type: 'string', description: 'Site selector' }, - manualSiteId: { type: 'string', description: 'Manual site ID' }, + siteId: { type: 'string', description: 'Site ID' }, pageSize: { type: 'number', description: 'Results per page' }, listDisplayName: { type: 'string', description: 
'List display name' }, listDescription: { type: 'string', description: 'List description' }, @@ -496,13 +504,12 @@ Return ONLY the JSON object - no explanations, no markdown, no extra text.`, listTitle: { type: 'string', description: 'List title' }, includeColumns: { type: 'boolean', description: 'Include columns in response' }, includeItems: { type: 'boolean', description: 'Include items in response' }, - listItemId: { type: 'string', description: 'List item ID' }, - listItemFields: { type: 'string', description: 'List item fields' }, - driveId: { type: 'string', description: 'Document library (drive) ID' }, + itemId: { type: 'string', description: 'List item ID (canonical param)' }, + listItemFields: { type: 'string', description: 'List item fields (canonical param)' }, + driveId: { type: 'string', description: 'Document library (drive) ID (canonical param)' }, folderPath: { type: 'string', description: 'Folder path for file upload' }, fileName: { type: 'string', description: 'File name override' }, - uploadFiles: { type: 'json', description: 'Files to upload (UI upload)' }, - files: { type: 'array', description: 'Files to upload (UserFile array)' }, + files: { type: 'array', description: 'Files to upload (canonical param)' }, }, outputs: { sites: { diff --git a/apps/sim/blocks/blocks/slack.ts b/apps/sim/blocks/blocks/slack.ts index 68e0a7a27..38f22ca78 100644 --- a/apps/sim/blocks/blocks/slack.ts +++ b/apps/sim/blocks/blocks/slack.ts @@ -92,6 +92,7 @@ export const SlackBlock: BlockConfig = { field: 'authMethod', value: 'oauth', }, + required: true, }, { id: 'botToken', @@ -104,6 +105,7 @@ export const SlackBlock: BlockConfig = { field: 'authMethod', value: 'bot_token', }, + required: true, }, { id: 'channel', @@ -124,6 +126,7 @@ export const SlackBlock: BlockConfig = { not: true, }, }, + required: true, }, { id: 'manualChannel', @@ -142,6 +145,7 @@ export const SlackBlock: BlockConfig = { not: true, }, }, + required: true, }, { id: 'dmUserId', @@ -156,6 +160,7 
@@ export const SlackBlock: BlockConfig = { field: 'destinationType', value: 'dm', }, + required: true, }, { id: 'manualDmUserId', @@ -168,6 +173,7 @@ export const SlackBlock: BlockConfig = { field: 'destinationType', value: 'dm', }, + required: true, }, { id: 'text', @@ -547,15 +553,12 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, operation, destinationType, channel, - manualChannel, dmUserId, - manualDmUserId, text, title, content, limit, oldest, - attachmentFiles, files, threadTs, updateTimestamp, @@ -576,20 +579,11 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, } = params const isDM = destinationType === 'dm' - const effectiveChannel = (channel || manualChannel || '').trim() - const effectiveUserId = (dmUserId || manualDmUserId || '').trim() + const effectiveChannel = channel ? String(channel).trim() : '' + const effectiveUserId = dmUserId ? String(dmUserId).trim() : '' - const noChannelOperations = ['list_channels', 'list_users', 'get_user'] const dmSupportedOperations = ['send', 'read'] - if (isDM && dmSupportedOperations.includes(operation)) { - if (!effectiveUserId) { - throw new Error('User is required for DM operations.') - } - } else if (!effectiveChannel && !noChannelOperations.includes(operation)) { - throw new Error('Channel is required.') - } - const baseParams: Record = {} if (isDM && dmSupportedOperations.includes(operation)) { @@ -600,28 +594,20 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, // Handle authentication based on method if (authMethod === 'bot_token') { - if (!botToken) { - throw new Error('Bot token is required when using bot token authentication') - } baseParams.accessToken = botToken } else { // Default to OAuth - if (!credential) { - throw new Error('Slack account credential is required when using Sim Bot') - } baseParams.credential = credential } switch (operation) { case 'send': { - if (!text || text.trim() === '') { - 
throw new Error('Message text is required for send operation') - } baseParams.text = text if (threadTs) { baseParams.thread_ts = threadTs } - const normalizedFiles = normalizeFileInput(attachmentFiles || files) + // files is the canonical param from attachmentFiles (basic) or files (advanced) + const normalizedFiles = normalizeFileInput(files) if (normalizedFiles) { baseParams.files = normalizedFiles } @@ -629,9 +615,6 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, } case 'canvas': - if (!title || !content) { - throw new Error('Title and content are required for canvas operation') - } baseParams.title = title baseParams.content = content break @@ -649,16 +632,10 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, } case 'get_message': - if (!getMessageTimestamp) { - throw new Error('Message timestamp is required for get message operation') - } baseParams.timestamp = getMessageTimestamp break case 'get_thread': { - if (!getThreadTimestamp) { - throw new Error('Thread timestamp is required for get thread operation') - } baseParams.threadTs = getThreadTimestamp if (threadLimit) { const parsedLimit = Number.parseInt(threadLimit, 10) @@ -688,18 +665,12 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, } case 'get_user': - if (!userId) { - throw new Error('User ID is required for get user operation') - } baseParams.userId = userId break case 'download': { const fileId = (rest as any).fileId const downloadFileName = (rest as any).downloadFileName - if (!fileId) { - throw new Error('File ID is required for download operation') - } baseParams.fileId = fileId if (downloadFileName) { baseParams.fileName = downloadFileName @@ -708,24 +679,15 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, } case 'update': - if (!updateTimestamp || !updateText) { - throw new Error('Timestamp and text are required for update operation') - } baseParams.timestamp = 
updateTimestamp baseParams.text = updateText break case 'delete': - if (!deleteTimestamp) { - throw new Error('Timestamp is required for delete operation') - } baseParams.timestamp = deleteTimestamp break case 'react': - if (!reactionTimestamp || !emojiName) { - throw new Error('Timestamp and emoji name are required for reaction operation') - } baseParams.timestamp = reactionTimestamp baseParams.name = emojiName break @@ -741,19 +703,16 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`, destinationType: { type: 'string', description: 'Destination type (channel or dm)' }, credential: { type: 'string', description: 'Slack access token' }, botToken: { type: 'string', description: 'Bot token' }, - channel: { type: 'string', description: 'Channel identifier' }, - manualChannel: { type: 'string', description: 'Manual channel identifier' }, - dmUserId: { type: 'string', description: 'User ID for DM recipient (selector)' }, - manualDmUserId: { type: 'string', description: 'User ID for DM recipient (manual input)' }, + channel: { type: 'string', description: 'Channel identifier (canonical param)' }, + dmUserId: { type: 'string', description: 'User ID for DM recipient (canonical param)' }, text: { type: 'string', description: 'Message text' }, - attachmentFiles: { type: 'json', description: 'Files to attach (UI upload)' }, - files: { type: 'array', description: 'Files to attach (UserFile array)' }, + files: { type: 'array', description: 'Files to attach (canonical param)' }, title: { type: 'string', description: 'Canvas title' }, content: { type: 'string', description: 'Canvas content' }, limit: { type: 'string', description: 'Message limit' }, oldest: { type: 'string', description: 'Oldest timestamp' }, fileId: { type: 'string', description: 'File ID to download' }, - downloadFileName: { type: 'string', description: 'File name override for download' }, + fileName: { type: 'string', description: 'File name override for download (canonical 
param)' }, // Update/Delete/React operation inputs updateTimestamp: { type: 'string', description: 'Message timestamp for update' }, updateText: { type: 'string', description: 'New text for update' }, diff --git a/apps/sim/blocks/blocks/smtp.ts b/apps/sim/blocks/blocks/smtp.ts index 640cdd680..6537beb5c 100644 --- a/apps/sim/blocks/blocks/smtp.ts +++ b/apps/sim/blocks/blocks/smtp.ts @@ -177,7 +177,7 @@ export const SmtpBlock: BlockConfig = { cc: params.cc, bcc: params.bcc, replyTo: params.replyTo, - attachments: normalizeFileInput(params.attachmentFiles || params.attachments), + attachments: normalizeFileInput(params.attachments), }), }, }, diff --git a/apps/sim/blocks/blocks/spotify.ts b/apps/sim/blocks/blocks/spotify.ts index c152b3a56..4b450bac3 100644 --- a/apps/sim/blocks/blocks/spotify.ts +++ b/apps/sim/blocks/blocks/spotify.ts @@ -824,8 +824,6 @@ export const SpotifyBlock: BlockConfig = { description: { type: 'string', description: 'Playlist description' }, public: { type: 'boolean', description: 'Whether playlist is public' }, coverImage: { type: 'json', description: 'Cover image (UserFile)' }, - coverImageFile: { type: 'json', description: 'Cover image upload (basic mode)' }, - coverImageRef: { type: 'json', description: 'Cover image reference (advanced mode)' }, range_start: { type: 'number', description: 'Start index for reorder' }, insert_before: { type: 'number', description: 'Insert before index' }, range_length: { type: 'number', description: 'Number of items to move' }, diff --git a/apps/sim/blocks/blocks/stt.ts b/apps/sim/blocks/blocks/stt.ts index 66adef7a9..92b10e2b7 100644 --- a/apps/sim/blocks/blocks/stt.ts +++ b/apps/sim/blocks/blocks/stt.ts @@ -259,8 +259,8 @@ export const SttBlock: BlockConfig = { } }, params: (params) => { - // Normalize file input from basic (file-upload) or advanced (short-input) mode - const audioFile = normalizeFileInput(params.audioFile || params.audioFileReference, { + // Normalize file input - audioFile is the 
canonical param for both basic and advanced modes + const audioFile = normalizeFileInput(params.audioFile, { single: true, }) @@ -269,7 +269,6 @@ export const SttBlock: BlockConfig = { apiKey: params.apiKey, model: params.model, audioFile, - audioFileReference: undefined, audioUrl: params.audioUrl, language: params.language, timestamps: params.timestamps, @@ -296,7 +295,6 @@ export const SttBlock: BlockConfig = { 'Provider-specific model (e.g., scribe_v1 for ElevenLabs, nova-3 for Deepgram, best for AssemblyAI, gemini-2.0-flash-exp for Gemini)', }, audioFile: { type: 'json', description: 'Audio/video file (UserFile)' }, - audioFileReference: { type: 'json', description: 'Audio/video file reference' }, audioUrl: { type: 'string', description: 'Audio/video URL' }, language: { type: 'string', description: 'Language code or auto' }, timestamps: { type: 'string', description: 'Timestamp granularity (none, sentence, word)' }, @@ -393,8 +391,8 @@ export const SttV2Block: BlockConfig = { fallbackToolId: 'stt_whisper_v2', }), params: (params) => { - // Normalize file input from basic (file-upload) or advanced (short-input) mode - const audioFile = normalizeFileInput(params.audioFile || params.audioFileReference, { + // Normalize file input - audioFile is the canonical param for both basic and advanced modes + const audioFile = normalizeFileInput(params.audioFile, { single: true, }) @@ -403,7 +401,6 @@ export const SttV2Block: BlockConfig = { apiKey: params.apiKey, model: params.model, audioFile, - audioFileReference: undefined, language: params.language, timestamps: params.timestamps, diarization: params.diarization, diff --git a/apps/sim/blocks/blocks/supabase.ts b/apps/sim/blocks/blocks/supabase.ts index 78256c5be..602111ffa 100644 --- a/apps/sim/blocks/blocks/supabase.ts +++ b/apps/sim/blocks/blocks/supabase.ts @@ -974,15 +974,13 @@ Return ONLY the PostgREST filter expression - no explanations, no markdown, no e allowedMimeTypes, upsert, download, - file, - fileContent, 
fileData, ...rest } = params // Normalize file input for storage_upload operation - // normalizeFileInput handles JSON stringified values from advanced mode - const normalizedFileData = normalizeFileInput(file || fileContent || fileData, { + // fileData is the canonical param for both basic (file) and advanced (fileContent) modes + const normalizedFileData = normalizeFileInput(fileData, { single: true, }) @@ -1156,7 +1154,7 @@ Return ONLY the PostgREST filter expression - no explanations, no markdown, no e // Storage operation inputs bucket: { type: 'string', description: 'Storage bucket name' }, path: { type: 'string', description: 'File or folder path in storage' }, - fileContent: { type: 'string', description: 'File content (base64 for binary)' }, + fileData: { type: 'json', description: 'File data (UserFile)' }, contentType: { type: 'string', description: 'MIME type of the file' }, fileName: { type: 'string', description: 'File name for upload or download override' }, upsert: { type: 'boolean', description: 'Whether to overwrite existing file' }, diff --git a/apps/sim/blocks/blocks/telegram.ts b/apps/sim/blocks/blocks/telegram.ts index 2be6eb546..ce4076d38 100644 --- a/apps/sim/blocks/blocks/telegram.ts +++ b/apps/sim/blocks/blocks/telegram.ts @@ -269,7 +269,8 @@ export const TelegramBlock: BlockConfig = { messageId: params.messageId, } case 'telegram_send_photo': { - const photoSource = normalizeFileInput(params.photoFile || params.photo, { + // photo is the canonical param for both basic (photoFile) and advanced modes + const photoSource = normalizeFileInput(params.photo, { single: true, }) if (!photoSource) { @@ -282,7 +283,8 @@ export const TelegramBlock: BlockConfig = { } } case 'telegram_send_video': { - const videoSource = normalizeFileInput(params.videoFile || params.video, { + // video is the canonical param for both basic (videoFile) and advanced modes + const videoSource = normalizeFileInput(params.video, { single: true, }) if (!videoSource) { @@ 
-295,7 +297,8 @@ export const TelegramBlock: BlockConfig = { } } case 'telegram_send_audio': { - const audioSource = normalizeFileInput(params.audioFile || params.audio, { + // audio is the canonical param for both basic (audioFile) and advanced modes + const audioSource = normalizeFileInput(params.audio, { single: true, }) if (!audioSource) { @@ -308,7 +311,8 @@ export const TelegramBlock: BlockConfig = { } } case 'telegram_send_animation': { - const animationSource = normalizeFileInput(params.animationFile || params.animation, { + // animation is the canonical param for both basic (animationFile) and advanced modes + const animationSource = normalizeFileInput(params.animation, { single: true, }) if (!animationSource) { @@ -321,9 +325,10 @@ export const TelegramBlock: BlockConfig = { } } case 'telegram_send_document': { + // files is the canonical param for both basic (attachmentFiles) and advanced modes return { ...commonParams, - files: normalizeFileInput(params.attachmentFiles || params.files), + files: normalizeFileInput(params.files), caption: params.caption, } } @@ -341,18 +346,10 @@ export const TelegramBlock: BlockConfig = { botToken: { type: 'string', description: 'Telegram bot token' }, chatId: { type: 'string', description: 'Chat identifier' }, text: { type: 'string', description: 'Message text' }, - photoFile: { type: 'json', description: 'Uploaded photo (UserFile)' }, - photo: { type: 'json', description: 'Photo reference or URL/file_id' }, - videoFile: { type: 'json', description: 'Uploaded video (UserFile)' }, - video: { type: 'json', description: 'Video reference or URL/file_id' }, - audioFile: { type: 'json', description: 'Uploaded audio (UserFile)' }, - audio: { type: 'json', description: 'Audio reference or URL/file_id' }, - animationFile: { type: 'json', description: 'Uploaded animation (UserFile)' }, - animation: { type: 'json', description: 'Animation reference or URL/file_id' }, - attachmentFiles: { - type: 'json', - description: 'Files to 
attach (UI upload)', - }, + photo: { type: 'json', description: 'Photo (UserFile or URL/file_id)' }, + video: { type: 'json', description: 'Video (UserFile or URL/file_id)' }, + audio: { type: 'json', description: 'Audio (UserFile or URL/file_id)' }, + animation: { type: 'json', description: 'Animation (UserFile or URL/file_id)' }, files: { type: 'array', description: 'Files to attach (UserFile array)' }, caption: { type: 'string', description: 'Caption for media' }, messageId: { type: 'string', description: 'Message ID to delete' }, diff --git a/apps/sim/blocks/blocks/textract.ts b/apps/sim/blocks/blocks/textract.ts index 10f5a1113..a2eea3050 100644 --- a/apps/sim/blocks/blocks/textract.ts +++ b/apps/sim/blocks/blocks/textract.ts @@ -137,7 +137,8 @@ export const TextractBlock: BlockConfig = { } parameters.s3Uri = params.s3Uri.trim() } else { - const documentInput = params.fileUpload || params.filePath || params.document + // document is the canonical param for both basic (fileUpload) and advanced (filePath) modes + const documentInput = params.document if (!documentInput) { throw new Error('Document is required') } @@ -165,8 +166,6 @@ export const TextractBlock: BlockConfig = { inputs: { processingMode: { type: 'string', description: 'Document type: single-page or multi-page' }, document: { type: 'json', description: 'Document input (file upload or URL reference)' }, - filePath: { type: 'string', description: 'Document URL (advanced mode)' }, - fileUpload: { type: 'json', description: 'Uploaded document file (basic mode)' }, s3Uri: { type: 'string', description: 'S3 URI for multi-page processing (s3://bucket/key)' }, extractTables: { type: 'boolean', description: 'Extract tables from document' }, extractForms: { type: 'boolean', description: 'Extract form key-value pairs' }, @@ -192,14 +191,7 @@ export const TextractBlock: BlockConfig = { }, } -const textractV2Inputs = TextractBlock.inputs - ? 
{ - ...Object.fromEntries( - Object.entries(TextractBlock.inputs).filter(([key]) => key !== 'filePath') - ), - fileReference: { type: 'json', description: 'File reference (advanced mode)' }, - } - : {} +const textractV2Inputs = TextractBlock.inputs ? { ...TextractBlock.inputs } : {} const textractV2SubBlocks = (TextractBlock.subBlocks || []).flatMap((subBlock) => { if (subBlock.id === 'filePath') { return [] // Remove the old filePath subblock @@ -265,10 +257,8 @@ export const TextractV2Block: BlockConfig = { } parameters.s3Uri = params.s3Uri.trim() } else { - const file = normalizeFileInput( - params.fileUpload || params.fileReference || params.document, - { single: true } - ) + // document is the canonical param for both basic (fileUpload) and advanced (fileReference) modes + const file = normalizeFileInput(params.document, { single: true }) if (!file) { throw new Error('Document file is required') } diff --git a/apps/sim/blocks/blocks/video_generator.ts b/apps/sim/blocks/blocks/video_generator.ts index ae31eb951..55c5a2472 100644 --- a/apps/sim/blocks/blocks/video_generator.ts +++ b/apps/sim/blocks/blocks/video_generator.ts @@ -691,6 +691,7 @@ export const VideoGeneratorV2Block: BlockConfig = { condition: { field: 'provider', value: 'runway' }, placeholder: 'Reference image from previous blocks', mode: 'advanced', + required: true, }, { id: 'cameraControl', @@ -734,29 +735,25 @@ export const VideoGeneratorV2Block: BlockConfig = { return 'video_runway' } }, - params: (params) => { - const visualRef = - params.visualReferenceUpload || params.visualReferenceInput || params.visualReference - return { - provider: params.provider, - apiKey: params.apiKey, - model: params.model, - endpoint: params.endpoint, - prompt: params.prompt, - duration: params.duration ? 
Number(params.duration) : undefined, - aspectRatio: params.aspectRatio, - resolution: params.resolution, - visualReference: normalizeFileInput(visualRef, { single: true }), - consistencyMode: params.consistencyMode, - stylePreset: params.stylePreset, - promptOptimizer: params.promptOptimizer, - cameraControl: params.cameraControl - ? typeof params.cameraControl === 'string' - ? JSON.parse(params.cameraControl) - : params.cameraControl - : undefined, - } - }, + params: (params) => ({ + provider: params.provider, + apiKey: params.apiKey, + model: params.model, + endpoint: params.endpoint, + prompt: params.prompt, + duration: params.duration ? Number(params.duration) : undefined, + aspectRatio: params.aspectRatio, + resolution: params.resolution, + visualReference: normalizeFileInput(params.visualReference, { single: true }), + consistencyMode: params.consistencyMode, + stylePreset: params.stylePreset, + promptOptimizer: params.promptOptimizer, + cameraControl: params.cameraControl + ? typeof params.cameraControl === 'string' + ? 
JSON.parse(params.cameraControl) + : params.cameraControl + : undefined, + }), }, }, inputs: { @@ -784,11 +781,6 @@ export const VideoGeneratorV2Block: BlockConfig = { description: 'Video resolution - not available for MiniMax (fixed per endpoint)', }, visualReference: { type: 'json', description: 'Reference image for Runway (UserFile)' }, - visualReferenceUpload: { type: 'json', description: 'Uploaded reference image (basic mode)' }, - visualReferenceInput: { - type: 'json', - description: 'Reference image from previous blocks (advanced mode)', - }, consistencyMode: { type: 'string', description: 'Consistency mode for Runway (character, object, style, location)', diff --git a/apps/sim/blocks/blocks/vision.ts b/apps/sim/blocks/blocks/vision.ts index a367b0c58..3b791966f 100644 --- a/apps/sim/blocks/blocks/vision.ts +++ b/apps/sim/blocks/blocks/vision.ts @@ -91,7 +91,6 @@ export const VisionBlock: BlockConfig = { apiKey: { type: 'string', description: 'Provider API key' }, imageUrl: { type: 'string', description: 'Image URL' }, imageFile: { type: 'json', description: 'Image file (UserFile)' }, - imageFileReference: { type: 'json', description: 'Image file reference' }, model: { type: 'string', description: 'Vision model' }, prompt: { type: 'string', description: 'Analysis prompt' }, }, @@ -117,15 +116,13 @@ export const VisionV2Block: BlockConfig = { fallbackToolId: 'vision_tool_v2', }), params: (params) => { - // normalizeFileInput handles JSON stringified values from advanced mode - // Vision expects a single file - const imageFile = normalizeFileInput(params.imageFile || params.imageFileReference, { + // imageFile is the canonical param for both basic and advanced modes + const imageFile = normalizeFileInput(params.imageFile, { single: true, }) return { ...params, imageFile, - imageFileReference: undefined, } }, }, @@ -177,7 +174,6 @@ export const VisionV2Block: BlockConfig = { inputs: { apiKey: { type: 'string', description: 'Provider API key' }, imageFile: { 
type: 'json', description: 'Image file (UserFile)' }, - imageFileReference: { type: 'json', description: 'Image file reference' }, model: { type: 'string', description: 'Vision model' }, prompt: { type: 'string', description: 'Analysis prompt' }, }, diff --git a/apps/sim/blocks/blocks/wealthbox.ts b/apps/sim/blocks/blocks/wealthbox.ts index 7a1eacd3d..f14921377 100644 --- a/apps/sim/blocks/blocks/wealthbox.ts +++ b/apps/sim/blocks/blocks/wealthbox.ts @@ -169,9 +169,10 @@ Return ONLY the date/time string - no explanations, no quotes, no extra text.`, } }, params: (params) => { - const { credential, operation, contactId, manualContactId, taskId, ...rest } = params + const { credential, operation, contactId, taskId, ...rest } = params - const effectiveContactId = (contactId || manualContactId || '').trim() + // contactId is the canonical param for both basic (file-selector) and advanced (manualContactId) modes + const effectiveContactId = contactId ? String(contactId).trim() : '' const baseParams = { ...rest, @@ -222,7 +223,6 @@ Return ONLY the date/time string - no explanations, no quotes, no extra text.`, credential: { type: 'string', description: 'Wealthbox access token' }, noteId: { type: 'string', description: 'Note identifier' }, contactId: { type: 'string', description: 'Contact identifier' }, - manualContactId: { type: 'string', description: 'Manual contact identifier' }, taskId: { type: 'string', description: 'Task identifier' }, content: { type: 'string', description: 'Content text' }, firstName: { type: 'string', description: 'First name' }, diff --git a/apps/sim/blocks/blocks/webflow.ts b/apps/sim/blocks/blocks/webflow.ts index cfc396257..c785413bf 100644 --- a/apps/sim/blocks/blocks/webflow.ts +++ b/apps/sim/blocks/blocks/webflow.ts @@ -40,7 +40,7 @@ export const WebflowBlock: BlockConfig = { required: true, }, { - id: 'siteId', + id: 'siteSelector', title: 'Site', type: 'project-selector', canonicalParamId: 'siteId', @@ -60,13 +60,13 @@ export const 
WebflowBlock: BlockConfig = { required: true, }, { - id: 'collectionId', + id: 'collectionSelector', title: 'Collection', type: 'file-selector', canonicalParamId: 'collectionId', serviceId: 'webflow', placeholder: 'Select collection', - dependsOn: ['credential', 'siteId'], + dependsOn: ['credential', 'siteSelector'], mode: 'basic', required: true, }, @@ -80,13 +80,13 @@ export const WebflowBlock: BlockConfig = { required: true, }, { - id: 'itemId', + id: 'itemSelector', title: 'Item', type: 'file-selector', canonicalParamId: 'itemId', serviceId: 'webflow', placeholder: 'Select item', - dependsOn: ['credential', 'collectionId'], + dependsOn: ['credential', 'collectionSelector'], mode: 'basic', condition: { field: 'operation', value: ['get', 'update', 'delete'] }, required: true, @@ -158,12 +158,9 @@ export const WebflowBlock: BlockConfig = { const { credential, fieldData, - siteId, - manualSiteId, - collectionId, - manualCollectionId, - itemId, - manualItemId, + siteId, // Canonical param from siteSelector (basic) or manualSiteId (advanced) + collectionId, // Canonical param from collectionSelector (basic) or manualCollectionId (advanced) + itemId, // Canonical param from itemSelector (basic) or manualItemId (advanced) ...rest } = params let parsedFieldData: any | undefined @@ -176,21 +173,9 @@ export const WebflowBlock: BlockConfig = { throw new Error(`Invalid JSON input for ${params.operation} operation: ${error.message}`) } - const effectiveSiteId = ((siteId as string) || (manualSiteId as string) || '').trim() - const effectiveCollectionId = ( - (collectionId as string) || - (manualCollectionId as string) || - '' - ).trim() - const effectiveItemId = ((itemId as string) || (manualItemId as string) || '').trim() - - if (!effectiveSiteId) { - throw new Error('Site ID is required') - } - - if (!effectiveCollectionId) { - throw new Error('Collection ID is required') - } + const effectiveSiteId = siteId ? 
String(siteId).trim() : '' + const effectiveCollectionId = collectionId ? String(collectionId).trim() : '' + const effectiveItemId = itemId ? String(itemId).trim() : '' const baseParams = { credential, @@ -202,9 +187,6 @@ export const WebflowBlock: BlockConfig = { switch (params.operation) { case 'create': case 'update': - if (params.operation === 'update' && !effectiveItemId) { - throw new Error('Item ID is required for update operation') - } return { ...baseParams, itemId: effectiveItemId || undefined, @@ -212,9 +194,6 @@ export const WebflowBlock: BlockConfig = { } case 'get': case 'delete': - if (!effectiveItemId) { - throw new Error(`Item ID is required for ${params.operation} operation`) - } return { ...baseParams, itemId: effectiveItemId } default: return baseParams @@ -226,11 +205,8 @@ export const WebflowBlock: BlockConfig = { operation: { type: 'string', description: 'Operation to perform' }, credential: { type: 'string', description: 'Webflow OAuth access token' }, siteId: { type: 'string', description: 'Webflow site identifier' }, - manualSiteId: { type: 'string', description: 'Manual site identifier' }, collectionId: { type: 'string', description: 'Webflow collection identifier' }, - manualCollectionId: { type: 'string', description: 'Manual collection identifier' }, itemId: { type: 'string', description: 'Item identifier' }, - manualItemId: { type: 'string', description: 'Manual item identifier' }, offset: { type: 'number', description: 'Pagination offset' }, limit: { type: 'number', description: 'Maximum items to return' }, fieldData: { type: 'json', description: 'Item field data' }, diff --git a/apps/sim/blocks/blocks/wordpress.ts b/apps/sim/blocks/blocks/wordpress.ts index e0b206ce5..207c19740 100644 --- a/apps/sim/blocks/blocks/wordpress.ts +++ b/apps/sim/blocks/blocks/wordpress.ts @@ -768,9 +768,10 @@ export const WordPressBlock: BlockConfig = { parent: params.parent ? 
Number(params.parent) : undefined, } case 'wordpress_upload_media': + // file is the canonical param for both basic (fileUpload) and advanced modes return { ...baseParams, - file: normalizeFileInput(params.fileUpload || params.file, { single: true }), + file: normalizeFileInput(params.file, { single: true }), filename: params.filename, title: params.mediaTitle, caption: params.caption, @@ -905,8 +906,7 @@ export const WordPressBlock: BlockConfig = { parent: { type: 'number', description: 'Parent page ID' }, menuOrder: { type: 'number', description: 'Menu order' }, // Media inputs - fileUpload: { type: 'json', description: 'File to upload (UserFile object)' }, - file: { type: 'json', description: 'File reference from previous block' }, + file: { type: 'json', description: 'File to upload (UserFile)' }, filename: { type: 'string', description: 'Optional filename override' }, mediaTitle: { type: 'string', description: 'Media title' }, caption: { type: 'string', description: 'Media caption' }, diff --git a/apps/sim/components/branded-layout.tsx b/apps/sim/components/branded-layout.tsx index 84e4fd3eb..a42e712f3 100644 --- a/apps/sim/components/branded-layout.tsx +++ b/apps/sim/components/branded-layout.tsx @@ -1,7 +1,7 @@ 'use client' import { useEffect } from 'react' -import { getBrandConfig } from '@/lib/branding/branding' +import { getBrandConfig } from '@/ee/whitelabeling' interface BrandedLayoutProps { children: React.ReactNode diff --git a/apps/sim/components/emails/auth/otp-verification-email.tsx b/apps/sim/components/emails/auth/otp-verification-email.tsx index d6ec6dc63..21dc1159a 100644 --- a/apps/sim/components/emails/auth/otp-verification-email.tsx +++ b/apps/sim/components/emails/auth/otp-verification-email.tsx @@ -1,7 +1,7 @@ import { Section, Text } from '@react-email/components' import { baseStyles } from '@/components/emails/_styles' import { EmailLayout } from '@/components/emails/components' -import { getBrandConfig } from '@/lib/branding/branding' 
+import { getBrandConfig } from '@/ee/whitelabeling' interface OTPVerificationEmailProps { otp: string diff --git a/apps/sim/components/emails/auth/reset-password-email.tsx b/apps/sim/components/emails/auth/reset-password-email.tsx index fa5e031b2..e86effe0f 100644 --- a/apps/sim/components/emails/auth/reset-password-email.tsx +++ b/apps/sim/components/emails/auth/reset-password-email.tsx @@ -1,7 +1,7 @@ import { Link, Text } from '@react-email/components' import { baseStyles } from '@/components/emails/_styles' import { EmailLayout } from '@/components/emails/components' -import { getBrandConfig } from '@/lib/branding/branding' +import { getBrandConfig } from '@/ee/whitelabeling' interface ResetPasswordEmailProps { username?: string diff --git a/apps/sim/components/emails/auth/welcome-email.tsx b/apps/sim/components/emails/auth/welcome-email.tsx index ba3e16b9a..3333e9bb7 100644 --- a/apps/sim/components/emails/auth/welcome-email.tsx +++ b/apps/sim/components/emails/auth/welcome-email.tsx @@ -1,8 +1,8 @@ import { Link, Text } from '@react-email/components' import { baseStyles } from '@/components/emails/_styles' import { EmailLayout } from '@/components/emails/components' -import { getBrandConfig } from '@/lib/branding/branding' import { getBaseUrl } from '@/lib/core/utils/urls' +import { getBrandConfig } from '@/ee/whitelabeling' interface WelcomeEmailProps { userName?: string diff --git a/apps/sim/components/emails/billing/credit-purchase-email.tsx b/apps/sim/components/emails/billing/credit-purchase-email.tsx index 581f9dbc3..532add56c 100644 --- a/apps/sim/components/emails/billing/credit-purchase-email.tsx +++ b/apps/sim/components/emails/billing/credit-purchase-email.tsx @@ -1,8 +1,8 @@ import { Link, Section, Text } from '@react-email/components' import { baseStyles, colors } from '@/components/emails/_styles' import { EmailLayout } from '@/components/emails/components' -import { getBrandConfig } from '@/lib/branding/branding' import { getBaseUrl } from 
'@/lib/core/utils/urls' +import { getBrandConfig } from '@/ee/whitelabeling' interface CreditPurchaseEmailProps { userName?: string diff --git a/apps/sim/components/emails/billing/enterprise-subscription-email.tsx b/apps/sim/components/emails/billing/enterprise-subscription-email.tsx index d3f237349..32f524126 100644 --- a/apps/sim/components/emails/billing/enterprise-subscription-email.tsx +++ b/apps/sim/components/emails/billing/enterprise-subscription-email.tsx @@ -1,8 +1,8 @@ import { Link, Text } from '@react-email/components' import { baseStyles } from '@/components/emails/_styles' import { EmailLayout } from '@/components/emails/components' -import { getBrandConfig } from '@/lib/branding/branding' import { getBaseUrl } from '@/lib/core/utils/urls' +import { getBrandConfig } from '@/ee/whitelabeling' interface EnterpriseSubscriptionEmailProps { userName?: string diff --git a/apps/sim/components/emails/billing/free-tier-upgrade-email.tsx b/apps/sim/components/emails/billing/free-tier-upgrade-email.tsx index 9f42559d2..57c288bc8 100644 --- a/apps/sim/components/emails/billing/free-tier-upgrade-email.tsx +++ b/apps/sim/components/emails/billing/free-tier-upgrade-email.tsx @@ -1,7 +1,7 @@ import { Link, Section, Text } from '@react-email/components' import { baseStyles, colors, typography } from '@/components/emails/_styles' import { EmailLayout } from '@/components/emails/components' -import { getBrandConfig } from '@/lib/branding/branding' +import { getBrandConfig } from '@/ee/whitelabeling' interface FreeTierUpgradeEmailProps { userName?: string diff --git a/apps/sim/components/emails/billing/payment-failed-email.tsx b/apps/sim/components/emails/billing/payment-failed-email.tsx index 58d747410..d8aa23d14 100644 --- a/apps/sim/components/emails/billing/payment-failed-email.tsx +++ b/apps/sim/components/emails/billing/payment-failed-email.tsx @@ -1,7 +1,7 @@ import { Link, Section, Text } from '@react-email/components' import { baseStyles, colors } from 
'@/components/emails/_styles' import { EmailLayout } from '@/components/emails/components' -import { getBrandConfig } from '@/lib/branding/branding' +import { getBrandConfig } from '@/ee/whitelabeling' interface PaymentFailedEmailProps { userName?: string diff --git a/apps/sim/components/emails/billing/plan-welcome-email.tsx b/apps/sim/components/emails/billing/plan-welcome-email.tsx index 295f4a01c..13c0b7588 100644 --- a/apps/sim/components/emails/billing/plan-welcome-email.tsx +++ b/apps/sim/components/emails/billing/plan-welcome-email.tsx @@ -1,8 +1,8 @@ import { Link, Text } from '@react-email/components' import { baseStyles } from '@/components/emails/_styles' import { EmailLayout } from '@/components/emails/components' -import { getBrandConfig } from '@/lib/branding/branding' import { getBaseUrl } from '@/lib/core/utils/urls' +import { getBrandConfig } from '@/ee/whitelabeling' interface PlanWelcomeEmailProps { planName: 'Pro' | 'Team' diff --git a/apps/sim/components/emails/billing/usage-threshold-email.tsx b/apps/sim/components/emails/billing/usage-threshold-email.tsx index be31ec0a6..1dec47536 100644 --- a/apps/sim/components/emails/billing/usage-threshold-email.tsx +++ b/apps/sim/components/emails/billing/usage-threshold-email.tsx @@ -1,7 +1,7 @@ import { Link, Section, Text } from '@react-email/components' import { baseStyles } from '@/components/emails/_styles' import { EmailLayout } from '@/components/emails/components' -import { getBrandConfig } from '@/lib/branding/branding' +import { getBrandConfig } from '@/ee/whitelabeling' interface UsageThresholdEmailProps { userName?: string diff --git a/apps/sim/components/emails/careers/careers-confirmation-email.tsx b/apps/sim/components/emails/careers/careers-confirmation-email.tsx index 1cdda08ef..42d5df3f1 100644 --- a/apps/sim/components/emails/careers/careers-confirmation-email.tsx +++ b/apps/sim/components/emails/careers/careers-confirmation-email.tsx @@ -2,8 +2,8 @@ import { Text } from 
'@react-email/components' import { format } from 'date-fns' import { baseStyles } from '@/components/emails/_styles' import { EmailLayout } from '@/components/emails/components' -import { getBrandConfig } from '@/lib/branding/branding' import { getBaseUrl } from '@/lib/core/utils/urls' +import { getBrandConfig } from '@/ee/whitelabeling' interface CareersConfirmationEmailProps { name: string diff --git a/apps/sim/components/emails/components/email-footer.tsx b/apps/sim/components/emails/components/email-footer.tsx index a892d70a2..20ce2143d 100644 --- a/apps/sim/components/emails/components/email-footer.tsx +++ b/apps/sim/components/emails/components/email-footer.tsx @@ -1,8 +1,8 @@ import { Container, Img, Link, Section } from '@react-email/components' import { baseStyles, colors, spacing, typography } from '@/components/emails/_styles' -import { getBrandConfig } from '@/lib/branding/branding' import { isHosted } from '@/lib/core/config/feature-flags' import { getBaseUrl } from '@/lib/core/utils/urls' +import { getBrandConfig } from '@/ee/whitelabeling' interface EmailFooterProps { baseUrl?: string diff --git a/apps/sim/components/emails/components/email-layout.tsx b/apps/sim/components/emails/components/email-layout.tsx index f55249576..deb1eba9b 100644 --- a/apps/sim/components/emails/components/email-layout.tsx +++ b/apps/sim/components/emails/components/email-layout.tsx @@ -1,8 +1,8 @@ import { Body, Container, Head, Html, Img, Preview, Section } from '@react-email/components' import { baseStyles } from '@/components/emails/_styles' import { EmailFooter } from '@/components/emails/components/email-footer' -import { getBrandConfig } from '@/lib/branding/branding' import { getBaseUrl } from '@/lib/core/utils/urls' +import { getBrandConfig } from '@/ee/whitelabeling' interface EmailLayoutProps { /** Preview text shown in email client list view */ diff --git a/apps/sim/components/emails/invitations/batch-invitation-email.tsx 
b/apps/sim/components/emails/invitations/batch-invitation-email.tsx index 53651044e..430a9fa18 100644 --- a/apps/sim/components/emails/invitations/batch-invitation-email.tsx +++ b/apps/sim/components/emails/invitations/batch-invitation-email.tsx @@ -1,7 +1,7 @@ import { Link, Text } from '@react-email/components' import { baseStyles } from '@/components/emails/_styles' import { EmailLayout } from '@/components/emails/components' -import { getBrandConfig } from '@/lib/branding/branding' +import { getBrandConfig } from '@/ee/whitelabeling' interface WorkspaceInvitation { workspaceId: string diff --git a/apps/sim/components/emails/invitations/invitation-email.tsx b/apps/sim/components/emails/invitations/invitation-email.tsx index 285901a32..f3ce41dcf 100644 --- a/apps/sim/components/emails/invitations/invitation-email.tsx +++ b/apps/sim/components/emails/invitations/invitation-email.tsx @@ -2,8 +2,8 @@ import { Link, Text } from '@react-email/components' import { createLogger } from '@sim/logger' import { baseStyles } from '@/components/emails/_styles' import { EmailLayout } from '@/components/emails/components' -import { getBrandConfig } from '@/lib/branding/branding' import { getBaseUrl } from '@/lib/core/utils/urls' +import { getBrandConfig } from '@/ee/whitelabeling' interface InvitationEmailProps { inviterName?: string diff --git a/apps/sim/components/emails/invitations/polling-group-invitation-email.tsx b/apps/sim/components/emails/invitations/polling-group-invitation-email.tsx index b0f8e239b..f918dcc5e 100644 --- a/apps/sim/components/emails/invitations/polling-group-invitation-email.tsx +++ b/apps/sim/components/emails/invitations/polling-group-invitation-email.tsx @@ -1,7 +1,7 @@ import { Link, Text } from '@react-email/components' import { baseStyles } from '@/components/emails/_styles' import { EmailLayout } from '@/components/emails/components' -import { getBrandConfig } from '@/lib/branding/branding' +import { getBrandConfig } from '@/ee/whitelabeling' 
interface PollingGroupInvitationEmailProps { inviterName?: string diff --git a/apps/sim/components/emails/invitations/workspace-invitation-email.tsx b/apps/sim/components/emails/invitations/workspace-invitation-email.tsx index fb64cdfe3..f71e0fbb8 100644 --- a/apps/sim/components/emails/invitations/workspace-invitation-email.tsx +++ b/apps/sim/components/emails/invitations/workspace-invitation-email.tsx @@ -2,8 +2,8 @@ import { Link, Text } from '@react-email/components' import { createLogger } from '@sim/logger' import { baseStyles } from '@/components/emails/_styles' import { EmailLayout } from '@/components/emails/components' -import { getBrandConfig } from '@/lib/branding/branding' import { getBaseUrl } from '@/lib/core/utils/urls' +import { getBrandConfig } from '@/ee/whitelabeling' const logger = createLogger('WorkspaceInvitationEmail') diff --git a/apps/sim/components/emails/notifications/workflow-notification-email.tsx b/apps/sim/components/emails/notifications/workflow-notification-email.tsx index 860688c66..fd49f710f 100644 --- a/apps/sim/components/emails/notifications/workflow-notification-email.tsx +++ b/apps/sim/components/emails/notifications/workflow-notification-email.tsx @@ -1,7 +1,7 @@ import { Link, Section, Text } from '@react-email/components' import { baseStyles } from '@/components/emails/_styles' import { EmailLayout } from '@/components/emails/components' -import { getBrandConfig } from '@/lib/branding/branding' +import { getBrandConfig } from '@/ee/whitelabeling' /** * Serialized rate limit status for email payloads. 
diff --git a/apps/sim/components/emails/subjects.ts b/apps/sim/components/emails/subjects.ts index bf8b9197b..fb7984509 100644 --- a/apps/sim/components/emails/subjects.ts +++ b/apps/sim/components/emails/subjects.ts @@ -1,4 +1,4 @@ -import { getBrandConfig } from '@/lib/branding/branding' +import { getBrandConfig } from '@/ee/whitelabeling' /** Email subject type for all supported email templates */ export type EmailSubjectType = diff --git a/apps/sim/ee/LICENSE b/apps/sim/ee/LICENSE index ba5405dbf..3a492f83f 100644 --- a/apps/sim/ee/LICENSE +++ b/apps/sim/ee/LICENSE @@ -27,7 +27,7 @@ under the following terms: 3. ENTERPRISE SUBSCRIPTION Production deployment of enterprise features requires an active Sim Enterprise - subscription. Contact sales@simstudio.ai for licensing information. + subscription. Contact sales@sim.ai for licensing information. 4. DISCLAIMER @@ -40,4 +40,4 @@ under the following terms: IN NO EVENT SHALL SIM STUDIO, INC. BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY ARISING FROM THE USE OF THE SOFTWARE. -For questions about enterprise licensing, contact: sales@simstudio.ai +For questions about enterprise licensing, contact: sales@sim.ai diff --git a/apps/sim/ee/README.md b/apps/sim/ee/README.md index d9e91afaf..f0377161e 100644 --- a/apps/sim/ee/README.md +++ b/apps/sim/ee/README.md @@ -7,7 +7,7 @@ for production use. 
- **SSO (Single Sign-On)**: OIDC and SAML authentication integration - **Access Control**: Permission groups for fine-grained user access management -- **Credential Sets**: Shared credential pools for email polling workflows +- **Whitelabeling**: Custom branding and theming for enterprise deployments ## Licensing diff --git a/apps/sim/ee/whitelabeling/branding.ts b/apps/sim/ee/whitelabeling/branding.ts new file mode 100644 index 000000000..49af8592f --- /dev/null +++ b/apps/sim/ee/whitelabeling/branding.ts @@ -0,0 +1,45 @@ +import { type BrandConfig, defaultBrandConfig, type ThemeColors } from '@/lib/branding' +import { getEnv } from '@/lib/core/config/env' + +export type { BrandConfig, ThemeColors } + +const getThemeColors = (): ThemeColors => { + return { + primaryColor: + getEnv('NEXT_PUBLIC_BRAND_PRIMARY_COLOR') || defaultBrandConfig.theme?.primaryColor, + primaryHoverColor: + getEnv('NEXT_PUBLIC_BRAND_PRIMARY_HOVER_COLOR') || + defaultBrandConfig.theme?.primaryHoverColor, + accentColor: getEnv('NEXT_PUBLIC_BRAND_ACCENT_COLOR') || defaultBrandConfig.theme?.accentColor, + accentHoverColor: + getEnv('NEXT_PUBLIC_BRAND_ACCENT_HOVER_COLOR') || defaultBrandConfig.theme?.accentHoverColor, + backgroundColor: + getEnv('NEXT_PUBLIC_BRAND_BACKGROUND_COLOR') || defaultBrandConfig.theme?.backgroundColor, + } +} + +/** + * Get branding configuration from environment variables + * Supports runtime configuration via Docker/Kubernetes + */ +export const getBrandConfig = (): BrandConfig => { + return { + name: getEnv('NEXT_PUBLIC_BRAND_NAME') || defaultBrandConfig.name, + logoUrl: getEnv('NEXT_PUBLIC_BRAND_LOGO_URL') || defaultBrandConfig.logoUrl, + faviconUrl: getEnv('NEXT_PUBLIC_BRAND_FAVICON_URL') || defaultBrandConfig.faviconUrl, + customCssUrl: getEnv('NEXT_PUBLIC_CUSTOM_CSS_URL') || defaultBrandConfig.customCssUrl, + supportEmail: getEnv('NEXT_PUBLIC_SUPPORT_EMAIL') || defaultBrandConfig.supportEmail, + documentationUrl: + getEnv('NEXT_PUBLIC_DOCUMENTATION_URL') || 
defaultBrandConfig.documentationUrl, + termsUrl: getEnv('NEXT_PUBLIC_TERMS_URL') || defaultBrandConfig.termsUrl, + privacyUrl: getEnv('NEXT_PUBLIC_PRIVACY_URL') || defaultBrandConfig.privacyUrl, + theme: getThemeColors(), + } +} + +/** + * Hook to use brand configuration in React components + */ +export const useBrandConfig = () => { + return getBrandConfig() +} diff --git a/apps/sim/ee/whitelabeling/index.ts b/apps/sim/ee/whitelabeling/index.ts new file mode 100644 index 000000000..1fe5b9487 --- /dev/null +++ b/apps/sim/ee/whitelabeling/index.ts @@ -0,0 +1,4 @@ +export type { BrandConfig, ThemeColors } from './branding' +export { getBrandConfig, useBrandConfig } from './branding' +export { generateThemeCSS } from './inject-theme' +export { generateBrandedMetadata, generateStructuredData } from './metadata' diff --git a/apps/sim/lib/branding/inject-theme.ts b/apps/sim/ee/whitelabeling/inject-theme.ts similarity index 96% rename from apps/sim/lib/branding/inject-theme.ts rename to apps/sim/ee/whitelabeling/inject-theme.ts index ecb34b1b7..4b44cd25d 100644 --- a/apps/sim/lib/branding/inject-theme.ts +++ b/apps/sim/ee/whitelabeling/inject-theme.ts @@ -1,4 +1,6 @@ -// Helper to detect if background is dark +/** + * Helper to detect if background is dark + */ function isDarkBackground(hexColor: string): boolean { const hex = hexColor.replace('#', '') const r = Number.parseInt(hex.substr(0, 2), 16) diff --git a/apps/sim/lib/branding/metadata.ts b/apps/sim/ee/whitelabeling/metadata.ts similarity index 98% rename from apps/sim/lib/branding/metadata.ts rename to apps/sim/ee/whitelabeling/metadata.ts index ae06860d8..2b3a8fd15 100644 --- a/apps/sim/lib/branding/metadata.ts +++ b/apps/sim/ee/whitelabeling/metadata.ts @@ -1,6 +1,6 @@ import type { Metadata } from 'next' -import { getBrandConfig } from '@/lib/branding/branding' import { getBaseUrl } from '@/lib/core/utils/urls' +import { getBrandConfig } from '@/ee/whitelabeling/branding' /** * Generate dynamic metadata based 
on brand configuration diff --git a/apps/sim/lib/auth/auth.ts b/apps/sim/lib/auth/auth.ts index d5ac1a8c2..be5b961f0 100644 --- a/apps/sim/lib/auth/auth.ts +++ b/apps/sim/lib/auth/auth.ts @@ -151,7 +151,8 @@ export const auth = betterAuth({ create: { before: async (account) => { // Only one credential per (userId, providerId) is allowed - // If user reconnects (even with a different external account), replace the existing one + // If user reconnects (even with a different external account), delete the old one + // and let Better Auth create the new one (returning false breaks account linking flow) const existing = await db.query.account.findFirst({ where: and( eq(schema.account.userId, account.userId), @@ -159,101 +160,59 @@ export const auth = betterAuth({ ), }) - if (existing) { - let scopeToStore = account.scope + const modifiedAccount = { ...account } - if (account.providerId === 'salesforce' && account.accessToken) { - try { - const response = await fetch( - 'https://login.salesforce.com/services/oauth2/userinfo', - { - headers: { - Authorization: `Bearer ${account.accessToken}`, - }, - } - ) - - if (response.ok) { - const data = await response.json() - - if (data.profile) { - const match = data.profile.match(/^(https:\/\/[^/]+)/) - if (match && match[1] !== 'https://login.salesforce.com') { - const instanceUrl = match[1] - scopeToStore = `__sf_instance__:${instanceUrl} ${account.scope}` - } - } + if (account.providerId === 'salesforce' && account.accessToken) { + try { + const response = await fetch( + 'https://login.salesforce.com/services/oauth2/userinfo', + { + headers: { + Authorization: `Bearer ${account.accessToken}`, + }, } - } catch (error) { - logger.error('Failed to fetch Salesforce instance URL', { error }) - } - } - - const refreshTokenExpiresAt = isMicrosoftProvider(account.providerId) - ? 
getMicrosoftRefreshTokenExpiry() - : account.refreshTokenExpiresAt - - await db - .update(schema.account) - .set({ - accountId: account.accountId, - accessToken: account.accessToken, - refreshToken: account.refreshToken, - idToken: account.idToken, - accessTokenExpiresAt: account.accessTokenExpiresAt, - refreshTokenExpiresAt, - scope: scopeToStore, - updatedAt: new Date(), - }) - .where(eq(schema.account.id, existing.id)) - - // Sync webhooks for credential sets after reconnecting - const requestId = crypto.randomUUID().slice(0, 8) - const userMemberships = await db - .select({ - credentialSetId: schema.credentialSetMember.credentialSetId, - providerId: schema.credentialSet.providerId, - }) - .from(schema.credentialSetMember) - .innerJoin( - schema.credentialSet, - eq(schema.credentialSetMember.credentialSetId, schema.credentialSet.id) - ) - .where( - and( - eq(schema.credentialSetMember.userId, account.userId), - eq(schema.credentialSetMember.status, 'active') - ) ) - for (const membership of userMemberships) { - if (membership.providerId === account.providerId) { - try { - await syncAllWebhooksForCredentialSet(membership.credentialSetId, requestId) - logger.info( - '[account.create.before] Synced webhooks after credential reconnect', - { - credentialSetId: membership.credentialSetId, - providerId: account.providerId, - } - ) - } catch (error) { - logger.error( - '[account.create.before] Failed to sync webhooks after credential reconnect', - { - credentialSetId: membership.credentialSetId, - providerId: account.providerId, - error, - } - ) + if (response.ok) { + const data = await response.json() + + if (data.profile) { + const match = data.profile.match(/^(https:\/\/[^/]+)/) + if (match && match[1] !== 'https://login.salesforce.com') { + const instanceUrl = match[1] + modifiedAccount.scope = `__sf_instance__:${instanceUrl} ${account.scope}` + } } } + } catch (error) { + logger.error('Failed to fetch Salesforce instance URL', { error }) } - - return false } - 
return { data: account } + // Handle Microsoft refresh token expiry + if (isMicrosoftProvider(account.providerId)) { + modifiedAccount.refreshTokenExpiresAt = getMicrosoftRefreshTokenExpiry() + } + + if (existing) { + // Delete the existing account so Better Auth can create the new one + // This allows account linking/re-authorization to succeed + await db.delete(schema.account).where(eq(schema.account.id, existing.id)) + + // Preserve the existing account ID so references (like workspace notifications) continue to work + modifiedAccount.id = existing.id + + logger.info('[account.create.before] Deleted existing account for re-authorization', { + userId: account.userId, + providerId: account.providerId, + existingAccountId: existing.id, + preservingId: true, + }) + + // Sync webhooks for credential sets after reconnecting (in after hook) + } + + return { data: modifiedAccount } }, after: async (account) => { try { @@ -1687,6 +1646,12 @@ export const auth = betterAuth({ 'search:confluence', 'read:me', 'offline_access', + 'read:blogpost:confluence', + 'write:blogpost:confluence', + 'read:content.property:confluence', + 'write:content.property:confluence', + 'read:hierarchical-content:confluence', + 'read:content.metadata:confluence', ], responseType: 'code', pkce: true, diff --git a/apps/sim/lib/branding/branding.ts b/apps/sim/lib/branding/branding.ts deleted file mode 100644 index e6964c05c..000000000 --- a/apps/sim/lib/branding/branding.ts +++ /dev/null @@ -1,80 +0,0 @@ -import { getEnv } from '@/lib/core/config/env' - -export interface ThemeColors { - primaryColor?: string - primaryHoverColor?: string - accentColor?: string - accentHoverColor?: string - backgroundColor?: string -} - -export interface BrandConfig { - name: string - logoUrl?: string - faviconUrl?: string - customCssUrl?: string - supportEmail?: string - documentationUrl?: string - termsUrl?: string - privacyUrl?: string - theme?: ThemeColors -} - -/** - * Default brand configuration values - */ 
-const defaultConfig: BrandConfig = { - name: 'Sim', - logoUrl: undefined, - faviconUrl: '/favicon/favicon.ico', - customCssUrl: undefined, - supportEmail: 'help@sim.ai', - documentationUrl: undefined, - termsUrl: undefined, - privacyUrl: undefined, - theme: { - primaryColor: '#701ffc', - primaryHoverColor: '#802fff', - accentColor: '#9d54ff', - accentHoverColor: '#a66fff', - backgroundColor: '#0c0c0c', - }, -} - -const getThemeColors = (): ThemeColors => { - return { - primaryColor: getEnv('NEXT_PUBLIC_BRAND_PRIMARY_COLOR') || defaultConfig.theme?.primaryColor, - primaryHoverColor: - getEnv('NEXT_PUBLIC_BRAND_PRIMARY_HOVER_COLOR') || defaultConfig.theme?.primaryHoverColor, - accentColor: getEnv('NEXT_PUBLIC_BRAND_ACCENT_COLOR') || defaultConfig.theme?.accentColor, - accentHoverColor: - getEnv('NEXT_PUBLIC_BRAND_ACCENT_HOVER_COLOR') || defaultConfig.theme?.accentHoverColor, - backgroundColor: - getEnv('NEXT_PUBLIC_BRAND_BACKGROUND_COLOR') || defaultConfig.theme?.backgroundColor, - } -} - -/** - * Get branding configuration from environment variables - * Supports runtime configuration via Docker/Kubernetes - */ -export const getBrandConfig = (): BrandConfig => { - return { - name: getEnv('NEXT_PUBLIC_BRAND_NAME') || defaultConfig.name, - logoUrl: getEnv('NEXT_PUBLIC_BRAND_LOGO_URL') || defaultConfig.logoUrl, - faviconUrl: getEnv('NEXT_PUBLIC_BRAND_FAVICON_URL') || defaultConfig.faviconUrl, - customCssUrl: getEnv('NEXT_PUBLIC_CUSTOM_CSS_URL') || defaultConfig.customCssUrl, - supportEmail: getEnv('NEXT_PUBLIC_SUPPORT_EMAIL') || defaultConfig.supportEmail, - documentationUrl: getEnv('NEXT_PUBLIC_DOCUMENTATION_URL') || defaultConfig.documentationUrl, - termsUrl: getEnv('NEXT_PUBLIC_TERMS_URL') || defaultConfig.termsUrl, - privacyUrl: getEnv('NEXT_PUBLIC_PRIVACY_URL') || defaultConfig.privacyUrl, - theme: getThemeColors(), - } -} - -/** - * Hook to use brand configuration in React components - */ -export const useBrandConfig = () => { - return getBrandConfig() -} diff 
--git a/apps/sim/lib/branding/defaults.ts b/apps/sim/lib/branding/defaults.ts new file mode 100644 index 000000000..8ce6d1491 --- /dev/null +++ b/apps/sim/lib/branding/defaults.ts @@ -0,0 +1,22 @@ +import type { BrandConfig } from './types' + +/** + * Default brand configuration values + */ +export const defaultBrandConfig: BrandConfig = { + name: 'Sim', + logoUrl: undefined, + faviconUrl: '/favicon/favicon.ico', + customCssUrl: undefined, + supportEmail: 'help@sim.ai', + documentationUrl: undefined, + termsUrl: undefined, + privacyUrl: undefined, + theme: { + primaryColor: '#701ffc', + primaryHoverColor: '#802fff', + accentColor: '#9d54ff', + accentHoverColor: '#a66fff', + backgroundColor: '#0c0c0c', + }, +} diff --git a/apps/sim/lib/branding/index.ts b/apps/sim/lib/branding/index.ts new file mode 100644 index 000000000..495fcde7c --- /dev/null +++ b/apps/sim/lib/branding/index.ts @@ -0,0 +1,2 @@ +export { defaultBrandConfig } from './defaults' +export type { BrandConfig, ThemeColors } from './types' diff --git a/apps/sim/lib/branding/types.ts b/apps/sim/lib/branding/types.ts new file mode 100644 index 000000000..cd286de78 --- /dev/null +++ b/apps/sim/lib/branding/types.ts @@ -0,0 +1,19 @@ +export interface ThemeColors { + primaryColor?: string + primaryHoverColor?: string + accentColor?: string + accentHoverColor?: string + backgroundColor?: string +} + +export interface BrandConfig { + name: string + logoUrl?: string + faviconUrl?: string + customCssUrl?: string + supportEmail?: string + documentationUrl?: string + termsUrl?: string + privacyUrl?: string + theme?: ThemeColors +} diff --git a/apps/sim/lib/logs/execution/snapshot/service.test.ts b/apps/sim/lib/logs/execution/snapshot/service.test.ts index a0f775516..09353f7b2 100644 --- a/apps/sim/lib/logs/execution/snapshot/service.test.ts +++ b/apps/sim/lib/logs/execution/snapshot/service.test.ts @@ -1,16 +1,11 @@ -import { beforeEach, describe, expect, test } from 'vitest' +import { describe, expect, it } from 
'vitest' import { SnapshotService } from '@/lib/logs/execution/snapshot/service' import type { WorkflowState } from '@/lib/logs/types' describe('SnapshotService', () => { - let service: SnapshotService - - beforeEach(() => { - service = new SnapshotService() - }) - describe('computeStateHash', () => { - test('should generate consistent hashes for identical states', () => { + it.concurrent('should generate consistent hashes for identical states', () => { + const service = new SnapshotService() const state: WorkflowState = { blocks: { block1: { @@ -39,7 +34,8 @@ describe('SnapshotService', () => { expect(hash1).toHaveLength(64) // SHA-256 hex string }) - test('should ignore position changes', () => { + it.concurrent('should ignore position changes', () => { + const service = new SnapshotService() const baseState: WorkflowState = { blocks: { block1: { @@ -77,7 +73,8 @@ describe('SnapshotService', () => { expect(hash1).toBe(hash2) }) - test('should detect meaningful changes', () => { + it.concurrent('should detect meaningful changes', () => { + const service = new SnapshotService() const baseState: WorkflowState = { blocks: { block1: { @@ -128,7 +125,8 @@ describe('SnapshotService', () => { expect(hash1).not.toBe(hash2) }) - test('should handle edge order consistently', () => { + it.concurrent('should handle edge order consistently', () => { + const service = new SnapshotService() const state1: WorkflowState = { blocks: {}, edges: [ @@ -155,7 +153,8 @@ describe('SnapshotService', () => { expect(hash1).toBe(hash2) // Should be same despite different order }) - test('should handle empty states', () => { + it.concurrent('should handle empty states', () => { + const service = new SnapshotService() const emptyState: WorkflowState = { blocks: {}, edges: [], @@ -167,7 +166,8 @@ describe('SnapshotService', () => { expect(hash).toHaveLength(64) }) - test('should handle complex nested structures', () => { + it.concurrent('should handle complex nested structures', () => { + const 
service = new SnapshotService() const complexState: WorkflowState = { blocks: { block1: { @@ -224,7 +224,8 @@ describe('SnapshotService', () => { expect(hash).toBe(hash2) }) - test('should include variables in hash computation', () => { + it.concurrent('should include variables in hash computation', () => { + const service = new SnapshotService() const stateWithVariables: WorkflowState = { blocks: {}, edges: [], @@ -253,7 +254,8 @@ describe('SnapshotService', () => { expect(hashWith).not.toBe(hashWithout) }) - test('should detect changes in variable values', () => { + it.concurrent('should detect changes in variable values', () => { + const service = new SnapshotService() const state1: WorkflowState = { blocks: {}, edges: [], @@ -290,7 +292,8 @@ describe('SnapshotService', () => { expect(hash1).not.toBe(hash2) }) - test('should generate consistent hashes for states with variables', () => { + it.concurrent('should generate consistent hashes for states with variables', () => { + const service = new SnapshotService() const stateWithVariables: WorkflowState = { blocks: { block1: { diff --git a/apps/sim/lib/logs/execution/trace-spans/trace-spans.test.ts b/apps/sim/lib/logs/execution/trace-spans/trace-spans.test.ts index 157ca17f6..987318d5e 100644 --- a/apps/sim/lib/logs/execution/trace-spans/trace-spans.test.ts +++ b/apps/sim/lib/logs/execution/trace-spans/trace-spans.test.ts @@ -1,10 +1,10 @@ -import { describe, expect, test } from 'vitest' +import { describe, expect, it } from 'vitest' import { buildTraceSpans } from '@/lib/logs/execution/trace-spans/trace-spans' import { stripCustomToolPrefix } from '@/executor/constants' import type { ExecutionResult } from '@/executor/types' describe('buildTraceSpans', () => { - test('should extract sequential segments from timeSegments data', () => { + it.concurrent('extracts sequential segments from timeSegments data', () => { const mockExecutionResult: ExecutionResult = { success: true, output: { content: 'Final output' }, @@ 
-119,7 +119,7 @@ describe('buildTraceSpans', () => { expect(segments[3].status).toBe('success') }) - test('should fallback to toolCalls extraction when timeSegments not available', () => { + it.concurrent('falls back to toolCalls extraction when timeSegments not available', () => { const mockExecutionResult: ExecutionResult = { success: true, output: { content: 'Final output' }, @@ -194,60 +194,63 @@ describe('buildTraceSpans', () => { expect(secondToolCall.output).toEqual({ status: 200, data: 'response' }) }) - test('should extract tool calls from agent block output with direct toolCalls array format (fallback)', () => { - const mockExecutionResult: ExecutionResult = { - success: true, - output: { content: 'Final output' }, - logs: [ - { - blockId: 'agent-2', - blockName: 'Test Agent 2', - blockType: 'agent', - startedAt: '2024-01-01T10:00:00.000Z', - endedAt: '2024-01-01T10:00:03.000Z', - durationMs: 3000, - success: true, - input: { userPrompt: 'Test prompt' }, - output: { - content: 'Agent response', - model: 'gpt-4o', - providerTiming: { - duration: 2500, - startTime: '2024-01-01T10:00:00.250Z', - endTime: '2024-01-01T10:00:02.750Z', - // No timeSegments - should fallback to toolCalls - }, - toolCalls: [ - { - name: 'serper_search', - arguments: { query: 'test search' }, - result: { results: ['result1', 'result2'] }, - duration: 1500, - startTime: '2024-01-01T10:00:00.500Z', - endTime: '2024-01-01T10:00:02.000Z', + it.concurrent( + 'extracts tool calls from agent block output with direct toolCalls array format', + () => { + const mockExecutionResult: ExecutionResult = { + success: true, + output: { content: 'Final output' }, + logs: [ + { + blockId: 'agent-2', + blockName: 'Test Agent 2', + blockType: 'agent', + startedAt: '2024-01-01T10:00:00.000Z', + endedAt: '2024-01-01T10:00:03.000Z', + durationMs: 3000, + success: true, + input: { userPrompt: 'Test prompt' }, + output: { + content: 'Agent response', + model: 'gpt-4o', + providerTiming: { + duration: 2500, 
+ startTime: '2024-01-01T10:00:00.250Z', + endTime: '2024-01-01T10:00:02.750Z', + // No timeSegments - should fallback to toolCalls }, - ], + toolCalls: [ + { + name: 'serper_search', + arguments: { query: 'test search' }, + result: { results: ['result1', 'result2'] }, + duration: 1500, + startTime: '2024-01-01T10:00:00.500Z', + endTime: '2024-01-01T10:00:02.000Z', + }, + ], + }, }, - }, - ], + ], + } + + const { traceSpans } = buildTraceSpans(mockExecutionResult) + + expect(traceSpans).toHaveLength(1) + const agentSpan = traceSpans[0] + expect(agentSpan.toolCalls).toBeDefined() + expect(agentSpan.toolCalls).toHaveLength(1) + + const toolCall = agentSpan.toolCalls![0] + expect(toolCall.name).toBe('serper_search') + expect(toolCall.duration).toBe(1500) + expect(toolCall.status).toBe('success') + expect(toolCall.input).toEqual({ query: 'test search' }) + expect(toolCall.output).toEqual({ results: ['result1', 'result2'] }) } + ) - const { traceSpans } = buildTraceSpans(mockExecutionResult) - - expect(traceSpans).toHaveLength(1) - const agentSpan = traceSpans[0] - expect(agentSpan.toolCalls).toBeDefined() - expect(agentSpan.toolCalls).toHaveLength(1) - - const toolCall = agentSpan.toolCalls![0] - expect(toolCall.name).toBe('serper_search') - expect(toolCall.duration).toBe(1500) - expect(toolCall.status).toBe('success') - expect(toolCall.input).toEqual({ query: 'test search' }) - expect(toolCall.output).toEqual({ results: ['result1', 'result2'] }) - }) - - test('should extract tool calls from streaming response with executionData format (fallback)', () => { + it.concurrent('extracts tool calls from streaming response with executionData format', () => { const mockExecutionResult: ExecutionResult = { success: true, output: { content: 'Final output' }, @@ -301,7 +304,7 @@ describe('buildTraceSpans', () => { expect(toolCall.output).toEqual({ analysis: 'completed' }) }) - test('should handle tool calls with errors in timeSegments', () => { + it.concurrent('handles tool calls 
with errors in timeSegments', () => { const mockExecutionResult: ExecutionResult = { success: true, output: { content: 'Final output' }, @@ -380,7 +383,7 @@ describe('buildTraceSpans', () => { expect(toolSegment.output).toEqual({ error: 'Tool execution failed' }) }) - test('should handle blocks without tool calls', () => { + it.concurrent('handles blocks without tool calls', () => { const mockExecutionResult: ExecutionResult = { success: true, output: { content: 'Final output' }, @@ -407,7 +410,7 @@ describe('buildTraceSpans', () => { expect(textSpan.toolCalls).toBeUndefined() }) - test('should handle complex multi-iteration agent execution with sequential segments', () => { + it.concurrent('handles complex multi-iteration agent execution with sequential segments', () => { // This test simulates a real agent execution with multiple tool calls and model iterations const mockExecutionResult: ExecutionResult = { success: true, @@ -581,7 +584,7 @@ describe('buildTraceSpans', () => { expect(agentSpan.toolCalls).toBeUndefined() }) - test('should flatten nested child workflow trace spans recursively', () => { + it.concurrent('flattens nested child workflow trace spans recursively', () => { const nestedChildSpan = { id: 'nested-workflow-span', name: 'Nested Workflow Block', @@ -685,7 +688,7 @@ describe('buildTraceSpans', () => { expect(syntheticWrappers).toHaveLength(0) }) - test('should handle nested child workflow errors with proper hierarchy', () => { + it.concurrent('handles nested child workflow errors with proper hierarchy', () => { const functionErrorSpan = { id: 'function-error-span', name: 'Function 1', @@ -770,7 +773,7 @@ describe('buildTraceSpans', () => { expect((functionSpan?.output as { error?: string })?.error).toContain('Syntax Error') }) - test('should remove childTraceSpans from output after integrating them as children', () => { + it.concurrent('removes childTraceSpans from output after integrating them as children', () => { const mockExecutionResult: 
ExecutionResult = { success: true, output: { result: 'parent output' }, @@ -843,15 +846,157 @@ describe('buildTraceSpans', () => { data: 'some result', }) }) + + it.concurrent('matches multiple tool calls with same name by sequential order', () => { + // This test verifies that when an agent makes multiple calls to the same tool + // (e.g., search_tool called 3 times with different queries), each tool segment + // is matched to the correct tool call by their sequential order, not just by name. + const mockExecutionResult: ExecutionResult = { + success: true, + output: { content: 'Final output with multiple searches' }, + logs: [ + { + blockId: 'agent-multi-search', + blockName: 'Multi-Search Agent', + blockType: 'agent', + startedAt: '2024-01-01T10:00:00.000Z', + endedAt: '2024-01-01T10:00:10.000Z', + durationMs: 10000, + success: true, + input: { userPrompt: 'Search for multiple topics' }, + output: { + content: 'Results from multiple searches', + model: 'gpt-4o', + tokens: { input: 50, output: 100, total: 150 }, + providerTiming: { + duration: 10000, + startTime: '2024-01-01T10:00:00.000Z', + endTime: '2024-01-01T10:00:10.000Z', + timeSegments: [ + { + type: 'model', + name: 'Initial response', + startTime: 1704103200000, // 2024-01-01T10:00:00.000Z + endTime: 1704103201000, + duration: 1000, + }, + { + type: 'tool', + name: 'search_tool', + startTime: 1704103201000, // 2024-01-01T10:00:01.000Z + endTime: 1704103202000, + duration: 1000, + }, + { + type: 'model', + name: 'Model response (iteration 1)', + startTime: 1704103202000, + endTime: 1704103203000, + duration: 1000, + }, + { + type: 'tool', + name: 'search_tool', + startTime: 1704103203000, // 2024-01-01T10:00:03.000Z + endTime: 1704103204500, + duration: 1500, + }, + { + type: 'model', + name: 'Model response (iteration 2)', + startTime: 1704103204500, + endTime: 1704103206000, + duration: 1500, + }, + { + type: 'tool', + name: 'search_tool', + startTime: 1704103206000, // 2024-01-01T10:00:06.000Z + 
endTime: 1704103208000, + duration: 2000, + }, + { + type: 'model', + name: 'Model response (iteration 3)', + startTime: 1704103208000, + endTime: 1704103210000, + duration: 2000, + }, + ], + }, + toolCalls: { + list: [ + { + name: 'search_tool', + arguments: { query: 'first query' }, + result: { results: ['first result'] }, + duration: 1000, + startTime: '2024-01-01T10:00:01.000Z', // Matches first segment + endTime: '2024-01-01T10:00:02.000Z', + }, + { + name: 'search_tool', + arguments: { query: 'second query' }, + result: { results: ['second result'] }, + duration: 1500, + startTime: '2024-01-01T10:00:03.000Z', // Matches second segment + endTime: '2024-01-01T10:00:04.500Z', + }, + { + name: 'search_tool', + arguments: { query: 'third query' }, + result: { results: ['third result'] }, + duration: 2000, + startTime: '2024-01-01T10:00:06.000Z', // Matches third segment + endTime: '2024-01-01T10:00:08.000Z', + }, + ], + count: 3, + }, + }, + }, + ], + } + + const { traceSpans } = buildTraceSpans(mockExecutionResult) + + expect(traceSpans).toHaveLength(1) + const agentSpan = traceSpans[0] + expect(agentSpan.children).toBeDefined() + expect(agentSpan.children).toHaveLength(7) + + const segments = agentSpan.children! 
+ + // First search_tool call should have "first query" + const firstToolSegment = segments[1] + expect(firstToolSegment.name).toBe('search_tool') + expect(firstToolSegment.type).toBe('tool') + expect(firstToolSegment.input).toEqual({ query: 'first query' }) + expect(firstToolSegment.output).toEqual({ results: ['first result'] }) + + // Second search_tool call should have "second query" + const secondToolSegment = segments[3] + expect(secondToolSegment.name).toBe('search_tool') + expect(secondToolSegment.type).toBe('tool') + expect(secondToolSegment.input).toEqual({ query: 'second query' }) + expect(secondToolSegment.output).toEqual({ results: ['second result'] }) + + // Third search_tool call should have "third query" + const thirdToolSegment = segments[5] + expect(thirdToolSegment.name).toBe('search_tool') + expect(thirdToolSegment.type).toBe('tool') + expect(thirdToolSegment.input).toEqual({ query: 'third query' }) + expect(thirdToolSegment.output).toEqual({ results: ['third result'] }) + }) }) describe('stripCustomToolPrefix', () => { - test('should strip custom_ prefix from tool names', () => { + it.concurrent('strips custom_ prefix from tool names', () => { expect(stripCustomToolPrefix('custom_test_tool')).toBe('test_tool') expect(stripCustomToolPrefix('custom_analysis')).toBe('analysis') }) - test('should leave non-custom tool names unchanged', () => { + it.concurrent('leaves non-custom tool names unchanged', () => { expect(stripCustomToolPrefix('http_request')).toBe('http_request') expect(stripCustomToolPrefix('serper_search')).toBe('serper_search') expect(stripCustomToolPrefix('regular_tool')).toBe('regular_tool') diff --git a/apps/sim/lib/logs/execution/trace-spans/trace-spans.ts b/apps/sim/lib/logs/execution/trace-spans/trace-spans.ts index 33d671865..a4b35330d 100644 --- a/apps/sim/lib/logs/execution/trace-spans/trace-spans.ts +++ b/apps/sim/lib/logs/execution/trace-spans/trace-spans.ts @@ -233,6 +233,17 @@ export function buildTraceSpans(result: 
ExecutionResult): { const timeSegments = log.output.providerTiming.timeSegments const toolCallsData = log.output?.toolCalls?.list || log.output?.toolCalls || [] + const toolCallsByName = new Map>>() + for (const tc of toolCallsData as Array<{ name?: string; [key: string]: unknown }>) { + const normalizedName = stripCustomToolPrefix(tc.name || '') + if (!toolCallsByName.has(normalizedName)) { + toolCallsByName.set(normalizedName, []) + } + toolCallsByName.get(normalizedName)!.push(tc) + } + + const toolCallIndices = new Map() + span.children = timeSegments.map( ( segment: { @@ -259,14 +270,25 @@ export function buildTraceSpans(result: ExecutionResult): { } if (segment.type === 'tool') { - const matchingToolCall = toolCallsData.find( - (tc: { name?: string; [key: string]: unknown }) => - tc.name === segment.name || stripCustomToolPrefix(tc.name || '') === segment.name - ) + const normalizedName = stripCustomToolPrefix(segment.name || '') + + const toolCallsForName = toolCallsByName.get(normalizedName) || [] + const currentIndex = toolCallIndices.get(normalizedName) || 0 + const matchingToolCall = toolCallsForName[currentIndex] as + | { + error?: string + arguments?: Record + input?: Record + result?: Record + output?: Record + } + | undefined + + toolCallIndices.set(normalizedName, currentIndex + 1) return { id: `${span.id}-segment-${index}`, - name: stripCustomToolPrefix(segment.name || ''), + name: normalizedName, type: 'tool', duration: segment.duration, startTime: segmentStartTime, diff --git a/apps/sim/providers/anthropic/core.ts b/apps/sim/providers/anthropic/core.ts new file mode 100644 index 000000000..3cd16eb4d --- /dev/null +++ b/apps/sim/providers/anthropic/core.ts @@ -0,0 +1,1221 @@ +import type Anthropic from '@anthropic-ai/sdk' +import { transformJSONSchema } from '@anthropic-ai/sdk/lib/transform-json-schema' +import type { Logger } from '@sim/logger' +import type { StreamingExecution } from '@/executor/types' +import { MAX_TOOL_ITERATIONS } from 
'@/providers' +import { + checkForForcedToolUsage, + createReadableStreamFromAnthropicStream, +} from '@/providers/anthropic/utils' +import { + getMaxOutputTokensForModel, + getThinkingCapability, + supportsNativeStructuredOutputs, +} from '@/providers/models' +import type { ProviderRequest, ProviderResponse, TimeSegment } from '@/providers/types' +import { + calculateCost, + prepareToolExecution, + prepareToolsWithUsageControl, +} from '@/providers/utils' +import { executeTool } from '@/tools' + +/** + * Configuration for creating an Anthropic provider instance. + */ +export interface AnthropicProviderConfig { + /** Provider identifier (e.g., 'anthropic', 'azure-anthropic') */ + providerId: string + /** Human-readable label for logging */ + providerLabel: string + /** Factory function to create the Anthropic client */ + createClient: (apiKey: string, useNativeStructuredOutputs: boolean) => Anthropic + /** Logger instance */ + logger: Logger +} + +/** + * Generates prompt-based schema instructions for older models that don't support native structured outputs. + * This is a fallback approach that adds schema requirements to the system prompt. + */ +function generateSchemaInstructions(schema: any, schemaName?: string): string { + const name = schemaName || 'response' + return `IMPORTANT: You must respond with a valid JSON object that conforms to the following schema. +Do not include any text before or after the JSON object. Only output the JSON. + +Schema name: ${name} +JSON Schema: +${JSON.stringify(schema, null, 2)} + +Your response must be valid JSON that exactly matches this schema structure.` +} + +/** + * Maps thinking level strings to budget_tokens values for Anthropic extended thinking. 
+ * These values are calibrated for typical use cases: + * - low: Quick reasoning for simple tasks + * - medium: Balanced reasoning for most tasks + * - high: Deep reasoning for complex problems + */ +const THINKING_BUDGET_TOKENS: Record = { + low: 2048, + medium: 8192, + high: 32768, +} + +/** + * Checks if a model supports adaptive thinking (Opus 4.6+) + */ +function supportsAdaptiveThinking(modelId: string): boolean { + const normalizedModel = modelId.toLowerCase() + return normalizedModel.includes('opus-4-6') || normalizedModel.includes('opus-4.6') +} + +/** + * Builds the thinking configuration for the Anthropic API based on model capabilities and level. + * + * - Opus 4.6: Uses adaptive thinking with effort parameter (recommended by Anthropic) + * - Other models: Uses budget_tokens-based extended thinking + * + * Returns both the thinking config and optional output_config for adaptive thinking. + */ +function buildThinkingConfig( + modelId: string, + thinkingLevel: string +): { + thinking: { type: 'enabled'; budget_tokens: number } | { type: 'adaptive' } + outputConfig?: { effort: string } +} | null { + const capability = getThinkingCapability(modelId) + if (!capability || !capability.levels.includes(thinkingLevel)) { + return null + } + + // Opus 4.6 uses adaptive thinking with effort parameter + if (supportsAdaptiveThinking(modelId)) { + return { + thinking: { type: 'adaptive' }, + outputConfig: { effort: thinkingLevel }, + } + } + + // Other models use budget_tokens-based extended thinking + const budgetTokens = THINKING_BUDGET_TOKENS[thinkingLevel] + if (!budgetTokens) { + return null + } + + return { + thinking: { + type: 'enabled', + budget_tokens: budgetTokens, + }, + } +} + +/** + * Executes a request using the Anthropic API with full tool loop support. + * This is the shared core implementation used by both the standard Anthropic provider + * and the Azure Anthropic provider. 
+ */ +export async function executeAnthropicProviderRequest( + request: ProviderRequest, + config: AnthropicProviderConfig +): Promise { + const { logger, providerId, providerLabel } = config + + if (!request.apiKey) { + throw new Error(`API key is required for ${providerLabel}`) + } + + const modelId = request.model + const useNativeStructuredOutputs = !!( + request.responseFormat && supportsNativeStructuredOutputs(modelId) + ) + + const anthropic = config.createClient(request.apiKey, useNativeStructuredOutputs) + + const messages: any[] = [] + let systemPrompt = request.systemPrompt || '' + + if (request.context) { + messages.push({ + role: 'user', + content: request.context, + }) + } + + if (request.messages) { + request.messages.forEach((msg) => { + if (msg.role === 'function') { + messages.push({ + role: 'user', + content: [ + { + type: 'tool_result', + tool_use_id: msg.name, + content: msg.content, + }, + ], + }) + } else if (msg.function_call) { + const toolUseId = `${msg.function_call.name}-${Date.now()}` + messages.push({ + role: 'assistant', + content: [ + { + type: 'tool_use', + id: toolUseId, + name: msg.function_call.name, + input: JSON.parse(msg.function_call.arguments), + }, + ], + }) + } else { + messages.push({ + role: msg.role === 'assistant' ? 'assistant' : 'user', + content: msg.content ? [{ type: 'text', text: msg.content }] : [], + }) + } + }) + } + + if (messages.length === 0) { + messages.push({ + role: 'user', + content: [{ type: 'text', text: systemPrompt || 'Hello' }], + }) + systemPrompt = '' + } + + let anthropicTools = request.tools?.length + ? 
request.tools.map((tool) => ({ + name: tool.id, + description: tool.description, + input_schema: { + type: 'object', + properties: tool.parameters.properties, + required: tool.parameters.required, + }, + })) + : undefined + + let toolChoice: 'none' | 'auto' | { type: 'tool'; name: string } = 'auto' + let preparedTools: ReturnType | null = null + + if (anthropicTools?.length) { + try { + preparedTools = prepareToolsWithUsageControl( + anthropicTools, + request.tools, + logger, + providerId + ) + const { tools: filteredTools, toolChoice: tc } = preparedTools + + if (filteredTools?.length) { + anthropicTools = filteredTools + + if (typeof tc === 'object' && tc !== null) { + if (tc.type === 'tool') { + toolChoice = tc + logger.info(`Using ${providerLabel} tool_choice format: force tool "${tc.name}"`) + } else { + toolChoice = 'auto' + logger.warn(`Received non-${providerLabel} tool_choice format, defaulting to auto`) + } + } else if (tc === 'auto' || tc === 'none') { + toolChoice = tc + logger.info(`Using tool_choice mode: ${tc}`) + } else { + toolChoice = 'auto' + logger.warn('Unexpected tool_choice format, defaulting to auto') + } + } + } catch (error) { + logger.error('Error in prepareToolsWithUsageControl:', { error }) + toolChoice = 'auto' + } + } + + const payload: any = { + model: request.model, + messages, + system: systemPrompt, + max_tokens: + Number.parseInt(String(request.maxTokens)) || + getMaxOutputTokensForModel(request.model, request.stream ?? false), + temperature: Number.parseFloat(String(request.temperature ?? 
0.7)), + } + + if (request.responseFormat) { + const schema = request.responseFormat.schema || request.responseFormat + + if (useNativeStructuredOutputs) { + const transformedSchema = transformJSONSchema(schema) + payload.output_format = { + type: 'json_schema', + schema: transformedSchema, + } + logger.info(`Using native structured outputs for model: ${modelId}`) + } else { + const schemaInstructions = generateSchemaInstructions(schema, request.responseFormat.name) + payload.system = payload.system + ? `${payload.system}\n\n${schemaInstructions}` + : schemaInstructions + logger.info(`Using prompt-based structured outputs for model: ${modelId}`) + } + } + + // Add extended thinking configuration if supported and requested + if (request.thinkingLevel) { + const thinkingConfig = buildThinkingConfig(request.model, request.thinkingLevel) + if (thinkingConfig) { + payload.thinking = thinkingConfig.thinking + if (thinkingConfig.outputConfig) { + payload.output_config = thinkingConfig.outputConfig + } + const isAdaptive = thinkingConfig.thinking.type === 'adaptive' + logger.info( + `Using ${isAdaptive ? 'adaptive' : 'extended'} thinking for model: ${modelId} with ${isAdaptive ? `effort: ${request.thinkingLevel}` : `budget: ${(thinkingConfig.thinking as { budget_tokens: number }).budget_tokens}`}` + ) + } else { + logger.warn( + `Thinking level "${request.thinkingLevel}" not supported for model: ${modelId}, ignoring` + ) + } + } + + if (anthropicTools?.length) { + payload.tools = anthropicTools + if (toolChoice !== 'auto') { + payload.tool_choice = toolChoice + } + } + + const shouldStreamToolCalls = request.streamToolCalls ?? 
false + + if (request.stream && (!anthropicTools || anthropicTools.length === 0)) { + logger.info(`Using streaming response for ${providerLabel} request (no tools)`) + + const providerStartTime = Date.now() + const providerStartTimeISO = new Date(providerStartTime).toISOString() + + const streamResponse: any = await anthropic.messages.create({ + ...payload, + stream: true, + }) + + const streamingResult = { + stream: createReadableStreamFromAnthropicStream(streamResponse, (content, usage) => { + streamingResult.execution.output.content = content + streamingResult.execution.output.tokens = { + input: usage.input_tokens, + output: usage.output_tokens, + total: usage.input_tokens + usage.output_tokens, + } + + const costResult = calculateCost(request.model, usage.input_tokens, usage.output_tokens) + streamingResult.execution.output.cost = { + input: costResult.input, + output: costResult.output, + total: costResult.total, + } + + const streamEndTime = Date.now() + const streamEndTimeISO = new Date(streamEndTime).toISOString() + + if (streamingResult.execution.output.providerTiming) { + streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO + streamingResult.execution.output.providerTiming.duration = + streamEndTime - providerStartTime + + if (streamingResult.execution.output.providerTiming.timeSegments?.[0]) { + streamingResult.execution.output.providerTiming.timeSegments[0].endTime = streamEndTime + streamingResult.execution.output.providerTiming.timeSegments[0].duration = + streamEndTime - providerStartTime + } + } + }), + execution: { + success: true, + output: { + content: '', + model: request.model, + tokens: { input: 0, output: 0, total: 0 }, + toolCalls: undefined, + providerTiming: { + startTime: providerStartTimeISO, + endTime: new Date().toISOString(), + duration: Date.now() - providerStartTime, + timeSegments: [ + { + type: 'model', + name: 'Streaming response', + startTime: providerStartTime, + endTime: Date.now(), + duration: 
Date.now() - providerStartTime, + }, + ], + }, + cost: { + total: 0.0, + input: 0.0, + output: 0.0, + }, + }, + logs: [], + metadata: { + startTime: providerStartTimeISO, + endTime: new Date().toISOString(), + duration: Date.now() - providerStartTime, + }, + isStreaming: true, + }, + } + + return streamingResult as StreamingExecution + } + + if (request.stream && !shouldStreamToolCalls) { + logger.info( + `Using non-streaming mode for ${providerLabel} request (tool calls executed silently)` + ) + + const providerStartTime = Date.now() + const providerStartTimeISO = new Date(providerStartTime).toISOString() + + // Cap intermediate calls at non-streaming limit to avoid SDK timeout errors, + // but allow users to set lower values if desired + const nonStreamingLimit = getMaxOutputTokensForModel(request.model, false) + const nonStreamingMaxTokens = request.maxTokens + ? Math.min(Number.parseInt(String(request.maxTokens)), nonStreamingLimit) + : nonStreamingLimit + const intermediatePayload = { ...payload, max_tokens: nonStreamingMaxTokens } + + try { + const initialCallTime = Date.now() + const originalToolChoice = intermediatePayload.tool_choice + const forcedTools = preparedTools?.forcedTools || [] + let usedForcedTools: string[] = [] + + let currentResponse = await anthropic.messages.create(intermediatePayload) + const firstResponseTime = Date.now() - initialCallTime + + let content = '' + + if (Array.isArray(currentResponse.content)) { + content = currentResponse.content + .filter((item) => item.type === 'text') + .map((item) => item.text) + .join('\n') + } + + const tokens = { + input: currentResponse.usage?.input_tokens || 0, + output: currentResponse.usage?.output_tokens || 0, + total: + (currentResponse.usage?.input_tokens || 0) + (currentResponse.usage?.output_tokens || 0), + } + + const toolCalls = [] + const toolResults = [] + const currentMessages = [...messages] + let iterationCount = 0 + let hasUsedForcedTool = false + let modelTime = firstResponseTime + 
let toolsTime = 0 + + const timeSegments: TimeSegment[] = [ + { + type: 'model', + name: 'Initial response', + startTime: initialCallTime, + endTime: initialCallTime + firstResponseTime, + duration: firstResponseTime, + }, + ] + + const firstCheckResult = checkForForcedToolUsage( + currentResponse, + originalToolChoice, + forcedTools, + usedForcedTools + ) + if (firstCheckResult) { + hasUsedForcedTool = firstCheckResult.hasUsedForcedTool + usedForcedTools = firstCheckResult.usedForcedTools + } + + try { + while (iterationCount < MAX_TOOL_ITERATIONS) { + const textContent = currentResponse.content + .filter((item) => item.type === 'text') + .map((item) => item.text) + .join('\n') + + if (textContent) { + content = textContent + } + + const toolUses = currentResponse.content.filter((item) => item.type === 'tool_use') + if (!toolUses || toolUses.length === 0) { + break + } + + const toolsStartTime = Date.now() + + const toolExecutionPromises = toolUses.map(async (toolUse) => { + const toolCallStartTime = Date.now() + const toolName = toolUse.name + const toolArgs = toolUse.input as Record + + try { + const tool = request.tools?.find((t: any) => t.id === toolName) + if (!tool) return null + + const { toolParams, executionParams } = prepareToolExecution(tool, toolArgs, request) + const result = await executeTool(toolName, executionParams) + const toolCallEndTime = Date.now() + + return { + toolUse, + toolName, + toolArgs, + toolParams, + result, + startTime: toolCallStartTime, + endTime: toolCallEndTime, + duration: toolCallEndTime - toolCallStartTime, + } + } catch (error) { + const toolCallEndTime = Date.now() + logger.error('Error processing tool call:', { error, toolName }) + + return { + toolUse, + toolName, + toolArgs, + toolParams: {}, + result: { + success: false, + output: undefined, + error: error instanceof Error ? 
error.message : 'Tool execution failed', + }, + startTime: toolCallStartTime, + endTime: toolCallEndTime, + duration: toolCallEndTime - toolCallStartTime, + } + } + }) + + const executionResults = await Promise.allSettled(toolExecutionPromises) + + // Collect all tool_use and tool_result blocks for batching + const toolUseBlocks: Array<{ + type: 'tool_use' + id: string + name: string + input: Record + }> = [] + const toolResultBlocks: Array<{ + type: 'tool_result' + tool_use_id: string + content: string + }> = [] + + for (const settledResult of executionResults) { + if (settledResult.status === 'rejected' || !settledResult.value) continue + + const { + toolUse, + toolName, + toolArgs, + toolParams, + result, + startTime, + endTime, + duration, + } = settledResult.value + + timeSegments.push({ + type: 'tool', + name: toolName, + startTime: startTime, + endTime: endTime, + duration: duration, + }) + + let resultContent: unknown + if (result.success) { + toolResults.push(result.output) + resultContent = result.output + } else { + resultContent = { + error: true, + message: result.error || 'Tool execution failed', + tool: toolName, + } + } + + toolCalls.push({ + name: toolName, + arguments: toolParams, + startTime: new Date(startTime).toISOString(), + endTime: new Date(endTime).toISOString(), + duration: duration, + result: resultContent, + success: result.success, + }) + + // Add to batched arrays using the ORIGINAL ID from Claude's response + toolUseBlocks.push({ + type: 'tool_use', + id: toolUse.id, + name: toolName, + input: toolArgs, + }) + + toolResultBlocks.push({ + type: 'tool_result', + tool_use_id: toolUse.id, + content: JSON.stringify(resultContent), + }) + } + + // Add ONE assistant message with ALL tool_use blocks + if (toolUseBlocks.length > 0) { + currentMessages.push({ + role: 'assistant', + content: toolUseBlocks as unknown as Anthropic.Messages.ContentBlock[], + }) + } + + // Add ONE user message with ALL tool_result blocks + if 
(toolResultBlocks.length > 0) { + currentMessages.push({ + role: 'user', + content: toolResultBlocks as unknown as Anthropic.Messages.ContentBlockParam[], + }) + } + + const thisToolsTime = Date.now() - toolsStartTime + toolsTime += thisToolsTime + + const nextPayload = { + ...intermediatePayload, + messages: currentMessages, + } + + if ( + typeof originalToolChoice === 'object' && + hasUsedForcedTool && + forcedTools.length > 0 + ) { + const remainingTools = forcedTools.filter((tool) => !usedForcedTools.includes(tool)) + + if (remainingTools.length > 0) { + nextPayload.tool_choice = { + type: 'tool', + name: remainingTools[0], + } + logger.info(`Forcing next tool: ${remainingTools[0]}`) + } else { + nextPayload.tool_choice = undefined + logger.info('All forced tools have been used, removing tool_choice parameter') + } + } else if (hasUsedForcedTool && typeof originalToolChoice === 'object') { + nextPayload.tool_choice = undefined + logger.info( + 'Removing tool_choice parameter for subsequent requests after forced tool was used' + ) + } + + const nextModelStartTime = Date.now() + + currentResponse = await anthropic.messages.create(nextPayload) + + const nextCheckResult = checkForForcedToolUsage( + currentResponse, + nextPayload.tool_choice, + forcedTools, + usedForcedTools + ) + if (nextCheckResult) { + hasUsedForcedTool = nextCheckResult.hasUsedForcedTool + usedForcedTools = nextCheckResult.usedForcedTools + } + + const nextModelEndTime = Date.now() + const thisModelTime = nextModelEndTime - nextModelStartTime + + timeSegments.push({ + type: 'model', + name: `Model response (iteration ${iterationCount + 1})`, + startTime: nextModelStartTime, + endTime: nextModelEndTime, + duration: thisModelTime, + }) + + modelTime += thisModelTime + + if (currentResponse.usage) { + tokens.input += currentResponse.usage.input_tokens || 0 + tokens.output += currentResponse.usage.output_tokens || 0 + tokens.total += + (currentResponse.usage.input_tokens || 0) + 
(currentResponse.usage.output_tokens || 0) + } + + iterationCount++ + } + } catch (error) { + logger.error(`Error in ${providerLabel} request:`, { error }) + throw error + } + + const accumulatedCost = calculateCost(request.model, tokens.input, tokens.output) + + const streamingPayload = { + ...payload, + messages: currentMessages, + stream: true, + tool_choice: undefined, + } + + const streamResponse: any = await anthropic.messages.create(streamingPayload) + + const streamingResult = { + stream: createReadableStreamFromAnthropicStream(streamResponse, (streamContent, usage) => { + streamingResult.execution.output.content = streamContent + streamingResult.execution.output.tokens = { + input: tokens.input + usage.input_tokens, + output: tokens.output + usage.output_tokens, + total: tokens.total + usage.input_tokens + usage.output_tokens, + } + + const streamCost = calculateCost(request.model, usage.input_tokens, usage.output_tokens) + streamingResult.execution.output.cost = { + input: accumulatedCost.input + streamCost.input, + output: accumulatedCost.output + streamCost.output, + total: accumulatedCost.total + streamCost.total, + } + + const streamEndTime = Date.now() + const streamEndTimeISO = new Date(streamEndTime).toISOString() + + if (streamingResult.execution.output.providerTiming) { + streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO + streamingResult.execution.output.providerTiming.duration = + streamEndTime - providerStartTime + } + }), + execution: { + success: true, + output: { + content: '', + model: request.model, + tokens: { + input: tokens.input, + output: tokens.output, + total: tokens.total, + }, + toolCalls: + toolCalls.length > 0 + ? 
{ + list: toolCalls, + count: toolCalls.length, + } + : undefined, + providerTiming: { + startTime: providerStartTimeISO, + endTime: new Date().toISOString(), + duration: Date.now() - providerStartTime, + modelTime: modelTime, + toolsTime: toolsTime, + firstResponseTime: firstResponseTime, + iterations: iterationCount + 1, + timeSegments: timeSegments, + }, + cost: { + input: accumulatedCost.input, + output: accumulatedCost.output, + total: accumulatedCost.total, + }, + }, + logs: [], + metadata: { + startTime: providerStartTimeISO, + endTime: new Date().toISOString(), + duration: Date.now() - providerStartTime, + }, + isStreaming: true, + }, + } + + return streamingResult as StreamingExecution + } catch (error) { + const providerEndTime = Date.now() + const providerEndTimeISO = new Date(providerEndTime).toISOString() + const totalDuration = providerEndTime - providerStartTime + + logger.error(`Error in ${providerLabel} request:`, { + error, + duration: totalDuration, + }) + + const enhancedError = new Error(error instanceof Error ? error.message : String(error)) + // @ts-ignore + enhancedError.timing = { + startTime: providerStartTimeISO, + endTime: providerEndTimeISO, + duration: totalDuration, + } + + throw enhancedError + } + } + + const providerStartTime = Date.now() + const providerStartTimeISO = new Date(providerStartTime).toISOString() + + // Cap intermediate calls at non-streaming limit to avoid SDK timeout errors, + // but allow users to set lower values if desired + const nonStreamingLimit = getMaxOutputTokensForModel(request.model, false) + const toolLoopMaxTokens = request.maxTokens + ? 
Math.min(Number.parseInt(String(request.maxTokens)), nonStreamingLimit) + : nonStreamingLimit + const toolLoopPayload = { ...payload, max_tokens: toolLoopMaxTokens } + + try { + const initialCallTime = Date.now() + const originalToolChoice = toolLoopPayload.tool_choice + const forcedTools = preparedTools?.forcedTools || [] + let usedForcedTools: string[] = [] + + let currentResponse = await anthropic.messages.create(toolLoopPayload) + const firstResponseTime = Date.now() - initialCallTime + + let content = '' + + if (Array.isArray(currentResponse.content)) { + content = currentResponse.content + .filter((item) => item.type === 'text') + .map((item) => item.text) + .join('\n') + } + + const tokens = { + input: currentResponse.usage?.input_tokens || 0, + output: currentResponse.usage?.output_tokens || 0, + total: + (currentResponse.usage?.input_tokens || 0) + (currentResponse.usage?.output_tokens || 0), + } + + const initialCost = calculateCost( + request.model, + currentResponse.usage?.input_tokens || 0, + currentResponse.usage?.output_tokens || 0 + ) + const cost = { + input: initialCost.input, + output: initialCost.output, + total: initialCost.total, + } + + const toolCalls = [] + const toolResults = [] + const currentMessages = [...messages] + let iterationCount = 0 + let hasUsedForcedTool = false + let modelTime = firstResponseTime + let toolsTime = 0 + + const timeSegments: TimeSegment[] = [ + { + type: 'model', + name: 'Initial response', + startTime: initialCallTime, + endTime: initialCallTime + firstResponseTime, + duration: firstResponseTime, + }, + ] + + const firstCheckResult = checkForForcedToolUsage( + currentResponse, + originalToolChoice, + forcedTools, + usedForcedTools + ) + if (firstCheckResult) { + hasUsedForcedTool = firstCheckResult.hasUsedForcedTool + usedForcedTools = firstCheckResult.usedForcedTools + } + + try { + while (iterationCount < MAX_TOOL_ITERATIONS) { + const textContent = currentResponse.content + .filter((item) => item.type === 
'text') + .map((item) => item.text) + .join('\n') + + if (textContent) { + content = textContent + } + + const toolUses = currentResponse.content.filter((item) => item.type === 'tool_use') + if (!toolUses || toolUses.length === 0) { + break + } + + const toolsStartTime = Date.now() + + const toolExecutionPromises = toolUses.map(async (toolUse) => { + const toolCallStartTime = Date.now() + const toolName = toolUse.name + const toolArgs = toolUse.input as Record + // Preserve the original tool_use ID from Claude's response + const toolUseId = toolUse.id + + try { + const tool = request.tools?.find((t) => t.id === toolName) + if (!tool) return null + + const { toolParams, executionParams } = prepareToolExecution(tool, toolArgs, request) + const result = await executeTool(toolName, executionParams, true) + const toolCallEndTime = Date.now() + + return { + toolUseId, + toolName, + toolArgs, + toolParams, + result, + startTime: toolCallStartTime, + endTime: toolCallEndTime, + duration: toolCallEndTime - toolCallStartTime, + } + } catch (error) { + const toolCallEndTime = Date.now() + logger.error('Error processing tool call:', { error, toolName }) + + return { + toolUseId, + toolName, + toolArgs, + toolParams: {}, + result: { + success: false, + output: undefined, + error: error instanceof Error ? 
error.message : 'Tool execution failed', + }, + startTime: toolCallStartTime, + endTime: toolCallEndTime, + duration: toolCallEndTime - toolCallStartTime, + } + } + }) + + const executionResults = await Promise.allSettled(toolExecutionPromises) + + // Collect all tool_use and tool_result blocks for batching + const toolUseBlocks: Array<{ + type: 'tool_use' + id: string + name: string + input: Record + }> = [] + const toolResultBlocks: Array<{ + type: 'tool_result' + tool_use_id: string + content: string + }> = [] + + for (const settledResult of executionResults) { + if (settledResult.status === 'rejected' || !settledResult.value) continue + + const { + toolUseId, + toolName, + toolArgs, + toolParams, + result, + startTime, + endTime, + duration, + } = settledResult.value + + timeSegments.push({ + type: 'tool', + name: toolName, + startTime: startTime, + endTime: endTime, + duration: duration, + }) + + let resultContent: unknown + if (result.success) { + toolResults.push(result.output) + resultContent = result.output + } else { + resultContent = { + error: true, + message: result.error || 'Tool execution failed', + tool: toolName, + } + } + + toolCalls.push({ + name: toolName, + arguments: toolParams, + startTime: new Date(startTime).toISOString(), + endTime: new Date(endTime).toISOString(), + duration: duration, + result: resultContent, + success: result.success, + }) + + // Add to batched arrays using the ORIGINAL ID from Claude's response + toolUseBlocks.push({ + type: 'tool_use', + id: toolUseId, + name: toolName, + input: toolArgs, + }) + + toolResultBlocks.push({ + type: 'tool_result', + tool_use_id: toolUseId, + content: JSON.stringify(resultContent), + }) + } + + // Add ONE assistant message with ALL tool_use blocks + if (toolUseBlocks.length > 0) { + currentMessages.push({ + role: 'assistant', + content: toolUseBlocks as unknown as Anthropic.Messages.ContentBlock[], + }) + } + + // Add ONE user message with ALL tool_result blocks + if 
(toolResultBlocks.length > 0) { + currentMessages.push({ + role: 'user', + content: toolResultBlocks as unknown as Anthropic.Messages.ContentBlockParam[], + }) + } + + const thisToolsTime = Date.now() - toolsStartTime + toolsTime += thisToolsTime + + const nextPayload = { + ...toolLoopPayload, + messages: currentMessages, + } + + if (typeof originalToolChoice === 'object' && hasUsedForcedTool && forcedTools.length > 0) { + const remainingTools = forcedTools.filter((tool) => !usedForcedTools.includes(tool)) + + if (remainingTools.length > 0) { + nextPayload.tool_choice = { + type: 'tool', + name: remainingTools[0], + } + logger.info(`Forcing next tool: ${remainingTools[0]}`) + } else { + nextPayload.tool_choice = undefined + logger.info('All forced tools have been used, removing tool_choice parameter') + } + } else if (hasUsedForcedTool && typeof originalToolChoice === 'object') { + nextPayload.tool_choice = undefined + logger.info( + 'Removing tool_choice parameter for subsequent requests after forced tool was used' + ) + } + + const nextModelStartTime = Date.now() + + currentResponse = await anthropic.messages.create(nextPayload) + + const nextCheckResult = checkForForcedToolUsage( + currentResponse, + nextPayload.tool_choice, + forcedTools, + usedForcedTools + ) + if (nextCheckResult) { + hasUsedForcedTool = nextCheckResult.hasUsedForcedTool + usedForcedTools = nextCheckResult.usedForcedTools + } + + const nextModelEndTime = Date.now() + const thisModelTime = nextModelEndTime - nextModelStartTime + + timeSegments.push({ + type: 'model', + name: `Model response (iteration ${iterationCount + 1})`, + startTime: nextModelStartTime, + endTime: nextModelEndTime, + duration: thisModelTime, + }) + + modelTime += thisModelTime + + if (currentResponse.usage) { + tokens.input += currentResponse.usage.input_tokens || 0 + tokens.output += currentResponse.usage.output_tokens || 0 + tokens.total += + (currentResponse.usage.input_tokens || 0) + 
(currentResponse.usage.output_tokens || 0) + + const iterationCost = calculateCost( + request.model, + currentResponse.usage.input_tokens || 0, + currentResponse.usage.output_tokens || 0 + ) + cost.input += iterationCost.input + cost.output += iterationCost.output + cost.total += iterationCost.total + } + + iterationCount++ + } + } catch (error) { + logger.error(`Error in ${providerLabel} request:`, { error }) + throw error + } + + const providerEndTime = Date.now() + const providerEndTimeISO = new Date(providerEndTime).toISOString() + const totalDuration = providerEndTime - providerStartTime + + if (request.stream) { + logger.info(`Using streaming for final ${providerLabel} response after tool processing`) + + const streamingPayload = { + ...payload, + messages: currentMessages, + stream: true, + tool_choice: undefined, + } + + const streamResponse: any = await anthropic.messages.create(streamingPayload) + + const streamingResult = { + stream: createReadableStreamFromAnthropicStream(streamResponse, (streamContent, usage) => { + streamingResult.execution.output.content = streamContent + streamingResult.execution.output.tokens = { + input: tokens.input + usage.input_tokens, + output: tokens.output + usage.output_tokens, + total: tokens.total + usage.input_tokens + usage.output_tokens, + } + + const streamCost = calculateCost(request.model, usage.input_tokens, usage.output_tokens) + streamingResult.execution.output.cost = { + input: cost.input + streamCost.input, + output: cost.output + streamCost.output, + total: cost.total + streamCost.total, + } + + const streamEndTime = Date.now() + const streamEndTimeISO = new Date(streamEndTime).toISOString() + + if (streamingResult.execution.output.providerTiming) { + streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO + streamingResult.execution.output.providerTiming.duration = + streamEndTime - providerStartTime + } + }), + execution: { + success: true, + output: { + content: '', + model: 
request.model, + tokens: { + input: tokens.input, + output: tokens.output, + total: tokens.total, + }, + toolCalls: + toolCalls.length > 0 + ? { + list: toolCalls, + count: toolCalls.length, + } + : undefined, + providerTiming: { + startTime: providerStartTimeISO, + endTime: new Date().toISOString(), + duration: Date.now() - providerStartTime, + modelTime: modelTime, + toolsTime: toolsTime, + firstResponseTime: firstResponseTime, + iterations: iterationCount + 1, + timeSegments: timeSegments, + }, + cost: { + input: cost.input, + output: cost.output, + total: cost.total, + }, + }, + logs: [], + metadata: { + startTime: providerStartTimeISO, + endTime: new Date().toISOString(), + duration: Date.now() - providerStartTime, + }, + isStreaming: true, + }, + } + + return streamingResult as StreamingExecution + } + + return { + content, + model: request.model, + tokens, + toolCalls: + toolCalls.length > 0 + ? toolCalls.map((tc) => ({ + name: tc.name, + arguments: tc.arguments as Record, + startTime: tc.startTime, + endTime: tc.endTime, + duration: tc.duration, + result: tc.result as Record | undefined, + })) + : undefined, + toolResults: toolResults.length > 0 ? toolResults : undefined, + timing: { + startTime: providerStartTimeISO, + endTime: providerEndTimeISO, + duration: totalDuration, + modelTime: modelTime, + toolsTime: toolsTime, + firstResponseTime: firstResponseTime, + iterations: iterationCount + 1, + timeSegments: timeSegments, + }, + } + } catch (error) { + const providerEndTime = Date.now() + const providerEndTimeISO = new Date(providerEndTime).toISOString() + const totalDuration = providerEndTime - providerStartTime + + logger.error(`Error in ${providerLabel} request:`, { + error, + duration: totalDuration, + }) + + const enhancedError = new Error(error instanceof Error ? 
error.message : String(error)) + // @ts-ignore + enhancedError.timing = { + startTime: providerStartTimeISO, + endTime: providerEndTimeISO, + duration: totalDuration, + } + + throw enhancedError + } +} diff --git a/apps/sim/providers/anthropic/index.ts b/apps/sim/providers/anthropic/index.ts index 6ce89f589..543c328fb 100644 --- a/apps/sim/providers/anthropic/index.ts +++ b/apps/sim/providers/anthropic/index.ts @@ -1,49 +1,12 @@ import Anthropic from '@anthropic-ai/sdk' -import { transformJSONSchema } from '@anthropic-ai/sdk/lib/transform-json-schema' import { createLogger } from '@sim/logger' import type { StreamingExecution } from '@/executor/types' -import { MAX_TOOL_ITERATIONS } from '@/providers' -import { - checkForForcedToolUsage, - createReadableStreamFromAnthropicStream, -} from '@/providers/anthropic/utils' -import { - getMaxOutputTokensForModel, - getProviderDefaultModel, - getProviderModels, - supportsNativeStructuredOutputs, -} from '@/providers/models' -import type { - ProviderConfig, - ProviderRequest, - ProviderResponse, - TimeSegment, -} from '@/providers/types' -import { - calculateCost, - prepareToolExecution, - prepareToolsWithUsageControl, -} from '@/providers/utils' -import { executeTool } from '@/tools' +import { executeAnthropicProviderRequest } from '@/providers/anthropic/core' +import { getProviderDefaultModel, getProviderModels } from '@/providers/models' +import type { ProviderConfig, ProviderRequest, ProviderResponse } from '@/providers/types' const logger = createLogger('AnthropicProvider') -/** - * Generates prompt-based schema instructions for older models that don't support native structured outputs. - * This is a fallback approach that adds schema requirements to the system prompt. - */ -function generateSchemaInstructions(schema: any, schemaName?: string): string { - const name = schemaName || 'response' - return `IMPORTANT: You must respond with a valid JSON object that conforms to the following schema. 
-Do not include any text before or after the JSON object. Only output the JSON. - -Schema name: ${name} -JSON Schema: -${JSON.stringify(schema, null, 2)} - -Your response must be valid JSON that exactly matches this schema structure.` -} - export const anthropicProvider: ProviderConfig = { id: 'anthropic', name: 'Anthropic', @@ -55,1101 +18,17 @@ export const anthropicProvider: ProviderConfig = { executeRequest: async ( request: ProviderRequest ): Promise => { - if (!request.apiKey) { - throw new Error('API key is required for Anthropic') - } - - const modelId = request.model - const useNativeStructuredOutputs = !!( - request.responseFormat && supportsNativeStructuredOutputs(modelId) - ) - - const anthropic = new Anthropic({ - apiKey: request.apiKey, - defaultHeaders: useNativeStructuredOutputs - ? { 'anthropic-beta': 'structured-outputs-2025-11-13' } - : undefined, - }) - - const messages: any[] = [] - let systemPrompt = request.systemPrompt || '' - - if (request.context) { - messages.push({ - role: 'user', - content: request.context, - }) - } - - if (request.messages) { - request.messages.forEach((msg) => { - if (msg.role === 'function') { - messages.push({ - role: 'user', - content: [ - { - type: 'tool_result', - tool_use_id: msg.name, - content: msg.content, - }, - ], - }) - } else if (msg.function_call) { - const toolUseId = `${msg.function_call.name}-${Date.now()}` - messages.push({ - role: 'assistant', - content: [ - { - type: 'tool_use', - id: toolUseId, - name: msg.function_call.name, - input: JSON.parse(msg.function_call.arguments), - }, - ], - }) - } else { - messages.push({ - role: msg.role === 'assistant' ? 'assistant' : 'user', - content: msg.content ? [{ type: 'text', text: msg.content }] : [], - }) - } - }) - } - - if (messages.length === 0) { - messages.push({ - role: 'user', - content: [{ type: 'text', text: systemPrompt || 'Hello' }], - }) - systemPrompt = '' - } - - let anthropicTools = request.tools?.length - ? 
request.tools.map((tool) => ({ - name: tool.id, - description: tool.description, - input_schema: { - type: 'object', - properties: tool.parameters.properties, - required: tool.parameters.required, - }, - })) - : undefined - - let toolChoice: 'none' | 'auto' | { type: 'tool'; name: string } = 'auto' - let preparedTools: ReturnType | null = null - - if (anthropicTools?.length) { - try { - preparedTools = prepareToolsWithUsageControl( - anthropicTools, - request.tools, - logger, - 'anthropic' - ) - const { tools: filteredTools, toolChoice: tc } = preparedTools - - if (filteredTools?.length) { - anthropicTools = filteredTools - - if (typeof tc === 'object' && tc !== null) { - if (tc.type === 'tool') { - toolChoice = tc - logger.info(`Using Anthropic tool_choice format: force tool "${tc.name}"`) - } else { - toolChoice = 'auto' - logger.warn('Received non-Anthropic tool_choice format, defaulting to auto') - } - } else if (tc === 'auto' || tc === 'none') { - toolChoice = tc - logger.info(`Using tool_choice mode: ${tc}`) - } else { - toolChoice = 'auto' - logger.warn('Unexpected tool_choice format, defaulting to auto') - } - } - } catch (error) { - logger.error('Error in prepareToolsWithUsageControl:', { error }) - toolChoice = 'auto' - } - } - - const payload: any = { - model: request.model, - messages, - system: systemPrompt, - max_tokens: - Number.parseInt(String(request.maxTokens)) || - getMaxOutputTokensForModel(request.model, request.stream ?? false), - temperature: Number.parseFloat(String(request.temperature ?? 
0.7)), - } - - if (request.responseFormat) { - const schema = request.responseFormat.schema || request.responseFormat - - if (useNativeStructuredOutputs) { - const transformedSchema = transformJSONSchema(schema) - payload.output_format = { - type: 'json_schema', - schema: transformedSchema, - } - logger.info(`Using native structured outputs for model: ${modelId}`) - } else { - const schemaInstructions = generateSchemaInstructions(schema, request.responseFormat.name) - payload.system = payload.system - ? `${payload.system}\n\n${schemaInstructions}` - : schemaInstructions - logger.info(`Using prompt-based structured outputs for model: ${modelId}`) - } - } - - if (anthropicTools?.length) { - payload.tools = anthropicTools - if (toolChoice !== 'auto') { - payload.tool_choice = toolChoice - } - } - - const shouldStreamToolCalls = request.streamToolCalls ?? false - - if (request.stream && (!anthropicTools || anthropicTools.length === 0)) { - logger.info('Using streaming response for Anthropic request (no tools)') - - const providerStartTime = Date.now() - const providerStartTimeISO = new Date(providerStartTime).toISOString() - - const streamResponse: any = await anthropic.messages.create({ - ...payload, - stream: true, - }) - - const streamingResult = { - stream: createReadableStreamFromAnthropicStream(streamResponse, (content, usage) => { - streamingResult.execution.output.content = content - streamingResult.execution.output.tokens = { - input: usage.input_tokens, - output: usage.output_tokens, - total: usage.input_tokens + usage.output_tokens, - } - - const costResult = calculateCost(request.model, usage.input_tokens, usage.output_tokens) - streamingResult.execution.output.cost = { - input: costResult.input, - output: costResult.output, - total: costResult.total, - } - - const streamEndTime = Date.now() - const streamEndTimeISO = new Date(streamEndTime).toISOString() - - if (streamingResult.execution.output.providerTiming) { - 
streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO - streamingResult.execution.output.providerTiming.duration = - streamEndTime - providerStartTime - - if (streamingResult.execution.output.providerTiming.timeSegments?.[0]) { - streamingResult.execution.output.providerTiming.timeSegments[0].endTime = - streamEndTime - streamingResult.execution.output.providerTiming.timeSegments[0].duration = - streamEndTime - providerStartTime - } - } - }), - execution: { - success: true, - output: { - content: '', - model: request.model, - tokens: { input: 0, output: 0, total: 0 }, - toolCalls: undefined, - providerTiming: { - startTime: providerStartTimeISO, - endTime: new Date().toISOString(), - duration: Date.now() - providerStartTime, - timeSegments: [ - { - type: 'model', - name: 'Streaming response', - startTime: providerStartTime, - endTime: Date.now(), - duration: Date.now() - providerStartTime, - }, - ], - }, - cost: { - total: 0.0, - input: 0.0, - output: 0.0, - }, - }, - logs: [], - metadata: { - startTime: providerStartTimeISO, - endTime: new Date().toISOString(), - duration: Date.now() - providerStartTime, - }, - isStreaming: true, - }, - } - - return streamingResult as StreamingExecution - } - - if (request.stream && !shouldStreamToolCalls) { - logger.info('Using non-streaming mode for Anthropic request (tool calls executed silently)') - - const providerStartTime = Date.now() - const providerStartTimeISO = new Date(providerStartTime).toISOString() - - // Cap intermediate calls at non-streaming limit to avoid SDK timeout errors, - // but allow users to set lower values if desired - const nonStreamingLimit = getMaxOutputTokensForModel(request.model, false) - const nonStreamingMaxTokens = request.maxTokens - ? 
Math.min(Number.parseInt(String(request.maxTokens)), nonStreamingLimit) - : nonStreamingLimit - const intermediatePayload = { ...payload, max_tokens: nonStreamingMaxTokens } - - try { - const initialCallTime = Date.now() - const originalToolChoice = intermediatePayload.tool_choice - const forcedTools = preparedTools?.forcedTools || [] - let usedForcedTools: string[] = [] - - let currentResponse = await anthropic.messages.create(intermediatePayload) - const firstResponseTime = Date.now() - initialCallTime - - let content = '' - - if (Array.isArray(currentResponse.content)) { - content = currentResponse.content - .filter((item) => item.type === 'text') - .map((item) => item.text) - .join('\n') - } - - const tokens = { - input: currentResponse.usage?.input_tokens || 0, - output: currentResponse.usage?.output_tokens || 0, - total: - (currentResponse.usage?.input_tokens || 0) + - (currentResponse.usage?.output_tokens || 0), - } - - const toolCalls = [] - const toolResults = [] - const currentMessages = [...messages] - let iterationCount = 0 - let hasUsedForcedTool = false - let modelTime = firstResponseTime - let toolsTime = 0 - - const timeSegments: TimeSegment[] = [ - { - type: 'model', - name: 'Initial response', - startTime: initialCallTime, - endTime: initialCallTime + firstResponseTime, - duration: firstResponseTime, - }, - ] - - const firstCheckResult = checkForForcedToolUsage( - currentResponse, - originalToolChoice, - forcedTools, - usedForcedTools - ) - if (firstCheckResult) { - hasUsedForcedTool = firstCheckResult.hasUsedForcedTool - usedForcedTools = firstCheckResult.usedForcedTools - } - - try { - while (iterationCount < MAX_TOOL_ITERATIONS) { - const textContent = currentResponse.content - .filter((item) => item.type === 'text') - .map((item) => item.text) - .join('\n') - - if (textContent) { - content = textContent - } - - const toolUses = currentResponse.content.filter((item) => item.type === 'tool_use') - if (!toolUses || toolUses.length === 0) { - 
break - } - - const toolsStartTime = Date.now() - - const toolExecutionPromises = toolUses.map(async (toolUse) => { - const toolCallStartTime = Date.now() - const toolName = toolUse.name - const toolArgs = toolUse.input as Record - - try { - const tool = request.tools?.find((t: any) => t.id === toolName) - if (!tool) return null - - const { toolParams, executionParams } = prepareToolExecution( - tool, - toolArgs, - request - ) - const result = await executeTool(toolName, executionParams) - const toolCallEndTime = Date.now() - - return { - toolUse, - toolName, - toolArgs, - toolParams, - result, - startTime: toolCallStartTime, - endTime: toolCallEndTime, - duration: toolCallEndTime - toolCallStartTime, - } - } catch (error) { - const toolCallEndTime = Date.now() - logger.error('Error processing tool call:', { error, toolName }) - - return { - toolUse, - toolName, - toolArgs, - toolParams: {}, - result: { - success: false, - output: undefined, - error: error instanceof Error ? error.message : 'Tool execution failed', - }, - startTime: toolCallStartTime, - endTime: toolCallEndTime, - duration: toolCallEndTime - toolCallStartTime, - } - } - }) - - const executionResults = await Promise.allSettled(toolExecutionPromises) - - // Collect all tool_use and tool_result blocks for batching - const toolUseBlocks: Array<{ - type: 'tool_use' - id: string - name: string - input: Record - }> = [] - const toolResultBlocks: Array<{ - type: 'tool_result' - tool_use_id: string - content: string - }> = [] - - for (const settledResult of executionResults) { - if (settledResult.status === 'rejected' || !settledResult.value) continue - - const { - toolUse, - toolName, - toolArgs, - toolParams, - result, - startTime, - endTime, - duration, - } = settledResult.value - - timeSegments.push({ - type: 'tool', - name: toolName, - startTime: startTime, - endTime: endTime, - duration: duration, - }) - - let resultContent: unknown - if (result.success) { - toolResults.push(result.output) - 
resultContent = result.output - } else { - resultContent = { - error: true, - message: result.error || 'Tool execution failed', - tool: toolName, - } - } - - toolCalls.push({ - name: toolName, - arguments: toolParams, - startTime: new Date(startTime).toISOString(), - endTime: new Date(endTime).toISOString(), - duration: duration, - result: resultContent, - success: result.success, - }) - - // Add to batched arrays using the ORIGINAL ID from Claude's response - toolUseBlocks.push({ - type: 'tool_use', - id: toolUse.id, - name: toolName, - input: toolArgs, - }) - - toolResultBlocks.push({ - type: 'tool_result', - tool_use_id: toolUse.id, - content: JSON.stringify(resultContent), - }) - } - - // Add ONE assistant message with ALL tool_use blocks - if (toolUseBlocks.length > 0) { - currentMessages.push({ - role: 'assistant', - content: toolUseBlocks as unknown as Anthropic.Messages.ContentBlock[], - }) - } - - // Add ONE user message with ALL tool_result blocks - if (toolResultBlocks.length > 0) { - currentMessages.push({ - role: 'user', - content: toolResultBlocks as unknown as Anthropic.Messages.ContentBlockParam[], - }) - } - - const thisToolsTime = Date.now() - toolsStartTime - toolsTime += thisToolsTime - - const nextPayload = { - ...intermediatePayload, - messages: currentMessages, - } - - if ( - typeof originalToolChoice === 'object' && - hasUsedForcedTool && - forcedTools.length > 0 - ) { - const remainingTools = forcedTools.filter((tool) => !usedForcedTools.includes(tool)) - - if (remainingTools.length > 0) { - nextPayload.tool_choice = { - type: 'tool', - name: remainingTools[0], - } - logger.info(`Forcing next tool: ${remainingTools[0]}`) - } else { - nextPayload.tool_choice = undefined - logger.info('All forced tools have been used, removing tool_choice parameter') - } - } else if (hasUsedForcedTool && typeof originalToolChoice === 'object') { - nextPayload.tool_choice = undefined - logger.info( - 'Removing tool_choice parameter for subsequent requests 
after forced tool was used' - ) - } - - const nextModelStartTime = Date.now() - - currentResponse = await anthropic.messages.create(nextPayload) - - const nextCheckResult = checkForForcedToolUsage( - currentResponse, - nextPayload.tool_choice, - forcedTools, - usedForcedTools - ) - if (nextCheckResult) { - hasUsedForcedTool = nextCheckResult.hasUsedForcedTool - usedForcedTools = nextCheckResult.usedForcedTools - } - - const nextModelEndTime = Date.now() - const thisModelTime = nextModelEndTime - nextModelStartTime - - timeSegments.push({ - type: 'model', - name: `Model response (iteration ${iterationCount + 1})`, - startTime: nextModelStartTime, - endTime: nextModelEndTime, - duration: thisModelTime, - }) - - modelTime += thisModelTime - - if (currentResponse.usage) { - tokens.input += currentResponse.usage.input_tokens || 0 - tokens.output += currentResponse.usage.output_tokens || 0 - tokens.total += - (currentResponse.usage.input_tokens || 0) + - (currentResponse.usage.output_tokens || 0) - } - - iterationCount++ - } - } catch (error) { - logger.error('Error in Anthropic request:', { error }) - throw error - } - - const accumulatedCost = calculateCost(request.model, tokens.input, tokens.output) - - const streamingPayload = { - ...payload, - messages: currentMessages, - stream: true, - tool_choice: undefined, - } - - const streamResponse: any = await anthropic.messages.create(streamingPayload) - - const streamingResult = { - stream: createReadableStreamFromAnthropicStream( - streamResponse, - (streamContent, usage) => { - streamingResult.execution.output.content = streamContent - streamingResult.execution.output.tokens = { - input: tokens.input + usage.input_tokens, - output: tokens.output + usage.output_tokens, - total: tokens.total + usage.input_tokens + usage.output_tokens, - } - - const streamCost = calculateCost( - request.model, - usage.input_tokens, - usage.output_tokens - ) - streamingResult.execution.output.cost = { - input: accumulatedCost.input + 
streamCost.input, - output: accumulatedCost.output + streamCost.output, - total: accumulatedCost.total + streamCost.total, - } - - const streamEndTime = Date.now() - const streamEndTimeISO = new Date(streamEndTime).toISOString() - - if (streamingResult.execution.output.providerTiming) { - streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO - streamingResult.execution.output.providerTiming.duration = - streamEndTime - providerStartTime - } - } - ), - execution: { - success: true, - output: { - content: '', - model: request.model, - tokens: { - input: tokens.input, - output: tokens.output, - total: tokens.total, - }, - toolCalls: - toolCalls.length > 0 - ? { - list: toolCalls, - count: toolCalls.length, - } - : undefined, - providerTiming: { - startTime: providerStartTimeISO, - endTime: new Date().toISOString(), - duration: Date.now() - providerStartTime, - modelTime: modelTime, - toolsTime: toolsTime, - firstResponseTime: firstResponseTime, - iterations: iterationCount + 1, - timeSegments: timeSegments, - }, - cost: { - input: accumulatedCost.input, - output: accumulatedCost.output, - total: accumulatedCost.total, - }, - }, - logs: [], - metadata: { - startTime: providerStartTimeISO, - endTime: new Date().toISOString(), - duration: Date.now() - providerStartTime, - }, - isStreaming: true, - }, - } - - return streamingResult as StreamingExecution - } catch (error) { - const providerEndTime = Date.now() - const providerEndTimeISO = new Date(providerEndTime).toISOString() - const totalDuration = providerEndTime - providerStartTime - - logger.error('Error in Anthropic request:', { - error, - duration: totalDuration, - }) - - const enhancedError = new Error(error instanceof Error ? 
error.message : String(error)) - // @ts-ignore - enhancedError.timing = { - startTime: providerStartTimeISO, - endTime: providerEndTimeISO, - duration: totalDuration, - } - - throw enhancedError - } - } - - const providerStartTime = Date.now() - const providerStartTimeISO = new Date(providerStartTime).toISOString() - - // Cap intermediate calls at non-streaming limit to avoid SDK timeout errors, - // but allow users to set lower values if desired - const nonStreamingLimit = getMaxOutputTokensForModel(request.model, false) - const toolLoopMaxTokens = request.maxTokens - ? Math.min(Number.parseInt(String(request.maxTokens)), nonStreamingLimit) - : nonStreamingLimit - const toolLoopPayload = { ...payload, max_tokens: toolLoopMaxTokens } - - try { - const initialCallTime = Date.now() - const originalToolChoice = toolLoopPayload.tool_choice - const forcedTools = preparedTools?.forcedTools || [] - let usedForcedTools: string[] = [] - - let currentResponse = await anthropic.messages.create(toolLoopPayload) - const firstResponseTime = Date.now() - initialCallTime - - let content = '' - - if (Array.isArray(currentResponse.content)) { - content = currentResponse.content - .filter((item) => item.type === 'text') - .map((item) => item.text) - .join('\n') - } - - const tokens = { - input: currentResponse.usage?.input_tokens || 0, - output: currentResponse.usage?.output_tokens || 0, - total: - (currentResponse.usage?.input_tokens || 0) + (currentResponse.usage?.output_tokens || 0), - } - - const initialCost = calculateCost( - request.model, - currentResponse.usage?.input_tokens || 0, - currentResponse.usage?.output_tokens || 0 - ) - const cost = { - input: initialCost.input, - output: initialCost.output, - total: initialCost.total, - } - - const toolCalls = [] - const toolResults = [] - const currentMessages = [...messages] - let iterationCount = 0 - let hasUsedForcedTool = false - let modelTime = firstResponseTime - let toolsTime = 0 - - const timeSegments: TimeSegment[] = [ - 
{ - type: 'model', - name: 'Initial response', - startTime: initialCallTime, - endTime: initialCallTime + firstResponseTime, - duration: firstResponseTime, - }, - ] - - const firstCheckResult = checkForForcedToolUsage( - currentResponse, - originalToolChoice, - forcedTools, - usedForcedTools - ) - if (firstCheckResult) { - hasUsedForcedTool = firstCheckResult.hasUsedForcedTool - usedForcedTools = firstCheckResult.usedForcedTools - } - - try { - while (iterationCount < MAX_TOOL_ITERATIONS) { - const textContent = currentResponse.content - .filter((item) => item.type === 'text') - .map((item) => item.text) - .join('\n') - - if (textContent) { - content = textContent - } - - const toolUses = currentResponse.content.filter((item) => item.type === 'tool_use') - if (!toolUses || toolUses.length === 0) { - break - } - - const toolsStartTime = Date.now() - - const toolExecutionPromises = toolUses.map(async (toolUse) => { - const toolCallStartTime = Date.now() - const toolName = toolUse.name - const toolArgs = toolUse.input as Record - // Preserve the original tool_use ID from Claude's response - const toolUseId = toolUse.id - - try { - const tool = request.tools?.find((t) => t.id === toolName) - if (!tool) return null - - const { toolParams, executionParams } = prepareToolExecution(tool, toolArgs, request) - const result = await executeTool(toolName, executionParams, true) - const toolCallEndTime = Date.now() - - return { - toolUseId, - toolName, - toolArgs, - toolParams, - result, - startTime: toolCallStartTime, - endTime: toolCallEndTime, - duration: toolCallEndTime - toolCallStartTime, - } - } catch (error) { - const toolCallEndTime = Date.now() - logger.error('Error processing tool call:', { error, toolName }) - - return { - toolUseId, - toolName, - toolArgs, - toolParams: {}, - result: { - success: false, - output: undefined, - error: error instanceof Error ? 
error.message : 'Tool execution failed', - }, - startTime: toolCallStartTime, - endTime: toolCallEndTime, - duration: toolCallEndTime - toolCallStartTime, - } - } - }) - - const executionResults = await Promise.allSettled(toolExecutionPromises) - - // Collect all tool_use and tool_result blocks for batching - const toolUseBlocks: Array<{ - type: 'tool_use' - id: string - name: string - input: Record - }> = [] - const toolResultBlocks: Array<{ - type: 'tool_result' - tool_use_id: string - content: string - }> = [] - - for (const settledResult of executionResults) { - if (settledResult.status === 'rejected' || !settledResult.value) continue - - const { - toolUseId, - toolName, - toolArgs, - toolParams, - result, - startTime, - endTime, - duration, - } = settledResult.value - - timeSegments.push({ - type: 'tool', - name: toolName, - startTime: startTime, - endTime: endTime, - duration: duration, - }) - - let resultContent: unknown - if (result.success) { - toolResults.push(result.output) - resultContent = result.output - } else { - resultContent = { - error: true, - message: result.error || 'Tool execution failed', - tool: toolName, - } - } - - toolCalls.push({ - name: toolName, - arguments: toolParams, - startTime: new Date(startTime).toISOString(), - endTime: new Date(endTime).toISOString(), - duration: duration, - result: resultContent, - success: result.success, - }) - - // Add to batched arrays using the ORIGINAL ID from Claude's response - toolUseBlocks.push({ - type: 'tool_use', - id: toolUseId, - name: toolName, - input: toolArgs, - }) - - toolResultBlocks.push({ - type: 'tool_result', - tool_use_id: toolUseId, - content: JSON.stringify(resultContent), - }) - } - - // Add ONE assistant message with ALL tool_use blocks - if (toolUseBlocks.length > 0) { - currentMessages.push({ - role: 'assistant', - content: toolUseBlocks as unknown as Anthropic.Messages.ContentBlock[], - }) - } - - // Add ONE user message with ALL tool_result blocks - if 
(toolResultBlocks.length > 0) { - currentMessages.push({ - role: 'user', - content: toolResultBlocks as unknown as Anthropic.Messages.ContentBlockParam[], - }) - } - - const thisToolsTime = Date.now() - toolsStartTime - toolsTime += thisToolsTime - - const nextPayload = { - ...toolLoopPayload, - messages: currentMessages, - } - - if ( - typeof originalToolChoice === 'object' && - hasUsedForcedTool && - forcedTools.length > 0 - ) { - const remainingTools = forcedTools.filter((tool) => !usedForcedTools.includes(tool)) - - if (remainingTools.length > 0) { - nextPayload.tool_choice = { - type: 'tool', - name: remainingTools[0], - } - logger.info(`Forcing next tool: ${remainingTools[0]}`) - } else { - nextPayload.tool_choice = undefined - logger.info('All forced tools have been used, removing tool_choice parameter') - } - } else if (hasUsedForcedTool && typeof originalToolChoice === 'object') { - nextPayload.tool_choice = undefined - logger.info( - 'Removing tool_choice parameter for subsequent requests after forced tool was used' - ) - } - - const nextModelStartTime = Date.now() - - currentResponse = await anthropic.messages.create(nextPayload) - - const nextCheckResult = checkForForcedToolUsage( - currentResponse, - nextPayload.tool_choice, - forcedTools, - usedForcedTools - ) - if (nextCheckResult) { - hasUsedForcedTool = nextCheckResult.hasUsedForcedTool - usedForcedTools = nextCheckResult.usedForcedTools - } - - const nextModelEndTime = Date.now() - const thisModelTime = nextModelEndTime - nextModelStartTime - - timeSegments.push({ - type: 'model', - name: `Model response (iteration ${iterationCount + 1})`, - startTime: nextModelStartTime, - endTime: nextModelEndTime, - duration: thisModelTime, - }) - - modelTime += thisModelTime - - if (currentResponse.usage) { - tokens.input += currentResponse.usage.input_tokens || 0 - tokens.output += currentResponse.usage.output_tokens || 0 - tokens.total += - (currentResponse.usage.input_tokens || 0) + 
(currentResponse.usage.output_tokens || 0) - - const iterationCost = calculateCost( - request.model, - currentResponse.usage.input_tokens || 0, - currentResponse.usage.output_tokens || 0 - ) - cost.input += iterationCost.input - cost.output += iterationCost.output - cost.total += iterationCost.total - } - - iterationCount++ - } - } catch (error) { - logger.error('Error in Anthropic request:', { error }) - throw error - } - - const providerEndTime = Date.now() - const providerEndTimeISO = new Date(providerEndTime).toISOString() - const totalDuration = providerEndTime - providerStartTime - - if (request.stream) { - logger.info('Using streaming for final Anthropic response after tool processing') - - const streamingPayload = { - ...payload, - messages: currentMessages, - stream: true, - tool_choice: undefined, - } - - const streamResponse: any = await anthropic.messages.create(streamingPayload) - - const streamingResult = { - stream: createReadableStreamFromAnthropicStream(streamResponse, (content, usage) => { - streamingResult.execution.output.content = content - streamingResult.execution.output.tokens = { - input: tokens.input + usage.input_tokens, - output: tokens.output + usage.output_tokens, - total: tokens.total + usage.input_tokens + usage.output_tokens, - } - - const streamCost = calculateCost(request.model, usage.input_tokens, usage.output_tokens) - streamingResult.execution.output.cost = { - input: cost.input + streamCost.input, - output: cost.output + streamCost.output, - total: cost.total + streamCost.total, - } - - const streamEndTime = Date.now() - const streamEndTimeISO = new Date(streamEndTime).toISOString() - - if (streamingResult.execution.output.providerTiming) { - streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO - streamingResult.execution.output.providerTiming.duration = - streamEndTime - providerStartTime - } - }), - execution: { - success: true, - output: { - content: '', - model: request.model, - tokens: { - input: 
tokens.input, - output: tokens.output, - total: tokens.total, - }, - toolCalls: - toolCalls.length > 0 - ? { - list: toolCalls, - count: toolCalls.length, - } - : undefined, - providerTiming: { - startTime: providerStartTimeISO, - endTime: new Date().toISOString(), - duration: Date.now() - providerStartTime, - modelTime: modelTime, - toolsTime: toolsTime, - firstResponseTime: firstResponseTime, - iterations: iterationCount + 1, - timeSegments: timeSegments, - }, - cost: { - input: cost.input, - output: cost.output, - total: cost.total, - }, - }, - logs: [], - metadata: { - startTime: providerStartTimeISO, - endTime: new Date().toISOString(), - duration: Date.now() - providerStartTime, - }, - isStreaming: true, - }, - } - - return streamingResult as StreamingExecution - } - - return { - content, - model: request.model, - tokens, - toolCalls: - toolCalls.length > 0 - ? toolCalls.map((tc) => ({ - name: tc.name, - arguments: tc.arguments as Record, - startTime: tc.startTime, - endTime: tc.endTime, - duration: tc.duration, - result: tc.result as Record | undefined, - })) + return executeAnthropicProviderRequest(request, { + providerId: 'anthropic', + providerLabel: 'Anthropic', + createClient: (apiKey, useNativeStructuredOutputs) => + new Anthropic({ + apiKey, + defaultHeaders: useNativeStructuredOutputs + ? { 'anthropic-beta': 'structured-outputs-2025-11-13' } : undefined, - toolResults: toolResults.length > 0 ? 
toolResults : undefined, - timing: { - startTime: providerStartTimeISO, - endTime: providerEndTimeISO, - duration: totalDuration, - modelTime: modelTime, - toolsTime: toolsTime, - firstResponseTime: firstResponseTime, - iterations: iterationCount + 1, - timeSegments: timeSegments, - }, - } - } catch (error) { - const providerEndTime = Date.now() - const providerEndTimeISO = new Date(providerEndTime).toISOString() - const totalDuration = providerEndTime - providerStartTime - - logger.error('Error in Anthropic request:', { - error, - duration: totalDuration, - }) - - const enhancedError = new Error(error instanceof Error ? error.message : String(error)) - // @ts-ignore - enhancedError.timing = { - startTime: providerStartTimeISO, - endTime: providerEndTimeISO, - duration: totalDuration, - } - - throw enhancedError - } + }), + logger, + }) }, } diff --git a/apps/sim/providers/azure-anthropic/index.ts b/apps/sim/providers/azure-anthropic/index.ts new file mode 100644 index 000000000..efb131be1 --- /dev/null +++ b/apps/sim/providers/azure-anthropic/index.ts @@ -0,0 +1,62 @@ +import Anthropic from '@anthropic-ai/sdk' +import { createLogger } from '@sim/logger' +import type { StreamingExecution } from '@/executor/types' +import { executeAnthropicProviderRequest } from '@/providers/anthropic/core' +import { getProviderDefaultModel, getProviderModels } from '@/providers/models' +import type { ProviderConfig, ProviderRequest, ProviderResponse } from '@/providers/types' + +const logger = createLogger('AzureAnthropicProvider') + +export const azureAnthropicProvider: ProviderConfig = { + id: 'azure-anthropic', + name: 'Azure Anthropic', + description: 'Anthropic Claude models via Azure AI Foundry', + version: '1.0.0', + models: getProviderModels('azure-anthropic'), + defaultModel: getProviderDefaultModel('azure-anthropic'), + + executeRequest: async ( + request: ProviderRequest + ): Promise => { + if (!request.azureEndpoint) { + throw new Error( + 'Azure endpoint is required 
for Azure Anthropic. Please provide it via the azureEndpoint parameter.' + ) + } + + if (!request.apiKey) { + throw new Error('API key is required for Azure Anthropic') + } + + // Strip the azure-anthropic/ prefix from the model name if present + const modelName = request.model.replace(/^azure-anthropic\//, '') + + // Azure AI Foundry hosts Anthropic models at {endpoint}/anthropic + // The SDK appends /v1/messages automatically + const baseURL = `${request.azureEndpoint.replace(/\/$/, '')}/anthropic` + + return executeAnthropicProviderRequest( + { + ...request, + model: modelName, + }, + { + providerId: 'azure-anthropic', + providerLabel: 'Azure Anthropic', + createClient: (apiKey, useNativeStructuredOutputs) => + new Anthropic({ + baseURL, + apiKey, + defaultHeaders: { + 'api-key': apiKey, + 'anthropic-version': '2023-06-01', + ...(useNativeStructuredOutputs + ? { 'anthropic-beta': 'structured-outputs-2025-11-13' } + : {}), + }, + }), + logger, + } + ) + }, +} diff --git a/apps/sim/providers/azure-openai/index.ts b/apps/sim/providers/azure-openai/index.ts index da11e0017..ca63904df 100644 --- a/apps/sim/providers/azure-openai/index.ts +++ b/apps/sim/providers/azure-openai/index.ts @@ -1,12 +1,583 @@ import { createLogger } from '@sim/logger' +import { AzureOpenAI } from 'openai' +import type { ChatCompletionCreateParamsStreaming } from 'openai/resources/chat/completions' import { env } from '@/lib/core/config/env' import type { StreamingExecution } from '@/executor/types' +import { MAX_TOOL_ITERATIONS } from '@/providers' +import { + checkForForcedToolUsage, + createReadableStreamFromAzureOpenAIStream, + extractApiVersionFromUrl, + extractBaseUrl, + extractDeploymentFromUrl, + isChatCompletionsEndpoint, + isResponsesEndpoint, +} from '@/providers/azure-openai/utils' import { getProviderDefaultModel, getProviderModels } from '@/providers/models' import { executeResponsesProviderRequest } from '@/providers/openai/core' -import type { ProviderConfig, ProviderRequest, 
ProviderResponse } from '@/providers/types' +import type { + ProviderConfig, + ProviderRequest, + ProviderResponse, + TimeSegment, +} from '@/providers/types' +import { + calculateCost, + prepareToolExecution, + prepareToolsWithUsageControl, +} from '@/providers/utils' +import { executeTool } from '@/tools' const logger = createLogger('AzureOpenAIProvider') +/** + * Executes a request using the chat completions API. + * Used when the endpoint URL indicates chat completions. + */ +async function executeChatCompletionsRequest( + request: ProviderRequest, + azureEndpoint: string, + azureApiVersion: string, + deploymentName: string +): Promise { + logger.info('Using Azure OpenAI Chat Completions API', { + model: request.model, + endpoint: azureEndpoint, + deploymentName, + apiVersion: azureApiVersion, + hasSystemPrompt: !!request.systemPrompt, + hasMessages: !!request.messages?.length, + hasTools: !!request.tools?.length, + toolCount: request.tools?.length || 0, + hasResponseFormat: !!request.responseFormat, + stream: !!request.stream, + }) + + const azureOpenAI = new AzureOpenAI({ + apiKey: request.apiKey, + apiVersion: azureApiVersion, + endpoint: azureEndpoint, + }) + + const allMessages: any[] = [] + + if (request.systemPrompt) { + allMessages.push({ + role: 'system', + content: request.systemPrompt, + }) + } + + if (request.context) { + allMessages.push({ + role: 'user', + content: request.context, + }) + } + + if (request.messages) { + allMessages.push(...request.messages) + } + + const tools = request.tools?.length + ? 
request.tools.map((tool) => ({ + type: 'function', + function: { + name: tool.id, + description: tool.description, + parameters: tool.parameters, + }, + })) + : undefined + + const payload: any = { + model: deploymentName, + messages: allMessages, + } + + if (request.temperature !== undefined) payload.temperature = request.temperature + if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens + + if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort + if (request.verbosity !== undefined) payload.verbosity = request.verbosity + + if (request.responseFormat) { + payload.response_format = { + type: 'json_schema', + json_schema: { + name: request.responseFormat.name || 'response_schema', + schema: request.responseFormat.schema || request.responseFormat, + strict: request.responseFormat.strict !== false, + }, + } + + logger.info('Added JSON schema response format to Azure OpenAI request') + } + + let preparedTools: ReturnType | null = null + + if (tools?.length) { + preparedTools = prepareToolsWithUsageControl(tools, request.tools, logger, 'azure-openai') + const { tools: filteredTools, toolChoice } = preparedTools + + if (filteredTools?.length && toolChoice) { + payload.tools = filteredTools + payload.tool_choice = toolChoice + + logger.info('Azure OpenAI request configuration:', { + toolCount: filteredTools.length, + toolChoice: + typeof toolChoice === 'string' + ? toolChoice + : toolChoice.type === 'function' + ? `force:${toolChoice.function.name}` + : toolChoice.type === 'tool' + ? `force:${toolChoice.name}` + : toolChoice.type === 'any' + ? 
`force:${toolChoice.any?.name || 'unknown'}` + : 'unknown', + model: deploymentName, + }) + } + } + + const providerStartTime = Date.now() + const providerStartTimeISO = new Date(providerStartTime).toISOString() + + try { + if (request.stream && (!tools || tools.length === 0)) { + logger.info('Using streaming response for Azure OpenAI request') + + const streamingParams: ChatCompletionCreateParamsStreaming = { + ...payload, + stream: true, + stream_options: { include_usage: true }, + } + const streamResponse = await azureOpenAI.chat.completions.create(streamingParams) + + const streamingResult = { + stream: createReadableStreamFromAzureOpenAIStream(streamResponse, (content, usage) => { + streamingResult.execution.output.content = content + streamingResult.execution.output.tokens = { + input: usage.prompt_tokens, + output: usage.completion_tokens, + total: usage.total_tokens, + } + + const costResult = calculateCost( + request.model, + usage.prompt_tokens, + usage.completion_tokens + ) + streamingResult.execution.output.cost = { + input: costResult.input, + output: costResult.output, + total: costResult.total, + } + + const streamEndTime = Date.now() + const streamEndTimeISO = new Date(streamEndTime).toISOString() + + if (streamingResult.execution.output.providerTiming) { + streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO + streamingResult.execution.output.providerTiming.duration = + streamEndTime - providerStartTime + + if (streamingResult.execution.output.providerTiming.timeSegments?.[0]) { + streamingResult.execution.output.providerTiming.timeSegments[0].endTime = + streamEndTime + streamingResult.execution.output.providerTiming.timeSegments[0].duration = + streamEndTime - providerStartTime + } + } + }), + execution: { + success: true, + output: { + content: '', + model: request.model, + tokens: { input: 0, output: 0, total: 0 }, + toolCalls: undefined, + providerTiming: { + startTime: providerStartTimeISO, + endTime: new 
Date().toISOString(), + duration: Date.now() - providerStartTime, + timeSegments: [ + { + type: 'model', + name: 'Streaming response', + startTime: providerStartTime, + endTime: Date.now(), + duration: Date.now() - providerStartTime, + }, + ], + }, + cost: { input: 0, output: 0, total: 0 }, + }, + logs: [], + metadata: { + startTime: providerStartTimeISO, + endTime: new Date().toISOString(), + duration: Date.now() - providerStartTime, + }, + }, + } as StreamingExecution + + return streamingResult as StreamingExecution + } + + const initialCallTime = Date.now() + const originalToolChoice = payload.tool_choice + const forcedTools = preparedTools?.forcedTools || [] + let usedForcedTools: string[] = [] + + let currentResponse = await azureOpenAI.chat.completions.create(payload) + const firstResponseTime = Date.now() - initialCallTime + + let content = currentResponse.choices[0]?.message?.content || '' + const tokens = { + input: currentResponse.usage?.prompt_tokens || 0, + output: currentResponse.usage?.completion_tokens || 0, + total: currentResponse.usage?.total_tokens || 0, + } + const toolCalls = [] + const toolResults = [] + const currentMessages = [...allMessages] + let iterationCount = 0 + let modelTime = firstResponseTime + let toolsTime = 0 + let hasUsedForcedTool = false + + const timeSegments: TimeSegment[] = [ + { + type: 'model', + name: 'Initial response', + startTime: initialCallTime, + endTime: initialCallTime + firstResponseTime, + duration: firstResponseTime, + }, + ] + + const firstCheckResult = checkForForcedToolUsage( + currentResponse, + originalToolChoice, + logger, + forcedTools, + usedForcedTools + ) + hasUsedForcedTool = firstCheckResult.hasUsedForcedTool + usedForcedTools = firstCheckResult.usedForcedTools + + while (iterationCount < MAX_TOOL_ITERATIONS) { + if (currentResponse.choices[0]?.message?.content) { + content = currentResponse.choices[0].message.content + } + + const toolCallsInResponse = 
currentResponse.choices[0]?.message?.tool_calls + if (!toolCallsInResponse || toolCallsInResponse.length === 0) { + break + } + + logger.info( + `Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_TOOL_ITERATIONS})` + ) + + const toolsStartTime = Date.now() + + const toolExecutionPromises = toolCallsInResponse.map(async (toolCall) => { + const toolCallStartTime = Date.now() + const toolName = toolCall.function.name + + try { + const toolArgs = JSON.parse(toolCall.function.arguments) + const tool = request.tools?.find((t) => t.id === toolName) + + if (!tool) return null + + const { toolParams, executionParams } = prepareToolExecution(tool, toolArgs, request) + const result = await executeTool(toolName, executionParams) + const toolCallEndTime = Date.now() + + return { + toolCall, + toolName, + toolParams, + result, + startTime: toolCallStartTime, + endTime: toolCallEndTime, + duration: toolCallEndTime - toolCallStartTime, + } + } catch (error) { + const toolCallEndTime = Date.now() + logger.error('Error processing tool call:', { error, toolName }) + + return { + toolCall, + toolName, + toolParams: {}, + result: { + success: false, + output: undefined, + error: error instanceof Error ? 
error.message : 'Tool execution failed', + }, + startTime: toolCallStartTime, + endTime: toolCallEndTime, + duration: toolCallEndTime - toolCallStartTime, + } + } + }) + + const executionResults = await Promise.allSettled(toolExecutionPromises) + + currentMessages.push({ + role: 'assistant', + content: null, + tool_calls: toolCallsInResponse.map((tc) => ({ + id: tc.id, + type: 'function', + function: { + name: tc.function.name, + arguments: tc.function.arguments, + }, + })), + }) + + for (const settledResult of executionResults) { + if (settledResult.status === 'rejected' || !settledResult.value) continue + + const { toolCall, toolName, toolParams, result, startTime, endTime, duration } = + settledResult.value + + timeSegments.push({ + type: 'tool', + name: toolName, + startTime: startTime, + endTime: endTime, + duration: duration, + }) + + let resultContent: any + if (result.success) { + toolResults.push(result.output) + resultContent = result.output + } else { + resultContent = { + error: true, + message: result.error || 'Tool execution failed', + tool: toolName, + } + } + + toolCalls.push({ + name: toolName, + arguments: toolParams, + startTime: new Date(startTime).toISOString(), + endTime: new Date(endTime).toISOString(), + duration: duration, + result: resultContent, + success: result.success, + }) + + currentMessages.push({ + role: 'tool', + tool_call_id: toolCall.id, + content: JSON.stringify(resultContent), + }) + } + + const thisToolsTime = Date.now() - toolsStartTime + toolsTime += thisToolsTime + + const nextPayload = { + ...payload, + messages: currentMessages, + } + + if (typeof originalToolChoice === 'object' && hasUsedForcedTool && forcedTools.length > 0) { + const remainingTools = forcedTools.filter((tool) => !usedForcedTools.includes(tool)) + + if (remainingTools.length > 0) { + nextPayload.tool_choice = { + type: 'function', + function: { name: remainingTools[0] }, + } + logger.info(`Forcing next tool: ${remainingTools[0]}`) + } else { + 
nextPayload.tool_choice = 'auto' + logger.info('All forced tools have been used, switching to auto tool_choice') + } + } + + const nextModelStartTime = Date.now() + currentResponse = await azureOpenAI.chat.completions.create(nextPayload) + + const nextCheckResult = checkForForcedToolUsage( + currentResponse, + nextPayload.tool_choice, + logger, + forcedTools, + usedForcedTools + ) + hasUsedForcedTool = nextCheckResult.hasUsedForcedTool + usedForcedTools = nextCheckResult.usedForcedTools + + const nextModelEndTime = Date.now() + const thisModelTime = nextModelEndTime - nextModelStartTime + + timeSegments.push({ + type: 'model', + name: `Model response (iteration ${iterationCount + 1})`, + startTime: nextModelStartTime, + endTime: nextModelEndTime, + duration: thisModelTime, + }) + + modelTime += thisModelTime + + if (currentResponse.choices[0]?.message?.content) { + content = currentResponse.choices[0].message.content + } + + if (currentResponse.usage) { + tokens.input += currentResponse.usage.prompt_tokens || 0 + tokens.output += currentResponse.usage.completion_tokens || 0 + tokens.total += currentResponse.usage.total_tokens || 0 + } + + iterationCount++ + } + + if (request.stream) { + logger.info('Using streaming for final response after tool processing') + + const accumulatedCost = calculateCost(request.model, tokens.input, tokens.output) + + const streamingParams: ChatCompletionCreateParamsStreaming = { + ...payload, + messages: currentMessages, + tool_choice: 'auto', + stream: true, + stream_options: { include_usage: true }, + } + const streamResponse = await azureOpenAI.chat.completions.create(streamingParams) + + const streamingResult = { + stream: createReadableStreamFromAzureOpenAIStream(streamResponse, (content, usage) => { + streamingResult.execution.output.content = content + streamingResult.execution.output.tokens = { + input: tokens.input + usage.prompt_tokens, + output: tokens.output + usage.completion_tokens, + total: tokens.total + 
usage.total_tokens, + } + + const streamCost = calculateCost( + request.model, + usage.prompt_tokens, + usage.completion_tokens + ) + streamingResult.execution.output.cost = { + input: accumulatedCost.input + streamCost.input, + output: accumulatedCost.output + streamCost.output, + total: accumulatedCost.total + streamCost.total, + } + + const streamEndTime = Date.now() + const streamEndTimeISO = new Date(streamEndTime).toISOString() + + if (streamingResult.execution.output.providerTiming) { + streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO + streamingResult.execution.output.providerTiming.duration = + streamEndTime - providerStartTime + } + }), + execution: { + success: true, + output: { + content: '', + model: request.model, + tokens: { + input: tokens.input, + output: tokens.output, + total: tokens.total, + }, + toolCalls: + toolCalls.length > 0 + ? { + list: toolCalls, + count: toolCalls.length, + } + : undefined, + providerTiming: { + startTime: providerStartTimeISO, + endTime: new Date().toISOString(), + duration: Date.now() - providerStartTime, + modelTime: modelTime, + toolsTime: toolsTime, + firstResponseTime: firstResponseTime, + iterations: iterationCount + 1, + timeSegments: timeSegments, + }, + cost: { + input: accumulatedCost.input, + output: accumulatedCost.output, + total: accumulatedCost.total, + }, + }, + logs: [], + metadata: { + startTime: providerStartTimeISO, + endTime: new Date().toISOString(), + duration: Date.now() - providerStartTime, + }, + }, + } as StreamingExecution + + return streamingResult as StreamingExecution + } + + const providerEndTime = Date.now() + const providerEndTimeISO = new Date(providerEndTime).toISOString() + const totalDuration = providerEndTime - providerStartTime + + return { + content, + model: request.model, + tokens, + toolCalls: toolCalls.length > 0 ? toolCalls : undefined, + toolResults: toolResults.length > 0 ? 
toolResults : undefined, + timing: { + startTime: providerStartTimeISO, + endTime: providerEndTimeISO, + duration: totalDuration, + modelTime: modelTime, + toolsTime: toolsTime, + firstResponseTime: firstResponseTime, + iterations: iterationCount + 1, + timeSegments: timeSegments, + }, + } + } catch (error) { + const providerEndTime = Date.now() + const providerEndTimeISO = new Date(providerEndTime).toISOString() + const totalDuration = providerEndTime - providerStartTime + + logger.error('Error in Azure OpenAI chat completions request:', { + error, + duration: totalDuration, + }) + + const enhancedError = new Error(error instanceof Error ? error.message : String(error)) + // @ts-ignore - Adding timing property to the error + enhancedError.timing = { + startTime: providerStartTimeISO, + endTime: providerEndTimeISO, + duration: totalDuration, + } + + throw enhancedError + } +} + /** * Azure OpenAI provider configuration */ @@ -22,8 +593,6 @@ export const azureOpenAIProvider: ProviderConfig = { request: ProviderRequest ): Promise => { const azureEndpoint = request.azureEndpoint || env.AZURE_OPENAI_ENDPOINT - const azureApiVersion = - request.azureApiVersion || env.AZURE_OPENAI_API_VERSION || '2024-07-01-preview' if (!azureEndpoint) { throw new Error( @@ -35,6 +604,60 @@ export const azureOpenAIProvider: ProviderConfig = { throw new Error('API key is required for Azure OpenAI') } + // Check if the endpoint is a full chat completions URL + if (isChatCompletionsEndpoint(azureEndpoint)) { + logger.info('Detected chat completions endpoint URL') + + // Extract the base URL for the SDK (it needs just the host, not the full path) + const baseUrl = extractBaseUrl(azureEndpoint) + + // Try to extract deployment from URL, fall back to model name + const urlDeployment = extractDeploymentFromUrl(azureEndpoint) + const deploymentName = urlDeployment || request.model.replace('azure/', '') + + // Try to extract api-version from URL, fall back to request param or env or default + 
const urlApiVersion = extractApiVersionFromUrl(azureEndpoint) + const azureApiVersion = + urlApiVersion || + request.azureApiVersion || + env.AZURE_OPENAI_API_VERSION || + '2024-07-01-preview' + + logger.info('Chat completions configuration:', { + originalEndpoint: azureEndpoint, + baseUrl, + deploymentName, + apiVersion: azureApiVersion, + }) + + return executeChatCompletionsRequest(request, baseUrl, azureApiVersion, deploymentName) + } + + // Check if the endpoint is already a full responses API URL + if (isResponsesEndpoint(azureEndpoint)) { + logger.info('Detected full responses endpoint URL, using it directly') + + const deploymentName = request.model.replace('azure/', '') + + // Use the URL as-is since it's already complete + return executeResponsesProviderRequest(request, { + providerId: 'azure-openai', + providerLabel: 'Azure OpenAI', + modelName: deploymentName, + endpoint: azureEndpoint, + headers: { + 'Content-Type': 'application/json', + 'OpenAI-Beta': 'responses=v1', + 'api-key': request.apiKey, + }, + logger, + }) + } + + // Default: base URL provided, construct the responses API URL + logger.info('Using base endpoint, constructing Responses API URL') + const azureApiVersion = + request.azureApiVersion || env.AZURE_OPENAI_API_VERSION || '2024-07-01-preview' const deploymentName = request.model.replace('azure/', '') const apiUrl = `${azureEndpoint.replace(/\/$/, '')}/openai/v1/responses?api-version=${azureApiVersion}` diff --git a/apps/sim/providers/azure-openai/utils.ts b/apps/sim/providers/azure-openai/utils.ts new file mode 100644 index 000000000..36e65e678 --- /dev/null +++ b/apps/sim/providers/azure-openai/utils.ts @@ -0,0 +1,118 @@ +import type { Logger } from '@sim/logger' +import type { ChatCompletionChunk } from 'openai/resources/chat/completions' +import type { CompletionUsage } from 'openai/resources/completions' +import type { Stream } from 'openai/streaming' +import { checkForForcedToolUsageOpenAI, createOpenAICompatibleStream } from 
'@/providers/utils' + +/** + * Creates a ReadableStream from an Azure OpenAI streaming response. + * Uses the shared OpenAI-compatible streaming utility. + */ +export function createReadableStreamFromAzureOpenAIStream( + azureOpenAIStream: Stream, + onComplete?: (content: string, usage: CompletionUsage) => void +): ReadableStream { + return createOpenAICompatibleStream(azureOpenAIStream, 'Azure OpenAI', onComplete) +} + +/** + * Checks if a forced tool was used in an Azure OpenAI response. + * Uses the shared OpenAI-compatible forced tool usage helper. + */ +export function checkForForcedToolUsage( + response: any, + toolChoice: string | { type: string; function?: { name: string }; name?: string; any?: any }, + _logger: Logger, + forcedTools: string[], + usedForcedTools: string[] +): { hasUsedForcedTool: boolean; usedForcedTools: string[] } { + return checkForForcedToolUsageOpenAI( + response, + toolChoice, + 'Azure OpenAI', + forcedTools, + usedForcedTools, + _logger + ) +} + +/** + * Determines if an Azure OpenAI endpoint URL is for the chat completions API. + * Returns true for URLs containing /chat/completions pattern. + * + * @param endpoint - The Azure OpenAI endpoint URL + * @returns true if the endpoint is for chat completions API + */ +export function isChatCompletionsEndpoint(endpoint: string): boolean { + const normalizedEndpoint = endpoint.toLowerCase() + return normalizedEndpoint.includes('/chat/completions') +} + +/** + * Determines if an Azure OpenAI endpoint URL is already a complete responses API URL. + * Returns true for URLs containing /responses pattern (but not /chat/completions). 
+ * + * @param endpoint - The Azure OpenAI endpoint URL + * @returns true if the endpoint is already a responses API URL + */ +export function isResponsesEndpoint(endpoint: string): boolean { + const normalizedEndpoint = endpoint.toLowerCase() + return ( + normalizedEndpoint.includes('/responses') && !normalizedEndpoint.includes('/chat/completions') + ) +} + +/** + * Extracts the base URL from a full Azure OpenAI chat completions URL. + * For example: + * Input: https://resource.openai.azure.com/openai/deployments/gpt-4/chat/completions?api-version=2024-01-01 + * Output: https://resource.openai.azure.com + * + * @param fullUrl - The full chat completions URL + * @returns The base URL (scheme + host) + */ +export function extractBaseUrl(fullUrl: string): string { + try { + const url = new URL(fullUrl) + return `${url.protocol}//${url.host}` + } catch { + // If parsing fails, try to extract up to .com or .azure.com + const match = fullUrl.match(/^(https?:\/\/[^/]+)/) + return match ? match[1] : fullUrl + } +} + +/** + * Extracts the deployment name from a full Azure OpenAI URL. + * For example: + * Input: https://resource.openai.azure.com/openai/deployments/gpt-4.1-mini/chat/completions?api-version=2024-01-01 + * Output: gpt-4.1-mini + * + * @param fullUrl - The full Azure OpenAI URL + * @returns The deployment name or null if not found + */ +export function extractDeploymentFromUrl(fullUrl: string): string | null { + // Match /deployments/{deployment-name}/ pattern + const match = fullUrl.match(/\/deployments\/([^/]+)/i) + return match ? match[1] : null +} + +/** + * Extracts the api-version from a full Azure OpenAI URL query string. 
+ * For example: + * Input: https://resource.openai.azure.com/openai/deployments/gpt-4/chat/completions?api-version=2025-01-01-preview + * Output: 2025-01-01-preview + * + * @param fullUrl - The full Azure OpenAI URL + * @returns The api-version or null if not found + */ +export function extractApiVersionFromUrl(fullUrl: string): string | null { + try { + const url = new URL(fullUrl) + return url.searchParams.get('api-version') + } catch { + // Fallback regex for malformed URLs + const match = fullUrl.match(/[?&]api-version=([^&]+)/i) + return match ? match[1] : null + } +} diff --git a/apps/sim/providers/models.ts b/apps/sim/providers/models.ts index b89172f27..3662e1ca5 100644 --- a/apps/sim/providers/models.ts +++ b/apps/sim/providers/models.ts @@ -369,6 +369,183 @@ export const PROVIDER_DEFINITIONS: Record = { }, ], }, + anthropic: { + id: 'anthropic', + name: 'Anthropic', + description: "Anthropic's Claude models", + defaultModel: 'claude-sonnet-4-5', + modelPatterns: [/^claude/], + icon: AnthropicIcon, + capabilities: { + toolUsageControl: true, + }, + models: [ + { + id: 'claude-opus-4-6', + pricing: { + input: 5.0, + cachedInput: 0.5, + output: 25.0, + updatedAt: '2026-02-05', + }, + capabilities: { + temperature: { min: 0, max: 1 }, + nativeStructuredOutputs: true, + maxOutputTokens: { max: 128000, default: 8192 }, + thinking: { + levels: ['low', 'medium', 'high', 'max'], + default: 'high', + }, + }, + contextWindow: 200000, + }, + { + id: 'claude-opus-4-5', + pricing: { + input: 5.0, + cachedInput: 0.5, + output: 25.0, + updatedAt: '2025-11-24', + }, + capabilities: { + temperature: { min: 0, max: 1 }, + nativeStructuredOutputs: true, + maxOutputTokens: { max: 64000, default: 8192 }, + thinking: { + levels: ['low', 'medium', 'high'], + default: 'medium', + }, + }, + contextWindow: 200000, + }, + { + id: 'claude-opus-4-1', + pricing: { + input: 15.0, + cachedInput: 1.5, + output: 75.0, + updatedAt: '2026-02-05', + }, + capabilities: { + temperature: { min: 
0, max: 1 }, + nativeStructuredOutputs: true, + maxOutputTokens: { max: 64000, default: 8192 }, + thinking: { + levels: ['low', 'medium', 'high'], + default: 'medium', + }, + }, + contextWindow: 200000, + }, + { + id: 'claude-opus-4-0', + pricing: { + input: 15.0, + cachedInput: 1.5, + output: 75.0, + updatedAt: '2026-02-05', + }, + capabilities: { + temperature: { min: 0, max: 1 }, + maxOutputTokens: { max: 64000, default: 8192 }, + thinking: { + levels: ['low', 'medium', 'high'], + default: 'medium', + }, + }, + contextWindow: 200000, + }, + { + id: 'claude-sonnet-4-5', + pricing: { + input: 3.0, + cachedInput: 0.3, + output: 15.0, + updatedAt: '2026-02-05', + }, + capabilities: { + temperature: { min: 0, max: 1 }, + nativeStructuredOutputs: true, + maxOutputTokens: { max: 64000, default: 8192 }, + thinking: { + levels: ['low', 'medium', 'high'], + default: 'medium', + }, + }, + contextWindow: 200000, + }, + { + id: 'claude-sonnet-4-0', + pricing: { + input: 3.0, + cachedInput: 0.3, + output: 15.0, + updatedAt: '2026-02-05', + }, + capabilities: { + temperature: { min: 0, max: 1 }, + maxOutputTokens: { max: 64000, default: 8192 }, + thinking: { + levels: ['low', 'medium', 'high'], + default: 'medium', + }, + }, + contextWindow: 200000, + }, + { + id: 'claude-haiku-4-5', + pricing: { + input: 1.0, + cachedInput: 0.1, + output: 5.0, + updatedAt: '2026-02-05', + }, + capabilities: { + temperature: { min: 0, max: 1 }, + nativeStructuredOutputs: true, + maxOutputTokens: { max: 64000, default: 8192 }, + thinking: { + levels: ['low', 'medium', 'high'], + default: 'medium', + }, + }, + contextWindow: 200000, + }, + { + id: 'claude-3-haiku-20240307', + pricing: { + input: 0.25, + cachedInput: 0.025, + output: 1.25, + updatedAt: '2026-02-05', + }, + capabilities: { + temperature: { min: 0, max: 1 }, + maxOutputTokens: { max: 4096, default: 4096 }, + }, + contextWindow: 200000, + }, + { + id: 'claude-3-7-sonnet-latest', + pricing: { + input: 3.0, + cachedInput: 0.3, + 
output: 15.0, + updatedAt: '2026-02-05', + }, + capabilities: { + temperature: { min: 0, max: 1 }, + computerUse: true, + maxOutputTokens: { max: 8192, default: 8192 }, + thinking: { + levels: ['low', 'medium', 'high'], + default: 'medium', + }, + }, + contextWindow: 200000, + }, + ], + }, 'azure-openai': { id: 'azure-openai', name: 'Azure OpenAI', @@ -602,132 +779,109 @@ export const PROVIDER_DEFINITIONS: Record = { }, ], }, - anthropic: { - id: 'anthropic', - name: 'Anthropic', - description: "Anthropic's Claude models", - defaultModel: 'claude-sonnet-4-5', - modelPatterns: [/^claude/], - icon: AnthropicIcon, + 'azure-anthropic': { + id: 'azure-anthropic', + name: 'Azure Anthropic', + description: 'Anthropic Claude models via Azure AI Foundry', + defaultModel: 'azure-anthropic/claude-sonnet-4-5', + modelPatterns: [/^azure-anthropic\//], + icon: AzureIcon, capabilities: { toolUsageControl: true, }, models: [ { - id: 'claude-haiku-4-5', - pricing: { - input: 1.0, - cachedInput: 0.5, - output: 5.0, - updatedAt: '2025-10-11', - }, - capabilities: { - temperature: { min: 0, max: 1 }, - nativeStructuredOutputs: true, - maxOutputTokens: { max: 64000, default: 8192 }, - }, - contextWindow: 200000, - }, - { - id: 'claude-sonnet-4-5', - pricing: { - input: 3.0, - cachedInput: 1.5, - output: 15.0, - updatedAt: '2025-10-11', - }, - capabilities: { - temperature: { min: 0, max: 1 }, - nativeStructuredOutputs: true, - maxOutputTokens: { max: 64000, default: 8192 }, - }, - contextWindow: 200000, - }, - { - id: 'claude-sonnet-4-0', - pricing: { - input: 3.0, - cachedInput: 1.5, - output: 15.0, - updatedAt: '2025-06-17', - }, - capabilities: { - temperature: { min: 0, max: 1 }, - maxOutputTokens: { max: 64000, default: 8192 }, - }, - contextWindow: 200000, - }, - { - id: 'claude-opus-4-5', + id: 'azure-anthropic/claude-opus-4-6', pricing: { input: 5.0, cachedInput: 0.5, output: 25.0, - updatedAt: '2025-11-24', + updatedAt: '2026-02-05', + }, + capabilities: { + temperature: { 
min: 0, max: 1 }, + nativeStructuredOutputs: true, + maxOutputTokens: { max: 128000, default: 8192 }, + thinking: { + levels: ['low', 'medium', 'high', 'max'], + default: 'high', + }, + }, + contextWindow: 200000, + }, + { + id: 'azure-anthropic/claude-opus-4-5', + pricing: { + input: 5.0, + cachedInput: 0.5, + output: 25.0, + updatedAt: '2026-02-05', }, capabilities: { temperature: { min: 0, max: 1 }, nativeStructuredOutputs: true, maxOutputTokens: { max: 64000, default: 8192 }, + thinking: { + levels: ['low', 'medium', 'high'], + default: 'medium', + }, }, contextWindow: 200000, }, { - id: 'claude-opus-4-1', + id: 'azure-anthropic/claude-sonnet-4-5', pricing: { - input: 15.0, - cachedInput: 7.5, - output: 75.0, - updatedAt: '2025-10-11', + input: 3.0, + cachedInput: 0.3, + output: 15.0, + updatedAt: '2026-02-05', }, capabilities: { temperature: { min: 0, max: 1 }, nativeStructuredOutputs: true, maxOutputTokens: { max: 64000, default: 8192 }, + thinking: { + levels: ['low', 'medium', 'high'], + default: 'medium', + }, }, contextWindow: 200000, }, { - id: 'claude-opus-4-0', + id: 'azure-anthropic/claude-opus-4-1', pricing: { input: 15.0, - cachedInput: 7.5, + cachedInput: 1.5, output: 75.0, - updatedAt: '2025-06-17', + updatedAt: '2026-02-05', }, capabilities: { temperature: { min: 0, max: 1 }, + nativeStructuredOutputs: true, maxOutputTokens: { max: 64000, default: 8192 }, + thinking: { + levels: ['low', 'medium', 'high'], + default: 'medium', + }, }, contextWindow: 200000, }, { - id: 'claude-3-7-sonnet-latest', + id: 'azure-anthropic/claude-haiku-4-5', pricing: { - input: 3.0, - cachedInput: 1.5, - output: 15.0, - updatedAt: '2025-06-17', + input: 1.0, + cachedInput: 0.1, + output: 5.0, + updatedAt: '2026-02-05', }, capabilities: { temperature: { min: 0, max: 1 }, - computerUse: true, - maxOutputTokens: { max: 8192, default: 8192 }, - }, - contextWindow: 200000, - }, - { - id: 'claude-3-5-sonnet-latest', - pricing: { - input: 3.0, - cachedInput: 1.5, - output: 
15.0, - updatedAt: '2025-06-17', - }, - capabilities: { - temperature: { min: 0, max: 1 }, - computerUse: true, - maxOutputTokens: { max: 8192, default: 8192 }, + nativeStructuredOutputs: true, + maxOutputTokens: { max: 64000, default: 8192 }, + thinking: { + levels: ['low', 'medium', 'high'], + default: 'medium', + }, }, contextWindow: 200000, }, diff --git a/apps/sim/providers/registry.ts b/apps/sim/providers/registry.ts index 1b12656b9..3f7be20c9 100644 --- a/apps/sim/providers/registry.ts +++ b/apps/sim/providers/registry.ts @@ -1,5 +1,6 @@ import { createLogger } from '@sim/logger' import { anthropicProvider } from '@/providers/anthropic' +import { azureAnthropicProvider } from '@/providers/azure-anthropic' import { azureOpenAIProvider } from '@/providers/azure-openai' import { bedrockProvider } from '@/providers/bedrock' import { cerebrasProvider } from '@/providers/cerebras' @@ -20,6 +21,7 @@ const logger = createLogger('ProviderRegistry') const providerRegistry: Record = { openai: openaiProvider, anthropic: anthropicProvider, + 'azure-anthropic': azureAnthropicProvider, google: googleProvider, vertex: vertexProvider, deepseek: deepseekProvider, diff --git a/apps/sim/providers/types.ts b/apps/sim/providers/types.ts index 54b4acbb7..eb11061d9 100644 --- a/apps/sim/providers/types.ts +++ b/apps/sim/providers/types.ts @@ -4,6 +4,7 @@ export type ProviderId = | 'openai' | 'azure-openai' | 'anthropic' + | 'azure-anthropic' | 'google' | 'vertex' | 'deepseek' diff --git a/apps/sim/providers/utils.test.ts b/apps/sim/providers/utils.test.ts index f2181c392..68575b875 100644 --- a/apps/sim/providers/utils.test.ts +++ b/apps/sim/providers/utils.test.ts @@ -173,7 +173,6 @@ describe('Model Capabilities', () => { 'claude-sonnet-4-0', 'claude-opus-4-0', 'claude-3-7-sonnet-latest', - 'claude-3-5-sonnet-latest', 'grok-3-latest', 'grok-3-fast-latest', 'deepseek-v3', @@ -256,7 +255,6 @@ describe('Model Capabilities', () => { 'claude-sonnet-4-0', 'claude-opus-4-0', 
'claude-3-7-sonnet-latest', - 'claude-3-5-sonnet-latest', 'grok-3-latest', 'grok-3-fast-latest', ] diff --git a/apps/sim/providers/utils.ts b/apps/sim/providers/utils.ts index b064b4220..50bcec5c6 100644 --- a/apps/sim/providers/utils.ts +++ b/apps/sim/providers/utils.ts @@ -123,6 +123,7 @@ export const providers: Record = { getProviderModelsFromDefinitions('anthropic').includes(model) ), }, + 'azure-anthropic': buildProviderMetadata('azure-anthropic'), google: buildProviderMetadata('google'), vertex: buildProviderMetadata('vertex'), deepseek: buildProviderMetadata('deepseek'), diff --git a/apps/sim/serializer/index.ts b/apps/sim/serializer/index.ts index e22c654c4..622667d9f 100644 --- a/apps/sim/serializer/index.ts +++ b/apps/sim/serializer/index.ts @@ -520,7 +520,9 @@ export class Serializer { } // Check if value is missing - const fieldValue = params[subBlockConfig.id] + // For canonical subBlocks, look up the canonical param value (original IDs were deleted) + const canonicalId = canonicalIndex.canonicalIdBySubBlockId[subBlockConfig.id] + const fieldValue = canonicalId ? 
params[canonicalId] : params[subBlockConfig.id] if (fieldValue === undefined || fieldValue === null || fieldValue === '') { missingFields.push(subBlockConfig.title || subBlockConfig.id) } diff --git a/apps/sim/tools/confluence/add_label.ts b/apps/sim/tools/confluence/add_label.ts new file mode 100644 index 000000000..db931b3cf --- /dev/null +++ b/apps/sim/tools/confluence/add_label.ts @@ -0,0 +1,123 @@ +import { TIMESTAMP_OUTPUT } from '@/tools/confluence/types' +import type { ToolConfig } from '@/tools/types' + +export interface ConfluenceAddLabelParams { + accessToken: string + domain: string + pageId: string + labelName: string + prefix?: string + cloudId?: string +} + +export interface ConfluenceAddLabelResponse { + success: boolean + output: { + ts: string + pageId: string + labelName: string + labelId: string + } +} + +export const confluenceAddLabelTool: ToolConfig< + ConfluenceAddLabelParams, + ConfluenceAddLabelResponse +> = { + id: 'confluence_add_label', + name: 'Confluence Add Label', + description: 'Add a label to a Confluence page for organization and categorization.', + version: '1.0.0', + + oauth: { + required: true, + provider: 'confluence', + }, + + params: { + accessToken: { + type: 'string', + required: true, + visibility: 'hidden', + description: 'OAuth access token for Confluence', + }, + domain: { + type: 'string', + required: true, + visibility: 'user-only', + description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)', + }, + pageId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'Confluence page ID to add the label to', + }, + labelName: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'Name of the label to add', + }, + prefix: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Label prefix: global (default), my, team, or system', + }, + cloudId: { + type: 'string', + required: false, + visibility: 'user-only', + description: + 
'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.', + }, + }, + + request: { + url: () => '/api/tools/confluence/labels', + method: 'POST', + headers: (params: ConfluenceAddLabelParams) => ({ + Accept: 'application/json', + 'Content-Type': 'application/json', + Authorization: `Bearer ${params.accessToken}`, + }), + body: (params: ConfluenceAddLabelParams) => ({ + domain: params.domain, + accessToken: params.accessToken, + pageId: params.pageId?.trim(), + labelName: params.labelName?.trim(), + prefix: params.prefix || 'global', + cloudId: params.cloudId, + }), + }, + + transformResponse: async (response: Response) => { + const data = await response.json() + return { + success: true, + output: { + ts: new Date().toISOString(), + pageId: data.pageId ?? '', + labelName: data.labelName ?? data.name ?? '', + labelId: data.id ?? '', + }, + } + }, + + outputs: { + ts: TIMESTAMP_OUTPUT, + pageId: { + type: 'string', + description: 'Page ID that the label was added to', + }, + labelName: { + type: 'string', + description: 'Name of the added label', + }, + labelId: { + type: 'string', + description: 'ID of the added label', + }, + }, +} diff --git a/apps/sim/tools/confluence/create_blogpost.ts b/apps/sim/tools/confluence/create_blogpost.ts new file mode 100644 index 000000000..b39e91b7f --- /dev/null +++ b/apps/sim/tools/confluence/create_blogpost.ts @@ -0,0 +1,151 @@ +import { + CONTENT_BODY_OUTPUT_PROPERTIES, + TIMESTAMP_OUTPUT, + VERSION_OUTPUT_PROPERTIES, +} from '@/tools/confluence/types' +import type { ToolConfig } from '@/tools/types' + +export interface ConfluenceCreateBlogPostParams { + accessToken: string + domain: string + spaceId: string + title: string + content: string + status?: string + cloudId?: string +} + +export interface ConfluenceCreateBlogPostResponse { + success: boolean + output: { + ts: string + id: string + title: string + status: string | null + spaceId: string + authorId: string | null + body: Record | 
null + version: Record | null + webUrl: string | null + } +} + +export const confluenceCreateBlogPostTool: ToolConfig< + ConfluenceCreateBlogPostParams, + ConfluenceCreateBlogPostResponse +> = { + id: 'confluence_create_blogpost', + name: 'Confluence Create Blog Post', + description: 'Create a new blog post in a Confluence space.', + version: '1.0.0', + + oauth: { + required: true, + provider: 'confluence', + }, + + params: { + accessToken: { + type: 'string', + required: true, + visibility: 'hidden', + description: 'OAuth access token for Confluence', + }, + domain: { + type: 'string', + required: true, + visibility: 'user-only', + description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)', + }, + spaceId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The ID of the space to create the blog post in', + }, + title: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'Title of the blog post', + }, + content: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'Blog post content in Confluence storage format (HTML)', + }, + status: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Blog post status: current (default) or draft', + }, + cloudId: { + type: 'string', + required: false, + visibility: 'user-only', + description: + 'Confluence Cloud ID for the instance. 
If not provided, it will be fetched using the domain.', + }, + }, + + request: { + url: () => '/api/tools/confluence/blogposts', + method: 'POST', + headers: (params: ConfluenceCreateBlogPostParams) => ({ + Accept: 'application/json', + 'Content-Type': 'application/json', + Authorization: `Bearer ${params.accessToken}`, + }), + body: (params: ConfluenceCreateBlogPostParams) => ({ + domain: params.domain, + accessToken: params.accessToken, + spaceId: params.spaceId?.trim(), + title: params.title, + content: params.content, + status: params.status || 'current', + cloudId: params.cloudId, + }), + }, + + transformResponse: async (response: Response) => { + const data = await response.json() + return { + success: true, + output: { + ts: new Date().toISOString(), + id: data.id ?? '', + title: data.title ?? '', + status: data.status ?? null, + spaceId: data.spaceId ?? '', + authorId: data.authorId ?? null, + body: data.body ?? null, + version: data.version ?? null, + webUrl: data.webUrl ?? data._links?.webui ?? 
null, + }, + } + }, + + outputs: { + ts: TIMESTAMP_OUTPUT, + id: { type: 'string', description: 'Created blog post ID' }, + title: { type: 'string', description: 'Blog post title' }, + status: { type: 'string', description: 'Blog post status', optional: true }, + spaceId: { type: 'string', description: 'Space ID' }, + authorId: { type: 'string', description: 'Author account ID', optional: true }, + body: { + type: 'object', + description: 'Blog post body content', + properties: CONTENT_BODY_OUTPUT_PROPERTIES, + optional: true, + }, + version: { + type: 'object', + description: 'Blog post version information', + properties: VERSION_OUTPUT_PROPERTIES, + optional: true, + }, + webUrl: { type: 'string', description: 'URL to view the blog post', optional: true }, + }, +} diff --git a/apps/sim/tools/confluence/create_page.ts b/apps/sim/tools/confluence/create_page.ts index 69b83dc1e..7a4fec8a8 100644 --- a/apps/sim/tools/confluence/create_page.ts +++ b/apps/sim/tools/confluence/create_page.ts @@ -1,3 +1,4 @@ +import { CONTENT_BODY_OUTPUT_PROPERTIES, VERSION_OUTPUT_PROPERTIES } from '@/tools/confluence/types' import type { ToolConfig } from '@/tools/types' export interface ConfluenceCreatePageParams { @@ -16,6 +17,11 @@ export interface ConfluenceCreatePageResponse { ts: string pageId: string title: string + status: string | null + spaceId: string | null + parentId: string | null + body: Record | null + version: Record | null url: string } } @@ -109,8 +115,13 @@ export const confluenceCreatePageTool: ToolConfig< success: true, output: { ts: new Date().toISOString(), - pageId: data.id, - title: data.title, + pageId: data.id ?? '', + title: data.title ?? '', + status: data.status ?? null, + spaceId: data.spaceId ?? null, + parentId: data.parentId ?? null, + body: data.body ?? null, + version: data.version ?? 
null, url: data.url || data._links?.webui || '', }, } @@ -120,6 +131,21 @@ export const confluenceCreatePageTool: ToolConfig< ts: { type: 'string', description: 'Timestamp of creation' }, pageId: { type: 'string', description: 'Created page ID' }, title: { type: 'string', description: 'Page title' }, + status: { type: 'string', description: 'Page status', optional: true }, + spaceId: { type: 'string', description: 'Space ID', optional: true }, + parentId: { type: 'string', description: 'Parent page ID', optional: true }, + body: { + type: 'object', + description: 'Page body content', + properties: CONTENT_BODY_OUTPUT_PROPERTIES, + optional: true, + }, + version: { + type: 'object', + description: 'Page version information', + properties: VERSION_OUTPUT_PROPERTIES, + optional: true, + }, url: { type: 'string', description: 'Page URL' }, }, } diff --git a/apps/sim/tools/confluence/create_page_property.ts b/apps/sim/tools/confluence/create_page_property.ts new file mode 100644 index 000000000..36ebfb04a --- /dev/null +++ b/apps/sim/tools/confluence/create_page_property.ts @@ -0,0 +1,127 @@ +import { TIMESTAMP_OUTPUT, VERSION_OUTPUT_PROPERTIES } from '@/tools/confluence/types' +import type { ToolConfig } from '@/tools/types' + +export interface ConfluenceCreatePagePropertyParams { + accessToken: string + domain: string + pageId: string + key: string + value: any + cloudId?: string +} + +export interface ConfluenceCreatePagePropertyResponse { + success: boolean + output: { + ts: string + pageId: string + propertyId: string + key: string + value: any + version: { + number: number + } | null + } +} + +export const confluenceCreatePagePropertyTool: ToolConfig< + ConfluenceCreatePagePropertyParams, + ConfluenceCreatePagePropertyResponse +> = { + id: 'confluence_create_page_property', + name: 'Confluence Create Page Property', + description: 'Create a new custom property (metadata) on a Confluence page.', + version: '1.0.0', + + oauth: { + required: true, + provider: 
'confluence', + }, + + params: { + accessToken: { + type: 'string', + required: true, + visibility: 'hidden', + description: 'OAuth access token for Confluence', + }, + domain: { + type: 'string', + required: true, + visibility: 'user-only', + description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)', + }, + pageId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The ID of the page to add the property to', + }, + key: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The key/name for the property', + }, + value: { + type: 'json', + required: true, + visibility: 'user-or-llm', + description: 'The value for the property (can be any JSON value)', + }, + cloudId: { + type: 'string', + required: false, + visibility: 'user-only', + description: + 'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.', + }, + }, + + request: { + url: () => '/api/tools/confluence/page-properties', + method: 'POST', + headers: (params: ConfluenceCreatePagePropertyParams) => ({ + Accept: 'application/json', + 'Content-Type': 'application/json', + Authorization: `Bearer ${params.accessToken}`, + }), + body: (params: ConfluenceCreatePagePropertyParams) => ({ + domain: params.domain, + accessToken: params.accessToken, + pageId: params.pageId?.trim(), + key: params.key, + value: params.value, + cloudId: params.cloudId, + }), + }, + + transformResponse: async (response: Response) => { + const data = await response.json() + return { + success: true, + output: { + ts: new Date().toISOString(), + pageId: data.pageId ?? '', + propertyId: data.id ?? '', + key: data.key ?? '', + value: data.value ?? null, + version: data.version ?? 
null, + }, + } + }, + + outputs: { + ts: TIMESTAMP_OUTPUT, + pageId: { type: 'string', description: 'ID of the page' }, + propertyId: { type: 'string', description: 'ID of the created property' }, + key: { type: 'string', description: 'Property key' }, + value: { type: 'json', description: 'Property value' }, + version: { + type: 'object', + description: 'Version information', + properties: VERSION_OUTPUT_PROPERTIES, + optional: true, + }, + }, +} diff --git a/apps/sim/tools/confluence/delete_page.ts b/apps/sim/tools/confluence/delete_page.ts index a9b35c33f..a648a2b37 100644 --- a/apps/sim/tools/confluence/delete_page.ts +++ b/apps/sim/tools/confluence/delete_page.ts @@ -4,6 +4,7 @@ export interface ConfluenceDeletePageParams { accessToken: string domain: string pageId: string + purge?: boolean cloudId?: string } @@ -22,7 +23,8 @@ export const confluenceDeletePageTool: ToolConfig< > = { id: 'confluence_delete_page', name: 'Confluence Delete Page', - description: 'Delete a Confluence page (moves it to trash where it can be restored).', + description: + 'Delete a Confluence page. 
By default moves to trash; use purge=true to permanently delete.', version: '1.0.0', oauth: { @@ -49,6 +51,13 @@ export const confluenceDeletePageTool: ToolConfig< visibility: 'user-or-llm', description: 'Confluence page ID to delete', }, + purge: { + type: 'boolean', + required: false, + visibility: 'user-or-llm', + description: + 'If true, permanently deletes the page instead of moving to trash (default: false)', + }, cloudId: { type: 'string', required: false, @@ -72,6 +81,7 @@ export const confluenceDeletePageTool: ToolConfig< domain: params.domain, accessToken: params.accessToken, pageId: params.pageId, + purge: params.purge || false, cloudId: params.cloudId, } }, diff --git a/apps/sim/tools/confluence/get_blogpost.ts b/apps/sim/tools/confluence/get_blogpost.ts new file mode 100644 index 000000000..94c9b02de --- /dev/null +++ b/apps/sim/tools/confluence/get_blogpost.ts @@ -0,0 +1,144 @@ +import { + CONTENT_BODY_OUTPUT_PROPERTIES, + TIMESTAMP_OUTPUT, + VERSION_OUTPUT_PROPERTIES, +} from '@/tools/confluence/types' +import type { ToolConfig } from '@/tools/types' + +export interface ConfluenceGetBlogPostParams { + accessToken: string + domain: string + blogPostId: string + bodyFormat?: string + cloudId?: string +} + +export interface ConfluenceGetBlogPostResponse { + success: boolean + output: { + ts: string + id: string + title: string + status: string | null + spaceId: string | null + authorId: string | null + createdAt: string | null + version: { + number: number + message?: string + createdAt?: string + } | null + body: { + storage?: { value: string } + } | null + webUrl: string | null + } +} + +export const confluenceGetBlogPostTool: ToolConfig< + ConfluenceGetBlogPostParams, + ConfluenceGetBlogPostResponse +> = { + id: 'confluence_get_blogpost', + name: 'Confluence Get Blog Post', + description: 'Get a specific Confluence blog post by ID, including its content.', + version: '1.0.0', + + oauth: { + required: true, + provider: 'confluence', + }, + + params: { 
+ accessToken: { + type: 'string', + required: true, + visibility: 'hidden', + description: 'OAuth access token for Confluence', + }, + domain: { + type: 'string', + required: true, + visibility: 'user-only', + description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)', + }, + blogPostId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The ID of the blog post to retrieve', + }, + bodyFormat: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Format for blog post body: storage, atlas_doc_format, or view', + }, + cloudId: { + type: 'string', + required: false, + visibility: 'user-only', + description: + 'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.', + }, + }, + + request: { + url: () => '/api/tools/confluence/blogposts', + method: 'POST', + headers: (params: ConfluenceGetBlogPostParams) => ({ + Accept: 'application/json', + 'Content-Type': 'application/json', + Authorization: `Bearer ${params.accessToken}`, + }), + body: (params: ConfluenceGetBlogPostParams) => ({ + domain: params.domain, + accessToken: params.accessToken, + blogPostId: params.blogPostId?.trim(), + bodyFormat: params.bodyFormat || 'storage', + cloudId: params.cloudId, + }), + }, + + transformResponse: async (response: Response) => { + const data = await response.json() + return { + success: true, + output: { + ts: new Date().toISOString(), + id: data.id ?? '', + title: data.title ?? '', + status: data.status ?? null, + spaceId: data.spaceId ?? null, + authorId: data.authorId ?? null, + createdAt: data.createdAt ?? null, + version: data.version ?? null, + body: data.body ?? null, + webUrl: data.webUrl ?? 
null, + }, + } + }, + + outputs: { + ts: TIMESTAMP_OUTPUT, + id: { type: 'string', description: 'Blog post ID' }, + title: { type: 'string', description: 'Blog post title' }, + status: { type: 'string', description: 'Blog post status', optional: true }, + spaceId: { type: 'string', description: 'Space ID', optional: true }, + authorId: { type: 'string', description: 'Author account ID', optional: true }, + createdAt: { type: 'string', description: 'Creation timestamp', optional: true }, + version: { + type: 'object', + description: 'Version information', + properties: VERSION_OUTPUT_PROPERTIES, + optional: true, + }, + body: { + type: 'object', + description: 'Blog post body content in requested format(s)', + properties: CONTENT_BODY_OUTPUT_PROPERTIES, + optional: true, + }, + webUrl: { type: 'string', description: 'URL to view the blog post', optional: true }, + }, +} diff --git a/apps/sim/tools/confluence/get_page_ancestors.ts b/apps/sim/tools/confluence/get_page_ancestors.ts new file mode 100644 index 000000000..20b7be3ca --- /dev/null +++ b/apps/sim/tools/confluence/get_page_ancestors.ts @@ -0,0 +1,126 @@ +import { TIMESTAMP_OUTPUT } from '@/tools/confluence/types' +import type { ToolConfig } from '@/tools/types' + +export interface ConfluenceGetPageAncestorsParams { + accessToken: string + domain: string + pageId: string + limit?: number + cloudId?: string +} + +export interface ConfluenceGetPageAncestorsResponse { + success: boolean + output: { + ts: string + pageId: string + ancestors: Array<{ + id: string + title: string + status: string | null + spaceId: string | null + webUrl: string | null + }> + } +} + +export const confluenceGetPageAncestorsTool: ToolConfig< + ConfluenceGetPageAncestorsParams, + ConfluenceGetPageAncestorsResponse +> = { + id: 'confluence_get_page_ancestors', + name: 'Confluence Get Page Ancestors', + description: + 'Get the ancestor (parent) pages of a specific Confluence page. 
Returns the full hierarchy from the page up to the root.', + version: '1.0.0', + + oauth: { + required: true, + provider: 'confluence', + }, + + params: { + accessToken: { + type: 'string', + required: true, + visibility: 'hidden', + description: 'OAuth access token for Confluence', + }, + domain: { + type: 'string', + required: true, + visibility: 'user-only', + description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)', + }, + pageId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The ID of the page to get ancestors for', + }, + limit: { + type: 'number', + required: false, + visibility: 'user-or-llm', + description: 'Maximum number of ancestors to return (default: 25, max: 250)', + }, + cloudId: { + type: 'string', + required: false, + visibility: 'user-only', + description: + 'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.', + }, + }, + + request: { + url: () => '/api/tools/confluence/page-ancestors', + method: 'POST', + headers: (params: ConfluenceGetPageAncestorsParams) => ({ + Accept: 'application/json', + 'Content-Type': 'application/json', + Authorization: `Bearer ${params.accessToken}`, + }), + body: (params: ConfluenceGetPageAncestorsParams) => ({ + domain: params.domain, + accessToken: params.accessToken, + pageId: params.pageId?.trim(), + limit: params.limit ? Number(params.limit) : 25, + cloudId: params.cloudId, + }), + }, + + transformResponse: async (response: Response) => { + const data = await response.json() + return { + success: true, + output: { + ts: new Date().toISOString(), + pageId: data.pageId ?? '', + ancestors: data.ancestors ?? 
[], + }, + } + }, + + outputs: { + ts: TIMESTAMP_OUTPUT, + pageId: { + type: 'string', + description: 'ID of the page whose ancestors were retrieved', + }, + ancestors: { + type: 'array', + description: 'Array of ancestor pages, ordered from direct parent to root', + items: { + type: 'object', + properties: { + id: { type: 'string', description: 'Ancestor page ID' }, + title: { type: 'string', description: 'Ancestor page title' }, + status: { type: 'string', description: 'Page status', optional: true }, + spaceId: { type: 'string', description: 'Space ID', optional: true }, + webUrl: { type: 'string', description: 'URL to view the page', optional: true }, + }, + }, + }, + }, +} diff --git a/apps/sim/tools/confluence/get_page_children.ts b/apps/sim/tools/confluence/get_page_children.ts new file mode 100644 index 000000000..7ca7ca10e --- /dev/null +++ b/apps/sim/tools/confluence/get_page_children.ts @@ -0,0 +1,143 @@ +import { TIMESTAMP_OUTPUT } from '@/tools/confluence/types' +import type { ToolConfig } from '@/tools/types' + +export interface ConfluenceGetPageChildrenParams { + accessToken: string + domain: string + pageId: string + limit?: number + cursor?: string + cloudId?: string +} + +export interface ConfluenceGetPageChildrenResponse { + success: boolean + output: { + ts: string + parentId: string + children: Array<{ + id: string + title: string + status: string | null + spaceId: string | null + childPosition: number | null + webUrl: string | null + }> + nextCursor: string | null + } +} + +export const confluenceGetPageChildrenTool: ToolConfig< + ConfluenceGetPageChildrenParams, + ConfluenceGetPageChildrenResponse +> = { + id: 'confluence_get_page_children', + name: 'Confluence Get Page Children', + description: + 'Get all child pages of a specific Confluence page. 
Useful for navigating page hierarchies.', + version: '1.0.0', + + oauth: { + required: true, + provider: 'confluence', + }, + + params: { + accessToken: { + type: 'string', + required: true, + visibility: 'hidden', + description: 'OAuth access token for Confluence', + }, + domain: { + type: 'string', + required: true, + visibility: 'user-only', + description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)', + }, + pageId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The ID of the parent page to get children from', + }, + limit: { + type: 'number', + required: false, + visibility: 'user-or-llm', + description: 'Maximum number of child pages to return (default: 50, max: 250)', + }, + cursor: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Pagination cursor from previous response to get the next page of results', + }, + cloudId: { + type: 'string', + required: false, + visibility: 'user-only', + description: + 'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.', + }, + }, + + request: { + url: () => '/api/tools/confluence/page-children', + method: 'POST', + headers: (params: ConfluenceGetPageChildrenParams) => ({ + Accept: 'application/json', + 'Content-Type': 'application/json', + Authorization: `Bearer ${params.accessToken}`, + }), + body: (params: ConfluenceGetPageChildrenParams) => ({ + domain: params.domain, + accessToken: params.accessToken, + pageId: params.pageId?.trim(), + limit: params.limit ? Number(params.limit) : 50, + cursor: params.cursor, + cloudId: params.cloudId, + }), + }, + + transformResponse: async (response: Response) => { + const data = await response.json() + return { + success: true, + output: { + ts: new Date().toISOString(), + parentId: data.parentId ?? '', + children: data.children ?? [], + nextCursor: data.nextCursor ?? 
null, + }, + } + }, + + outputs: { + ts: TIMESTAMP_OUTPUT, + parentId: { + type: 'string', + description: 'ID of the parent page', + }, + children: { + type: 'array', + description: 'Array of child pages', + items: { + type: 'object', + properties: { + id: { type: 'string', description: 'Child page ID' }, + title: { type: 'string', description: 'Child page title' }, + status: { type: 'string', description: 'Page status', optional: true }, + spaceId: { type: 'string', description: 'Space ID', optional: true }, + childPosition: { type: 'number', description: 'Position among siblings', optional: true }, + webUrl: { type: 'string', description: 'URL to view the page', optional: true }, + }, + }, + }, + nextCursor: { + type: 'string', + description: 'Cursor for fetching the next page of results', + optional: true, + }, + }, +} diff --git a/apps/sim/tools/confluence/get_page_version.ts b/apps/sim/tools/confluence/get_page_version.ts new file mode 100644 index 000000000..c162e2546 --- /dev/null +++ b/apps/sim/tools/confluence/get_page_version.ts @@ -0,0 +1,123 @@ +import { DETAILED_VERSION_OUTPUT_PROPERTIES, TIMESTAMP_OUTPUT } from '@/tools/confluence/types' +import type { ToolConfig } from '@/tools/types' + +export interface ConfluenceGetPageVersionParams { + accessToken: string + domain: string + pageId: string + versionNumber: number + cloudId?: string +} + +export interface ConfluenceGetPageVersionResponse { + success: boolean + output: { + ts: string + pageId: string + version: { + number: number + message: string | null + minorEdit: boolean + authorId: string | null + createdAt: string | null + contentTypeModified: boolean | null + collaborators: string[] | null + prevVersion: number | null + nextVersion: number | null + } + } +} + +export const confluenceGetPageVersionTool: ToolConfig< + ConfluenceGetPageVersionParams, + ConfluenceGetPageVersionResponse +> = { + id: 'confluence_get_page_version', + name: 'Confluence Get Page Version', + description: 'Get details 
about a specific version of a Confluence page.', + version: '1.0.0', + + oauth: { + required: true, + provider: 'confluence', + }, + + params: { + accessToken: { + type: 'string', + required: true, + visibility: 'hidden', + description: 'OAuth access token for Confluence', + }, + domain: { + type: 'string', + required: true, + visibility: 'user-only', + description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)', + }, + pageId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The ID of the page', + }, + versionNumber: { + type: 'number', + required: true, + visibility: 'user-or-llm', + description: 'The version number to retrieve (e.g., 1, 2, 3)', + }, + cloudId: { + type: 'string', + required: false, + visibility: 'user-only', + description: + 'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.', + }, + }, + + request: { + url: () => '/api/tools/confluence/page-versions', + method: 'POST', + headers: (params: ConfluenceGetPageVersionParams) => ({ + Accept: 'application/json', + 'Content-Type': 'application/json', + Authorization: `Bearer ${params.accessToken}`, + }), + body: (params: ConfluenceGetPageVersionParams) => ({ + domain: params.domain, + accessToken: params.accessToken, + pageId: params.pageId?.trim(), + versionNumber: Number(params.versionNumber), + cloudId: params.cloudId, + }), + }, + + transformResponse: async (response: Response) => { + const data = await response.json() + return { + success: true, + output: { + ts: new Date().toISOString(), + pageId: data.pageId ?? '', + version: data.version ?? 
{ + number: 0, + message: null, + minorEdit: false, + authorId: null, + createdAt: null, + }, + }, + } + }, + + outputs: { + ts: TIMESTAMP_OUTPUT, + pageId: { type: 'string', description: 'ID of the page' }, + version: { + type: 'object', + description: 'Detailed version information', + properties: DETAILED_VERSION_OUTPUT_PROPERTIES, + }, + }, +} diff --git a/apps/sim/tools/confluence/get_space.ts b/apps/sim/tools/confluence/get_space.ts index a6608f143..fbadd7a65 100644 --- a/apps/sim/tools/confluence/get_space.ts +++ b/apps/sim/tools/confluence/get_space.ts @@ -1,3 +1,4 @@ +import { SPACE_DESCRIPTION_OUTPUT_PROPERTIES, TIMESTAMP_OUTPUT } from '@/tools/confluence/types' import type { ToolConfig } from '@/tools/types' export interface ConfluenceGetSpaceParams { @@ -17,6 +18,13 @@ export interface ConfluenceGetSpaceResponse { type: string status: string url: string + authorId: string | null + createdAt: string | null + homepageId: string | null + description: { + value: string + representation: string + } | null } } @@ -95,17 +103,34 @@ export const confluenceGetSpaceTool: ToolConfig< type: data.type, status: data.status, url: data._links?.webui || '', + authorId: data.authorId ?? null, + createdAt: data.createdAt ?? null, + homepageId: data.homepageId ?? null, + description: data.description ?? 
null, }, } }, outputs: { - ts: { type: 'string', description: 'Timestamp of retrieval' }, + ts: TIMESTAMP_OUTPUT, spaceId: { type: 'string', description: 'Space ID' }, name: { type: 'string', description: 'Space name' }, key: { type: 'string', description: 'Space key' }, - type: { type: 'string', description: 'Space type' }, - status: { type: 'string', description: 'Space status' }, - url: { type: 'string', description: 'Space URL' }, + type: { type: 'string', description: 'Space type (global, personal)' }, + status: { type: 'string', description: 'Space status (current, archived)' }, + url: { type: 'string', description: 'URL to view the space in Confluence' }, + authorId: { type: 'string', description: 'Account ID of the space creator', optional: true }, + createdAt: { + type: 'string', + description: 'ISO 8601 timestamp when the space was created', + optional: true, + }, + homepageId: { type: 'string', description: 'ID of the space homepage', optional: true }, + description: { + type: 'object', + description: 'Space description content', + properties: SPACE_DESCRIPTION_OUTPUT_PROPERTIES, + optional: true, + }, }, } diff --git a/apps/sim/tools/confluence/index.ts b/apps/sim/tools/confluence/index.ts index 33f5f2b96..d78645b15 100644 --- a/apps/sim/tools/confluence/index.ts +++ b/apps/sim/tools/confluence/index.ts @@ -1,24 +1,42 @@ +import { confluenceAddLabelTool } from '@/tools/confluence/add_label' +import { confluenceCreateBlogPostTool } from '@/tools/confluence/create_blogpost' import { confluenceCreateCommentTool } from '@/tools/confluence/create_comment' import { confluenceCreatePageTool } from '@/tools/confluence/create_page' +import { confluenceCreatePagePropertyTool } from '@/tools/confluence/create_page_property' import { confluenceDeleteAttachmentTool } from '@/tools/confluence/delete_attachment' import { confluenceDeleteCommentTool } from '@/tools/confluence/delete_comment' import { confluenceDeletePageTool } from '@/tools/confluence/delete_page' 
+import { confluenceGetBlogPostTool } from '@/tools/confluence/get_blogpost' +import { confluenceGetPageAncestorsTool } from '@/tools/confluence/get_page_ancestors' +import { confluenceGetPageChildrenTool } from '@/tools/confluence/get_page_children' +import { confluenceGetPageVersionTool } from '@/tools/confluence/get_page_version' import { confluenceGetSpaceTool } from '@/tools/confluence/get_space' import { confluenceListAttachmentsTool } from '@/tools/confluence/list_attachments' +import { confluenceListBlogPostsTool } from '@/tools/confluence/list_blogposts' +import { confluenceListBlogPostsInSpaceTool } from '@/tools/confluence/list_blogposts_in_space' import { confluenceListCommentsTool } from '@/tools/confluence/list_comments' import { confluenceListLabelsTool } from '@/tools/confluence/list_labels' +import { confluenceListPagePropertiesTool } from '@/tools/confluence/list_page_properties' +import { confluenceListPageVersionsTool } from '@/tools/confluence/list_page_versions' +import { confluenceListPagesInSpaceTool } from '@/tools/confluence/list_pages_in_space' import { confluenceListSpacesTool } from '@/tools/confluence/list_spaces' import { confluenceRetrieveTool } from '@/tools/confluence/retrieve' import { confluenceSearchTool } from '@/tools/confluence/search' +import { confluenceSearchInSpaceTool } from '@/tools/confluence/search_in_space' import { ATTACHMENT_ITEM_PROPERTIES, ATTACHMENT_OUTPUT, ATTACHMENTS_OUTPUT, + BODY_FORMAT_PROPERTIES, COMMENT_BODY_OUTPUT_PROPERTIES, COMMENT_ITEM_PROPERTIES, COMMENT_OUTPUT, COMMENTS_OUTPUT, + CONTENT_BODY_OUTPUT, + CONTENT_BODY_OUTPUT_PROPERTIES, DELETED_OUTPUT, + DETAILED_VERSION_OUTPUT, + DETAILED_VERSION_OUTPUT_PROPERTIES, LABEL_ITEM_PROPERTIES, LABEL_OUTPUT, LABELS_OUTPUT, @@ -46,20 +64,41 @@ import { confluenceUpdateCommentTool } from '@/tools/confluence/update_comment' import { confluenceUploadAttachmentTool } from '@/tools/confluence/upload_attachment' export { - // Tools + // Page Tools 
confluenceRetrieveTool, confluenceUpdateTool, confluenceCreatePageTool, confluenceDeletePageTool, + confluenceListPagesInSpaceTool, + confluenceGetPageChildrenTool, + confluenceGetPageAncestorsTool, + // Page Version Tools + confluenceListPageVersionsTool, + confluenceGetPageVersionTool, + // Page Properties Tools + confluenceListPagePropertiesTool, + confluenceCreatePagePropertyTool, + // Blog Post Tools + confluenceListBlogPostsTool, + confluenceGetBlogPostTool, + confluenceCreateBlogPostTool, + confluenceListBlogPostsInSpaceTool, + // Search Tools confluenceSearchTool, + confluenceSearchInSpaceTool, + // Comment Tools confluenceCreateCommentTool, confluenceListCommentsTool, confluenceUpdateCommentTool, confluenceDeleteCommentTool, + // Attachment Tools confluenceListAttachmentsTool, confluenceDeleteAttachmentTool, confluenceUploadAttachmentTool, + // Label Tools confluenceListLabelsTool, + confluenceAddLabelTool, + // Space Tools confluenceGetSpaceTool, confluenceListSpacesTool, // Item property constants (for use in outputs) @@ -70,7 +109,10 @@ export { SEARCH_RESULT_ITEM_PROPERTIES, SPACE_ITEM_PROPERTIES, VERSION_OUTPUT_PROPERTIES, + DETAILED_VERSION_OUTPUT_PROPERTIES, COMMENT_BODY_OUTPUT_PROPERTIES, + CONTENT_BODY_OUTPUT_PROPERTIES, + BODY_FORMAT_PROPERTIES, SPACE_DESCRIPTION_OUTPUT_PROPERTIES, SEARCH_RESULT_SPACE_PROPERTIES, PAGINATION_LINKS_PROPERTIES, @@ -79,6 +121,8 @@ export { ATTACHMENTS_OUTPUT, COMMENT_OUTPUT, COMMENTS_OUTPUT, + CONTENT_BODY_OUTPUT, + DETAILED_VERSION_OUTPUT, LABEL_OUTPUT, LABELS_OUTPUT, PAGE_OUTPUT, diff --git a/apps/sim/tools/confluence/list_attachments.ts b/apps/sim/tools/confluence/list_attachments.ts index 103869617..932aa9b68 100644 --- a/apps/sim/tools/confluence/list_attachments.ts +++ b/apps/sim/tools/confluence/list_attachments.ts @@ -6,6 +6,7 @@ export interface ConfluenceListAttachmentsParams { domain: string pageId: string limit?: number + cursor?: string cloudId?: string } @@ -20,6 +21,7 @@ export interface 
ConfluenceListAttachmentsResponse { mediaType: string downloadUrl: string }> + nextCursor: string | null } } @@ -60,7 +62,13 @@ export const confluenceListAttachmentsTool: ToolConfig< type: 'number', required: false, visibility: 'user-or-llm', - description: 'Maximum number of attachments to return (default: 25)', + description: 'Maximum number of attachments to return (default: 50, max: 250)', + }, + cursor: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Pagination cursor from previous response', }, cloudId: { type: 'string', @@ -77,8 +85,11 @@ export const confluenceListAttachmentsTool: ToolConfig< domain: params.domain, accessToken: params.accessToken, pageId: params.pageId, - limit: String(params.limit || 25), + limit: String(params.limit || 50), }) + if (params.cursor) { + query.set('cursor', params.cursor) + } if (params.cloudId) { query.set('cloudId', params.cloudId) } @@ -91,15 +102,6 @@ export const confluenceListAttachmentsTool: ToolConfig< Authorization: `Bearer ${params.accessToken}`, } }, - body: (params: ConfluenceListAttachmentsParams) => { - return { - domain: params.domain, - accessToken: params.accessToken, - cloudId: params.cloudId, - pageId: params.pageId, - limit: params.limit ? Number(params.limit) : 25, - } - }, }, transformResponse: async (response: Response) => { @@ -109,6 +111,7 @@ export const confluenceListAttachmentsTool: ToolConfig< output: { ts: new Date().toISOString(), attachments: data.attachments || [], + nextCursor: data.nextCursor ?? 
null, }, } }, @@ -116,5 +119,10 @@ export const confluenceListAttachmentsTool: ToolConfig< outputs: { ts: TIMESTAMP_OUTPUT, attachments: ATTACHMENTS_OUTPUT, + nextCursor: { + type: 'string', + description: 'Cursor for fetching the next page of results', + optional: true, + }, }, } diff --git a/apps/sim/tools/confluence/list_blogposts.ts b/apps/sim/tools/confluence/list_blogposts.ts new file mode 100644 index 000000000..a6b78e2b5 --- /dev/null +++ b/apps/sim/tools/confluence/list_blogposts.ts @@ -0,0 +1,167 @@ +import { TIMESTAMP_OUTPUT, VERSION_OUTPUT_PROPERTIES } from '@/tools/confluence/types' +import type { ToolConfig } from '@/tools/types' + +export interface ConfluenceListBlogPostsParams { + accessToken: string + domain: string + limit?: number + status?: string + sort?: string + cursor?: string + cloudId?: string +} + +export interface ConfluenceListBlogPostsResponse { + success: boolean + output: { + ts: string + blogPosts: Array<{ + id: string + title: string + status: string | null + spaceId: string | null + authorId: string | null + createdAt: string | null + version: { + number: number + message?: string + createdAt?: string + } | null + webUrl: string | null + }> + nextCursor: string | null + } +} + +export const confluenceListBlogPostsTool: ToolConfig< + ConfluenceListBlogPostsParams, + ConfluenceListBlogPostsResponse +> = { + id: 'confluence_list_blogposts', + name: 'Confluence List Blog Posts', + description: 'List all blog posts across all accessible Confluence spaces.', + version: '1.0.0', + + oauth: { + required: true, + provider: 'confluence', + }, + + params: { + accessToken: { + type: 'string', + required: true, + visibility: 'hidden', + description: 'OAuth access token for Confluence', + }, + domain: { + type: 'string', + required: true, + visibility: 'user-only', + description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)', + }, + limit: { + type: 'number', + required: false, + visibility: 'user-or-llm', + description: 'Maximum 
number of blog posts to return (default: 25, max: 250)', + }, + status: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Filter by status: current, archived, trashed, or draft', + }, + sort: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: + 'Sort order: created-date, -created-date, modified-date, -modified-date, title, -title', + }, + cursor: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Pagination cursor from previous response', + }, + cloudId: { + type: 'string', + required: false, + visibility: 'user-only', + description: + 'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.', + }, + }, + + request: { + url: (params: ConfluenceListBlogPostsParams) => { + const query = new URLSearchParams({ + domain: params.domain, + accessToken: params.accessToken, + limit: String(params.limit || 25), + }) + if (params.status) { + query.set('status', params.status) + } + if (params.sort) { + query.set('sort', params.sort) + } + if (params.cursor) { + query.set('cursor', params.cursor) + } + if (params.cloudId) { + query.set('cloudId', params.cloudId) + } + return `/api/tools/confluence/blogposts?${query.toString()}` + }, + method: 'GET', + headers: (params: ConfluenceListBlogPostsParams) => ({ + Accept: 'application/json', + Authorization: `Bearer ${params.accessToken}`, + }), + }, + + transformResponse: async (response: Response) => { + const data = await response.json() + return { + success: true, + output: { + ts: new Date().toISOString(), + blogPosts: data.blogPosts ?? [], + nextCursor: data.nextCursor ?? 
null, + }, + } + }, + + outputs: { + ts: TIMESTAMP_OUTPUT, + blogPosts: { + type: 'array', + description: 'Array of blog posts', + items: { + type: 'object', + properties: { + id: { type: 'string', description: 'Blog post ID' }, + title: { type: 'string', description: 'Blog post title' }, + status: { type: 'string', description: 'Blog post status', optional: true }, + spaceId: { type: 'string', description: 'Space ID', optional: true }, + authorId: { type: 'string', description: 'Author account ID', optional: true }, + createdAt: { type: 'string', description: 'Creation timestamp', optional: true }, + version: { + type: 'object', + description: 'Version information', + properties: VERSION_OUTPUT_PROPERTIES, + optional: true, + }, + webUrl: { type: 'string', description: 'URL to view the blog post', optional: true }, + }, + }, + }, + nextCursor: { + type: 'string', + description: 'Cursor for fetching the next page of results', + optional: true, + }, + }, +} diff --git a/apps/sim/tools/confluence/list_blogposts_in_space.ts b/apps/sim/tools/confluence/list_blogposts_in_space.ts new file mode 100644 index 000000000..d32fcd9f4 --- /dev/null +++ b/apps/sim/tools/confluence/list_blogposts_in_space.ts @@ -0,0 +1,178 @@ +import { + CONTENT_BODY_OUTPUT_PROPERTIES, + TIMESTAMP_OUTPUT, + VERSION_OUTPUT_PROPERTIES, +} from '@/tools/confluence/types' +import type { ToolConfig } from '@/tools/types' + +export interface ConfluenceListBlogPostsInSpaceParams { + accessToken: string + domain: string + spaceId: string + limit?: number + status?: string + bodyFormat?: string + cursor?: string + cloudId?: string +} + +export interface ConfluenceListBlogPostsInSpaceResponse { + success: boolean + output: { + ts: string + blogPosts: Array<{ + id: string + title: string + status: string | null + spaceId: string | null + authorId: string | null + createdAt: string | null + version: { + number: number + message?: string + createdAt?: string + } | null + body: { + storage?: { value: string } 
+ } | null + webUrl: string | null + }> + nextCursor: string | null + } +} + +export const confluenceListBlogPostsInSpaceTool: ToolConfig< + ConfluenceListBlogPostsInSpaceParams, + ConfluenceListBlogPostsInSpaceResponse +> = { + id: 'confluence_list_blogposts_in_space', + name: 'Confluence List Blog Posts in Space', + description: 'List all blog posts within a specific Confluence space.', + version: '1.0.0', + + oauth: { + required: true, + provider: 'confluence', + }, + + params: { + accessToken: { + type: 'string', + required: true, + visibility: 'hidden', + description: 'OAuth access token for Confluence', + }, + domain: { + type: 'string', + required: true, + visibility: 'user-only', + description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)', + }, + spaceId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The ID of the Confluence space to list blog posts from', + }, + limit: { + type: 'number', + required: false, + visibility: 'user-or-llm', + description: 'Maximum number of blog posts to return (default: 25, max: 250)', + }, + status: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Filter by status: current, archived, trashed, or draft', + }, + bodyFormat: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Format for blog post body: storage, atlas_doc_format, or view', + }, + cursor: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Pagination cursor from previous response', + }, + cloudId: { + type: 'string', + required: false, + visibility: 'user-only', + description: + 'Confluence Cloud ID for the instance. 
If not provided, it will be fetched using the domain.', + }, + }, + + request: { + url: () => '/api/tools/confluence/space-blogposts', + method: 'POST', + headers: (params: ConfluenceListBlogPostsInSpaceParams) => ({ + Accept: 'application/json', + 'Content-Type': 'application/json', + Authorization: `Bearer ${params.accessToken}`, + }), + body: (params: ConfluenceListBlogPostsInSpaceParams) => ({ + domain: params.domain, + accessToken: params.accessToken, + spaceId: params.spaceId?.trim(), + limit: params.limit ? Number(params.limit) : 25, + status: params.status, + bodyFormat: params.bodyFormat, + cursor: params.cursor, + cloudId: params.cloudId, + }), + }, + + transformResponse: async (response: Response) => { + const data = await response.json() + return { + success: true, + output: { + ts: new Date().toISOString(), + blogPosts: data.blogPosts ?? [], + nextCursor: data.nextCursor ?? null, + }, + } + }, + + outputs: { + ts: TIMESTAMP_OUTPUT, + blogPosts: { + type: 'array', + description: 'Array of blog posts in the space', + items: { + type: 'object', + properties: { + id: { type: 'string', description: 'Blog post ID' }, + title: { type: 'string', description: 'Blog post title' }, + status: { type: 'string', description: 'Blog post status', optional: true }, + spaceId: { type: 'string', description: 'Space ID', optional: true }, + authorId: { type: 'string', description: 'Author account ID', optional: true }, + createdAt: { type: 'string', description: 'Creation timestamp', optional: true }, + version: { + type: 'object', + description: 'Version information', + properties: VERSION_OUTPUT_PROPERTIES, + optional: true, + }, + body: { + type: 'object', + description: 'Blog post body content', + properties: CONTENT_BODY_OUTPUT_PROPERTIES, + optional: true, + }, + webUrl: { type: 'string', description: 'URL to view the blog post', optional: true }, + }, + }, + }, + nextCursor: { + type: 'string', + description: 'Cursor for fetching the next page of results', + 
optional: true, + }, + }, +} diff --git a/apps/sim/tools/confluence/list_comments.ts b/apps/sim/tools/confluence/list_comments.ts index d4876b4b6..d841dd660 100644 --- a/apps/sim/tools/confluence/list_comments.ts +++ b/apps/sim/tools/confluence/list_comments.ts @@ -6,6 +6,8 @@ export interface ConfluenceListCommentsParams { domain: string pageId: string limit?: number + bodyFormat?: string + cursor?: string cloudId?: string } @@ -19,6 +21,7 @@ export interface ConfluenceListCommentsResponse { createdAt: string authorId: string }> + nextCursor: string | null } } @@ -61,6 +64,19 @@ export const confluenceListCommentsTool: ToolConfig< visibility: 'user-or-llm', description: 'Maximum number of comments to return (default: 25)', }, + bodyFormat: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: + 'Format for the comment body: storage, atlas_doc_format, view, or export_view (default: storage)', + }, + cursor: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Pagination cursor from previous response', + }, cloudId: { type: 'string', required: false, @@ -78,6 +94,12 @@ export const confluenceListCommentsTool: ToolConfig< pageId: params.pageId, limit: String(params.limit || 25), }) + if (params.bodyFormat) { + query.set('bodyFormat', params.bodyFormat) + } + if (params.cursor) { + query.set('cursor', params.cursor) + } if (params.cloudId) { query.set('cloudId', params.cloudId) } @@ -90,15 +112,6 @@ export const confluenceListCommentsTool: ToolConfig< Authorization: `Bearer ${params.accessToken}`, } }, - body: (params: ConfluenceListCommentsParams) => { - return { - domain: params.domain, - accessToken: params.accessToken, - cloudId: params.cloudId, - pageId: params.pageId, - limit: params.limit ? 
Number(params.limit) : 25, - } - }, }, transformResponse: async (response: Response) => { @@ -108,6 +121,7 @@ export const confluenceListCommentsTool: ToolConfig< output: { ts: new Date().toISOString(), comments: data.comments || [], + nextCursor: data.nextCursor ?? null, }, } }, @@ -115,5 +129,10 @@ export const confluenceListCommentsTool: ToolConfig< outputs: { ts: TIMESTAMP_OUTPUT, comments: COMMENTS_OUTPUT, + nextCursor: { + type: 'string', + description: 'Cursor for fetching the next page of results', + optional: true, + }, }, } diff --git a/apps/sim/tools/confluence/list_labels.ts b/apps/sim/tools/confluence/list_labels.ts index 4adb838ad..c28bac10b 100644 --- a/apps/sim/tools/confluence/list_labels.ts +++ b/apps/sim/tools/confluence/list_labels.ts @@ -5,6 +5,8 @@ export interface ConfluenceListLabelsParams { accessToken: string domain: string pageId: string + limit?: number + cursor?: string cloudId?: string } @@ -17,6 +19,7 @@ export interface ConfluenceListLabelsResponse { name: string prefix: string }> + nextCursor: string | null } } @@ -53,6 +56,18 @@ export const confluenceListLabelsTool: ToolConfig< visibility: 'user-or-llm', description: 'Confluence page ID to list labels from', }, + limit: { + type: 'number', + required: false, + visibility: 'user-or-llm', + description: 'Maximum number of labels to return (default: 25, max: 250)', + }, + cursor: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Pagination cursor from previous response', + }, cloudId: { type: 'string', required: false, @@ -68,7 +83,11 @@ export const confluenceListLabelsTool: ToolConfig< domain: params.domain, accessToken: params.accessToken, pageId: params.pageId, + limit: String(params.limit || 25), }) + if (params.cursor) { + query.set('cursor', params.cursor) + } if (params.cloudId) { query.set('cloudId', params.cloudId) } @@ -90,6 +109,7 @@ export const confluenceListLabelsTool: ToolConfig< output: { ts: new Date().toISOString(), labels: 
data.labels || [], + nextCursor: data.nextCursor ?? null, }, } }, @@ -104,5 +124,10 @@ export const confluenceListLabelsTool: ToolConfig< properties: LABEL_ITEM_PROPERTIES, }, }, + nextCursor: { + type: 'string', + description: 'Cursor for fetching the next page of results', + optional: true, + }, }, } diff --git a/apps/sim/tools/confluence/list_page_properties.ts b/apps/sim/tools/confluence/list_page_properties.ts new file mode 100644 index 000000000..cd26739c4 --- /dev/null +++ b/apps/sim/tools/confluence/list_page_properties.ts @@ -0,0 +1,149 @@ +import { TIMESTAMP_OUTPUT, VERSION_OUTPUT_PROPERTIES } from '@/tools/confluence/types' +import type { ToolConfig } from '@/tools/types' + +export interface ConfluenceListPagePropertiesParams { + accessToken: string + domain: string + pageId: string + limit?: number + cursor?: string + cloudId?: string +} + +export interface ConfluenceListPagePropertiesResponse { + success: boolean + output: { + ts: string + pageId: string + properties: Array<{ + id: string + key: string + value: any + version: { + number: number + message?: string + createdAt?: string + } | null + }> + nextCursor: string | null + } +} + +export const confluenceListPagePropertiesTool: ToolConfig< + ConfluenceListPagePropertiesParams, + ConfluenceListPagePropertiesResponse +> = { + id: 'confluence_list_page_properties', + name: 'Confluence List Page Properties', + description: 'List all custom properties (metadata) attached to a Confluence page.', + version: '1.0.0', + + oauth: { + required: true, + provider: 'confluence', + }, + + params: { + accessToken: { + type: 'string', + required: true, + visibility: 'hidden', + description: 'OAuth access token for Confluence', + }, + domain: { + type: 'string', + required: true, + visibility: 'user-only', + description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)', + }, + pageId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The ID of the page to list properties 
from', + }, + limit: { + type: 'number', + required: false, + visibility: 'user-or-llm', + description: 'Maximum number of properties to return (default: 50, max: 250)', + }, + cursor: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Pagination cursor from previous response', + }, + cloudId: { + type: 'string', + required: false, + visibility: 'user-only', + description: + 'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.', + }, + }, + + request: { + url: (params: ConfluenceListPagePropertiesParams) => { + const query = new URLSearchParams({ + domain: params.domain, + accessToken: params.accessToken, + pageId: params.pageId, + limit: String(params.limit || 50), + }) + if (params.cursor) { + query.set('cursor', params.cursor) + } + if (params.cloudId) { + query.set('cloudId', params.cloudId) + } + return `/api/tools/confluence/page-properties?${query.toString()}` + }, + method: 'GET', + headers: (params: ConfluenceListPagePropertiesParams) => ({ + Accept: 'application/json', + Authorization: `Bearer ${params.accessToken}`, + }), + }, + + transformResponse: async (response: Response) => { + const data = await response.json() + return { + success: true, + output: { + ts: new Date().toISOString(), + pageId: data.pageId ?? '', + properties: data.properties ?? [], + nextCursor: data.nextCursor ?? 
null, + }, + } + }, + + outputs: { + ts: TIMESTAMP_OUTPUT, + pageId: { type: 'string', description: 'ID of the page' }, + properties: { + type: 'array', + description: 'Array of content properties', + items: { + type: 'object', + properties: { + id: { type: 'string', description: 'Property ID' }, + key: { type: 'string', description: 'Property key' }, + value: { type: 'json', description: 'Property value (can be any JSON)' }, + version: { + type: 'object', + description: 'Version information', + properties: VERSION_OUTPUT_PROPERTIES, + optional: true, + }, + }, + }, + }, + nextCursor: { + type: 'string', + description: 'Cursor for fetching the next page of results', + optional: true, + }, + }, +} diff --git a/apps/sim/tools/confluence/list_page_versions.ts b/apps/sim/tools/confluence/list_page_versions.ts new file mode 100644 index 000000000..8e97f9fde --- /dev/null +++ b/apps/sim/tools/confluence/list_page_versions.ts @@ -0,0 +1,131 @@ +import { TIMESTAMP_OUTPUT, VERSION_OUTPUT_PROPERTIES } from '@/tools/confluence/types' +import type { ToolConfig } from '@/tools/types' + +export interface ConfluenceListPageVersionsParams { + accessToken: string + domain: string + pageId: string + limit?: number + cursor?: string + cloudId?: string +} + +export interface ConfluenceListPageVersionsResponse { + success: boolean + output: { + ts: string + pageId: string + versions: Array<{ + number: number + message: string | null + minorEdit: boolean + authorId: string | null + createdAt: string | null + }> + nextCursor: string | null + } +} + +export const confluenceListPageVersionsTool: ToolConfig< + ConfluenceListPageVersionsParams, + ConfluenceListPageVersionsResponse +> = { + id: 'confluence_list_page_versions', + name: 'Confluence List Page Versions', + description: 'List all versions (revision history) of a Confluence page.', + version: '1.0.0', + + oauth: { + required: true, + provider: 'confluence', + }, + + params: { + accessToken: { + type: 'string', + required: true, + 
visibility: 'hidden', + description: 'OAuth access token for Confluence', + }, + domain: { + type: 'string', + required: true, + visibility: 'user-only', + description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)', + }, + pageId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The ID of the page to get versions for', + }, + limit: { + type: 'number', + required: false, + visibility: 'user-or-llm', + description: 'Maximum number of versions to return (default: 50, max: 250)', + }, + cursor: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Pagination cursor from previous response', + }, + cloudId: { + type: 'string', + required: false, + visibility: 'user-only', + description: + 'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.', + }, + }, + + request: { + url: () => '/api/tools/confluence/page-versions', + method: 'POST', + headers: (params: ConfluenceListPageVersionsParams) => ({ + Accept: 'application/json', + 'Content-Type': 'application/json', + Authorization: `Bearer ${params.accessToken}`, + }), + body: (params: ConfluenceListPageVersionsParams) => ({ + domain: params.domain, + accessToken: params.accessToken, + pageId: params.pageId?.trim(), + limit: params.limit ? Number(params.limit) : 50, + cursor: params.cursor, + cloudId: params.cloudId, + }), + }, + + transformResponse: async (response: Response) => { + const data = await response.json() + return { + success: true, + output: { + ts: new Date().toISOString(), + pageId: data.pageId ?? '', + versions: data.versions ?? [], + nextCursor: data.nextCursor ?? 
null, + }, + } + }, + + outputs: { + ts: TIMESTAMP_OUTPUT, + pageId: { type: 'string', description: 'ID of the page' }, + versions: { + type: 'array', + description: 'Array of page versions', + items: { + type: 'object', + properties: VERSION_OUTPUT_PROPERTIES, + }, + }, + nextCursor: { + type: 'string', + description: 'Cursor for fetching the next page of results', + optional: true, + }, + }, +} diff --git a/apps/sim/tools/confluence/list_pages_in_space.ts b/apps/sim/tools/confluence/list_pages_in_space.ts new file mode 100644 index 000000000..558a8c50e --- /dev/null +++ b/apps/sim/tools/confluence/list_pages_in_space.ts @@ -0,0 +1,174 @@ +import { + CONTENT_BODY_OUTPUT_PROPERTIES, + PAGE_ITEM_PROPERTIES, + TIMESTAMP_OUTPUT, +} from '@/tools/confluence/types' +import type { ToolConfig } from '@/tools/types' + +export interface ConfluenceListPagesInSpaceParams { + accessToken: string + domain: string + spaceId: string + limit?: number + status?: string + bodyFormat?: string + cursor?: string + cloudId?: string +} + +export interface ConfluenceListPagesInSpaceResponse { + success: boolean + output: { + ts: string + pages: Array<{ + id: string + title: string + status: string | null + spaceId: string | null + parentId: string | null + authorId: string | null + createdAt: string | null + version: { + number: number + message?: string + createdAt?: string + } | null + body: { + storage?: { value: string } + } | null + webUrl: string | null + }> + nextCursor: string | null + } +} + +export const confluenceListPagesInSpaceTool: ToolConfig< + ConfluenceListPagesInSpaceParams, + ConfluenceListPagesInSpaceResponse +> = { + id: 'confluence_list_pages_in_space', + name: 'Confluence List Pages in Space', + description: + 'List all pages within a specific Confluence space. 
Supports pagination and filtering by status.', + version: '1.0.0', + + oauth: { + required: true, + provider: 'confluence', + }, + + params: { + accessToken: { + type: 'string', + required: true, + visibility: 'hidden', + description: 'OAuth access token for Confluence', + }, + domain: { + type: 'string', + required: true, + visibility: 'user-only', + description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)', + }, + spaceId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The ID of the Confluence space to list pages from', + }, + limit: { + type: 'number', + required: false, + visibility: 'user-or-llm', + description: 'Maximum number of pages to return (default: 50, max: 250)', + }, + status: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Filter pages by status: current, archived, trashed, or draft', + }, + bodyFormat: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: + 'Format for page body content: storage, atlas_doc_format, or view. If not specified, body is not included.', + }, + cursor: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Pagination cursor from previous response to get the next page of results', + }, + cloudId: { + type: 'string', + required: false, + visibility: 'user-only', + description: + 'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.', + }, + }, + + request: { + url: () => '/api/tools/confluence/space-pages', + method: 'POST', + headers: (params: ConfluenceListPagesInSpaceParams) => ({ + Accept: 'application/json', + 'Content-Type': 'application/json', + Authorization: `Bearer ${params.accessToken}`, + }), + body: (params: ConfluenceListPagesInSpaceParams) => ({ + domain: params.domain, + accessToken: params.accessToken, + spaceId: params.spaceId?.trim(), + limit: params.limit ? 
Number(params.limit) : 50, + status: params.status, + bodyFormat: params.bodyFormat, + cursor: params.cursor, + cloudId: params.cloudId, + }), + }, + + transformResponse: async (response: Response) => { + const data = await response.json() + return { + success: true, + output: { + ts: new Date().toISOString(), + pages: data.pages ?? [], + nextCursor: data.nextCursor ?? null, + }, + } + }, + + outputs: { + ts: TIMESTAMP_OUTPUT, + pages: { + type: 'array', + description: 'Array of pages in the space', + items: { + type: 'object', + properties: { + ...PAGE_ITEM_PROPERTIES, + body: { + type: 'object', + description: 'Page body content (if bodyFormat was specified)', + properties: CONTENT_BODY_OUTPUT_PROPERTIES, + optional: true, + }, + webUrl: { + type: 'string', + description: 'URL to view the page in Confluence', + optional: true, + }, + }, + }, + }, + nextCursor: { + type: 'string', + description: 'Cursor for fetching the next page of results', + optional: true, + }, + }, +} diff --git a/apps/sim/tools/confluence/list_spaces.ts b/apps/sim/tools/confluence/list_spaces.ts index 77fcbfe0e..3859aad2b 100644 --- a/apps/sim/tools/confluence/list_spaces.ts +++ b/apps/sim/tools/confluence/list_spaces.ts @@ -5,6 +5,7 @@ export interface ConfluenceListSpacesParams { accessToken: string domain: string limit?: number + cursor?: string cloudId?: string } @@ -19,6 +20,7 @@ export interface ConfluenceListSpacesResponse { type: string status: string }> + nextCursor: string | null } } @@ -53,7 +55,13 @@ export const confluenceListSpacesTool: ToolConfig< type: 'number', required: false, visibility: 'user-or-llm', - description: 'Maximum number of spaces to return (default: 25)', + description: 'Maximum number of spaces to return (default: 25, max: 250)', + }, + cursor: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Pagination cursor from previous response', }, cloudId: { type: 'string', @@ -71,6 +79,9 @@ export const confluenceListSpacesTool: 
ToolConfig< accessToken: params.accessToken, limit: String(params.limit || 25), }) + if (params.cursor) { + query.set('cursor', params.cursor) + } if (params.cloudId) { query.set('cloudId', params.cloudId) } @@ -83,14 +94,6 @@ export const confluenceListSpacesTool: ToolConfig< Authorization: `Bearer ${params.accessToken}`, } }, - body: (params: ConfluenceListSpacesParams) => { - return { - domain: params.domain, - accessToken: params.accessToken, - cloudId: params.cloudId, - limit: params.limit ? Number(params.limit) : 25, - } - }, }, transformResponse: async (response: Response) => { @@ -100,6 +103,7 @@ export const confluenceListSpacesTool: ToolConfig< output: { ts: new Date().toISOString(), spaces: data.spaces || [], + nextCursor: data.nextCursor ?? null, }, } }, @@ -107,5 +111,10 @@ export const confluenceListSpacesTool: ToolConfig< outputs: { ts: TIMESTAMP_OUTPUT, spaces: SPACES_OUTPUT, + nextCursor: { + type: 'string', + description: 'Cursor for fetching the next page of results', + optional: true, + }, }, } diff --git a/apps/sim/tools/confluence/retrieve.ts b/apps/sim/tools/confluence/retrieve.ts index 9af6af7e0..ded0fda90 100644 --- a/apps/sim/tools/confluence/retrieve.ts +++ b/apps/sim/tools/confluence/retrieve.ts @@ -1,4 +1,9 @@ import type { ConfluenceRetrieveParams, ConfluenceRetrieveResponse } from '@/tools/confluence/types' +import { + BODY_FORMAT_PROPERTIES, + TIMESTAMP_OUTPUT, + VERSION_OUTPUT_PROPERTIES, +} from '@/tools/confluence/types' import { transformPageData } from '@/tools/confluence/utils' import type { ToolConfig } from '@/tools/types' @@ -71,9 +76,42 @@ export const confluenceRetrieveTool: ToolConfig< }, outputs: { - ts: { type: 'string', description: 'Timestamp of retrieval' }, + ts: TIMESTAMP_OUTPUT, pageId: { type: 'string', description: 'Confluence page ID' }, - content: { type: 'string', description: 'Page content with HTML tags stripped' }, title: { type: 'string', description: 'Page title' }, + content: { type: 'string', 
description: 'Page content with HTML tags stripped' }, + status: { + type: 'string', + description: 'Page status (current, archived, trashed, draft)', + optional: true, + }, + spaceId: { type: 'string', description: 'ID of the space containing the page', optional: true }, + parentId: { type: 'string', description: 'ID of the parent page', optional: true }, + authorId: { type: 'string', description: 'Account ID of the page author', optional: true }, + createdAt: { + type: 'string', + description: 'ISO 8601 timestamp when the page was created', + optional: true, + }, + url: { type: 'string', description: 'URL to view the page in Confluence', optional: true }, + body: { + type: 'object', + description: 'Raw page body content in storage format', + properties: { + storage: { + type: 'object', + description: 'Body in storage format (Confluence markup)', + properties: BODY_FORMAT_PROPERTIES, + optional: true, + }, + }, + optional: true, + }, + version: { + type: 'object', + description: 'Page version information', + properties: VERSION_OUTPUT_PROPERTIES, + optional: true, + }, }, } diff --git a/apps/sim/tools/confluence/search_in_space.ts b/apps/sim/tools/confluence/search_in_space.ts new file mode 100644 index 000000000..5b10a5c62 --- /dev/null +++ b/apps/sim/tools/confluence/search_in_space.ts @@ -0,0 +1,144 @@ +import { SEARCH_RESULT_ITEM_PROPERTIES, TIMESTAMP_OUTPUT } from '@/tools/confluence/types' +import type { ToolConfig } from '@/tools/types' + +export interface ConfluenceSearchInSpaceParams { + accessToken: string + domain: string + spaceKey: string + query?: string + contentType?: string + limit?: number + cloudId?: string +} + +export interface ConfluenceSearchInSpaceResponse { + success: boolean + output: { + ts: string + spaceKey: string + totalSize: number + results: Array<{ + id: string + title: string + type: string + status: string | null + url: string + excerpt: string + lastModified: string | null + }> + } +} + +export const 
confluenceSearchInSpaceTool: ToolConfig< + ConfluenceSearchInSpaceParams, + ConfluenceSearchInSpaceResponse +> = { + id: 'confluence_search_in_space', + name: 'Confluence Search in Space', + description: + 'Search for content within a specific Confluence space. Optionally filter by text query and content type.', + version: '1.0.0', + + oauth: { + required: true, + provider: 'confluence', + }, + + params: { + accessToken: { + type: 'string', + required: true, + visibility: 'hidden', + description: 'OAuth access token for Confluence', + }, + domain: { + type: 'string', + required: true, + visibility: 'user-only', + description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)', + }, + spaceKey: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The key of the Confluence space to search in (e.g., "ENG", "HR")', + }, + query: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Text search query. If not provided, returns all content in the space.', + }, + contentType: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Filter by content type: page, blogpost, attachment, or comment', + }, + limit: { + type: 'number', + required: false, + visibility: 'user-or-llm', + description: 'Maximum number of results to return (default: 25, max: 250)', + }, + cloudId: { + type: 'string', + required: false, + visibility: 'user-only', + description: + 'Confluence Cloud ID for the instance. 
If not provided, it will be fetched using the domain.', + }, + }, + + request: { + url: () => '/api/tools/confluence/search-in-space', + method: 'POST', + headers: (params: ConfluenceSearchInSpaceParams) => ({ + Accept: 'application/json', + 'Content-Type': 'application/json', + Authorization: `Bearer ${params.accessToken}`, + }), + body: (params: ConfluenceSearchInSpaceParams) => ({ + domain: params.domain, + accessToken: params.accessToken, + spaceKey: params.spaceKey?.trim(), + query: params.query, + contentType: params.contentType, + limit: params.limit ? Number(params.limit) : 25, + cloudId: params.cloudId, + }), + }, + + transformResponse: async (response: Response) => { + const data = await response.json() + return { + success: true, + output: { + ts: new Date().toISOString(), + spaceKey: data.spaceKey ?? '', + totalSize: data.totalSize ?? 0, + results: data.results ?? [], + }, + } + }, + + outputs: { + ts: TIMESTAMP_OUTPUT, + spaceKey: { + type: 'string', + description: 'The space key that was searched', + }, + totalSize: { + type: 'number', + description: 'Total number of matching results', + }, + results: { + type: 'array', + description: 'Array of search results', + items: { + type: 'object', + properties: SEARCH_RESULT_ITEM_PROPERTIES, + }, + }, + }, +} diff --git a/apps/sim/tools/confluence/types.ts b/apps/sim/tools/confluence/types.ts index 5ac68d69c..a786e9924 100644 --- a/apps/sim/tools/confluence/types.ts +++ b/apps/sim/tools/confluence/types.ts @@ -26,6 +26,52 @@ export const VERSION_OUTPUT_PROPERTIES = { }, } as const satisfies Record +/** + * Detailed version object properties for get_page_version endpoint. + * Based on Confluence API v2 DetailedVersion schema. 
+ */ +export const DETAILED_VERSION_OUTPUT_PROPERTIES = { + number: { type: 'number', description: 'Version number' }, + message: { type: 'string', description: 'Version message', optional: true }, + minorEdit: { type: 'boolean', description: 'Whether this is a minor edit' }, + authorId: { type: 'string', description: 'Account ID of the version author', optional: true }, + createdAt: { + type: 'string', + description: 'ISO 8601 timestamp of version creation', + optional: true, + }, + contentTypeModified: { + type: 'boolean', + description: 'Whether the content type was modified in this version', + optional: true, + }, + collaborators: { + type: 'array', + description: 'List of collaborator account IDs for this version', + items: { type: 'string' }, + optional: true, + }, + prevVersion: { + type: 'number', + description: 'Previous version number', + optional: true, + }, + nextVersion: { + type: 'number', + description: 'Next version number', + optional: true, + }, +} as const satisfies Record + +/** + * Complete detailed version object output definition. + */ +export const DETAILED_VERSION_OUTPUT: OutputProperty = { + type: 'object', + description: 'Detailed version information', + properties: DETAILED_VERSION_OUTPUT_PROPERTIES, +} + /** * Complete version object output definition. */ @@ -137,6 +183,54 @@ export const SPACES_OUTPUT: OutputProperty = { }, } +/** + * Body format inner object properties (storage, view, atlas_doc_format). + * Based on Confluence API v2 body structure. + */ +export const BODY_FORMAT_PROPERTIES = { + value: { type: 'string', description: 'The content value in the specified format' }, + representation: { + type: 'string', + description: 'Content representation type', + optional: true, + }, +} as const satisfies Record + +/** + * Page/Blog post body object properties. + * Based on Confluence API v2 body structure with multiple format options. 
+ */ +export const CONTENT_BODY_OUTPUT_PROPERTIES = { + storage: { + type: 'object', + description: 'Body in storage format (Confluence markup)', + properties: BODY_FORMAT_PROPERTIES, + optional: true, + }, + view: { + type: 'object', + description: 'Body in view format (rendered HTML)', + properties: BODY_FORMAT_PROPERTIES, + optional: true, + }, + atlas_doc_format: { + type: 'object', + description: 'Body in Atlassian Document Format (ADF)', + properties: BODY_FORMAT_PROPERTIES, + optional: true, + }, +} as const satisfies Record + +/** + * Complete body object output definition for pages and blog posts. + */ +export const CONTENT_BODY_OUTPUT: OutputProperty = { + type: 'object', + description: 'Page or blog post body content in requested format(s)', + properties: CONTENT_BODY_OUTPUT_PROPERTIES, + optional: true, +} + /** * Comment body object properties. * Based on Confluence API v2 comment body structure. diff --git a/apps/sim/tools/confluence/update.ts b/apps/sim/tools/confluence/update.ts index b5c5847f0..f9dcd0107 100644 --- a/apps/sim/tools/confluence/update.ts +++ b/apps/sim/tools/confluence/update.ts @@ -1,4 +1,5 @@ import type { ConfluenceUpdateParams, ConfluenceUpdateResponse } from '@/tools/confluence/types' +import { CONTENT_BODY_OUTPUT_PROPERTIES, VERSION_OUTPUT_PROPERTIES } from '@/tools/confluence/types' import type { ToolConfig } from '@/tools/types' export const confluenceUpdateTool: ToolConfig = { @@ -98,9 +99,13 @@ export const confluenceUpdateTool: ToolConfig = { confluence_update: confluenceUpdateTool, confluence_create_page: confluenceCreatePageTool, confluence_delete_page: confluenceDeletePageTool, + confluence_list_pages_in_space: confluenceListPagesInSpaceTool, + confluence_get_page_children: confluenceGetPageChildrenTool, + confluence_get_page_ancestors: confluenceGetPageAncestorsTool, + confluence_list_page_versions: confluenceListPageVersionsTool, + confluence_get_page_version: confluenceGetPageVersionTool, + 
confluence_list_page_properties: confluenceListPagePropertiesTool, + confluence_create_page_property: confluenceCreatePagePropertyTool, + confluence_list_blogposts: confluenceListBlogPostsTool, + confluence_get_blogpost: confluenceGetBlogPostTool, + confluence_create_blogpost: confluenceCreateBlogPostTool, + confluence_list_blogposts_in_space: confluenceListBlogPostsInSpaceTool, confluence_search: confluenceSearchTool, + confluence_search_in_space: confluenceSearchInSpaceTool, confluence_create_comment: confluenceCreateCommentTool, confluence_list_comments: confluenceListCommentsTool, confluence_update_comment: confluenceUpdateCommentTool, @@ -2617,6 +2642,7 @@ export const tools: Record = { confluence_upload_attachment: confluenceUploadAttachmentTool, confluence_delete_attachment: confluenceDeleteAttachmentTool, confluence_list_labels: confluenceListLabelsTool, + confluence_add_label: confluenceAddLabelTool, confluence_get_space: confluenceGetSpaceTool, confluence_list_spaces: confluenceListSpacesTool, cursor_list_agents: cursorListAgentsTool, diff --git a/apps/sim/triggers/googleforms/webhook.ts b/apps/sim/triggers/googleforms/webhook.ts index 12106c74f..0f74fb1a9 100644 --- a/apps/sim/triggers/googleforms/webhook.ts +++ b/apps/sim/triggers/googleforms/webhook.ts @@ -42,7 +42,7 @@ export const googleFormsWebhookTrigger: TriggerConfig = { mode: 'trigger', }, { - id: 'formId', + id: 'triggerFormId', title: 'Form ID', type: 'short-input', placeholder: '1FAIpQLSd... 
(Google Form ID)', diff --git a/apps/sim/triggers/microsoftteams/chat_webhook.ts b/apps/sim/triggers/microsoftteams/chat_webhook.ts index dcd155a57..9ef0b4390 100644 --- a/apps/sim/triggers/microsoftteams/chat_webhook.ts +++ b/apps/sim/triggers/microsoftteams/chat_webhook.ts @@ -47,7 +47,7 @@ export const microsoftTeamsChatSubscriptionTrigger: TriggerConfig = { }, }, { - id: 'chatId', + id: 'triggerChatId', title: 'Chat ID', type: 'short-input', placeholder: 'Enter chat ID', diff --git a/apps/sim/triggers/webflow/collection_item_changed.ts b/apps/sim/triggers/webflow/collection_item_changed.ts index 3b6c580bd..e0c43fd36 100644 --- a/apps/sim/triggers/webflow/collection_item_changed.ts +++ b/apps/sim/triggers/webflow/collection_item_changed.ts @@ -30,7 +30,7 @@ export const webflowCollectionItemChangedTrigger: TriggerConfig = { }, }, { - id: 'siteId', + id: 'triggerSiteId', title: 'Site', type: 'dropdown', placeholder: 'Select a site', @@ -96,7 +96,7 @@ export const webflowCollectionItemChangedTrigger: TriggerConfig = { dependsOn: ['triggerCredentials'], }, { - id: 'collectionId', + id: 'triggerCollectionId', title: 'Collection', type: 'dropdown', placeholder: 'Select a collection (optional)', @@ -112,7 +112,9 @@ export const webflowCollectionItemChangedTrigger: TriggerConfig = { const credentialId = useSubBlockStore.getState().getValue(blockId, 'triggerCredentials') as | string | null - const siteId = useSubBlockStore.getState().getValue(blockId, 'siteId') as string | null + const siteId = useSubBlockStore.getState().getValue(blockId, 'triggerSiteId') as + | string + | null if (!credentialId || !siteId) { return [] } @@ -142,7 +144,9 @@ export const webflowCollectionItemChangedTrigger: TriggerConfig = { const credentialId = useSubBlockStore.getState().getValue(blockId, 'triggerCredentials') as | string | null - const siteId = useSubBlockStore.getState().getValue(blockId, 'siteId') as string | null + const siteId = useSubBlockStore.getState().getValue(blockId, 
'triggerSiteId') as + | string + | null if (!credentialId || !siteId) return null try { const response = await fetch('/api/tools/webflow/collections', { @@ -161,7 +165,7 @@ export const webflowCollectionItemChangedTrigger: TriggerConfig = { return null } }, - dependsOn: ['triggerCredentials', 'siteId'], + dependsOn: ['triggerCredentials', 'triggerSiteId'], }, { id: 'triggerSave', diff --git a/apps/sim/triggers/webflow/collection_item_created.ts b/apps/sim/triggers/webflow/collection_item_created.ts index 2ba0a11b6..3c2c6cee4 100644 --- a/apps/sim/triggers/webflow/collection_item_created.ts +++ b/apps/sim/triggers/webflow/collection_item_created.ts @@ -44,7 +44,7 @@ export const webflowCollectionItemCreatedTrigger: TriggerConfig = { }, }, { - id: 'siteId', + id: 'triggerSiteId', title: 'Site', type: 'dropdown', placeholder: 'Select a site', @@ -110,7 +110,7 @@ export const webflowCollectionItemCreatedTrigger: TriggerConfig = { dependsOn: ['triggerCredentials'], }, { - id: 'collectionId', + id: 'triggerCollectionId', title: 'Collection', type: 'dropdown', placeholder: 'Select a collection (optional)', @@ -126,7 +126,9 @@ export const webflowCollectionItemCreatedTrigger: TriggerConfig = { const credentialId = useSubBlockStore.getState().getValue(blockId, 'triggerCredentials') as | string | null - const siteId = useSubBlockStore.getState().getValue(blockId, 'siteId') as string | null + const siteId = useSubBlockStore.getState().getValue(blockId, 'triggerSiteId') as + | string + | null if (!credentialId || !siteId) { return [] } @@ -156,7 +158,9 @@ export const webflowCollectionItemCreatedTrigger: TriggerConfig = { const credentialId = useSubBlockStore.getState().getValue(blockId, 'triggerCredentials') as | string | null - const siteId = useSubBlockStore.getState().getValue(blockId, 'siteId') as string | null + const siteId = useSubBlockStore.getState().getValue(blockId, 'triggerSiteId') as + | string + | null if (!credentialId || !siteId) return null try { const 
response = await fetch('/api/tools/webflow/collections', { @@ -175,7 +179,7 @@ export const webflowCollectionItemCreatedTrigger: TriggerConfig = { return null } }, - dependsOn: ['triggerCredentials', 'siteId'], + dependsOn: ['triggerCredentials', 'triggerSiteId'], }, { id: 'triggerSave', diff --git a/apps/sim/triggers/webflow/collection_item_deleted.ts b/apps/sim/triggers/webflow/collection_item_deleted.ts index c7568568e..80011af97 100644 --- a/apps/sim/triggers/webflow/collection_item_deleted.ts +++ b/apps/sim/triggers/webflow/collection_item_deleted.ts @@ -30,7 +30,7 @@ export const webflowCollectionItemDeletedTrigger: TriggerConfig = { }, }, { - id: 'siteId', + id: 'triggerSiteId', title: 'Site', type: 'dropdown', placeholder: 'Select a site', @@ -96,7 +96,7 @@ export const webflowCollectionItemDeletedTrigger: TriggerConfig = { dependsOn: ['triggerCredentials'], }, { - id: 'collectionId', + id: 'triggerCollectionId', title: 'Collection', type: 'dropdown', placeholder: 'Select a collection (optional)', @@ -112,7 +112,9 @@ export const webflowCollectionItemDeletedTrigger: TriggerConfig = { const credentialId = useSubBlockStore.getState().getValue(blockId, 'triggerCredentials') as | string | null - const siteId = useSubBlockStore.getState().getValue(blockId, 'siteId') as string | null + const siteId = useSubBlockStore.getState().getValue(blockId, 'triggerSiteId') as + | string + | null if (!credentialId || !siteId) { return [] } @@ -142,7 +144,9 @@ export const webflowCollectionItemDeletedTrigger: TriggerConfig = { const credentialId = useSubBlockStore.getState().getValue(blockId, 'triggerCredentials') as | string | null - const siteId = useSubBlockStore.getState().getValue(blockId, 'siteId') as string | null + const siteId = useSubBlockStore.getState().getValue(blockId, 'triggerSiteId') as + | string + | null if (!credentialId || !siteId) return null try { const response = await fetch('/api/tools/webflow/collections', { @@ -161,7 +165,7 @@ export const 
webflowCollectionItemDeletedTrigger: TriggerConfig = { return null } }, - dependsOn: ['triggerCredentials', 'siteId'], + dependsOn: ['triggerCredentials', 'triggerSiteId'], }, { id: 'triggerSave', diff --git a/apps/sim/triggers/webflow/form_submission.ts b/apps/sim/triggers/webflow/form_submission.ts index 59efc5e00..2d698daa0 100644 --- a/apps/sim/triggers/webflow/form_submission.ts +++ b/apps/sim/triggers/webflow/form_submission.ts @@ -30,7 +30,7 @@ export const webflowFormSubmissionTrigger: TriggerConfig = { }, }, { - id: 'siteId', + id: 'triggerSiteId', title: 'Site', type: 'dropdown', placeholder: 'Select a site',