Compare commits


54 Commits

Author SHA1 Message Date
Vikhyath Mondreti
27973953f6 v0.5.87: workflow block auth fix 2026-02-10 22:33:55 -08:00
Waleed
50585273ce v0.5.86: server side copilot, copilot mcp, error notifications, jira outputs destructuring, slack trigger improvements 2026-02-10 21:49:58 -08:00
Vikhyath Mondreti
654cb2b407 v0.5.85: deployment improvements 2026-02-09 10:49:33 -08:00
Waleed
6c66521d64 v0.5.84: model request sanitization 2026-02-07 19:06:53 -08:00
Vikhyath Mondreti
479cd347ad v0.5.83: agent skills, concurrent workers for v8s, airweave integration 2026-02-07 12:27:11 -08:00
Waleed
a3a99eda19 v0.5.82: slack trigger files, pagination for linear, executor fixes 2026-02-06 00:41:52 -08:00
Waleed
1a66d48add v0.5.81: traces fix, additional confluence tools, azure anthropic support, opus 4.6 2026-02-05 11:28:54 -08:00
Waleed
46822e91f3 v0.5.80: lock feature, enterprise modules, time formatting consolidation, files, UX and UI improvements, longer timeouts 2026-02-04 18:27:05 -08:00
Waleed
2bb68335ee v0.5.79: longer MCP tools timeout, optimize loop/parallel regeneration, enrich.so integration 2026-01-31 21:57:56 -08:00
Waleed
8528fbe2d2 v0.5.78: billing fixes, mcp timeout increase, reactquery migrations, updated tool param visibilities, DSPy and Google Maps integrations 2026-01-31 13:48:22 -08:00
Waleed
31fdd2be13 v0.5.77: room manager redis migration, tool outputs, ui fixes 2026-01-30 14:57:17 -08:00
Waleed
028bc652c2 v0.5.76: posthog improvements, readme updates 2026-01-29 00:13:19 -08:00
Waleed
c6bf5cd58c v0.5.75: search modal overhaul, helm chart updates, run from block, terminal and visual debugging improvements 2026-01-28 22:54:13 -08:00
Vikhyath Mondreti
11dc18a80d v0.5.74: autolayout improvements, clerk integration, auth enforcements 2026-01-27 20:37:39 -08:00
Waleed
ab4e9dc72f v0.5.73: ci, helm updates, kb, ui fixes, note block enhancements 2026-01-26 22:04:35 -08:00
Vikhyath Mondreti
1c58c35bd8 v0.5.72: azure connection string, supabase improvement, multitrigger resolution, docs quick reference 2026-01-25 23:42:27 -08:00
Waleed
d63a5cb504 v0.5.71: ux, ci improvements, docs updates 2026-01-25 03:08:08 -08:00
Waleed
8bd5d41723 v0.5.70: router fix, anthropic agent response format adherence 2026-01-24 20:57:02 -08:00
Waleed
c12931bc50 v0.5.69: kb upgrades, blog, copilot improvements, auth consolidation (#2973)
* fix(subflows): tag dropdown + resolution logic (#2949)

* fix(subflows): tag dropdown + resolution logic

* fixes;

* revert parallel change

* chore(deps): bump posthog-js to 1.334.1 (#2948)

* fix(idempotency): add conflict target to atomicallyClaimDb query + remove redundant db namespace tracking (#2950)

* fix(idempotency): add conflict target to atomicallyClaimDb query

* delete needs to account for namespace

* simplify namespace filtering logic

* fix cleanup

* consistent target
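
A generic sketch of what "conflict target" means for this kind of atomic claim, using drizzle-orm; the table, columns, and helper name are hypothetical, not the repository's actual schema:

import { db } from '@sim/db'
import { idempotencyKeys } from './schema' // hypothetical table with a unique index on (namespace, key)

async function atomicallyClaim(namespace: string, key: string): Promise<boolean> {
  const inserted = await db
    .insert(idempotencyKeys)
    .values({ namespace, key, claimedAt: new Date() })
    // Naming the exact unique index the claim races on, instead of matching
    // any unique violation, is the "conflict target" the bullets above refer to.
    .onConflictDoNothing({ target: [idempotencyKeys.namespace, idempotencyKeys.key] })
    .returning({ key: idempotencyKeys.key })
  return inserted.length > 0 // true only for the caller that won the claim
}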

* improvement(kb): add document filtering, select all, and React Query migration (#2951)

* improvement(kb): add document filtering, select all, and React Query migration

* test(kb): update tests for enabledFilter and removed userId params

* fix(kb): remove non-null assertion, add explicit guard

* improvement(logs): trace span, details (#2952)

* improvement(action-bar): ordering

* improvement(logs): details, trace span

* feat(blog): v0.5 release post (#2953)

* feat(blog): v0.5 post

* improvement(blog): simplify title and remove code block header

- Simplified blog title from Introducing Sim Studio v0.5 to Introducing Sim v0.5
- Removed language label header and copy button from code blocks for cleaner appearance

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

* ack PR comments

* small styling improvements

* created system to create post-specific components

* updated component

* cache invalidation

---------

Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>

* feat(admin): add credits endpoint to issue credits to users (#2954)

* feat(admin): add credits endpoint to issue credits to users

* fix(admin): use existing credit functions and handle enterprise seats

* fix(admin): reject NaN and Infinity in amount validation

* styling

* fix(admin): validate userId and email are strings

* improvement(copilot): fast mode, subagent tool responses and allow preferences (#2955)

* Improvements

* Fix actions mapping

* Remove console logs

* fix(billing): handle missing userStats and prevent crashes (#2956)

* fix(billing): handle missing userStats and prevent crashes

* fix(billing): correct import path for getFilledPillColor

* fix(billing): add Number.isFinite check to lastPeriodCost

* fix(logs): refresh logic to refresh logs details (#2958)

* fix(security): add authentication and input validation to API routes (#2959)

* fix(security): add authentication and input validation to API routes

* moved utils

* remove extraneous comments

* removed unused dep

* improvement(helm): add internal ingress support and same-host path consolidation (#2960)

* improvement(helm): add internal ingress support and same-host path consolidation

* improvement(helm): clean up ingress template comments

Simplify verbose inline Helm comments and section dividers to match the
minimal style used in services.yaml.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

* fix(helm): add missing copilot path consolidation for realtime host

When copilot.host equals realtime.host but differs from app.host,
copilot paths were not being routed. Added logic to consolidate
copilot paths into the realtime rule for this scenario.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
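
The consolidation decision this fix describes, sketched in TypeScript for illustration only (the real logic is a Helm ingress template, and the paths here are hypothetical): grouping paths by host folds copilot into the realtime rule whenever the hosts match, and keeps the app catch-all last.

interface HostConfig {
  app: string
  realtime: string
  copilot: string
}

// Group paths by host so services sharing a host share one ingress rule,
// specific paths first, app catch-all last.
function buildIngressRules(hosts: HostConfig): Map<string, string[]> {
  const rules = new Map<string, string[]>()
  const add = (host: string, path: string) => {
    rules.set(host, [...(rules.get(host) ?? []), path])
  }
  add(hosts.realtime, '/socket.io')  // hypothetical realtime path
  // The fixed case: copilot.host === realtime.host but !== app.host used to
  // leave copilot paths unrouted; grouping by host folds them into the
  // realtime rule.
  add(hosts.copilot, '/api/copilot') // hypothetical copilot path
  add(hosts.app, '/')                // catch-all rendered after specific paths
  return rules
}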

* improvement(helm): follow ingress best practices

- Remove orphan comments that appeared when services were disabled
- Add documentation about path ordering requirements
- Paths rendered in order: realtime, copilot, app (specific before catch-all)
- Clean template output matching industry Helm chart standards

---------

Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>

* feat(blog): enterprise post (#2961)

* feat(blog): enterprise post

* added more images, styling

* more content

* updated v0-5 post

* remove unused transition

---------

Co-authored-by: Vikhyath Mondreti <vikhyath@simstudio.ai>

* fix(envvars): resolution standardized (#2957)

* fix(envvars): resolution standardized

* remove comments

* address bugbot

* fix highlighting for env vars

* remove comments

* address greptile

* address bugbot

* fix(copilot): mask credentials fix (#2963)

* Fix copilot masking

* Clean up

* Lint

* improvement(webhooks): remove dead code (#2965)

* fix(webhooks): subscription recreation path

* improvement(webhooks): remove dead code

* fix tests

* address bugbot comments

* fix restoration edge case

* fix more edge cases

* address bugbot comments

* fix gmail polling

* add warnings for UI indication for credential sets

* fix(preview): subblock values (#2969)

* fix(child-workflow): nested spans handoff (#2966)

* fix(child-workflow): nested spans handoff

* remove overly defensive programming

* update type check

* type more code

* remove more dead code

* address bugbot comments

* fix(security): restrict API key access on internal-only routes (#2964)

* fix(security): restrict API key access on internal-only routes

* test(security): update function execute tests for checkInternalAuth

* updated agent handler

* move session check higher in checkSessionOrInternalAuth

* extracted duplicate code into helper for resolving user from jwt

* fix(copilot): update copilot chat title (#2968)

* fix(hitl): fix condition blocks after hitl (#2967)

* fix(notes): ghost edges (#2970)

* fix(notes): ghost edges

* fix deployed state fallback

* fallback

* remove UI level checks

* annotation missing from autoconnect source check

* improvement(docs): loop and parallel var reference syntax (#2975)

* fix(blog): slash actions description (#2976)

* improvement(docs): loop and parallel var reference syntax

* fix(blog): slash actions description

* fix(auth): copilot routes (#2977)

* Fix copilot auth

* Fix

* Fix

* Fix

* fix(copilot): fix edit summary for loops/parallels (#2978)

* fix(integrations): hide from tool bar (#2544)

* fix(landing): ui (#2979)

* fix(edge-validation): race condition on collaborative add (#2980)

* fix(variables): boolean type support and input improvements (#2981)

* fix(variables): boolean type support and input improvements

* fix formatting

---------

Co-authored-by: Vikhyath Mondreti <vikhyathvikku@gmail.com>
Co-authored-by: Emir Karabeg <78010029+emir-karabeg@users.noreply.github.com>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: Siddharth Ganesan <33737564+Sg312@users.noreply.github.com>
Co-authored-by: Vikhyath Mondreti <vikhyath@simstudio.ai>
2026-01-24 14:29:53 -08:00
Waleed
e9c4251c1c v0.5.68: router block reasoning, executor improvements, variable resolution consolidation, helm updates (#2946)
* improvement(workflow-item): stabilize avatar layout and fix name truncation (#2939)

* improvement(workflow-item): stabilize avatar layout and fix name truncation

* fix(avatars): revert overflow bg to hardcoded color for contrast

* fix(executor): stop parallel execution when block errors (#2940)

* improvement(helm): add per-deployment extraVolumes support (#2942)

* fix(gmail): expose messageId field in read email block (#2943)

* fix(resolver): consolidate reference resolution  (#2941)

* fix(resolver): consolidate code to resolve references

* fix edge cases

* use already formatted error

* fix multi index

* fix backwards compat reachability

* handle backwards compatibility accurately

* use shared constant correctly

* feat(router): expose reasoning output in router v2 block (#2945)

* fix(copilot): always allow, credential masking (#2947)

* Fix always allow, credential validation

* Credential masking

* Autoload

* fix(executor): handle condition dead-end branches in loops (#2944)

---------

Co-authored-by: Vikhyath Mondreti <vikhyathvikku@gmail.com>
Co-authored-by: Siddharth Ganesan <33737564+Sg312@users.noreply.github.com>
2026-01-22 13:48:15 -08:00
Waleed
cc2be33d6b v0.5.67: loading, password reset, ui improvements, helm updates (#2928)
* fix(zustand): updated to useShallow from deprecated createWithEqualityFn (#2919)

* fix(logger): use direct env access for webpack inlining (#2920)

* fix(notifications): text overflow with line-clamp (#2921)

* chore(helm): add env vars for Vertex AI, orgs, and telemetry (#2922)

* fix(auth): improve reset password flow and consolidate brand detection (#2924)

* fix(auth): improve reset password flow and consolidate brand detection

* fix(auth): set errorHandled for EMAIL_NOT_VERIFIED to prevent duplicate error

* fix(auth): clear success message on login errors

* chore(auth): fix import order per lint

* fix(action-bar): duplicate subflows with children (#2923)

* fix(action-bar): duplicate subflows with children

* fix(action-bar): add validateTriggerPaste for subflow duplicate

* fix(resolver): agent response format, input formats, root level (#2925)

* fix(resolvers): agent response format, input formats, root level

* fix response block initial seeding

* fix tests

* fix(messages-input): fix cursor alignment and auto-resize with overlay (#2926)

* fix(messages-input): fix cursor alignment and auto-resize with overlay

* fixed remaining zustand warnings

* fix(stores): remove dead code causing log spam on startup (#2927)

* fix(stores): remove dead code causing log spam on startup

* fix(stores): replace custom tools zustand store with react query cache

* improvement(ui): use BrandedButton and BrandedLink components (#2930)

- Refactor auth forms to use BrandedButton component
- Add BrandedLink component for changelog page
- Reduce code duplication in login, signup, reset-password forms
- Update star count default value

* fix(custom-tools): remove unsafe title fallback in getCustomTool (#2929)

* fix(custom-tools): remove unsafe title fallback in getCustomTool

* fix(custom-tools): restore title fallback in getCustomTool lookup

Custom tools are referenced by title (custom_${title}), not database ID.
The title fallback is required for client-side tool resolution to work.
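
A minimal sketch of the lookup order this explains, with hypothetical types and names (not the repository's actual code): resolve by ID when possible, then fall back to the custom_${title} form that client-side references use.

interface CustomTool {
  id: string
  title: string
}

function getCustomTool(reference: string, tools: CustomTool[]): CustomTool | undefined {
  // Direct database-ID match first.
  const byId = tools.find((t) => t.id === reference)
  if (byId) return byId
  // Title fallback: client-side references are built as `custom_${title}`,
  // so resolution breaks without this branch.
  return tools.find((t) => `custom_${t.title}` === reference)
}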

* fix(null-bodies): empty bodies handling (#2931)

* fix(null-statuses): empty bodies handling

* address bugbot comment

* fix(token-refresh): microsoft, notion, x, linear (#2933)

* fix(microsoft): proactive refresh needed

* fix(x): missing token refresh flag

* notion and linear missing flag too

* address bugbot comment

* fix(auth): handle EMAIL_NOT_VERIFIED in onError callback (#2932)

* fix(auth): handle EMAIL_NOT_VERIFIED in onError callback

* refactor(auth): extract redirectToVerify helper to reduce duplication

* fix(workflow-selector): use dedicated selector for workflow dropdown (#2934)

* feat(workflow-block): preview (#2935)

* improvement(copilot): tool configs to show nested props (#2936)

* fix(auth): add genericOAuth providers to trustedProviders (#2937)

---------

Co-authored-by: Vikhyath Mondreti <vikhyathvikku@gmail.com>
Co-authored-by: Emir Karabeg <78010029+emir-karabeg@users.noreply.github.com>
2026-01-21 22:53:25 -08:00
Vikhyath Mondreti
45371e521e v0.5.66: external http requests fix, ring highlighting 2026-01-21 02:55:39 -08:00
Waleed
0ce0f98aa5 v0.5.65: gemini updates, textract integration, ui updates (#2909)
* fix(google): wrap primitive tool responses for Gemini API compatibility (#2900)

* fix(canonical): copilot path + update parent (#2901)

* fix(rss): add top-level title, link, pubDate fields to RSS trigger output (#2902)

* fix(rss): add top-level title, link, pubDate fields to RSS trigger output

* fix(imap): add top-level fields to IMAP trigger output

* improvement(browseruse): add profile id param (#2903)

* improvement(browseruse): add profile id param

* make request a stub since we have directExec

* improvement(executor): upgraded abort controller to handle aborts for loops and parallels (#2880)

* improvement(executor): upgraded abort controller to handle aborts for loops and parallels

* comments

* improvement(files): update execution for passing base64 strings (#2906)

* progress

* improvement(execution): update execution for passing base64 strings

* fix types

* cleanup comments

* path security vuln

* reject promise correctly

* fix redirect case

* remove proxy routes

* fix tests

* use ipaddr

* feat(tools): added textract, added v2 for mistral, updated tag dropdown (#2904)

* feat(tools): added textract

* cleanup

* ack pr comments

* reorder

* removed upload for textract async version

* fix additional fields dropdown in editor, update parser to leave validation to be done on the server

* added mistral v2, files v2, and finalized textract

* updated the rest of the old file patterns, updated mistral outputs for v2

* updated tag dropdown to parse non-operation fields as well

* updated extension finder

* cleanup

* added description for inputs to workflow

* use helper for internal route check

* fix tag dropdown merge conflict change

* remove duplicate code

---------

Co-authored-by: Vikhyath Mondreti <vikhyath@simstudio.ai>

* fix(ui): change add inputs button to match output selector (#2907)

* fix(canvas): removed invite to workspace from canvas popover (#2908)

* fix(canvas): removed invite to workspace

* removed unused props

* fix(copilot): legacy tool display names (#2911)

* fix(a2a): canonical merge  (#2912)

* fix canonical merge

* fix empty array case

* fix(change-detection): copilot diffs have extra field (#2913)

* improvement(logs): improved logs ui bugs, added subflow disable UI (#2910)

* improvement(logs): improved logs ui bugs, added subflow disable UI

* added duplicate to action bar for subflows

* feat(broadcast): email v0.5 (#2905)

---------

Co-authored-by: Vikhyath Mondreti <vikhyathvikku@gmail.com>
Co-authored-by: Vikhyath Mondreti <vikhyath@simstudio.ai>
Co-authored-by: Emir Karabeg <78010029+emir-karabeg@users.noreply.github.com>
2026-01-20 23:54:55 -08:00
Waleed
dff1c9d083 v0.5.64: unsubscribe, search improvements, metrics, additional SSO configuration 2026-01-20 00:34:11 -08:00
Vikhyath Mondreti
b09f683072 v0.5.63: ui and performance improvements, more google tools 2026-01-18 15:22:42 -08:00
Vikhyath Mondreti
a8bb0db660 v0.5.62: webhook bug fixes, seeding default subblock values, block selection fixes 2026-01-16 20:27:06 -08:00
Waleed
af82820a28 v0.5.61: webhook improvements, workflow controls, react query for deployment status, chat fixes, reducto and pulse OCR, linear fixes 2026-01-16 18:06:23 -08:00
Waleed
4372841797 v0.5.60: invitation flow improvements, chat fixes, a2a improvements, additional copilot actions 2026-01-15 00:02:18 -08:00
Waleed
5e8c843241 v0.5.59: a2a support, documentation 2026-01-13 13:21:21 -08:00
Waleed
7bf3d73ee6 v0.5.58: export folders, new tools, permissions groups enhancements 2026-01-13 00:56:59 -08:00
Vikhyath Mondreti
7ffc11a738 v0.5.57: subagents, context menu improvements, bug fixes 2026-01-11 11:38:40 -08:00
Waleed
be578e2ed7 v0.5.56: batch operations, access control and permission groups, billing fixes 2026-01-10 00:31:34 -08:00
Waleed
f415e5edc4 v0.5.55: polling groups, bedrock provider, devcontainer fixes, workflow preview enhancements 2026-01-08 23:36:56 -08:00
Waleed
13a6e6c3fa v0.5.54: seo, model blacklist, helm chart updates, fireflies integration, autoconnect improvements, billing fixes 2026-01-07 16:09:45 -08:00
Waleed
f5ab7f21ae v0.5.53: hotkey improvements, added redis fallback, fixes for workflow tool 2026-01-06 23:34:52 -08:00
Waleed
bfb6fffe38 v0.5.52: new port-based router block, combobox expression and variable support 2026-01-06 16:14:10 -08:00
Waleed
4fbec0a43f v0.5.51: triggers, kb, condition block improvements, supabase and grain integration updates 2026-01-06 14:26:46 -08:00
Waleed
585f5e365b v0.5.50: import improvements, ui upgrades, kb styling and performance improvements 2026-01-05 00:35:55 -08:00
Waleed
3792bdd252 v0.5.49: hitl improvements, new email styles, imap trigger, logs context menu (#2672)
* feat(logs-context-menu): consolidated logs utils and types, added logs record context menu (#2659)

* feat(email): welcome email; improvement(emails): ui/ux (#2658)

* feat(email): welcome email; improvement(emails): ui/ux

* improvement(emails): links, accounts, preview

* refactor(emails): file structure and wrapper components

* added envvar for personal emails sent, added isHosted gate

* fixed failing tests, added env mock

* fix: removed comment

---------

Co-authored-by: waleed <walif6@gmail.com>

* fix(logging): hitl + trigger dev crash protection (#2664)

* hitl gaps

* deal with trigger worker crashes

* cleanup import structure

* feat(imap): added support for imap trigger (#2663)

* feat(tools): added support for imap trigger

* feat(imap): added parity, tested

* ack PR comments

* final cleanup

* feat(i18n): update translations (#2665)

Co-authored-by: waleedlatif1 <waleedlatif1@users.noreply.github.com>

* fix(grain): updated grain trigger to auto-establish trigger (#2666)

Co-authored-by: aadamgough <adam@sim.ai>

* feat(admin): routes to manage deployments (#2667)

* feat(admin): routes to manage deployments

* fix naming of deployed by

* feat(time-picker): added timepicker emcn component, added to playground, added searchable prop for dropdown, added more timezones for schedule, updated license and notice date (#2668)

* feat(time-picker): added timepicker emcn component, added to playground, added searchable prop for dropdown, added more timezones for schedule, updated license and notice date

* removed unused params, cleaned up redundant utils

* improvement(invite): aligned styling (#2669)

* improvement(invite): aligned with rest of app

* fix(invite): error handling

* fix: addressed comments

---------

Co-authored-by: Emir Karabeg <78010029+emir-karabeg@users.noreply.github.com>
Co-authored-by: Vikhyath Mondreti <vikhyathvikku@gmail.com>
Co-authored-by: waleedlatif1 <waleedlatif1@users.noreply.github.com>
Co-authored-by: Adam Gough <77861281+aadamgough@users.noreply.github.com>
Co-authored-by: aadamgough <adam@sim.ai>
2026-01-03 13:19:18 -08:00
Waleed
eb5d1f3e5b v0.5.48: copy-paste workflow blocks, docs updates, mcp tool fixes 2025-12-31 18:00:04 -08:00
Waleed
54ab82c8dd v0.5.47: deploy workflow as mcp, kb chunks tokenizer, UI improvements, jira service management tools 2025-12-30 23:18:58 -08:00
Waleed
f895bf469b v0.5.46: build improvements, greptile, light mode improvements 2025-12-29 02:17:52 -08:00
Waleed
dd3209af06 v0.5.45: light mode fixes, realtime usage indicator, docker build improvements 2025-12-27 19:57:42 -08:00
Waleed
b6ba3b50a7 v0.5.44: keyboard shortcuts, autolayout, light mode, byok, testing improvements 2025-12-26 21:25:19 -08:00
Waleed
b304233062 v0.5.43: export logs, circleback, grain, vertex, code hygiene, schedule improvements 2025-12-23 19:19:18 -08:00
Vikhyath Mondreti
57e4b49bd6 v0.5.42: fix memory migration 2025-12-23 01:24:54 -08:00
Vikhyath Mondreti
e12dd204ed v0.5.41: memory fixes, copilot improvements, knowledgebase improvements, LLM providers standardization 2025-12-23 00:15:18 -08:00
Vikhyath Mondreti
3d9d9cbc54 v0.5.40: supabase ops to allow non-public schemas, jira uuid 2025-12-21 22:28:05 -08:00
Waleed
0f4ec962ad v0.5.39: notion, workflow variables fixes 2025-12-20 20:44:00 -08:00
Waleed
4827866f9a v0.5.38: snap to grid, copilot ux improvements, billing line items 2025-12-20 17:24:38 -08:00
Waleed
3e697d9ed9 v0.5.37: redaction utils consolidation, logs updates, autoconnect improvements, additional kb tag types 2025-12-19 22:31:55 -08:00
Martin Yankov
4431a1a484 fix(helm): add custom egress rules to realtime network policy (#2481)
The realtime service network policy was missing the custom egress rules section
that allows configuration of additional egress rules via values.yaml. This caused
the realtime pods to be unable to connect to external databases (e.g., PostgreSQL
on port 5432) when using external database configurations.

The app network policy already had this section, but the realtime network policy
was missing it, creating an inconsistency and preventing the realtime service
from accessing external databases configured via networkPolicy.egress values.

This fix adds the same custom egress rules template section to the realtime
network policy, matching the app network policy behavior and allowing users to
configure database connectivity via values.yaml.
2025-12-19 18:59:08 -08:00
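
For reference, a minimal sketch of the values.yaml shape this fix honors; only the networkPolicy.egress key and the PostgreSQL port come from the message above, the CIDR is hypothetical:

networkPolicy:
  egress:
    # Hypothetical rule letting realtime pods reach an external PostgreSQL
    # instance once the custom egress section is rendered for them too.
    - to:
        - ipBlock:
            cidr: 10.20.0.0/16
      ports:
        - protocol: TCP
          port: 5432
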
Waleed
4d1a9a3f22 v0.5.36: hitl improvements, opengraph, slack fixes, one-click unsubscribe, auth checks, new db indexes 2025-12-19 01:27:49 -08:00
Vikhyath Mondreti
eb07a080fb v0.5.35: helm updates, copilot improvements, 404 for docs, salesforce fixes, subflow resize clamping 2025-12-18 16:23:19 -08:00
84 changed files with 488 additions and 4336 deletions

View File

@@ -41,6 +41,9 @@ Diese Tastenkombinationen wechseln zwischen den Panel-Tabs auf der rechten Seite
 | Tastenkombination | Aktion |
 |----------|--------|
+| `C` | Copilot-Tab fokussieren |
+| `T` | Toolbar-Tab fokussieren |
+| `E` | Editor-Tab fokussieren |
 | `Mod` + `F` | Toolbar-Suche fokussieren |
 ## Globale Navigation

View File

@@ -43,6 +43,9 @@ These shortcuts switch between panel tabs on the right side of the canvas.
 | Shortcut | Action |
 |----------|--------|
+| `C` | Focus Copilot tab |
+| `T` | Focus Toolbar tab |
+| `E` | Focus Editor tab |
 | `Mod` + `F` | Focus Toolbar search |
 ## Global Navigation

View File

@@ -399,28 +399,6 @@ Create a new custom property (metadata) on a Confluence page.
 | ↳ `authorId` | string | Account ID of the version author |
 | ↳ `createdAt` | string | ISO 8601 timestamp of version creation |
-### `confluence_delete_page_property`
-Delete a content property from a Confluence page by its property ID.
-#### Input
-| Parameter | Type | Required | Description |
-| --------- | ---- | -------- | ----------- |
-| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
-| `pageId` | string | Yes | The ID of the page containing the property |
-| `propertyId` | string | Yes | The ID of the property to delete |
-| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
-#### Output
-| Parameter | Type | Description |
-| --------- | ---- | ----------- |
-| `ts` | string | ISO 8601 timestamp of the operation |
-| `pageId` | string | ID of the page |
-| `propertyId` | string | ID of the deleted property |
-| `deleted` | boolean | Deletion status |
 ### `confluence_search`
 Search for content across Confluence pages, blog posts, and other content.
@@ -894,90 +872,6 @@ Add a label to a Confluence page for organization and categorization.
 | `labelName` | string | Name of the added label |
 | `labelId` | string | ID of the added label |
-### `confluence_delete_label`
-Remove a label from a Confluence page.
-#### Input
-| Parameter | Type | Required | Description |
-| --------- | ---- | -------- | ----------- |
-| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
-| `pageId` | string | Yes | Confluence page ID to remove the label from |
-| `labelName` | string | Yes | Name of the label to remove |
-| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
-#### Output
-| Parameter | Type | Description |
-| --------- | ---- | ----------- |
-| `ts` | string | ISO 8601 timestamp of the operation |
-| `pageId` | string | Page ID the label was removed from |
-| `labelName` | string | Name of the removed label |
-| `deleted` | boolean | Deletion status |
-### `confluence_get_pages_by_label`
-Retrieve all pages that have a specific label applied.
-#### Input
-| Parameter | Type | Required | Description |
-| --------- | ---- | -------- | ----------- |
-| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
-| `labelId` | string | Yes | The ID of the label to get pages for |
-| `limit` | number | No | Maximum number of pages to return \(default: 50, max: 250\) |
-| `cursor` | string | No | Pagination cursor from previous response |
-| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
-#### Output
-| Parameter | Type | Description |
-| --------- | ---- | ----------- |
-| `ts` | string | ISO 8601 timestamp of the operation |
-| `labelId` | string | ID of the label |
-| `pages` | array | Array of pages with this label |
-| ↳ `id` | string | Unique page identifier |
-| ↳ `title` | string | Page title |
-| ↳ `status` | string | Page status \(e.g., current, archived, trashed, draft\) |
-| ↳ `spaceId` | string | ID of the space containing the page |
-| ↳ `parentId` | string | ID of the parent page \(null if top-level\) |
-| ↳ `authorId` | string | Account ID of the page author |
-| ↳ `createdAt` | string | ISO 8601 timestamp when the page was created |
-| ↳ `version` | object | Page version information |
-| ↳ `number` | number | Version number |
-| ↳ `message` | string | Version message |
-| ↳ `minorEdit` | boolean | Whether this is a minor edit |
-| ↳ `authorId` | string | Account ID of the version author |
-| ↳ `createdAt` | string | ISO 8601 timestamp of version creation |
-| `nextCursor` | string | Cursor for fetching the next page of results |
-### `confluence_list_space_labels`
-List all labels associated with a Confluence space.
-#### Input
-| Parameter | Type | Required | Description |
-| --------- | ---- | -------- | ----------- |
-| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
-| `spaceId` | string | Yes | The ID of the Confluence space to list labels from |
-| `limit` | number | No | Maximum number of labels to return \(default: 25, max: 250\) |
-| `cursor` | string | No | Pagination cursor from previous response |
-| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
-#### Output
-| Parameter | Type | Description |
-| --------- | ---- | ----------- |
-| `ts` | string | ISO 8601 timestamp of the operation |
-| `spaceId` | string | ID of the space |
-| `labels` | array | Array of labels on the space |
-| ↳ `id` | string | Unique label identifier |
-| ↳ `name` | string | Label name |
-| ↳ `prefix` | string | Label prefix/type \(e.g., global, my, team\) |
-| `nextCursor` | string | Cursor for fetching the next page of results |
 ### `confluence_get_space`
 Get details about a specific Confluence space.

View File

@@ -42,6 +42,9 @@ Estos atajos cambian entre las pestañas del panel en el lado derecho del lienzo
 | Atajo | Acción |
 |----------|--------|
+| `C` | Enfocar pestaña Copilot |
+| `T` | Enfocar pestaña Barra de herramientas |
+| `E` | Enfocar pestaña Editor |
 | `Mod` + `F` | Enfocar búsqueda de Barra de herramientas |
 ## Navegación global

View File

@@ -42,6 +42,9 @@ Ces raccourcis permettent de basculer entre les onglets du panneau sur le côté
 | Raccourci | Action |
 |----------|--------|
+| `C` | Activer l'onglet Copilot |
+| `T` | Activer l'onglet Barre d'outils |
+| `E` | Activer l'onglet Éditeur |
 | `Mod` + `F` | Activer la recherche dans la barre d'outils |
 ## Navigation globale

View File

@@ -41,6 +41,9 @@ import { Callout } from 'fumadocs-ui/components/callout'
 | ショートカット | 操作 |
 |----------|--------|
+| `C` | Copilotタブにフォーカス |
+| `T` | Toolbarタブにフォーカス |
+| `E` | Editorタブにフォーカス |
 | `Mod` + `F` | Toolbar検索にフォーカス |
 ## グローバルナビゲーション

View File

@@ -41,6 +41,9 @@ import { Callout } from 'fumadocs-ui/components/callout'
 | 快捷键 | 操作 |
 |----------|--------|
+| `C` | 聚焦 Copilot 标签页 |
+| `T` | 聚焦 Toolbar 标签页 |
+| `E` | 聚焦 Editor 标签页 |
 | `Mod` + `F` | 聚焦 Toolbar 搜索 |
 ## 全局导航

View File

@@ -4,10 +4,20 @@
  * @vitest-environment node
  */
-import { databaseMock, loggerMock } from '@sim/testing'
+import { loggerMock } from '@sim/testing'
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
-vi.mock('@sim/db', () => databaseMock)
+vi.mock('@sim/db', () => ({
+  db: {
+    select: vi.fn().mockReturnThis(),
+    from: vi.fn().mockReturnThis(),
+    where: vi.fn().mockReturnThis(),
+    limit: vi.fn().mockReturnValue([]),
+    update: vi.fn().mockReturnThis(),
+    set: vi.fn().mockReturnThis(),
+    orderBy: vi.fn().mockReturnThis(),
+  },
+}))
 vi.mock('@/lib/oauth/oauth', () => ({
   refreshOAuthToken: vi.fn(),
@@ -24,36 +34,13 @@ import {
   refreshTokenIfNeeded,
 } from '@/app/api/auth/oauth/utils'
-const mockDb = db as any
+const mockDbTyped = db as any
 const mockRefreshOAuthToken = refreshOAuthToken as any
-/**
- * Creates a chainable mock for db.select() calls.
- * Returns a nested chain: select() -> from() -> where() -> limit() / orderBy()
- */
-function mockSelectChain(limitResult: unknown[]) {
-  const mockLimit = vi.fn().mockReturnValue(limitResult)
-  const mockOrderBy = vi.fn().mockReturnValue(limitResult)
-  const mockWhere = vi.fn().mockReturnValue({ limit: mockLimit, orderBy: mockOrderBy })
-  const mockFrom = vi.fn().mockReturnValue({ where: mockWhere })
-  mockDb.select.mockReturnValueOnce({ from: mockFrom })
-  return { mockFrom, mockWhere, mockLimit }
-}
-/**
- * Creates a chainable mock for db.update() calls.
- * Returns a nested chain: update() -> set() -> where()
- */
-function mockUpdateChain() {
-  const mockWhere = vi.fn().mockResolvedValue({})
-  const mockSet = vi.fn().mockReturnValue({ where: mockWhere })
-  mockDb.update.mockReturnValueOnce({ set: mockSet })
-  return { mockSet, mockWhere }
-}
 describe('OAuth Utils', () => {
   beforeEach(() => {
     vi.clearAllMocks()
+    mockDbTyped.limit.mockReturnValue([])
   })
   afterEach(() => {
@@ -63,20 +50,20 @@ describe('OAuth Utils', () => {
   describe('getCredential', () => {
     it('should return credential when found', async () => {
       const mockCredential = { id: 'credential-id', userId: 'test-user-id' }
-      const { mockFrom, mockWhere, mockLimit } = mockSelectChain([mockCredential])
+      mockDbTyped.limit.mockReturnValueOnce([mockCredential])
       const credential = await getCredential('request-id', 'credential-id', 'test-user-id')
-      expect(mockDb.select).toHaveBeenCalled()
-      expect(mockFrom).toHaveBeenCalled()
-      expect(mockWhere).toHaveBeenCalled()
-      expect(mockLimit).toHaveBeenCalledWith(1)
+      expect(mockDbTyped.select).toHaveBeenCalled()
+      expect(mockDbTyped.from).toHaveBeenCalled()
+      expect(mockDbTyped.where).toHaveBeenCalled()
+      expect(mockDbTyped.limit).toHaveBeenCalledWith(1)
       expect(credential).toEqual(mockCredential)
     })
     it('should return undefined when credential is not found', async () => {
-      mockSelectChain([])
+      mockDbTyped.limit.mockReturnValueOnce([])
      const credential = await getCredential('request-id', 'nonexistent-id', 'test-user-id')
@@ -115,12 +102,11 @@ describe('OAuth Utils', () => {
        refreshToken: 'new-refresh-token',
      })
-      mockUpdateChain()
      const result = await refreshTokenIfNeeded('request-id', mockCredential, 'credential-id')
      expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token')
-      expect(mockDb.update).toHaveBeenCalled()
+      expect(mockDbTyped.update).toHaveBeenCalled()
+      expect(mockDbTyped.set).toHaveBeenCalled()
      expect(result).toEqual({ accessToken: 'new-token', refreshed: true })
    })
@@ -166,7 +152,7 @@ describe('OAuth Utils', () => {
        providerId: 'google',
        userId: 'test-user-id',
      }
-      mockSelectChain([mockCredential])
+      mockDbTyped.limit.mockReturnValueOnce([mockCredential])
      const token = await refreshAccessTokenIfNeeded('credential-id', 'test-user-id', 'request-id')
@@ -183,8 +169,7 @@ describe('OAuth Utils', () => {
        providerId: 'google',
        userId: 'test-user-id',
      }
-      mockSelectChain([mockCredential])
-      mockUpdateChain()
+      mockDbTyped.limit.mockReturnValueOnce([mockCredential])
      mockRefreshOAuthToken.mockResolvedValueOnce({
        accessToken: 'new-token',
@@ -195,12 +180,13 @@ describe('OAuth Utils', () => {
      const token = await refreshAccessTokenIfNeeded('credential-id', 'test-user-id', 'request-id')
      expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token')
-      expect(mockDb.update).toHaveBeenCalled()
+      expect(mockDbTyped.update).toHaveBeenCalled()
+      expect(mockDbTyped.set).toHaveBeenCalled()
      expect(token).toBe('new-token')
    })
    it('should return null if credential not found', async () => {
-      mockSelectChain([])
+      mockDbTyped.limit.mockReturnValueOnce([])
      const token = await refreshAccessTokenIfNeeded('nonexistent-id', 'test-user-id', 'request-id')
@@ -216,7 +202,7 @@ describe('OAuth Utils', () => {
        providerId: 'google',
        userId: 'test-user-id',
      }
-      mockSelectChain([mockCredential])
+      mockDbTyped.limit.mockReturnValueOnce([mockCredential])
      mockRefreshOAuthToken.mockResolvedValueOnce(null)

View File

@@ -85,7 +85,7 @@ const ChatMessageSchema = z.object({
   chatId: z.string().optional(),
   workflowId: z.string().optional(),
   workflowName: z.string().optional(),
-  model: z.string().optional().default('claude-opus-4-5'),
+  model: z.string().optional().default('claude-opus-4-6'),
   mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'),
   prefetch: z.boolean().optional(),
   createNewChat: z.boolean().optional().default(false),
@@ -113,7 +113,6 @@ const ChatMessageSchema = z.object({
       workflowId: z.string().optional(),
       knowledgeId: z.string().optional(),
       blockId: z.string().optional(),
-      blockIds: z.array(z.string()).optional(),
      templateId: z.string().optional(),
      executionId: z.string().optional(),
      // For workflow_block, provide both workflowId and blockId
@@ -160,20 +159,6 @@ export async function POST(req: NextRequest) {
      commands,
    } = ChatMessageSchema.parse(body)
-    const normalizedContexts = Array.isArray(contexts)
-      ? contexts.map((ctx) => {
-          if (ctx.kind !== 'blocks') return ctx
-          if (Array.isArray(ctx.blockIds) && ctx.blockIds.length > 0) return ctx
-          if (ctx.blockId) {
-            return {
-              ...ctx,
-              blockIds: [ctx.blockId],
-            }
-          }
-          return ctx
-        })
-      : contexts
    // Resolve workflowId - if not provided, use first workflow or find by name
    const resolved = await resolveWorkflowIdForUser(
      authenticatedUserId,
@@ -191,10 +176,10 @@ export async function POST(req: NextRequest) {
    const userMessageIdToUse = userMessageId || crypto.randomUUID()
    try {
      logger.info(`[${tracker.requestId}] Received chat POST`, {
-        hasContexts: Array.isArray(normalizedContexts),
-        contextsCount: Array.isArray(normalizedContexts) ? normalizedContexts.length : 0,
-        contextsPreview: Array.isArray(normalizedContexts)
-          ? normalizedContexts.map((c: any) => ({
+        hasContexts: Array.isArray(contexts),
+        contextsCount: Array.isArray(contexts) ? contexts.length : 0,
+        contextsPreview: Array.isArray(contexts)
+          ? contexts.map((c: any) => ({
              kind: c?.kind,
              chatId: c?.chatId,
              workflowId: c?.workflowId,
@@ -206,25 +191,17 @@ export async function POST(req: NextRequest) {
    } catch {}
    // Preprocess contexts server-side
    let agentContexts: Array<{ type: string; content: string }> = []
-    if (Array.isArray(normalizedContexts) && normalizedContexts.length > 0) {
+    if (Array.isArray(contexts) && contexts.length > 0) {
      try {
        const { processContextsServer } = await import('@/lib/copilot/process-contents')
-        const processed = await processContextsServer(
-          normalizedContexts as any,
-          authenticatedUserId,
-          message
-        )
+        const processed = await processContextsServer(contexts as any, authenticatedUserId, message)
        agentContexts = processed
        logger.info(`[${tracker.requestId}] Contexts processed for request`, {
          processedCount: agentContexts.length,
          kinds: agentContexts.map((c) => c.type),
          lengthPreview: agentContexts.map((c) => c.content?.length ?? 0),
        })
-        if (
-          Array.isArray(normalizedContexts) &&
-          normalizedContexts.length > 0 &&
-          agentContexts.length === 0
-        ) {
+        if (Array.isArray(contexts) && contexts.length > 0 && agentContexts.length === 0) {
          logger.warn(
            `[${tracker.requestId}] Contexts provided but none processed. Check executionId for logs contexts.`
          )
@@ -238,7 +215,7 @@ export async function POST(req: NextRequest) {
    let currentChat: any = null
    let conversationHistory: any[] = []
    let actualChatId = chatId
-    const selectedModel = model || 'claude-opus-4-5'
+    const selectedModel = model || 'claude-opus-4-6'
    if (chatId || createNewChat) {
      const chatResult = await resolveOrCreateChat({
@@ -269,13 +246,11 @@ export async function POST(req: NextRequest) {
        mode,
        model: selectedModel,
        provider,
-        conversationId: effectiveConversationId,
        conversationHistory,
        contexts: agentContexts,
        fileAttachments,
        commands,
        chatId: actualChatId,
-        prefetch,
        implicitFeedback,
      },
      {
@@ -457,15 +432,10 @@ export async function POST(req: NextRequest) {
      content: message,
      timestamp: new Date().toISOString(),
      ...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }),
-      ...(Array.isArray(normalizedContexts) &&
-        normalizedContexts.length > 0 && {
-          contexts: normalizedContexts,
-        }),
-      ...(Array.isArray(normalizedContexts) &&
-        normalizedContexts.length > 0 && {
-          contentBlocks: [
-            { type: 'contexts', contexts: normalizedContexts as any, timestamp: Date.now() },
-          ],
-        }),
+      ...(Array.isArray(contexts) && contexts.length > 0 && { contexts }),
+      ...(Array.isArray(contexts) &&
+        contexts.length > 0 && {
+          contentBlocks: [{ type: 'contexts', contexts: contexts as any, timestamp: Date.now() }],
+        }),
    }

View File

@@ -4,12 +4,16 @@
  *
  * @vitest-environment node
  */
-import { createEnvMock, databaseMock, loggerMock } from '@sim/testing'
+import { createEnvMock, createMockLogger } from '@sim/testing'
 import { beforeEach, describe, expect, it, vi } from 'vitest'
+const loggerMock = vi.hoisted(() => ({
+  createLogger: () => createMockLogger(),
+}))
 vi.mock('drizzle-orm')
 vi.mock('@sim/logger', () => loggerMock)
-vi.mock('@sim/db', () => databaseMock)
+vi.mock('@sim/db')
 vi.mock('@/lib/knowledge/documents/utils', () => ({
   retryWithExponentialBackoff: (fn: any) => fn(),
 }))

View File

@@ -38,7 +38,7 @@ import {
 const logger = createLogger('CopilotMcpAPI')
 const mcpRateLimiter = new RateLimiter()
-const DEFAULT_COPILOT_MODEL = 'claude-opus-4-5'
+const DEFAULT_COPILOT_MODEL = 'claude-opus-4-6'
 export const dynamic = 'force-dynamic'
 export const runtime = 'nodejs'

View File

@@ -3,14 +3,17 @@
  *
  * @vitest-environment node
  */
-import { databaseMock, loggerMock } from '@sim/testing'
+import { loggerMock } from '@sim/testing'
 import { NextRequest } from 'next/server'
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
-const { mockGetSession, mockAuthorizeWorkflowByWorkspacePermission } = vi.hoisted(() => ({
-  mockGetSession: vi.fn(),
-  mockAuthorizeWorkflowByWorkspacePermission: vi.fn(),
-}))
+const { mockGetSession, mockAuthorizeWorkflowByWorkspacePermission, mockDbSelect, mockDbUpdate } =
+  vi.hoisted(() => ({
+    mockGetSession: vi.fn(),
+    mockAuthorizeWorkflowByWorkspacePermission: vi.fn(),
+    mockDbSelect: vi.fn(),
+    mockDbUpdate: vi.fn(),
+  }))
 vi.mock('@/lib/auth', () => ({
   getSession: mockGetSession,
@@ -20,7 +23,12 @@ vi.mock('@/lib/workflows/utils', () => ({
   authorizeWorkflowByWorkspacePermission: mockAuthorizeWorkflowByWorkspacePermission,
 }))
-vi.mock('@sim/db', () => databaseMock)
+vi.mock('@sim/db', () => ({
+  db: {
+    select: mockDbSelect,
+    update: mockDbUpdate,
+  },
+}))
 vi.mock('@sim/db/schema', () => ({
   workflow: { id: 'id', userId: 'userId', workspaceId: 'workspaceId' },
@@ -51,9 +59,6 @@ function createParams(id: string): { params: Promise<{ id: string }> } {
   return { params: Promise.resolve({ id }) }
 }
-const mockDbSelect = databaseMock.db.select as ReturnType<typeof vi.fn>
-const mockDbUpdate = databaseMock.db.update as ReturnType<typeof vi.fn>
 function mockDbChain(selectResults: unknown[][]) {
   let selectCallIndex = 0
   mockDbSelect.mockImplementation(() => ({

View File

@@ -3,14 +3,17 @@
  *
  * @vitest-environment node
  */
-import { databaseMock, loggerMock } from '@sim/testing'
+import { loggerMock } from '@sim/testing'
 import { NextRequest } from 'next/server'
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
-const { mockGetSession, mockAuthorizeWorkflowByWorkspacePermission } = vi.hoisted(() => ({
-  mockGetSession: vi.fn(),
-  mockAuthorizeWorkflowByWorkspacePermission: vi.fn(),
-}))
+const { mockGetSession, mockAuthorizeWorkflowByWorkspacePermission, mockDbSelect } = vi.hoisted(
+  () => ({
+    mockGetSession: vi.fn(),
+    mockAuthorizeWorkflowByWorkspacePermission: vi.fn(),
+    mockDbSelect: vi.fn(),
+  })
+)
 vi.mock('@/lib/auth', () => ({
   getSession: mockGetSession,
@@ -20,7 +23,11 @@ vi.mock('@/lib/workflows/utils', () => ({
   authorizeWorkflowByWorkspacePermission: mockAuthorizeWorkflowByWorkspacePermission,
 }))
-vi.mock('@sim/db', () => databaseMock)
+vi.mock('@sim/db', () => ({
+  db: {
+    select: mockDbSelect,
+  },
+}))
 vi.mock('@sim/db/schema', () => ({
   workflow: { id: 'id', userId: 'userId', workspaceId: 'workspaceId' },
@@ -55,8 +62,6 @@ function createRequest(url: string): NextRequest {
   return new NextRequest(new URL(url), { method: 'GET' })
 }
-const mockDbSelect = databaseMock.db.select as ReturnType<typeof vi.fn>
 function mockDbChain(results: any[]) {
   let callIndex = 0
   mockDbSelect.mockImplementation(() => ({

View File

@@ -191,84 +191,3 @@ export async function GET(request: NextRequest) {
     )
   }
 }
-// Delete a label from a page
-export async function DELETE(request: NextRequest) {
-  try {
-    const auth = await checkSessionOrInternalAuth(request)
-    if (!auth.success || !auth.userId) {
-      return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
-    }
-    const {
-      domain,
-      accessToken,
-      cloudId: providedCloudId,
-      pageId,
-      labelName,
-    } = await request.json()
-    if (!domain) {
-      return NextResponse.json({ error: 'Domain is required' }, { status: 400 })
-    }
-    if (!accessToken) {
-      return NextResponse.json({ error: 'Access token is required' }, { status: 400 })
-    }
-    if (!pageId) {
-      return NextResponse.json({ error: 'Page ID is required' }, { status: 400 })
-    }
-    if (!labelName) {
-      return NextResponse.json({ error: 'Label name is required' }, { status: 400 })
-    }
-    const pageIdValidation = validateAlphanumericId(pageId, 'pageId', 255)
-    if (!pageIdValidation.isValid) {
-      return NextResponse.json({ error: pageIdValidation.error }, { status: 400 })
-    }
-    const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken))
-    const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
-    if (!cloudIdValidation.isValid) {
-      return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
-    }
-    const encodedLabel = encodeURIComponent(labelName.trim())
-    const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/rest/api/content/${pageId}/label?name=${encodedLabel}`
-    const response = await fetch(url, {
-      method: 'DELETE',
-      headers: {
-        Accept: 'application/json',
-        Authorization: `Bearer ${accessToken}`,
-      },
-    })
-    if (!response.ok) {
-      const errorData = await response.json().catch(() => null)
-      logger.error('Confluence API error response:', {
-        status: response.status,
-        statusText: response.statusText,
-        error: JSON.stringify(errorData, null, 2),
-      })
-      const errorMessage =
-        errorData?.message || `Failed to delete Confluence label (${response.status})`
-      return NextResponse.json({ error: errorMessage }, { status: response.status })
-    }
-    return NextResponse.json({
-      pageId,
-      labelName,
-      deleted: true,
-    })
-  } catch (error) {
-    logger.error('Error deleting Confluence label:', error)
-    return NextResponse.json(
-      { error: (error as Error).message || 'Internal server error' },
-      { status: 500 }
-    )
-  }
-}

View File

@@ -1,103 +0,0 @@
-import { createLogger } from '@sim/logger'
-import { type NextRequest, NextResponse } from 'next/server'
-import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid'
-import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation'
-import { getConfluenceCloudId } from '@/tools/confluence/utils'
-const logger = createLogger('ConfluencePagesByLabelAPI')
-export const dynamic = 'force-dynamic'
-export async function GET(request: NextRequest) {
-  try {
-    const auth = await checkSessionOrInternalAuth(request)
-    if (!auth.success || !auth.userId) {
-      return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
-    }
-    const { searchParams } = new URL(request.url)
-    const domain = searchParams.get('domain')
-    const accessToken = searchParams.get('accessToken')
-    const labelId = searchParams.get('labelId')
-    const providedCloudId = searchParams.get('cloudId')
-    const limit = searchParams.get('limit') || '50'
-    const cursor = searchParams.get('cursor')
-    if (!domain) {
-      return NextResponse.json({ error: 'Domain is required' }, { status: 400 })
-    }
-    if (!accessToken) {
-      return NextResponse.json({ error: 'Access token is required' }, { status: 400 })
-    }
-    if (!labelId) {
-      return NextResponse.json({ error: 'Label ID is required' }, { status: 400 })
-    }
-    const labelIdValidation = validateAlphanumericId(labelId, 'labelId', 255)
-    if (!labelIdValidation.isValid) {
-      return NextResponse.json({ error: labelIdValidation.error }, { status: 400 })
-    }
-    const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken))
-    const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
-    if (!cloudIdValidation.isValid) {
-      return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
-    }
-    const queryParams = new URLSearchParams()
-    queryParams.append('limit', String(Math.min(Number(limit), 250)))
-    if (cursor) {
-      queryParams.append('cursor', cursor)
-    }
-    const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/labels/${labelId}/pages?${queryParams.toString()}`
-    const response = await fetch(url, {
-      method: 'GET',
-      headers: {
-        Accept: 'application/json',
-        Authorization: `Bearer ${accessToken}`,
-      },
-    })
-    if (!response.ok) {
-      const errorData = await response.json().catch(() => null)
-      logger.error('Confluence API error response:', {
-        status: response.status,
-        statusText: response.statusText,
-        error: JSON.stringify(errorData, null, 2),
-      })
-      const errorMessage = errorData?.message || `Failed to get pages by label (${response.status})`
-      return NextResponse.json({ error: errorMessage }, { status: response.status })
-    }
-    const data = await response.json()
-    const pages = (data.results || []).map((page: any) => ({
-      id: page.id,
-      title: page.title,
-      status: page.status ?? null,
-      spaceId: page.spaceId ?? null,
-      parentId: page.parentId ?? null,
-      authorId: page.authorId ?? null,
-      createdAt: page.createdAt ?? null,
-      version: page.version ?? null,
-    }))
-    return NextResponse.json({
-      pages,
-      labelId,
-      nextCursor: data._links?.next
-        ? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor')
-        : null,
-    })
-  } catch (error) {
-    logger.error('Error getting pages by label:', error)
-    return NextResponse.json(
-      { error: (error as Error).message || 'Internal server error' },
-      { status: 500 }
-    )
-  }
-}

View File

@@ -1,98 +0,0 @@
-import { createLogger } from '@sim/logger'
-import { type NextRequest, NextResponse } from 'next/server'
-import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid'
-import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation'
-import { getConfluenceCloudId } from '@/tools/confluence/utils'
-const logger = createLogger('ConfluenceSpaceLabelsAPI')
-export const dynamic = 'force-dynamic'
-export async function GET(request: NextRequest) {
-  try {
-    const auth = await checkSessionOrInternalAuth(request)
-    if (!auth.success || !auth.userId) {
-      return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
-    }
-    const { searchParams } = new URL(request.url)
-    const domain = searchParams.get('domain')
-    const accessToken = searchParams.get('accessToken')
-    const spaceId = searchParams.get('spaceId')
-    const providedCloudId = searchParams.get('cloudId')
-    const limit = searchParams.get('limit') || '25'
-    const cursor = searchParams.get('cursor')
-    if (!domain) {
-      return NextResponse.json({ error: 'Domain is required' }, { status: 400 })
-    }
-    if (!accessToken) {
-      return NextResponse.json({ error: 'Access token is required' }, { status: 400 })
-    }
-    if (!spaceId) {
-      return NextResponse.json({ error: 'Space ID is required' }, { status: 400 })
-    }
-    const spaceIdValidation = validateAlphanumericId(spaceId, 'spaceId', 255)
-    if (!spaceIdValidation.isValid) {
-      return NextResponse.json({ error: spaceIdValidation.error }, { status: 400 })
-    }
-    const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken))
-    const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
-    if (!cloudIdValidation.isValid) {
-      return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
-    }
-    const queryParams = new URLSearchParams()
-    queryParams.append('limit', String(Math.min(Number(limit), 250)))
-    if (cursor) {
-      queryParams.append('cursor', cursor)
-    }
-    const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/spaces/${spaceId}/labels?${queryParams.toString()}`
-    const response = await fetch(url, {
-      method: 'GET',
-      headers: {
-        Accept: 'application/json',
-        Authorization: `Bearer ${accessToken}`,
-      },
-    })
-    if (!response.ok) {
-      const errorData = await response.json().catch(() => null)
-      logger.error('Confluence API error response:', {
-        status: response.status,
-        statusText: response.statusText,
-        error: JSON.stringify(errorData, null, 2),
-      })
-      const errorMessage = errorData?.message || `Failed to list space labels (${response.status})`
-      return NextResponse.json({ error: errorMessage }, { status: response.status })
-    }
-    const data = await response.json()
-    const labels = (data.results || []).map((label: any) => ({
-      id: label.id,
-      name: label.name,
-      prefix: label.prefix || 'global',
-    }))
-    return NextResponse.json({
-      labels,
-      spaceId,
-      nextCursor: data._links?.next
-        ? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor')
-        : null,
-    })
-  } catch (error) {
-    logger.error('Error listing space labels:', error)
-    return NextResponse.json(
-      { error: (error as Error).message || 'Internal server error' },
-      { status: 500 }
-    )
-  }
-}

View File

@@ -8,7 +8,7 @@ import { resolveWorkflowIdForUser } from '@/lib/workflows/utils'
 import { authenticateV1Request } from '@/app/api/v1/auth'
 const logger = createLogger('CopilotHeadlessAPI')
-const DEFAULT_COPILOT_MODEL = 'claude-opus-4-5'
+const DEFAULT_COPILOT_MODEL = 'claude-opus-4-6'
 const RequestSchema = z.object({
   message: z.string().min(1, 'message is required'),

View File

@@ -29,7 +29,7 @@ const patchBodySchema = z
     description: z
       .string()
       .trim()
-      .max(2000, 'Description must be 2000 characters or less')
+      .max(500, 'Description must be 500 characters or less')
       .nullable()
       .optional(),
     isActive: z.literal(true).optional(), // Set to true to activate this version

View File

@@ -12,7 +12,7 @@ import {
 import { generateRequestId } from '@/lib/core/utils/request'
 import { SSE_HEADERS } from '@/lib/core/utils/sse'
 import { getBaseUrl } from '@/lib/core/utils/urls'
-import { createExecutionEventWriter, setExecutionMeta } from '@/lib/execution/event-buffer'
+import { markExecutionCancelled } from '@/lib/execution/cancellation'
 import { processInputFileFields } from '@/lib/execution/files'
 import { preprocessExecution } from '@/lib/execution/preprocessing'
 import { LoggingSession } from '@/lib/logs/execution/logging-session'
@@ -700,29 +700,17 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
   const timeoutController = createTimeoutAbortController(preprocessResult.executionTimeout?.sync)
   let isStreamClosed = false
-  const eventWriter = createExecutionEventWriter(executionId)
-  setExecutionMeta(executionId, {
-    status: 'active',
-    userId: actorUserId,
-    workflowId,
-  }).catch(() => {})
   const stream = new ReadableStream<Uint8Array>({
     async start(controller) {
-      let finalMetaStatus: 'complete' | 'error' | 'cancelled' | null = null
       const sendEvent = (event: ExecutionEvent) => {
-        if (!isStreamClosed) {
-          try {
-            controller.enqueue(encodeSSEEvent(event))
-          } catch {
-            isStreamClosed = true
-          }
-        }
-        if (event.type !== 'stream:chunk' && event.type !== 'stream:done') {
-          eventWriter.write(event).catch(() => {})
-        }
+        if (isStreamClosed) return
+        try {
+          controller.enqueue(encodeSSEEvent(event))
+        } catch {
+          isStreamClosed = true
+        }
       }
       try {
         const startTime = new Date()
@@ -841,12 +829,14 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
         const reader = streamingExec.stream.getReader()
         const decoder = new TextDecoder()
+        let chunkCount = 0
         try {
           while (true) {
             const { done, value } = await reader.read()
             if (done) break
+            chunkCount++
             const chunk = decoder.decode(value, { stream: true })
             sendEvent({
               type: 'stream:chunk',
@@ -961,7 +951,6 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
               duration: result.metadata?.duration || 0,
             },
           })
-          finalMetaStatus = 'error'
         } else {
           logger.info(`[${requestId}] Workflow execution was cancelled`)
@@ -974,7 +963,6 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
               duration: result.metadata?.duration || 0,
             },
           })
-          finalMetaStatus = 'cancelled'
         }
         return
       }
@@ -998,7 +986,6 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
             endTime: result.metadata?.endTime || new Date().toISOString(),
           },
         })
-        finalMetaStatus = 'complete'
       } catch (error: unknown) {
         const isTimeout = isTimeoutError(error) || timeoutController.isTimedOut()
         const errorMessage = isTimeout
@@ -1030,18 +1017,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
             duration: executionResult?.metadata?.duration || 0,
           },
         })
-        finalMetaStatus = 'error'
       } finally {
-        try {
-          await eventWriter.close()
-        } catch (closeError) {
-          logger.warn(`[${requestId}] Failed to close event writer`, {
-            error: closeError instanceof Error ? closeError.message : String(closeError),
-          })
-        }
-        if (finalMetaStatus) {
-          setExecutionMeta(executionId, { status: finalMetaStatus }).catch(() => {})
-        }
         timeoutController.cleanup()
         if (executionId) {
           await cleanupExecutionBase64Cache(executionId)
@@ -1056,7 +1032,10 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
     },
     cancel() {
       isStreamClosed = true
-      logger.info(`[${requestId}] Client disconnected from SSE stream`)
+      timeoutController.cleanup()
+      logger.info(`[${requestId}] Client aborted SSE stream, signalling cancellation`)
+      timeoutController.abort()
+      markExecutionCancelled(executionId).catch(() => {})
     },
   })
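The cancel() change above ties a client disconnect to server-side cancellation. A minimal sketch of that pattern, assuming a generic run function and a stand-in markCancelled for the real markExecutionCancelled:

// Sketch: when the SSE consumer disconnects, ReadableStream.cancel() fires;
// aborting the controller stops in-flight work and the cancellation is
// recorded best-effort, mirroring the .catch(() => {}) in the diff.
function makeExecutionStream(
  run: (signal: AbortSignal, enqueue: (chunk: Uint8Array) => void) => Promise<void>,
  markCancelled: () => Promise<void> // stand-in for markExecutionCancelled (assumed)
): ReadableStream<Uint8Array> {
  const abort = new AbortController()
  return new ReadableStream<Uint8Array>({
    async start(controller) {
      try {
        // run() performs the execution and emits SSE frames via enqueue
        await run(abort.signal, (chunk) => controller.enqueue(chunk))
      } finally {
        try {
          controller.close()
        } catch {}
      }
    },
    cancel() {
      abort.abort() // signal the executor to stop
      markCancelled().catch(() => {}) // best-effort cancellation record
    },
  })
}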

View File

@@ -1,170 +0,0 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
import {
type ExecutionStreamStatus,
getExecutionMeta,
readExecutionEvents,
} from '@/lib/execution/event-buffer'
import { formatSSEEvent } from '@/lib/workflows/executor/execution-events'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
const logger = createLogger('ExecutionStreamReconnectAPI')
const POLL_INTERVAL_MS = 500
const MAX_POLL_DURATION_MS = 10 * 60 * 1000 // 10 minutes
function isTerminalStatus(status: ExecutionStreamStatus): boolean {
return status === 'complete' || status === 'error' || status === 'cancelled'
}
export const runtime = 'nodejs'
export const dynamic = 'force-dynamic'
export async function GET(
req: NextRequest,
{ params }: { params: Promise<{ id: string; executionId: string }> }
) {
const { id: workflowId, executionId } = await params
try {
const auth = await checkHybridAuth(req, { requireWorkflowId: false })
if (!auth.success || !auth.userId) {
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
}
const workflowAuthorization = await authorizeWorkflowByWorkspacePermission({
workflowId,
userId: auth.userId,
action: 'read',
})
if (!workflowAuthorization.allowed) {
return NextResponse.json(
{ error: workflowAuthorization.message || 'Access denied' },
{ status: workflowAuthorization.status }
)
}
const meta = await getExecutionMeta(executionId)
if (!meta) {
return NextResponse.json({ error: 'Execution buffer not found or expired' }, { status: 404 })
}
if (meta.workflowId && meta.workflowId !== workflowId) {
return NextResponse.json(
{ error: 'Execution does not belong to this workflow' },
{ status: 403 }
)
}
const fromParam = req.nextUrl.searchParams.get('from')
const parsed = fromParam ? Number.parseInt(fromParam, 10) : 0
const fromEventId = Number.isFinite(parsed) && parsed >= 0 ? parsed : 0
logger.info('Reconnection stream requested', {
workflowId,
executionId,
fromEventId,
metaStatus: meta.status,
})
const encoder = new TextEncoder()
let closed = false
const stream = new ReadableStream<Uint8Array>({
async start(controller) {
let lastEventId = fromEventId
const pollDeadline = Date.now() + MAX_POLL_DURATION_MS
const enqueue = (text: string) => {
if (closed) return
try {
controller.enqueue(encoder.encode(text))
} catch {
closed = true
}
}
try {
const events = await readExecutionEvents(executionId, lastEventId)
for (const entry of events) {
if (closed) return
enqueue(formatSSEEvent(entry.event))
lastEventId = entry.eventId
}
const currentMeta = await getExecutionMeta(executionId)
if (!currentMeta || isTerminalStatus(currentMeta.status)) {
enqueue('data: [DONE]\n\n')
if (!closed) controller.close()
return
}
while (!closed && Date.now() < pollDeadline) {
await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS))
if (closed) return
const newEvents = await readExecutionEvents(executionId, lastEventId)
for (const entry of newEvents) {
if (closed) return
enqueue(formatSSEEvent(entry.event))
lastEventId = entry.eventId
}
const polledMeta = await getExecutionMeta(executionId)
if (!polledMeta || isTerminalStatus(polledMeta.status)) {
const finalEvents = await readExecutionEvents(executionId, lastEventId)
for (const entry of finalEvents) {
if (closed) return
enqueue(formatSSEEvent(entry.event))
lastEventId = entry.eventId
}
enqueue('data: [DONE]\n\n')
if (!closed) controller.close()
return
}
}
if (!closed) {
logger.warn('Reconnection stream poll deadline reached', { executionId })
enqueue('data: [DONE]\n\n')
controller.close()
}
} catch (error) {
logger.error('Error in reconnection stream', {
executionId,
error: error instanceof Error ? error.message : String(error),
})
if (!closed) {
try {
controller.close()
} catch {}
}
}
},
cancel() {
closed = true
logger.info('Client disconnected from reconnection stream', { executionId })
},
})
return new NextResponse(stream, {
headers: {
...SSE_HEADERS,
'X-Execution-Id': executionId,
},
})
} catch (error: any) {
logger.error('Failed to start reconnection stream', {
workflowId,
executionId,
error: error.message,
})
return NextResponse.json(
{ error: error.message || 'Failed to start reconnection stream' },
{ status: 500 }
)
}
}
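The deleted route above replayed buffered events from a client-supplied from event id and closed with a data: [DONE] sentinel. A sketch of the consumer it implied; the URL path is an assumption, since the diff does not show the route's file name:

// Hypothetical client for the removed reconnect endpoint: resume from the
// last seen eventId and stop on the [DONE] sentinel the route emitted.
async function replayExecutionEvents(
  workflowId: string,
  executionId: string,
  fromEventId: number,
  onFrame: (frame: string) => void
): Promise<void> {
  const res = await fetch(
    `/api/workflows/${workflowId}/executions/${executionId}/stream?from=${fromEventId}` // path assumed
  )
  if (!res.ok || !res.body) throw new Error(`Reconnect failed (${res.status})`)
  const reader = res.body.getReader()
  const decoder = new TextDecoder()
  let buffer = ''
  while (true) {
    const { done, value } = await reader.read()
    if (done) return
    buffer += decoder.decode(value, { stream: true })
    let sep: number
    // SSE frames are separated by a blank line
    while ((sep = buffer.indexOf('\n\n')) !== -1) {
      const frame = buffer.slice(0, sep)
      buffer = buffer.slice(sep + 2)
      if (frame === 'data: [DONE]') return
      if (frame) onFrame(frame)
    }
  }
}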

View File

@@ -5,7 +5,7 @@
  * @vitest-environment node
  */
-import { loggerMock, setupGlobalFetchMock } from '@sim/testing'
+import { loggerMock } from '@sim/testing'
 import { NextRequest } from 'next/server'
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
@@ -284,7 +284,9 @@ describe('Workflow By ID API Route', () => {
         where: vi.fn().mockResolvedValue([{ id: 'workflow-123' }]),
       })
-      setupGlobalFetchMock({ ok: true })
+      global.fetch = vi.fn().mockResolvedValue({
+        ok: true,
+      })
       const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123', {
         method: 'DELETE',
@@ -329,7 +331,9 @@ describe('Workflow By ID API Route', () => {
         where: vi.fn().mockResolvedValue([{ id: 'workflow-123' }]),
       })
-      setupGlobalFetchMock({ ok: true })
+      global.fetch = vi.fn().mockResolvedValue({
+        ok: true,
+      })
       const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123', {
         method: 'DELETE',

View File

@@ -12,7 +12,7 @@ import { getUserEntityPermissions, getWorkspaceById } from '@/lib/workspaces/per
 const logger = createLogger('WorkspaceBYOKKeysAPI')
-const VALID_PROVIDERS = ['openai', 'anthropic', 'google', 'mistral', 'serper'] as const
+const VALID_PROVIDERS = ['openai', 'anthropic', 'google', 'mistral'] as const
 const UpsertKeySchema = z.object({
   providerId: z.enum(VALID_PROVIDERS),

View File

@@ -13,6 +13,9 @@ export type CommandId =
   | 'goto-logs'
   | 'open-search'
   | 'run-workflow'
+  | 'focus-copilot-tab'
+  | 'focus-toolbar-tab'
+  | 'focus-editor-tab'
   | 'clear-terminal-console'
   | 'focus-toolbar-search'
   | 'clear-notifications'
@@ -72,6 +75,21 @@ export const COMMAND_DEFINITIONS: Record<CommandId, CommandDefinition> = {
     shortcut: 'Mod+Enter',
     allowInEditable: false,
   },
+  'focus-copilot-tab': {
+    id: 'focus-copilot-tab',
+    shortcut: 'C',
+    allowInEditable: false,
+  },
+  'focus-toolbar-tab': {
+    id: 'focus-toolbar-tab',
+    shortcut: 'T',
+    allowInEditable: false,
+  },
+  'focus-editor-tab': {
+    id: 'focus-editor-tab',
+    shortcut: 'E',
+    allowInEditable: false,
+  },
   'clear-terminal-console': {
     id: 'clear-terminal-console',
     shortcut: 'Mod+D',

View File

@@ -113,7 +113,7 @@ export function VersionDescriptionModal({
             className='min-h-[120px] resize-none'
             value={description}
             onChange={(e) => setDescription(e.target.value)}
-            maxLength={2000}
+            maxLength={500}
             disabled={isGenerating}
           />
           <div className='flex items-center justify-between'>
@@ -123,7 +123,7 @@ export function VersionDescriptionModal({
             </p>
           )}
           {!updateMutation.error && !generateMutation.error && <div />}
-          <p className='text-[11px] text-[var(--text-tertiary)]'>{description.length}/2000</p>
+          <p className='text-[11px] text-[var(--text-tertiary)]'>{description.length}/500</p>
         </div>
       </ModalBody>
       <ModalFooter>

View File

@@ -57,21 +57,6 @@ export function useChangeDetection({
       }
     }
-    if (block.triggerMode) {
-      const triggerConfigValue = blockSubValues?.triggerConfig
-      if (
-        triggerConfigValue &&
-        typeof triggerConfigValue === 'object' &&
-        !subBlocks.triggerConfig
-      ) {
-        subBlocks.triggerConfig = {
-          id: 'triggerConfig',
-          type: 'short-input',
-          value: triggerConfigValue,
-        }
-      }
-    }
     blocksWithSubBlocks[blockId] = {
       ...block,
       subBlocks,

View File

@@ -3,7 +3,6 @@ import {
   buildCanonicalIndex,
   evaluateSubBlockCondition,
   isSubBlockFeatureEnabled,
-  isSubBlockHiddenByHostedKey,
   isSubBlockVisibleForMode,
 } from '@/lib/workflows/subblocks/visibility'
 import type { BlockConfig, SubBlockConfig, SubBlockType } from '@/blocks/types'
@@ -109,9 +108,6 @@ export function useEditorSubblockLayout(
       // Check required feature if specified - declarative feature gating
       if (!isSubBlockFeatureEnabled(block)) return false
-      // Hide tool API key fields when hosted key is available
-      if (isSubBlockHiddenByHostedKey(block)) return false
       // Special handling for trigger-config type (legacy trigger configuration UI)
       if (block.type === ('trigger-config' as SubBlockType)) {
         const isPureTriggerBlock = config?.triggers?.enabled && config.category === 'triggers'

View File

@@ -340,7 +340,13 @@ export const Panel = memo(function Panel() {
    * Register global keyboard shortcuts using the central commands registry.
    *
    * - Mod+Enter: Run / cancel workflow (matches the Run button behavior)
+   * - C: Focus Copilot tab
+   * - T: Focus Toolbar tab
+   * - E: Focus Editor tab
    * - Mod+F: Focus Toolbar tab and search input
+   *
+   * The tab-switching commands are disabled inside editable elements so typing
+   * in inputs or textareas is not interrupted.
    */
   useRegisterGlobalCommands(() =>
     createCommands([
@@ -357,6 +363,33 @@ export const Panel = memo(function Panel() {
           allowInEditable: false,
         },
       },
+      {
+        id: 'focus-copilot-tab',
+        handler: () => {
+          setActiveTab('copilot')
+        },
+        overrides: {
+          allowInEditable: false,
+        },
+      },
+      {
+        id: 'focus-toolbar-tab',
+        handler: () => {
+          setActiveTab('toolbar')
+        },
+        overrides: {
+          allowInEditable: false,
+        },
+      },
+      {
+        id: 'focus-editor-tab',
+        handler: () => {
+          setActiveTab('editor')
+        },
+        overrides: {
+          allowInEditable: false,
+        },
+      },
       {
         id: 'focus-toolbar-search',
         handler: () => {

View File

@@ -15,7 +15,6 @@ import {
   evaluateSubBlockCondition,
   hasAdvancedValues,
   isSubBlockFeatureEnabled,
-  isSubBlockHiddenByHostedKey,
   isSubBlockVisibleForMode,
   resolveDependencyValue,
 } from '@/lib/workflows/subblocks/visibility'
@@ -829,7 +828,6 @@ export const WorkflowBlock = memo(function WorkflowBlock({
       if (block.hidden) return false
       if (block.hideFromPreview) return false
       if (!isSubBlockFeatureEnabled(block)) return false
-      if (isSubBlockHiddenByHostedKey(block)) return false
       const isPureTriggerBlock = config?.triggers?.enabled && config.category === 'triggers'

View File

@@ -1,4 +1,4 @@
-import { useCallback, useEffect, useRef, useState } from 'react'
+import { useCallback, useRef, useState } from 'react'
 import { createLogger } from '@sim/logger'
 import { useQueryClient } from '@tanstack/react-query'
 import { v4 as uuidv4 } from 'uuid'
@@ -46,13 +46,7 @@ import { useWorkflowStore } from '@/stores/workflows/workflow/store'
 const logger = createLogger('useWorkflowExecution')
-/**
- * Module-level Set tracking which workflows have an active reconnection effect.
- * Prevents multiple hook instances (from different components) from starting
- * concurrent reconnection streams for the same workflow during the same mount cycle.
- */
-const activeReconnections = new Set<string>()
+// Debug state validation result
 interface DebugValidationResult {
   isValid: boolean
   error?: string
@@ -60,7 +54,7 @@ interface DebugValidationResult {
 interface BlockEventHandlerConfig {
   workflowId?: string
-  executionIdRef: { current: string }
+  executionId?: string
   workflowEdges: Array<{ id: string; target: string; sourceHandle?: string | null }>
   activeBlocksSet: Set<string>
   accumulatedBlockLogs: BlockLog[]
@@ -114,15 +108,12 @@ export function useWorkflowExecution() {
   const queryClient = useQueryClient()
   const currentWorkflow = useCurrentWorkflow()
   const { activeWorkflowId, workflows } = useWorkflowRegistry()
-  const { toggleConsole, addConsole, updateConsole, cancelRunningEntries, clearExecutionEntries } =
+  const { toggleConsole, addConsole, updateConsole, cancelRunningEntries } =
     useTerminalConsoleStore()
-  const hasHydrated = useTerminalConsoleStore((s) => s._hasHydrated)
   const { getAllVariables } = useEnvironmentStore()
   const { getVariablesByWorkflowId, variables } = useVariablesStore()
   const { isExecuting, isDebugging, pendingBlocks, executor, debugContext } =
     useCurrentWorkflowExecution()
-  const setCurrentExecutionId = useExecutionStore((s) => s.setCurrentExecutionId)
-  const getCurrentExecutionId = useExecutionStore((s) => s.getCurrentExecutionId)
   const setIsExecuting = useExecutionStore((s) => s.setIsExecuting)
   const setIsDebugging = useExecutionStore((s) => s.setIsDebugging)
   const setPendingBlocks = useExecutionStore((s) => s.setPendingBlocks)
@@ -306,7 +297,7 @@ export function useWorkflowExecution() {
     (config: BlockEventHandlerConfig) => {
       const {
         workflowId,
-        executionIdRef,
+        executionId,
         workflowEdges,
         activeBlocksSet,
         accumulatedBlockLogs,
@@ -317,14 +308,6 @@ export function useWorkflowExecution() {
         onBlockCompleteCallback,
       } = config
-      /** Returns true if this execution was cancelled or superseded by another run. */
-      const isStaleExecution = () =>
-        !!(
-          workflowId &&
-          executionIdRef.current &&
-          useExecutionStore.getState().getCurrentExecutionId(workflowId) !== executionIdRef.current
-        )
       const updateActiveBlocks = (blockId: string, isActive: boolean) => {
         if (!workflowId) return
         if (isActive) {
@@ -377,7 +360,7 @@ export function useWorkflowExecution() {
           endedAt: data.endedAt,
           workflowId,
           blockId: data.blockId,
-          executionId: executionIdRef.current,
+          executionId,
           blockName: data.blockName || 'Unknown Block',
           blockType: data.blockType || 'unknown',
           iterationCurrent: data.iterationCurrent,
@@ -400,7 +383,7 @@ export function useWorkflowExecution() {
           endedAt: data.endedAt,
           workflowId,
           blockId: data.blockId,
-          executionId: executionIdRef.current,
+          executionId,
           blockName: data.blockName || 'Unknown Block',
           blockType: data.blockType || 'unknown',
           iterationCurrent: data.iterationCurrent,
@@ -427,7 +410,7 @@ export function useWorkflowExecution() {
             iterationType: data.iterationType,
             iterationContainerId: data.iterationContainerId,
           },
-          executionIdRef.current
+          executionId
         )
       }
@@ -449,12 +432,11 @@ export function useWorkflowExecution() {
             iterationType: data.iterationType,
             iterationContainerId: data.iterationContainerId,
           },
-          executionIdRef.current
+          executionId
         )
       }
       const onBlockStarted = (data: BlockStartedData) => {
-        if (isStaleExecution()) return
         updateActiveBlocks(data.blockId, true)
         markIncomingEdges(data.blockId)
@@ -471,7 +453,7 @@ export function useWorkflowExecution() {
           endedAt: undefined,
           workflowId,
           blockId: data.blockId,
-          executionId: executionIdRef.current,
+          executionId,
           blockName: data.blockName || 'Unknown Block',
           blockType: data.blockType || 'unknown',
           isRunning: true,
@@ -483,7 +465,6 @@ export function useWorkflowExecution() {
       }
       const onBlockCompleted = (data: BlockCompletedData) => {
-        if (isStaleExecution()) return
         updateActiveBlocks(data.blockId, false)
         if (workflowId) setBlockRunStatus(workflowId, data.blockId, 'success')
@@ -514,7 +495,6 @@ export function useWorkflowExecution() {
       }
       const onBlockError = (data: BlockErrorData) => {
-        if (isStaleExecution()) return
         updateActiveBlocks(data.blockId, false)
         if (workflowId) setBlockRunStatus(workflowId, data.blockId, 'error')
@@ -922,6 +902,10 @@ export function useWorkflowExecution() {
       // Update block logs with actual stream completion times
       if (result.logs && streamCompletionTimes.size > 0) {
+        const streamCompletionEndTime = new Date(
+          Math.max(...Array.from(streamCompletionTimes.values()))
+        ).toISOString()
         result.logs.forEach((log: BlockLog) => {
           if (streamCompletionTimes.has(log.blockId)) {
             const completionTime = streamCompletionTimes.get(log.blockId)!
@@ -1003,6 +987,7 @@ export function useWorkflowExecution() {
         return { success: true, stream }
       }
+      // For manual (non-chat) execution
       const manualExecutionId = uuidv4()
       try {
         const result = await executeWorkflow(
@@ -1017,10 +1002,29 @@ export function useWorkflowExecution() {
           if (result.metadata.pendingBlocks) {
             setPendingBlocks(activeWorkflowId, result.metadata.pendingBlocks)
           }
+        } else if (result && 'success' in result) {
+          setExecutionResult(result)
+          // Reset execution state after successful non-debug execution
+          setIsExecuting(activeWorkflowId, false)
+          setIsDebugging(activeWorkflowId, false)
+          setActiveBlocks(activeWorkflowId, new Set())
+          if (isChatExecution) {
+            if (!result.metadata) {
+              result.metadata = { duration: 0, startTime: new Date().toISOString() }
+            }
+            ;(result.metadata as any).source = 'chat'
+          }
+          // Invalidate subscription queries to update usage
+          setTimeout(() => {
+            queryClient.invalidateQueries({ queryKey: subscriptionKeys.all })
+          }, 1000)
         }
         return result
       } catch (error: any) {
         const errorResult = handleExecutionError(error, { executionId: manualExecutionId })
+        // Note: Error logs are already persisted server-side via execution-core.ts
         return errorResult
       }
     },
@@ -1271,7 +1275,7 @@ export function useWorkflowExecution() {
       if (activeWorkflowId) {
         logger.info('Using server-side executor')
-        const executionIdRef = { current: '' }
+        const executionId = uuidv4()
         let executionResult: ExecutionResult = {
           success: false,
@@ -1289,7 +1293,7 @@ export function useWorkflowExecution() {
         try {
           const blockHandlers = buildBlockEventHandlers({
             workflowId: activeWorkflowId,
-            executionIdRef,
+            executionId,
             workflowEdges,
             activeBlocksSet,
             accumulatedBlockLogs,
@@ -1322,10 +1326,6 @@ export function useWorkflowExecution() {
               loops: clientWorkflowState.loops,
               parallels: clientWorkflowState.parallels,
             },
-            onExecutionId: (id) => {
-              executionIdRef.current = id
-              setCurrentExecutionId(activeWorkflowId, id)
-            },
             callbacks: {
               onExecutionStarted: (data) => {
                 logger.info('Server execution started:', data)
@@ -1368,18 +1368,6 @@ export function useWorkflowExecution() {
               },
               onExecutionCompleted: (data) => {
-                if (
-                  activeWorkflowId &&
-                  executionIdRef.current &&
-                  useExecutionStore.getState().getCurrentExecutionId(activeWorkflowId) !==
-                    executionIdRef.current
-                )
-                  return
-                if (activeWorkflowId) {
-                  setCurrentExecutionId(activeWorkflowId, null)
-                }
                 executionResult = {
                   success: data.success,
                   output: data.output,
@@ -1437,33 +1425,9 @@ export function useWorkflowExecution() {
                   })
                 }
               }
-                const workflowExecState = activeWorkflowId
-                  ? useExecutionStore.getState().getWorkflowExecution(activeWorkflowId)
-                  : null
-                if (activeWorkflowId && !workflowExecState?.isDebugging) {
-                  setExecutionResult(executionResult)
-                  setIsExecuting(activeWorkflowId, false)
-                  setActiveBlocks(activeWorkflowId, new Set())
-                  setTimeout(() => {
-                    queryClient.invalidateQueries({ queryKey: subscriptionKeys.all })
-                  }, 1000)
-                }
               },
               onExecutionError: (data) => {
-                if (
-                  activeWorkflowId &&
-                  executionIdRef.current &&
-                  useExecutionStore.getState().getCurrentExecutionId(activeWorkflowId) !==
-                    executionIdRef.current
-                )
-                  return
-                if (activeWorkflowId) {
-                  setCurrentExecutionId(activeWorkflowId, null)
-                }
                 executionResult = {
                   success: false,
                   output: {},
@@ -1477,53 +1441,43 @@ export function useWorkflowExecution() {
                 const isPreExecutionError = accumulatedBlockLogs.length === 0
                 handleExecutionErrorConsole({
                   workflowId: activeWorkflowId,
-                  executionId: executionIdRef.current,
+                  executionId,
                   error: data.error,
                   durationMs: data.duration,
                   blockLogs: accumulatedBlockLogs,
                   isPreExecutionError,
                 })
-                if (activeWorkflowId) {
-                  setIsExecuting(activeWorkflowId, false)
-                  setIsDebugging(activeWorkflowId, false)
-                  setActiveBlocks(activeWorkflowId, new Set())
-                }
               },
               onExecutionCancelled: (data) => {
-                if (
-                  activeWorkflowId &&
-                  executionIdRef.current &&
-                  useExecutionStore.getState().getCurrentExecutionId(activeWorkflowId) !==
-                    executionIdRef.current
-                )
-                  return
-                if (activeWorkflowId) {
-                  setCurrentExecutionId(activeWorkflowId, null)
-                }
                 handleExecutionCancelledConsole({
                   workflowId: activeWorkflowId,
-                  executionId: executionIdRef.current,
+                  executionId,
                   durationMs: data?.duration,
                 })
-                if (activeWorkflowId) {
-                  setIsExecuting(activeWorkflowId, false)
-                  setIsDebugging(activeWorkflowId, false)
-                  setActiveBlocks(activeWorkflowId, new Set())
-                }
               },
             },
           })
           return executionResult
         } catch (error: any) {
+          // Don't log abort errors - they're intentional user actions
          if (error.name === 'AbortError' || error.message?.includes('aborted')) {
            logger.info('Execution aborted by user')
-            return executionResult
+            // Reset execution state
+            if (activeWorkflowId) {
+              setIsExecuting(activeWorkflowId, false)
+              setActiveBlocks(activeWorkflowId, new Set())
+            }
+            // Return gracefully without error
+            return {
+              success: false,
+              output: {},
+              metadata: { duration: 0 },
+              logs: [],
+            }
          }
          logger.error('Server-side execution failed:', error)
@@ -1531,6 +1485,7 @@ export function useWorkflowExecution() {
         }
       }
+      // Fallback: should never reach here
       throw new Error('Server-side execution is required')
     }
@@ -1762,28 +1717,25 @@ export function useWorkflowExecution() {
    * Handles cancelling the current workflow execution
    */
   const handleCancelExecution = useCallback(() => {
-    if (!activeWorkflowId) return
     logger.info('Workflow execution cancellation requested')
-    const storedExecutionId = getCurrentExecutionId(activeWorkflowId)
-    if (storedExecutionId) {
-      setCurrentExecutionId(activeWorkflowId, null)
-      fetch(`/api/workflows/${activeWorkflowId}/executions/${storedExecutionId}/cancel`, {
-        method: 'POST',
-      }).catch(() => {})
-      handleExecutionCancelledConsole({
-        workflowId: activeWorkflowId,
-        executionId: storedExecutionId,
-      })
-    }
-    executionStream.cancel(activeWorkflowId)
+    // Cancel the execution stream for this workflow (server-side)
+    executionStream.cancel(activeWorkflowId ?? undefined)
+    // Mark current chat execution as superseded so its cleanup won't affect new executions
     currentChatExecutionIdRef.current = null
+    // Mark all running entries as canceled in the terminal
+    if (activeWorkflowId) {
+      cancelRunningEntries(activeWorkflowId)
+      // Reset execution state - this triggers chat stream cleanup via useEffect in chat.tsx
       setIsExecuting(activeWorkflowId, false)
       setIsDebugging(activeWorkflowId, false)
       setActiveBlocks(activeWorkflowId, new Set())
+    }
+    // If in debug mode, also reset debug state
     if (isDebugging) {
       resetDebugState()
     }
@@ -1795,9 +1747,7 @@ export function useWorkflowExecution() {
     setIsDebugging,
     setActiveBlocks,
     activeWorkflowId,
-    getCurrentExecutionId,
-    setCurrentExecutionId,
-    handleExecutionCancelledConsole,
+    cancelRunningEntries,
   ])
   /**
@@ -1897,7 +1847,7 @@ export function useWorkflowExecution() {
       }
       setIsExecuting(workflowId, true)
-      const executionIdRef = { current: '' }
+      const executionId = uuidv4()
       const accumulatedBlockLogs: BlockLog[] = []
       const accumulatedBlockStates = new Map<string, BlockState>()
       const executedBlockIds = new Set<string>()
@@ -1906,7 +1856,7 @@ export function useWorkflowExecution() {
       try {
         const blockHandlers = buildBlockEventHandlers({
           workflowId,
-          executionIdRef,
+          executionId,
           workflowEdges,
           activeBlocksSet,
           accumulatedBlockLogs,
@@ -1921,10 +1871,6 @@ export function useWorkflowExecution() {
           startBlockId: blockId,
           sourceSnapshot: effectiveSnapshot,
           input: workflowInput,
-          onExecutionId: (id) => {
-            executionIdRef.current = id
-            setCurrentExecutionId(workflowId, id)
-          },
           callbacks: {
             onBlockStarted: blockHandlers.onBlockStarted,
             onBlockCompleted: blockHandlers.onBlockCompleted,
@@ -1932,6 +1878,7 @@ export function useWorkflowExecution() {
             onExecutionCompleted: (data) => {
               if (data.success) {
+                // Add the start block (trigger) to executed blocks
                 executedBlockIds.add(blockId)
                 const mergedBlockStates: Record<string, BlockState> = {
@@ -1955,10 +1902,6 @@ export function useWorkflowExecution() {
                 }
                 setLastExecutionSnapshot(workflowId, updatedSnapshot)
               }
-              setCurrentExecutionId(workflowId, null)
-              setIsExecuting(workflowId, false)
-              setActiveBlocks(workflowId, new Set())
             },
             onExecutionError: (data) => {
@@ -1978,27 +1921,19 @@ export function useWorkflowExecution() {
               handleExecutionErrorConsole({
                 workflowId,
-                executionId: executionIdRef.current,
+                executionId,
                 error: data.error,
                 durationMs: data.duration,
                 blockLogs: accumulatedBlockLogs,
               })
-              setCurrentExecutionId(workflowId, null)
-              setIsExecuting(workflowId, false)
-              setActiveBlocks(workflowId, new Set())
             },
             onExecutionCancelled: (data) => {
               handleExecutionCancelledConsole({
                 workflowId,
-                executionId: executionIdRef.current,
+                executionId,
                 durationMs: data?.duration,
               })
-              setCurrentExecutionId(workflowId, null)
-              setIsExecuting(workflowId, false)
-              setActiveBlocks(workflowId, new Set())
             },
           },
         })
@@ -2007,20 +1942,14 @@ export function useWorkflowExecution() {
         logger.error('Run-from-block failed:', error)
       }
     } finally {
-      const currentId = getCurrentExecutionId(workflowId)
-      if (currentId === null || currentId === executionIdRef.current) {
-        setCurrentExecutionId(workflowId, null)
       setIsExecuting(workflowId, false)
       setActiveBlocks(workflowId, new Set())
     }
-    }
   },
   [
     getLastExecutionSnapshot,
     setLastExecutionSnapshot,
     clearLastExecutionSnapshot,
-    getCurrentExecutionId,
-    setCurrentExecutionId,
     setIsExecuting,
     setActiveBlocks,
     setBlockRunStatus,
@@ -2050,213 +1979,29 @@ export function useWorkflowExecution() {
       const executionId = uuidv4()
       try {
-        await executeWorkflow(undefined, undefined, executionId, undefined, 'manual', blockId)
+        const result = await executeWorkflow(
+          undefined,
+          undefined,
+          executionId,
+          undefined,
+          'manual',
+          blockId
+        )
+        if (result && 'success' in result) {
+          setExecutionResult(result)
+        }
       } catch (error) {
         const errorResult = handleExecutionError(error, { executionId })
         return errorResult
       } finally {
-        setCurrentExecutionId(workflowId, null)
         setIsExecuting(workflowId, false)
         setIsDebugging(workflowId, false)
         setActiveBlocks(workflowId, new Set())
       }
     },
-    [
-      activeWorkflowId,
-      setCurrentExecutionId,
-      setExecutionResult,
-      setIsExecuting,
-      setIsDebugging,
-      setActiveBlocks,
-    ]
+    [activeWorkflowId, setExecutionResult, setIsExecuting, setIsDebugging, setActiveBlocks]
   )
-  useEffect(() => {
-    if (!activeWorkflowId || !hasHydrated) return
-    const entries = useTerminalConsoleStore.getState().entries
-    const runningEntries = entries.filter(
-      (e) => e.isRunning && e.workflowId === activeWorkflowId && e.executionId
-    )
-    if (runningEntries.length === 0) return
-    if (activeReconnections.has(activeWorkflowId)) return
-    activeReconnections.add(activeWorkflowId)
-    executionStream.cancel(activeWorkflowId)
-    const sorted = [...runningEntries].sort((a, b) => {
-      const aTime = a.startedAt ? new Date(a.startedAt).getTime() : 0
-      const bTime = b.startedAt ? new Date(b.startedAt).getTime() : 0
-      return bTime - aTime
-    })
-    const executionId = sorted[0].executionId!
-    const otherExecutionIds = new Set(
-      sorted.filter((e) => e.executionId !== executionId).map((e) => e.executionId!)
-    )
-    if (otherExecutionIds.size > 0) {
-      cancelRunningEntries(activeWorkflowId)
-    }
-    setCurrentExecutionId(activeWorkflowId, executionId)
-    setIsExecuting(activeWorkflowId, true)
-    const workflowEdges = useWorkflowStore.getState().edges
-    const activeBlocksSet = new Set<string>()
-    const accumulatedBlockLogs: BlockLog[] = []
-    const accumulatedBlockStates = new Map<string, BlockState>()
-    const executedBlockIds = new Set<string>()
-    const executionIdRef = { current: executionId }
-    const handlers = buildBlockEventHandlers({
-      workflowId: activeWorkflowId,
-      executionIdRef,
-      workflowEdges,
-      activeBlocksSet,
-      accumulatedBlockLogs,
-      accumulatedBlockStates,
-      executedBlockIds,
-      consoleMode: 'update',
-      includeStartConsoleEntry: true,
-    })
-    const originalEntries = entries
-      .filter((e) => e.executionId === executionId)
-      .map((e) => ({ ...e }))
-    let cleared = false
-    let reconnectionComplete = false
-    let cleanupRan = false
-    const clearOnce = () => {
-      if (!cleared) {
-        cleared = true
-        clearExecutionEntries(executionId)
-      }
-    }
-    const reconnectWorkflowId = activeWorkflowId
-    executionStream
-      .reconnect({
-        workflowId: reconnectWorkflowId,
-        executionId,
-        callbacks: {
-          onBlockStarted: (data) => {
-            clearOnce()
-            handlers.onBlockStarted(data)
-          },
-          onBlockCompleted: (data) => {
-            clearOnce()
-            handlers.onBlockCompleted(data)
-          },
-          onBlockError: (data) => {
-            clearOnce()
-            handlers.onBlockError(data)
-          },
-          onExecutionCompleted: () => {
-            const currentId = useExecutionStore
-              .getState()
-              .getCurrentExecutionId(reconnectWorkflowId)
-            if (currentId !== executionId) {
-              reconnectionComplete = true
-              activeReconnections.delete(reconnectWorkflowId)
-              return
-            }
-            clearOnce()
-            reconnectionComplete = true
-            activeReconnections.delete(reconnectWorkflowId)
-            setCurrentExecutionId(reconnectWorkflowId, null)
-            setIsExecuting(reconnectWorkflowId, false)
-            setActiveBlocks(reconnectWorkflowId, new Set())
-          },
-          onExecutionError: (data) => {
-            const currentId = useExecutionStore
-              .getState()
-              .getCurrentExecutionId(reconnectWorkflowId)
-            if (currentId !== executionId) {
-              reconnectionComplete = true
-              activeReconnections.delete(reconnectWorkflowId)
-              return
-            }
-            clearOnce()
-            reconnectionComplete = true
-            activeReconnections.delete(reconnectWorkflowId)
-            setCurrentExecutionId(reconnectWorkflowId, null)
-            setIsExecuting(reconnectWorkflowId, false)
-            setActiveBlocks(reconnectWorkflowId, new Set())
-            handleExecutionErrorConsole({
-              workflowId: reconnectWorkflowId,
-              executionId,
-              error: data.error,
-              blockLogs: accumulatedBlockLogs,
-            })
-          },
-          onExecutionCancelled: () => {
-            const currentId = useExecutionStore
-              .getState()
-              .getCurrentExecutionId(reconnectWorkflowId)
-            if (currentId !== executionId) {
-              reconnectionComplete = true
-              activeReconnections.delete(reconnectWorkflowId)
-              return
-            }
-            clearOnce()
-            reconnectionComplete = true
-            activeReconnections.delete(reconnectWorkflowId)
-            setCurrentExecutionId(reconnectWorkflowId, null)
-            setIsExecuting(reconnectWorkflowId, false)
-            setActiveBlocks(reconnectWorkflowId, new Set())
-            handleExecutionCancelledConsole({
-              workflowId: reconnectWorkflowId,
-              executionId,
-            })
-          },
-        },
-      })
-      .catch((error) => {
-        logger.warn('Execution reconnection failed', { executionId, error })
-      })
-      .finally(() => {
-        if (reconnectionComplete || cleanupRan) return
-        const currentId = useExecutionStore.getState().getCurrentExecutionId(reconnectWorkflowId)
-        if (currentId !== executionId) return
-        reconnectionComplete = true
-        activeReconnections.delete(reconnectWorkflowId)
-        clearExecutionEntries(executionId)
-        for (const entry of originalEntries) {
-          addConsole({
-            workflowId: entry.workflowId,
-            blockId: entry.blockId,
-            blockName: entry.blockName,
-            blockType: entry.blockType,
-            executionId: entry.executionId,
-            executionOrder: entry.executionOrder,
-            isRunning: false,
-            warning: 'Execution result unavailable — check the logs page',
-          })
-        }
-        setCurrentExecutionId(reconnectWorkflowId, null)
-        setIsExecuting(reconnectWorkflowId, false)
-        setActiveBlocks(reconnectWorkflowId, new Set())
-      })
-    return () => {
-      cleanupRan = true
-      executionStream.cancel(reconnectWorkflowId)
-      activeReconnections.delete(reconnectWorkflowId)
-      if (cleared && !reconnectionComplete) {
-        clearExecutionEntries(executionId)
-        for (const entry of originalEntries) {
-          addConsole(entry)
-        }
-      }
-    }
-    // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [activeWorkflowId, hasHydrated])
   return {
     isExecuting,
     isDebugging,

View File

@@ -13,7 +13,7 @@ import {
   ModalFooter,
   ModalHeader,
 } from '@/components/emcn'
-import { AnthropicIcon, GeminiIcon, MistralIcon, OpenAIIcon, SerperIcon } from '@/components/icons'
+import { AnthropicIcon, GeminiIcon, MistralIcon, OpenAIIcon } from '@/components/icons'
 import { Skeleton } from '@/components/ui'
 import {
   type BYOKKey,
@@ -60,13 +60,6 @@ const PROVIDERS: {
     description: 'LLM calls and Knowledge Base OCR',
     placeholder: 'Enter your API key',
   },
-  {
-    id: 'serper',
-    name: 'Serper',
-    icon: SerperIcon,
-    description: 'Web search tool',
-    placeholder: 'Enter your Serper API key',
-  },
 ]
 function BYOKKeySkeleton() {

View File

@@ -589,7 +589,6 @@ export async function executeScheduleJob(payload: ScheduleExecutionPayload) {
 export const scheduleExecution = task({
   id: 'schedule-execution',
-  machine: 'medium-1x',
   retry: {
     maxAttempts: 1,
   },

View File

@@ -669,7 +669,6 @@ async function executeWebhookJobInternal(
 export const webhookExecution = task({
   id: 'webhook-execution',
-  machine: 'medium-1x',
   retry: {
     maxAttempts: 1,
   },

View File

@@ -197,6 +197,5 @@ export async function executeWorkflowJob(payload: WorkflowExecutionPayload) {
 export const workflowExecutionTask = task({
   id: 'workflow-execution',
-  machine: 'medium-1x',
   run: executeWorkflowJob,
 })

View File

@@ -10,11 +10,9 @@ import {
   getReasoningEffortValuesForModel,
   getThinkingLevelsForModel,
   getVerbosityValuesForModel,
-  MODELS_WITH_DEEP_RESEARCH,
   MODELS_WITH_REASONING_EFFORT,
   MODELS_WITH_THINKING,
   MODELS_WITH_VERBOSITY,
-  MODELS_WITHOUT_MEMORY,
   providers,
   supportsTemperature,
 } from '@/providers/utils'
@@ -414,22 +412,12 @@ Return ONLY the JSON array.`,
       title: 'Tools',
       type: 'tool-input',
       defaultValue: [],
-      condition: {
-        field: 'model',
-        value: MODELS_WITH_DEEP_RESEARCH,
-        not: true,
-      },
     },
     {
       id: 'skills',
       title: 'Skills',
       type: 'skill-input',
       defaultValue: [],
-      condition: {
-        field: 'model',
-        value: MODELS_WITH_DEEP_RESEARCH,
-        not: true,
-      },
     },
     {
       id: 'memoryType',
@@ -443,11 +431,6 @@ Return ONLY the JSON array.`,
         { label: 'Sliding window (tokens)', id: 'sliding_window_tokens' },
       ],
       defaultValue: 'none',
-      condition: {
-        field: 'model',
-        value: MODELS_WITHOUT_MEMORY,
-        not: true,
-      },
     },
     {
       id: 'conversationId',
@@ -461,7 +444,6 @@ Return ONLY the JSON array.`,
       condition: {
         field: 'memoryType',
         value: ['conversation', 'sliding_window', 'sliding_window_tokens'],
-        and: { field: 'model', value: MODELS_WITHOUT_MEMORY, not: true },
       },
     },
     {
@@ -472,7 +454,6 @@ Return ONLY the JSON array.`,
       condition: {
         field: 'memoryType',
         value: ['sliding_window'],
-        and: { field: 'model', value: MODELS_WITHOUT_MEMORY, not: true },
       },
     },
     {
@@ -483,7 +464,6 @@ Return ONLY the JSON array.`,
       condition: {
         field: 'memoryType',
         value: ['sliding_window_tokens'],
-        and: { field: 'model', value: MODELS_WITHOUT_MEMORY, not: true },
       },
     },
     {
@@ -497,13 +477,9 @@ Return ONLY the JSON array.`,
       condition: () => ({
         field: 'model',
         value: (() => {
-          const deepResearch = new Set(MODELS_WITH_DEEP_RESEARCH.map((m) => m.toLowerCase()))
           const allModels = Object.keys(getBaseModelProviders())
           return allModels.filter(
-            (model) =>
-              supportsTemperature(model) &&
-              getMaxTemperature(model) === 1 &&
-              !deepResearch.has(model.toLowerCase())
+            (model) => supportsTemperature(model) && getMaxTemperature(model) === 1
           )
         })(),
       }),
@@ -519,13 +495,9 @@ Return ONLY the JSON array.`,
       condition: () => ({
         field: 'model',
         value: (() => {
-          const deepResearch = new Set(MODELS_WITH_DEEP_RESEARCH.map((m) => m.toLowerCase()))
           const allModels = Object.keys(getBaseModelProviders())
           return allModels.filter(
-            (model) =>
-              supportsTemperature(model) &&
-              getMaxTemperature(model) === 2 &&
-              !deepResearch.has(model.toLowerCase())
+            (model) => supportsTemperature(model) && getMaxTemperature(model) === 2
           )
         })(),
       }),
@@ -536,11 +508,6 @@ Return ONLY the JSON array.`,
       type: 'short-input',
       placeholder: 'Enter max tokens (e.g., 4096)...',
       mode: 'advanced',
-      condition: {
-        field: 'model',
-        value: MODELS_WITH_DEEP_RESEARCH,
-        not: true,
-      },
     },
     {
       id: 'responseFormat',
@@ -548,11 +515,6 @@ Return ONLY the JSON array.`,
       type: 'code',
       placeholder: 'Enter JSON schema...',
       language: 'json',
-      condition: {
-        field: 'model',
-        value: MODELS_WITH_DEEP_RESEARCH,
-        not: true,
-      },
       wandConfig: {
         enabled: true,
         maintainHistory: true,
@@ -645,16 +607,6 @@ Example 3 (Array Input):
         generationType: 'json-schema',
       },
     },
-    {
-      id: 'previousInteractionId',
-      title: 'Previous Interaction ID',
-      type: 'short-input',
-      placeholder: 'e.g., {{agent_1.interactionId}}',
-      condition: {
-        field: 'model',
-        value: MODELS_WITH_DEEP_RESEARCH,
-      },
-    },
   ],
   tools: {
     access: [
@@ -818,13 +770,5 @@ Example 3 (Array Input):
       description: 'Provider timing information',
     },
     cost: { type: 'json', description: 'Cost of the API call' },
-    interactionId: {
-      type: 'string',
-      description: 'Interaction ID for multi-turn deep research follow-ups',
-      condition: {
-        field: 'model',
-        value: MODELS_WITH_DEEP_RESEARCH,
-      },
-    },
   },
 }

View File

@@ -394,7 +394,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
         // Page Property Operations
         { label: 'List Page Properties', id: 'list_page_properties' },
         { label: 'Create Page Property', id: 'create_page_property' },
-        { label: 'Delete Page Property', id: 'delete_page_property' },
         // Search Operations
         { label: 'Search Content', id: 'search' },
         { label: 'Search in Space', id: 'search_in_space' },
@@ -415,9 +414,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
         // Label Operations
         { label: 'List Labels', id: 'list_labels' },
         { label: 'Add Label', id: 'add_label' },
-        { label: 'Delete Label', id: 'delete_label' },
-        { label: 'Get Pages by Label', id: 'get_pages_by_label' },
-        { label: 'List Space Labels', id: 'list_space_labels' },
         // Space Operations
         { label: 'Get Space', id: 'get_space' },
         { label: 'List Spaces', id: 'list_spaces' },
@@ -489,8 +485,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
           'search_in_space',
           'get_space',
           'list_spaces',
-          'get_pages_by_label',
-          'list_space_labels',
         ],
         not: true,
       },
@@ -506,8 +500,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
           'list_labels',
           'upload_attachment',
           'add_label',
-          'delete_label',
-          'delete_page_property',
           'get_page_children',
           'get_page_ancestors',
           'list_page_versions',
@@ -535,8 +527,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
           'search_in_space',
           'get_space',
           'list_spaces',
-          'get_pages_by_label',
-          'list_space_labels',
         ],
         not: true,
       },
@@ -552,8 +542,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
           'list_labels',
           'upload_attachment',
           'add_label',
-          'delete_label',
-          'delete_page_property',
           'get_page_children',
           'get_page_ancestors',
           'list_page_versions',
@@ -578,7 +566,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
           'search_in_space',
           'create_blogpost',
           'list_blogposts_in_space',
-          'list_space_labels',
         ],
       },
     },
@@ -614,14 +601,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
       required: true,
       condition: { field: 'operation', value: 'create_page_property' },
     },
-    {
-      id: 'propertyId',
-      title: 'Property ID',
-      type: 'short-input',
-      placeholder: 'Enter property ID to delete',
-      required: true,
-      condition: { field: 'operation', value: 'delete_page_property' },
-    },
     {
       id: 'title',
       title: 'Title',
@@ -715,7 +694,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
       type: 'short-input',
       placeholder: 'Enter label name',
       required: true,
-      condition: { field: 'operation', value: ['add_label', 'delete_label'] },
+      condition: { field: 'operation', value: 'add_label' },
     },
     {
       id: 'labelPrefix',
@@ -730,14 +709,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
       value: () => 'global',
       condition: { field: 'operation', value: 'add_label' },
     },
-    {
-      id: 'labelId',
-      title: 'Label ID',
-      type: 'short-input',
-      placeholder: 'Enter label ID',
-      required: true,
-      condition: { field: 'operation', value: 'get_pages_by_label' },
-    },
     {
       id: 'blogPostStatus',
       title: 'Status',
@@ -788,8 +759,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
           'list_page_versions',
           'list_page_properties',
           'list_labels',
-          'get_pages_by_label',
-          'list_space_labels',
         ],
       },
     },
@@ -811,8 +780,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
           'list_page_versions',
           'list_page_properties',
           'list_labels',
-          'get_pages_by_label',
-          'list_space_labels',
         ],
       },
     },
@@ -833,7 +800,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
// Property Tools // Property Tools
'confluence_list_page_properties', 'confluence_list_page_properties',
'confluence_create_page_property', 'confluence_create_page_property',
'confluence_delete_page_property',
// Search Tools // Search Tools
'confluence_search', 'confluence_search',
'confluence_search_in_space', 'confluence_search_in_space',
@@ -854,9 +820,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
// Label Tools // Label Tools
'confluence_list_labels', 'confluence_list_labels',
'confluence_add_label', 'confluence_add_label',
'confluence_delete_label',
'confluence_get_pages_by_label',
'confluence_list_space_labels',
// Space Tools // Space Tools
'confluence_get_space', 'confluence_get_space',
'confluence_list_spaces', 'confluence_list_spaces',
@@ -889,8 +852,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
return 'confluence_list_page_properties' return 'confluence_list_page_properties'
case 'create_page_property': case 'create_page_property':
return 'confluence_create_page_property' return 'confluence_create_page_property'
case 'delete_page_property':
return 'confluence_delete_page_property'
// Search Operations // Search Operations
case 'search': case 'search':
return 'confluence_search' return 'confluence_search'
@@ -926,12 +887,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
return 'confluence_list_labels' return 'confluence_list_labels'
case 'add_label': case 'add_label':
return 'confluence_add_label' return 'confluence_add_label'
case 'delete_label':
return 'confluence_delete_label'
case 'get_pages_by_label':
return 'confluence_get_pages_by_label'
case 'list_space_labels':
return 'confluence_list_space_labels'
// Space Operations // Space Operations
case 'get_space': case 'get_space':
return 'confluence_get_space' return 'confluence_get_space'
@@ -953,9 +908,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
versionNumber, versionNumber,
propertyKey, propertyKey,
propertyValue, propertyValue,
propertyId,
labelPrefix, labelPrefix,
labelId,
blogPostStatus, blogPostStatus,
purge, purge,
bodyFormat, bodyFormat,
@@ -1006,9 +959,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
} }
} }
// Operations that support generic cursor pagination. // Operations that support cursor pagination
// get_pages_by_label and list_space_labels have dedicated handlers
// below that pass cursor along with their required params (labelId, spaceId).
const supportsCursor = [ const supportsCursor = [
'list_attachments', 'list_attachments',
'list_spaces', 'list_spaces',
@@ -1045,35 +996,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
} }
} }
if (operation === 'delete_page_property') {
return {
credential,
pageId: effectivePageId,
operation,
propertyId,
...rest,
}
}
if (operation === 'get_pages_by_label') {
return {
credential,
operation,
labelId,
cursor: cursor || undefined,
...rest,
}
}
if (operation === 'list_space_labels') {
return {
credential,
operation,
cursor: cursor || undefined,
...rest,
}
}
if (operation === 'upload_attachment') { if (operation === 'upload_attachment') {
const normalizedFile = normalizeFileInput(attachmentFile, { single: true }) const normalizedFile = normalizeFileInput(attachmentFile, { single: true })
if (!normalizedFile) { if (!normalizedFile) {
@@ -1122,9 +1044,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
attachmentFileName: { type: 'string', description: 'Custom file name for attachment' }, attachmentFileName: { type: 'string', description: 'Custom file name for attachment' },
attachmentComment: { type: 'string', description: 'Comment for the attachment' }, attachmentComment: { type: 'string', description: 'Comment for the attachment' },
labelName: { type: 'string', description: 'Label name' }, labelName: { type: 'string', description: 'Label name' },
labelId: { type: 'string', description: 'Label identifier' },
labelPrefix: { type: 'string', description: 'Label prefix (global, my, team, system)' }, labelPrefix: { type: 'string', description: 'Label prefix (global, my, team, system)' },
propertyId: { type: 'string', description: 'Property identifier' },
blogPostStatus: { type: 'string', description: 'Blog post status (current or draft)' }, blogPostStatus: { type: 'string', description: 'Blog post status (current or draft)' },
purge: { type: 'boolean', description: 'Permanently delete instead of moving to trash' }, purge: { type: 'boolean', description: 'Permanently delete instead of moving to trash' },
bodyFormat: { type: 'string', description: 'Body format for comments' }, bodyFormat: { type: 'string', description: 'Body format for comments' },
@@ -1160,7 +1080,6 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
// Label Results // Label Results
labels: { type: 'array', description: 'List of labels' }, labels: { type: 'array', description: 'List of labels' },
labelName: { type: 'string', description: 'Label name' }, labelName: { type: 'string', description: 'Label name' },
labelId: { type: 'string', description: 'Label identifier' },
// Space Results // Space Results
spaces: { type: 'array', description: 'List of spaces' }, spaces: { type: 'array', description: 'List of spaces' },
spaceId: { type: 'string', description: 'Space identifier' }, spaceId: { type: 'string', description: 'Space identifier' },
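
Note on the pagination hunk above: with the label operations removed, only the generic cursor path remains. A minimal sketch of that pattern, assuming a supportsCursor allowlist and an optional cursor string as in the diff (names are illustrative, not the block's exact API):

// Sketch: forward `cursor` only for operations that support it; omit it when empty
// so the Confluence API treats the request as the first page.
const supportsCursor = ['list_attachments', 'list_spaces', 'list_labels'] // illustrative subset

function buildParams(
  operation: string,
  credential: string,
  cursor?: string
): Record<string, unknown> {
  const params: Record<string, unknown> = { credential, operation }
  if (supportsCursor.includes(operation) && cursor) {
    params.cursor = cursor
  }
  return params
}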

View File

@@ -58,16 +58,6 @@ export const S3Block: BlockConfig<S3Response> = {
}, },
required: true, required: true,
}, },
{
id: 'getObjectRegion',
title: 'AWS Region',
type: 'short-input',
placeholder: 'Used when S3 URL does not include region',
condition: {
field: 'operation',
value: ['get_object'],
},
},
{ {
id: 'bucketName', id: 'bucketName',
title: 'Bucket Name', title: 'Bucket Name',
@@ -301,12 +291,35 @@ export const S3Block: BlockConfig<S3Response> = {
if (!params.s3Uri) { if (!params.s3Uri) {
throw new Error('S3 Object URL is required') throw new Error('S3 Object URL is required')
} }
// Parse S3 URI for get_object
try {
const url = new URL(params.s3Uri)
const hostname = url.hostname
const bucketName = hostname.split('.')[0]
const regionMatch = hostname.match(/s3[.-]([^.]+)\.amazonaws\.com/)
const region = regionMatch ? regionMatch[1] : params.region
const objectKey = url.pathname.startsWith('/')
? url.pathname.substring(1)
: url.pathname
if (!bucketName || !objectKey) {
throw new Error('Could not parse S3 URL')
}
return { return {
accessKeyId: params.accessKeyId, accessKeyId: params.accessKeyId,
secretAccessKey: params.secretAccessKey, secretAccessKey: params.secretAccessKey,
region: params.getObjectRegion || params.region, region,
bucketName,
objectKey,
s3Uri: params.s3Uri, s3Uri: params.s3Uri,
} }
} catch (_error) {
throw new Error(
'Invalid S3 Object URL format. Expected: https://bucket-name.s3.region.amazonaws.com/path/to/file'
)
}
} }
case 'list_objects': case 'list_objects':
@@ -388,7 +401,6 @@ export const S3Block: BlockConfig<S3Response> = {
acl: { type: 'string', description: 'Access control list' }, acl: { type: 'string', description: 'Access control list' },
// Download inputs // Download inputs
s3Uri: { type: 'string', description: 'S3 object URL' }, s3Uri: { type: 'string', description: 'S3 object URL' },
getObjectRegion: { type: 'string', description: 'Optional AWS region override for downloads' },
// List inputs // List inputs
prefix: { type: 'string', description: 'Prefix filter' }, prefix: { type: 'string', description: 'Prefix filter' },
maxKeys: { type: 'number', description: 'Maximum results' }, maxKeys: { type: 'number', description: 'Maximum results' },
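
Note on the get_object hunk above: the dedicated region override is replaced by parsing the virtual-hosted-style URL itself. A standalone sketch of that parsing, directly mirroring the added code (the example URL is hypothetical):

// Derives bucket, region, and key from an S3 object URL; `fallbackRegion` covers
// URLs whose hostname carries no region segment.
function parseS3Uri(s3Uri: string, fallbackRegion?: string) {
  const url = new URL(s3Uri) // throws on malformed input; the block surfaces a friendlier message
  const bucketName = url.hostname.split('.')[0]
  // Matches both 's3.us-east-1.amazonaws.com' and the legacy 's3-us-west-2.amazonaws.com' forms.
  const regionMatch = url.hostname.match(/s3[.-]([^.]+)\.amazonaws\.com/)
  const region = regionMatch ? regionMatch[1] : fallbackRegion
  const objectKey = url.pathname.replace(/^\//, '')
  if (!bucketName || !objectKey) throw new Error('Could not parse S3 URL')
  return { bucketName, region, objectKey }
}

// parseS3Uri('https://my-bucket.s3.eu-west-1.amazonaws.com/reports/q1.pdf')
//   -> { bucketName: 'my-bucket', region: 'eu-west-1', objectKey: 'reports/q1.pdf' }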

View File

@@ -78,7 +78,6 @@ export const SerperBlock: BlockConfig<SearchResponse> = {
placeholder: 'Enter your Serper API key', placeholder: 'Enter your Serper API key',
password: true, password: true,
required: true, required: true,
hideWhenHosted: true,
}, },
], ],
tools: { tools: {

View File

@@ -243,7 +243,6 @@ export interface SubBlockConfig {
hidden?: boolean hidden?: boolean
hideFromPreview?: boolean // Hide this subblock from the workflow block preview hideFromPreview?: boolean // Hide this subblock from the workflow block preview
requiresFeature?: string // Environment variable name that must be truthy for this subblock to be visible requiresFeature?: string // Environment variable name that must be truthy for this subblock to be visible
hideWhenHosted?: boolean // Hide this subblock when running on hosted sim
description?: string description?: string
tooltip?: string // Tooltip text displayed via info icon next to the title tooltip?: string // Tooltip text displayed via info icon next to the title
value?: (params: Record<string, any>) => string value?: (params: Record<string, any>) => string
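
Note on the interface above: with hideWhenHosted gone, visibility is driven by condition, hidden, and requiresFeature alone. A sketch of the condition shape as the Confluence hunks earlier use it ({ field, value, not }); the evaluator itself is an assumption, not code from this repo:

interface VisibilityCondition {
  field: string
  value: string | string[]
  not?: boolean
}

// Assumed semantics: show the subblock when the watched field's current value
// is in the allowed list (inverted when `not` is set).
function isVisible(
  cond: VisibilityCondition | undefined,
  values: Record<string, string>
): boolean {
  if (!cond) return true
  const allowed = Array.isArray(cond.value) ? cond.value : [cond.value]
  const match = allowed.includes(values[cond.field])
  return cond.not ? !match : match
}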

View File

@@ -2,8 +2,8 @@
slug: enterprise slug: enterprise
title: 'Build with Sim for Enterprise' title: 'Build with Sim for Enterprise'
description: 'Access control, BYOK, self-hosted deployments, on-prem Copilot, SSO & SAML, whitelabeling, Admin API, and flexible data retention—enterprise features for teams with strict security and compliance requirements.' description: 'Access control, BYOK, self-hosted deployments, on-prem Copilot, SSO & SAML, whitelabeling, Admin API, and flexible data retention—enterprise features for teams with strict security and compliance requirements.'
date: 2026-02-11 date: 2026-01-23
updated: 2026-02-11 updated: 2026-01-23
authors: authors:
- vik - vik
readingTime: 10 readingTime: 10
@@ -13,8 +13,8 @@ ogAlt: 'Sim Enterprise features overview'
about: ['Enterprise Software', 'Security', 'Compliance', 'Self-Hosting'] about: ['Enterprise Software', 'Security', 'Compliance', 'Self-Hosting']
timeRequired: PT10M timeRequired: PT10M
canonical: https://sim.ai/studio/enterprise canonical: https://sim.ai/studio/enterprise
featured: true featured: false
draft: false draft: true
--- ---
We've been working with security teams at larger organizations to bring Sim into environments with strict compliance and data handling requirements. This post covers the enterprise capabilities we've built: granular access control, bring-your-own-keys, self-hosted deployments, on-prem Copilot, SSO & SAML, whitelabeling, compliance, and programmatic management via the Admin API. We've been working with security teams at larger organizations to bring Sim into environments with strict compliance and data handling requirements. This post covers the enterprise capabilities we've built: granular access control, bring-your-own-keys, self-hosted deployments, on-prem Copilot, SSO & SAML, whitelabeling, compliance, and programmatic management via the Admin API.

View File

@@ -1,4 +1,3 @@
import { setupGlobalFetchMock } from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, type Mock, vi } from 'vitest' import { afterEach, beforeEach, describe, expect, it, type Mock, vi } from 'vitest'
import { getAllBlocks } from '@/blocks' import { getAllBlocks } from '@/blocks'
import { BlockType, isMcpTool } from '@/executor/constants' import { BlockType, isMcpTool } from '@/executor/constants'
@@ -62,30 +61,6 @@ vi.mock('@/providers', () => ({
}), }),
})) }))
vi.mock('@/executor/utils/http', () => ({
buildAuthHeaders: vi.fn().mockResolvedValue({ 'Content-Type': 'application/json' }),
buildAPIUrl: vi.fn((path: string, params?: Record<string, string>) => {
const url = new URL(path, 'http://localhost:3000')
if (params) {
for (const [key, value] of Object.entries(params)) {
if (value !== undefined && value !== null) {
url.searchParams.set(key, value)
}
}
}
return url
}),
extractAPIErrorMessage: vi.fn(async (response: Response) => {
const defaultMessage = `API request failed with status ${response.status}`
try {
const errorData = await response.json()
return errorData.error || defaultMessage
} catch {
return defaultMessage
}
}),
}))
vi.mock('@sim/db', () => ({ vi.mock('@sim/db', () => ({
db: { db: {
select: vi.fn().mockReturnValue({ select: vi.fn().mockReturnValue({
@@ -109,7 +84,7 @@ vi.mock('@sim/db/schema', () => ({
}, },
})) }))
setupGlobalFetchMock() global.fetch = Object.assign(vi.fn(), { preconnect: vi.fn() }) as typeof fetch
const mockGetAllBlocks = getAllBlocks as Mock const mockGetAllBlocks = getAllBlocks as Mock
const mockExecuteTool = executeTool as Mock const mockExecuteTool = executeTool as Mock
@@ -1926,301 +1901,5 @@ describe('AgentBlockHandler', () => {
expect(discoveryCalls[0].url).toContain('serverId=mcp-legacy-server') expect(discoveryCalls[0].url).toContain('serverId=mcp-legacy-server')
}) })
describe('customToolId resolution - DB as source of truth', () => {
const staleInlineSchema = {
function: {
name: 'formatReport',
description: 'Formats a report',
parameters: {
type: 'object',
properties: {
title: { type: 'string', description: 'Report title' },
content: { type: 'string', description: 'Report content' },
},
required: ['title', 'content'],
},
},
}
const dbSchema = {
function: {
name: 'formatReport',
description: 'Formats a report',
parameters: {
type: 'object',
properties: {
title: { type: 'string', description: 'Report title' },
content: { type: 'string', description: 'Report content' },
format: { type: 'string', description: 'Output format' },
},
required: ['title', 'content', 'format'],
},
},
}
const staleInlineCode = 'return { title, content };'
const dbCode = 'return { title, content, format };'
function mockFetchForCustomTool(toolId: string) {
mockFetch.mockImplementation((url: string) => {
if (typeof url === 'string' && url.includes('/api/tools/custom')) {
return Promise.resolve({
ok: true,
headers: { get: () => null },
json: () =>
Promise.resolve({
data: [
{
id: toolId,
title: 'formatReport',
schema: dbSchema,
code: dbCode,
},
],
}),
})
}
return Promise.resolve({
ok: true,
headers: { get: () => null },
json: () => Promise.resolve({}),
})
})
}
function mockFetchFailure() {
mockFetch.mockImplementation((url: string) => {
if (typeof url === 'string' && url.includes('/api/tools/custom')) {
return Promise.resolve({
ok: false,
status: 500,
headers: { get: () => null },
json: () => Promise.resolve({}),
})
}
return Promise.resolve({
ok: true,
headers: { get: () => null },
json: () => Promise.resolve({}),
})
})
}
beforeEach(() => {
Object.defineProperty(global, 'window', {
value: undefined,
writable: true,
configurable: true,
})
})
it('should always fetch latest schema from DB when customToolId is present', async () => {
const toolId = 'custom-tool-123'
mockFetchForCustomTool(toolId)
const inputs = {
model: 'gpt-4o',
userPrompt: 'Format a report',
apiKey: 'test-api-key',
tools: [
{
type: 'custom-tool',
customToolId: toolId,
title: 'formatReport',
schema: staleInlineSchema,
code: staleInlineCode,
usageControl: 'auto' as const,
},
],
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockContext, mockBlock, inputs)
expect(mockExecuteProviderRequest).toHaveBeenCalled()
const providerCall = mockExecuteProviderRequest.mock.calls[0]
const tools = providerCall[1].tools
expect(tools.length).toBe(1)
// DB schema wins over stale inline — includes format param
expect(tools[0].parameters.required).toContain('format')
expect(tools[0].parameters.properties).toHaveProperty('format')
})
it('should fetch from DB when customToolId has no inline schema', async () => {
const toolId = 'custom-tool-123'
mockFetchForCustomTool(toolId)
const inputs = {
model: 'gpt-4o',
userPrompt: 'Format a report',
apiKey: 'test-api-key',
tools: [
{
type: 'custom-tool',
customToolId: toolId,
usageControl: 'auto' as const,
},
],
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockContext, mockBlock, inputs)
expect(mockExecuteProviderRequest).toHaveBeenCalled()
const providerCall = mockExecuteProviderRequest.mock.calls[0]
const tools = providerCall[1].tools
expect(tools.length).toBe(1)
expect(tools[0].name).toBe('formatReport')
expect(tools[0].parameters.required).toContain('format')
})
it('should fall back to inline schema when DB fetch fails and inline exists', async () => {
mockFetchFailure()
const inputs = {
model: 'gpt-4o',
userPrompt: 'Format a report',
apiKey: 'test-api-key',
tools: [
{
type: 'custom-tool',
customToolId: 'custom-tool-123',
title: 'formatReport',
schema: staleInlineSchema,
code: staleInlineCode,
usageControl: 'auto' as const,
},
],
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockContext, mockBlock, inputs)
expect(mockExecuteProviderRequest).toHaveBeenCalled()
const providerCall = mockExecuteProviderRequest.mock.calls[0]
const tools = providerCall[1].tools
expect(tools.length).toBe(1)
expect(tools[0].name).toBe('formatReport')
expect(tools[0].parameters.required).not.toContain('format')
})
it('should return null when DB fetch fails and no inline schema exists', async () => {
mockFetchFailure()
const inputs = {
model: 'gpt-4o',
userPrompt: 'Format a report',
apiKey: 'test-api-key',
tools: [
{
type: 'custom-tool',
customToolId: 'custom-tool-123',
usageControl: 'auto' as const,
},
],
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockContext, mockBlock, inputs)
expect(mockExecuteProviderRequest).toHaveBeenCalled()
const providerCall = mockExecuteProviderRequest.mock.calls[0]
const tools = providerCall[1].tools
expect(tools.length).toBe(0)
})
it('should use DB code for executeFunction when customToolId resolves', async () => {
const toolId = 'custom-tool-123'
mockFetchForCustomTool(toolId)
let capturedTools: any[] = []
Promise.all = vi.fn().mockImplementation((promises: Promise<any>[]) => {
const result = originalPromiseAll.call(Promise, promises)
result.then((tools: any[]) => {
if (tools?.length) {
capturedTools = tools.filter((t) => t !== null)
}
})
return result
})
const inputs = {
model: 'gpt-4o',
userPrompt: 'Format a report',
apiKey: 'test-api-key',
tools: [
{
type: 'custom-tool',
customToolId: toolId,
title: 'formatReport',
schema: staleInlineSchema,
code: staleInlineCode,
usageControl: 'auto' as const,
},
],
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockContext, mockBlock, inputs)
expect(capturedTools.length).toBe(1)
expect(typeof capturedTools[0].executeFunction).toBe('function')
await capturedTools[0].executeFunction({ title: 'Q1', format: 'pdf' })
expect(mockExecuteTool).toHaveBeenCalledWith(
'function_execute',
expect.objectContaining({
code: dbCode,
}),
false,
expect.any(Object)
)
})
it('should not fetch from DB when no customToolId is present', async () => {
const inputs = {
model: 'gpt-4o',
userPrompt: 'Use the tool',
apiKey: 'test-api-key',
tools: [
{
type: 'custom-tool',
title: 'formatReport',
schema: staleInlineSchema,
code: staleInlineCode,
usageControl: 'auto' as const,
},
],
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockContext, mockBlock, inputs)
const customToolFetches = mockFetch.mock.calls.filter(
(call: any[]) => typeof call[0] === 'string' && call[0].includes('/api/tools/custom')
)
expect(customToolFetches.length).toBe(0)
expect(mockExecuteProviderRequest).toHaveBeenCalled()
const providerCall = mockExecuteProviderRequest.mock.calls[0]
const tools = providerCall[1].tools
expect(tools.length).toBe(1)
expect(tools[0].name).toBe('formatReport')
expect(tools[0].parameters.required).not.toContain('format')
})
})
}) })
}) })

View File

@@ -272,16 +272,15 @@ export class AgentBlockHandler implements BlockHandler {
let code = tool.code let code = tool.code
let title = tool.title let title = tool.title
if (tool.customToolId) { if (tool.customToolId && !schema) {
const resolved = await this.fetchCustomToolById(ctx, tool.customToolId) const resolved = await this.fetchCustomToolById(ctx, tool.customToolId)
if (resolved) { if (!resolved) {
schema = resolved.schema
code = resolved.code
title = resolved.title
} else if (!schema) {
logger.error(`Custom tool not found: ${tool.customToolId}`) logger.error(`Custom tool not found: ${tool.customToolId}`)
return null return null
} }
schema = resolved.schema
code = resolved.code
title = resolved.title
} }
if (!schema?.function) { if (!schema?.function) {
@@ -1000,7 +999,6 @@ export class AgentBlockHandler implements BlockHandler {
reasoningEffort: inputs.reasoningEffort, reasoningEffort: inputs.reasoningEffort,
verbosity: inputs.verbosity, verbosity: inputs.verbosity,
thinkingLevel: inputs.thinkingLevel, thinkingLevel: inputs.thinkingLevel,
previousInteractionId: inputs.previousInteractionId,
} }
} }
@@ -1071,7 +1069,6 @@ export class AgentBlockHandler implements BlockHandler {
reasoningEffort: providerRequest.reasoningEffort, reasoningEffort: providerRequest.reasoningEffort,
verbosity: providerRequest.verbosity, verbosity: providerRequest.verbosity,
thinkingLevel: providerRequest.thinkingLevel, thinkingLevel: providerRequest.thinkingLevel,
previousInteractionId: providerRequest.previousInteractionId,
}) })
return this.processProviderResponse(response, block, responseFormat) return this.processProviderResponse(response, block, responseFormat)
@@ -1272,7 +1269,6 @@ export class AgentBlockHandler implements BlockHandler {
content: result.content, content: result.content,
model: result.model, model: result.model,
...this.createResponseMetadata(result), ...this.createResponseMetadata(result),
...(result.interactionId && { interactionId: result.interactionId }),
} }
} }
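
Note on the resolution hunk above: the DB lookup now runs only when the block carries no inline schema, and a failed lookup is fatal rather than falling back. A condensed sketch of the new precedence, with the fetcher stubbed as a parameter (this is not the real handler):

type ResolvedTool = { schema: any; code?: string; title?: string }

// Inline schema wins; the DB is consulted only when the reference has no schema of its own.
async function resolveCustomTool(
  tool: { customToolId?: string; schema?: any; code?: string; title?: string },
  fetchById: (id: string) => Promise<ResolvedTool | null>
): Promise<ResolvedTool | null> {
  if (tool.customToolId && !tool.schema) {
    const resolved = await fetchById(tool.customToolId)
    if (!resolved) return null // no inline fallback anymore: a missing tool is an error
    return resolved
  }
  return tool.schema ? { schema: tool.schema, code: tool.code, title: tool.title } : null
}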

View File

@@ -20,8 +20,6 @@ export interface AgentInputs {
conversationId?: string // Required for all non-none memory types conversationId?: string // Required for all non-none memory types
slidingWindowSize?: string // For message-based sliding window slidingWindowSize?: string // For message-based sliding window
slidingWindowTokens?: string // For token-based sliding window slidingWindowTokens?: string // For token-based sliding window
// Deep research multi-turn
previousInteractionId?: string // Interactions API previous interaction reference
// LLM parameters // LLM parameters
temperature?: string temperature?: string
maxTokens?: string maxTokens?: string

View File

@@ -1,4 +1,3 @@
import { setupGlobalFetchMock } from '@sim/testing'
import { beforeEach, describe, expect, it, type Mock, vi } from 'vitest' import { beforeEach, describe, expect, it, type Mock, vi } from 'vitest'
import { BlockType } from '@/executor/constants' import { BlockType } from '@/executor/constants'
import { WorkflowBlockHandler } from '@/executor/handlers/workflow/workflow-handler' import { WorkflowBlockHandler } from '@/executor/handlers/workflow/workflow-handler'
@@ -10,7 +9,7 @@ vi.mock('@/lib/auth/internal', () => ({
})) }))
// Mock fetch globally // Mock fetch globally
setupGlobalFetchMock() global.fetch = vi.fn()
describe('WorkflowBlockHandler', () => { describe('WorkflowBlockHandler', () => {
let handler: WorkflowBlockHandler let handler: WorkflowBlockHandler
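
Note on the mock swap above: the shared setupGlobalFetchMock helper is replaced with a direct assignment. The minimal pattern, as it appears across these test diffs (the stubbed response is illustrative):

import { vi } from 'vitest'

// The ambient fetch type in this codebase evidently includes a `preconnect` method
// (see the agent handler test above), hence the extra stub before the cast.
global.fetch = Object.assign(vi.fn(), { preconnect: vi.fn() }) as typeof fetch

const mockFetch = global.fetch as unknown as ReturnType<typeof vi.fn>
mockFetch.mockResolvedValue({ ok: true, json: async () => ({}) } as Response)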

View File

@@ -4,7 +4,7 @@ import { API_ENDPOINTS } from '@/stores/constants'
const logger = createLogger('BYOKKeysQueries') const logger = createLogger('BYOKKeysQueries')
export type BYOKProviderId = 'openai' | 'anthropic' | 'google' | 'mistral' | 'serper' export type BYOKProviderId = 'openai' | 'anthropic' | 'google' | 'mistral'
export interface BYOKKey { export interface BYOKKey {
id: string id: string

View File

@@ -423,7 +423,7 @@ interface GenerateVersionDescriptionVariables {
const VERSION_DESCRIPTION_SYSTEM_PROMPT = `You are writing deployment version descriptions for a workflow automation platform. const VERSION_DESCRIPTION_SYSTEM_PROMPT = `You are writing deployment version descriptions for a workflow automation platform.
Write a brief, factual description (1-3 sentences, under 2000 characters) that states what changed between versions. Write a brief, factual description (1-3 sentences, under 400 characters) that states what changed between versions.
Guidelines: Guidelines:
- Use the specific values provided (credential names, channel names, model names) - Use the specific values provided (credential names, channel names, model names)

View File

@@ -1,4 +1,4 @@
import { useCallback } from 'react' import { useCallback, useRef } from 'react'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import type { import type {
BlockCompletedData, BlockCompletedData,
@@ -16,18 +16,6 @@ import type { SerializableExecutionState } from '@/executor/execution/types'
const logger = createLogger('useExecutionStream') const logger = createLogger('useExecutionStream')
/**
* Detects errors caused by the browser killing a fetch (page refresh, navigation, tab close).
* These should be treated as clean disconnects, not execution errors.
*/
function isClientDisconnectError(error: any): boolean {
if (error.name === 'AbortError') return true
const msg = (error.message ?? '').toLowerCase()
return (
msg.includes('network error') || msg.includes('failed to fetch') || msg.includes('load failed')
)
}
/** /**
* Processes SSE events from a response body and invokes appropriate callbacks. * Processes SSE events from a response body and invokes appropriate callbacks.
*/ */
@@ -133,7 +121,6 @@ export interface ExecuteStreamOptions {
parallels?: Record<string, any> parallels?: Record<string, any>
} }
stopAfterBlockId?: string stopAfterBlockId?: string
onExecutionId?: (executionId: string) => void
callbacks?: ExecutionStreamCallbacks callbacks?: ExecutionStreamCallbacks
} }
@@ -142,40 +129,30 @@ export interface ExecuteFromBlockOptions {
startBlockId: string startBlockId: string
sourceSnapshot: SerializableExecutionState sourceSnapshot: SerializableExecutionState
input?: any input?: any
onExecutionId?: (executionId: string) => void
callbacks?: ExecutionStreamCallbacks callbacks?: ExecutionStreamCallbacks
} }
export interface ReconnectStreamOptions {
workflowId: string
executionId: string
fromEventId?: number
callbacks?: ExecutionStreamCallbacks
}
/**
* Module-level map shared across all hook instances.
* Ensures ANY instance can cancel streams started by ANY other instance,
* which is critical for SPA navigation where the original hook instance unmounts
* but the SSE stream must be cancellable from the new instance.
*/
const sharedAbortControllers = new Map<string, AbortController>()
/** /**
* Hook for executing workflows via server-side SSE streaming. * Hook for executing workflows via server-side SSE streaming.
* Supports concurrent executions via per-workflow AbortController maps. * Supports concurrent executions via per-workflow AbortController maps.
*/ */
export function useExecutionStream() { export function useExecutionStream() {
const execute = useCallback(async (options: ExecuteStreamOptions) => { const abortControllersRef = useRef<Map<string, AbortController>>(new Map())
const { workflowId, callbacks = {}, onExecutionId, ...payload } = options const currentExecutionsRef = useRef<Map<string, { workflowId: string; executionId: string }>>(
new Map()
)
const existing = sharedAbortControllers.get(workflowId) const execute = useCallback(async (options: ExecuteStreamOptions) => {
const { workflowId, callbacks = {}, ...payload } = options
const existing = abortControllersRef.current.get(workflowId)
if (existing) { if (existing) {
existing.abort() existing.abort()
} }
const abortController = new AbortController() const abortController = new AbortController()
sharedAbortControllers.set(workflowId, abortController) abortControllersRef.current.set(workflowId, abortController)
currentExecutionsRef.current.delete(workflowId)
try { try {
const response = await fetch(`/api/workflows/${workflowId}/execute`, { const response = await fetch(`/api/workflows/${workflowId}/execute`, {
@@ -200,48 +177,42 @@ export function useExecutionStream() {
throw new Error('No response body') throw new Error('No response body')
} }
const serverExecutionId = response.headers.get('X-Execution-Id') const executionId = response.headers.get('X-Execution-Id')
if (serverExecutionId) { if (executionId) {
onExecutionId?.(serverExecutionId) currentExecutionsRef.current.set(workflowId, { workflowId, executionId })
} }
const reader = response.body.getReader() const reader = response.body.getReader()
await processSSEStream(reader, callbacks, 'Execution') await processSSEStream(reader, callbacks, 'Execution')
} catch (error: any) { } catch (error: any) {
if (isClientDisconnectError(error)) { if (error.name === 'AbortError') {
logger.info('Execution stream disconnected (page unload or abort)') logger.info('Execution stream cancelled')
return callbacks.onExecutionCancelled?.({ duration: 0 })
} } else {
logger.error('Execution stream error:', error) logger.error('Execution stream error:', error)
callbacks.onExecutionError?.({ callbacks.onExecutionError?.({
error: error.message || 'Unknown error', error: error.message || 'Unknown error',
duration: 0, duration: 0,
}) })
}
throw error throw error
} finally { } finally {
if (sharedAbortControllers.get(workflowId) === abortController) { abortControllersRef.current.delete(workflowId)
sharedAbortControllers.delete(workflowId) currentExecutionsRef.current.delete(workflowId)
}
} }
}, []) }, [])
const executeFromBlock = useCallback(async (options: ExecuteFromBlockOptions) => { const executeFromBlock = useCallback(async (options: ExecuteFromBlockOptions) => {
const { const { workflowId, startBlockId, sourceSnapshot, input, callbacks = {} } = options
workflowId,
startBlockId,
sourceSnapshot,
input,
onExecutionId,
callbacks = {},
} = options
const existing = sharedAbortControllers.get(workflowId) const existing = abortControllersRef.current.get(workflowId)
if (existing) { if (existing) {
existing.abort() existing.abort()
} }
const abortController = new AbortController() const abortController = new AbortController()
sharedAbortControllers.set(workflowId, abortController) abortControllersRef.current.set(workflowId, abortController)
currentExecutionsRef.current.delete(workflowId)
try { try {
const response = await fetch(`/api/workflows/${workflowId}/execute`, { const response = await fetch(`/api/workflows/${workflowId}/execute`, {
@@ -275,80 +246,64 @@ export function useExecutionStream() {
throw new Error('No response body') throw new Error('No response body')
} }
const serverExecutionId = response.headers.get('X-Execution-Id') const executionId = response.headers.get('X-Execution-Id')
if (serverExecutionId) { if (executionId) {
onExecutionId?.(serverExecutionId) currentExecutionsRef.current.set(workflowId, { workflowId, executionId })
} }
const reader = response.body.getReader() const reader = response.body.getReader()
await processSSEStream(reader, callbacks, 'Run-from-block') await processSSEStream(reader, callbacks, 'Run-from-block')
} catch (error: any) { } catch (error: any) {
if (isClientDisconnectError(error)) { if (error.name === 'AbortError') {
logger.info('Run-from-block stream disconnected (page unload or abort)') logger.info('Run-from-block execution cancelled')
return callbacks.onExecutionCancelled?.({ duration: 0 })
} } else {
logger.error('Run-from-block execution error:', error) logger.error('Run-from-block execution error:', error)
callbacks.onExecutionError?.({ callbacks.onExecutionError?.({
error: error.message || 'Unknown error', error: error.message || 'Unknown error',
duration: 0, duration: 0,
}) })
}
throw error throw error
} finally { } finally {
if (sharedAbortControllers.get(workflowId) === abortController) { abortControllersRef.current.delete(workflowId)
sharedAbortControllers.delete(workflowId) currentExecutionsRef.current.delete(workflowId)
}
}
}, [])
const reconnect = useCallback(async (options: ReconnectStreamOptions) => {
const { workflowId, executionId, fromEventId = 0, callbacks = {} } = options
const existing = sharedAbortControllers.get(workflowId)
if (existing) {
existing.abort()
}
const abortController = new AbortController()
sharedAbortControllers.set(workflowId, abortController)
try {
const response = await fetch(
`/api/workflows/${workflowId}/executions/${executionId}/stream?from=${fromEventId}`,
{ signal: abortController.signal }
)
if (!response.ok) throw new Error(`Reconnect failed (${response.status})`)
if (!response.body) throw new Error('No response body')
await processSSEStream(response.body.getReader(), callbacks, 'Reconnect')
} catch (error: any) {
if (isClientDisconnectError(error)) return
logger.error('Reconnection stream error:', error)
throw error
} finally {
if (sharedAbortControllers.get(workflowId) === abortController) {
sharedAbortControllers.delete(workflowId)
}
} }
}, []) }, [])
const cancel = useCallback((workflowId?: string) => { const cancel = useCallback((workflowId?: string) => {
if (workflowId) { if (workflowId) {
const controller = sharedAbortControllers.get(workflowId) const execution = currentExecutionsRef.current.get(workflowId)
if (execution) {
fetch(`/api/workflows/${execution.workflowId}/executions/${execution.executionId}/cancel`, {
method: 'POST',
}).catch(() => {})
}
const controller = abortControllersRef.current.get(workflowId)
if (controller) { if (controller) {
controller.abort() controller.abort()
sharedAbortControllers.delete(workflowId) abortControllersRef.current.delete(workflowId)
} }
currentExecutionsRef.current.delete(workflowId)
} else { } else {
for (const [, controller] of sharedAbortControllers) { for (const [, execution] of currentExecutionsRef.current) {
fetch(`/api/workflows/${execution.workflowId}/executions/${execution.executionId}/cancel`, {
method: 'POST',
}).catch(() => {})
}
for (const [, controller] of abortControllersRef.current) {
controller.abort() controller.abort()
} }
sharedAbortControllers.clear() abortControllersRef.current.clear()
currentExecutionsRef.current.clear()
} }
}, []) }, [])
return { return {
execute, execute,
executeFromBlock, executeFromBlock,
reconnect,
cancel, cancel,
} }
} }
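
Note on the hook rewrite above: reconnect and the shared module-level controller map are gone; controllers live in per-instance refs again, and cancel now notifies the server before aborting the local stream. A hypothetical call site under that API (fields beyond workflowId and callbacks are not shown in this hunk, so this may be incomplete):

function useRunControls(workflowId: string) {
  const { execute, cancel } = useExecutionStream()

  const run = () =>
    execute({
      workflowId,
      callbacks: {
        onExecutionError: ({ error }) => console.error('run failed:', error),
      },
    }).catch(() => {}) // the hook rethrows after invoking callbacks; swallow here

  // cancel(workflowId) POSTs .../executions/{id}/cancel, then aborts the SSE stream
  return { run, stop: () => cancel(workflowId) }
}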

View File

@@ -10,7 +10,7 @@ import { useProvidersStore } from '@/stores/providers/store'
const logger = createLogger('BYOKKeys') const logger = createLogger('BYOKKeys')
export type BYOKProviderId = 'openai' | 'anthropic' | 'google' | 'mistral' | 'serper' export type BYOKProviderId = 'openai' | 'anthropic' | 'google' | 'mistral'
export interface BYOKKeyResult { export interface BYOKKeyResult {
apiKey: string apiKey: string

View File

@@ -20,8 +20,6 @@ export interface BuildPayloadParams {
fileAttachments?: Array<{ id: string; key: string; size: number; [key: string]: unknown }> fileAttachments?: Array<{ id: string; key: string; size: number; [key: string]: unknown }>
commands?: string[] commands?: string[]
chatId?: string chatId?: string
conversationId?: string
prefetch?: boolean
implicitFeedback?: string implicitFeedback?: string
} }
@@ -66,10 +64,6 @@ export async function buildCopilotRequestPayload(
fileAttachments, fileAttachments,
commands, commands,
chatId, chatId,
conversationId,
prefetch,
conversationHistory,
implicitFeedback,
} = params } = params
const selectedModel = options.selectedModel const selectedModel = options.selectedModel
@@ -160,12 +154,6 @@ export async function buildCopilotRequestPayload(
version: SIM_AGENT_VERSION, version: SIM_AGENT_VERSION,
...(contexts && contexts.length > 0 ? { context: contexts } : {}), ...(contexts && contexts.length > 0 ? { context: contexts } : {}),
...(chatId ? { chatId } : {}), ...(chatId ? { chatId } : {}),
...(conversationId ? { conversationId } : {}),
...(Array.isArray(conversationHistory) && conversationHistory.length > 0
? { conversationHistory }
: {}),
...(typeof prefetch === 'boolean' ? { prefetch } : {}),
...(implicitFeedback ? { implicitFeedback } : {}),
...(processedFileContents.length > 0 ? { fileAttachments: processedFileContents } : {}), ...(processedFileContents.length > 0 ? { fileAttachments: processedFileContents } : {}),
...(integrationTools.length > 0 ? { integrationTools } : {}), ...(integrationTools.length > 0 ? { integrationTools } : {}),
...(credentials ? { credentials } : {}), ...(credentials ? { credentials } : {}),

View File

@@ -1,7 +1,7 @@
import { db } from '@sim/db' import { db } from '@sim/db'
import { customTools, workflow } from '@sim/db/schema' import { workflow } from '@sim/db/schema'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { and, desc, eq, isNull, or } from 'drizzle-orm' import { eq } from 'drizzle-orm'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import type { import type {
ExecutionContext, ExecutionContext,
@@ -12,7 +12,6 @@ import { routeExecution } from '@/lib/copilot/tools/server/router'
import { env } from '@/lib/core/config/env' import { env } from '@/lib/core/config/env'
import { getBaseUrl } from '@/lib/core/utils/urls' import { getBaseUrl } from '@/lib/core/utils/urls'
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
import { upsertCustomTools } from '@/lib/workflows/custom-tools/operations'
import { getTool, resolveToolId } from '@/tools/utils' import { getTool, resolveToolId } from '@/tools/utils'
import { import {
executeCheckDeploymentStatus, executeCheckDeploymentStatus,
@@ -77,247 +76,6 @@ import {
const logger = createLogger('CopilotToolExecutor') const logger = createLogger('CopilotToolExecutor')
type ManageCustomToolOperation = 'add' | 'edit' | 'delete' | 'list'
interface ManageCustomToolSchema {
type: 'function'
function: {
name: string
description?: string
parameters: Record<string, unknown>
}
}
interface ManageCustomToolParams {
operation?: string
toolId?: string
schema?: ManageCustomToolSchema
code?: string
title?: string
workspaceId?: string
}
async function executeManageCustomTool(
rawParams: Record<string, unknown>,
context: ExecutionContext
): Promise<ToolCallResult> {
const params = rawParams as ManageCustomToolParams
const operation = String(params.operation || '').toLowerCase() as ManageCustomToolOperation
const workspaceId = params.workspaceId || context.workspaceId
if (!operation) {
return { success: false, error: "Missing required 'operation' argument" }
}
try {
if (operation === 'list') {
const toolsForUser = workspaceId
? await db
.select()
.from(customTools)
.where(
or(
eq(customTools.workspaceId, workspaceId),
and(isNull(customTools.workspaceId), eq(customTools.userId, context.userId))
)
)
.orderBy(desc(customTools.createdAt))
: await db
.select()
.from(customTools)
.where(and(isNull(customTools.workspaceId), eq(customTools.userId, context.userId)))
.orderBy(desc(customTools.createdAt))
return {
success: true,
output: {
success: true,
operation,
tools: toolsForUser,
count: toolsForUser.length,
},
}
}
if (operation === 'add') {
if (!workspaceId) {
return {
success: false,
error: "workspaceId is required for operation 'add'",
}
}
if (!params.schema || !params.code) {
return {
success: false,
error: "Both 'schema' and 'code' are required for operation 'add'",
}
}
const title = params.title || params.schema.function?.name
if (!title) {
return { success: false, error: "Missing tool title or schema.function.name for 'add'" }
}
const resultTools = await upsertCustomTools({
tools: [
{
title,
schema: params.schema,
code: params.code,
},
],
workspaceId,
userId: context.userId,
})
const created = resultTools.find((tool) => tool.title === title)
return {
success: true,
output: {
success: true,
operation,
toolId: created?.id,
title,
message: `Created custom tool "${title}"`,
},
}
}
if (operation === 'edit') {
if (!workspaceId) {
return {
success: false,
error: "workspaceId is required for operation 'edit'",
}
}
if (!params.toolId) {
return { success: false, error: "'toolId' is required for operation 'edit'" }
}
if (!params.schema && !params.code) {
return {
success: false,
error: "At least one of 'schema' or 'code' is required for operation 'edit'",
}
}
const workspaceTool = await db
.select()
.from(customTools)
.where(and(eq(customTools.id, params.toolId), eq(customTools.workspaceId, workspaceId)))
.limit(1)
const legacyTool =
workspaceTool.length === 0
? await db
.select()
.from(customTools)
.where(
and(
eq(customTools.id, params.toolId),
isNull(customTools.workspaceId),
eq(customTools.userId, context.userId)
)
)
.limit(1)
: []
const existing = workspaceTool[0] || legacyTool[0]
if (!existing) {
return { success: false, error: `Custom tool not found: ${params.toolId}` }
}
const mergedSchema = params.schema || (existing.schema as ManageCustomToolSchema)
const mergedCode = params.code || existing.code
const title = params.title || mergedSchema.function?.name || existing.title
await upsertCustomTools({
tools: [
{
id: params.toolId,
title,
schema: mergedSchema,
code: mergedCode,
},
],
workspaceId,
userId: context.userId,
})
return {
success: true,
output: {
success: true,
operation,
toolId: params.toolId,
title,
message: `Updated custom tool "${title}"`,
},
}
}
if (operation === 'delete') {
if (!params.toolId) {
return { success: false, error: "'toolId' is required for operation 'delete'" }
}
const workspaceDelete =
workspaceId != null
? await db
.delete(customTools)
.where(
and(eq(customTools.id, params.toolId), eq(customTools.workspaceId, workspaceId))
)
.returning({ id: customTools.id })
: []
const legacyDelete =
workspaceDelete.length === 0
? await db
.delete(customTools)
.where(
and(
eq(customTools.id, params.toolId),
isNull(customTools.workspaceId),
eq(customTools.userId, context.userId)
)
)
.returning({ id: customTools.id })
: []
const deleted = workspaceDelete[0] || legacyDelete[0]
if (!deleted) {
return { success: false, error: `Custom tool not found: ${params.toolId}` }
}
return {
success: true,
output: {
success: true,
operation,
toolId: params.toolId,
message: 'Deleted custom tool',
},
}
}
return {
success: false,
error: `Unsupported operation for manage_custom_tool: ${operation}`,
}
} catch (error) {
logger.error('manage_custom_tool execution failed', {
operation,
workspaceId,
userId: context.userId,
error: error instanceof Error ? error.message : String(error),
})
return {
success: false,
error: error instanceof Error ? error.message : 'Failed to manage custom tool',
}
}
}
const SERVER_TOOLS = new Set<string>([ const SERVER_TOOLS = new Set<string>([
'get_blocks_and_tools', 'get_blocks_and_tools',
'get_blocks_metadata', 'get_blocks_metadata',
@@ -403,19 +161,6 @@ const SIM_WORKFLOW_TOOL_HANDLERS: Record<
} }
} }
}, },
oauth_request_access: async (p, _c) => {
const providerName = (p.providerName || p.provider_name || 'the provider') as string
return {
success: true,
output: {
success: true,
status: 'requested',
providerName,
message: `Requested ${providerName} OAuth connection. The user should complete the OAuth modal in the UI, then retry credential-dependent actions.`,
},
}
},
manage_custom_tool: (p, c) => executeManageCustomTool(p, c),
} }
/** /**

View File

@@ -1,246 +0,0 @@
import { createLogger } from '@sim/logger'
import { getRedisClient } from '@/lib/core/config/redis'
import type { ExecutionEvent } from '@/lib/workflows/executor/execution-events'
const logger = createLogger('ExecutionEventBuffer')
const REDIS_PREFIX = 'execution:stream:'
const TTL_SECONDS = 60 * 60 // 1 hour
const EVENT_LIMIT = 1000
const RESERVE_BATCH = 100
const FLUSH_INTERVAL_MS = 15
const FLUSH_MAX_BATCH = 200
function getEventsKey(executionId: string) {
return `${REDIS_PREFIX}${executionId}:events`
}
function getSeqKey(executionId: string) {
return `${REDIS_PREFIX}${executionId}:seq`
}
function getMetaKey(executionId: string) {
return `${REDIS_PREFIX}${executionId}:meta`
}
export type ExecutionStreamStatus = 'active' | 'complete' | 'error' | 'cancelled'
export interface ExecutionStreamMeta {
status: ExecutionStreamStatus
userId?: string
workflowId?: string
updatedAt?: string
}
export interface ExecutionEventEntry {
eventId: number
executionId: string
event: ExecutionEvent
}
export interface ExecutionEventWriter {
write: (event: ExecutionEvent) => Promise<ExecutionEventEntry>
flush: () => Promise<void>
close: () => Promise<void>
}
export async function setExecutionMeta(
executionId: string,
meta: Partial<ExecutionStreamMeta>
): Promise<void> {
const redis = getRedisClient()
if (!redis) {
logger.warn('setExecutionMeta: Redis client unavailable', { executionId })
return
}
try {
const key = getMetaKey(executionId)
const payload: Record<string, string> = {
updatedAt: new Date().toISOString(),
}
if (meta.status) payload.status = meta.status
if (meta.userId) payload.userId = meta.userId
if (meta.workflowId) payload.workflowId = meta.workflowId
await redis.hset(key, payload)
await redis.expire(key, TTL_SECONDS)
} catch (error) {
logger.warn('Failed to update execution meta', {
executionId,
error: error instanceof Error ? error.message : String(error),
})
}
}
export async function getExecutionMeta(executionId: string): Promise<ExecutionStreamMeta | null> {
const redis = getRedisClient()
if (!redis) {
logger.warn('getExecutionMeta: Redis client unavailable', { executionId })
return null
}
try {
const key = getMetaKey(executionId)
const meta = await redis.hgetall(key)
if (!meta || Object.keys(meta).length === 0) return null
return meta as unknown as ExecutionStreamMeta
} catch (error) {
logger.warn('Failed to read execution meta', {
executionId,
error: error instanceof Error ? error.message : String(error),
})
return null
}
}
export async function readExecutionEvents(
executionId: string,
afterEventId: number
): Promise<ExecutionEventEntry[]> {
const redis = getRedisClient()
if (!redis) return []
try {
const raw = await redis.zrangebyscore(getEventsKey(executionId), afterEventId + 1, '+inf')
return raw
.map((entry) => {
try {
return JSON.parse(entry) as ExecutionEventEntry
} catch {
return null
}
})
.filter((entry): entry is ExecutionEventEntry => Boolean(entry))
} catch (error) {
logger.warn('Failed to read execution events', {
executionId,
error: error instanceof Error ? error.message : String(error),
})
return []
}
}
export function createExecutionEventWriter(executionId: string): ExecutionEventWriter {
const redis = getRedisClient()
if (!redis) {
logger.warn(
'createExecutionEventWriter: Redis client unavailable, events will not be buffered',
{
executionId,
}
)
return {
write: async (event) => ({ eventId: 0, executionId, event }),
flush: async () => {},
close: async () => {},
}
}
let pending: ExecutionEventEntry[] = []
let nextEventId = 0
let maxReservedId = 0
let flushTimer: ReturnType<typeof setTimeout> | null = null
const scheduleFlush = () => {
if (flushTimer) return
flushTimer = setTimeout(() => {
flushTimer = null
void flush()
}, FLUSH_INTERVAL_MS)
}
const reserveIds = async (minCount: number) => {
const reserveCount = Math.max(RESERVE_BATCH, minCount)
const newMax = await redis.incrby(getSeqKey(executionId), reserveCount)
const startId = newMax - reserveCount + 1
if (nextEventId === 0 || nextEventId > maxReservedId) {
nextEventId = startId
maxReservedId = newMax
}
}
let flushPromise: Promise<void> | null = null
let closed = false
const inflightWrites = new Set<Promise<ExecutionEventEntry>>()
const doFlush = async () => {
if (pending.length === 0) return
const batch = pending
pending = []
try {
const key = getEventsKey(executionId)
const zaddArgs: (string | number)[] = []
for (const entry of batch) {
zaddArgs.push(entry.eventId, JSON.stringify(entry))
}
const pipeline = redis.pipeline()
pipeline.zadd(key, ...zaddArgs)
pipeline.expire(key, TTL_SECONDS)
pipeline.expire(getSeqKey(executionId), TTL_SECONDS)
pipeline.zremrangebyrank(key, 0, -EVENT_LIMIT - 1)
await pipeline.exec()
} catch (error) {
logger.warn('Failed to flush execution events', {
executionId,
batchSize: batch.length,
error: error instanceof Error ? error.message : String(error),
stack: error instanceof Error ? error.stack : undefined,
})
pending = batch.concat(pending)
}
}
const flush = async () => {
if (flushPromise) {
await flushPromise
return
}
flushPromise = doFlush()
try {
await flushPromise
} finally {
flushPromise = null
if (pending.length > 0) scheduleFlush()
}
}
const writeCore = async (event: ExecutionEvent): Promise<ExecutionEventEntry> => {
if (closed) return { eventId: 0, executionId, event }
if (nextEventId === 0 || nextEventId > maxReservedId) {
await reserveIds(1)
}
const eventId = nextEventId++
const entry: ExecutionEventEntry = { eventId, executionId, event }
pending.push(entry)
if (pending.length >= FLUSH_MAX_BATCH) {
await flush()
} else {
scheduleFlush()
}
return entry
}
const write = (event: ExecutionEvent): Promise<ExecutionEventEntry> => {
const p = writeCore(event)
inflightWrites.add(p)
const remove = () => inflightWrites.delete(p)
p.then(remove, remove)
return p
}
const close = async () => {
closed = true
if (flushTimer) {
clearTimeout(flushTimer)
flushTimer = null
}
if (inflightWrites.size > 0) {
await Promise.allSettled(inflightWrites)
}
if (flushPromise) {
await flushPromise
}
if (pending.length > 0) {
await doFlush()
}
}
return { write, flush, close }
}
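
Note on the deleted module above: its writer reserved event IDs in contiguous batches with a single INCRBY, so concurrent writers never collide. The core of that trick, distilled (assumes an ioredis-compatible client, as the original did):

import type { Redis } from 'ioredis'

// INCRBY returns the new maximum, so this call atomically claims the
// range (newMax - count, newMax] for the caller alone.
async function reserveIds(redis: Redis, seqKey: string, count: number): Promise<number[]> {
  const newMax = await redis.incrby(seqKey, count)
  const start = newMax - count + 1
  return Array.from({ length: count }, (_, i) => start + i)
}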

View File

@@ -1,4 +1,4 @@
import { createEnvMock, loggerMock } from '@sim/testing' import { createEnvMock, createMockLogger } from '@sim/testing'
import { beforeEach, describe, expect, it, type Mock, vi } from 'vitest' import { beforeEach, describe, expect, it, type Mock, vi } from 'vitest'
/** /**
@@ -10,6 +10,10 @@ import { beforeEach, describe, expect, it, type Mock, vi } from 'vitest'
* mock functions can intercept. * mock functions can intercept.
*/ */
const loggerMock = vi.hoisted(() => ({
createLogger: () => createMockLogger(),
}))
const mockSend = vi.fn() const mockSend = vi.fn()
const mockBatchSend = vi.fn() const mockBatchSend = vi.fn()
const mockAzureBeginSend = vi.fn() const mockAzureBeginSend = vi.fn()

View File

@@ -1,8 +1,20 @@
import { createEnvMock, databaseMock, loggerMock } from '@sim/testing' import { createEnvMock, createMockLogger } from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest' import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { EmailType } from '@/lib/messaging/email/mailer' import type { EmailType } from '@/lib/messaging/email/mailer'
vi.mock('@sim/db', () => databaseMock) const loggerMock = vi.hoisted(() => ({
createLogger: () => createMockLogger(),
}))
const mockDb = vi.hoisted(() => ({
select: vi.fn(),
insert: vi.fn(),
update: vi.fn(),
}))
vi.mock('@sim/db', () => ({
db: mockDb,
}))
vi.mock('@sim/db/schema', () => ({ vi.mock('@sim/db/schema', () => ({
user: { id: 'id', email: 'email' }, user: { id: 'id', email: 'email' },
@@ -18,8 +30,6 @@ vi.mock('drizzle-orm', () => ({
eq: vi.fn((a, b) => ({ type: 'eq', left: a, right: b })), eq: vi.fn((a, b) => ({ type: 'eq', left: a, right: b })),
})) }))
const mockDb = databaseMock.db as Record<string, ReturnType<typeof vi.fn>>
vi.mock('@/lib/core/config/env', () => createEnvMock({ BETTER_AUTH_SECRET: 'test-secret-key' })) vi.mock('@/lib/core/config/env', () => createEnvMock({ BETTER_AUTH_SECRET: 'test-secret-key' }))
vi.mock('@sim/logger', () => loggerMock) vi.mock('@sim/logger', () => loggerMock)
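
Note on the hoisting pattern above: vitest moves vi.mock factories to the top of the file before imports run, so anything a factory closes over must itself be created with vi.hoisted. The shape used in both test files, with the @sim/testing helper inlined as plain stubs for self-containment:

import { vi } from 'vitest'

// Created eagerly at hoist time, so the vi.mock factory below can safely reference it.
const loggerMock = vi.hoisted(() => ({
  createLogger: () => ({ info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() }),
}))

vi.mock('@sim/logger', () => loggerMock)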

View File

@@ -2364,261 +2364,6 @@ describe('hasWorkflowChanged', () => {
}) })
}) })
describe('Trigger Config Normalization (False Positive Prevention)', () => {
it.concurrent(
'should not detect change when deployed has null fields but current has values from triggerConfig',
() => {
// Core scenario: deployed state has null individual fields, current state has
// values populated from triggerConfig at runtime by populateTriggerFieldsFromConfig
const deployedState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
botToken: { id: 'botToken', type: 'short-input', value: null },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123', botToken: 'token456' },
},
},
}),
},
})
const currentState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: 'secret123' },
botToken: { id: 'botToken', type: 'short-input', value: 'token456' },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123', botToken: 'token456' },
},
},
}),
},
})
expect(hasWorkflowChanged(currentState, deployedState)).toBe(false)
}
)
it.concurrent(
'should detect change when user edits a trigger field to a different value',
() => {
const deployedState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'old-secret' },
},
},
}),
},
})
const currentState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: 'new-secret' },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'old-secret' },
},
},
}),
},
})
expect(hasWorkflowChanged(currentState, deployedState)).toBe(true)
}
)
it.concurrent('should not detect change when both sides have no triggerConfig', () => {
const deployedState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
},
}),
},
})
const currentState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
},
}),
},
})
expect(hasWorkflowChanged(currentState, deployedState)).toBe(false)
})
it.concurrent(
'should not detect change when deployed has empty fields and triggerConfig populates them',
() => {
// Empty string is also treated as "empty" by normalizeTriggerConfigValues
const deployedState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: '' },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123' },
},
},
}),
},
})
const currentState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: 'secret123' },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123' },
},
},
}),
},
})
expect(hasWorkflowChanged(currentState, deployedState)).toBe(false)
}
)
it.concurrent('should not detect change when triggerId differs', () => {
const deployedState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
model: { value: 'gpt-4' },
triggerId: { value: null },
},
}),
},
})
const currentState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
model: { value: 'gpt-4' },
triggerId: { value: 'slack_webhook' },
},
}),
},
})
expect(hasWorkflowChanged(currentState, deployedState)).toBe(false)
})
it.concurrent(
'should not detect change for namespaced system subBlock IDs like samplePayload_slack_webhook',
() => {
const deployedState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
model: { value: 'gpt-4' },
samplePayload_slack_webhook: { value: 'old payload' },
triggerInstructions_slack_webhook: { value: 'old instructions' },
},
}),
},
})
const currentState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
model: { value: 'gpt-4' },
samplePayload_slack_webhook: { value: 'new payload' },
triggerInstructions_slack_webhook: { value: 'new instructions' },
},
}),
},
})
expect(hasWorkflowChanged(currentState, deployedState)).toBe(false)
}
)
it.concurrent(
'should handle mixed scenario: some fields from triggerConfig, some user-edited',
() => {
const deployedState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
botToken: { id: 'botToken', type: 'short-input', value: null },
includeFiles: { id: 'includeFiles', type: 'switch', value: false },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123', botToken: 'token456' },
},
},
}),
},
})
const currentState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: 'secret123' },
botToken: { id: 'botToken', type: 'short-input', value: 'token456' },
includeFiles: { id: 'includeFiles', type: 'switch', value: true },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123', botToken: 'token456' },
},
},
}),
},
})
// includeFiles changed from false to true — this IS a real change
expect(hasWorkflowChanged(currentState, deployedState)).toBe(true)
}
)
})
describe('Trigger Runtime Metadata (Should Not Trigger Change)', () => {
it.concurrent('should not detect change when webhookId differs', () => {
const deployedState = createWorkflowState({

View File

@@ -9,7 +9,6 @@ import {
normalizeLoop,
normalizeParallel,
normalizeSubBlockValue,
-normalizeTriggerConfigValues,
normalizeValue,
normalizeVariables,
sanitizeVariable,
@@ -173,18 +172,14 @@ export function generateWorkflowDiffSummary(
}
}
-// Normalize trigger config values for both states before comparison
-const normalizedCurrentSubs = normalizeTriggerConfigValues(currentSubBlocks)
-const normalizedPreviousSubs = normalizeTriggerConfigValues(previousSubBlocks)
// Compare subBlocks using shared helper for filtering (single source of truth)
const allSubBlockIds = filterSubBlockIds([
-...new Set([...Object.keys(normalizedCurrentSubs), ...Object.keys(normalizedPreviousSubs)]),
+...new Set([...Object.keys(currentSubBlocks), ...Object.keys(previousSubBlocks)]),
])
for (const subId of allSubBlockIds) {
-const currentSub = normalizedCurrentSubs[subId] as Record<string, unknown> | undefined
-const previousSub = normalizedPreviousSubs[subId] as Record<string, unknown> | undefined
+const currentSub = currentSubBlocks[subId] as Record<string, unknown> | undefined
+const previousSub = previousSubBlocks[subId] as Record<string, unknown> | undefined
if (!currentSub || !previousSub) {
changes.push({

View File

@@ -4,12 +4,10 @@
import { describe, expect, it } from 'vitest'
import type { Loop, Parallel } from '@/stores/workflows/workflow/types'
import {
-filterSubBlockIds,
normalizedStringify,
normalizeEdge,
normalizeLoop,
normalizeParallel,
-normalizeTriggerConfigValues,
normalizeValue,
sanitizeInputFormat,
sanitizeTools,
@@ -586,214 +584,4 @@ describe('Workflow Normalization Utilities', () => {
expect(result2).toBe(result3)
})
})
describe('filterSubBlockIds', () => {
it.concurrent('should exclude exact SYSTEM_SUBBLOCK_IDS', () => {
const ids = ['signingSecret', 'samplePayload', 'triggerInstructions', 'botToken']
const result = filterSubBlockIds(ids)
expect(result).toEqual(['botToken', 'signingSecret'])
})
it.concurrent('should exclude namespaced SYSTEM_SUBBLOCK_IDS (prefix matching)', () => {
const ids = [
'signingSecret',
'samplePayload_slack_webhook',
'triggerInstructions_slack_webhook',
'webhookUrlDisplay_slack_webhook',
'botToken',
]
const result = filterSubBlockIds(ids)
expect(result).toEqual(['botToken', 'signingSecret'])
})
it.concurrent('should exclude exact TRIGGER_RUNTIME_SUBBLOCK_IDS', () => {
const ids = ['webhookId', 'triggerPath', 'triggerConfig', 'triggerId', 'signingSecret']
const result = filterSubBlockIds(ids)
expect(result).toEqual(['signingSecret'])
})
it.concurrent('should not exclude IDs that merely contain a system ID substring', () => {
const ids = ['mySamplePayload', 'notSamplePayload']
const result = filterSubBlockIds(ids)
expect(result).toEqual(['mySamplePayload', 'notSamplePayload'])
})
it.concurrent('should return sorted results', () => {
const ids = ['zebra', 'alpha', 'middle']
const result = filterSubBlockIds(ids)
expect(result).toEqual(['alpha', 'middle', 'zebra'])
})
it.concurrent('should handle empty array', () => {
expect(filterSubBlockIds([])).toEqual([])
})
it.concurrent('should handle all IDs being excluded', () => {
const ids = ['webhookId', 'triggerPath', 'samplePayload', 'triggerConfig']
const result = filterSubBlockIds(ids)
expect(result).toEqual([])
})
it.concurrent('should exclude setupScript and scheduleInfo namespaced variants', () => {
const ids = ['setupScript_google_sheets_row', 'scheduleInfo_cron_trigger', 'realField']
const result = filterSubBlockIds(ids)
expect(result).toEqual(['realField'])
})
it.concurrent('should exclude triggerCredentials namespaced variants', () => {
const ids = ['triggerCredentials_slack_webhook', 'signingSecret']
const result = filterSubBlockIds(ids)
expect(result).toEqual(['signingSecret'])
})
})
describe('normalizeTriggerConfigValues', () => {
it.concurrent('should return subBlocks unchanged when no triggerConfig exists', () => {
const subBlocks = {
signingSecret: { id: 'signingSecret', type: 'short-input', value: 'secret123' },
botToken: { id: 'botToken', type: 'short-input', value: 'token456' },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect(result).toEqual(subBlocks)
})
it.concurrent('should return subBlocks unchanged when triggerConfig value is null', () => {
const subBlocks = {
triggerConfig: { id: 'triggerConfig', type: 'short-input', value: null },
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect(result).toEqual(subBlocks)
})
it.concurrent(
'should return subBlocks unchanged when triggerConfig value is not an object',
() => {
const subBlocks = {
triggerConfig: { id: 'triggerConfig', type: 'short-input', value: 'string-value' },
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect(result).toEqual(subBlocks)
}
)
it.concurrent('should populate null individual fields from triggerConfig', () => {
const subBlocks = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123', botToken: 'token456' },
},
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
botToken: { id: 'botToken', type: 'short-input', value: null },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect((result.signingSecret as Record<string, unknown>).value).toBe('secret123')
expect((result.botToken as Record<string, unknown>).value).toBe('token456')
})
it.concurrent('should populate undefined individual fields from triggerConfig', () => {
const subBlocks = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123' },
},
signingSecret: { id: 'signingSecret', type: 'short-input', value: undefined },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect((result.signingSecret as Record<string, unknown>).value).toBe('secret123')
})
it.concurrent('should populate empty string individual fields from triggerConfig', () => {
const subBlocks = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123' },
},
signingSecret: { id: 'signingSecret', type: 'short-input', value: '' },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect((result.signingSecret as Record<string, unknown>).value).toBe('secret123')
})
it.concurrent('should NOT overwrite existing non-empty individual field values', () => {
const subBlocks = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'old-secret' },
},
signingSecret: { id: 'signingSecret', type: 'short-input', value: 'user-edited-secret' },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect((result.signingSecret as Record<string, unknown>).value).toBe('user-edited-secret')
})
it.concurrent('should skip triggerConfig fields that are null/undefined', () => {
const subBlocks = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: null, botToken: undefined },
},
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
botToken: { id: 'botToken', type: 'short-input', value: null },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect((result.signingSecret as Record<string, unknown>).value).toBe(null)
expect((result.botToken as Record<string, unknown>).value).toBe(null)
})
it.concurrent('should skip fields from triggerConfig that have no matching subBlock', () => {
const subBlocks = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { nonExistentField: 'value123' },
},
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect(result.nonExistentField).toBeUndefined()
expect((result.signingSecret as Record<string, unknown>).value).toBe(null)
})
it.concurrent('should not mutate the original subBlocks object', () => {
const original = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123' },
},
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
}
normalizeTriggerConfigValues(original)
expect((original.signingSecret as Record<string, unknown>).value).toBe(null)
})
it.concurrent('should preserve other subBlock properties when populating value', () => {
const subBlocks = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123' },
},
signingSecret: {
id: 'signingSecret',
type: 'short-input',
value: null,
placeholder: 'Enter signing secret',
},
}
const result = normalizeTriggerConfigValues(subBlocks)
const normalized = result.signingSecret as Record<string, unknown>
expect(normalized.value).toBe('secret123')
expect(normalized.id).toBe('signingSecret')
expect(normalized.type).toBe('short-input')
expect(normalized.placeholder).toBe('Enter signing secret')
})
})
})

View File

@@ -418,48 +418,10 @@ export function extractBlockFieldsForComparison(block: BlockState): ExtractedBlo
*/
export function filterSubBlockIds(subBlockIds: string[]): string[] {
return subBlockIds
-.filter((id) => {
-if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(id)) return false
-if (SYSTEM_SUBBLOCK_IDS.some((sysId) => id === sysId || id.startsWith(`${sysId}_`)))
-return false
-return true
-})
+.filter((id) => !SYSTEM_SUBBLOCK_IDS.includes(id) && !TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(id))
.sort()
}
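Note: a minimal sketch of the behavioral difference between the two filter variants above, assuming small stand-in values for the ID lists (the real SYSTEM_SUBBLOCK_IDS and TRIGGER_RUNTIME_SUBBLOCK_IDS constants live elsewhere in the codebase):

// Sketch with assumed constants; mirrors the removed tests earlier in this diff.
const SYSTEM_IDS_EXAMPLE = ['samplePayload', 'triggerInstructions']
const RUNTIME_IDS_EXAMPLE = ['webhookId', 'triggerConfig']

function filterWithPrefixMatching(ids: string[]): string[] {
  return ids
    .filter((id) => {
      if (RUNTIME_IDS_EXAMPLE.includes(id)) return false
      // Prefix matching also excludes namespaced variants like `samplePayload_slack_webhook`
      if (SYSTEM_IDS_EXAMPLE.some((sysId) => id === sysId || id.startsWith(`${sysId}_`))) return false
      return true
    })
    .sort()
}

// Prefix matching drops the namespaced sample payload; a plain includes() check would keep it:
filterWithPrefixMatching(['signingSecret', 'samplePayload_slack_webhook']) // ['signingSecret']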
/**
* Normalizes trigger block subBlocks by populating null/empty individual fields
* from the triggerConfig aggregate subBlock. This compensates for the runtime
* population done by populateTriggerFieldsFromConfig, ensuring consistent
* comparison between client state (with populated values) and deployed state
* (with null values from DB).
*/
export function normalizeTriggerConfigValues(
subBlocks: Record<string, unknown>
): Record<string, unknown> {
const triggerConfigSub = subBlocks.triggerConfig as Record<string, unknown> | undefined
const triggerConfigValue = triggerConfigSub?.value
if (!triggerConfigValue || typeof triggerConfigValue !== 'object') {
return subBlocks
}
const result = { ...subBlocks }
for (const [fieldId, configValue] of Object.entries(
triggerConfigValue as Record<string, unknown>
)) {
if (configValue === null || configValue === undefined) continue
const existingSub = result[fieldId] as Record<string, unknown> | undefined
if (
existingSub &&
(existingSub.value === null || existingSub.value === undefined || existingSub.value === '')
) {
result[fieldId] = { ...existingSub, value: configValue }
}
}
return result
}
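For reference, a small usage sketch of the helper above, mirroring the unit tests earlier in this diff (values are illustrative):

const exampleSubBlocks = {
  triggerConfig: {
    id: 'triggerConfig',
    type: 'short-input',
    value: { signingSecret: 'secret123' },
  },
  signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
}
const normalized = normalizeTriggerConfigValues(exampleSubBlocks)
// The empty field is filled from the aggregate; the input object is not mutated:
// (normalized.signingSecret as Record<string, unknown>).value === 'secret123'
// exampleSubBlocks.signingSecret.value === null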
/**
* Normalizes a subBlock value with sanitization for specific subBlock types.
* Sanitizes: tools (removes isExpanded), inputFormat (removes collapsed)

View File

@@ -1,11 +1,18 @@
/**
* @vitest-environment node
*/
-import { loggerMock } from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { BlockState, WorkflowState } from '@/stores/workflows/workflow/types'
-vi.mock('@sim/logger', () => loggerMock)
+// Mock all external dependencies before imports
+vi.mock('@sim/logger', () => ({
+createLogger: () => ({
+info: vi.fn(),
+warn: vi.fn(),
+error: vi.fn(),
+debug: vi.fn(),
+}),
+}))
vi.mock('@/stores/workflows/workflow/store', () => ({
useWorkflowStore: {
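Note: Vitest hoists vi.mock calls above a file's import statements, which is why the inline factory pattern works even though it is written after the imports; a minimal sketch of the pattern (module path illustrative):

import { vi } from 'vitest'

// Hoisted above imports by Vitest, so the module under test sees the stub.
vi.mock('@sim/logger', () => ({
  createLogger: () => ({ info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() }),
}))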

View File

@@ -1,5 +1,4 @@
import { getEnv, isTruthy } from '@/lib/core/config/env'
-import { isHosted } from '@/lib/core/config/feature-flags'
import type { SubBlockConfig } from '@/blocks/types'
export type CanonicalMode = 'basic' | 'advanced'
@@ -271,12 +270,3 @@ export function isSubBlockFeatureEnabled(subBlock: SubBlockConfig): boolean {
if (!subBlock.requiresFeature) return true
return isTruthy(getEnv(subBlock.requiresFeature))
}
/**
* Check if a subblock should be hidden because we're running on hosted Sim.
* Used for tool API key fields that should be hidden when Sim provides hosted keys.
*/
export function isSubBlockHiddenByHostedKey(subBlock: SubBlockConfig): boolean {
if (!subBlock.hideWhenHosted) return false
return isHosted
}

View File

@@ -14,15 +14,22 @@ import {
databaseMock,
expectWorkflowAccessDenied,
expectWorkflowAccessGranted,
-mockAuth,
} from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest'
-const mockDb = databaseMock.db
+vi.mock('@sim/db', () => databaseMock)
+// Mock the auth module
+vi.mock('@/lib/auth', () => ({
+getSession: vi.fn(),
+}))
+import { db } from '@sim/db'
+import { getSession } from '@/lib/auth'
+// Import after mocks are set up
+import { validateWorkflowPermissions } from '@/lib/workflows/utils'
describe('validateWorkflowPermissions', () => {
-const auth = mockAuth()
const mockSession = createSession({ userId: 'user-1', email: 'user1@test.com' })
const mockWorkflow = createWorkflowRecord({
@@ -35,17 +42,13 @@ describe('validateWorkflowPermissions', () => {
})
beforeEach(() => {
-vi.resetModules()
vi.clearAllMocks()
-vi.doMock('@sim/db', () => databaseMock)
})
describe('authentication', () => {
it('should return 401 when no session exists', async () => {
-auth.setUnauthenticated()
+vi.mocked(getSession).mockResolvedValue(null)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('wf-1', 'req-1', 'read')
expectWorkflowAccessDenied(result, 401)
@@ -53,9 +56,8 @@ describe('validateWorkflowPermissions', () => {
})
it('should return 401 when session has no user id', async () => {
-auth.mockGetSession.mockResolvedValue({ user: {} } as any)
+vi.mocked(getSession).mockResolvedValue({ user: {} } as any)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('wf-1', 'req-1', 'read')
expectWorkflowAccessDenied(result, 401)
@@ -64,14 +66,14 @@ describe('validateWorkflowPermissions', () => {
describe('workflow not found', () => {
it('should return 404 when workflow does not exist', async () => {
-auth.mockGetSession.mockResolvedValue(mockSession as any)
+vi.mocked(getSession).mockResolvedValue(mockSession as any)
+// Mock workflow query to return empty
const mockLimit = vi.fn().mockResolvedValue([])
const mockWhere = vi.fn(() => ({ limit: mockLimit }))
const mockFrom = vi.fn(() => ({ where: mockWhere }))
-vi.mocked(mockDb.select).mockReturnValue({ from: mockFrom } as any)
+vi.mocked(db.select).mockReturnValue({ from: mockFrom } as any)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('non-existent', 'req-1', 'read')
expectWorkflowAccessDenied(result, 404)
@@ -81,42 +83,43 @@ describe('validateWorkflowPermissions', () => {
describe('owner access', () => {
it('should deny access to workflow owner without workspace permissions for read action', async () => {
-auth.setAuthenticated({ id: 'owner-1', email: 'owner-1@test.com' })
+const ownerSession = createSession({ userId: 'owner-1' })
+vi.mocked(getSession).mockResolvedValue(ownerSession as any)
+// Mock workflow query
const mockLimit = vi.fn().mockResolvedValue([mockWorkflow])
const mockWhere = vi.fn(() => ({ limit: mockLimit }))
const mockFrom = vi.fn(() => ({ where: mockWhere }))
-vi.mocked(mockDb.select).mockReturnValue({ from: mockFrom } as any)
+vi.mocked(db.select).mockReturnValue({ from: mockFrom } as any)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('wf-1', 'req-1', 'read')
expectWorkflowAccessDenied(result, 403)
})
it('should deny access to workflow owner without workspace permissions for write action', async () => {
-auth.setAuthenticated({ id: 'owner-1', email: 'owner-1@test.com' })
+const ownerSession = createSession({ userId: 'owner-1' })
+vi.mocked(getSession).mockResolvedValue(ownerSession as any)
const mockLimit = vi.fn().mockResolvedValue([mockWorkflow])
const mockWhere = vi.fn(() => ({ limit: mockLimit }))
const mockFrom = vi.fn(() => ({ where: mockWhere }))
-vi.mocked(mockDb.select).mockReturnValue({ from: mockFrom } as any)
+vi.mocked(db.select).mockReturnValue({ from: mockFrom } as any)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('wf-1', 'req-1', 'write')
expectWorkflowAccessDenied(result, 403)
})
it('should deny access to workflow owner without workspace permissions for admin action', async () => {
-auth.setAuthenticated({ id: 'owner-1', email: 'owner-1@test.com' })
+const ownerSession = createSession({ userId: 'owner-1' })
+vi.mocked(getSession).mockResolvedValue(ownerSession as any)
const mockLimit = vi.fn().mockResolvedValue([mockWorkflow])
const mockWhere = vi.fn(() => ({ limit: mockLimit }))
const mockFrom = vi.fn(() => ({ where: mockWhere }))
-vi.mocked(mockDb.select).mockReturnValue({ from: mockFrom } as any)
+vi.mocked(db.select).mockReturnValue({ from: mockFrom } as any)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('wf-1', 'req-1', 'admin')
expectWorkflowAccessDenied(result, 403)
@@ -125,10 +128,11 @@ describe('validateWorkflowPermissions', () => {
describe('workspace member access with permissions', () => {
beforeEach(() => {
-auth.mockGetSession.mockResolvedValue(mockSession as any)
+vi.mocked(getSession).mockResolvedValue(mockSession as any)
})
it('should grant read access to user with read permission', async () => {
+// First call: workflow query, second call: workspace owner, third call: permission
let callCount = 0
const mockLimit = vi.fn().mockImplementation(() => {
callCount++
@@ -137,9 +141,8 @@ describe('validateWorkflowPermissions', () => {
})
const mockWhere = vi.fn(() => ({ limit: mockLimit }))
const mockFrom = vi.fn(() => ({ where: mockWhere }))
-vi.mocked(mockDb.select).mockReturnValue({ from: mockFrom } as any)
+vi.mocked(db.select).mockReturnValue({ from: mockFrom } as any)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('wf-1', 'req-1', 'read')
expectWorkflowAccessGranted(result)
@@ -154,9 +157,8 @@ describe('validateWorkflowPermissions', () => {
})
const mockWhere = vi.fn(() => ({ limit: mockLimit }))
const mockFrom = vi.fn(() => ({ where: mockWhere }))
-vi.mocked(mockDb.select).mockReturnValue({ from: mockFrom } as any)
+vi.mocked(db.select).mockReturnValue({ from: mockFrom } as any)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('wf-1', 'req-1', 'write')
expectWorkflowAccessDenied(result, 403)
@@ -172,9 +174,8 @@ describe('validateWorkflowPermissions', () => {
})
const mockWhere = vi.fn(() => ({ limit: mockLimit }))
const mockFrom = vi.fn(() => ({ where: mockWhere }))
-vi.mocked(mockDb.select).mockReturnValue({ from: mockFrom } as any)
+vi.mocked(db.select).mockReturnValue({ from: mockFrom } as any)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('wf-1', 'req-1', 'write')
expectWorkflowAccessGranted(result)
@@ -189,9 +190,8 @@ describe('validateWorkflowPermissions', () => {
})
const mockWhere = vi.fn(() => ({ limit: mockLimit }))
const mockFrom = vi.fn(() => ({ where: mockWhere }))
-vi.mocked(mockDb.select).mockReturnValue({ from: mockFrom } as any)
+vi.mocked(db.select).mockReturnValue({ from: mockFrom } as any)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('wf-1', 'req-1', 'write')
expectWorkflowAccessGranted(result)
@@ -206,9 +206,8 @@ describe('validateWorkflowPermissions', () => {
})
const mockWhere = vi.fn(() => ({ limit: mockLimit }))
const mockFrom = vi.fn(() => ({ where: mockWhere }))
-vi.mocked(mockDb.select).mockReturnValue({ from: mockFrom } as any)
+vi.mocked(db.select).mockReturnValue({ from: mockFrom } as any)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('wf-1', 'req-1', 'admin')
expectWorkflowAccessDenied(result, 403)
@@ -224,9 +223,8 @@ describe('validateWorkflowPermissions', () => {
})
const mockWhere = vi.fn(() => ({ limit: mockLimit }))
const mockFrom = vi.fn(() => ({ where: mockWhere }))
-vi.mocked(mockDb.select).mockReturnValue({ from: mockFrom } as any)
+vi.mocked(db.select).mockReturnValue({ from: mockFrom } as any)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('wf-1', 'req-1', 'admin')
expectWorkflowAccessGranted(result)
@@ -235,19 +233,18 @@ describe('validateWorkflowPermissions', () => {
describe('no workspace permission', () => {
it('should deny access to user without any workspace permission', async () => {
-auth.mockGetSession.mockResolvedValue(mockSession as any)
+vi.mocked(getSession).mockResolvedValue(mockSession as any)
let callCount = 0
const mockLimit = vi.fn().mockImplementation(() => {
callCount++
if (callCount === 1) return Promise.resolve([mockWorkflow])
-return Promise.resolve([])
+return Promise.resolve([]) // No permission record
})
const mockWhere = vi.fn(() => ({ limit: mockLimit }))
const mockFrom = vi.fn(() => ({ where: mockWhere }))
-vi.mocked(mockDb.select).mockReturnValue({ from: mockFrom } as any)
+vi.mocked(db.select).mockReturnValue({ from: mockFrom } as any)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('wf-1', 'req-1', 'read')
expectWorkflowAccessDenied(result, 403)
@@ -262,14 +259,13 @@ describe('validateWorkflowPermissions', () => {
workspaceId: null,
})
-auth.mockGetSession.mockResolvedValue(mockSession as any)
+vi.mocked(getSession).mockResolvedValue(mockSession as any)
const mockLimit = vi.fn().mockResolvedValue([workflowWithoutWorkspace])
const mockWhere = vi.fn(() => ({ limit: mockLimit }))
const mockFrom = vi.fn(() => ({ where: mockWhere }))
-vi.mocked(mockDb.select).mockReturnValue({ from: mockFrom } as any)
+vi.mocked(db.select).mockReturnValue({ from: mockFrom } as any)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('wf-2', 'req-1', 'read')
expectWorkflowAccessDenied(result, 403)
@@ -282,14 +278,13 @@ describe('validateWorkflowPermissions', () => {
workspaceId: null,
})
-auth.mockGetSession.mockResolvedValue(mockSession as any)
+vi.mocked(getSession).mockResolvedValue(mockSession as any)
const mockLimit = vi.fn().mockResolvedValue([workflowWithoutWorkspace])
const mockWhere = vi.fn(() => ({ limit: mockLimit }))
const mockFrom = vi.fn(() => ({ where: mockWhere }))
-vi.mocked(mockDb.select).mockReturnValue({ from: mockFrom } as any)
+vi.mocked(db.select).mockReturnValue({ from: mockFrom } as any)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('wf-2', 'req-1', 'read')
expectWorkflowAccessDenied(result, 403)
@@ -298,7 +293,7 @@ describe('validateWorkflowPermissions', () => {
describe('default action', () => {
it('should default to read action when not specified', async () => {
-auth.mockGetSession.mockResolvedValue(mockSession as any)
+vi.mocked(getSession).mockResolvedValue(mockSession as any)
let callCount = 0
const mockLimit = vi.fn().mockImplementation(() => {
@@ -308,9 +303,8 @@ describe('validateWorkflowPermissions', () => {
})
const mockWhere = vi.fn(() => ({ limit: mockLimit }))
const mockFrom = vi.fn(() => ({ where: mockWhere }))
-vi.mocked(mockDb.select).mockReturnValue({ from: mockFrom } as any)
+vi.mocked(db.select).mockReturnValue({ from: mockFrom } as any)
-const { validateWorkflowPermissions } = await import('@/lib/workflows/utils')
const result = await validateWorkflowPermissions('wf-1', 'req-1')
expectWorkflowAccessGranted(result)

View File

@@ -1,7 +1,17 @@
-import { databaseMock, drizzleOrmMock } from '@sim/testing'
+import { drizzleOrmMock } from '@sim/testing/mocks'
import { beforeEach, describe, expect, it, vi } from 'vitest'
-vi.mock('@sim/db', () => databaseMock)
+vi.mock('@sim/db', () => ({
+db: {
+select: vi.fn(),
+from: vi.fn(),
+where: vi.fn(),
+limit: vi.fn(),
+innerJoin: vi.fn(),
+leftJoin: vi.fn(),
+orderBy: vi.fn(),
+},
+}))
vi.mock('@sim/db/schema', () => ({
permissions: {

View File

@@ -5,7 +5,6 @@ import {
type GenerateContentConfig,
type GenerateContentResponse,
type GoogleGenAI,
-type Interactions,
type Part,
type Schema,
type ThinkingConfig,
@@ -28,7 +27,6 @@ import {
import type { FunctionCallResponse, ProviderRequest, ProviderResponse } from '@/providers/types'
import {
calculateCost,
-isDeepResearchModel,
prepareToolExecution,
prepareToolsWithUsageControl,
} from '@/providers/utils'
@@ -383,468 +381,6 @@ export interface GeminiExecutionConfig {
providerType: GeminiProviderType
}
const DEEP_RESEARCH_POLL_INTERVAL_MS = 10_000
const DEEP_RESEARCH_MAX_DURATION_MS = 60 * 60 * 1000
/**
* Sleeps for the specified number of milliseconds
*/
function sleep(ms: number): Promise<void> {
return new Promise((resolve) => setTimeout(resolve, ms))
}
/**
* Collapses a ProviderRequest into a single input string and optional system instruction
* for the Interactions API, which takes a flat input rather than a messages array.
*
* Deep research is single-turn only — it takes one research query and returns a report.
* Memory/conversation history is hidden in the UI for deep research models, so only
* the last user message is used as input. System messages are passed via system_instruction.
*/
function collapseMessagesToInput(request: ProviderRequest): {
input: string
systemInstruction: string | undefined
} {
const systemParts: string[] = []
const userParts: string[] = []
if (request.systemPrompt) {
systemParts.push(request.systemPrompt)
}
if (request.messages) {
for (const msg of request.messages) {
if (msg.role === 'system' && msg.content) {
systemParts.push(msg.content)
} else if (msg.role === 'user' && msg.content) {
userParts.push(msg.content)
}
}
}
return {
input:
userParts.length > 0
? userParts[userParts.length - 1]
: 'Please conduct research on the provided topic.',
systemInstruction: systemParts.length > 0 ? systemParts.join('\n\n') : undefined,
}
}
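A quick sketch of what the collapse produces for a multi-message request (values illustrative):

const { input, systemInstruction } = collapseMessagesToInput({
  systemPrompt: 'You are a research assistant.',
  messages: [
    { role: 'user', content: 'Earlier question' },
    { role: 'user', content: 'Summarize recent work on workflow engines' },
  ],
} as ProviderRequest)
// input === 'Summarize recent work on workflow engines' (last user message only)
// systemInstruction === 'You are a research assistant.'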
/**
* Extracts text content from a completed interaction's outputs array.
* The outputs array can contain text, thought, google_search_result, and other types.
* We concatenate all text outputs to get the full research report.
*/
function extractTextFromInteractionOutputs(outputs: Interactions.Interaction['outputs']): string {
if (!outputs || outputs.length === 0) return ''
const textParts: string[] = []
for (const output of outputs) {
if (output.type === 'text') {
const text = (output as Interactions.TextContent).text
if (text) textParts.push(text)
}
}
return textParts.join('\n\n')
}
/**
* Extracts token usage from an Interaction's Usage object.
* The Interactions API provides total_input_tokens, total_output_tokens, total_tokens,
* and total_reasoning_tokens (for thinking models).
*
* Also handles the raw API field name total_thought_tokens which the SDK may
* map to total_reasoning_tokens.
*/
function extractInteractionUsage(usage: Interactions.Usage | undefined): {
inputTokens: number
outputTokens: number
reasoningTokens: number
totalTokens: number
} {
if (!usage) {
return { inputTokens: 0, outputTokens: 0, reasoningTokens: 0, totalTokens: 0 }
}
const usageLogger = createLogger('DeepResearchUsage')
usageLogger.info('Raw interaction usage', { usage: JSON.stringify(usage) })
const inputTokens = usage.total_input_tokens ?? 0
const outputTokens = usage.total_output_tokens ?? 0
const reasoningTokens =
usage.total_reasoning_tokens ??
((usage as Record<string, unknown>).total_thought_tokens as number) ??
0
const totalTokens = usage.total_tokens ?? inputTokens + outputTokens
return { inputTokens, outputTokens, reasoningTokens, totalTokens }
}
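Sketch of the fallback behavior for the raw thought-token field name (input values illustrative):

extractInteractionUsage({
  total_input_tokens: 1200,
  total_output_tokens: 800,
  total_thought_tokens: 300, // raw API name; picked up by the fallback above
} as unknown as Interactions.Usage)
// => { inputTokens: 1200, outputTokens: 800, reasoningTokens: 300, totalTokens: 2000 }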
/**
* Builds a standard ProviderResponse from a completed deep research interaction.
*/
function buildDeepResearchResponse(
content: string,
model: string,
usage: {
inputTokens: number
outputTokens: number
reasoningTokens: number
totalTokens: number
},
providerStartTime: number,
providerStartTimeISO: string,
interactionId?: string
): ProviderResponse {
const providerEndTime = Date.now()
const duration = providerEndTime - providerStartTime
return {
content,
model,
tokens: {
input: usage.inputTokens,
output: usage.outputTokens,
total: usage.totalTokens,
},
timing: {
startTime: providerStartTimeISO,
endTime: new Date(providerEndTime).toISOString(),
duration,
modelTime: duration,
toolsTime: 0,
firstResponseTime: duration,
iterations: 1,
timeSegments: [
{
type: 'model',
name: 'Deep research',
startTime: providerStartTime,
endTime: providerEndTime,
duration,
},
],
},
cost: calculateCost(model, usage.inputTokens, usage.outputTokens),
interactionId,
}
}
/**
* Creates a ReadableStream from a deep research streaming interaction.
*
* Deep research streaming returns InteractionSSEEvent chunks including:
* - interaction.start: initial interaction with ID
* - content.delta: incremental text and thought_summary updates
* - content.start / content.stop: output boundaries
* - interaction.complete: final event (outputs is undefined in streaming; must reconstruct)
* - error: error events
*
* We stream text deltas to the client and track usage from the interaction.complete event.
*/
function createDeepResearchStream(
stream: AsyncIterable<Interactions.InteractionSSEEvent>,
onComplete?: (
content: string,
usage: {
inputTokens: number
outputTokens: number
reasoningTokens: number
totalTokens: number
},
interactionId?: string
) => void
): ReadableStream<Uint8Array> {
const streamLogger = createLogger('DeepResearchStream')
let fullContent = ''
let completionUsage = { inputTokens: 0, outputTokens: 0, reasoningTokens: 0, totalTokens: 0 }
let completedInteractionId: string | undefined
return new ReadableStream({
async start(controller) {
try {
for await (const event of stream) {
if (event.event_type === 'content.delta') {
const delta = (event as Interactions.ContentDelta).delta
if (delta?.type === 'text' && 'text' in delta && delta.text) {
fullContent += delta.text
controller.enqueue(new TextEncoder().encode(delta.text))
}
} else if (event.event_type === 'interaction.complete') {
const interaction = (event as Interactions.InteractionEvent).interaction
if (interaction?.usage) {
completionUsage = extractInteractionUsage(interaction.usage)
}
completedInteractionId = interaction?.id
} else if (event.event_type === 'interaction.start') {
const interaction = (event as Interactions.InteractionEvent).interaction
if (interaction?.id) {
completedInteractionId = interaction.id
}
} else if (event.event_type === 'error') {
const errorEvent = event as { error?: { code?: string; message?: string } }
const message = errorEvent.error?.message ?? 'Unknown deep research stream error'
streamLogger.error('Deep research stream error', {
code: errorEvent.error?.code,
message,
})
controller.error(new Error(message))
return
}
}
onComplete?.(fullContent, completionUsage, completedInteractionId)
controller.close()
} catch (error) {
streamLogger.error('Error reading deep research stream', {
error: error instanceof Error ? error.message : String(error),
})
controller.error(error)
}
},
})
}
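The returned stream is a standard web ReadableStream, so any conventional reader works; a minimal consumer sketch (not part of this diff):

async function readAll(stream: ReadableStream<Uint8Array>): Promise<string> {
  const reader = stream.getReader()
  const decoder = new TextDecoder()
  let text = ''
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    text += decoder.decode(value, { stream: true })
  }
  return text
}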
/**
* Executes a deep research request using the Interactions API.
*
* Deep research uses the Interactions API ({@link https://ai.google.dev/api/interactions-api}),
* a completely different surface from generateContent. It creates a background interaction
* that performs comprehensive research (up to 60 minutes).
*
* Supports both streaming and non-streaming modes:
* - Streaming: returns a StreamingExecution with a ReadableStream of text deltas
* - Non-streaming: polls until completion and returns a ProviderResponse
*
* Deep research does NOT support custom function calling tools, MCP servers,
* or structured output (response_format). These are gracefully ignored.
*/
export async function executeDeepResearchRequest(
config: GeminiExecutionConfig
): Promise<ProviderResponse | StreamingExecution> {
const { ai, model, request, providerType } = config
const logger = createLogger(providerType === 'google' ? 'GoogleProvider' : 'VertexProvider')
logger.info('Preparing deep research request', {
model,
hasSystemPrompt: !!request.systemPrompt,
hasMessages: !!request.messages?.length,
streaming: !!request.stream,
hasPreviousInteractionId: !!request.previousInteractionId,
})
if (request.tools?.length) {
logger.warn('Deep research does not support custom tools — ignoring tools parameter')
}
if (request.responseFormat) {
logger.warn(
'Deep research does not support structured output — ignoring responseFormat parameter'
)
}
const providerStartTime = Date.now()
const providerStartTimeISO = new Date(providerStartTime).toISOString()
try {
const { input, systemInstruction } = collapseMessagesToInput(request)
// Deep research requires background=true and store=true (store defaults to true,
// but we set it explicitly per API requirements)
const baseParams = {
agent: model as Interactions.CreateAgentInteractionParamsNonStreaming['agent'],
input,
background: true,
store: true,
...(systemInstruction && { system_instruction: systemInstruction }),
...(request.previousInteractionId && {
previous_interaction_id: request.previousInteractionId,
}),
agent_config: {
type: 'deep-research' as const,
thinking_summaries: 'auto' as const,
},
}
logger.info('Creating deep research interaction', {
inputLength: input.length,
hasSystemInstruction: !!systemInstruction,
streaming: !!request.stream,
})
// Streaming mode: create a streaming interaction and return a StreamingExecution
if (request.stream) {
const streamParams: Interactions.CreateAgentInteractionParamsStreaming = {
...baseParams,
stream: true,
}
const streamResponse = await ai.interactions.create(streamParams)
const firstResponseTime = Date.now() - providerStartTime
const streamingResult: StreamingExecution = {
stream: undefined as unknown as ReadableStream<Uint8Array>,
execution: {
success: true,
output: {
content: '',
model,
tokens: { input: 0, output: 0, total: 0 },
providerTiming: {
startTime: providerStartTimeISO,
endTime: new Date().toISOString(),
duration: Date.now() - providerStartTime,
modelTime: firstResponseTime,
toolsTime: 0,
firstResponseTime,
iterations: 1,
timeSegments: [
{
type: 'model',
name: 'Deep research (streaming)',
startTime: providerStartTime,
endTime: providerStartTime + firstResponseTime,
duration: firstResponseTime,
},
],
},
cost: {
input: 0,
output: 0,
total: 0,
pricing: { input: 0, output: 0, updatedAt: new Date().toISOString() },
},
},
logs: [],
metadata: {
startTime: providerStartTimeISO,
endTime: new Date().toISOString(),
duration: Date.now() - providerStartTime,
},
isStreaming: true,
},
}
streamingResult.stream = createDeepResearchStream(
streamResponse,
(content, usage, streamInteractionId) => {
streamingResult.execution.output.content = content
streamingResult.execution.output.tokens = {
input: usage.inputTokens,
output: usage.outputTokens,
total: usage.totalTokens,
}
streamingResult.execution.output.interactionId = streamInteractionId
const cost = calculateCost(model, usage.inputTokens, usage.outputTokens)
streamingResult.execution.output.cost = cost
const streamEndTime = Date.now()
if (streamingResult.execution.output.providerTiming) {
streamingResult.execution.output.providerTiming.endTime = new Date(
streamEndTime
).toISOString()
streamingResult.execution.output.providerTiming.duration =
streamEndTime - providerStartTime
const segments = streamingResult.execution.output.providerTiming.timeSegments
if (segments?.[0]) {
segments[0].endTime = streamEndTime
segments[0].duration = streamEndTime - providerStartTime
}
}
}
)
return streamingResult
}
// Non-streaming mode: create and poll
const createParams: Interactions.CreateAgentInteractionParamsNonStreaming = {
...baseParams,
stream: false,
}
const interaction = await ai.interactions.create(createParams)
const interactionId = interaction.id
logger.info('Deep research interaction created', { interactionId, status: interaction.status })
// Poll until a terminal status
const pollStartTime = Date.now()
let result: Interactions.Interaction = interaction
while (Date.now() - pollStartTime < DEEP_RESEARCH_MAX_DURATION_MS) {
if (result.status === 'completed') {
break
}
if (result.status === 'failed') {
throw new Error(`Deep research interaction failed: ${interactionId}`)
}
if (result.status === 'cancelled') {
throw new Error(`Deep research interaction was cancelled: ${interactionId}`)
}
logger.info('Deep research in progress, polling...', {
interactionId,
status: result.status,
elapsedMs: Date.now() - pollStartTime,
})
await sleep(DEEP_RESEARCH_POLL_INTERVAL_MS)
result = await ai.interactions.get(interactionId)
}
if (result.status !== 'completed') {
throw new Error(
`Deep research timed out after ${DEEP_RESEARCH_MAX_DURATION_MS / 1000}s (status: ${result.status})`
)
}
const content = extractTextFromInteractionOutputs(result.outputs)
const usage = extractInteractionUsage(result.usage)
logger.info('Deep research completed', {
interactionId,
contentLength: content.length,
inputTokens: usage.inputTokens,
outputTokens: usage.outputTokens,
reasoningTokens: usage.reasoningTokens,
totalTokens: usage.totalTokens,
durationMs: Date.now() - providerStartTime,
})
return buildDeepResearchResponse(
content,
model,
usage,
providerStartTime,
providerStartTimeISO,
interactionId
)
} catch (error) {
const providerEndTime = Date.now()
const duration = providerEndTime - providerStartTime
logger.error('Error in deep research request:', {
error: error instanceof Error ? error.message : String(error),
stack: error instanceof Error ? error.stack : undefined,
})
const enhancedError = error instanceof Error ? error : new Error(String(error))
Object.assign(enhancedError, {
timing: {
startTime: providerStartTimeISO,
endTime: new Date(providerEndTime).toISOString(),
duration,
},
})
throw enhancedError
}
}
/**
* Executes a request using the Gemini API
*
@@ -855,12 +391,6 @@ export async function executeGeminiRequest(
config: GeminiExecutionConfig
): Promise<ProviderResponse | StreamingExecution> {
const { ai, model, request, providerType } = config
// Route deep research models to the interactions API
if (isDeepResearchModel(model)) {
return executeDeepResearchRequest(config)
}
const logger = createLogger(providerType === 'google' ? 'GoogleProvider' : 'VertexProvider')
logger.info(`Preparing ${providerType} Gemini request`, {

View File

@@ -46,9 +46,6 @@ export interface ModelCapabilities {
levels: string[]
default?: string
}
-deepResearch?: boolean
-/** Whether this model supports conversation memory. Defaults to true if omitted. */
-memory?: boolean
}
export interface ModelDefinition {
@@ -828,7 +825,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
name: 'Google',
description: "Google's Gemini models",
defaultModel: 'gemini-2.5-pro',
-modelPatterns: [/^gemini/, /^deep-research/],
+modelPatterns: [/^gemini/],
capabilities: {
toolUsageControl: true,
},
@@ -931,19 +928,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
},
contextWindow: 1000000,
},
{
id: 'deep-research-pro-preview-12-2025',
pricing: {
input: 2.0,
output: 2.0,
updatedAt: '2026-02-10',
},
capabilities: {
deepResearch: true,
memory: false,
},
contextWindow: 1000000,
},
],
},
vertex: {
@@ -1054,19 +1038,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
},
contextWindow: 1000000,
},
{
id: 'vertex/deep-research-pro-preview-12-2025',
pricing: {
input: 2.0,
output: 2.0,
updatedAt: '2026-02-10',
},
capabilities: {
deepResearch: true,
memory: false,
},
contextWindow: 1000000,
},
],
},
deepseek: {
@@ -2509,37 +2480,6 @@ export function getThinkingLevelsForModel(modelId: string): string[] | null {
return capability?.levels ?? null
}
/**
* Get all models that support deep research capability
*/
export function getModelsWithDeepResearch(): string[] {
const models: string[] = []
for (const provider of Object.values(PROVIDER_DEFINITIONS)) {
for (const model of provider.models) {
if (model.capabilities.deepResearch) {
models.push(model.id)
}
}
}
return models
}
/**
* Get all models that explicitly disable memory support (memory: false).
* Models without this capability default to supporting memory.
*/
export function getModelsWithoutMemory(): string[] {
const models: string[] = []
for (const provider of Object.values(PROVIDER_DEFINITIONS)) {
for (const model of provider.models) {
if (model.capabilities.memory === false) {
models.push(model.id)
}
}
}
return models
}
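Sketch of how the capability getters above are typically consumed (this mirrors the exported constants removed later in this diff; values illustrative):

// Memory defaults to true when the capability is omitted.
const supportsMemory = (modelId: string): boolean =>
  !getModelsWithoutMemory().includes(modelId)

supportsMemory('deep-research-pro-preview-12-2025') // false (declares memory: false)
supportsMemory('gemini-2.5-pro') // true (capability omitted)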
/**
* Get the max output tokens for a specific model.
*

View File

@@ -95,8 +95,6 @@ export interface ProviderResponse {
total: number
pricing: ModelPricing
}
-/** Interaction ID returned by the Interactions API (used for multi-turn deep research) */
-interactionId?: string
}
export type ToolUsageControl = 'auto' | 'force' | 'none'
@@ -171,8 +169,6 @@ export interface ProviderRequest {
verbosity?: string
thinkingLevel?: string
isDeployedContext?: boolean
-/** Previous interaction ID for multi-turn Interactions API requests (deep research follow-ups) */
-previousInteractionId?: string
}
export const providers: Record<string, ProviderConfig> = {}

View File

@@ -12,8 +12,6 @@ import {
getMaxOutputTokensForModel as getMaxOutputTokensForModelFromDefinitions,
getMaxTemperature as getMaxTempFromDefinitions,
getModelPricing as getModelPricingFromDefinitions,
-getModelsWithDeepResearch,
-getModelsWithoutMemory,
getModelsWithReasoningEffort,
getModelsWithTemperatureSupport,
getModelsWithTempRange01,
@@ -955,8 +953,6 @@ export const MODELS_WITH_TEMPERATURE_SUPPORT = getModelsWithTemperatureSupport()
export const MODELS_WITH_REASONING_EFFORT = getModelsWithReasoningEffort()
export const MODELS_WITH_VERBOSITY = getModelsWithVerbosity()
export const MODELS_WITH_THINKING = getModelsWithThinking()
-export const MODELS_WITH_DEEP_RESEARCH = getModelsWithDeepResearch()
-export const MODELS_WITHOUT_MEMORY = getModelsWithoutMemory()
export const PROVIDERS_WITH_TOOL_USAGE_CONTROL = getProvidersWithToolUsageControl()
export function supportsTemperature(model: string): boolean {
@@ -975,10 +971,6 @@ export function supportsThinking(model: string): boolean {
return MODELS_WITH_THINKING.includes(model.toLowerCase())
}
-export function isDeepResearchModel(model: string): boolean {
-return MODELS_WITH_DEEP_RESEARCH.includes(model.toLowerCase())
-}
/**
* Get the maximum temperature value for a model
* @returns Maximum temperature value (1 or 2) or undefined if temperature not supported

Binary file not shown. (Image changed: 78 KiB before, 45 KiB after.)

Binary file not shown. (Image changed: 58 KiB before, 58 KiB after.)

View File

@@ -10,7 +10,6 @@ import {
isCanonicalPair,
isNonEmptyValue,
isSubBlockFeatureEnabled,
-isSubBlockHiddenByHostedKey,
resolveCanonicalMode,
} from '@/lib/workflows/subblocks/visibility'
import { getBlock } from '@/blocks'
@@ -50,7 +49,6 @@ function shouldSerializeSubBlock(
canonicalModeOverrides?: CanonicalModeOverrides
): boolean {
if (!isSubBlockFeatureEnabled(subBlockConfig)) return false
-if (isSubBlockHiddenByHostedKey(subBlockConfig)) return false
if (subBlockConfig.mode === 'trigger') {
if (!isTriggerContext && !isTriggerCategory) return false

View File

@@ -129,18 +129,6 @@ export const useExecutionStore = create<ExecutionState & ExecutionActions>()((se
})
},
-setCurrentExecutionId: (workflowId, executionId) => {
-set({
-workflowExecutions: updatedMap(get().workflowExecutions, workflowId, {
-currentExecutionId: executionId,
-}),
-})
-},
-getCurrentExecutionId: (workflowId) => {
-return getOrCreate(get().workflowExecutions, workflowId).currentExecutionId
-},
clearRunPath: (workflowId) => {
set({
workflowExecutions: updatedMap(get().workflowExecutions, workflowId, {

View File

@@ -35,8 +35,6 @@ export interface WorkflowExecutionState {
lastRunPath: Map<string, BlockRunStatus>
/** Maps edge IDs to their run result from the last execution */
lastRunEdges: Map<string, EdgeRunStatus>
-/** The execution ID of the currently running execution */
-currentExecutionId: string | null
}
/**
@@ -56,7 +54,6 @@ export const defaultWorkflowExecutionState: WorkflowExecutionState = {
debugContext: null,
lastRunPath: new Map(),
lastRunEdges: new Map(),
-currentExecutionId: null,
}
/**
@@ -99,10 +96,6 @@ export interface ExecutionActions {
setEdgeRunStatus: (workflowId: string, edgeId: string, status: EdgeRunStatus) => void
/** Clears the run path and run edges for a workflow */
clearRunPath: (workflowId: string) => void
-/** Stores the current execution ID for a workflow */
-setCurrentExecutionId: (workflowId: string, executionId: string | null) => void
-/** Returns the current execution ID for a workflow */
-getCurrentExecutionId: (workflowId: string) => string | null
/** Resets the entire store to its initial empty state */
reset: () => void
/** Stores a serializable execution snapshot for a workflow */

View File

@@ -310,50 +310,6 @@ function parseModelKey(compositeKey: string): { provider: string; modelId: strin
return { provider: compositeKey.slice(0, slashIdx), modelId: compositeKey.slice(slashIdx + 1) }
}
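Example of the key split performed above (inputs illustrative):

parseModelKey('anthropic/claude-opus-4-6')
// => { provider: 'anthropic', modelId: 'claude-opus-4-6' }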
/**
* Convert legacy/variant Claude IDs into the canonical ID shape used by the model catalog.
*
* Examples:
* - claude-4.5-opus -> claude-opus-4-5
* - claude-opus-4.6 -> claude-opus-4-6
* - anthropic.claude-opus-4-5-20251101-v1:0 -> claude-opus-4-5 (match key only)
*/
function canonicalizeModelMatchKey(modelId: string): string {
if (!modelId) return modelId
const normalized = modelId.trim().toLowerCase()
const toCanonicalClaude = (tier: string, version: string): string => {
const normalizedVersion = version.replace(/\./g, '-')
return `claude-${tier}-${normalizedVersion}`
}
const tierFirstExact = normalized.match(/^claude-(opus|sonnet|haiku)-(\d+(?:[.-]\d+)?)$/)
if (tierFirstExact) {
const [, tier, version] = tierFirstExact
return toCanonicalClaude(tier, version)
}
const versionFirstExact = normalized.match(/^claude-(\d+(?:[.-]\d+)?)-(opus|sonnet|haiku)$/)
if (versionFirstExact) {
const [, version, tier] = versionFirstExact
return toCanonicalClaude(tier, version)
}
const tierFirstEmbedded = normalized.match(/claude-(opus|sonnet|haiku)-(\d+(?:[.-]\d+)?)/)
if (tierFirstEmbedded) {
const [, tier, version] = tierFirstEmbedded
return toCanonicalClaude(tier, version)
}
const versionFirstEmbedded = normalized.match(/claude-(\d+(?:[.-]\d+)?)-(opus|sonnet|haiku)/)
if (versionFirstEmbedded) {
const [, version, tier] = versionFirstEmbedded
return toCanonicalClaude(tier, version)
}
return normalized
}
const MODEL_PROVIDER_PRIORITY = [ const MODEL_PROVIDER_PRIORITY = [
'anthropic', 'anthropic',
'bedrock', 'bedrock',
@@ -394,23 +350,12 @@ function normalizeSelectedModelKey(selectedModel: string, models: AvailableModel
const { provider, modelId } = parseModelKey(selectedModel) const { provider, modelId } = parseModelKey(selectedModel)
const targetModelId = modelId || selectedModel const targetModelId = modelId || selectedModel
const targetMatchKey = canonicalizeModelMatchKey(targetModelId)
const matches = models.filter((m) => { const matches = models.filter((m) => m.id.endsWith(`/${targetModelId}`))
const candidateModelId = parseModelKey(m.id).modelId || m.id
const candidateMatchKey = canonicalizeModelMatchKey(candidateModelId)
return (
candidateModelId === targetModelId ||
m.id.endsWith(`/${targetModelId}`) ||
candidateMatchKey === targetMatchKey
)
})
if (matches.length === 0) return selectedModel if (matches.length === 0) return selectedModel
if (provider) { if (provider) {
const sameProvider = matches.find( const sameProvider = matches.find((m) => m.provider === provider)
(m) => m.provider === provider || m.id.startsWith(`${provider}/`)
)
if (sameProvider) return sameProvider.id if (sameProvider) return sameProvider.id
} }
@@ -1042,7 +987,7 @@ const cachedAutoAllowedTools = readAutoAllowedToolsFromStorage()
 // Initial state (subset required for UI/streaming)
 const initialState = {
   mode: 'build' as const,
-  selectedModel: 'anthropic/claude-opus-4-5' as CopilotStore['selectedModel'],
+  selectedModel: 'anthropic/claude-opus-4-6' as CopilotStore['selectedModel'],
   agentPrefetch: false,
   availableModels: [] as AvailableModel[],
   isLoadingModels: false,
@@ -1148,12 +1093,11 @@ export const useCopilotStore = create<CopilotStore>()(
   const chatConfig = chat.config ?? {}
   const chatMode = chatConfig.mode || get().mode
   const chatModel = chatConfig.model || get().selectedModel
-  const normalizedChatModel = normalizeSelectedModelKey(chatModel, get().availableModels)

   logger.debug('[Chat] Restoring chat config', {
     chatId: chat.id,
     mode: chatMode,
-    model: normalizedChatModel,
+    model: chatModel,
     hasPlanArtifact: !!planArtifact,
   })
@@ -1175,7 +1119,7 @@ export const useCopilotStore = create<CopilotStore>()(
     showPlanTodos: false,
     streamingPlanContent: planArtifact,
     mode: chatMode,
-    selectedModel: normalizedChatModel as CopilotStore['selectedModel'],
+    selectedModel: chatModel as CopilotStore['selectedModel'],
     suppressAutoSelect: false,
   })
@@ -1348,10 +1292,6 @@ export const useCopilotStore = create<CopilotStore>()(
   const refreshedConfig = updatedCurrentChat.config ?? {}
   const refreshedMode = refreshedConfig.mode || get().mode
   const refreshedModel = refreshedConfig.model || get().selectedModel
-  const normalizedRefreshedModel = normalizeSelectedModelKey(
-    refreshedModel,
-    get().availableModels
-  )

   const toolCallsById = buildToolCallsById(normalizedMessages)

   set({
@@ -1360,7 +1300,7 @@ export const useCopilotStore = create<CopilotStore>()(
     toolCallsById,
     streamingPlanContent: refreshedPlanArtifact,
     mode: refreshedMode,
-    selectedModel: normalizedRefreshedModel as CopilotStore['selectedModel'],
+    selectedModel: refreshedModel as CopilotStore['selectedModel'],
   })
 }

 try {
@@ -1380,15 +1320,11 @@ export const useCopilotStore = create<CopilotStore>()(
   const chatConfig = mostRecentChat.config ?? {}
   const chatMode = chatConfig.mode || get().mode
   const chatModel = chatConfig.model || get().selectedModel
-  const normalizedChatModel = normalizeSelectedModelKey(
-    chatModel,
-    get().availableModels
-  )

   logger.info('[Chat] Auto-selecting most recent chat with config', {
     chatId: mostRecentChat.id,
     mode: chatMode,
-    model: normalizedChatModel,
+    model: chatModel,
     hasPlanArtifact: !!planArtifact,
   })
@@ -1400,7 +1336,7 @@ export const useCopilotStore = create<CopilotStore>()(
     toolCallsById,
     streamingPlanContent: planArtifact,
     mode: chatMode,
-    selectedModel: normalizedChatModel as CopilotStore['selectedModel'],
+    selectedModel: chatModel as CopilotStore['selectedModel'],
   })

   try {
     await get().loadMessageCheckpoints(mostRecentChat.id)
@@ -2332,8 +2268,7 @@ export const useCopilotStore = create<CopilotStore>()(
   },

   setSelectedModel: async (model) => {
-    const normalizedModel = normalizeSelectedModelKey(model, get().availableModels)
-    set({ selectedModel: normalizedModel as CopilotStore['selectedModel'] })
+    set({ selectedModel: model })
   },

   setAgentPrefetch: (prefetch) => set({ agentPrefetch: prefetch }),

   loadAvailableModels: async () => {
@@ -2381,17 +2316,17 @@ export const useCopilotStore = create<CopilotStore>()(
     (model) => model.id === normalizedSelectedModel
   )

-  // Pick the best default: prefer claude-opus-4-5 with provider priority:
+  // Pick the best default: prefer claude-opus-4-6 with provider priority:
   // direct anthropic > bedrock > azure-anthropic > any other.
   let nextSelectedModel = normalizedSelectedModel
   if (!selectedModelExists && normalizedModels.length > 0) {
-    let opus45: AvailableModel | undefined
+    let opus46: AvailableModel | undefined
     for (const prov of MODEL_PROVIDER_PRIORITY) {
-      opus45 = normalizedModels.find((m) => m.id === `${prov}/claude-opus-4-5`)
-      if (opus45) break
+      opus46 = normalizedModels.find((m) => m.id === `${prov}/claude-opus-4-6`)
+      if (opus46) break
     }
-    if (!opus45) opus45 = normalizedModels.find((m) => m.id.endsWith('/claude-opus-4-5'))
-    nextSelectedModel = opus45 ? opus45.id : normalizedModels[0].id
+    if (!opus46) opus46 = normalizedModels.find((m) => m.id.endsWith('/claude-opus-4-6'))
+    nextSelectedModel = opus46 ? opus46.id : normalizedModels[0].id
   }

   set({
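Note: the canonicalizeModelMatchKey function removed above collapses legacy and provider-specific Claude IDs to one catalog match key. A usage sketch, taken directly from the function's own doc comment and regexes:

// Behavior per the doc comment above:
canonicalizeModelMatchKey('claude-4.5-opus') // -> 'claude-opus-4-5'
canonicalizeModelMatchKey('claude-opus-4.6') // -> 'claude-opus-4-6'
canonicalizeModelMatchKey('anthropic.claude-opus-4-5-20251101-v1:0') // -> 'claude-opus-4-5' (embedded match)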

View File

@@ -224,7 +224,7 @@ export const useTerminalConsoleStore = create<ConsoleStore>()(
   const newEntry = get().entries[0]
-  if (newEntry?.error && newEntry.blockType !== 'cancelled') {
+  if (newEntry?.error) {
     notifyBlockError({
       error: newEntry.error,
       blockName: newEntry.blockName || 'Unknown Block',
@@ -243,11 +243,6 @@ export const useTerminalConsoleStore = create<ConsoleStore>()(
     useExecutionStore.getState().clearRunPath(workflowId)
   },

-  clearExecutionEntries: (executionId: string) =>
-    set((state) => ({
-      entries: state.entries.filter((e) => e.executionId !== executionId),
-    })),
-
   exportConsoleCSV: (workflowId: string) => {
     const entries = get().entries.filter((entry) => entry.workflowId === workflowId)
@@ -475,24 +470,12 @@ export const useTerminalConsoleStore = create<ConsoleStore>()(
   },

   merge: (persistedState, currentState) => {
     const persisted = persistedState as Partial<ConsoleStore> | undefined
-    const rawEntries = persisted?.entries ?? currentState.entries
-    const oneHourAgo = Date.now() - 60 * 60 * 1000
-    const entries = rawEntries.map((entry, index) => {
-      let updated = entry
+    const entries = (persisted?.entries ?? currentState.entries).map((entry, index) => {
       if (entry.executionOrder === undefined) {
-        updated = { ...updated, executionOrder: index + 1 }
+        return { ...entry, executionOrder: index + 1 }
       }
-      if (
-        entry.isRunning &&
-        entry.startedAt &&
-        new Date(entry.startedAt).getTime() < oneHourAgo
-      ) {
-        updated = { ...updated, isRunning: false }
-      }
-      return updated
+      return entry
     })
     return {
       ...currentState,
       entries,
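Note: the left-hand merge above also prunes stale isRunning flags on rehydrate, since no execution can still be running after a page reload. A standalone sketch of that one-hour cutoff:

// Standalone sketch of the cutoff logic from the left-hand merge above.
type Entry = { id: string; isRunning?: boolean; startedAt?: string }

const oneHourAgo = Date.now() - 60 * 60 * 1000
const rehydrated: Entry[] = [
  { id: 'a', isRunning: true, startedAt: new Date(Date.now() - 2 * 60 * 60 * 1000).toISOString() },
]
const cleaned = rehydrated.map((entry) =>
  entry.isRunning && entry.startedAt && new Date(entry.startedAt).getTime() < oneHourAgo
    ? { ...entry, isRunning: false } // stale: marked running before the cutoff
    : entry
)
// cleaned[0].isRunning === false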

View File

@@ -51,7 +51,6 @@ export interface ConsoleStore {
   isOpen: boolean
   addConsole: (entry: Omit<ConsoleEntry, 'id' | 'timestamp'>) => ConsoleEntry
   clearWorkflowConsole: (workflowId: string) => void
-  clearExecutionEntries: (executionId: string) => void
   exportConsoleCSV: (workflowId: string) => void
   getWorkflowEntries: (workflowId: string) => ConsoleEntry[]
   toggleConsole: () => void

View File

@@ -1,114 +0,0 @@
import { TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
export interface ConfluenceDeleteLabelParams {
accessToken: string
domain: string
pageId: string
labelName: string
cloudId?: string
}
export interface ConfluenceDeleteLabelResponse {
success: boolean
output: {
ts: string
pageId: string
labelName: string
deleted: boolean
}
}
export const confluenceDeleteLabelTool: ToolConfig<
ConfluenceDeleteLabelParams,
ConfluenceDeleteLabelResponse
> = {
id: 'confluence_delete_label',
name: 'Confluence Delete Label',
description: 'Remove a label from a Confluence page.',
version: '1.0.0',
oauth: {
required: true,
provider: 'confluence',
},
params: {
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token for Confluence',
},
domain: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
},
pageId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'Confluence page ID to remove the label from',
},
labelName: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'Name of the label to remove',
},
cloudId: {
type: 'string',
required: false,
visibility: 'user-only',
description:
'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
},
},
request: {
url: () => '/api/tools/confluence/labels',
method: 'DELETE',
headers: (params: ConfluenceDeleteLabelParams) => ({
Accept: 'application/json',
Authorization: `Bearer ${params.accessToken}`,
}),
body: (params: ConfluenceDeleteLabelParams) => ({
domain: params.domain,
accessToken: params.accessToken,
pageId: params.pageId?.trim(),
labelName: params.labelName?.trim(),
cloudId: params.cloudId,
}),
},
transformResponse: async (response: Response) => {
const data = await response.json()
return {
success: true,
output: {
ts: new Date().toISOString(),
pageId: data.pageId ?? '',
labelName: data.labelName ?? '',
deleted: true,
},
}
},
outputs: {
ts: TIMESTAMP_OUTPUT,
pageId: {
type: 'string',
description: 'Page ID the label was removed from',
},
labelName: {
type: 'string',
description: 'Name of the removed label',
},
deleted: {
type: 'boolean',
description: 'Deletion status',
},
},
}
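Note: the Confluence tool files removed in this compare all share this ToolConfig shape (params, request, transformResponse, outputs). As a rough sketch of how such a config is consumed, assuming a much-simplified executor (the real executeTool in tools/utils below also handles OAuth credentials, SSRF-protected fetch, and file outputs):

// Sketch only: a structural subset of ToolConfig, enough to show how the
// request/transformResponse sections above fit together.
interface MinimalToolConfig<P> {
  request: {
    url: string | ((params: P) => string)
    method: string
    headers: (params: P) => Record<string, string>
    body?: (params: P) => unknown
  }
  transformResponse: (response: Response) => Promise<unknown>
}

async function runTool<P>(tool: MinimalToolConfig<P>, params: P): Promise<unknown> {
  const url = typeof tool.request.url === 'function' ? tool.request.url(params) : tool.request.url
  const init: RequestInit = { method: tool.request.method, headers: tool.request.headers(params) }
  if (tool.request.body) init.body = JSON.stringify(tool.request.body(params))
  // The tool owns response parsing, so callers get a normalized { success, output } shape.
  return tool.transformResponse(await fetch(url, init))
}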

View File

@@ -1,105 +0,0 @@
import { TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
export interface ConfluenceDeletePagePropertyParams {
accessToken: string
domain: string
pageId: string
propertyId: string
cloudId?: string
}
export interface ConfluenceDeletePagePropertyResponse {
success: boolean
output: {
ts: string
pageId: string
propertyId: string
deleted: boolean
}
}
export const confluenceDeletePagePropertyTool: ToolConfig<
ConfluenceDeletePagePropertyParams,
ConfluenceDeletePagePropertyResponse
> = {
id: 'confluence_delete_page_property',
name: 'Confluence Delete Page Property',
description: 'Delete a content property from a Confluence page by its property ID.',
version: '1.0.0',
oauth: {
required: true,
provider: 'confluence',
},
params: {
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token for Confluence',
},
domain: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
},
pageId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'The ID of the page containing the property',
},
propertyId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'The ID of the property to delete',
},
cloudId: {
type: 'string',
required: false,
visibility: 'user-only',
description:
'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
},
},
request: {
url: () => '/api/tools/confluence/page-properties',
method: 'DELETE',
headers: (params: ConfluenceDeletePagePropertyParams) => ({
Accept: 'application/json',
Authorization: `Bearer ${params.accessToken}`,
}),
body: (params: ConfluenceDeletePagePropertyParams) => ({
domain: params.domain,
accessToken: params.accessToken,
pageId: params.pageId?.trim(),
propertyId: params.propertyId?.trim(),
cloudId: params.cloudId,
}),
},
transformResponse: async (response: Response) => {
const data = await response.json()
return {
success: true,
output: {
ts: new Date().toISOString(),
pageId: data.pageId ?? '',
propertyId: data.propertyId ?? '',
deleted: true,
},
}
},
outputs: {
ts: TIMESTAMP_OUTPUT,
pageId: { type: 'string', description: 'ID of the page' },
propertyId: { type: 'string', description: 'ID of the deleted property' },
deleted: { type: 'boolean', description: 'Deletion status' },
},
}

View File

@@ -1,143 +0,0 @@
import { PAGE_ITEM_PROPERTIES, TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
export interface ConfluenceGetPagesByLabelParams {
accessToken: string
domain: string
labelId: string
limit?: number
cursor?: string
cloudId?: string
}
export interface ConfluenceGetPagesByLabelResponse {
success: boolean
output: {
ts: string
labelId: string
pages: Array<{
id: string
title: string
status: string | null
spaceId: string | null
parentId: string | null
authorId: string | null
createdAt: string | null
version: {
number: number
message?: string
createdAt?: string
} | null
}>
nextCursor: string | null
}
}
export const confluenceGetPagesByLabelTool: ToolConfig<
ConfluenceGetPagesByLabelParams,
ConfluenceGetPagesByLabelResponse
> = {
id: 'confluence_get_pages_by_label',
name: 'Confluence Get Pages by Label',
description: 'Retrieve all pages that have a specific label applied.',
version: '1.0.0',
oauth: {
required: true,
provider: 'confluence',
},
params: {
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token for Confluence',
},
domain: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
},
labelId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'The ID of the label to get pages for',
},
limit: {
type: 'number',
required: false,
visibility: 'user-or-llm',
description: 'Maximum number of pages to return (default: 50, max: 250)',
},
cursor: {
type: 'string',
required: false,
visibility: 'user-or-llm',
description: 'Pagination cursor from previous response',
},
cloudId: {
type: 'string',
required: false,
visibility: 'user-only',
description:
'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
},
},
request: {
url: (params: ConfluenceGetPagesByLabelParams) => {
const query = new URLSearchParams({
domain: params.domain,
accessToken: params.accessToken,
labelId: params.labelId,
limit: String(params.limit || 50),
})
if (params.cursor) {
query.set('cursor', params.cursor)
}
if (params.cloudId) {
query.set('cloudId', params.cloudId)
}
return `/api/tools/confluence/pages-by-label?${query.toString()}`
},
method: 'GET',
headers: (params: ConfluenceGetPagesByLabelParams) => ({
Accept: 'application/json',
Authorization: `Bearer ${params.accessToken}`,
}),
},
transformResponse: async (response: Response) => {
const data = await response.json()
return {
success: true,
output: {
ts: new Date().toISOString(),
labelId: data.labelId ?? '',
pages: data.pages ?? [],
nextCursor: data.nextCursor ?? null,
},
}
},
outputs: {
ts: TIMESTAMP_OUTPUT,
labelId: { type: 'string', description: 'ID of the label' },
pages: {
type: 'array',
description: 'Array of pages with this label',
items: {
type: 'object',
properties: PAGE_ITEM_PROPERTIES,
},
},
nextCursor: {
type: 'string',
description: 'Cursor for fetching the next page of results',
optional: true,
},
},
}

View File

@@ -5,14 +5,11 @@ import { confluenceCreatePageTool } from '@/tools/confluence/create_page'
 import { confluenceCreatePagePropertyTool } from '@/tools/confluence/create_page_property'
 import { confluenceDeleteAttachmentTool } from '@/tools/confluence/delete_attachment'
 import { confluenceDeleteCommentTool } from '@/tools/confluence/delete_comment'
-import { confluenceDeleteLabelTool } from '@/tools/confluence/delete_label'
 import { confluenceDeletePageTool } from '@/tools/confluence/delete_page'
-import { confluenceDeletePagePropertyTool } from '@/tools/confluence/delete_page_property'
 import { confluenceGetBlogPostTool } from '@/tools/confluence/get_blogpost'
 import { confluenceGetPageAncestorsTool } from '@/tools/confluence/get_page_ancestors'
 import { confluenceGetPageChildrenTool } from '@/tools/confluence/get_page_children'
 import { confluenceGetPageVersionTool } from '@/tools/confluence/get_page_version'
-import { confluenceGetPagesByLabelTool } from '@/tools/confluence/get_pages_by_label'
 import { confluenceGetSpaceTool } from '@/tools/confluence/get_space'
 import { confluenceListAttachmentsTool } from '@/tools/confluence/list_attachments'
 import { confluenceListBlogPostsTool } from '@/tools/confluence/list_blogposts'
@@ -22,7 +19,6 @@ import { confluenceListLabelsTool } from '@/tools/confluence/list_labels'
 import { confluenceListPagePropertiesTool } from '@/tools/confluence/list_page_properties'
 import { confluenceListPageVersionsTool } from '@/tools/confluence/list_page_versions'
 import { confluenceListPagesInSpaceTool } from '@/tools/confluence/list_pages_in_space'
-import { confluenceListSpaceLabelsTool } from '@/tools/confluence/list_space_labels'
 import { confluenceListSpacesTool } from '@/tools/confluence/list_spaces'
 import { confluenceRetrieveTool } from '@/tools/confluence/retrieve'
 import { confluenceSearchTool } from '@/tools/confluence/search'
@@ -82,7 +78,6 @@ export {
   // Page Properties Tools
   confluenceListPagePropertiesTool,
   confluenceCreatePagePropertyTool,
-  confluenceDeletePagePropertyTool,
   // Blog Post Tools
   confluenceListBlogPostsTool,
   confluenceGetBlogPostTool,
@@ -103,9 +98,6 @@ export {
   // Label Tools
   confluenceListLabelsTool,
   confluenceAddLabelTool,
-  confluenceDeleteLabelTool,
-  confluenceGetPagesByLabelTool,
-  confluenceListSpaceLabelsTool,
   // Space Tools
   confluenceGetSpaceTool,
   confluenceListSpacesTool,

View File

@@ -1,134 +0,0 @@
import { LABEL_ITEM_PROPERTIES, TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
export interface ConfluenceListSpaceLabelsParams {
accessToken: string
domain: string
spaceId: string
limit?: number
cursor?: string
cloudId?: string
}
export interface ConfluenceListSpaceLabelsResponse {
success: boolean
output: {
ts: string
spaceId: string
labels: Array<{
id: string
name: string
prefix: string
}>
nextCursor: string | null
}
}
export const confluenceListSpaceLabelsTool: ToolConfig<
ConfluenceListSpaceLabelsParams,
ConfluenceListSpaceLabelsResponse
> = {
id: 'confluence_list_space_labels',
name: 'Confluence List Space Labels',
description: 'List all labels associated with a Confluence space.',
version: '1.0.0',
oauth: {
required: true,
provider: 'confluence',
},
params: {
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token for Confluence',
},
domain: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
},
spaceId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'The ID of the Confluence space to list labels from',
},
limit: {
type: 'number',
required: false,
visibility: 'user-or-llm',
description: 'Maximum number of labels to return (default: 25, max: 250)',
},
cursor: {
type: 'string',
required: false,
visibility: 'user-or-llm',
description: 'Pagination cursor from previous response',
},
cloudId: {
type: 'string',
required: false,
visibility: 'user-only',
description:
'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
},
},
request: {
url: (params: ConfluenceListSpaceLabelsParams) => {
const query = new URLSearchParams({
domain: params.domain,
accessToken: params.accessToken,
spaceId: params.spaceId,
limit: String(params.limit || 25),
})
if (params.cursor) {
query.set('cursor', params.cursor)
}
if (params.cloudId) {
query.set('cloudId', params.cloudId)
}
return `/api/tools/confluence/space-labels?${query.toString()}`
},
method: 'GET',
headers: (params: ConfluenceListSpaceLabelsParams) => ({
Accept: 'application/json',
Authorization: `Bearer ${params.accessToken}`,
}),
},
transformResponse: async (response: Response) => {
const data = await response.json()
return {
success: true,
output: {
ts: new Date().toISOString(),
spaceId: data.spaceId ?? '',
labels: data.labels ?? [],
nextCursor: data.nextCursor ?? null,
},
}
},
outputs: {
ts: TIMESTAMP_OUTPUT,
spaceId: { type: 'string', description: 'ID of the space' },
labels: {
type: 'array',
description: 'Array of labels on the space',
items: {
type: 'object',
properties: LABEL_ITEM_PROPERTIES,
},
},
nextCursor: {
type: 'string',
description: 'Cursor for fetching the next page of results',
optional: true,
},
},
}

View File

@@ -1,9 +1,5 @@
 import { createLogger } from '@sim/logger'
 import { generateInternalToken } from '@/lib/auth/internal'
-import { getBYOKKey } from '@/lib/api-key/byok'
-import { logFixedUsage } from '@/lib/billing/core/usage-log'
-import { env } from '@/lib/core/config/env'
-import { isHosted } from '@/lib/core/config/feature-flags'
 import { DEFAULT_EXECUTION_TIMEOUT_MS } from '@/lib/core/execution-limits'
 import {
   secureFetchWithPinnedIP,
@@ -17,12 +13,7 @@ import { resolveSkillContent } from '@/executor/handlers/agent/skills-resolver'
 import type { ExecutionContext } from '@/executor/types'
 import type { ErrorInfo } from '@/tools/error-extractors'
 import { extractErrorMessage } from '@/tools/error-extractors'
-import type {
-  OAuthTokenPayload,
-  ToolConfig,
-  ToolHostingPricing,
-  ToolResponse,
-} from '@/tools/types'
+import type { OAuthTokenPayload, ToolConfig, ToolResponse } from '@/tools/types'
 import {
   formatRequestParams,
   getTool,
@@ -32,195 +23,6 @@ import {

 const logger = createLogger('Tools')
/**
* Get a hosted API key from environment variables
* Supports rotation when multiple keys are configured
*/
function getHostedKeyFromEnv(envKeys: string[]): string | null {
const keys = envKeys
.map((key) => env[key as keyof typeof env])
.filter((value): value is string => Boolean(value))
if (keys.length === 0) return null
// Round-robin rotation based on current minute
const currentMinute = Math.floor(Date.now() / 60000)
const keyIndex = currentMinute % keys.length
return keys[keyIndex]
}
/**
* Inject hosted API key if tool supports it and user didn't provide one.
* Checks BYOK workspace keys first, then falls back to hosted env keys.
* Returns whether a hosted (billable) key was injected.
*/
async function injectHostedKeyIfNeeded(
tool: ToolConfig,
params: Record<string, unknown>,
executionContext: ExecutionContext | undefined,
requestId: string
): Promise<boolean> {
if (!tool.hosting) return false
if (!isHosted) return false
const { envKeys, apiKeyParam, byokProviderId } = tool.hosting
const userProvidedKey = params[apiKeyParam]
if (userProvidedKey) {
logger.debug(`[${requestId}] User provided API key for ${tool.id}, skipping hosted key`)
return false
}
// Check BYOK workspace key first
if (byokProviderId && executionContext?.workspaceId) {
try {
const byokResult = await getBYOKKey(
executionContext.workspaceId,
byokProviderId as 'openai' | 'anthropic' | 'google' | 'mistral' | 'serper'
)
if (byokResult) {
params[apiKeyParam] = byokResult.apiKey
logger.info(`[${requestId}] Using BYOK key for ${tool.id}`)
return false // Don't bill - user's own key
}
} catch (error) {
logger.error(`[${requestId}] Failed to get BYOK key for ${tool.id}:`, error)
// Fall through to hosted key
}
}
// Fall back to hosted env key
const hostedKey = getHostedKeyFromEnv(envKeys)
if (!hostedKey) {
logger.debug(`[${requestId}] No hosted key available for ${tool.id}`)
return false
}
params[apiKeyParam] = hostedKey
logger.info(`[${requestId}] Using hosted key for ${tool.id}`)
return true // Bill the user
}
/**
* Check if an error is a rate limit (throttling) error
*/
function isRateLimitError(error: unknown): boolean {
if (error && typeof error === 'object') {
const status = (error as { status?: number }).status
// 429 = Too Many Requests, 503 = Service Unavailable (sometimes used for rate limiting)
if (status === 429 || status === 503) return true
}
return false
}
/**
* Execute a function with exponential backoff retry for rate limiting errors.
* Only used for hosted key requests.
*/
async function executeWithRetry<T>(
fn: () => Promise<T>,
requestId: string,
toolId: string,
maxRetries = 3,
baseDelayMs = 1000
): Promise<T> {
let lastError: unknown
for (let attempt = 0; attempt <= maxRetries; attempt++) {
try {
return await fn()
} catch (error) {
lastError = error
if (!isRateLimitError(error) || attempt === maxRetries) {
throw error
}
const delayMs = baseDelayMs * Math.pow(2, attempt)
logger.warn(`[${requestId}] Rate limited for ${toolId}, retrying in ${delayMs}ms (attempt ${attempt + 1}/${maxRetries})`)
await new Promise((resolve) => setTimeout(resolve, delayMs))
}
}
throw lastError
}
/**
* Calculate cost based on pricing model
*/
function calculateToolCost(
pricing: ToolHostingPricing,
params: Record<string, unknown>,
response: Record<string, unknown>
): number {
switch (pricing.type) {
case 'per_request':
return pricing.cost
case 'per_unit': {
const usage = pricing.getUsage(params, response)
return usage * pricing.costPerUnit
}
case 'per_result': {
const resultCount = pricing.getResultCount(response)
const billableResults = pricing.maxResults
? Math.min(resultCount, pricing.maxResults)
: resultCount
return billableResults * pricing.costPerResult
}
case 'per_second': {
const duration = pricing.getDuration(response)
const billableDuration = pricing.minimumSeconds
? Math.max(duration, pricing.minimumSeconds)
: duration
return billableDuration * pricing.costPerSecond
}
default: {
const exhaustiveCheck: never = pricing
throw new Error(`Unknown pricing type: ${(exhaustiveCheck as ToolHostingPricing).type}`)
}
}
}
/**
* Log usage for a tool that used a hosted API key
*/
async function logHostedToolUsage(
tool: ToolConfig,
params: Record<string, unknown>,
response: Record<string, unknown>,
executionContext: ExecutionContext | undefined,
requestId: string
): Promise<void> {
if (!tool.hosting?.pricing || !executionContext?.userId) {
return
}
const cost = calculateToolCost(tool.hosting.pricing, params, response)
if (cost <= 0) return
try {
await logFixedUsage({
userId: executionContext.userId,
source: 'workflow',
description: `tool:${tool.id}`,
cost,
workspaceId: executionContext.workspaceId,
workflowId: executionContext.workflowId,
executionId: executionContext.executionId,
})
logger.debug(`[${requestId}] Logged hosted tool usage for ${tool.id}: $${cost}`)
} catch (error) {
logger.error(`[${requestId}] Failed to log hosted tool usage for ${tool.id}:`, error)
// Don't throw - usage logging should not break the main flow
}
}
 /**
  * Normalizes a tool ID by stripping resource ID suffix (UUID).
  * Workflow tools: 'workflow_executor_<uuid>' -> 'workflow_executor'
@@ -477,14 +279,6 @@ export async function executeTool(
     throw new Error(`Tool not found: ${toolId}`)
   }

-  // Inject hosted API key if tool supports it and user didn't provide one
-  const isUsingHostedKey = await injectHostedKeyIfNeeded(
-    tool,
-    contextParams,
-    executionContext,
-    requestId
-  )

   // If we have a credential parameter, fetch the access token
   if (contextParams.credential) {
     logger.info(
@@ -593,11 +387,6 @@ export async function executeTool(
       // Process file outputs if execution context is available
       finalResult = await processFileOutputs(finalResult, tool, executionContext)

-      // Log usage for hosted key if execution was successful
-      if (isUsingHostedKey && finalResult.success) {
-        await logHostedToolUsage(tool, contextParams, finalResult.output, executionContext, requestId)
-      }

       // Add timing data to the result
       const endTime = new Date()
       const endTimeISO = endTime.toISOString()
@@ -613,14 +402,7 @@ export async function executeTool(
   }

   // Execute the tool request directly (internal routes use regular fetch, external use SSRF-protected fetch)
-  // Wrap with retry logic for hosted keys to handle rate limiting due to higher usage
-  const result = isUsingHostedKey
-    ? await executeWithRetry(
-        () => executeToolRequest(toolId, tool, contextParams),
-        requestId,
-        toolId
-      )
-    : await executeToolRequest(toolId, tool, contextParams)
+  const result = await executeToolRequest(toolId, tool, contextParams)

   // Apply post-processing if available and not skipped
   let finalResult = result
@@ -638,11 +420,6 @@ export async function executeTool(
   // Process file outputs if execution context is available
   finalResult = await processFileOutputs(finalResult, tool, executionContext)

-  // Log usage for hosted key if execution was successful
-  if (isUsingHostedKey && finalResult.success) {
-    await logHostedToolUsage(tool, contextParams, finalResult.output, executionContext, requestId)
-  }

   // Add timing data to the result
   const endTime = new Date()
   const endTimeISO = endTime.toISOString()
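Note: the removed executeWithRetry helper above uses exponential backoff. With its defaults (maxRetries = 3, baseDelayMs = 1000), a persistently rate-limited call is retried after roughly 1s, 2s, and 4s before the last error is rethrown. Its call shape, taken from the removed left-hand code:

// Call shape from the removed left-hand code above.
const result = await executeWithRetry(
  () => executeToolRequest(toolId, tool, contextParams),
  requestId,
  toolId
)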

View File

@@ -118,13 +118,10 @@ import {
   confluenceCreatePageTool,
   confluenceDeleteAttachmentTool,
   confluenceDeleteCommentTool,
-  confluenceDeleteLabelTool,
-  confluenceDeletePagePropertyTool,
   confluenceDeletePageTool,
   confluenceGetBlogPostTool,
   confluenceGetPageAncestorsTool,
   confluenceGetPageChildrenTool,
-  confluenceGetPagesByLabelTool,
   confluenceGetPageVersionTool,
   confluenceGetSpaceTool,
   confluenceListAttachmentsTool,
@@ -135,7 +132,6 @@ import {
   confluenceListPagePropertiesTool,
   confluenceListPagesInSpaceTool,
   confluenceListPageVersionsTool,
-  confluenceListSpaceLabelsTool,
   confluenceListSpacesTool,
   confluenceRetrieveTool,
   confluenceSearchInSpaceTool,
@@ -2671,10 +2667,6 @@ export const tools: Record<string, ToolConfig> = {
   confluence_delete_attachment: confluenceDeleteAttachmentTool,
   confluence_list_labels: confluenceListLabelsTool,
   confluence_add_label: confluenceAddLabelTool,
-  confluence_get_pages_by_label: confluenceGetPagesByLabelTool,
-  confluence_list_space_labels: confluenceListSpaceLabelsTool,
-  confluence_delete_label: confluenceDeleteLabelTool,
-  confluence_delete_page_property: confluenceDeletePagePropertyTool,
   confluence_get_space: confluenceGetSpaceTool,
   confluence_list_spaces: confluenceListSpacesTool,
   cursor_list_agents: cursorListAgentsTool,

View File

@@ -26,13 +26,6 @@ export const s3GetObjectTool: ToolConfig = {
       visibility: 'user-only',
       description: 'Your AWS Secret Access Key',
     },
-    region: {
-      type: 'string',
-      required: false,
-      visibility: 'user-only',
-      description:
-        'Optional region override when URL does not include region (e.g., us-east-1, eu-west-1)',
-    },
     s3Uri: {
       type: 'string',
       required: true,
@@ -44,7 +37,7 @@ export const s3GetObjectTool: ToolConfig = {
   request: {
     url: (params) => {
       try {
-        const { bucketName, region, objectKey } = parseS3Uri(params.s3Uri, params.region)
+        const { bucketName, region, objectKey } = parseS3Uri(params.s3Uri)
         params.bucketName = bucketName
         params.region = region
@@ -53,7 +46,7 @@ export const s3GetObjectTool: ToolConfig = {
         return `https://${bucketName}.s3.${region}.amazonaws.com/${encodeS3PathComponent(objectKey)}`
       } catch (_error) {
         throw new Error(
-          'Invalid S3 Object URL. Use a valid S3 URL and optionally provide region if the URL omits it.'
+          'Invalid S3 Object URL format. Expected format: https://bucket-name.s3.region.amazonaws.com/path/to/file'
         )
       }
     },
@@ -62,7 +55,7 @@ export const s3GetObjectTool: ToolConfig = {
     try {
       // Parse S3 URI if not already parsed
       if (!params.bucketName || !params.region || !params.objectKey) {
-        const { bucketName, region, objectKey } = parseS3Uri(params.s3Uri, params.region)
+        const { bucketName, region, objectKey } = parseS3Uri(params.s3Uri)
         params.bucketName = bucketName
         params.region = region
@@ -109,7 +102,7 @@ export const s3GetObjectTool: ToolConfig = {
   transformResponse: async (response: Response, params) => {
     // Parse S3 URI if not already parsed
     if (!params.bucketName || !params.region || !params.objectKey) {
-      const { bucketName, region, objectKey } = parseS3Uri(params.s3Uri, params.region)
+      const { bucketName, region, objectKey } = parseS3Uri(params.s3Uri)
       params.bucketName = bucketName
       params.region = region
      params.objectKey = objectKey

View File

@@ -20,10 +20,7 @@ export function getSignatureKey(
   return kSigning
 }

-export function parseS3Uri(
-  s3Uri: string,
-  fallbackRegion?: string
-): {
+export function parseS3Uri(s3Uri: string): {
   bucketName: string
   region: string
   objectKey: string
@@ -31,55 +28,10 @@ export function parseS3Uri(
   try {
     const url = new URL(s3Uri)
     const hostname = url.hostname
-    const normalizedPath = url.pathname.startsWith('/') ? url.pathname.slice(1) : url.pathname
-
-    const virtualHostedDualstackMatch = hostname.match(
-      /^(.+)\.s3\.dualstack\.([^.]+)\.amazonaws\.com(?:\.cn)?$/
-    )
-    const virtualHostedRegionalMatch = hostname.match(
-      /^(.+)\.s3[.-]([^.]+)\.amazonaws\.com(?:\.cn)?$/
-    )
-    const virtualHostedGlobalMatch = hostname.match(/^(.+)\.s3\.amazonaws\.com(?:\.cn)?$/)
-    const pathStyleDualstackMatch = hostname.match(
-      /^s3\.dualstack\.([^.]+)\.amazonaws\.com(?:\.cn)?$/
-    )
-    const pathStyleRegionalMatch = hostname.match(/^s3[.-]([^.]+)\.amazonaws\.com(?:\.cn)?$/)
-    const pathStyleGlobalMatch = hostname.match(/^s3\.amazonaws\.com(?:\.cn)?$/)
-
-    const isPathStyleHost = Boolean(
-      pathStyleDualstackMatch || pathStyleRegionalMatch || pathStyleGlobalMatch
-    )
-    const firstSlashIndex = normalizedPath.indexOf('/')
-    const pathStyleBucketName =
-      firstSlashIndex === -1 ? normalizedPath : normalizedPath.slice(0, firstSlashIndex)
-    const pathStyleObjectKey =
-      firstSlashIndex === -1 ? '' : normalizedPath.slice(firstSlashIndex + 1)
-
-    const bucketName = isPathStyleHost
-      ? pathStyleBucketName
-      : (virtualHostedDualstackMatch?.[1] ??
-        virtualHostedRegionalMatch?.[1] ??
-        virtualHostedGlobalMatch?.[1] ??
-        '')
-    const rawObjectKey = isPathStyleHost ? pathStyleObjectKey : normalizedPath
-    const objectKey = (() => {
-      try {
-        return decodeURIComponent(rawObjectKey)
-      } catch {
-        return rawObjectKey
-      }
-    })()
-
-    const normalizedFallbackRegion = fallbackRegion?.trim()
-    const regionFromHost =
-      virtualHostedDualstackMatch?.[2] ??
-      virtualHostedRegionalMatch?.[2] ??
-      pathStyleDualstackMatch?.[1] ??
-      pathStyleRegionalMatch?.[1]
-    const region = regionFromHost || normalizedFallbackRegion || 'us-east-1'
+    const bucketName = hostname.split('.')[0]
+    const regionMatch = hostname.match(/s3[.-]([^.]+)\.amazonaws\.com/)
+    const region = regionMatch ? regionMatch[1] : 'us-east-1'
+    const objectKey = url.pathname.startsWith('/') ? url.pathname.substring(1) : url.pathname

     if (!bucketName || !objectKey) {
       throw new Error('Invalid S3 URI format')
@@ -88,7 +40,7 @@ export function parseS3Uri(
     return { bucketName, region, objectKey }
   } catch (_error) {
     throw new Error(
-      'Invalid S3 Object URL format. Expected S3 virtual-hosted or path-style URL with object key.'
+      'Invalid S3 Object URL format. Expected format: https://bucket-name.s3.region.amazonaws.com/path/to/file'
     )
   }
 }
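Note: to make the behavioral difference concrete (illustrative values, not from the source), both versions of parseS3Uri handle virtual-hosted URLs, but only the left-hand parser understands path-style hosts:

// Both versions: virtual-hosted URL.
parseS3Uri('https://my-bucket.s3.us-west-2.amazonaws.com/reports/q1.pdf')
// -> { bucketName: 'my-bucket', region: 'us-west-2', objectKey: 'reports/q1.pdf' }

// Left-hand version only: path-style URL. The right-hand parser would read the
// bucket as 's3' here, since it takes the first hostname label as the bucket.
parseS3Uri('https://s3.eu-west-1.amazonaws.com/my-bucket/reports/q1.pdf')
// -> { bucketName: 'my-bucket', region: 'eu-west-1', objectKey: 'reports/q1.pdf' }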

View File

@@ -48,15 +48,6 @@ export const searchTool: ToolConfig<SearchParams, SearchResponse> = {
       description: 'Serper API Key',
     },
   },
-  hosting: {
-    envKeys: ['SERPER_API_KEY'],
-    apiKeyParam: 'apiKey',
-    byokProviderId: 'serper',
-    pricing: {
-      type: 'per_request',
-      cost: 0.001, // $0.001 per search (Serper pricing: ~$50/50k searches)
-    },
-  },
   request: {
     url: (params) => `https://google.serper.dev/${params.type || 'search'}`,

View File

@@ -127,13 +127,6 @@ export interface ToolConfig<P = any, R = any> {
    * Maps param IDs to their enrichment configuration.
    */
   schemaEnrichment?: Record<string, SchemaEnrichmentConfig>
-
-  /**
-   * Hosted API key configuration for this tool.
-   * When configured, the tool can use Sim's hosted API keys if user doesn't provide their own.
-   * Usage is billed according to the pricing config.
-   */
-  hosting?: ToolHostingConfig
 }

 export interface TableRow {
@@ -177,64 +170,3 @@ export interface SchemaEnrichmentConfig {
     required?: string[]
   } | null>
 }
/**
* Pricing models for hosted API key usage
*/
/** Flat fee per API call (e.g., Serper search) */
export interface PerRequestPricing {
type: 'per_request'
/** Cost per request in dollars */
cost: number
}
/** Usage-based on input/output size (e.g., LLM tokens, TTS characters) */
export interface PerUnitPricing {
type: 'per_unit'
/** Cost per unit in dollars */
costPerUnit: number
/** Unit of measurement */
unit: 'token' | 'character' | 'byte' | 'kb' | 'mb'
/** Extract usage count from params (before execution) or response (after execution) */
getUsage: (params: Record<string, unknown>, response?: Record<string, unknown>) => number
}
/** Based on result count (e.g., per search result, per email sent) */
export interface PerResultPricing {
type: 'per_result'
/** Cost per result in dollars */
costPerResult: number
/** Maximum results to bill for (cap) */
maxResults?: number
/** Extract result count from response */
getResultCount: (response: Record<string, unknown>) => number
}
/** Billed by execution duration (e.g., browser sessions, video processing) */
export interface PerSecondPricing {
type: 'per_second'
/** Cost per second in dollars */
costPerSecond: number
/** Minimum billable seconds */
minimumSeconds?: number
/** Extract duration from response (in seconds) */
getDuration: (response: Record<string, unknown>) => number
}
/** Union of all pricing models */
export type ToolHostingPricing = PerRequestPricing | PerUnitPricing | PerResultPricing | PerSecondPricing
/**
* Configuration for hosted API key support
* When configured, the tool can use Sim's hosted API keys if user doesn't provide their own
*/
export interface ToolHostingConfig {
/** Environment variable names to check for hosted keys (supports rotation with multiple keys) */
envKeys: string[]
/** The parameter name that receives the API key */
apiKeyParam: string
/** BYOK provider ID for workspace key lookup (e.g., 'serper') */
byokProviderId?: string
/** Pricing when using hosted key */
pricing: ToolHostingPricing
}
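Note: as an illustration of the pricing union removed above (a hypothetical config, not one that exists in the codebase), a tool billed per returned row and capped at 100 billable rows would be declared like this:

// Hypothetical example using the removed ToolHostingConfig / PerResultPricing types.
const exampleHosting: ToolHostingConfig = {
  envKeys: ['EXAMPLE_API_KEY'], // hypothetical env var
  apiKeyParam: 'apiKey',
  pricing: {
    type: 'per_result',
    costPerResult: 0.002, // $0.002 per returned row
    maxResults: 100, // cap on billable rows
    getResultCount: (response) => (Array.isArray(response.results) ? response.results.length : 0),
  },
}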

View File

@@ -23,12 +23,7 @@ export const SYSTEM_SUBBLOCK_IDS: string[] = [
  * with default values from the trigger definition on load, which aren't present in
  * the deployed state, causing false positive change detection.
  */
-export const TRIGGER_RUNTIME_SUBBLOCK_IDS: string[] = [
-  'webhookId',
-  'triggerPath',
-  'triggerConfig',
-  'triggerId',
-]
+export const TRIGGER_RUNTIME_SUBBLOCK_IDS: string[] = ['webhookId', 'triggerPath', 'triggerConfig']

 /**
  * Maximum number of consecutive failures before a trigger (schedule/webhook) is auto-disabled.