Compare commits


53 Commits

Author SHA1 Message Date
Waleed
af82820a28 v0.5.61: webhook improvements, workflow controls, react query for deployment status, chat fixes, reducto and pulse OCR, linear fixes 2026-01-16 18:06:23 -08:00
Vikhyath Mondreti
fd23220cc3 fix(slack): tool params should be in line with block (#2860)
* env var pattern outside loop

* fix(slack): tool params should line up with block

* remove comments
2026-01-16 18:00:44 -08:00
Adam Gough
a8d81097fc fix(google-vault): error handling improvement and more params (#2735)
* new error throw and improvement

* fixed critical issues

* restore error throwing

* restore

* added handler for vault

* updated docs

* restored

* removed google vault from executor

* updated translations

* updated docs

* fixed inputs and outputs

---------

Co-authored-by: aadamgough <adam@sim.ai>
Co-authored-by: waleed <walif6@gmail.com>
2026-01-16 17:59:17 -08:00
Waleed
3768c6379c feat(readme): added deepwiki to readme, consolidated utils (#2856)
* feat(readme): added deepwiki to readme, consolidated utils

* standardized all modals

* updated modal copy

* standardized modals

* streamlined all error msg patterns
2026-01-16 16:07:31 -08:00
Siddharth Ganesan
aa80116b99 fix(copilot): copilot edit router block accepts semantic handles (#2857)
* Fix copilot diff controls

* Fix router block for copilot

* Fix queue

* Fix lint

* Get block options and config for subflows

* Lint
2026-01-16 15:54:28 -08:00
Vikhyath Mondreti
78e4ca9d45 improvement(serializer): canonical subblock, serialization cleanups, schedules/webhooks are deployment version friendly (#2848)
* hide form deployment tab from docs

* progress

* fix resolution

* cleanup code

* fix positioning

* cleanup dead sockets adv mode ops

* address greptile comments

* fix tests plus more simplification

* fix cleanup

* bring back advanced mode with specific definition

* revert feature flags

* improvement(subblock): ui

* resolver change to make all var references optional chaining

* fix(webhooks/schedules): deployment version friendly

* fix tests

* fix credential sets with new lifecycle

* prep merge

* add back migration

* fix display check for adv fields

* fix trigger vs block scoping

---------

Co-authored-by: Emir Karabeg <emirkarabeg@berkeley.edu>
2026-01-16 15:23:43 -08:00
Waleed
ce3ddb6ba0 improvement(deployed-mcp): added the ability to make the visibility for deployed mcp tools public, updated UX (#2853)
* improvement(deployed-mcp): added the ability to make the visibility for deployed mcp tools public, updated UX

* use reactquery

* migrated chats to use reactquery, upgraded entire deploymodal to use reactquery instead of manual state management

* added hooks for chats and updated callers to all use reactquery

* fix

* updated comments

* consolidated utils
2026-01-16 14:18:39 -08:00
Siddharth Ganesan
8361931cdf fix(copilot): fix copilot bugs (#2855)
* Fix edit workflow returning bad state

* Fix block id edit, slash commands at end, thinking tag resolution, add continue button

* Clean up autosend and continue options and enable mention menu

* Cleanup

* Fix thinking tags

* Fix thinking text

* Fix get block options text

* Fix bugs

* Fix redeploy

* Fix loading indicators

* User input expansion

* Normalize copilot subblock ids

* Fix handlecancelcheckpoint
2026-01-16 13:57:55 -08:00
Waleed
c863125c6b feat(workspace): added option to leave workspace (#2854) 2026-01-16 12:38:11 -08:00
Waleed
fa63af9222 fix(queries): remove more remaining manual state management and refetching in favor of reactquery (#2852) 2026-01-16 12:01:28 -08:00
Waleed
dba57998d2 improvement(posthog): improve posthog config to be more lightweight (#2851) 2026-01-16 08:47:06 -08:00
Waleed
583f5c4cbb fix(webflow): fix collection & site dropdown in webflow triggers (#2849)
* fix(webflow): fix collection & site dropdown in webflow triggers

* added form submission trigger to webflow

* fix(webflow): added form submission trigger and scope

* fixed function signatures
2026-01-16 08:22:09 -08:00
Emir Karabeg
6ff68b39ce Improvement: subblocks (#2850)
* improvement(panel): increased default width

* improvement: subblocks

* fix: generate, mcp, ring
2026-01-15 23:02:51 -08:00
Waleed
55700b9bf4 improvement(security): added input validation for airtable, lemlist, and more tools to protect against SSRF (#2847) 2026-01-15 19:33:29 -08:00
Waleed
51e376847f fix(linear): updated linear tools to enforce only required fields per api spec (#2845) 2026-01-15 18:58:11 -08:00
Waleed
feb994c819 improvement(presence): show presence for the same user in another tab, fix z-index of multiplayer cursor to fall behind panel, terminal, sidebar but above blocks, improved connection detection (#2844)
* improvement(presence): show presence for the same user in another tab, fix z-index of multiplayer cursor to fall behind panel, terminal, sidebar but above blocks, improved connection detection

* upsert users into presence list
2026-01-15 18:46:46 -08:00
Waleed
12470a630c feat(ocr): added reducto and pulse for OCR (#2843)
* feat(ocr): added reducto and pulse for OCR

* ack comments
2026-01-15 18:30:39 -08:00
Emir Karabeg
b813bf7f27 improvement: workflow, blocks, preview, avatars, output-select (#2840)
* improvement(workflow): ui/ux, refactors, optimizations

* improvement: blocks, preview, avatars

* improvement(output-select): ui

* update API endpoint picker to match output selector

* improvement: subflow ui/ux

---------

Co-authored-by: waleed <walif6@gmail.com>
2026-01-15 17:42:59 -08:00
Waleed
81cc88b2e2 feat(context-menu): added context menu to dead sidebar space and usage indicator (#2841) 2026-01-15 16:54:33 -08:00
Waleed
87e6057033 improvement(chat): partialize chat store to only persist image URL instead of full image in floating chat (#2842) 2026-01-15 16:54:24 -08:00
Vikhyath Mondreti
f1796d13df fix(start): permission check for executor 2026-01-15 16:43:06 -08:00
Waleed
6f469a7f37 improvement(permissions): added ability to auto-add new org members to existing permission group, disallow disabling of start block (#2836)
* improvement(permissions): added ability to auto-add new org members to existing permission group, disallow disabling of start block

* ran migrations

* add deploy modal tabs config to perm groups

* fix ordering of access control listings

* prep staging merge

* regen migrations

---------

Co-authored-by: Vikhyath Mondreti <vikhyath@simstudio.ai>
2026-01-15 15:09:00 -08:00
Waleed
a35f6eca03 improvement(tools): use react query to fetch child workflow schema, avoid refetch and duplicated utils, consolidated utils and testing mocks (#2839)
* improvement(tools): use react query to fetch child workflow schema, avoid refetch and duplicated utils

* consolidated utils & testing mocks
2026-01-15 13:25:22 -08:00
Waleed
1cc489e544 feat(workflow-controls): added action bar for workflow controls (#2767)
* feat(workflow-controls): added action bar for picker/hand/undo/redo/zoom workflow controls, added general setting to disable

* added util for fit to zoom that accounts for sidebar, terminal, and panel

* ack PR comments

* remove dead state variable, add logs

* improvement(ui/ux): action bar, panel, tooltip, dragging, invite modal

* added fit to view in canvas context menu

* fix(theme): dark mode flash

* fix: duplicate fit to view

* refactor: popovers; improvement: notifications, diff controls, action bar

* improvement(action-bar): ui/ux

* refactor(action-bar): renamed to workflow controls

* ran migrations

* fix: deleted migration

---------

Co-authored-by: Emir Karabeg <emirkarabeg@berkeley.edu>
2026-01-15 13:25:00 -08:00
Vikhyath Mondreti
e499cc4f82 improvement(webhooks): lifecycle management with external providers, remove save configuration (#2831)
* fix(webhooks): lifecycle code accuracy

* remove save configuration button

* remove useless instruction

* address greptile comments

* fix lint

* on undeploy cleanup webhooks
2026-01-15 12:42:05 -08:00
Waleed
5e44357b9f improvement(snapshot): show subblocks for trigger only blocks in frozen canvas (#2838)
* improvement(snapshot): show subblocks for trigger only blocks in frozen canvas

* ack comment
2026-01-15 10:34:38 -08:00
Waleed
4372841797 v0.5.60: invitation flow improvements, chat fixes, a2a improvements, additional copilot actions 2026-01-15 00:02:18 -08:00
Waleed
5e8c843241 v0.5.59: a2a support, documentation 2026-01-13 13:21:21 -08:00
Waleed
7bf3d73ee6 v0.5.58: export folders, new tools, permissions groups enhancements 2026-01-13 00:56:59 -08:00
Vikhyath Mondreti
7ffc11a738 v0.5.57: subagents, context menu improvements, bug fixes 2026-01-11 11:38:40 -08:00
Waleed
be578e2ed7 v0.5.56: batch operations, access control and permission groups, billing fixes 2026-01-10 00:31:34 -08:00
Waleed
f415e5edc4 v0.5.55: polling groups, bedrock provider, devcontainer fixes, workflow preview enhancements 2026-01-08 23:36:56 -08:00
Waleed
13a6e6c3fa v0.5.54: seo, model blacklist, helm chart updates, fireflies integration, autoconnect improvements, billing fixes 2026-01-07 16:09:45 -08:00
Waleed
f5ab7f21ae v0.5.53: hotkey improvements, added redis fallback, fixes for workflow tool 2026-01-06 23:34:52 -08:00
Waleed
bfb6fffe38 v0.5.52: new port-based router block, combobox expression and variable support 2026-01-06 16:14:10 -08:00
Waleed
4fbec0a43f v0.5.51: triggers, kb, condition block improvements, supabase and grain integration updates 2026-01-06 14:26:46 -08:00
Waleed
585f5e365b v0.5.50: import improvements, ui upgrades, kb styling and performance improvements 2026-01-05 00:35:55 -08:00
Waleed
3792bdd252 v0.5.49: hitl improvements, new email styles, imap trigger, logs context menu (#2672)
* feat(logs-context-menu): consolidated logs utils and types, added logs record context menu (#2659)

* feat(email): welcome email; improvement(emails): ui/ux (#2658)

* feat(email): welcome email; improvement(emails): ui/ux

* improvement(emails): links, accounts, preview

* refactor(emails): file structure and wrapper components

* added envvar for personal emails sent, added isHosted gate

* fixed failing tests, added env mock

* fix: removed comment

---------

Co-authored-by: waleed <walif6@gmail.com>

* fix(logging): hitl + trigger dev crash protection (#2664)

* hitl gaps

* deal with trigger worker crashes

* clean up import structure

* feat(imap): added support for imap trigger (#2663)

* feat(tools): added support for imap trigger

* feat(imap): added parity, tested

* ack PR comments

* final cleanup

* feat(i18n): update translations (#2665)

Co-authored-by: waleedlatif1 <waleedlatif1@users.noreply.github.com>

* fix(grain): updated grain trigger to auto-establish trigger (#2666)

Co-authored-by: aadamgough <adam@sim.ai>

* feat(admin): routes to manage deployments (#2667)

* feat(admin): routes to manage deployments

* fix naming of deployed by

* feat(time-picker): added timepicker emcn component, added to playground, added searchable prop for dropdown, added more timezones for schedule, updated license and notice date (#2668)

* feat(time-picker): added timepicker emcn component, added to playground, added searchable prop for dropdown, added more timezones for schedule, updated license and notice date

* removed unused params, cleaned up redundant utils

* improvement(invite): aligned styling (#2669)

* improvement(invite): aligned with rest of app

* fix(invite): error handling

* fix: addressed comments

---------

Co-authored-by: Emir Karabeg <78010029+emir-karabeg@users.noreply.github.com>
Co-authored-by: Vikhyath Mondreti <vikhyathvikku@gmail.com>
Co-authored-by: waleedlatif1 <waleedlatif1@users.noreply.github.com>
Co-authored-by: Adam Gough <77861281+aadamgough@users.noreply.github.com>
Co-authored-by: aadamgough <adam@sim.ai>
2026-01-03 13:19:18 -08:00
Waleed
eb5d1f3e5b v0.5.48: copy-paste workflow blocks, docs updates, mcp tool fixes 2025-12-31 18:00:04 -08:00
Waleed
54ab82c8dd v0.5.47: deploy workflow as mcp, kb chunks tokenizer, UI improvements, jira service management tools 2025-12-30 23:18:58 -08:00
Waleed
f895bf469b v0.5.46: build improvements, greptile, light mode improvements 2025-12-29 02:17:52 -08:00
Waleed
dd3209af06 v0.5.45: light mode fixes, realtime usage indicator, docker build improvements 2025-12-27 19:57:42 -08:00
Waleed
b6ba3b50a7 v0.5.44: keyboard shortcuts, autolayout, light mode, byok, testing improvements 2025-12-26 21:25:19 -08:00
Waleed
b304233062 v0.5.43: export logs, circleback, grain, vertex, code hygiene, schedule improvements 2025-12-23 19:19:18 -08:00
Vikhyath Mondreti
57e4b49bd6 v0.5.42: fix memory migration 2025-12-23 01:24:54 -08:00
Vikhyath Mondreti
e12dd204ed v0.5.41: memory fixes, copilot improvements, knowledgebase improvements, LLM providers standardization 2025-12-23 00:15:18 -08:00
Vikhyath Mondreti
3d9d9cbc54 v0.5.40: supabase ops to allow non-public schemas, jira uuid 2025-12-21 22:28:05 -08:00
Waleed
0f4ec962ad v0.5.39: notion, workflow variables fixes 2025-12-20 20:44:00 -08:00
Waleed
4827866f9a v0.5.38: snap to grid, copilot ux improvements, billing line items 2025-12-20 17:24:38 -08:00
Waleed
3e697d9ed9 v0.5.37: redaction utils consolidation, logs updates, autoconnect improvements, additional kb tag types 2025-12-19 22:31:55 -08:00
Martin Yankov
4431a1a484 fix(helm): add custom egress rules to realtime network policy (#2481)
The realtime service network policy was missing the custom egress rules section
that allows configuration of additional egress rules via values.yaml. This caused
the realtime pods to be unable to connect to external databases (e.g., PostgreSQL
on port 5432) when using external database configurations.

The app network policy already had this section, but the realtime network policy
was missing it, creating an inconsistency and preventing the realtime service
from accessing external databases configured via networkPolicy.egress values.

This fix adds the same custom egress rules template section to the realtime
network policy, matching the app network policy behavior and allowing users to
configure database connectivity via values.yaml.
2025-12-19 18:59:08 -08:00
Waleed
4d1a9a3f22 v0.5.36: hitl improvements, opengraph, slack fixes, one-click unsubscribe, auth checks, new db indexes 2025-12-19 01:27:49 -08:00
Vikhyath Mondreti
eb07a080fb v0.5.35: helm updates, copilot improvements, 404 for docs, salesforce fixes, subflow resize clamping 2025-12-18 16:23:19 -08:00
403 changed files with 58586 additions and 10674 deletions

View File

@@ -10,7 +10,7 @@
<a href="https://sim.ai" target="_blank" rel="noopener noreferrer"><img src="https://img.shields.io/badge/sim.ai-6F3DFA" alt="Sim.ai"></a>
<a href="https://discord.gg/Hr4UWYEcTT" target="_blank" rel="noopener noreferrer"><img src="https://img.shields.io/badge/Discord-Join%20Server-5865F2?logo=discord&logoColor=white" alt="Discord"></a>
<a href="https://x.com/simdotai" target="_blank" rel="noopener noreferrer"><img src="https://img.shields.io/twitter/follow/simstudioai?style=social" alt="Twitter"></a>
<a href="https://docs.sim.ai" target="_blank" rel="noopener noreferrer"><img src="https://img.shields.io/badge/Docs-6F3DFA.svg" alt="Documentation"></a>
<a href="https://docs.sim.ai" target="_blank" rel="noopener noreferrer"><img src="https://img.shields.io/badge/Docs-6F3DFA.svg" alt="Documentation"></a> <a href="https://deepwiki.com/simstudioai/sim" target="_blank" rel="noopener noreferrer"><img src="https://img.shields.io/badge/DeepWiki-1E90FF.svg" alt="DeepWiki"></a>
</p>
<p align="center">

View File

@@ -4678,3 +4678,349 @@ export function BedrockIcon(props: SVGProps<SVGSVGElement>) {
</svg>
)
}
export function ReductoIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg
{...props}
width='400'
height='400'
viewBox='50 40 300 320'
fill='none'
xmlns='http://www.w3.org/2000/svg'
>
<path
fillRule='evenodd'
clipRule='evenodd'
d='M85.3434 70.7805H314.657V240.307L226.44 329.219H85.3434V70.7805ZM107.796 93.2319H292.205V204.487H206.493V306.767H107.801L107.796 93.2319Z'
fill='#FFFFFF'
/>
</svg>
)
}
export function PulseIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg
{...props}
width='24'
height='24'
viewBox='0 6 24 24'
fill='none'
xmlns='http://www.w3.org/2000/svg'
>
<path
d='M0 6.63667C0 6.28505 0.284685 6 0.635863 6H1.54133C1.89251 6 2.17719 6.28505 2.17719 6.63667V7.54329C2.17719 7.89492 1.89251 8.17997 1.54133 8.17997H0.635863C0.284686 8.17997 0 7.89492 0 7.54329V6.63667Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M3.11318 6.63667C3.11318 6.28505 3.39787 6 3.74905 6H4.65452C5.00569 6 5.29038 6.28505 5.29038 6.63667V7.54329C5.29038 7.89492 5.00569 8.17997 4.65452 8.17997H3.74905C3.39787 8.17997 3.11318 7.89492 3.11318 7.54329V6.63667Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M6.22637 6.63667C6.22637 6.28505 6.51105 6 6.86223 6H7.7677C8.11888 6 8.40356 6.28505 8.40356 6.63667V7.54329C8.40356 7.89492 8.11888 8.17997 7.7677 8.17997H6.86223C6.51105 8.17997 6.22637 7.89492 6.22637 7.54329V6.63667Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M9.33955 6.63667C9.33955 6.28505 9.62424 6 9.97541 6H10.8809C11.2321 6 11.5167 6.28505 11.5167 6.63667V7.54329C11.5167 7.89492 11.2321 8.17997 10.8809 8.17997H9.97541C9.62424 8.17997 9.33955 7.89492 9.33955 7.54329V6.63667Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M12.4527 6.63667C12.4527 6.28505 12.7374 6 13.0886 6H13.9941C14.3452 6 14.6299 6.28505 14.6299 6.63667V7.54329C14.6299 7.89492 14.3452 8.17997 13.9941 8.17997H13.0886C12.7374 8.17997 12.4527 7.89492 12.4527 7.54329V6.63667Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M15.5659 6.63667C15.5659 6.28505 15.8506 6 16.2018 6H17.1073C17.4584 6 17.7431 6.28505 17.7431 6.63667V7.54329C17.7431 7.89492 17.4584 8.17997 17.1073 8.17997H16.2018C15.8506 8.17997 15.5659 7.89492 15.5659 7.54329V6.63667Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M18.6791 6.63667C18.6791 6.28505 18.9638 6 19.315 6H20.2204C20.5716 6 20.8563 6.28505 20.8563 6.63667V7.54329C20.8563 7.89492 20.5716 8.17997 20.2204 8.17997H19.315C18.9638 8.17997 18.6791 7.89492 18.6791 7.54329V6.63667Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M21.7923 6.63667C21.7923 6.28505 22.077 6 22.4282 6H23.3336C23.6848 6 23.9695 6.28505 23.9695 6.63667V7.54329C23.9695 7.89492 23.6848 8.17997 23.3336 8.17997H22.4282C22.077 8.17997 21.7923 7.89492 21.7923 7.54329V6.63667Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M0 9.75382C0 9.4022 0.284685 9.11715 0.635863 9.11715H1.54133C1.89251 9.11715 2.17719 9.4022 2.17719 9.75382V10.6604C2.17719 11.0121 1.89251 11.2971 1.54133 11.2971H0.635863C0.284686 11.2971 0 11.0121 0 10.6604V9.75382Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M3.11318 9.75382C3.11318 9.4022 3.39787 9.11715 3.74905 9.11715H4.65452C5.00569 9.11715 5.29038 9.4022 5.29038 9.75382V10.6604C5.29038 11.0121 5.00569 11.2971 4.65452 11.2971H3.74905C3.39787 11.2971 3.11318 11.0121 3.11318 10.6604V9.75382Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M6.22637 9.75382C6.22637 9.4022 6.51105 9.11715 6.86223 9.11715H7.7677C8.11888 9.11715 8.40356 9.4022 8.40356 9.75382V10.6604C8.40356 11.0121 8.11888 11.2971 7.7677 11.2971H6.86223C6.51105 11.2971 6.22637 11.0121 6.22637 10.6604V9.75382Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M9.33955 9.75382C9.33955 9.4022 9.62424 9.11715 9.97541 9.11715H10.8809C11.2321 9.11715 11.5167 9.4022 11.5167 9.75382V10.6604C11.5167 11.0121 11.2321 11.2971 10.8809 11.2971H9.97541C9.62424 11.2971 9.33955 11.0121 9.33955 10.6604V9.75382Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M12.4527 9.75382C12.4527 9.4022 12.7374 9.11715 13.0886 9.11715H13.9941C14.3452 9.11715 14.6299 9.4022 14.6299 9.75382V10.6604C14.6299 11.0121 14.3452 11.2971 13.9941 11.2971H13.0886C12.7374 11.2971 12.4527 11.0121 12.4527 10.6604V9.75382Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M15.5659 9.75382C15.5659 9.4022 15.8506 9.11715 16.2018 9.11715H17.1073C17.4584 9.11715 17.7431 9.4022 17.7431 9.75382V10.6604C17.7431 11.0121 17.4584 11.2971 17.1073 11.2971H16.2018C15.8506 11.2971 15.5659 11.0121 15.5659 10.6604V9.75382Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M18.6791 9.75382C18.6791 9.4022 18.9638 9.11715 19.315 9.11715H20.2204C20.5716 9.11715 20.8563 9.4022 20.8563 9.75382V10.6604C20.8563 11.0121 20.5716 11.2971 20.2204 11.2971H19.315C18.9638 11.2971 18.6791 11.0121 18.6791 10.6604V9.75382Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M21.7923 9.75382C21.7923 9.4022 22.077 9.11715 22.4282 9.11715H23.3336C23.6848 9.11715 23.9695 9.4022 23.9695 9.75382V10.6604C23.9695 11.0121 23.6848 11.2971 23.3336 11.2971H22.4282C22.077 11.2971 21.7923 11.0121 21.7923 10.6604V9.75382Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M0 12.871C0 12.5193 0.284685 12.2343 0.635863 12.2343H1.54133C1.89251 12.2343 2.17719 12.5193 2.17719 12.871V13.7776C2.17719 14.1292 1.89251 14.4143 1.54133 14.4143H0.635863C0.284686 14.4143 0 14.1292 0 13.7776V12.871Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M3.11318 12.871C3.11318 12.5193 3.39787 12.2343 3.74905 12.2343H4.65452C5.00569 12.2343 5.29038 12.5193 5.29038 12.871V13.7776C5.29038 14.1292 5.00569 14.4143 4.65452 14.4143H3.74905C3.39787 14.4143 3.11318 14.1292 3.11318 13.7776V12.871Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M6.22637 12.871C6.22637 12.5193 6.51105 12.2343 6.86223 12.2343H7.7677C8.11888 12.2343 8.40356 12.5193 8.40356 12.871V13.7776C8.40356 14.1292 8.11888 14.4143 7.7677 14.4143H6.86223C6.51105 14.4143 6.22637 14.1292 6.22637 13.7776V12.871Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M9.33955 12.871C9.33955 12.5193 9.62424 12.2343 9.97541 12.2343H10.8809C11.2321 12.2343 11.5167 12.5193 11.5167 12.871V13.7776C11.5167 14.1292 11.2321 14.4143 10.8809 14.4143H9.97541C9.62424 14.4143 9.33955 14.1292 9.33955 13.7776V12.871Z'
fill='#0E7BC9'
/>
<path
d='M12.4527 12.871C12.4527 12.5193 12.7374 12.2343 13.0886 12.2343H13.9941C14.3452 12.2343 14.6299 12.5193 14.6299 12.871V13.7776C14.6299 14.1292 14.3452 14.4143 13.9941 14.4143H13.0886C12.7374 14.4143 12.4527 14.1292 12.4527 13.7776V12.871Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M15.5659 12.871C15.5659 12.5193 15.8506 12.2343 16.2018 12.2343H17.1073C17.4584 12.2343 17.7431 12.5193 17.7431 12.871V13.7776C17.7431 14.1292 17.4584 14.4143 17.1073 14.4143H16.2018C15.8506 14.4143 15.5659 14.1292 15.5659 13.7776V12.871Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M18.6791 12.871C18.6791 12.5193 18.9638 12.2343 19.315 12.2343H20.2204C20.5716 12.2343 20.8563 12.5193 20.8563 12.871V13.7776C20.8563 14.1292 20.5716 14.4143 20.2204 14.4143H19.315C18.9638 14.4143 18.6791 14.1292 18.6791 13.7776V12.871Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M21.7923 12.871C21.7923 12.5193 22.077 12.2343 22.4282 12.2343H23.3336C23.6848 12.2343 23.9695 12.5193 23.9695 12.871V13.7776C23.9695 14.1292 23.6848 14.4143 23.3336 14.4143H22.4282C22.077 14.4143 21.7923 14.1292 21.7923 13.7776V12.871Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M0 15.9881C0 15.6365 0.284685 15.3514 0.635863 15.3514H1.54133C1.89251 15.3514 2.17719 15.6365 2.17719 15.9881V16.8947C2.17719 17.2464 1.89251 17.5314 1.54133 17.5314H0.635863C0.284686 17.5314 0 17.2464 0 16.8947V15.9881Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M3.11318 15.9881C3.11318 15.6365 3.39787 15.3514 3.74905 15.3514H4.65452C5.00569 15.3514 5.29038 15.6365 5.29038 15.9881V16.8947C5.29038 17.2464 5.00569 17.5314 4.65452 17.5314H3.74905C3.39787 17.5314 3.11318 17.2464 3.11318 16.8947V15.9881Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M6.22637 15.9881C6.22637 15.6365 6.51105 15.3514 6.86223 15.3514H7.7677C8.11888 15.3514 8.40356 15.6365 8.40356 15.9881V16.8947C8.40356 17.2464 8.11888 17.5314 7.7677 17.5314H6.86223C6.51105 17.5314 6.22637 17.2464 6.22637 16.8947V15.9881Z'
fill='#0E7BC9'
/>
<path
d='M9.33955 15.9881C9.33955 15.6365 9.62424 15.3514 9.97541 15.3514H10.8809C11.2321 15.3514 11.5167 15.6365 11.5167 15.9881V16.8947C11.5167 17.2464 11.2321 17.5314 10.8809 17.5314H9.97541C9.62424 17.5314 9.33955 17.2464 9.33955 16.8947V15.9881Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M12.4527 15.9881C12.4527 15.6365 12.7374 15.3514 13.0886 15.3514H13.9941C14.3452 15.3514 14.6299 15.6365 14.6299 15.9881V16.8947C14.6299 17.2464 14.3452 17.5314 13.9941 17.5314H13.0886C12.7374 17.5314 12.4527 17.2464 12.4527 16.8947V15.9881Z'
fill='#0E7BC9'
/>
<path
d='M15.5659 15.9881C15.5659 15.6365 15.8506 15.3514 16.2018 15.3514H17.1073C17.4584 15.3514 17.7431 15.6365 17.7431 15.9881V16.8947C17.7431 17.2464 17.4584 17.5314 17.1073 17.5314H16.2018C15.8506 17.5314 15.5659 17.2464 15.5659 16.8947V15.9881Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M18.6791 15.9881C18.6791 15.6365 18.9638 15.3514 19.315 15.3514H20.2204C20.5716 15.3514 20.8563 15.6365 20.8563 15.9881V16.8947C20.8563 17.2464 20.5716 17.5314 20.2204 17.5314H19.315C18.9638 17.5314 18.6791 17.2464 18.6791 16.8947V15.9881Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M21.7923 15.9881C21.7923 15.6365 22.077 15.3514 22.4282 15.3514H23.3336C23.6848 15.3514 23.9695 15.6365 23.9695 15.9881V16.8947C23.9695 17.2464 23.6848 17.5314 23.3336 17.5314H22.4282C22.077 17.5314 21.7923 17.2464 21.7923 16.8947V15.9881Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M0 19.1053C0 18.7536 0.284685 18.4686 0.635863 18.4686H1.54133C1.89251 18.4686 2.17719 18.7536 2.17719 19.1053V20.0119C2.17719 20.3635 1.89251 20.6486 1.54133 20.6486H0.635863C0.284686 20.6486 0 20.3635 0 20.0119V19.1053Z'
fill='#0E7BC9'
/>
<path
d='M3.11318 19.1053C3.11318 18.7536 3.39787 18.4686 3.74905 18.4686H4.65452C5.00569 18.4686 5.29038 18.7536 5.29038 19.1053V20.0119C5.29038 20.3635 5.00569 20.6486 4.65452 20.6486H3.74905C3.39787 20.6486 3.11318 20.3635 3.11318 20.0119V19.1053Z'
fill='#0E7BC9'
/>
<path
d='M6.22637 19.1053C6.22637 18.7536 6.51105 18.4686 6.86223 18.4686H7.7677C8.11888 18.4686 8.40356 18.7536 8.40356 19.1053V20.0119C8.40356 20.3635 8.11888 20.6486 7.7677 20.6486H6.86223C6.51105 20.6486 6.22637 20.3635 6.22637 20.0119V19.1053Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M9.33955 19.1053C9.33955 18.7536 9.62424 18.4686 9.97541 18.4686H10.8809C11.2321 18.4686 11.5167 18.7536 11.5167 19.1053V20.0119C11.5167 20.3635 11.2321 20.6486 10.8809 20.6486H9.97541C9.62424 20.6486 9.33955 20.3635 9.33955 20.0119V19.1053Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M12.4527 19.1053C12.4527 18.7536 12.7374 18.4686 13.0886 18.4686H13.9941C14.3452 18.4686 14.6299 18.7536 14.6299 19.1053V20.0119C14.6299 20.3635 14.3452 20.6486 13.9941 20.6486H13.0886C12.7374 20.6486 12.4527 20.3635 12.4527 20.0119V19.1053Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M15.5659 19.1053C15.5659 18.7536 15.8506 18.4686 16.2018 18.4686H17.1073C17.4584 18.4686 17.7431 18.7536 17.7431 19.1053V20.0119C17.7431 20.3635 17.4584 20.6486 17.1073 20.6486H16.2018C15.8506 20.6486 15.5659 20.3635 15.5659 20.0119V19.1053Z'
fill='#0E7BC9'
/>
<path
d='M18.6791 19.1053C18.6791 18.7536 18.9638 18.4686 19.315 18.4686H20.2204C20.5716 18.4686 20.8563 18.7536 20.8563 19.1053V20.0119C20.8563 20.3635 20.5716 20.6486 20.2204 20.6486H19.315C18.9638 20.6486 18.6791 20.3635 18.6791 20.0119V19.1053Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M21.7923 19.1053C21.7923 18.7536 22.077 18.4686 22.4282 18.4686H23.3336C23.6848 18.4686 23.9695 18.7536 23.9695 19.1053V20.0119C23.9695 20.3635 23.6848 20.6486 23.3336 20.6486H22.4282C22.077 20.6486 21.7923 20.3635 21.7923 20.0119V19.1053Z'
fill='#0E7BC9'
/>
<path
d='M0 22.2224C0 21.8708 0.284685 21.5857 0.635863 21.5857H1.54133C1.89251 21.5857 2.17719 21.8708 2.17719 22.2224V23.129C2.17719 23.4807 1.89251 23.7657 1.54133 23.7657H0.635863C0.284686 23.7657 0 23.4807 0 23.129V22.2224Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M3.11318 22.2224C3.11318 21.8708 3.39787 21.5857 3.74905 21.5857H4.65452C5.00569 21.5857 5.29038 21.8708 5.29038 22.2224V23.129C5.29038 23.4807 5.00569 23.7657 4.65452 23.7657H3.74905C3.39787 23.7657 3.11318 23.4807 3.11318 23.129V22.2224Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M6.22637 22.2224C6.22637 21.8708 6.51105 21.5857 6.86223 21.5857H7.7677C8.11888 21.5857 8.40356 21.8708 8.40356 22.2224V23.129C8.40356 23.4807 8.11888 23.7657 7.7677 23.7657H6.86223C6.51105 23.7657 6.22637 23.4807 6.22637 23.129V22.2224Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M9.33955 22.2224C9.33955 21.8708 9.62424 21.5857 9.97541 21.5857H10.8809C11.2321 21.5857 11.5167 21.8708 11.5167 22.2224V23.129C11.5167 23.4807 11.2321 23.7657 10.8809 23.7657H9.97541C9.62424 23.7657 9.33955 23.4807 9.33955 23.129V22.2224Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M12.4527 22.2224C12.4527 21.8708 12.7374 21.5857 13.0886 21.5857H13.9941C14.3452 21.5857 14.6299 21.8708 14.6299 22.2224V23.129C14.6299 23.4807 14.3452 23.7657 13.9941 23.7657H13.0886C12.7374 23.7657 12.4527 23.4807 12.4527 23.129V22.2224Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M15.5659 22.2224C15.5659 21.8708 15.8506 21.5857 16.2018 21.5857H17.1073C17.4584 21.5857 17.7431 21.8708 17.7431 22.2224V23.129C17.7431 23.4807 17.4584 23.7657 17.1073 23.7657H16.2018C15.8506 23.7657 15.5659 23.4807 15.5659 23.129V22.2224Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M18.6791 22.2224C18.6791 21.8708 18.9638 21.5857 19.315 21.5857H20.2204C20.5716 21.5857 20.8563 21.8708 20.8563 22.2224V23.129C20.8563 23.4807 20.5716 23.7657 20.2204 23.7657H19.315C18.9638 23.7657 18.6791 23.4807 18.6791 23.129V22.2224Z'
fill='#0E7BC9'
/>
<path
d='M21.7923 22.2224C21.7923 21.8708 22.077 21.5857 22.4282 21.5857H23.3336C23.6848 21.5857 23.9695 21.8708 23.9695 22.2224V23.129C23.9695 23.4807 23.6848 23.7657 23.3336 23.7657H22.4282C22.077 23.7657 21.7923 23.4807 21.7923 23.129V22.2224Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M0 25.3396C0 24.9879 0.284685 24.7029 0.635863 24.7029H1.54133C1.89251 24.7029 2.17719 24.9879 2.17719 25.3396V26.2462C2.17719 26.5978 1.89251 26.8829 1.54133 26.8829H0.635863C0.284686 26.8829 0 26.5978 0 26.2462V25.3396Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M3.11318 25.3396C3.11318 24.9879 3.39787 24.7029 3.74905 24.7029H4.65452C5.00569 24.7029 5.29038 24.9879 5.29038 25.3396V26.2462C5.29038 26.5978 5.00569 26.8829 4.65452 26.8829H3.74905C3.39787 26.8829 3.11318 26.5978 3.11318 26.2462V25.3396Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M6.22637 25.3396C6.22637 24.9879 6.51105 24.7029 6.86223 24.7029H7.7677C8.11888 24.7029 8.40356 24.9879 8.40356 25.3396V26.2462C8.40356 26.5978 8.11888 26.8829 7.7677 26.8829H6.86223C6.51105 26.8829 6.22637 26.5978 6.22637 26.2462V25.3396Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M9.33955 25.3396C9.33955 24.9879 9.62424 24.7029 9.97541 24.7029H10.8809C11.2321 24.7029 11.5167 24.9879 11.5167 25.3396V26.2462C11.5167 26.5978 11.2321 26.8829 10.8809 26.8829H9.97541C9.62424 26.8829 9.33955 26.5978 9.33955 26.2462V25.3396Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M12.4527 25.3396C12.4527 24.9879 12.7374 24.7029 13.0886 24.7029H13.9941C14.3452 24.7029 14.6299 24.9879 14.6299 25.3396V26.2462C14.6299 26.5978 14.3452 26.8829 13.9941 26.8829H13.0886C12.7374 26.8829 12.4527 26.5978 12.4527 26.2462V25.3396Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M15.5659 25.3396C15.5659 24.9879 15.8506 24.7029 16.2018 24.7029H17.1073C17.4584 24.7029 17.7431 24.9879 17.7431 25.3396V26.2462C17.7431 26.5978 17.4584 26.8829 17.1073 26.8829H16.2018C15.8506 26.8829 15.5659 26.5978 15.5659 26.2462V25.3396Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M18.6791 25.3396C18.6791 24.9879 18.9638 24.7029 19.315 24.7029H20.2204C20.5716 24.7029 20.8563 24.9879 20.8563 25.3396V26.2462C20.8563 26.5978 20.5716 26.8829 20.2204 26.8829H19.315C18.9638 26.8829 18.6791 26.5978 18.6791 26.2462V25.3396Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M21.7923 25.3396C21.7923 24.9879 22.077 24.7029 22.4282 24.7029H23.3336C23.6848 24.7029 23.9695 24.9879 23.9695 25.3396V26.2462C23.9695 26.5978 23.6848 26.8829 23.3336 26.8829H22.4282C22.077 26.8829 21.7923 26.5978 21.7923 26.2462V25.3396Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M0 28.4567C0 28.1051 0.284685 27.82 0.635863 27.82H1.54133C1.89251 27.82 2.17719 28.1051 2.17719 28.4567V29.3633C2.17719 29.715 1.89251 30 1.54133 30H0.635863C0.284686 30 0 29.715 0 29.3633V28.4567Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M3.11318 28.4567C3.11318 28.1051 3.39787 27.82 3.74905 27.82H4.65452C5.00569 27.82 5.29038 28.1051 5.29038 28.4567V29.3633C5.29038 29.715 5.00569 30 4.65452 30H3.74905C3.39787 30 3.11318 29.715 3.11318 29.3633V28.4567Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M6.22637 28.4567C6.22637 28.1051 6.51105 27.82 6.86223 27.82H7.7677C8.11888 27.82 8.40356 28.1051 8.40356 28.4567V29.3633C8.40356 29.715 8.11888 30 7.7677 30H6.86223C6.51105 30 6.22637 29.715 6.22637 29.3633V28.4567Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M9.33955 28.4567C9.33955 28.1051 9.62424 27.82 9.97541 27.82H10.8809C11.2321 27.82 11.5167 28.1051 11.5167 28.4567V29.3633C11.5167 29.715 11.2321 30 10.8809 30H9.97541C9.62424 30 9.33955 29.715 9.33955 29.3633V28.4567Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M12.4527 28.4567C12.4527 28.1051 12.7374 27.82 13.0886 27.82H13.9941C14.3452 27.82 14.6299 28.1051 14.6299 28.4567V29.3633C14.6299 29.715 14.3452 30 13.9941 30H13.0886C12.7374 30 12.4527 29.715 12.4527 29.3633V28.4567Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M15.5659 28.4567C15.5659 28.1051 15.8506 27.82 16.2018 27.82H17.1073C17.4584 27.82 17.7431 28.1051 17.7431 28.4567V29.3633C17.7431 29.715 17.4584 30 17.1073 30H16.2018C15.8506 30 15.5659 29.715 15.5659 29.3633V28.4567Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M18.6791 28.4567C18.6791 28.1051 18.9638 27.82 19.315 27.82H20.2204C20.5716 27.82 20.8563 28.1051 20.8563 28.4567V29.3633C20.8563 29.715 20.5716 30 20.2204 30H19.315C18.9638 30 18.6791 29.715 18.6791 29.3633V28.4567Z'
fill='#030712'
fillOpacity='0.1'
/>
<path
d='M21.7923 28.4567C21.7923 28.1051 22.077 27.82 22.4282 27.82H23.3336C23.6848 27.82 23.9695 28.1051 23.9695 28.4567V29.3633C23.9695 29.715 23.6848 30 23.3336 30H22.4282C22.077 30 21.7923 29.715 21.7923 29.3633V28.4567Z'
fill='#030712'
fillOpacity='0.1'
/>
</svg>
)
}

View File

@@ -84,9 +84,11 @@ import {
PolymarketIcon,
PostgresIcon,
PosthogIcon,
PulseIcon,
QdrantIcon,
RDSIcon,
RedditIcon,
ReductoIcon,
ResendIcon,
S3Icon,
SalesforceIcon,
@@ -208,9 +210,11 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
polymarket: PolymarketIcon,
postgresql: PostgresIcon,
posthog: PosthogIcon,
pulse: PulseIcon,
qdrant: QdrantIcon,
rds: RDSIcon,
reddit: RedditIcon,
reducto: ReductoIcon,
resend: ResendIcon,
s3: S3Icon,
salesforce: SalesforceIcon,

View File

@@ -1,3 +1,3 @@
{
"pages": ["index", "basics", "api", "form", "logging", "costs"]
"pages": ["index", "basics", "api", "logging", "costs"]
}

View File

@@ -36,43 +36,47 @@ Connect Google Vault to create exports, list exports, and manage holds within ma
### `google_vault_create_matters_export`
Create an export in a matter
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `matterId` | string | Yes | The matter ID |
| `exportName` | string | Yes | Name for the export \(avoid special characters\) |
| `corpus` | string | Yes | Data corpus to export \(MAIL, DRIVE, GROUPS, HANGOUTS_CHAT, VOICE\) |
| `accountEmails` | string | No | Comma-separated list of user emails to scope export |
| `orgUnitId` | string | No | Organization unit ID to scope export \(alternative to emails\) |
| `startTime` | string | No | Start time for date filtering \(ISO 8601 format, e.g., 2024-01-01T00:00:00Z\) |
| `endTime` | string | No | End time for date filtering \(ISO 8601 format, e.g., 2024-12-31T23:59:59Z\) |
| `terms` | string | No | Search query terms to filter exported content |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `matters` | json | Array of matter objects \(for list_matters\) |
| `exports` | json | Array of export objects \(for list_matters_export\) |
| `holds` | json | Array of hold objects \(for list_matters_holds\) |
| `matter` | json | Created matter object \(for create_matters\) |
| `export` | json | Created export object \(for create_matters_export\) |
| `hold` | json | Created hold object \(for create_matters_holds\) |
| `file` | json | Downloaded export file \(UserFile\) from execution files |
| `nextPageToken` | string | Token for fetching next page of results \(for list operations\) |
| `export` | json | Created export object |
### `google_vault_list_matters_export`
List exports for a matter
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `matterId` | string | Yes | The matter ID |
| `pageSize` | number | No | Number of exports to return per page |
| `pageToken` | string | No | Token for pagination |
| `exportId` | string | No | Optional export ID to fetch a specific export |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `matters` | json | Array of matter objects \(for list_matters\) |
| `exports` | json | Array of export objects \(for list_matters_export\) |
| `holds` | json | Array of hold objects \(for list_matters_holds\) |
| `matter` | json | Created matter object \(for create_matters\) |
| `export` | json | Created export object \(for create_matters_export\) |
| `hold` | json | Created hold object \(for create_matters_holds\) |
| `file` | json | Downloaded export file \(UserFile\) from execution files |
| `nextPageToken` | string | Token for fetching next page of results \(for list operations\) |
| `exports` | json | Array of export objects |
| `export` | json | Single export object \(when exportId is provided\) |
| `nextPageToken` | string | Token for fetching next page of results |
### `google_vault_download_export_file`
@@ -82,10 +86,10 @@ Download a single file from a Google Vault export (GCS object)
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `matterId` | string | Yes | No description |
| `bucketName` | string | Yes | No description |
| `objectName` | string | Yes | No description |
| `fileName` | string | No | No description |
| `matterId` | string | Yes | The matter ID |
| `bucketName` | string | Yes | GCS bucket name from cloudStorageSink.files.bucketName |
| `objectName` | string | Yes | GCS object name from cloudStorageSink.files.objectName |
| `fileName` | string | No | Optional filename override for the downloaded file |
#### Output
@@ -95,82 +99,84 @@ Download a single file from a Google Vault export (GCS object)
### `google_vault_create_matters_holds`
Create a hold in a matter
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `matterId` | string | Yes | The matter ID |
| `holdName` | string | Yes | Name for the hold |
| `corpus` | string | Yes | Data corpus to hold \(MAIL, DRIVE, GROUPS, HANGOUTS_CHAT, VOICE\) |
| `accountEmails` | string | No | Comma-separated list of user emails to put on hold |
| `orgUnitId` | string | No | Organization unit ID to put on hold \(alternative to accounts\) |
| `terms` | string | No | Search terms to filter held content \(for MAIL and GROUPS corpus\) |
| `startTime` | string | No | Start time for date filtering \(ISO 8601 format, for MAIL and GROUPS corpus\) |
| `endTime` | string | No | End time for date filtering \(ISO 8601 format, for MAIL and GROUPS corpus\) |
| `includeSharedDrives` | boolean | No | Include files in shared drives \(for DRIVE corpus\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `matters` | json | Array of matter objects \(for list_matters\) |
| `exports` | json | Array of export objects \(for list_matters_export\) |
| `holds` | json | Array of hold objects \(for list_matters_holds\) |
| `matter` | json | Created matter object \(for create_matters\) |
| `export` | json | Created export object \(for create_matters_export\) |
| `hold` | json | Created hold object \(for create_matters_holds\) |
| `file` | json | Downloaded export file \(UserFile\) from execution files |
| `nextPageToken` | string | Token for fetching next page of results \(for list operations\) |
| `hold` | json | Created hold object |
### `google_vault_list_matters_holds`
List holds for a matter
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `matterId` | string | Yes | The matter ID |
| `pageSize` | number | No | Number of holds to return per page |
| `pageToken` | string | No | Token for pagination |
| `holdId` | string | No | Optional hold ID to fetch a specific hold |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `matters` | json | Array of matter objects \(for list_matters\) |
| `exports` | json | Array of export objects \(for list_matters_export\) |
| `holds` | json | Array of hold objects \(for list_matters_holds\) |
| `matter` | json | Created matter object \(for create_matters\) |
| `export` | json | Created export object \(for create_matters_export\) |
| `hold` | json | Created hold object \(for create_matters_holds\) |
| `file` | json | Downloaded export file \(UserFile\) from execution files |
| `nextPageToken` | string | Token for fetching next page of results \(for list operations\) |
| `holds` | json | Array of hold objects |
| `hold` | json | Single hold object \(when holdId is provided\) |
| `nextPageToken` | string | Token for fetching next page of results |
### `google_vault_create_matters`
Create a new matter in Google Vault
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `name` | string | Yes | Name for the new matter |
| `description` | string | No | Optional description for the matter |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `matters` | json | Array of matter objects \(for list_matters\) |
| `exports` | json | Array of export objects \(for list_matters_export\) |
| `holds` | json | Array of hold objects \(for list_matters_holds\) |
| `matter` | json | Created matter object \(for create_matters\) |
| `export` | json | Created export object \(for create_matters_export\) |
| `hold` | json | Created hold object \(for create_matters_holds\) |
| `file` | json | Downloaded export file \(UserFile\) from execution files |
| `nextPageToken` | string | Token for fetching next page of results \(for list operations\) |
| `matter` | json | Created matter object |
### `google_vault_list_matters`
List matters, or get a specific matter if matterId is provided
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `pageSize` | number | No | Number of matters to return per page |
| `pageToken` | string | No | Token for pagination |
| `matterId` | string | No | Optional matter ID to fetch a specific matter |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `matters` | json | Array of matter objects \(for list_matters\) |
| `exports` | json | Array of export objects \(for list_matters_export\) |
| `holds` | json | Array of hold objects \(for list_matters_holds\) |
| `matter` | json | Created matter object \(for create_matters\) |
| `export` | json | Created export object \(for create_matters_export\) |
| `hold` | json | Created hold object \(for create_matters_holds\) |
| `file` | json | Downloaded export file \(UserFile\) from execution files |
| `nextPageToken` | string | Token for fetching next page of results \(for list operations\) |
| `matters` | json | Array of matter objects |
| `matter` | json | Single matter object \(when matterId is provided\) |
| `nextPageToken` | string | Token for fetching next page of results |
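The point of this docs change is that each Vault operation now documents only its own output fields instead of the shared union. As a rough illustration, here is a hedged TypeScript sketch of the narrowed result shapes; the interface and field names mirror the tables above, but the concrete object types are assumptions, since the diff does not show them.

```typescript
// Sketch of the narrowed per-tool output shapes described by the tables
// above. Names mirror the docs; `unknown` stands in for the real matter/
// hold object types, which are not shown in this diff (illustration only).
interface VaultListMattersOutput {
  matters: unknown[] // array of matter objects
  matter?: unknown // single matter object, when matterId is provided
  nextPageToken?: string // token for fetching the next page of results
}

interface VaultCreateMattersOutput {
  matter: unknown // created matter object only; no unrelated union fields
}

interface VaultListHoldsOutput {
  holds: unknown[] // array of hold objects
  hold?: unknown // single hold object, when holdId is provided
  nextPageToken?: string
}
```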

View File

@@ -79,9 +79,11 @@
"polymarket",
"postgresql",
"posthog",
"pulse",
"qdrant",
"rds",
"reddit",
"reducto",
"resend",
"s3",
"salesforce",

View File

@@ -0,0 +1,72 @@
---
title: Pulse
description: Extract text from documents using Pulse OCR
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="pulse"
color="#E0E0E0"
/>
{/* MANUAL-CONTENT-START:intro */}
The [Pulse](https://www.pulseapi.com/) tool enables seamless extraction of text and structured content from a wide variety of documents—including PDFs, images, and Office files—using state-of-the-art OCR (Optical Character Recognition) powered by Pulse. Designed for automated agentic workflows, Pulse Parser makes it easy to unlock valuable information trapped in unstructured documents and integrate the extracted content directly into your workflow.
With Pulse, you can:
- **Extract text from documents**: Quickly convert scanned PDFs, images, and Office documents to usable text, markdown, or JSON.
- **Process documents by URL or upload**: Simply provide a file URL or use upload to extract text from local documents or remote resources.
- **Flexible output formats**: Choose between markdown, plain text, or JSON representations of the extracted content for downstream processing.
- **Selective page processing**: Specify a range of pages to process, reducing processing time and cost when you only need part of a document.
- **Figure and table extraction**: Optionally extract figures and tables, with automatic caption and description generation for populated context.
- **Get processing insights**: Receive detailed metadata on each job, including file type, page count, processing time, and more.
- **Integration-ready responses**: Incorporate extracted content into research, workflow automation, or data analysis pipelines.
Ideal for automating tedious document review, enabling content summarization, research, and more, Pulse Parser brings real-world documents into the digital workflow era.
If you need accurate, scalable, and developer-friendly document parsing capabilities—across formats, languages, and layouts—Pulse empowers your agents to read the world.
{/* MANUAL-CONTENT-END */}
## Usage Instructions
Integrate Pulse into the workflow. Extract text from PDF documents, images, and Office files via URL or upload.
## Tools
### `pulse_parser`
Parse documents (PDF, images, Office docs) using Pulse OCR API
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `filePath` | string | Yes | URL to a document to be processed |
| `fileUpload` | object | No | File upload data from file-upload component |
| `pages` | string | No | Page range to process \(1-indexed, e.g., "1-2,5"\) |
| `extractFigure` | boolean | No | Enable figure extraction from the document |
| `figureDescription` | boolean | No | Generate descriptions/captions for extracted figures |
| `returnHtml` | boolean | No | Include HTML in the response |
| `chunking` | string | No | Chunking strategies \(comma-separated: semantic, header, page, recursive\) |
| `chunkSize` | number | No | Maximum characters per chunk when chunking is enabled |
| `apiKey` | string | Yes | Pulse API key |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `markdown` | string | Extracted content in markdown format |
| `page_count` | number | Number of pages in the document |
| `job_id` | string | Unique job identifier |
| `bounding_boxes` | json | Bounding box layout information |
| `extraction_url` | string | URL for extraction results \(for large documents\) |
| `html` | string | HTML content if requested |
| `structured_output` | json | Structured output if schema was provided |
| `chunks` | json | Chunked content if chunking was enabled |
| `figures` | json | Extracted figures if figure extraction was enabled |
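To show how these parameters fit together, here is a minimal hedged sketch of invoking a Pulse-style parse endpoint. The URL, header name, and response handling are assumptions for illustration, not confirmed Pulse API details; consult the Pulse docs for the real contract.

```typescript
// Hypothetical sketch of calling a Pulse-style parse endpoint with the
// documented inputs. Endpoint URL and auth header are assumptions.
async function parsePulseDocument(apiKey: string, filePath: string) {
  const response = await fetch('https://api.pulseapi.com/parse', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'x-api-key': apiKey },
    body: JSON.stringify({
      filePath, // URL to the document to process
      pages: '1-2,5', // optional 1-indexed page range
      extractFigure: true, // optional figure extraction
      chunking: 'semantic', // optional chunking strategy
    }),
  })
  if (!response.ok) throw new Error(`Pulse parse failed: ${response.status}`)
  // Per the output table above: markdown, page_count, job_id, plus
  // chunks/figures when those options are enabled.
  return response.json()
}
```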

View File

@@ -0,0 +1,63 @@
---
title: Reducto
description: Extract text from PDF documents
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="reducto"
color="#5c0c5c"
/>
{/* MANUAL-CONTENT-START:intro */}
The [Reducto](https://reducto.ai/) tool enables fast and accurate extraction of text and data from PDF documents via OCR (Optical Character Recognition). Reducto is designed for agent workflows, making it easy to process uploaded or linked PDFs and transform their contents into ready-to-use information.
With the Reducto tool, you can:
- **Extract text and tables from PDFs**: Quickly convert scanned or digital PDFs to text, markdown, or structured JSON.
- **Parse PDFs from uploads or URLs**: Process documents either by uploading a PDF or specifying a direct URL.
- **Customize output formatting**: Choose your preferred output format—markdown, plain text, or JSON—and specify table formats as markdown or HTML.
- **Select specific pages**: Optionally extract content from particular pages to optimize processing and focus on what matters.
- **Receive detailed processing metadata**: Alongside extracted content, get job details, processing times, source file info, page counts, and OCR usage stats for audit and automation.
Whether you're automating workflow steps, extracting business-critical information, or unlocking archival documents for search and analysis, Reducto's OCR parser gives you structured, actionable data from even the most complex PDFs.
Looking for reliable and scalable PDF parsing? Reducto is optimized for developer and agent use—providing accuracy, speed, and flexibility for modern document understanding.
{/* MANUAL-CONTENT-END */}
## Usage Instructions
Integrate Reducto Parse into the workflow. It can extract text from uploaded PDF documents or from a URL.
## Tools
### `reducto_parser`
Parse PDF documents using Reducto OCR API
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `filePath` | string | Yes | URL to a PDF document to be processed |
| `fileUpload` | object | No | File upload data from file-upload component |
| `pages` | array | No | Specific pages to process \(1-indexed page numbers\) |
| `tableOutputFormat` | string | No | Table output format \(html or markdown\). Defaults to markdown. |
| `apiKey` | string | Yes | Reducto API key \(REDUCTO_API_KEY\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `job_id` | string | Unique identifier for the processing job |
| `duration` | number | Processing time in seconds |
| `usage` | json | Resource consumption data |
| `result` | json | Parsed document content with chunks and blocks |
| `pdf_url` | string | Storage URL of converted PDF |
| `studio_link` | string | Link to Reducto studio interface |
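For context, a similarly hedged sketch of calling a Reducto-style parse endpoint with the documented inputs; the endpoint and auth scheme are illustrative assumptions, not the confirmed Reducto API.

```typescript
// Hypothetical sketch of calling a Reducto-style parse endpoint.
// URL and Authorization header are assumptions for illustration.
async function parseReductoPdf(apiKey: string, filePath: string) {
  const response = await fetch('https://api.reducto.ai/parse', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${apiKey}`,
    },
    body: JSON.stringify({
      filePath, // URL to the PDF to process
      pages: [1, 2], // optional 1-indexed page numbers
      tableOutputFormat: 'markdown', // 'html' or 'markdown' (default markdown)
    }),
  })
  if (!response.ok) throw new Error(`Reducto parse failed: ${response.status}`)
  // Per the output table: job_id, duration, usage, result, pdf_url, studio_link.
  return response.json()
}
```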

View File

@@ -11,10 +11,8 @@
"next-env.d.ts",
"**/*.ts",
"**/*.tsx",
".next/types/**/*.ts",
"content/docs/execution/index.mdx",
"content/docs/connections/index.mdx",
".next/dev/types/**/*.ts"
"content/docs/connections/index.mdx"
],
"exclude": ["node_modules"]
"exclude": ["node_modules", ".next"]
}

View File

@@ -16,9 +16,12 @@ export function PostHogProvider({ children }: { children: React.ReactNode }) {
ui_host: 'https://us.posthog.com',
defaults: '2025-05-24',
person_profiles: 'identified_only',
capture_pageview: true,
autocapture: false,
capture_pageview: false,
capture_pageleave: false,
capture_performance: false,
capture_dead_clicks: false,
enable_heatmaps: false,
session_recording: {
maskAllInputs: false,
maskInputOptions: {
@@ -29,13 +32,7 @@ export function PostHogProvider({ children }: { children: React.ReactNode }) {
recordHeaders: false,
recordBody: false,
},
autocapture: {
dom_event_allowlist: ['click', 'submit', 'change'],
element_allowlist: ['button', 'a', 'input'],
},
capture_dead_clicks: false,
persistence: 'localStorage+cookie',
enable_heatmaps: false,
})
}
}, [])

View File

@@ -0,0 +1,11 @@
'use client'
import { Tooltip } from '@/components/emcn'
interface TooltipProviderProps {
children: React.ReactNode
}
export function TooltipProvider({ children }: TooltipProviderProps) {
return <Tooltip.Provider>{children}</Tooltip.Provider>
}
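A minimal usage sketch for the provider above: wrapping the app tree so descendant tooltips share one provider. The layout file and import path are assumptions for illustration.

```typescript
// Hypothetical mounting point; the import path is an assumption.
import { TooltipProvider } from '@/app/providers/tooltip-provider'

export default function RootLayout({ children }: { children: React.ReactNode }) {
  return <TooltipProvider>{children}</TooltipProvider>
}
```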

View File

@@ -11,7 +11,7 @@
*/
:root {
--sidebar-width: 232px; /* SIDEBAR_WIDTH.DEFAULT */
--panel-width: 290px; /* PANEL_WIDTH.DEFAULT */
--panel-width: 320px; /* PANEL_WIDTH.DEFAULT */
--toolbar-triggers-height: 300px; /* TOOLBAR_TRIGGERS_HEIGHT.DEFAULT */
--editor-connections-height: 172px; /* EDITOR_CONNECTIONS_HEIGHT.DEFAULT */
--terminal-height: 155px; /* TERMINAL_HEIGHT.DEFAULT */
@@ -59,21 +59,22 @@
}
/**
* Selected node ring indicator
* Uses a pseudo-element overlay to match the original behavior (absolute inset-0 z-40)
* Workflow canvas cursor styles
* Override React Flow's default selection cursor based on canvas mode
*/
.react-flow__node.selected > div > div {
position: relative;
.workflow-container.canvas-mode-cursor .react-flow__pane,
.workflow-container.canvas-mode-cursor .react-flow__selectionpane {
cursor: default !important;
}
.react-flow__node.selected > div > div::after {
content: "";
position: absolute;
inset: 0;
z-index: 40;
border-radius: 8px;
box-shadow: 0 0 0 1.75px var(--brand-secondary);
pointer-events: none;
.workflow-container.canvas-mode-hand .react-flow__pane,
.workflow-container.canvas-mode-hand .react-flow__selectionpane {
cursor: grab !important;
}
.workflow-container.canvas-mode-hand .react-flow__pane:active,
.workflow-container.canvas-mode-hand .react-flow__selectionpane:active {
cursor: grabbing !important;
}
/**
@@ -557,32 +558,6 @@ input[type="search"]::-ms-clear {
transition-duration: 300ms;
}
.streaming-effect {
@apply relative overflow-hidden;
}
.streaming-effect::after {
content: "";
@apply pointer-events-none absolute left-0 top-0 h-full w-full;
background: linear-gradient(
90deg,
rgba(128, 128, 128, 0) 0%,
rgba(128, 128, 128, 0.1) 50%,
rgba(128, 128, 128, 0) 100%
);
animation: code-shimmer 1.5s infinite;
z-index: 10;
}
.dark .streaming-effect::after {
background: linear-gradient(
90deg,
rgba(180, 180, 180, 0) 0%,
rgba(180, 180, 180, 0.1) 50%,
rgba(180, 180, 180, 0) 100%
);
}
.loading-placeholder::placeholder {
animation: placeholder-pulse 1.5s ease-in-out infinite;
}
@@ -657,6 +632,20 @@ input[type="search"]::-ms-clear {
}
}
/**
* Notification toast enter animation
*/
@keyframes notification-enter {
from {
opacity: 0;
transform: translateX(-16px);
}
to {
opacity: 1;
transform: translateX(var(--stack-offset, 0px));
}
}
/**
* @deprecated
* Legacy globals (light/dark) kept for backward-compat with old classes.

File diff suppressed because it is too large.

View File

@@ -3,13 +3,60 @@
*
* @vitest-environment node
*/
import {
createMockRequest,
mockConsoleLogger,
mockCryptoUuid,
mockDrizzleOrm,
mockUuid,
setupCommonApiMocks,
} from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest, setupAuthApiMocks } from '@/app/api/__test-utils__/utils'
vi.mock('@/lib/core/utils/urls', () => ({
getBaseUrl: vi.fn(() => 'https://app.example.com'),
}))
/** Setup auth API mocks for testing authentication routes */
function setupAuthApiMocks(
options: {
operations?: {
forgetPassword?: { success?: boolean; error?: string }
resetPassword?: { success?: boolean; error?: string }
}
} = {}
) {
setupCommonApiMocks()
mockUuid()
mockCryptoUuid()
mockConsoleLogger()
mockDrizzleOrm()
const { operations = {} } = options
const defaultOperations = {
forgetPassword: { success: true, error: 'Forget password error', ...operations.forgetPassword },
resetPassword: { success: true, error: 'Reset password error', ...operations.resetPassword },
}
const createAuthMethod = (config: { success?: boolean; error?: string }) => {
return vi.fn().mockImplementation(() => {
if (config.success) {
return Promise.resolve()
}
return Promise.reject(new Error(config.error))
})
}
vi.doMock('@/lib/auth', () => ({
auth: {
api: {
forgetPassword: createAuthMethod(defaultOperations.forgetPassword),
resetPassword: createAuthMethod(defaultOperations.resetPassword),
},
},
}))
}
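A brief usage sketch for the helper above, exercising a failure path; the route import path is hypothetical.

```typescript
// Example test using the inlined setupAuthApiMocks helper.
it('returns an error when forgetPassword fails', async () => {
  setupAuthApiMocks({ operations: { forgetPassword: { success: false, error: 'boom' } } })
  const req = createMockRequest('POST', { email: 'user@example.com' })
  const { POST } = await import('./route') // hypothetical route module path
  const res = await POST(req)
  expect(res.status).toBeGreaterThanOrEqual(400)
})
```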
describe('Forget Password API Route', () => {
beforeEach(() => {
vi.resetModules()

View File

@@ -3,8 +3,8 @@
*
* @vitest-environment node
*/
import { createMockLogger, createMockRequest } from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockLogger, createMockRequest } from '@/app/api/__test-utils__/utils'
describe('OAuth Connections API Route', () => {
const mockGetSession = vi.fn()

View File

@@ -4,9 +4,9 @@
* @vitest-environment node
*/
import { createMockLogger } from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockLogger } from '@/app/api/__test-utils__/utils'
describe('OAuth Credentials API Route', () => {
const mockGetSession = vi.fn()

View File

@@ -3,8 +3,8 @@
*
* @vitest-environment node
*/
import { createMockLogger, createMockRequest } from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockLogger, createMockRequest } from '@/app/api/__test-utils__/utils'
describe('OAuth Disconnect API Route', () => {
const mockGetSession = vi.fn()

View File

@@ -3,8 +3,8 @@
*
* @vitest-environment node
*/
import { createMockLogger, createMockRequest } from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockLogger, createMockRequest } from '@/app/api/__test-utils__/utils'
describe('OAuth Token API Routes', () => {
const mockGetUserId = vi.fn()

View File

@@ -3,8 +3,55 @@
*
* @vitest-environment node
*/
import {
createMockRequest,
mockConsoleLogger,
mockCryptoUuid,
mockDrizzleOrm,
mockUuid,
setupCommonApiMocks,
} from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest, setupAuthApiMocks } from '@/app/api/__test-utils__/utils'
/** Setup auth API mocks for testing authentication routes */
function setupAuthApiMocks(
options: {
operations?: {
forgetPassword?: { success?: boolean; error?: string }
resetPassword?: { success?: boolean; error?: string }
}
} = {}
) {
setupCommonApiMocks()
mockUuid()
mockCryptoUuid()
mockConsoleLogger()
mockDrizzleOrm()
const { operations = {} } = options
const defaultOperations = {
forgetPassword: { success: true, error: 'Forget password error', ...operations.forgetPassword },
resetPassword: { success: true, error: 'Reset password error', ...operations.resetPassword },
}
const createAuthMethod = (config: { success?: boolean; error?: string }) => {
return vi.fn().mockImplementation(() => {
if (config.success) {
return Promise.resolve()
}
return Promise.reject(new Error(config.error))
})
}
vi.doMock('@/lib/auth', () => ({
auth: {
api: {
forgetPassword: createAuthMethod(defaultOperations.forgetPassword),
resetPassword: createAuthMethod(defaultOperations.resetPassword),
},
},
}))
}
describe('Reset Password API Route', () => {
beforeEach(() => {

View File

@@ -5,7 +5,34 @@
*/
import { loggerMock } from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest } from '@/app/api/__test-utils__/utils'
/**
* Creates a mock NextRequest with cookies support for testing.
*/
function createMockNextRequest(
method = 'GET',
body?: unknown,
headers: Record<string, string> = {},
url = 'http://localhost:3000/api/test'
): any {
const headersObj = new Headers({
'Content-Type': 'application/json',
...headers,
})
return {
method,
headers: headersObj,
cookies: {
get: vi.fn().mockReturnValue(undefined),
},
json:
body !== undefined
? vi.fn().mockResolvedValue(body)
: vi.fn().mockRejectedValue(new Error('No body')),
url,
}
}
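For example (values illustrative), a POST with a JSON body and an extra header:
const req = createMockNextRequest('POST', { password: 'hunter2' }, { 'X-Forwarded-For': '127.0.0.1' })
const parsed = await req.json() // resolves to { password: 'hunter2' }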
const createMockStream = () => {
return new ReadableStream({
@@ -71,10 +98,15 @@ vi.mock('@/lib/core/utils/request', () => ({
generateRequestId: vi.fn().mockReturnValue('test-request-id'),
}))
vi.mock('@/lib/core/security/encryption', () => ({
decryptSecret: vi.fn().mockResolvedValue({ decrypted: 'test-password' }),
}))
describe('Chat Identifier API Route', () => {
const mockAddCorsHeaders = vi.fn().mockImplementation((response) => response)
const mockValidateChatAuth = vi.fn().mockResolvedValue({ authorized: true })
const mockSetChatAuthCookie = vi.fn()
const mockValidateAuthToken = vi.fn().mockReturnValue(false)
const mockChatResult = [
{
@@ -114,11 +146,16 @@ describe('Chat Identifier API Route', () => {
beforeEach(() => {
vi.resetModules()
vi.doMock('@/app/api/chat/utils', () => ({
vi.doMock('@/lib/core/security/deployment', () => ({
addCorsHeaders: mockAddCorsHeaders,
validateAuthToken: mockValidateAuthToken,
setDeploymentAuthCookie: vi.fn(),
isEmailAllowed: vi.fn().mockReturnValue(false),
}))
vi.doMock('@/app/api/chat/utils', () => ({
validateChatAuth: mockValidateChatAuth,
setChatAuthCookie: mockSetChatAuthCookie,
validateAuthToken: vi.fn().mockReturnValue(true),
}))
// Mock logger - use loggerMock from @sim/testing
@@ -175,7 +212,7 @@ describe('Chat Identifier API Route', () => {
describe('GET endpoint', () => {
it('should return chat info for a valid identifier', async () => {
const req = createMockRequest('GET')
const req = createMockNextRequest('GET')
const params = Promise.resolve({ identifier: 'test-chat' })
const { GET } = await import('@/app/api/chat/[identifier]/route')
@@ -206,7 +243,7 @@ describe('Chat Identifier API Route', () => {
}
})
const req = createMockRequest('GET')
const req = createMockNextRequest('GET')
const params = Promise.resolve({ identifier: 'nonexistent' })
const { GET } = await import('@/app/api/chat/[identifier]/route')
@@ -240,7 +277,7 @@ describe('Chat Identifier API Route', () => {
}
})
const req = createMockRequest('GET')
const req = createMockNextRequest('GET')
const params = Promise.resolve({ identifier: 'inactive-chat' })
const { GET } = await import('@/app/api/chat/[identifier]/route')
@@ -261,7 +298,7 @@ describe('Chat Identifier API Route', () => {
error: 'auth_required_password',
}))
const req = createMockRequest('GET')
const req = createMockNextRequest('GET')
const params = Promise.resolve({ identifier: 'password-protected-chat' })
const { GET } = await import('@/app/api/chat/[identifier]/route')
@@ -282,7 +319,7 @@ describe('Chat Identifier API Route', () => {
describe('POST endpoint', () => {
it('should handle authentication requests without input', async () => {
const req = createMockRequest('POST', { password: 'test-password' })
const req = createMockNextRequest('POST', { password: 'test-password' })
const params = Promise.resolve({ identifier: 'password-protected-chat' })
const { POST } = await import('@/app/api/chat/[identifier]/route')
@@ -298,7 +335,7 @@ describe('Chat Identifier API Route', () => {
})
it('should return 400 for requests without input', async () => {
const req = createMockRequest('POST', {})
const req = createMockNextRequest('POST', {})
const params = Promise.resolve({ identifier: 'test-chat' })
const { POST } = await import('@/app/api/chat/[identifier]/route')
@@ -319,7 +356,7 @@ describe('Chat Identifier API Route', () => {
error: 'Authentication required',
}))
const req = createMockRequest('POST', { input: 'Hello' })
const req = createMockNextRequest('POST', { input: 'Hello' })
const params = Promise.resolve({ identifier: 'protected-chat' })
const { POST } = await import('@/app/api/chat/[identifier]/route')
@@ -350,7 +387,7 @@ describe('Chat Identifier API Route', () => {
},
})
const req = createMockRequest('POST', { input: 'Hello' })
const req = createMockNextRequest('POST', { input: 'Hello' })
const params = Promise.resolve({ identifier: 'test-chat' })
const { POST } = await import('@/app/api/chat/[identifier]/route')
@@ -369,7 +406,10 @@ describe('Chat Identifier API Route', () => {
})
it('should return streaming response for valid chat messages', async () => {
const req = createMockRequest('POST', { input: 'Hello world', conversationId: 'conv-123' })
const req = createMockNextRequest('POST', {
input: 'Hello world',
conversationId: 'conv-123',
})
const params = Promise.resolve({ identifier: 'test-chat' })
const { POST } = await import('@/app/api/chat/[identifier]/route')
@@ -401,7 +441,7 @@ describe('Chat Identifier API Route', () => {
}, 10000)
it('should handle streaming response body correctly', async () => {
const req = createMockRequest('POST', { input: 'Hello world' })
const req = createMockNextRequest('POST', { input: 'Hello world' })
const params = Promise.resolve({ identifier: 'test-chat' })
const { POST } = await import('@/app/api/chat/[identifier]/route')
@@ -431,7 +471,7 @@ describe('Chat Identifier API Route', () => {
throw new Error('Execution failed')
})
const req = createMockRequest('POST', { input: 'Trigger error' })
const req = createMockNextRequest('POST', { input: 'Trigger error' })
const params = Promise.resolve({ identifier: 'test-chat' })
const { POST } = await import('@/app/api/chat/[identifier]/route')
@@ -470,7 +510,7 @@ describe('Chat Identifier API Route', () => {
})
it('should pass conversationId to streaming execution when provided', async () => {
const req = createMockRequest('POST', {
const req = createMockNextRequest('POST', {
input: 'Hello world',
conversationId: 'test-conversation-123',
})
@@ -492,7 +532,7 @@ describe('Chat Identifier API Route', () => {
})
it('should handle missing conversationId gracefully', async () => {
const req = createMockRequest('POST', { input: 'Hello world' })
const req = createMockNextRequest('POST', { input: 'Hello world' })
const params = Promise.resolve({ identifier: 'test-chat' })
const { POST } = await import('@/app/api/chat/[identifier]/route')

View File

@@ -3,9 +3,9 @@
*
* @vitest-environment node
*/
import { mockAuth, mockCryptoUuid, setupCommonApiMocks } from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { mockAuth, mockCryptoUuid, setupCommonApiMocks } from '@/app/api/__test-utils__/utils'
describe('Copilot API Keys API Route', () => {
const mockFetch = vi.fn()

View File

@@ -3,14 +3,9 @@
*
* @vitest-environment node
*/
import { createMockRequest, mockAuth, mockCryptoUuid, setupCommonApiMocks } from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockAuth,
mockCryptoUuid,
setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'
describe('Copilot Chat Delete API Route', () => {
const mockDelete = vi.fn()

View File

@@ -8,6 +8,7 @@ import { getSession } from '@/lib/auth'
import { generateChatTitle } from '@/lib/copilot/chat-title'
import { getCopilotModel } from '@/lib/copilot/config'
import { SIM_AGENT_API_URL_DEFAULT, SIM_AGENT_VERSION } from '@/lib/copilot/constants'
import { COPILOT_MODEL_IDS, COPILOT_REQUEST_MODES } from '@/lib/copilot/models'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
@@ -40,34 +41,8 @@ const ChatMessageSchema = z.object({
userMessageId: z.string().optional(), // ID from frontend for the user message
chatId: z.string().optional(),
workflowId: z.string().min(1, 'Workflow ID is required'),
model: z
.enum([
'gpt-5-fast',
'gpt-5',
'gpt-5-medium',
'gpt-5-high',
'gpt-5.1-fast',
'gpt-5.1',
'gpt-5.1-medium',
'gpt-5.1-high',
'gpt-5-codex',
'gpt-5.1-codex',
'gpt-5.2',
'gpt-5.2-codex',
'gpt-5.2-pro',
'gpt-4o',
'gpt-4.1',
'o3',
'claude-4-sonnet',
'claude-4.5-haiku',
'claude-4.5-sonnet',
'claude-4.5-opus',
'claude-4.1-opus',
'gemini-3-pro',
])
.optional()
.default('claude-4.5-opus'),
mode: z.enum(['ask', 'agent', 'plan']).optional().default('agent'),
model: z.enum(COPILOT_MODEL_IDS).optional().default('claude-4.5-opus'),
mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'),
prefetch: z.boolean().optional(),
createNewChat: z.boolean().optional().default(false),
stream: z.boolean().optional().default(true),
@@ -295,7 +270,8 @@ export async function POST(req: NextRequest) {
}
const defaults = getCopilotModel('chat')
const modelToUse = env.COPILOT_MODEL || defaults.model
const selectedModel = model || defaults.model
const envModel = env.COPILOT_MODEL || defaults.model
let providerConfig: CopilotProviderConfig | undefined
const providerEnv = env.COPILOT_PROVIDER as any
@@ -304,7 +280,7 @@ export async function POST(req: NextRequest) {
if (providerEnv === 'azure-openai') {
providerConfig = {
provider: 'azure-openai',
model: modelToUse,
model: envModel,
apiKey: env.AZURE_OPENAI_API_KEY,
apiVersion: 'preview',
endpoint: env.AZURE_OPENAI_ENDPOINT,
@@ -312,7 +288,7 @@ export async function POST(req: NextRequest) {
} else if (providerEnv === 'vertex') {
providerConfig = {
provider: 'vertex',
model: modelToUse,
model: envModel,
apiKey: env.COPILOT_API_KEY,
vertexProject: env.VERTEX_PROJECT,
vertexLocation: env.VERTEX_LOCATION,
@@ -320,12 +296,15 @@ export async function POST(req: NextRequest) {
} else {
providerConfig = {
provider: providerEnv,
model: modelToUse,
model: selectedModel,
apiKey: env.COPILOT_API_KEY,
}
}
}
const effectiveMode = mode === 'agent' ? 'build' : mode
const transportMode = effectiveMode === 'build' ? 'agent' : effectiveMode
// Determine conversationId to use for this request
const effectiveConversationId =
(currentChat?.conversationId as string | undefined) || conversationId
@@ -345,7 +324,7 @@ export async function POST(req: NextRequest) {
}
} | null = null
if (mode === 'agent') {
if (effectiveMode === 'build') {
// Build base tools (executed locally, not deferred)
// Include function_execute for code execution capability
baseTools = [
@@ -452,8 +431,8 @@ export async function POST(req: NextRequest) {
userId: authenticatedUserId,
stream: stream,
streamToolCalls: true,
model: model,
mode: mode,
model: selectedModel,
mode: transportMode,
messageId: userMessageIdToUse,
version: SIM_AGENT_VERSION,
...(providerConfig ? { provider: providerConfig } : {}),
@@ -477,7 +456,7 @@ export async function POST(req: NextRequest) {
hasConversationId: !!effectiveConversationId,
hasFileAttachments: processedFileContents.length > 0,
messageLength: message.length,
mode,
mode: effectiveMode,
hasTools: integrationTools.length > 0,
toolCount: integrationTools.length,
hasBaseTools: baseTools.length > 0,
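A minimal sketch of the mode round-trip introduced above: the API accepts 'agent', persists and logs it as 'build', but still sends 'agent' over the transport; 'ask' and 'plan' pass through unchanged.
const modes = ['agent', 'ask', 'plan'] as const
for (const mode of modes) {
  const effectiveMode = mode === 'agent' ? 'build' : mode
  const transportMode = effectiveMode === 'build' ? 'agent' : effectiveMode
  console.log(mode, '->', effectiveMode, '->', transportMode)
}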

View File

@@ -3,14 +3,9 @@
*
* @vitest-environment node
*/
import { createMockRequest, mockAuth, mockCryptoUuid, setupCommonApiMocks } from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockAuth,
mockCryptoUuid,
setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'
describe('Copilot Chat Update Messages API Route', () => {
const mockSelect = vi.fn()

View File

@@ -4,6 +4,7 @@ import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { COPILOT_MODES } from '@/lib/copilot/models'
import {
authenticateCopilotRequestSessionOnly,
createInternalServerErrorResponse,
@@ -45,7 +46,7 @@ const UpdateMessagesSchema = z.object({
planArtifact: z.string().nullable().optional(),
config: z
.object({
mode: z.enum(['ask', 'build', 'plan']).optional(),
mode: z.enum(COPILOT_MODES).optional(),
model: z.string().optional(),
})
.nullable()

View File

@@ -3,8 +3,8 @@
*
* @vitest-environment node
*/
import { mockCryptoUuid, setupCommonApiMocks } from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { mockCryptoUuid, setupCommonApiMocks } from '@/app/api/__test-utils__/utils'
describe('Copilot Chats List API Route', () => {
const mockSelect = vi.fn()

View File

@@ -3,14 +3,9 @@
*
* @vitest-environment node
*/
import { createMockRequest, mockAuth, mockCryptoUuid, setupCommonApiMocks } from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockAuth,
mockCryptoUuid,
setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'
describe('Copilot Checkpoints Revert API Route', () => {
const mockSelect = vi.fn()

View File

@@ -3,14 +3,9 @@
*
* @vitest-environment node
*/
import { createMockRequest, mockAuth, mockCryptoUuid, setupCommonApiMocks } from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockAuth,
mockCryptoUuid,
setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'
describe('Copilot Checkpoints API Route', () => {
const mockSelect = vi.fn()

View File

@@ -3,14 +3,9 @@
*
* @vitest-environment node
*/
import { createMockRequest, mockAuth, mockCryptoUuid, setupCommonApiMocks } from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockAuth,
mockCryptoUuid,
setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'
describe('Copilot Confirm API Route', () => {
const mockRedisExists = vi.fn()

View File

@@ -14,8 +14,7 @@ import {
import { generateRequestId } from '@/lib/core/utils/request'
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils'
import { REFERENCE } from '@/executor/constants'
import { createEnvVarPattern } from '@/executor/utils/reference-validation'
import { resolveEnvVarReferences } from '@/executor/utils/reference-validation'
import { executeTool } from '@/tools'
import { getTool, resolveToolId } from '@/tools/utils'
@@ -28,45 +27,6 @@ const ExecuteToolSchema = z.object({
workflowId: z.string().optional(),
})
/**
* Resolves all {{ENV_VAR}} references in a value recursively
* Works with strings, arrays, and objects
*/
function resolveEnvVarReferences(value: any, envVars: Record<string, string>): any {
if (typeof value === 'string') {
// Check for exact match: entire string is "{{VAR_NAME}}"
const exactMatchPattern = new RegExp(
`^\\${REFERENCE.ENV_VAR_START}([^}]+)\\${REFERENCE.ENV_VAR_END}$`
)
const exactMatch = exactMatchPattern.exec(value)
if (exactMatch) {
const envVarName = exactMatch[1].trim()
return envVars[envVarName] ?? value
}
// Check for embedded references: "prefix {{VAR}} suffix"
const envVarPattern = createEnvVarPattern()
return value.replace(envVarPattern, (match, varName) => {
const trimmedName = varName.trim()
return envVars[trimmedName] ?? match
})
}
if (Array.isArray(value)) {
return value.map((item) => resolveEnvVarReferences(item, envVars))
}
if (value !== null && typeof value === 'object') {
const resolved: Record<string, any> = {}
for (const [key, val] of Object.entries(value)) {
resolved[key] = resolveEnvVarReferences(val, envVars)
}
return resolved
}
return value
}
export async function POST(req: NextRequest) {
const tracker = createRequestTracker()
@@ -145,7 +105,17 @@ export async function POST(req: NextRequest) {
// Build execution params starting with LLM-provided arguments
// Resolve all {{ENV_VAR}} references in the arguments
const executionParams: Record<string, any> = resolveEnvVarReferences(toolArgs, decryptedEnvVars)
const executionParams: Record<string, any> = resolveEnvVarReferences(
toolArgs,
decryptedEnvVars,
{
resolveExactMatch: true,
allowEmbedded: true,
trimKeys: true,
onMissing: 'keep',
deep: true,
}
) as Record<string, any>
logger.info(`[${tracker.requestId}] Resolved env var references in arguments`, {
toolName,

View File

@@ -3,13 +3,9 @@
*
* @vitest-environment node
*/
import { createMockRequest, mockCryptoUuid, setupCommonApiMocks } from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockCryptoUuid,
setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'
describe('Copilot Feedback API Route', () => {
const mockInsert = vi.fn()

View File

@@ -3,13 +3,9 @@
*
* @vitest-environment node
*/
import { createMockRequest, mockCryptoUuid, setupCommonApiMocks } from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockCryptoUuid,
setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'
describe('Copilot Stats API Route', () => {
const mockFetch = vi.fn()

View File

@@ -2,12 +2,13 @@ import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import type { CopilotModelId } from '@/lib/copilot/models'
import { db } from '@/../../packages/db'
import { settings } from '@/../../packages/db/schema'
const logger = createLogger('CopilotUserModelsAPI')
const DEFAULT_ENABLED_MODELS: Record<string, boolean> = {
const DEFAULT_ENABLED_MODELS: Record<CopilotModelId, boolean> = {
'gpt-4o': false,
'gpt-4.1': false,
'gpt-5-fast': false,
@@ -28,7 +29,7 @@ const DEFAULT_ENABLED_MODELS: Record<string, boolean> = {
'claude-4.5-haiku': true,
'claude-4.5-sonnet': true,
'claude-4.5-opus': true,
// 'claude-4.1-opus': true,
'claude-4.1-opus': false,
'gemini-3-pro': true,
}
@@ -54,7 +55,9 @@ export async function GET(request: NextRequest) {
const mergedModels = { ...DEFAULT_ENABLED_MODELS }
for (const [modelId, enabled] of Object.entries(userModelsMap)) {
mergedModels[modelId] = enabled
if (modelId in mergedModels) {
mergedModels[modelId as CopilotModelId] = enabled
}
}
const hasNewModels = Object.keys(DEFAULT_ENABLED_MODELS).some(

View File

@@ -1,5 +1,87 @@
import {
createMockRequest,
mockAuth,
mockCryptoUuid,
mockUuid,
setupCommonApiMocks,
} from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest, setupFileApiMocks } from '@/app/api/__test-utils__/utils'
/** Setup file API mocks for file delete tests */
function setupFileApiMocks(
options: {
authenticated?: boolean
storageProvider?: 's3' | 'blob' | 'local'
cloudEnabled?: boolean
} = {}
) {
const { authenticated = true, storageProvider = 's3', cloudEnabled = true } = options
setupCommonApiMocks()
mockUuid()
mockCryptoUuid()
const authMocks = mockAuth()
if (authenticated) {
authMocks.setAuthenticated()
} else {
authMocks.setUnauthenticated()
}
vi.doMock('@/lib/auth/hybrid', () => ({
checkHybridAuth: vi.fn().mockResolvedValue({
success: authenticated,
userId: authenticated ? 'test-user-id' : undefined,
error: authenticated ? undefined : 'Unauthorized',
}),
}))
vi.doMock('@/app/api/files/authorization', () => ({
verifyFileAccess: vi.fn().mockResolvedValue(true),
verifyWorkspaceFileAccess: vi.fn().mockResolvedValue(true),
}))
const uploadFileMock = vi.fn().mockResolvedValue({
path: '/api/files/serve/test-key.txt',
key: 'test-key.txt',
name: 'test.txt',
size: 100,
type: 'text/plain',
})
const downloadFileMock = vi.fn().mockResolvedValue(Buffer.from('test content'))
const deleteFileMock = vi.fn().mockResolvedValue(undefined)
const hasCloudStorageMock = vi.fn().mockReturnValue(cloudEnabled)
vi.doMock('@/lib/uploads', () => ({
getStorageProvider: vi.fn().mockReturnValue(storageProvider),
isUsingCloudStorage: vi.fn().mockReturnValue(cloudEnabled),
StorageService: {
uploadFile: uploadFileMock,
downloadFile: downloadFileMock,
deleteFile: deleteFileMock,
hasCloudStorage: hasCloudStorageMock,
},
uploadFile: uploadFileMock,
downloadFile: downloadFileMock,
deleteFile: deleteFileMock,
hasCloudStorage: hasCloudStorageMock,
}))
vi.doMock('@/lib/uploads/core/storage-service', () => ({
uploadFile: uploadFileMock,
downloadFile: downloadFileMock,
deleteFile: deleteFileMock,
hasCloudStorage: hasCloudStorageMock,
}))
vi.doMock('fs/promises', () => ({
unlink: vi.fn().mockResolvedValue(undefined),
access: vi.fn().mockResolvedValue(undefined),
stat: vi.fn().mockResolvedValue({ isFile: () => true }),
}))
return { auth: authMocks }
}
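For example, a test can drive the unauthenticated, local-storage path with the options the helper above exposes:
setupFileApiMocks({ authenticated: false, storageProvider: 'local', cloudEnabled: false })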
describe('File Delete API Route', () => {
beforeEach(() => {

View File

@@ -1,12 +1,59 @@
import path from 'path'
import { NextRequest } from 'next/server'
/**
* Tests for file parse API route
*
* @vitest-environment node
*/
import {
createMockRequest,
mockAuth,
mockCryptoUuid,
mockUuid,
setupCommonApiMocks,
} from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest, setupFileApiMocks } from '@/app/api/__test-utils__/utils'
function setupFileApiMocks(
options: {
authenticated?: boolean
storageProvider?: 's3' | 'blob' | 'local'
cloudEnabled?: boolean
} = {}
) {
const { authenticated = true, storageProvider = 's3', cloudEnabled = true } = options
setupCommonApiMocks()
mockUuid()
mockCryptoUuid()
const authMocks = mockAuth()
if (authenticated) {
authMocks.setAuthenticated()
} else {
authMocks.setUnauthenticated()
}
vi.doMock('@/lib/auth/hybrid', () => ({
checkHybridAuth: vi.fn().mockResolvedValue({
success: authenticated,
userId: authenticated ? 'test-user-id' : undefined,
error: authenticated ? undefined : 'Unauthorized',
}),
}))
vi.doMock('@/app/api/files/authorization', () => ({
verifyFileAccess: vi.fn().mockResolvedValue(true),
verifyWorkspaceFileAccess: vi.fn().mockResolvedValue(true),
}))
vi.doMock('@/lib/uploads', () => ({
getStorageProvider: vi.fn().mockReturnValue(storageProvider),
isUsingCloudStorage: vi.fn().mockReturnValue(cloudEnabled),
}))
return { auth: authMocks }
}
const mockJoin = vi.fn((...args: string[]): string => {
if (args[0] === '/test/uploads') {

View File

@@ -1,6 +1,6 @@
import { mockAuth, mockCryptoUuid, mockUuid, setupCommonApiMocks } from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { setupFileApiMocks } from '@/app/api/__test-utils__/utils'
/**
* Tests for file presigned API route
@@ -8,6 +8,106 @@ import { setupFileApiMocks } from '@/app/api/__test-utils__/utils'
* @vitest-environment node
*/
function setupFileApiMocks(
options: {
authenticated?: boolean
storageProvider?: 's3' | 'blob' | 'local'
cloudEnabled?: boolean
} = {}
) {
const { authenticated = true, storageProvider = 's3', cloudEnabled = true } = options
setupCommonApiMocks()
mockUuid()
mockCryptoUuid()
const authMocks = mockAuth()
if (authenticated) {
authMocks.setAuthenticated()
} else {
authMocks.setUnauthenticated()
}
vi.doMock('@/lib/auth/hybrid', () => ({
checkHybridAuth: vi.fn().mockResolvedValue({
success: authenticated,
userId: authenticated ? 'test-user-id' : undefined,
error: authenticated ? undefined : 'Unauthorized',
}),
}))
vi.doMock('@/app/api/files/authorization', () => ({
verifyFileAccess: vi.fn().mockResolvedValue(true),
verifyWorkspaceFileAccess: vi.fn().mockResolvedValue(true),
}))
const useBlobStorage = storageProvider === 'blob' && cloudEnabled
const useS3Storage = storageProvider === 's3' && cloudEnabled
vi.doMock('@/lib/uploads/config', () => ({
USE_BLOB_STORAGE: useBlobStorage,
USE_S3_STORAGE: useS3Storage,
UPLOAD_DIR: '/uploads',
getStorageConfig: vi.fn().mockReturnValue(
useBlobStorage
? {
accountName: 'testaccount',
accountKey: 'testkey',
connectionString: 'testconnection',
containerName: 'testcontainer',
}
: {
bucket: 'test-bucket',
region: 'us-east-1',
}
),
isUsingCloudStorage: vi.fn().mockReturnValue(cloudEnabled),
getStorageProvider: vi
.fn()
.mockReturnValue(
storageProvider === 'blob' ? 'Azure Blob' : storageProvider === 's3' ? 'S3' : 'Local'
),
}))
const mockGeneratePresignedUploadUrl = vi.fn().mockImplementation(async (opts) => {
const timestamp = Date.now()
const safeFileName = opts.fileName.replace(/[^a-zA-Z0-9.-]/g, '_')
const key = `${opts.context}/${timestamp}-ik3a6w4-${safeFileName}`
return {
url: 'https://example.com/presigned-url',
key,
}
})
vi.doMock('@/lib/uploads/core/storage-service', () => ({
hasCloudStorage: vi.fn().mockReturnValue(cloudEnabled),
generatePresignedUploadUrl: mockGeneratePresignedUploadUrl,
generatePresignedDownloadUrl: vi.fn().mockResolvedValue('https://example.com/presigned-url'),
}))
vi.doMock('@/lib/uploads/utils/validation', () => ({
validateFileType: vi.fn().mockReturnValue(null),
}))
vi.doMock('@/lib/uploads', () => ({
CopilotFiles: {
generateCopilotUploadUrl: vi.fn().mockResolvedValue({
url: 'https://example.com/presigned-url',
key: 'copilot/test-key.txt',
}),
isImageFileType: vi.fn().mockReturnValue(true),
},
getStorageProvider: vi
.fn()
.mockReturnValue(
storageProvider === 'blob' ? 'Azure Blob' : storageProvider === 's3' ? 'S3' : 'Local'
),
isUsingCloudStorage: vi.fn().mockReturnValue(cloudEnabled),
}))
return { auth: authMocks }
}
describe('/api/files/presigned', () => {
beforeEach(() => {
vi.clearAllMocks()
@@ -210,7 +310,7 @@ describe('/api/files/presigned', () => {
const data = await response.json()
expect(response.status).toBe(200)
expect(data.fileInfo.key).toMatch(/^kb\/.*knowledge-doc\.pdf$/)
expect(data.fileInfo.key).toMatch(/^knowledge-base\/.*knowledge-doc\.pdf$/)
expect(data.directUploadSupported).toBe(true)
})

View File

@@ -1,11 +1,49 @@
import { NextRequest } from 'next/server'
/**
* Tests for file serve API route
*
* @vitest-environment node
*/
import {
defaultMockUser,
mockAuth,
mockCryptoUuid,
mockUuid,
setupCommonApiMocks,
} from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { setupApiTestMocks } from '@/app/api/__test-utils__/utils'
function setupApiTestMocks(
options: {
authenticated?: boolean
user?: { id: string; email: string }
withFileSystem?: boolean
withUploadUtils?: boolean
} = {}
) {
const { authenticated = true, user = defaultMockUser, withFileSystem = false } = options
setupCommonApiMocks()
mockUuid()
mockCryptoUuid()
const authMocks = mockAuth(user)
if (authenticated) {
authMocks.setAuthenticated(user)
} else {
authMocks.setUnauthenticated()
}
if (withFileSystem) {
vi.doMock('fs/promises', () => ({
readFile: vi.fn().mockResolvedValue(Buffer.from('test content')),
access: vi.fn().mockResolvedValue(undefined),
stat: vi.fn().mockResolvedValue({ isFile: () => true, size: 100 }),
}))
}
return { auth: authMocks }
}
describe('File Serve API Route', () => {
beforeEach(() => {
@@ -31,6 +69,17 @@ describe('File Serve API Route', () => {
existsSync: vi.fn().mockReturnValue(true),
}))
vi.doMock('@/lib/uploads', () => ({
CopilotFiles: {
downloadCopilotFile: vi.fn(),
},
isUsingCloudStorage: vi.fn().mockReturnValue(false),
}))
vi.doMock('@/lib/uploads/utils/file-utils', () => ({
inferContextFromKey: vi.fn().mockReturnValue('workspace'),
}))
vi.doMock('@/app/api/files/utils', () => ({
FileNotFoundError: class FileNotFoundError extends Error {
constructor(message: string) {
@@ -126,6 +175,17 @@ describe('File Serve API Route', () => {
verifyFileAccess: vi.fn().mockResolvedValue(true),
}))
vi.doMock('@/lib/uploads', () => ({
CopilotFiles: {
downloadCopilotFile: vi.fn(),
},
isUsingCloudStorage: vi.fn().mockReturnValue(false),
}))
vi.doMock('@/lib/uploads/utils/file-utils', () => ({
inferContextFromKey: vi.fn().mockReturnValue('workspace'),
}))
const req = new NextRequest(
'http://localhost:3000/api/files/serve/workspace/test-workspace-id/nested-path-file.txt'
)

View File

@@ -1,11 +1,76 @@
import { NextRequest } from 'next/server'
/**
* Tests for file upload API route
*
* @vitest-environment node
*/
import { mockAuth, mockCryptoUuid, mockUuid, setupCommonApiMocks } from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { setupFileApiMocks } from '@/app/api/__test-utils__/utils'
function setupFileApiMocks(
options: {
authenticated?: boolean
storageProvider?: 's3' | 'blob' | 'local'
cloudEnabled?: boolean
} = {}
) {
const { authenticated = true, storageProvider = 's3', cloudEnabled = true } = options
setupCommonApiMocks()
mockUuid()
mockCryptoUuid()
const authMocks = mockAuth()
if (authenticated) {
authMocks.setAuthenticated()
} else {
authMocks.setUnauthenticated()
}
vi.doMock('@/lib/auth/hybrid', () => ({
checkHybridAuth: vi.fn().mockResolvedValue({
success: authenticated,
userId: authenticated ? 'test-user-id' : undefined,
error: authenticated ? undefined : 'Unauthorized',
}),
}))
vi.doMock('@/app/api/files/authorization', () => ({
verifyFileAccess: vi.fn().mockResolvedValue(true),
verifyWorkspaceFileAccess: vi.fn().mockResolvedValue(true),
verifyKBFileAccess: vi.fn().mockResolvedValue(true),
verifyCopilotFileAccess: vi.fn().mockResolvedValue(true),
}))
vi.doMock('@/lib/uploads/contexts/workspace', () => ({
uploadWorkspaceFile: vi.fn().mockResolvedValue({
id: 'test-file-id',
name: 'test.txt',
url: '/api/files/serve/workspace/test-workspace-id/test-file.txt',
size: 100,
type: 'text/plain',
key: 'workspace/test-workspace-id/1234567890-test.txt',
uploadedAt: new Date().toISOString(),
expiresAt: new Date(Date.now() + 24 * 60 * 60 * 1000).toISOString(),
}),
}))
const uploadFileMock = vi.fn().mockResolvedValue({
path: '/api/files/serve/test-key.txt',
key: 'test-key.txt',
name: 'test.txt',
size: 100,
type: 'text/plain',
})
vi.doMock('@/lib/uploads', () => ({
getStorageProvider: vi.fn().mockReturnValue(storageProvider),
isUsingCloudStorage: vi.fn().mockReturnValue(cloudEnabled),
uploadFile: uploadFileMock,
}))
return { auth: authMocks }
}
describe('File Upload API Route', () => {
const createMockFormData = (files: File[], context = 'workspace'): FormData => {

View File

@@ -3,15 +3,24 @@
*
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
type CapturedFolderValues,
createMockRequest,
type MockUser,
mockAuth,
mockLogger,
mockConsoleLogger,
setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'
} from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
/** Type for captured folder values in tests */
interface CapturedFolderValues {
name?: string
color?: string
parentId?: string | null
isExpanded?: boolean
sortOrder?: number
updatedAt?: Date
}
interface FolderDbMockOptions {
folderLookupResult?: any
@@ -21,6 +30,8 @@ interface FolderDbMockOptions {
}
describe('Individual Folder API Route', () => {
let mockLogger: ReturnType<typeof mockConsoleLogger>
const TEST_USER: MockUser = {
id: 'user-123',
email: 'test@example.com',
@@ -39,7 +50,8 @@ describe('Individual Folder API Route', () => {
updatedAt: new Date('2024-01-01T00:00:00Z'),
}
const { mockAuthenticatedUser, mockUnauthenticated } = mockAuth(TEST_USER)
let mockAuthenticatedUser: (user?: MockUser) => void
let mockUnauthenticated: () => void
const mockGetUserEntityPermissions = vi.fn()
function createFolderDbMock(options: FolderDbMockOptions = {}) {
@@ -110,6 +122,10 @@ describe('Individual Folder API Route', () => {
vi.resetModules()
vi.clearAllMocks()
setupCommonApiMocks()
mockLogger = mockConsoleLogger()
const auth = mockAuth(TEST_USER)
mockAuthenticatedUser = auth.mockAuthenticatedUser
mockUnauthenticated = auth.mockUnauthenticated
mockGetUserEntityPermissions.mockResolvedValue('admin')

View File

@@ -3,17 +3,46 @@
*
* @vitest-environment node
*/
import { createMockRequest, mockAuth, mockConsoleLogger, setupCommonApiMocks } from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
type CapturedFolderValues,
createMockRequest,
createMockTransaction,
mockAuth,
mockLogger,
setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'
interface CapturedFolderValues {
name?: string
color?: string
parentId?: string | null
isExpanded?: boolean
sortOrder?: number
updatedAt?: Date
}
function createMockTransaction(mockData: {
selectData?: Array<{ id: string; [key: string]: unknown }>
insertResult?: Array<{ id: string; [key: string]: unknown }>
}) {
const { selectData = [], insertResult = [] } = mockData
return vi.fn().mockImplementation(async (callback: (tx: unknown) => Promise<unknown>) => {
const tx = {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
orderBy: vi.fn().mockReturnValue({
limit: vi.fn().mockReturnValue(selectData),
}),
}),
}),
}),
insert: vi.fn().mockReturnValue({
values: vi.fn().mockReturnValue({
returning: vi.fn().mockReturnValue(insertResult),
}),
}),
}
return await callback(tx)
})
}
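Hypothetical wiring for the helper above (the mocked module shape is assumed): hand the mock transaction to the mocked db so route code calling db.transaction(cb) receives the stubbed tx.
const mockTransaction = createMockTransaction({
  selectData: [{ id: 'folder-1', sortOrder: 0 }],
  insertResult: [{ id: 'folder-2' }],
})
vi.doMock('@sim/db', () => ({ db: { transaction: mockTransaction } }))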
describe('Folders API Route', () => {
let mockLogger: ReturnType<typeof mockConsoleLogger>
const mockFolders = [
{
id: 'folder-1',
@@ -41,7 +70,8 @@ describe('Folders API Route', () => {
},
]
const { mockAuthenticatedUser, mockUnauthenticated } = mockAuth()
let mockAuthenticatedUser: () => void
let mockUnauthenticated: () => void
const mockUUID = 'mock-uuid-12345678-90ab-cdef-1234-567890abcdef'
const mockSelect = vi.fn()
@@ -63,6 +93,10 @@ describe('Folders API Route', () => {
})
setupCommonApiMocks()
mockLogger = mockConsoleLogger()
const auth = mockAuth()
mockAuthenticatedUser = auth.mockAuthenticatedUser
mockUnauthenticated = auth.mockUnauthenticated
mockSelect.mockReturnValue({ from: mockFrom })
mockFrom.mockReturnValue({ where: mockWhere })

View File

@@ -9,7 +9,9 @@ import { addCorsHeaders, validateAuthToken } from '@/lib/core/security/deploymen
import { generateRequestId } from '@/lib/core/utils/request'
import { preprocessExecution } from '@/lib/execution/preprocessing'
import { LoggingSession } from '@/lib/logs/execution/logging-session'
import { normalizeInputFormatValue } from '@/lib/workflows/input-format'
import { createStreamingResponse } from '@/lib/workflows/streaming/streaming'
import { isValidStartBlockType } from '@/lib/workflows/triggers/start-block-types'
import { setFormAuthCookie, validateFormAuth } from '@/app/api/form/utils'
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
@@ -34,22 +36,14 @@ async function getWorkflowInputSchema(workflowId: string): Promise<any[]> {
.from(workflowBlocks)
.where(eq(workflowBlocks.workflowId, workflowId))
// Find the start block (starter or start_trigger type)
const startBlock = blocks.find(
(block) => block.type === 'starter' || block.type === 'start_trigger'
)
const startBlock = blocks.find((block) => isValidStartBlockType(block.type))
if (!startBlock) {
return []
}
// Extract inputFormat from subBlocks
const subBlocks = startBlock.subBlocks as Record<string, any> | null
if (!subBlocks?.inputFormat?.value) {
return []
}
return Array.isArray(subBlocks.inputFormat.value) ? subBlocks.inputFormat.value : []
return normalizeInputFormatValue(subBlocks?.inputFormat?.value)
} catch (error) {
logger.error('Error fetching workflow input schema:', error)
return []
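For context, an assumed sketch of what normalizeInputFormatValue does, inferred from the inline code it replaces (the real helper in @/lib/workflows/input-format may handle more cases, e.g. JSON-encoded strings):
// Sketch only — name suffixed to avoid implying this is the real implementation.
function normalizeInputFormatValueSketch(value: unknown): any[] {
  return Array.isArray(value) ? value : []
}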

View File

@@ -3,10 +3,9 @@
*
* @vitest-environment node
*/
import { loggerMock } from '@sim/testing'
import { createMockRequest, loggerMock } from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest } from '@/app/api/__test-utils__/utils'
vi.mock('@/lib/execution/isolated-vm', () => ({
executeInIsolatedVM: vi.fn().mockImplementation(async (req) => {

View File

@@ -9,6 +9,7 @@ import { escapeRegExp, normalizeName, REFERENCE } from '@/executor/constants'
import {
createEnvVarPattern,
createWorkflowVariablePattern,
resolveEnvVarReferences,
} from '@/executor/utils/reference-validation'
export const dynamic = 'force-dynamic'
export const runtime = 'nodejs'
@@ -479,9 +480,29 @@ function resolveEnvironmentVariables(
const replacements: Array<{ match: string; index: number; varName: string; varValue: string }> =
[]
const resolverVars: Record<string, string> = {}
Object.entries(params).forEach(([key, value]) => {
if (value) {
resolverVars[key] = String(value)
}
})
Object.entries(envVars).forEach(([key, value]) => {
if (value) {
resolverVars[key] = value
}
})
while ((match = regex.exec(code)) !== null) {
const varName = match[1].trim()
const varValue = envVars[varName] || params[varName] || ''
const resolved = resolveEnvVarReferences(match[0], resolverVars, {
allowEmbedded: true,
resolveExactMatch: true,
trimKeys: true,
onMissing: 'empty',
deep: false,
})
const varValue =
typeof resolved === 'string' ? resolved : resolved == null ? '' : String(resolved)
replacements.push({
match: match[0],
index: match.index,

View File

@@ -3,14 +3,14 @@
*
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockAuth,
mockConsoleLogger,
mockDrizzleOrm,
mockKnowledgeSchemas,
} from '@/app/api/__test-utils__/utils'
} from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
mockKnowledgeSchemas()

View File

@@ -3,14 +3,14 @@
*
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockAuth,
mockConsoleLogger,
mockDrizzleOrm,
mockKnowledgeSchemas,
} from '@/app/api/__test-utils__/utils'
} from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
mockKnowledgeSchemas()

View File

@@ -3,14 +3,14 @@
*
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockAuth,
mockConsoleLogger,
mockDrizzleOrm,
mockKnowledgeSchemas,
} from '@/app/api/__test-utils__/utils'
} from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
mockKnowledgeSchemas()
mockDrizzleOrm()

View File

@@ -3,14 +3,14 @@
*
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockAuth,
mockConsoleLogger,
mockDrizzleOrm,
mockKnowledgeSchemas,
} from '@/app/api/__test-utils__/utils'
} from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
mockKnowledgeSchemas()
mockDrizzleOrm()

View File

@@ -5,13 +5,13 @@
*
* @vitest-environment node
*/
import { createEnvMock } from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createEnvMock,
createMockRequest,
mockConsoleLogger,
mockKnowledgeSchemas,
} from '@/app/api/__test-utils__/utils'
} from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
vi.mock('drizzle-orm', () => ({
and: vi.fn().mockImplementation((...args) => ({ and: args })),

View File

@@ -20,6 +20,7 @@ import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateInternalToken } from '@/lib/auth/internal'
import { getBaseUrl } from '@/lib/core/utils/urls'
const logger = createLogger('WorkflowMcpServeAPI')
@@ -52,6 +53,8 @@ async function getServer(serverId: string) {
id: workflowMcpServer.id,
name: workflowMcpServer.name,
workspaceId: workflowMcpServer.workspaceId,
isPublic: workflowMcpServer.isPublic,
createdBy: workflowMcpServer.createdBy,
})
.from(workflowMcpServer)
.where(eq(workflowMcpServer.id, serverId))
@@ -90,9 +93,11 @@ export async function POST(request: NextRequest, { params }: { params: Promise<R
return NextResponse.json({ error: 'Server not found' }, { status: 404 })
}
const auth = await checkHybridAuth(request, { requireWorkflowId: false })
if (!auth.success || !auth.userId) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
if (!server.isPublic) {
const auth = await checkHybridAuth(request, { requireWorkflowId: false })
if (!auth.success || !auth.userId) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
}
const body = await request.json()
@@ -138,7 +143,8 @@ export async function POST(request: NextRequest, { params }: { params: Promise<R
id,
serverId,
rpcParams as { name: string; arguments?: Record<string, unknown> },
apiKey
apiKey,
server.isPublic ? server.createdBy : undefined
)
default:
@@ -200,7 +206,8 @@ async function handleToolsCall(
id: RequestId,
serverId: string,
params: { name: string; arguments?: Record<string, unknown> } | undefined,
apiKey?: string | null
apiKey?: string | null,
publicServerOwnerId?: string
): Promise<NextResponse> {
try {
if (!params?.name) {
@@ -243,7 +250,13 @@ async function handleToolsCall(
const executeUrl = `${getBaseUrl()}/api/workflows/${tool.workflowId}/execute`
const headers: Record<string, string> = { 'Content-Type': 'application/json' }
if (apiKey) headers['X-API-Key'] = apiKey
if (publicServerOwnerId) {
const internalToken = await generateInternalToken(publicServerOwnerId)
headers.Authorization = `Bearer ${internalToken}`
} else if (apiKey) {
headers['X-API-Key'] = apiKey
}
logger.info(`Executing workflow ${tool.workflowId} via MCP tool ${params.name}`)
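A hypothetical client call against a public server (route path, ids, and tool name are assumptions for illustration): no API key is sent, because the route mints an internal token for the server owner.
const res = await fetch(`/api/workflow-mcp/${serverId}/serve`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    jsonrpc: '2.0',
    id: 1,
    method: 'tools/call',
    params: { name: 'my_workflow', arguments: { input: 'hello' } },
  }),
})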

View File

@@ -5,8 +5,7 @@ import { McpClient } from '@/lib/mcp/client'
import { getParsedBody, withMcpAuth } from '@/lib/mcp/middleware'
import type { McpServerConfig, McpTransport } from '@/lib/mcp/types'
import { createMcpErrorResponse, createMcpSuccessResponse } from '@/lib/mcp/utils'
import { REFERENCE } from '@/executor/constants'
import { createEnvVarPattern } from '@/executor/utils/reference-validation'
import { resolveEnvVarReferences } from '@/executor/utils/reference-validation'
const logger = createLogger('McpServerTestAPI')
@@ -24,22 +23,23 @@ function isUrlBasedTransport(transport: McpTransport): boolean {
* Resolve environment variables in strings
*/
function resolveEnvVars(value: string, envVars: Record<string, string>): string {
const envVarPattern = createEnvVarPattern()
const envMatches = value.match(envVarPattern)
if (!envMatches) return value
let resolvedValue = value
for (const match of envMatches) {
  const envKey = match.slice(REFERENCE.ENV_VAR_START.length, -REFERENCE.ENV_VAR_END.length).trim()
  const envValue = envVars[envKey]
  if (envValue === undefined) {
    logger.warn(`Environment variable "${envKey}" not found in MCP server test`)
    continue
  }
  resolvedValue = resolvedValue.replace(match, envValue)
}
return resolvedValue
const missingVars: string[] = []
const resolvedValue = resolveEnvVarReferences(value, envVars, {
  allowEmbedded: true,
  resolveExactMatch: true,
  trimKeys: true,
  onMissing: 'keep',
  deep: false,
  missingKeys: missingVars,
}) as string
if (missingVars.length > 0) {
  const uniqueMissing = Array.from(new Set(missingVars))
  uniqueMissing.forEach((envKey) => {
    logger.warn(`Environment variable "${envKey}" not found in MCP server test`)
  })
}
return resolvedValue
}

View File

@@ -31,6 +31,7 @@ export const GET = withMcpAuth<RouteParams>('read')(
createdBy: workflowMcpServer.createdBy,
name: workflowMcpServer.name,
description: workflowMcpServer.description,
isPublic: workflowMcpServer.isPublic,
createdAt: workflowMcpServer.createdAt,
updatedAt: workflowMcpServer.updatedAt,
})
@@ -98,6 +99,9 @@ export const PATCH = withMcpAuth<RouteParams>('write')(
if (body.description !== undefined) {
updateData.description = body.description?.trim() || null
}
if (body.isPublic !== undefined) {
updateData.isPublic = body.isPublic
}
const [updatedServer] = await db
.update(workflowMcpServer)

View File

@@ -26,7 +26,6 @@ export const GET = withMcpAuth<RouteParams>('read')(
logger.info(`[${requestId}] Getting tool ${toolId} from server ${serverId}`)
// Verify server exists and belongs to workspace
const [server] = await db
.select({ id: workflowMcpServer.id })
.from(workflowMcpServer)
@@ -72,7 +71,6 @@ export const PATCH = withMcpAuth<RouteParams>('write')(
logger.info(`[${requestId}] Updating tool ${toolId} in server ${serverId}`)
// Verify server exists and belongs to workspace
const [server] = await db
.select({ id: workflowMcpServer.id })
.from(workflowMcpServer)
@@ -139,7 +137,6 @@ export const DELETE = withMcpAuth<RouteParams>('write')(
logger.info(`[${requestId}] Deleting tool ${toolId} from server ${serverId}`)
// Verify server exists and belongs to workspace
const [server] = await db
.select({ id: workflowMcpServer.id })
.from(workflowMcpServer)

View File

@@ -6,24 +6,10 @@ import type { NextRequest } from 'next/server'
import { getParsedBody, withMcpAuth } from '@/lib/mcp/middleware'
import { createMcpErrorResponse, createMcpSuccessResponse } from '@/lib/mcp/utils'
import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils'
import { hasValidStartBlockInState } from '@/lib/workflows/triggers/trigger-utils'
import { hasValidStartBlock } from '@/lib/workflows/triggers/trigger-utils.server'
const logger = createLogger('WorkflowMcpToolsAPI')
/**
* Check if a workflow has a valid start block by loading from database
*/
async function hasValidStartBlock(workflowId: string): Promise<boolean> {
try {
const normalizedData = await loadWorkflowFromNormalizedTables(workflowId)
return hasValidStartBlockInState(normalizedData)
} catch (error) {
logger.warn('Error checking for start block:', error)
return false
}
}
export const dynamic = 'force-dynamic'
interface RouteParams {
@@ -40,7 +26,6 @@ export const GET = withMcpAuth<RouteParams>('read')(
logger.info(`[${requestId}] Listing tools for workflow MCP server: ${serverId}`)
// Verify server exists and belongs to workspace
const [server] = await db
.select({ id: workflowMcpServer.id })
.from(workflowMcpServer)
@@ -53,7 +38,6 @@ export const GET = withMcpAuth<RouteParams>('read')(
return createMcpErrorResponse(new Error('Server not found'), 'Server not found', 404)
}
// Get tools with workflow details
const tools = await db
.select({
id: workflowMcpTool.id,
@@ -107,7 +91,6 @@ export const POST = withMcpAuth<RouteParams>('write')(
)
}
// Verify server exists and belongs to workspace
const [server] = await db
.select({ id: workflowMcpServer.id })
.from(workflowMcpServer)
@@ -120,7 +103,6 @@ export const POST = withMcpAuth<RouteParams>('write')(
return createMcpErrorResponse(new Error('Server not found'), 'Server not found', 404)
}
// Verify workflow exists and is deployed
const [workflowRecord] = await db
.select({
id: workflow.id,
@@ -137,7 +119,6 @@ export const POST = withMcpAuth<RouteParams>('write')(
return createMcpErrorResponse(new Error('Workflow not found'), 'Workflow not found', 404)
}
// Verify workflow belongs to the same workspace
if (workflowRecord.workspaceId !== workspaceId) {
return createMcpErrorResponse(
new Error('Workflow does not belong to this workspace'),
@@ -154,7 +135,6 @@ export const POST = withMcpAuth<RouteParams>('write')(
)
}
// Verify workflow has a valid start block
const hasStartBlock = await hasValidStartBlock(body.workflowId)
if (!hasStartBlock) {
return createMcpErrorResponse(
@@ -164,7 +144,6 @@ export const POST = withMcpAuth<RouteParams>('write')(
)
}
// Check if tool already exists for this workflow
const [existingTool] = await db
.select({ id: workflowMcpTool.id })
.from(workflowMcpTool)
@@ -190,7 +169,6 @@ export const POST = withMcpAuth<RouteParams>('write')(
workflowRecord.description ||
`Execute ${workflowRecord.name} workflow`
// Create the tool
const toolId = crypto.randomUUID()
const [tool] = await db
.insert(workflowMcpTool)

View File

@@ -1,10 +1,12 @@
import { db } from '@sim/db'
import { workflowMcpServer, workflowMcpTool } from '@sim/db/schema'
import { workflow, workflowMcpServer, workflowMcpTool } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq, inArray, sql } from 'drizzle-orm'
import type { NextRequest } from 'next/server'
import { getParsedBody, withMcpAuth } from '@/lib/mcp/middleware'
import { createMcpErrorResponse, createMcpSuccessResponse } from '@/lib/mcp/utils'
import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema'
import { hasValidStartBlock } from '@/lib/workflows/triggers/trigger-utils.server'
const logger = createLogger('WorkflowMcpServersAPI')
@@ -25,18 +27,18 @@ export const GET = withMcpAuth('read')(
createdBy: workflowMcpServer.createdBy,
name: workflowMcpServer.name,
description: workflowMcpServer.description,
isPublic: workflowMcpServer.isPublic,
createdAt: workflowMcpServer.createdAt,
updatedAt: workflowMcpServer.updatedAt,
toolCount: sql<number>`(
SELECT COUNT(*)::int
FROM "workflow_mcp_tool"
WHERE "workflow_mcp_tool"."server_id" = "workflow_mcp_server"."id"
)`.as('tool_count'),
})
.from(workflowMcpServer)
.where(eq(workflowMcpServer.workspaceId, workspaceId))
// Fetch all tools for these servers
const serverIds = servers.map((s) => s.id)
const tools =
serverIds.length > 0
@@ -49,7 +51,6 @@ export const GET = withMcpAuth('read')(
.where(inArray(workflowMcpTool.serverId, serverIds))
: []
// Group tool names by server
const toolNamesByServer: Record<string, string[]> = {}
for (const tool of tools) {
if (!toolNamesByServer[tool.serverId]) {
@@ -58,7 +59,6 @@ export const GET = withMcpAuth('read')(
toolNamesByServer[tool.serverId].push(tool.toolName)
}
// Attach tool names to servers
const serversWithToolNames = servers.map((server) => ({
...server,
toolNames: toolNamesByServer[server.id] || [],
@@ -90,6 +90,7 @@ export const POST = withMcpAuth('write')(
logger.info(`[${requestId}] Creating workflow MCP server:`, {
name: body.name,
workspaceId,
workflowIds: body.workflowIds,
})
if (!body.name) {
@@ -110,16 +111,76 @@ export const POST = withMcpAuth('write')(
createdBy: userId,
name: body.name.trim(),
description: body.description?.trim() || null,
isPublic: body.isPublic ?? false,
createdAt: new Date(),
updatedAt: new Date(),
})
.returning()
const workflowIds: string[] = body.workflowIds || []
const addedTools: Array<{ workflowId: string; toolName: string }> = []
if (workflowIds.length > 0) {
const workflows = await db
.select({
id: workflow.id,
name: workflow.name,
description: workflow.description,
isDeployed: workflow.isDeployed,
workspaceId: workflow.workspaceId,
})
.from(workflow)
.where(inArray(workflow.id, workflowIds))
for (const workflowRecord of workflows) {
if (workflowRecord.workspaceId !== workspaceId) {
logger.warn(
`[${requestId}] Skipping workflow ${workflowRecord.id} - does not belong to workspace`
)
continue
}
if (!workflowRecord.isDeployed) {
logger.warn(`[${requestId}] Skipping workflow ${workflowRecord.id} - not deployed`)
continue
}
const hasStartBlock = await hasValidStartBlock(workflowRecord.id)
if (!hasStartBlock) {
logger.warn(`[${requestId}] Skipping workflow ${workflowRecord.id} - no start block`)
continue
}
const toolName = sanitizeToolName(workflowRecord.name)
const toolDescription =
workflowRecord.description || `Execute ${workflowRecord.name} workflow`
const toolId = crypto.randomUUID()
await db.insert(workflowMcpTool).values({
id: toolId,
serverId,
workflowId: workflowRecord.id,
toolName,
toolDescription,
parameterSchema: {},
createdAt: new Date(),
updatedAt: new Date(),
})
addedTools.push({ workflowId: workflowRecord.id, toolName })
}
logger.info(
`[${requestId}] Added ${addedTools.length} tools to server ${serverId}:`,
addedTools.map((t) => t.toolName)
)
}
logger.info(
`[${requestId}] Successfully created workflow MCP server: ${body.name} (ID: ${serverId})`
)
return createMcpSuccessResponse({ server }, 201)
return createMcpSuccessResponse({ server, addedTools }, 201)
} catch (error) {
logger.error(`[${requestId}] Error creating workflow MCP server:`, error)
return createMcpErrorResponse(
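An illustrative request body for the POST handler above (ids hypothetical): workflows that are not deployed, lack a start block, or sit outside the workspace are skipped with a warning rather than failing the whole request.
const body = {
  name: 'Support tools',
  description: 'Deployed support workflows exposed over MCP',
  isPublic: true,
  workflowIds: ['wf_billing_refund', 'wf_ticket_triage'],
}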

View File

@@ -4,6 +4,8 @@ import {
invitation,
member,
organization,
permissionGroup,
permissionGroupMember,
permissions,
subscription as subscriptionTable,
user,
@@ -17,6 +19,7 @@ import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getEmailSubject, renderInvitationEmail } from '@/components/emails'
import { getSession } from '@/lib/auth'
import { hasAccessControlAccess } from '@/lib/billing'
import { requireStripeClient } from '@/lib/billing/stripe-client'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { sendEmail } from '@/lib/messaging/email/mailer'
@@ -382,6 +385,47 @@ export async function PUT(
// Don't fail the whole invitation acceptance due to this
}
// Auto-assign to permission group if one has autoAddNewMembers enabled
try {
const hasAccessControl = await hasAccessControlAccess(session.user.id)
if (hasAccessControl) {
const [autoAddGroup] = await tx
.select({ id: permissionGroup.id, name: permissionGroup.name })
.from(permissionGroup)
.where(
and(
eq(permissionGroup.organizationId, organizationId),
eq(permissionGroup.autoAddNewMembers, true)
)
)
.limit(1)
if (autoAddGroup) {
await tx.insert(permissionGroupMember).values({
id: randomUUID(),
permissionGroupId: autoAddGroup.id,
userId: session.user.id,
assignedBy: null,
assignedAt: new Date(),
})
logger.info('Auto-assigned new member to permission group', {
userId: session.user.id,
organizationId,
permissionGroupId: autoAddGroup.id,
permissionGroupName: autoAddGroup.name,
})
}
}
} catch (error) {
logger.error('Failed to auto-assign user to permission group', {
userId: session.user.id,
organizationId,
error,
})
// Don't fail the whole invitation acceptance due to this
}
const linkedWorkspaceInvitations = await tx
.select()
.from(workspaceInvitation)

View File

@@ -25,12 +25,19 @@ const configSchema = z.object({
disableMcpTools: z.boolean().optional(),
disableCustomTools: z.boolean().optional(),
hideTemplates: z.boolean().optional(),
disableInvitations: z.boolean().optional(),
hideDeployApi: z.boolean().optional(),
hideDeployMcp: z.boolean().optional(),
hideDeployA2a: z.boolean().optional(),
hideDeployChatbot: z.boolean().optional(),
hideDeployTemplate: z.boolean().optional(),
})
const updateSchema = z.object({
name: z.string().trim().min(1).max(100).optional(),
description: z.string().max(500).nullable().optional(),
config: configSchema.optional(),
autoAddNewMembers: z.boolean().optional(),
})
async function getPermissionGroupWithAccess(groupId: string, userId: string) {
@@ -44,6 +51,7 @@ async function getPermissionGroupWithAccess(groupId: string, userId: string) {
createdBy: permissionGroup.createdBy,
createdAt: permissionGroup.createdAt,
updatedAt: permissionGroup.updatedAt,
autoAddNewMembers: permissionGroup.autoAddNewMembers,
})
.from(permissionGroup)
.where(eq(permissionGroup.id, groupId))
@@ -140,11 +148,27 @@ export async function PUT(req: NextRequest, { params }: { params: Promise<{ id:
? { ...currentConfig, ...updates.config }
: currentConfig
// If setting autoAddNewMembers to true, unset it on other groups in the org first
if (updates.autoAddNewMembers === true) {
await db
.update(permissionGroup)
.set({ autoAddNewMembers: false, updatedAt: new Date() })
.where(
and(
eq(permissionGroup.organizationId, result.group.organizationId),
eq(permissionGroup.autoAddNewMembers, true)
)
)
}
await db
.update(permissionGroup)
.set({
...(updates.name !== undefined && { name: updates.name }),
...(updates.description !== undefined && { description: updates.description }),
...(updates.autoAddNewMembers !== undefined && {
autoAddNewMembers: updates.autoAddNewMembers,
}),
config: newConfig,
updatedAt: new Date(),
})

View File

@@ -26,6 +26,12 @@ const configSchema = z.object({
disableMcpTools: z.boolean().optional(),
disableCustomTools: z.boolean().optional(),
hideTemplates: z.boolean().optional(),
disableInvitations: z.boolean().optional(),
hideDeployApi: z.boolean().optional(),
hideDeployMcp: z.boolean().optional(),
hideDeployA2a: z.boolean().optional(),
hideDeployChatbot: z.boolean().optional(),
hideDeployTemplate: z.boolean().optional(),
})
const createSchema = z.object({
@@ -33,6 +39,7 @@ const createSchema = z.object({
name: z.string().trim().min(1).max(100),
description: z.string().max(500).optional(),
config: configSchema.optional(),
autoAddNewMembers: z.boolean().optional(),
})
export async function GET(req: Request) {
@@ -68,6 +75,7 @@ export async function GET(req: Request) {
createdBy: permissionGroup.createdBy,
createdAt: permissionGroup.createdAt,
updatedAt: permissionGroup.updatedAt,
autoAddNewMembers: permissionGroup.autoAddNewMembers,
creatorName: user.name,
creatorEmail: user.email,
})
@@ -111,7 +119,8 @@ export async function POST(req: Request) {
}
const body = await req.json()
const { organizationId, name, description, config } = createSchema.parse(body)
const { organizationId, name, description, config, autoAddNewMembers } =
createSchema.parse(body)
const membership = await db
.select({ id: member.id, role: member.role })
@@ -154,6 +163,19 @@ export async function POST(req: Request) {
...config,
}
// If autoAddNewMembers is true, unset it on any existing groups first
if (autoAddNewMembers) {
await db
.update(permissionGroup)
.set({ autoAddNewMembers: false, updatedAt: new Date() })
.where(
and(
eq(permissionGroup.organizationId, organizationId),
eq(permissionGroup.autoAddNewMembers, true)
)
)
}
const now = new Date()
const newGroup = {
id: crypto.randomUUID(),
@@ -164,6 +186,7 @@ export async function POST(req: Request) {
createdBy: session.user.id,
createdAt: now,
updatedAt: now,
autoAddNewMembers: autoAddNewMembers || false,
}
await db.insert(permissionGroup).values(newGroup)
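
Taken together with the PUT route above, the invariant is: at most one permission group per organization carries `autoAddNewMembers`, enforced by clearing the flag everywhere else before setting it. A hedged example of a client call to this POST handler (the endpoint path is an assumption; the body shape follows `createSchema`):

```ts
// Path is assumed: only the handler, not its route location, is in this diff.
const res = await fetch('/api/permission-groups', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    organizationId: 'org_123',
    name: 'Default members',              // 1-100 chars after trim
    description: 'Applied to everyone who joins',
    config: { hideDeployApi: true },      // optional visibility toggles
    autoAddNewMembers: true,              // clears the flag on any other group first
  }),
})
```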

View File

@@ -57,6 +57,7 @@ describe('Scheduled Workflow Execution API Route', () => {
not: vi.fn((condition) => ({ type: 'not', condition })),
isNull: vi.fn((field) => ({ type: 'isNull', field })),
or: vi.fn((...conditions) => ({ type: 'or', conditions })),
sql: vi.fn((strings, ...values) => ({ type: 'sql', strings, values })),
}))
vi.doMock('@sim/db', () => {
@@ -92,6 +93,17 @@ describe('Scheduled Workflow Execution API Route', () => {
status: 'status',
nextRunAt: 'nextRunAt',
lastQueuedAt: 'lastQueuedAt',
deploymentVersionId: 'deploymentVersionId',
},
workflowDeploymentVersion: {
id: 'id',
workflowId: 'workflowId',
isActive: 'isActive',
},
workflow: {
id: 'id',
userId: 'userId',
workspaceId: 'workspaceId',
},
}
})
@@ -134,6 +146,7 @@ describe('Scheduled Workflow Execution API Route', () => {
not: vi.fn((condition) => ({ type: 'not', condition })),
isNull: vi.fn((field) => ({ type: 'isNull', field })),
or: vi.fn((...conditions) => ({ type: 'or', conditions })),
sql: vi.fn((strings, ...values) => ({ type: 'sql', strings, values })),
}))
vi.doMock('@sim/db', () => {
@@ -169,6 +182,17 @@ describe('Scheduled Workflow Execution API Route', () => {
status: 'status',
nextRunAt: 'nextRunAt',
lastQueuedAt: 'lastQueuedAt',
deploymentVersionId: 'deploymentVersionId',
},
workflowDeploymentVersion: {
id: 'id',
workflowId: 'workflowId',
isActive: 'isActive',
},
workflow: {
id: 'id',
userId: 'userId',
workspaceId: 'workspaceId',
},
}
})
@@ -206,6 +230,7 @@ describe('Scheduled Workflow Execution API Route', () => {
not: vi.fn((condition) => ({ type: 'not', condition })),
isNull: vi.fn((field) => ({ type: 'isNull', field })),
or: vi.fn((...conditions) => ({ type: 'or', conditions })),
sql: vi.fn((strings, ...values) => ({ type: 'sql', strings, values })),
}))
vi.doMock('@sim/db', () => {
@@ -228,6 +253,17 @@ describe('Scheduled Workflow Execution API Route', () => {
status: 'status',
nextRunAt: 'nextRunAt',
lastQueuedAt: 'lastQueuedAt',
deploymentVersionId: 'deploymentVersionId',
},
workflowDeploymentVersion: {
id: 'id',
workflowId: 'workflowId',
isActive: 'isActive',
},
workflow: {
id: 'id',
userId: 'userId',
workspaceId: 'workspaceId',
},
}
})
@@ -265,6 +301,7 @@ describe('Scheduled Workflow Execution API Route', () => {
not: vi.fn((condition) => ({ type: 'not', condition })),
isNull: vi.fn((field) => ({ type: 'isNull', field })),
or: vi.fn((...conditions) => ({ type: 'or', conditions })),
sql: vi.fn((strings, ...values) => ({ type: 'sql', strings, values })),
}))
vi.doMock('@sim/db', () => {
@@ -310,6 +347,17 @@ describe('Scheduled Workflow Execution API Route', () => {
status: 'status',
nextRunAt: 'nextRunAt',
lastQueuedAt: 'lastQueuedAt',
deploymentVersionId: 'deploymentVersionId',
},
workflowDeploymentVersion: {
id: 'id',
workflowId: 'workflowId',
isActive: 'isActive',
},
workflow: {
id: 'id',
userId: 'userId',
workspaceId: 'workspaceId',
},
}
})

View File

@@ -1,7 +1,7 @@
import { db, workflowSchedule } from '@sim/db'
import { db, workflowDeploymentVersion, workflowSchedule } from '@sim/db'
import { createLogger } from '@sim/logger'
import { tasks } from '@trigger.dev/sdk'
import { and, eq, isNull, lt, lte, not, or } from 'drizzle-orm'
import { and, eq, isNull, lt, lte, not, or, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { verifyCronAuth } from '@/lib/auth/internal'
import { isTriggerDevEnabled } from '@/lib/core/config/feature-flags'
@@ -37,7 +37,8 @@ export async function GET(request: NextRequest) {
or(
isNull(workflowSchedule.lastQueuedAt),
lt(workflowSchedule.lastQueuedAt, workflowSchedule.nextRunAt)
)
),
sql`${workflowSchedule.deploymentVersionId} = (select ${workflowDeploymentVersion.id} from ${workflowDeploymentVersion} where ${workflowDeploymentVersion.workflowId} = ${workflowSchedule.workflowId} and ${workflowDeploymentVersion.isActive} = true)`
)
)
.returning({
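
The `sql` fragment is the substantive change in this hunk: the sweep now only queues schedules whose `deploymentVersionId` points at the workflow's currently active deployment version. As a sketch, drizzle's tagged template renders interpolated tables and columns as qualified identifiers, so the fragment compiles to a correlated subquery roughly like the comment below (exact column casing depends on the schema definitions):

```ts
import { sql } from 'drizzle-orm'

// Reusable filter; compiles to approximately:
//   workflow_schedule.deployment_version_id = (
//     select wdv.id from workflow_deployment_version wdv
//     where wdv.workflow_id = workflow_schedule.workflow_id
//       and wdv.is_active = true)
const boundToActiveVersion = sql`${workflowSchedule.deploymentVersionId} = (
  select ${workflowDeploymentVersion.id}
  from ${workflowDeploymentVersion}
  where ${workflowDeploymentVersion.workflowId} = ${workflowSchedule.workflowId}
    and ${workflowDeploymentVersion.isActive} = true
)`
```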

View File

@@ -29,12 +29,23 @@ vi.mock('@sim/db', () => ({
vi.mock('@sim/db/schema', () => ({
workflow: { id: 'id', userId: 'userId', workspaceId: 'workspaceId' },
workflowSchedule: { workflowId: 'workflowId', blockId: 'blockId' },
workflowSchedule: {
workflowId: 'workflowId',
blockId: 'blockId',
deploymentVersionId: 'deploymentVersionId',
},
workflowDeploymentVersion: {
id: 'id',
workflowId: 'workflowId',
isActive: 'isActive',
},
}))
vi.mock('drizzle-orm', () => ({
eq: vi.fn(),
and: vi.fn(),
or: vi.fn(),
isNull: vi.fn(),
}))
vi.mock('@/lib/core/utils/request', () => ({
@@ -56,6 +67,11 @@ function mockDbChain(results: any[]) {
where: () => ({
limit: () => results[callIndex++] || [],
}),
leftJoin: () => ({
where: () => ({
limit: () => results[callIndex++] || [],
}),
}),
}),
}))
}
@@ -74,7 +90,16 @@ describe('Schedule GET API', () => {
it('returns schedule data for authorized user', async () => {
mockDbChain([
[{ userId: 'user-1', workspaceId: null }],
[{ id: 'sched-1', cronExpression: '0 9 * * *', status: 'active', failedCount: 0 }],
[
{
schedule: {
id: 'sched-1',
cronExpression: '0 9 * * *',
status: 'active',
failedCount: 0,
},
},
],
])
const res = await GET(createRequest('http://test/api/schedules?workflowId=wf-1'))
@@ -128,7 +153,7 @@ describe('Schedule GET API', () => {
it('allows workspace members to view', async () => {
mockDbChain([
[{ userId: 'other-user', workspaceId: 'ws-1' }],
[{ id: 'sched-1', status: 'active', failedCount: 0 }],
[{ schedule: { id: 'sched-1', status: 'active', failedCount: 0 } }],
])
const res = await GET(createRequest('http://test/api/schedules?workflowId=wf-1'))
@@ -139,7 +164,7 @@ describe('Schedule GET API', () => {
it('indicates disabled schedule with failures', async () => {
mockDbChain([
[{ userId: 'user-1', workspaceId: null }],
[{ id: 'sched-1', status: 'disabled', failedCount: 100 }],
[{ schedule: { id: 'sched-1', status: 'disabled', failedCount: 100 } }],
])
const res = await GET(createRequest('http://test/api/schedules?workflowId=wf-1'))

View File

@@ -1,7 +1,7 @@
import { db } from '@sim/db'
import { workflow, workflowSchedule } from '@sim/db/schema'
import { workflow, workflowDeploymentVersion, workflowSchedule } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import { and, eq, isNull, or } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { generateRequestId } from '@/lib/core/utils/request'
@@ -62,9 +62,24 @@ export async function GET(req: NextRequest) {
}
const schedule = await db
.select()
.select({ schedule: workflowSchedule })
.from(workflowSchedule)
.where(conditions.length > 1 ? and(...conditions) : conditions[0])
.leftJoin(
workflowDeploymentVersion,
and(
eq(workflowDeploymentVersion.workflowId, workflowSchedule.workflowId),
eq(workflowDeploymentVersion.isActive, true)
)
)
.where(
and(
...conditions,
or(
eq(workflowSchedule.deploymentVersionId, workflowDeploymentVersion.id),
and(isNull(workflowDeploymentVersion.id), isNull(workflowSchedule.deploymentVersionId))
)
)
)
.limit(1)
const headers = new Headers()
@@ -74,7 +89,7 @@ export async function GET(req: NextRequest) {
return NextResponse.json({ schedule: null }, { headers })
}
const scheduleData = schedule[0]
const scheduleData = schedule[0].schedule
const isDisabled = scheduleData.status === 'disabled'
const hasFailures = scheduleData.failedCount > 0
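
The left join plus `or()` above accepts exactly two shapes of schedule row, which is easy to miss on a first read. Isolated, with the two cases spelled out (reading the second branch as a backward-compatibility path for rows that predate deployment versioning is an inference):

```ts
// Case 1: the schedule is pinned to the workflow's active deployment version.
// Case 2: neither an active version nor a pinned version exists (both null),
//         so unversioned schedules still resolve.
const versionMatch = or(
  eq(workflowSchedule.deploymentVersionId, workflowDeploymentVersion.id),
  and(isNull(workflowDeploymentVersion.id), isNull(workflowSchedule.deploymentVersionId))
)
```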

View File

@@ -3,10 +3,9 @@
*
* @vitest-environment node
*/
import { loggerMock } from '@sim/testing'
import { createMockRequest, loggerMock } from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest } from '@/app/api/__test-utils__/utils'
describe('Custom Tools API Routes', () => {
const sampleTools = [
@@ -364,7 +363,7 @@ describe('Custom Tools API Routes', () => {
})
it('should reject requests missing tool ID', async () => {
const req = createMockRequest('DELETE')
const req = new NextRequest('http://localhost:3000/api/tools/custom')
const { DELETE } = await import('@/app/api/tools/custom/route')

View File

@@ -0,0 +1,169 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { StorageService } from '@/lib/uploads'
import { extractStorageKey, inferContextFromKey } from '@/lib/uploads/utils/file-utils'
import { verifyFileAccess } from '@/app/api/files/authorization'
export const dynamic = 'force-dynamic'
const logger = createLogger('PulseParseAPI')
const PulseParseSchema = z.object({
apiKey: z.string().min(1, 'API key is required'),
filePath: z.string().min(1, 'File path is required'),
pages: z.string().optional(),
extractFigure: z.boolean().optional(),
figureDescription: z.boolean().optional(),
returnHtml: z.boolean().optional(),
chunking: z.string().optional(),
chunkSize: z.number().optional(),
})
export async function POST(request: NextRequest) {
const requestId = generateRequestId()
try {
const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success || !authResult.userId) {
logger.warn(`[${requestId}] Unauthorized Pulse parse attempt`, {
error: authResult.error || 'Missing userId',
})
return NextResponse.json(
{
success: false,
error: authResult.error || 'Unauthorized',
},
{ status: 401 }
)
}
const userId = authResult.userId
const body = await request.json()
const validatedData = PulseParseSchema.parse(body)
logger.info(`[${requestId}] Pulse parse request`, {
filePath: validatedData.filePath,
isWorkspaceFile: validatedData.filePath.includes('/api/files/serve/'),
userId,
})
let fileUrl = validatedData.filePath
if (validatedData.filePath?.includes('/api/files/serve/')) {
try {
const storageKey = extractStorageKey(validatedData.filePath)
const context = inferContextFromKey(storageKey)
const hasAccess = await verifyFileAccess(storageKey, userId, undefined, context, false)
if (!hasAccess) {
logger.warn(`[${requestId}] Unauthorized presigned URL generation attempt`, {
userId,
key: storageKey,
context,
})
return NextResponse.json(
{
success: false,
error: 'File not found',
},
{ status: 404 }
)
}
fileUrl = await StorageService.generatePresignedDownloadUrl(storageKey, context, 5 * 60)
logger.info(`[${requestId}] Generated presigned URL for ${context} file`)
} catch (error) {
logger.error(`[${requestId}] Failed to generate presigned URL:`, error)
return NextResponse.json(
{
success: false,
error: 'Failed to generate file access URL',
},
{ status: 500 }
)
}
} else if (validatedData.filePath?.startsWith('/')) {
const baseUrl = getBaseUrl()
fileUrl = `${baseUrl}${validatedData.filePath}`
}
const formData = new FormData()
formData.append('file_url', fileUrl)
if (validatedData.pages) {
formData.append('pages', validatedData.pages)
}
if (validatedData.extractFigure !== undefined) {
formData.append('extract_figure', String(validatedData.extractFigure))
}
if (validatedData.figureDescription !== undefined) {
formData.append('figure_description', String(validatedData.figureDescription))
}
if (validatedData.returnHtml !== undefined) {
formData.append('return_html', String(validatedData.returnHtml))
}
if (validatedData.chunking) {
formData.append('chunking', validatedData.chunking)
}
if (validatedData.chunkSize !== undefined) {
formData.append('chunk_size', String(validatedData.chunkSize))
}
const pulseResponse = await fetch('https://api.runpulse.com/extract', {
method: 'POST',
headers: {
'x-api-key': validatedData.apiKey,
},
body: formData,
})
if (!pulseResponse.ok) {
const errorText = await pulseResponse.text()
logger.error(`[${requestId}] Pulse API error:`, errorText)
return NextResponse.json(
{
success: false,
error: `Pulse API error: ${pulseResponse.statusText}`,
},
{ status: pulseResponse.status }
)
}
const pulseData = await pulseResponse.json()
logger.info(`[${requestId}] Pulse parse successful`)
return NextResponse.json({
success: true,
output: pulseData,
})
} catch (error) {
if (error instanceof z.ZodError) {
logger.warn(`[${requestId}] Invalid request data`, { errors: error.errors })
return NextResponse.json(
{
success: false,
error: 'Invalid request data',
details: error.errors,
},
{ status: 400 }
)
}
logger.error(`[${requestId}] Error in Pulse parse:`, error)
return NextResponse.json(
{
success: false,
error: error instanceof Error ? error.message : 'Internal server error',
},
{ status: 500 }
)
}
}
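
A hedged usage example for this handler. The mount path is an assumption (the file's location isn't shown in this diff); the field names come straight from `PulseParseSchema`:

```ts
// Assumed path. Auth is whatever checkHybridAuth accepts (session or API key).
const res = await fetch('/api/tools/pulse/parse', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    apiKey: process.env.PULSE_API_KEY,        // forwarded to Pulse as x-api-key
    filePath: '/api/files/serve/ws/doc.pdf',  // internal paths get a 5-minute presigned URL
    pages: '1-3',
    extractFigure: true,
    chunking: 'semantic',                     // example value; the schema only requires a string
  }),
})
const { success, output } = await res.json()
```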

View File

@@ -0,0 +1,167 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { StorageService } from '@/lib/uploads'
import { extractStorageKey, inferContextFromKey } from '@/lib/uploads/utils/file-utils'
import { verifyFileAccess } from '@/app/api/files/authorization'
export const dynamic = 'force-dynamic'
const logger = createLogger('ReductoParseAPI')
const ReductoParseSchema = z.object({
apiKey: z.string().min(1, 'API key is required'),
filePath: z.string().min(1, 'File path is required'),
pages: z.array(z.number()).optional(),
tableOutputFormat: z.enum(['html', 'md']).optional(),
})
export async function POST(request: NextRequest) {
const requestId = generateRequestId()
try {
const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success || !authResult.userId) {
logger.warn(`[${requestId}] Unauthorized Reducto parse attempt`, {
error: authResult.error || 'Missing userId',
})
return NextResponse.json(
{
success: false,
error: authResult.error || 'Unauthorized',
},
{ status: 401 }
)
}
const userId = authResult.userId
const body = await request.json()
const validatedData = ReductoParseSchema.parse(body)
logger.info(`[${requestId}] Reducto parse request`, {
filePath: validatedData.filePath,
isWorkspaceFile: validatedData.filePath.includes('/api/files/serve/'),
userId,
})
let fileUrl = validatedData.filePath
if (validatedData.filePath?.includes('/api/files/serve/')) {
try {
const storageKey = extractStorageKey(validatedData.filePath)
const context = inferContextFromKey(storageKey)
const hasAccess = await verifyFileAccess(
storageKey,
userId,
undefined, // customConfig
context, // context
false // isLocal
)
if (!hasAccess) {
logger.warn(`[${requestId}] Unauthorized presigned URL generation attempt`, {
userId,
key: storageKey,
context,
})
return NextResponse.json(
{
success: false,
error: 'File not found',
},
{ status: 404 }
)
}
fileUrl = await StorageService.generatePresignedDownloadUrl(storageKey, context, 5 * 60)
logger.info(`[${requestId}] Generated presigned URL for ${context} file`)
} catch (error) {
logger.error(`[${requestId}] Failed to generate presigned URL:`, error)
return NextResponse.json(
{
success: false,
error: 'Failed to generate file access URL',
},
{ status: 500 }
)
}
} else if (validatedData.filePath?.startsWith('/')) {
const baseUrl = getBaseUrl()
fileUrl = `${baseUrl}${validatedData.filePath}`
}
const reductoBody: Record<string, unknown> = {
input: fileUrl,
}
if (validatedData.pages && validatedData.pages.length > 0) {
reductoBody.settings = {
page_range: validatedData.pages,
}
}
if (validatedData.tableOutputFormat) {
reductoBody.formatting = {
table_output_format: validatedData.tableOutputFormat,
}
}
const reductoResponse = await fetch('https://platform.reducto.ai/parse', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
Accept: 'application/json',
Authorization: `Bearer ${validatedData.apiKey}`,
},
body: JSON.stringify(reductoBody),
})
if (!reductoResponse.ok) {
const errorText = await reductoResponse.text()
logger.error(`[${requestId}] Reducto API error:`, errorText)
return NextResponse.json(
{
success: false,
error: `Reducto API error: ${reductoResponse.statusText}`,
},
{ status: reductoResponse.status }
)
}
const reductoData = await reductoResponse.json()
logger.info(`[${requestId}] Reducto parse successful`)
return NextResponse.json({
success: true,
output: reductoData,
})
} catch (error) {
if (error instanceof z.ZodError) {
logger.warn(`[${requestId}] Invalid request data`, { errors: error.errors })
return NextResponse.json(
{
success: false,
error: 'Invalid request data',
details: error.errors,
},
{ status: 400 }
)
}
logger.error(`[${requestId}] Error in Reducto parse:`, error)
return NextResponse.json(
{
success: false,
error: error instanceof Error ? error.message : 'Internal server error',
},
{ status: 500 }
)
}
}
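
For reference, the JSON body this handler assembles for `https://platform.reducto.ai/parse`, condensing the conditional mapping above:

```ts
// settings/formatting are only attached when the corresponding input was provided.
const reductoBody = {
  input: fileUrl,                             // presigned or absolute URL
  settings: { page_range: [1, 2, 3] },        // from `pages`
  formatting: { table_output_format: 'md' },  // from `tableOutputFormat` ('html' | 'md')
}
```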

View File

@@ -27,10 +27,11 @@ const SettingsSchema = z.object({
superUserModeEnabled: z.boolean().optional(),
errorNotificationsEnabled: z.boolean().optional(),
snapToGridSize: z.number().min(0).max(50).optional(),
showActionBar: z.boolean().optional(),
})
const defaultSettings = {
theme: 'system',
theme: 'dark',
autoConnect: true,
telemetryEnabled: true,
emailPreferences: {},
@@ -39,6 +40,7 @@ const defaultSettings = {
superUserModeEnabled: false,
errorNotificationsEnabled: true,
snapToGridSize: 0,
showActionBar: true,
}
export async function GET() {
@@ -73,6 +75,7 @@ export async function GET() {
superUserModeEnabled: userSettings.superUserModeEnabled ?? true,
errorNotificationsEnabled: userSettings.errorNotificationsEnabled ?? true,
snapToGridSize: userSettings.snapToGridSize ?? 0,
showActionBar: userSettings.showActionBar ?? true,
},
},
{ status: 200 }

View File

@@ -1,6 +1,8 @@
import { db, workflow } from '@sim/db'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { generateRequestId } from '@/lib/core/utils/request'
import { cleanupWebhooksForWorkflow } from '@/lib/webhooks/deploy'
import {
deployWorkflow,
loadWorkflowFromNormalizedTables,
@@ -58,7 +60,17 @@ export const POST = withAdminAuthParams<RouteParams>(async (request, context) =>
return internalErrorResponse(deployResult.error || 'Failed to deploy workflow')
}
const scheduleResult = await createSchedulesForDeploy(workflowId, normalizedData.blocks, db)
if (!deployResult.deploymentVersionId) {
await undeployWorkflow({ workflowId })
return internalErrorResponse('Failed to resolve deployment version')
}
const scheduleResult = await createSchedulesForDeploy(
workflowId,
normalizedData.blocks,
db,
deployResult.deploymentVersionId
)
if (!scheduleResult.success) {
logger.warn(`Schedule creation failed for workflow ${workflowId}: ${scheduleResult.error}`)
}
@@ -80,10 +92,11 @@ export const POST = withAdminAuthParams<RouteParams>(async (request, context) =>
export const DELETE = withAdminAuthParams<RouteParams>(async (request, context) => {
const { id: workflowId } = await context.params
const requestId = generateRequestId()
try {
const [workflowRecord] = await db
.select({ id: workflow.id })
.select()
.from(workflow)
.where(eq(workflow.id, workflowId))
.limit(1)
@@ -92,6 +105,13 @@ export const DELETE = withAdminAuthParams<RouteParams>(async (request, context)
return notFoundResponse('Workflow')
}
// Clean up external webhook subscriptions before undeploying
await cleanupWebhooksForWorkflow(
workflowId,
workflowRecord as Record<string, unknown>,
requestId
)
const result = await undeployWorkflow({ workflowId })
if (!result.success) {
return internalErrorResponse(result.error || 'Failed to undeploy workflow')
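
Two small but load-bearing changes in this admin route: POST now treats a missing `deploymentVersionId` as fatal and compensates by undeploying, and DELETE removes external webhook subscriptions before undeploying so nothing keeps firing at a dead deployment. Condensed:

```ts
// Condensed from the hunks above; response helpers as in the route.
if (!deployResult.deploymentVersionId) {
  await undeployWorkflow({ workflowId })  // never leave a half-deployed workflow
  return internalErrorResponse('Failed to resolve deployment version')
}
await createSchedulesForDeploy(workflowId, normalizedData.blocks, db, deployResult.deploymentVersionId)

// DELETE: external cleanup precedes undeploy.
await cleanupWebhooksForWorkflow(workflowId, workflowRecord as Record<string, unknown>, requestId)
await undeployWorkflow({ workflowId })
```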

View File

@@ -7,6 +7,11 @@ import { getSession } from '@/lib/auth'
import { validateInteger } from '@/lib/core/security/input-validation'
import { PlatformEvents } from '@/lib/core/telemetry'
import { generateRequestId } from '@/lib/core/utils/request'
import {
cleanupExternalWebhook,
createExternalWebhookSubscription,
shouldRecreateExternalWebhookSubscription,
} from '@/lib/webhooks/provider-subscriptions'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
const logger = createLogger('WebhookAPI')
@@ -177,6 +182,46 @@ export async function PATCH(request: NextRequest, { params }: { params: Promise<
return NextResponse.json({ error: 'Access denied' }, { status: 403 })
}
const existingProviderConfig =
(webhookData.webhook.providerConfig as Record<string, unknown>) || {}
let nextProviderConfig =
providerConfig !== undefined &&
resolvedProviderConfig &&
typeof resolvedProviderConfig === 'object'
? (resolvedProviderConfig as Record<string, unknown>)
: existingProviderConfig
const nextProvider = (provider ?? webhookData.webhook.provider) as string
if (
providerConfig !== undefined &&
shouldRecreateExternalWebhookSubscription({
previousProvider: webhookData.webhook.provider as string,
nextProvider,
previousConfig: existingProviderConfig,
nextConfig: nextProviderConfig,
})
) {
await cleanupExternalWebhook(
{ ...webhookData.webhook, providerConfig: existingProviderConfig },
webhookData.workflow,
requestId
)
const result = await createExternalWebhookSubscription(
request,
{
...webhookData.webhook,
provider: nextProvider,
providerConfig: nextProviderConfig,
},
webhookData.workflow,
session.user.id,
requestId
)
nextProviderConfig = result.updatedProviderConfig as Record<string, unknown>
}
logger.debug(`[${requestId}] Updating webhook properties`, {
hasPathUpdate: path !== undefined,
hasProviderUpdate: provider !== undefined,
@@ -188,16 +233,16 @@ export async function PATCH(request: NextRequest, { params }: { params: Promise<
// Merge providerConfig to preserve credential-related fields
let finalProviderConfig = webhooks[0].webhook.providerConfig
if (providerConfig !== undefined) {
const existingConfig = (webhooks[0].webhook.providerConfig as Record<string, unknown>) || {}
const existingConfig = existingProviderConfig
finalProviderConfig = {
...resolvedProviderConfig,
...nextProviderConfig,
credentialId: existingConfig.credentialId,
credentialSetId: existingConfig.credentialSetId,
userId: existingConfig.userId,
historyId: existingConfig.historyId,
lastCheckedTimestamp: existingConfig.lastCheckedTimestamp,
setupCompleted: existingConfig.setupCompleted,
externalId: existingConfig.externalId,
externalId: nextProviderConfig.externalId ?? existingConfig.externalId,
}
}
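
The recreate decision itself isn't part of this diff; only the call site and the argument shape are. Purely as an assumption about what such a predicate might check, a hypothetical sketch:

```ts
// Hypothetical implementation; only the signature appears in this diff.
function shouldRecreateExternalWebhookSubscription(args: {
  previousProvider: string
  nextProvider: string
  previousConfig: Record<string, unknown>
  nextConfig: Record<string, unknown>
}): boolean {
  if (args.previousProvider !== args.nextProvider) return true
  // Assume any provider-facing config change invalidates the remote registration.
  return JSON.stringify(args.previousConfig) !== JSON.stringify(args.nextConfig)
}
```

Whatever the real check is, the call site's contract is clear: on a recreate, `cleanupExternalWebhook` tears down the old subscription, `createExternalWebhookSubscription` registers the new one, and the returned `updatedProviderConfig` (including any new `externalId`) wins the merge below.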

File diff suppressed because it is too large

View File

@@ -3,15 +3,92 @@
*
* @vitest-environment node
*/
import { loggerMock } from '@sim/testing'
import { createMockRequest, loggerMock } from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
globalMockData,
mockExecutionDependencies,
mockTriggerDevSdk,
} from '@/app/api/__test-utils__/utils'
/** Mock execution dependencies for webhook tests */
function mockExecutionDependencies() {
vi.mock('@/lib/core/security/encryption', () => ({
decryptSecret: vi.fn().mockResolvedValue({ decrypted: 'decrypted-value' }),
}))
vi.mock('@/lib/logs/execution/trace-spans/trace-spans', () => ({
buildTraceSpans: vi.fn().mockReturnValue({ traceSpans: [], totalDuration: 100 }),
}))
vi.mock('@/lib/workflows/utils', () => ({
updateWorkflowRunCounts: vi.fn().mockResolvedValue(undefined),
}))
vi.mock('@/serializer', () => ({
Serializer: vi.fn().mockImplementation(() => ({
serializeWorkflow: vi.fn().mockReturnValue({
version: '1.0',
blocks: [
{
id: 'starter-id',
metadata: { id: 'starter', name: 'Start' },
config: {},
inputs: {},
outputs: {},
position: { x: 100, y: 100 },
enabled: true,
},
{
id: 'agent-id',
metadata: { id: 'agent', name: 'Agent 1' },
config: {},
inputs: {},
outputs: {},
position: { x: 634, y: -167 },
enabled: true,
},
],
edges: [
{
id: 'edge-1',
source: 'starter-id',
target: 'agent-id',
sourceHandle: 'source',
targetHandle: 'target',
},
],
loops: {},
parallels: {},
}),
})),
}))
}
/** Mock Trigger.dev SDK */
function mockTriggerDevSdk() {
vi.mock('@trigger.dev/sdk', () => ({
tasks: { trigger: vi.fn().mockResolvedValue({ id: 'mock-task-id' }) },
task: vi.fn().mockReturnValue({}),
}))
}
/**
* Test data store - isolated per test via beforeEach reset
* This replaces the global mutable state pattern with local test data
*/
const testData = {
webhooks: [] as Array<{
id: string
provider: string
path: string
isActive: boolean
providerConfig?: Record<string, unknown>
workflowId: string
rateLimitCount?: number
rateLimitPeriod?: number
}>,
workflows: [] as Array<{
id: string
userId: string
workspaceId?: string
}>,
}
const {
generateRequestHashMock,
@@ -159,8 +236,8 @@ vi.mock('@/lib/workflows/persistence/utils', () => ({
vi.mock('@/lib/webhooks/processor', () => ({
findAllWebhooksForPath: vi.fn().mockImplementation(async (options: { path: string }) => {
// Filter webhooks by path from globalMockData
const matchingWebhooks = globalMockData.webhooks.filter(
// Filter webhooks by path from testData
const matchingWebhooks = testData.webhooks.filter(
(wh) => wh.path === options.path && wh.isActive
)
@@ -170,7 +247,7 @@ vi.mock('@/lib/webhooks/processor', () => ({
// Return array of {webhook, workflow} objects
return matchingWebhooks.map((wh) => {
const matchingWorkflow = globalMockData.workflows.find((w) => w.id === wh.workflowId) || {
const matchingWorkflow = testData.workflows.find((w) => w.id === wh.workflowId) || {
id: wh.workflowId || 'test-workflow-id',
userId: 'test-user-id',
workspaceId: 'test-workspace-id',
@@ -283,14 +360,15 @@ describe('Webhook Trigger API Route', () => {
beforeEach(() => {
vi.clearAllMocks()
globalMockData.webhooks.length = 0
globalMockData.workflows.length = 0
globalMockData.schedules.length = 0
// Reset test data arrays
testData.webhooks.length = 0
testData.workflows.length = 0
mockExecutionDependencies()
mockTriggerDevSdk()
globalMockData.workflows.push({
// Set up default workflow for tests
testData.workflows.push({
id: 'test-workflow-id',
userId: 'test-user-id',
workspaceId: 'test-workspace-id',
@@ -326,7 +404,7 @@ describe('Webhook Trigger API Route', () => {
describe('Generic Webhook Authentication', () => {
it('should process generic webhook without authentication', async () => {
globalMockData.webhooks.push({
testData.webhooks.push({
id: 'generic-webhook-id',
provider: 'generic',
path: 'test-path',
@@ -336,7 +414,7 @@ describe('Webhook Trigger API Route', () => {
rateLimitCount: 100,
rateLimitPeriod: 60,
})
globalMockData.workflows.push({
testData.workflows.push({
id: 'test-workflow-id',
userId: 'test-user-id',
workspaceId: 'test-workspace-id',
@@ -354,7 +432,7 @@ describe('Webhook Trigger API Route', () => {
})
it('should authenticate with Bearer token when no custom header is configured', async () => {
globalMockData.webhooks.push({
testData.webhooks.push({
id: 'generic-webhook-id',
provider: 'generic',
path: 'test-path',
@@ -362,7 +440,7 @@ describe('Webhook Trigger API Route', () => {
providerConfig: { requireAuth: true, token: 'test-token-123' },
workflowId: 'test-workflow-id',
})
globalMockData.workflows.push({
testData.workflows.push({
id: 'test-workflow-id',
userId: 'test-user-id',
workspaceId: 'test-workspace-id',
@@ -381,7 +459,7 @@ describe('Webhook Trigger API Route', () => {
})
it('should authenticate with custom header when configured', async () => {
globalMockData.webhooks.push({
testData.webhooks.push({
id: 'generic-webhook-id',
provider: 'generic',
path: 'test-path',
@@ -393,7 +471,7 @@ describe('Webhook Trigger API Route', () => {
},
workflowId: 'test-workflow-id',
})
globalMockData.workflows.push({
testData.workflows.push({
id: 'test-workflow-id',
userId: 'test-user-id',
workspaceId: 'test-workspace-id',
@@ -412,7 +490,7 @@ describe('Webhook Trigger API Route', () => {
})
it('should handle case insensitive Bearer token authentication', async () => {
globalMockData.webhooks.push({
testData.webhooks.push({
id: 'generic-webhook-id',
provider: 'generic',
path: 'test-path',
@@ -420,7 +498,7 @@ describe('Webhook Trigger API Route', () => {
providerConfig: { requireAuth: true, token: 'case-test-token' },
workflowId: 'test-workflow-id',
})
globalMockData.workflows.push({
testData.workflows.push({
id: 'test-workflow-id',
userId: 'test-user-id',
workspaceId: 'test-workspace-id',
@@ -454,7 +532,7 @@ describe('Webhook Trigger API Route', () => {
})
it('should handle case insensitive custom header authentication', async () => {
globalMockData.webhooks.push({
testData.webhooks.push({
id: 'generic-webhook-id',
provider: 'generic',
path: 'test-path',
@@ -466,7 +544,7 @@ describe('Webhook Trigger API Route', () => {
},
workflowId: 'test-workflow-id',
})
globalMockData.workflows.push({
testData.workflows.push({
id: 'test-workflow-id',
userId: 'test-user-id',
workspaceId: 'test-workspace-id',
@@ -495,7 +573,7 @@ describe('Webhook Trigger API Route', () => {
})
it('should reject wrong Bearer token', async () => {
globalMockData.webhooks.push({
testData.webhooks.push({
id: 'generic-webhook-id',
provider: 'generic',
path: 'test-path',
@@ -519,7 +597,7 @@ describe('Webhook Trigger API Route', () => {
})
it('should reject wrong custom header token', async () => {
globalMockData.webhooks.push({
testData.webhooks.push({
id: 'generic-webhook-id',
provider: 'generic',
path: 'test-path',
@@ -547,7 +625,7 @@ describe('Webhook Trigger API Route', () => {
})
it('should reject missing authentication when required', async () => {
globalMockData.webhooks.push({
testData.webhooks.push({
id: 'generic-webhook-id',
provider: 'generic',
path: 'test-path',
@@ -567,7 +645,7 @@ describe('Webhook Trigger API Route', () => {
})
it('should reject Bearer token when custom header is configured', async () => {
globalMockData.webhooks.push({
testData.webhooks.push({
id: 'generic-webhook-id',
provider: 'generic',
path: 'test-path',
@@ -595,7 +673,7 @@ describe('Webhook Trigger API Route', () => {
})
it('should reject wrong custom header name', async () => {
globalMockData.webhooks.push({
testData.webhooks.push({
id: 'generic-webhook-id',
provider: 'generic',
path: 'test-path',
@@ -623,7 +701,7 @@ describe('Webhook Trigger API Route', () => {
})
it('should reject when auth is required but no token is configured', async () => {
globalMockData.webhooks.push({
testData.webhooks.push({
id: 'generic-webhook-id',
provider: 'generic',
path: 'test-path',
@@ -631,7 +709,7 @@ describe('Webhook Trigger API Route', () => {
providerConfig: { requireAuth: true },
workflowId: 'test-workflow-id',
})
globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
testData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
const headers = {
'Content-Type': 'application/json',

View File

@@ -152,7 +152,6 @@ export async function POST(
const response = await queueWebhookExecution(foundWebhook, foundWorkflow, body, request, {
requestId,
path,
executionTarget: 'deployed',
})
responses.push(response)
}

View File

@@ -22,6 +22,13 @@ export async function GET(_request: Request, { params }: { params: Promise<{ id:
.select({
id: chat.id,
identifier: chat.identifier,
title: chat.title,
description: chat.description,
customizations: chat.customizations,
authType: chat.authType,
allowedEmails: chat.allowedEmails,
outputConfigs: chat.outputConfigs,
password: chat.password,
isActive: chat.isActive,
})
.from(chat)
@@ -34,6 +41,13 @@ export async function GET(_request: Request, { params }: { params: Promise<{ id:
? {
id: deploymentResults[0].id,
identifier: deploymentResults[0].identifier,
title: deploymentResults[0].title,
description: deploymentResults[0].description,
customizations: deploymentResults[0].customizations,
authType: deploymentResults[0].authType,
allowedEmails: deploymentResults[0].allowedEmails,
outputConfigs: deploymentResults[0].outputConfigs,
hasPassword: Boolean(deploymentResults[0].password),
}
: null

View File

@@ -4,12 +4,17 @@ import { and, desc, eq } from 'drizzle-orm'
import type { NextRequest } from 'next/server'
import { generateRequestId } from '@/lib/core/utils/request'
import { removeMcpToolsForWorkflow, syncMcpToolsForWorkflow } from '@/lib/mcp/workflow-mcp-sync'
import { cleanupWebhooksForWorkflow, saveTriggerWebhooksForDeploy } from '@/lib/webhooks/deploy'
import {
deployWorkflow,
loadWorkflowFromNormalizedTables,
undeployWorkflow,
} from '@/lib/workflows/persistence/utils'
import { createSchedulesForDeploy, validateWorkflowSchedules } from '@/lib/workflows/schedules'
import {
cleanupDeploymentVersion,
createSchedulesForDeploy,
validateWorkflowSchedules,
} from '@/lib/workflows/schedules'
import { validateWorkflowPermissions } from '@/lib/workflows/utils'
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
@@ -141,14 +146,58 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
}
const deployedAt = deployResult.deployedAt!
const deploymentVersionId = deployResult.deploymentVersionId
if (!deploymentVersionId) {
await undeployWorkflow({ workflowId: id })
return createErrorResponse('Failed to resolve deployment version', 500)
}
const triggerSaveResult = await saveTriggerWebhooksForDeploy({
request,
workflowId: id,
workflow: workflowData,
userId: actorUserId,
blocks: normalizedData.blocks,
requestId,
deploymentVersionId,
})
if (!triggerSaveResult.success) {
await cleanupDeploymentVersion({
workflowId: id,
workflow: workflowData as Record<string, unknown>,
requestId,
deploymentVersionId,
})
await undeployWorkflow({ workflowId: id })
return createErrorResponse(
triggerSaveResult.error?.message || 'Failed to save trigger configuration',
triggerSaveResult.error?.status || 500
)
}
let scheduleInfo: { scheduleId?: string; cronExpression?: string; nextRunAt?: Date } = {}
const scheduleResult = await createSchedulesForDeploy(id, normalizedData.blocks, db)
const scheduleResult = await createSchedulesForDeploy(
id,
normalizedData.blocks,
db,
deploymentVersionId
)
if (!scheduleResult.success) {
logger.error(
`[${requestId}] Failed to create schedule for workflow ${id}: ${scheduleResult.error}`
)
} else if (scheduleResult.scheduleId) {
await cleanupDeploymentVersion({
workflowId: id,
workflow: workflowData as Record<string, unknown>,
requestId,
deploymentVersionId,
})
await undeployWorkflow({ workflowId: id })
return createErrorResponse(scheduleResult.error || 'Failed to create schedule', 500)
}
if (scheduleResult.scheduleId) {
scheduleInfo = {
scheduleId: scheduleResult.scheduleId,
cronExpression: scheduleResult.cronExpression,
@@ -202,11 +251,18 @@ export async function DELETE(
try {
logger.debug(`[${requestId}] Undeploying workflow: ${id}`)
const { error } = await validateWorkflowPermissions(id, requestId, 'admin')
const { error, workflow: workflowData } = await validateWorkflowPermissions(
id,
requestId,
'admin'
)
if (error) {
return createErrorResponse(error.message, error.status)
}
// Clean up external webhook subscriptions before undeploying
await cleanupWebhooksForWorkflow(id, workflowData as Record<string, unknown>, requestId)
const result = await undeployWorkflow({ workflowId: id })
if (!result.success) {
return createErrorResponse(result.error || 'Failed to undeploy workflow', 500)
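
The deploy path now compensates at every step: if the deployment version can't be resolved, or trigger webhooks or schedules can't be materialized for it, the version is cleaned up and the workflow undeployed rather than left half-wired. Condensed (the interleaved removed/added lines above make the schedule branch hard to read; this reflects the added code):

```ts
// Condensed rollback choreography; names from the hunks above.
if (!deploymentVersionId) {
  await undeployWorkflow({ workflowId: id })
  return createErrorResponse('Failed to resolve deployment version', 500)
}
const triggerSaveResult = await saveTriggerWebhooksForDeploy({ /* ... */ deploymentVersionId })
if (!triggerSaveResult.success) {
  await cleanupDeploymentVersion({ workflowId: id, workflow: workflowData as Record<string, unknown>, requestId, deploymentVersionId })
  await undeployWorkflow({ workflowId: id })
  return createErrorResponse(triggerSaveResult.error?.message || 'Failed to save trigger configuration', 500)
}
const scheduleResult = await createSchedulesForDeploy(id, normalizedData.blocks, db, deploymentVersionId)
if (!scheduleResult.success) {
  await cleanupDeploymentVersion({ workflowId: id, workflow: workflowData as Record<string, unknown>, requestId, deploymentVersionId })
  await undeployWorkflow({ workflowId: id })
  return createErrorResponse(scheduleResult.error || 'Failed to create schedule', 500)
}
```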

View File

@@ -1,10 +1,19 @@
import { db, workflowDeploymentVersion } from '@sim/db'
import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import type { NextRequest } from 'next/server'
import { generateRequestId } from '@/lib/core/utils/request'
import { syncMcpToolsForWorkflow } from '@/lib/mcp/workflow-mcp-sync'
import { saveTriggerWebhooksForDeploy } from '@/lib/webhooks/deploy'
import { activateWorkflowVersion } from '@/lib/workflows/persistence/utils'
import {
cleanupDeploymentVersion,
createSchedulesForDeploy,
validateWorkflowSchedules,
} from '@/lib/workflows/schedules'
import { validateWorkflowPermissions } from '@/lib/workflows/utils'
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
import type { BlockState } from '@/stores/workflows/workflow/types'
const logger = createLogger('WorkflowActivateDeploymentAPI')
@@ -19,30 +28,135 @@ export async function POST(
const { id, version } = await params
try {
const { error } = await validateWorkflowPermissions(id, requestId, 'admin')
const {
error,
session,
workflow: workflowData,
} = await validateWorkflowPermissions(id, requestId, 'admin')
if (error) {
return createErrorResponse(error.message, error.status)
}
const actorUserId = session?.user?.id
if (!actorUserId) {
logger.warn(`[${requestId}] Unable to resolve actor user for deployment activation: ${id}`)
return createErrorResponse('Unable to determine activating user', 400)
}
const versionNum = Number(version)
if (!Number.isFinite(versionNum)) {
return createErrorResponse('Invalid version number', 400)
}
const [versionRow] = await db
.select({
id: workflowDeploymentVersion.id,
state: workflowDeploymentVersion.state,
})
.from(workflowDeploymentVersion)
.where(
and(
eq(workflowDeploymentVersion.workflowId, id),
eq(workflowDeploymentVersion.version, versionNum)
)
)
.limit(1)
if (!versionRow?.state) {
return createErrorResponse('Deployment version not found', 404)
}
const [currentActiveVersion] = await db
.select({ id: workflowDeploymentVersion.id })
.from(workflowDeploymentVersion)
.where(
and(
eq(workflowDeploymentVersion.workflowId, id),
eq(workflowDeploymentVersion.isActive, true)
)
)
.limit(1)
const previousVersionId = currentActiveVersion?.id
const deployedState = versionRow.state as { blocks?: Record<string, BlockState> }
const blocks = deployedState.blocks
if (!blocks || typeof blocks !== 'object') {
return createErrorResponse('Invalid deployed state structure', 500)
}
const triggerSaveResult = await saveTriggerWebhooksForDeploy({
request,
workflowId: id,
workflow: workflowData as Record<string, unknown>,
userId: actorUserId,
blocks,
requestId,
deploymentVersionId: versionRow.id,
})
if (!triggerSaveResult.success) {
return createErrorResponse(
triggerSaveResult.error?.message || 'Failed to sync trigger configuration',
triggerSaveResult.error?.status || 500
)
}
const scheduleValidation = validateWorkflowSchedules(blocks)
if (!scheduleValidation.isValid) {
return createErrorResponse(`Invalid schedule configuration: ${scheduleValidation.error}`, 400)
}
const scheduleResult = await createSchedulesForDeploy(id, blocks, db, versionRow.id)
if (!scheduleResult.success) {
await cleanupDeploymentVersion({
workflowId: id,
workflow: workflowData as Record<string, unknown>,
requestId,
deploymentVersionId: versionRow.id,
})
return createErrorResponse(scheduleResult.error || 'Failed to sync schedules', 500)
}
const result = await activateWorkflowVersion({ workflowId: id, version: versionNum })
if (!result.success) {
await cleanupDeploymentVersion({
workflowId: id,
workflow: workflowData as Record<string, unknown>,
requestId,
deploymentVersionId: versionRow.id,
})
return createErrorResponse(result.error || 'Failed to activate deployment', 400)
}
if (result.state) {
await syncMcpToolsForWorkflow({
workflowId: id,
requestId,
state: result.state,
context: 'activate',
})
if (previousVersionId && previousVersionId !== versionRow.id) {
try {
logger.info(
`[${requestId}] Cleaning up previous version ${previousVersionId} webhooks/schedules`
)
await cleanupDeploymentVersion({
workflowId: id,
workflow: workflowData as Record<string, unknown>,
requestId,
deploymentVersionId: previousVersionId,
})
logger.info(`[${requestId}] Previous version cleanup completed`)
} catch (cleanupError) {
logger.error(
`[${requestId}] Failed to clean up previous version ${previousVersionId}`,
cleanupError
)
}
}
await syncMcpToolsForWorkflow({
workflowId: id,
requestId,
state: versionRow.state,
context: 'activate',
})
return createSuccessResponse({ success: true, deployedAt: result.deployedAt })
} catch (error: any) {
logger.error(`[${requestId}] Error activating deployment for workflow: ${id}`, error)
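
Activating an older version is now effectively a re-deploy of that version's plumbing, with the losing version cleaned up afterwards. The sequence, condensed:

```ts
// Condensed sequence from the handler above.
// 1. Resolve the actor, parse the version number, load the target version row
//    (404 if absent), and remember the currently active version's id.
// 2. saveTriggerWebhooksForDeploy(...) against the target version; abort on failure.
// 3. validateWorkflowSchedules(blocks); reject bad schedule config with a 400 up front.
// 4. createSchedulesForDeploy(id, blocks, db, versionRow.id);
//    on failure, cleanupDeploymentVersion for the target and abort.
// 5. activateWorkflowVersion({ workflowId: id, version: versionNum });
//    on failure, clean up the target version's plumbing and abort.
// 6. On success, best-effort cleanupDeploymentVersion for the *previous* active
//    version (failures are logged, never surfaced), then syncMcpToolsForWorkflow
//    from the newly active state.
```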

View File

@@ -110,6 +110,7 @@ type AsyncExecutionParams = {
userId: string
input: any
triggerType: CoreTriggerType
preflighted?: boolean
}
/**
@@ -132,6 +133,7 @@ async function handleAsyncExecution(params: AsyncExecutionParams): Promise<NextR
userId,
input,
triggerType,
preflighted: params.preflighted,
}
try {
@@ -264,6 +266,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
requestId
)
const shouldPreflightEnvVars = isAsyncMode && isTriggerDevEnabled
const preprocessResult = await preprocessExecution({
workflowId,
userId,
@@ -272,6 +275,9 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
requestId,
checkDeployment: !shouldUseDraftState,
loggingSession,
preflightEnvVars: shouldPreflightEnvVars,
useDraftState: shouldUseDraftState,
envUserId: isClientSession ? userId : undefined,
})
if (!preprocessResult.success) {
@@ -303,6 +309,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
userId: actorUserId,
input,
triggerType: loggingTriggerType,
preflighted: shouldPreflightEnvVars,
})
}
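
The new `preflighted` flag threads one fact from the HTTP handler into the async job payload: env vars were already resolved during preprocessing (`isAsyncMode && isTriggerDevEnabled`). Presumably the background task uses it to skip a redundant env-var pass; that rationale is inferred, only the plumbing is shown:

```ts
// Plumbing from the hunks above; the consumer of `preflighted` is not in this diff.
const shouldPreflightEnvVars = isAsyncMode && isTriggerDevEnabled

const preprocessResult = await preprocessExecution({
  // ...existing args...
  preflightEnvVars: shouldPreflightEnvVars, // resolve env vars up front
  useDraftState: shouldUseDraftState,
  envUserId: isClientSession ? userId : undefined,
})

return handleAsyncExecution({
  // ...existing args...
  preflighted: shouldPreflightEnvVars, // tell the job the work was already done
})
```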

View File

@@ -1,5 +1,5 @@
import { db } from '@sim/db'
import { webhook, workflow } from '@sim/db/schema'
import { workflow } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
@@ -13,7 +13,6 @@ import { sanitizeAgentToolsInBlocks } from '@/lib/workflows/sanitization/validat
import { getWorkflowAccessContext } from '@/lib/workflows/utils'
import type { BlockState } from '@/stores/workflows/workflow/types'
import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils'
import { getTrigger } from '@/triggers'
const logger = createLogger('WorkflowStateAPI')
@@ -203,8 +202,6 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
)
}
await syncWorkflowWebhooks(workflowId, workflowState.blocks)
// Extract and persist custom tools to database
try {
const workspaceId = workflowData.workspaceId
@@ -290,213 +287,3 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
}
}
function getSubBlockValue<T = unknown>(block: BlockState, subBlockId: string): T | undefined {
const value = block.subBlocks?.[subBlockId]?.value
if (value === undefined || value === null) {
return undefined
}
return value as T
}
async function syncWorkflowWebhooks(
workflowId: string,
blocks: Record<string, any>
): Promise<void> {
await syncBlockResources(workflowId, blocks, {
resourceName: 'webhook',
subBlockId: 'webhookId',
buildMetadata: buildWebhookMetadata,
applyMetadata: upsertWebhookRecord,
})
}
interface WebhookMetadata {
triggerPath: string
provider: string | null
providerConfig: Record<string, any>
}
const CREDENTIAL_SET_PREFIX = 'credentialSet:'
function buildWebhookMetadata(block: BlockState): WebhookMetadata | null {
const triggerId =
getSubBlockValue<string>(block, 'triggerId') ||
getSubBlockValue<string>(block, 'selectedTriggerId')
const triggerConfig = getSubBlockValue<Record<string, any>>(block, 'triggerConfig') || {}
const triggerCredentials = getSubBlockValue<string>(block, 'triggerCredentials')
const triggerPath = getSubBlockValue<string>(block, 'triggerPath') || block.id
const triggerDef = triggerId ? getTrigger(triggerId) : undefined
const provider = triggerDef?.provider || null
// Handle credential sets vs individual credentials
const isCredentialSet = triggerCredentials?.startsWith(CREDENTIAL_SET_PREFIX)
const credentialSetId = isCredentialSet
? triggerCredentials!.slice(CREDENTIAL_SET_PREFIX.length)
: undefined
const credentialId = isCredentialSet ? undefined : triggerCredentials
const providerConfig = {
...(typeof triggerConfig === 'object' ? triggerConfig : {}),
...(credentialId ? { credentialId } : {}),
...(credentialSetId ? { credentialSetId } : {}),
...(triggerId ? { triggerId } : {}),
}
return {
triggerPath,
provider,
providerConfig,
}
}
async function upsertWebhookRecord(
workflowId: string,
block: BlockState,
webhookId: string,
metadata: WebhookMetadata
): Promise<void> {
const providerConfig = metadata.providerConfig as Record<string, unknown>
const credentialSetId = providerConfig?.credentialSetId as string | undefined
// For credential sets, delegate to the sync function which handles fan-out
if (credentialSetId && metadata.provider) {
const { syncWebhooksForCredentialSet } = await import('@/lib/webhooks/utils.server')
const { getProviderIdFromServiceId } = await import('@/lib/oauth')
const oauthProviderId = getProviderIdFromServiceId(metadata.provider)
const requestId = crypto.randomUUID().slice(0, 8)
// Extract base config (without credential-specific fields)
const {
credentialId: _cId,
credentialSetId: _csId,
userId: _uId,
...baseConfig
} = providerConfig
try {
await syncWebhooksForCredentialSet({
workflowId,
blockId: block.id,
provider: metadata.provider,
basePath: metadata.triggerPath,
credentialSetId,
oauthProviderId,
providerConfig: baseConfig as Record<string, any>,
requestId,
})
logger.info('Synced credential set webhooks during workflow save', {
workflowId,
blockId: block.id,
credentialSetId,
})
} catch (error) {
logger.error('Failed to sync credential set webhooks during workflow save', {
workflowId,
blockId: block.id,
credentialSetId,
error,
})
}
return
}
// For individual credentials, use the existing single webhook logic
const [existing] = await db.select().from(webhook).where(eq(webhook.id, webhookId)).limit(1)
if (existing) {
const needsUpdate =
existing.blockId !== block.id ||
existing.workflowId !== workflowId ||
existing.path !== metadata.triggerPath
if (needsUpdate) {
await db
.update(webhook)
.set({
workflowId,
blockId: block.id,
path: metadata.triggerPath,
provider: metadata.provider || existing.provider,
providerConfig: Object.keys(metadata.providerConfig).length
? metadata.providerConfig
: existing.providerConfig,
isActive: true,
updatedAt: new Date(),
})
.where(eq(webhook.id, webhookId))
}
return
}
await db.insert(webhook).values({
id: webhookId,
workflowId,
blockId: block.id,
path: metadata.triggerPath,
provider: metadata.provider,
providerConfig: metadata.providerConfig,
credentialSetId: null,
isActive: true,
createdAt: new Date(),
updatedAt: new Date(),
})
logger.info('Recreated missing webhook after workflow save', {
workflowId,
blockId: block.id,
webhookId,
})
}
interface BlockResourceSyncConfig<T> {
resourceName: string
subBlockId: string
buildMetadata: (block: BlockState, resourceId: string) => T | null
applyMetadata: (
workflowId: string,
block: BlockState,
resourceId: string,
metadata: T
) => Promise<void>
}
async function syncBlockResources<T>(
workflowId: string,
blocks: Record<string, any>,
config: BlockResourceSyncConfig<T>
): Promise<void> {
const blockEntries = Object.values(blocks || {}).filter(Boolean) as BlockState[]
if (blockEntries.length === 0) return
for (const block of blockEntries) {
const resourceId = getSubBlockValue<string>(block, config.subBlockId)
if (!resourceId) continue
const metadata = config.buildMetadata(block, resourceId)
if (!metadata) {
logger.warn(`Skipping ${config.resourceName} sync due to invalid configuration`, {
workflowId,
blockId: block.id,
resourceId,
resourceName: config.resourceName,
})
continue
}
try {
await config.applyMetadata(workflowId, block, resourceId, metadata)
} catch (error) {
logger.error(`Failed to sync ${config.resourceName}`, {
workflowId,
blockId: block.id,
resourceId,
resourceName: config.resourceName,
error,
})
}
}
}

View File

@@ -4,29 +4,29 @@
*
* @vitest-environment node
*/
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockDatabase,
databaseMock,
defaultMockUser,
mockAuth,
mockCryptoUuid,
mockUser,
setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'
} from '@sim/testing'
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
describe('Workflow Variables API Route', () => {
let authMocks: ReturnType<typeof mockAuth>
let databaseMocks: ReturnType<typeof createMockDatabase>
const mockGetWorkflowAccessContext = vi.fn()
beforeEach(() => {
vi.resetModules()
setupCommonApiMocks()
mockCryptoUuid('mock-request-id-12345678')
authMocks = mockAuth(mockUser)
authMocks = mockAuth(defaultMockUser)
mockGetWorkflowAccessContext.mockReset()
vi.doMock('@sim/db', () => databaseMock)
vi.doMock('@/lib/workflows/utils', () => ({
getWorkflowAccessContext: mockGetWorkflowAccessContext,
}))
@@ -203,10 +203,6 @@ describe('Workflow Variables API Route', () => {
isWorkspaceOwner: false,
})
databaseMocks = createMockDatabase({
update: { results: [{}] },
})
const variables = {
'var-1': {
id: 'var-1',

View File

@@ -1,5 +1,5 @@
import { createMockRequest, mockAuth, mockConsoleLogger } from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest, mockAuth, mockConsoleLogger } from '@/app/api/__test-utils__/utils'
describe('Workspace Invitations API Route', () => {
const mockWorkspace = { id: 'workspace-1', name: 'Test Workspace' }

View File

@@ -12,6 +12,7 @@ import { HydrationErrorHandler } from '@/app/_shell/hydration-error-handler'
import { QueryProvider } from '@/app/_shell/providers/query-provider'
import { SessionProvider } from '@/app/_shell/providers/session-provider'
import { ThemeProvider } from '@/app/_shell/providers/theme-provider'
import { TooltipProvider } from '@/app/_shell/providers/tooltip-provider'
import { season } from '@/app/_styles/fonts/season/season'
export const viewport: Viewport = {
@@ -208,7 +209,9 @@ export default function RootLayout({ children }: { children: React.ReactNode })
<ThemeProvider>
<QueryProvider>
<SessionProvider>
<BrandedLayout>{children}</BrandedLayout>
<TooltipProvider>
<BrandedLayout>{children}</BrandedLayout>
</TooltipProvider>
</SessionProvider>
</QueryProvider>
</ThemeProvider>

View File

@@ -21,12 +21,15 @@ import {
Combobox,
Connections,
Copy,
Cursor,
DatePicker,
DocumentAttachment,
Duplicate,
Expand,
Eye,
FolderCode,
FolderPlus,
Hand,
HexSimple,
Input,
Key as KeyIcon,
@@ -991,11 +994,14 @@ export default function PlaygroundPage() {
{ Icon: ChevronDown, name: 'ChevronDown' },
{ Icon: Connections, name: 'Connections' },
{ Icon: Copy, name: 'Copy' },
{ Icon: Cursor, name: 'Cursor' },
{ Icon: DocumentAttachment, name: 'DocumentAttachment' },
{ Icon: Duplicate, name: 'Duplicate' },
{ Icon: Expand, name: 'Expand' },
{ Icon: Eye, name: 'Eye' },
{ Icon: FolderCode, name: 'FolderCode' },
{ Icon: FolderPlus, name: 'FolderPlus' },
{ Icon: Hand, name: 'Hand' },
{ Icon: HexSimple, name: 'HexSimple' },
{ Icon: KeyIcon, name: 'Key' },
{ Icon: Layout, name: 'Layout' },

View File

@@ -1,15 +1,12 @@
'use client'
import { Tooltip } from '@/components/emcn'
import { season } from '@/app/_styles/fonts/season/season'
export default function TemplatesLayoutClient({ children }: { children: React.ReactNode }) {
return (
<Tooltip.Provider delayDuration={600} skipDelayDuration={0}>
<div className={`${season.variable} relative flex min-h-screen flex-col font-season`}>
<div className='-z-50 pointer-events-none fixed inset-0 bg-white' />
{children}
</div>
</Tooltip.Provider>
<div className={`${season.variable} relative flex min-h-screen flex-col font-season`}>
<div className='-z-50 pointer-events-none fixed inset-0 bg-white' />
{children}
</div>
)
}

View File

@@ -77,7 +77,7 @@ export function DeleteChunkModal({
</p>
</ModalBody>
<ModalFooter>
<Button variant='active' disabled={isDeleting} onClick={onClose}>
<Button variant='default' disabled={isDeleting} onClick={onClose}>
Cancel
</Button>
<Button variant='destructive' onClick={handleDeleteChunk} disabled={isDeleting}>

View File

@@ -392,7 +392,7 @@ export function DocumentTagsModal({
return (
<Modal open={open} onOpenChange={handleClose}>
<ModalContent>
<ModalContent size='sm'>
<ModalHeader>
<div className='flex items-center justify-between'>
<span>Document Tags</span>
@@ -486,7 +486,7 @@ export function DocumentTagsModal({
/>
)}
{tagNameConflict && (
<span className='text-[11px] text-[var(--text-error)]'>
<span className='text-[12px] text-[var(--text-error)]'>
A tag with this name already exists
</span>
)}
@@ -639,7 +639,7 @@ export function DocumentTagsModal({
/>
)}
{tagNameConflict && (
<span className='text-[11px] text-[var(--text-error)]'>
<span className='text-[12px] text-[var(--text-error)]'>
A tag with this name already exists
</span>
)}

View File

@@ -48,7 +48,7 @@ import { ActionBar } from '@/app/workspace/[workspaceId]/knowledge/[id]/componen
import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider'
import { useContextMenu } from '@/app/workspace/[workspaceId]/w/components/sidebar/hooks'
import { useDocument, useDocumentChunks, useKnowledgeBase } from '@/hooks/kb/use-knowledge'
import { knowledgeKeys } from '@/hooks/queries/knowledge'
import { knowledgeKeys, useDocumentChunkSearchQuery } from '@/hooks/queries/knowledge'
const logger = createLogger('Document')
@@ -313,69 +313,22 @@ export function Document({
isFetching: isFetchingChunks,
} = useDocumentChunks(knowledgeBaseId, documentId, currentPageFromURL)
const [searchResults, setSearchResults] = useState<ChunkData[]>([])
const [isLoadingSearch, setIsLoadingSearch] = useState(false)
const [searchError, setSearchError] = useState<string | null>(null)
useEffect(() => {
if (!debouncedSearchQuery.trim()) {
setSearchResults([])
setSearchError(null)
return
const {
data: searchResults = [],
isLoading: isLoadingSearch,
error: searchQueryError,
} = useDocumentChunkSearchQuery(
{
knowledgeBaseId,
documentId,
search: debouncedSearchQuery,
},
{
enabled: Boolean(debouncedSearchQuery.trim()),
}
)
let isMounted = true
const searchAllChunks = async () => {
try {
setIsLoadingSearch(true)
setSearchError(null)
const allResults: ChunkData[] = []
let hasMore = true
let offset = 0
const limit = 100
while (hasMore && isMounted) {
const response = await fetch(
`/api/knowledge/${knowledgeBaseId}/documents/${documentId}/chunks?search=${encodeURIComponent(debouncedSearchQuery)}&limit=${limit}&offset=${offset}`
)
if (!response.ok) {
throw new Error('Search failed')
}
const result = await response.json()
if (result.success && result.data) {
allResults.push(...result.data)
hasMore = result.pagination?.hasMore || false
offset += limit
} else {
hasMore = false
}
}
if (isMounted) {
setSearchResults(allResults)
}
} catch (err) {
if (isMounted) {
setSearchError(err instanceof Error ? err.message : 'Search failed')
}
} finally {
if (isMounted) {
setIsLoadingSearch(false)
}
}
}
searchAllChunks()
return () => {
isMounted = false
}
}, [debouncedSearchQuery, knowledgeBaseId, documentId])
const searchError = searchQueryError instanceof Error ? searchQueryError.message : null
const [selectedChunks, setSelectedChunks] = useState<Set<string>>(new Set())
const [selectedChunk, setSelectedChunk] = useState<ChunkData | null>(null)
@@ -1208,15 +1161,19 @@ export function Document({
<ModalHeader>Delete Document</ModalHeader>
<ModalBody>
<p className='text-[12px] text-[var(--text-secondary)]'>
Are you sure you want to delete "{effectiveDocumentName}"? This will permanently
delete the document and all {documentData?.chunkCount ?? 0} chunk
Are you sure you want to delete{' '}
<span className='font-medium text-[var(--text-primary)]'>
{effectiveDocumentName}
</span>
? This will permanently delete the document and all {documentData?.chunkCount ?? 0}{' '}
chunk
{documentData?.chunkCount === 1 ? '' : 's'} within it.{' '}
<span className='text-[var(--text-error)]'>This action cannot be undone.</span>
</p>
</ModalBody>
<ModalFooter>
<Button
-variant='active'
+variant='default'
onClick={() => setShowDeleteDocumentDialog(false)}
disabled={isDeletingDocument}
>
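
Note: useDocumentChunkSearchQuery is consumed above but its implementation is not shown in this diff. A minimal sketch of what such a hook could look like, assuming TanStack Query v5 and the same paginated chunks endpoint the removed useEffect was calling (names and shapes are illustrative, not the repo's actual code):

import { useQuery } from '@tanstack/react-query'

interface ChunkSearchParams {
  knowledgeBaseId: string
  documentId: string
  search: string
}

// Hypothetical sketch: pages through the chunks endpoint until hasMore is false,
// mirroring the fetch loop this diff removes from the component.
export function useDocumentChunkSearchQuery(
  params: ChunkSearchParams,
  options?: { enabled?: boolean }
) {
  const { knowledgeBaseId, documentId, search } = params
  return useQuery({
    queryKey: ['knowledge', knowledgeBaseId, 'documents', documentId, 'chunk-search', search],
    queryFn: async () => {
      const results: unknown[] = []
      const limit = 100
      let offset = 0
      let hasMore = true
      while (hasMore) {
        const res = await fetch(
          `/api/knowledge/${knowledgeBaseId}/documents/${documentId}/chunks?search=${encodeURIComponent(search)}&limit=${limit}&offset=${offset}`
        )
        if (!res.ok) throw new Error('Search failed')
        const body = await res.json()
        results.push(...(body.data ?? []))
        hasMore = body.pagination?.hasMore ?? false
        offset += limit
      }
      return results
    },
    enabled: options?.enabled ?? true,
  })
}

React Query then owns the loading, error, and caching state the removed code tracked by hand, and the isMounted guard becomes unnecessary because results from stale queries are never delivered to unmounted consumers.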

View File

@@ -1523,15 +1523,16 @@ export function KnowledgeBase({
<ModalHeader>Delete Knowledge Base</ModalHeader>
<ModalBody>
<p className='text-[12px] text-[var(--text-secondary)]'>
Are you sure you want to delete "{knowledgeBaseName}"? This will permanently delete
the knowledge base and all {pagination.total} document
Are you sure you want to delete{' '}
<span className='font-medium text-[var(--text-primary)]'>{knowledgeBaseName}</span>?
This will permanently delete the knowledge base and all {pagination.total} document
{pagination.total === 1 ? '' : 's'} within it.{' '}
<span className='text-[var(--text-error)]'>This action cannot be undone.</span>
</p>
</ModalBody>
<ModalFooter>
<Button
-variant='active'
+variant='default'
onClick={() => setShowDeleteDialog(false)}
disabled={isDeleting}
>
@@ -1549,14 +1550,16 @@ export function KnowledgeBase({
<ModalHeader>Delete Document</ModalHeader>
<ModalBody>
<p className='text-[12px] text-[var(--text-secondary)]'>
Are you sure you want to delete "
{documents.find((doc) => doc.id === documentToDelete)?.filename ?? 'this document'}"?{' '}
<span className='text-[var(--text-error)]'>This action cannot be undone.</span>
Are you sure you want to delete{' '}
<span className='font-medium text-[var(--text-primary)]'>
{documents.find((doc) => doc.id === documentToDelete)?.filename ?? 'this document'}
</span>
? <span className='text-[var(--text-error)]'>This action cannot be undone.</span>
</p>
</ModalBody>
<ModalFooter>
<Button
-variant='active'
+variant='default'
onClick={() => {
setShowDeleteDocumentModal(false)
setDocumentToDelete(null)
@@ -1582,7 +1585,7 @@ export function KnowledgeBase({
</p>
</ModalBody>
<ModalFooter>
-<Button variant='active' onClick={() => setShowBulkDeleteModal(false)}>
+<Button variant='default' onClick={() => setShowBulkDeleteModal(false)}>
Cancel
</Button>
<Button variant='destructive' onClick={confirmBulkDelete} disabled={isBulkOperating}>

View File

@@ -221,14 +221,14 @@ export function AddDocumentsModal({
return (
<Modal open={open} onOpenChange={handleClose}>
-<ModalContent>
+<ModalContent size='md'>
<ModalHeader>Add Documents</ModalHeader>
<ModalBody>
<div className='min-h-0 flex-1 overflow-y-auto'>
<div className='space-y-[12px]'>
{fileError && (
-<p className='text-[11px] text-[var(--text-error)] leading-tight'>{fileError}</p>
+<p className='text-[12px] text-[var(--text-error)] leading-tight'>{fileError}</p>
)}
<div className='flex flex-col gap-[8px]'>
@@ -336,7 +336,7 @@ export function AddDocumentsModal({
<ModalFooter>
<div className='flex w-full items-center justify-between gap-[12px]'>
{uploadError ? (
-<p className='min-w-0 flex-1 truncate text-[11px] text-[var(--text-error)] leading-tight'>
+<p className='min-w-0 flex-1 truncate text-[12px] text-[var(--text-error)] leading-tight'>
{uploadError.message}
</p>
) : (

View File

@@ -306,7 +306,7 @@ export function BaseTagsModal({ open, onOpenChange, knowledgeBaseId }: BaseTagsM
return (
<>
<Modal open={open} onOpenChange={handleClose}>
-<ModalContent>
+<ModalContent size='sm'>
<ModalHeader>
<div className='flex items-center justify-between'>
<span>Tags</span>
@@ -400,7 +400,7 @@ export function BaseTagsModal({ open, onOpenChange, knowledgeBaseId }: BaseTagsM
}}
/>
{tagNameConflict && (
-<span className='text-[11px] text-[var(--text-error)]'>
+<span className='text-[12px] text-[var(--text-error)]'>
A tag with this name already exists
</span>
)}
@@ -417,7 +417,7 @@ export function BaseTagsModal({ open, onOpenChange, knowledgeBaseId }: BaseTagsM
placeholder='Select type'
/>
{!hasAvailableSlots(createTagForm.fieldType) && (
-<span className='text-[11px] text-[var(--text-error)]'>
+<span className='text-[12px] text-[var(--text-error)]'>
No available slots for this type. Choose a different type.
</span>
)}

View File

@@ -77,7 +77,7 @@ export function RenameDocumentModal({
return (
<Modal open={open} onOpenChange={onOpenChange}>
-<ModalContent>
+<ModalContent size='sm'>
<ModalHeader>Rename Document</ModalHeader>
<form onSubmit={handleSubmit} className='flex min-h-0 flex-1 flex-col'>
<ModalBody className='!pb-[16px]'>
@@ -108,7 +108,7 @@ export function RenameDocumentModal({
<ModalFooter>
<div className='flex w-full items-center justify-between gap-[12px]'>
{error ? (
-<p className='min-w-0 flex-1 truncate text-[11px] text-[var(--text-error)] leading-tight'>
+<p className='min-w-0 flex-1 truncate text-[12px] text-[var(--text-error)] leading-tight'>
{error}
</p>
) : (

View File

@@ -332,7 +332,7 @@ export function CreateBaseModal({ open, onOpenChange }: CreateBaseModalProps) {
return (
<Modal open={open} onOpenChange={handleClose}>
-<ModalContent>
+<ModalContent size='lg'>
<ModalHeader>Create Knowledge Base</ModalHeader>
<form onSubmit={handleSubmit(onSubmit)} className='flex min-h-0 flex-1 flex-col'>
@@ -528,7 +528,7 @@ export function CreateBaseModal({ open, onOpenChange }: CreateBaseModalProps) {
)}
{fileError && (
-<p className='text-[11px] text-[var(--text-error)] leading-tight'>{fileError}</p>
+<p className='text-[12px] text-[var(--text-error)] leading-tight'>{fileError}</p>
)}
</div>
</div>
@@ -537,7 +537,7 @@ export function CreateBaseModal({ open, onOpenChange }: CreateBaseModalProps) {
<ModalFooter>
<div className='flex w-full items-center justify-between gap-[12px]'>
{submitStatus?.type === 'error' || uploadError ? (
-<p className='min-w-0 flex-1 truncate text-[11px] text-[var(--text-error)] leading-tight'>
+<p className='min-w-0 flex-1 truncate text-[12px] text-[var(--text-error)] leading-tight'>
{uploadError?.message || submitStatus?.message}
</p>
) : (

View File

@@ -38,7 +38,7 @@ export function DeleteKnowledgeBaseModal({
}: DeleteKnowledgeBaseModalProps) {
return (
<Modal open={isOpen} onOpenChange={onClose}>
-<ModalContent className='w-[400px]'>
+<ModalContent size='sm'>
<ModalHeader>Delete Knowledge Base</ModalHeader>
<ModalBody>
<p className='text-[12px] text-[var(--text-secondary)]'>
@@ -55,7 +55,7 @@ export function DeleteKnowledgeBaseModal({
</p>
</ModalBody>
<ModalFooter>
-<Button variant='active' onClick={onClose} disabled={isDeleting}>
+<Button variant='default' onClick={onClose} disabled={isDeleting}>
Cancel
</Button>
<Button variant='destructive' onClick={onConfirm} disabled={isDeleting}>
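
Note: the recurring edits across these modal files follow one standardization pass: Cancel buttons move from variant='active' to variant='default', error text moves from text-[11px] to text-[12px], and ad-hoc widths such as className='w-[400px]' are replaced by a ModalContent size prop. The size-to-width mapping itself is not shown in this diff; a plausible sketch, where only sm ≈ 400px is implied by the replacement above and the md/lg widths are assumptions:

// Hypothetical sketch of the ModalContent size mapping these diffs rely on.
const MODAL_SIZES = {
  sm: 'w-[400px]', // replaces the ad-hoc className='w-[400px]' usages
  md: 'w-[520px]', // assumed width
  lg: 'w-[640px]', // assumed width
} as const

type ModalSize = keyof typeof MODAL_SIZES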

View File

@@ -98,7 +98,7 @@ export function EditKnowledgeBaseModal({
return (
<Modal open={open} onOpenChange={onOpenChange}>
-<ModalContent>
+<ModalContent size='sm'>
<ModalHeader>Edit Knowledge Base</ModalHeader>
<form onSubmit={handleSubmit(onSubmit)} className='flex min-h-0 flex-1 flex-col'>
@@ -118,7 +118,7 @@ export function EditKnowledgeBaseModal({
data-form-type='other'
/>
{errors.name && (
-<p className='text-[11px] text-[var(--text-error)]'>{errors.name.message}</p>
+<p className='text-[12px] text-[var(--text-error)]'>{errors.name.message}</p>
)}
</div>
@@ -132,7 +132,7 @@ export function EditKnowledgeBaseModal({
className={cn(errors.description && 'border-[var(--text-error)]')}
/>
{errors.description && (
-<p className='text-[11px] text-[var(--text-error)]'>
+<p className='text-[12px] text-[var(--text-error)]'>
{errors.description.message}
</p>
)}
@@ -143,7 +143,7 @@ export function EditKnowledgeBaseModal({
<ModalFooter>
<div className='flex w-full items-center justify-between gap-[12px]'>
{error ? (
-<p className='min-w-0 flex-1 truncate text-[11px] text-[var(--text-error)] leading-tight'>
+<p className='min-w-0 flex-1 truncate text-[12px] text-[var(--text-error)] leading-tight'>
{error}
</p>
) : (

View File

@@ -1,6 +1,5 @@
'use client'
-import { Tooltip } from '@/components/emcn'
import { GlobalCommandsProvider } from '@/app/workspace/[workspaceId]/providers/global-commands-provider'
import { ProviderModelsLoader } from '@/app/workspace/[workspaceId]/providers/provider-models-loader'
import { SettingsLoader } from '@/app/workspace/[workspaceId]/providers/settings-loader'
@@ -13,16 +12,14 @@ export default function WorkspaceLayout({ children }: { children: React.ReactNod
<SettingsLoader />
<ProviderModelsLoader />
<GlobalCommandsProvider>
-<Tooltip.Provider delayDuration={600} skipDelayDuration={0}>
-  <div className='flex h-screen w-full bg-[var(--bg)]'>
-    <WorkspacePermissionsProvider>
-      <div className='shrink-0' suppressHydrationWarning>
-        <Sidebar />
-      </div>
-      {children}
-    </WorkspacePermissionsProvider>
-  </div>
-</Tooltip.Provider>
+<div className='flex h-screen w-full bg-[var(--bg)]'>
+  <WorkspacePermissionsProvider>
+    <div className='shrink-0' suppressHydrationWarning>
+      <Sidebar />
+    </div>
+    {children}
+  </WorkspacePermissionsProvider>
+</div>
</GlobalCommandsProvider>
</>
)

View File

@@ -112,7 +112,7 @@ export function SlackChannelSelector({
{selectedChannel.isPrivate ? 'Private' : 'Public'} channel: #{selectedChannel.name}
</p>
)}
-{error && <p className='text-[11px] text-[var(--text-error)]'>{error}</p>}
+{error && <p className='text-[12px] text-[var(--text-error)]'>{error}</p>}
</div>
)
}

View File

@@ -1,9 +1,10 @@
'use client'
-import { useEffect, useMemo, useState } from 'react'
+import { useMemo } from 'react'
import { X } from 'lucide-react'
import { Badge, Combobox, type ComboboxOption } from '@/components/emcn'
import { Skeleton } from '@/components/ui'
+import { useWorkflows } from '@/hooks/queries/workflows'
interface WorkflowSelectorProps {
workspaceId: string
@@ -25,26 +26,9 @@ export function WorkflowSelector({
onChange,
error,
}: WorkflowSelectorProps) {
-const [workflows, setWorkflows] = useState<Array<{ id: string; name: string }>>([])
-const [isLoading, setIsLoading] = useState(true)
-useEffect(() => {
-  const load = async () => {
-    try {
-      setIsLoading(true)
-      const response = await fetch(`/api/workflows?workspaceId=${workspaceId}`)
-      if (response.ok) {
-        const data = await response.json()
-        setWorkflows(data.data || [])
-      }
-    } catch {
-      setWorkflows([])
-    } finally {
-      setIsLoading(false)
-    }
-  }
-  load()
-}, [workspaceId])
+const { data: workflows = [], isPending: isLoading } = useWorkflows(workspaceId, {
+  syncRegistry: false,
+})
const options: ComboboxOption[] = useMemo(() => {
return workflows.map((w) => ({
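
Note: useWorkflows comes from the shared query hooks and its implementation is not shown in this diff. A minimal sketch of the shape this selector relies on, assuming TanStack Query v5; the syncRegistry option is accepted in the call above, but whatever registry syncing it performs is not visible here, so it is stubbed out:

import { useQuery } from '@tanstack/react-query'

// Hypothetical sketch of the shared workflows query this selector now consumes.
export function useWorkflows(workspaceId: string, _options?: { syncRegistry?: boolean }) {
  return useQuery({
    queryKey: ['workflows', workspaceId],
    queryFn: async (): Promise<Array<{ id: string; name: string }>> => {
      const res = await fetch(`/api/workflows?workspaceId=${workspaceId}`)
      if (!res.ok) throw new Error('Failed to load workflows')
      const body = await res.json()
      return body.data ?? []
    },
  })
}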

View File

@@ -634,7 +634,7 @@ export function NotificationSettings({
}}
/>
{formErrors.webhookUrl && (
-<p className='text-[11px] text-[var(--text-error)]'>{formErrors.webhookUrl}</p>
+<p className='text-[12px] text-[var(--text-error)]'>{formErrors.webhookUrl}</p>
)}
</div>
<div className='flex flex-col gap-[8px]'>
@@ -660,7 +660,7 @@ export function NotificationSettings({
placeholderWithTags='Add email'
/>
{formErrors.emailRecipients && (
-<p className='text-[11px] text-[var(--text-error)]'>{formErrors.emailRecipients}</p>
+<p className='text-[12px] text-[var(--text-error)]'>{formErrors.emailRecipients}</p>
)}
</div>
)}
@@ -707,7 +707,7 @@ export function NotificationSettings({
/>
)}
{formErrors.slackAccountId && (
-<p className='text-[11px] text-[var(--text-error)]'>
+<p className='text-[12px] text-[var(--text-error)]'>
{formErrors.slackAccountId}
</p>
)}
@@ -776,7 +776,7 @@ export function NotificationSettings({
allOptionLabel='All levels'
/>
{formErrors.levelFilter && (
-<p className='text-[11px] text-[var(--text-error)]'>{formErrors.levelFilter}</p>
+<p className='text-[12px] text-[var(--text-error)]'>{formErrors.levelFilter}</p>
)}
</div>
@@ -822,7 +822,7 @@ export function NotificationSettings({
allOptionLabel='All triggers'
/>
{formErrors.triggerFilter && (
-<p className='text-[11px] text-[var(--text-error)]'>{formErrors.triggerFilter}</p>
+<p className='text-[12px] text-[var(--text-error)]'>{formErrors.triggerFilter}</p>
)}
</div>
@@ -938,7 +938,7 @@ export function NotificationSettings({
}
/>
{formErrors.consecutiveFailures && (
-<p className='text-[11px] text-[var(--text-error)]'>
+<p className='text-[12px] text-[var(--text-error)]'>
{formErrors.consecutiveFailures}
</p>
)}
@@ -962,7 +962,7 @@ export function NotificationSettings({
}
/>
{formErrors.failureRatePercent && (
-<p className='text-[11px] text-[var(--text-error)]'>
+<p className='text-[12px] text-[var(--text-error)]'>
{formErrors.failureRatePercent}
</p>
)}
@@ -982,7 +982,7 @@ export function NotificationSettings({
}
/>
{formErrors.windowHours && (
-<p className='text-[11px] text-[var(--text-error)]'>{formErrors.windowHours}</p>
+<p className='text-[12px] text-[var(--text-error)]'>{formErrors.windowHours}</p>
)}
</div>
</div>
@@ -1004,7 +1004,7 @@ export function NotificationSettings({
}
/>
{formErrors.durationThresholdMs && (
-<p className='text-[11px] text-[var(--text-error)]'>
+<p className='text-[12px] text-[var(--text-error)]'>
{formErrors.durationThresholdMs}
</p>
)}
@@ -1028,7 +1028,7 @@ export function NotificationSettings({
}
/>
{formErrors.latencySpikePercent && (
-<p className='text-[11px] text-[var(--text-error)]'>
+<p className='text-[12px] text-[var(--text-error)]'>
{formErrors.latencySpikePercent}
</p>
)}
@@ -1048,7 +1048,7 @@ export function NotificationSettings({
}
/>
{formErrors.windowHours && (
-<p className='text-[11px] text-[var(--text-error)]'>{formErrors.windowHours}</p>
+<p className='text-[12px] text-[var(--text-error)]'>{formErrors.windowHours}</p>
)}
</div>
</div>
@@ -1071,7 +1071,7 @@ export function NotificationSettings({
}
/>
{formErrors.costThresholdDollars && (
-<p className='text-[11px] text-[var(--text-error)]'>
+<p className='text-[12px] text-[var(--text-error)]'>
{formErrors.costThresholdDollars}
</p>
)}
@@ -1094,7 +1094,7 @@ export function NotificationSettings({
}
/>
{formErrors.inactivityHours && (
-<p className='text-[11px] text-[var(--text-error)]'>{formErrors.inactivityHours}</p>
+<p className='text-[12px] text-[var(--text-error)]'>{formErrors.inactivityHours}</p>
)}
</div>
)}
@@ -1116,7 +1116,7 @@ export function NotificationSettings({
}
/>
{formErrors.errorCountThreshold && (
-<p className='text-[11px] text-[var(--text-error)]'>
+<p className='text-[12px] text-[var(--text-error)]'>
{formErrors.errorCountThreshold}
</p>
)}
@@ -1136,7 +1136,7 @@ export function NotificationSettings({
}
/>
{formErrors.windowHours && (
-<p className='text-[11px] text-[var(--text-error)]'>{formErrors.windowHours}</p>
+<p className='text-[12px] text-[var(--text-error)]'>{formErrors.windowHours}</p>
)}
</div>
</div>
@@ -1261,7 +1261,7 @@ export function NotificationSettings({
</Modal>
<Modal open={showDeleteDialog} onOpenChange={setShowDeleteDialog}>
-<ModalContent className='w-[400px]'>
+<ModalContent size='sm'>
<ModalHeader>Delete Notification</ModalHeader>
<ModalBody>
<p className='text-[12px] text-[var(--text-secondary)]'>

View File

@@ -19,6 +19,7 @@ export type CommandId =
| 'clear-terminal-console'
| 'focus-toolbar-search'
| 'clear-notifications'
+| 'fit-to-view'
/**
* Static metadata for a global command.
@@ -104,6 +105,11 @@ export const COMMAND_DEFINITIONS: Record<CommandId, CommandDefinition> = {
shortcut: 'Mod+E',
allowInEditable: false,
},
+'fit-to-view': {
+  id: 'fit-to-view',
+  shortcut: 'Mod+Shift+F',
+  allowInEditable: false,
+},
}
/**
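
Note: the definition above only declares the shortcut string; the keydown matching lives in the command provider and is not part of this diff. A minimal sketch of how a 'Mod+Shift+F'-style string could be matched, where Mod means Cmd on macOS and Ctrl elsewhere (the helper name and parsing are hypothetical):

// Hypothetical sketch: returns true when a keydown event matches a shortcut like 'Mod+Shift+F'.
export function matchesShortcut(event: KeyboardEvent, shortcut: string): boolean {
  const parts = shortcut.split('+')
  const key = parts[parts.length - 1].toLowerCase()
  const wantsMod = parts.includes('Mod')
  const wantsShift = parts.includes('Shift')
  const hasMod = event.metaKey || event.ctrlKey
  return event.key.toLowerCase() === key && hasMod === wantsMod && event.shiftKey === wantsShift
}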

View File

@@ -2,6 +2,7 @@ import { memo, useCallback } from 'react'
import { ArrowLeftRight, ArrowUpDown, Circle, CircleOff, LogOut } from 'lucide-react'
import { Button, Copy, Tooltip, Trash2 } from '@/components/emcn'
import { cn } from '@/lib/core/utils/cn'
+import { isValidStartBlockType } from '@/lib/workflows/triggers/start-block-types'
import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider'
import { useCollaborativeWorkflow } from '@/hooks/use-collaborative-workflow'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
@@ -11,6 +12,16 @@ import { useWorkflowStore } from '@/stores/workflows/workflow/store'
const DEFAULT_DUPLICATE_OFFSET = { x: 50, y: 50 }
+const ACTION_BUTTON_STYLES = [
+  'h-[23px] w-[23px] rounded-[8px] p-0',
+  'border border-[var(--border)] bg-[var(--surface-5)]',
+  'text-[var(--text-secondary)]',
+  'hover:border-transparent hover:bg-[var(--brand-secondary)] hover:!text-[var(--text-inverse)]',
+  'dark:border-transparent dark:bg-[var(--surface-7)] dark:hover:bg-[var(--brand-secondary)]',
+].join(' ')
+const ICON_SIZE = 'h-[11px] w-[11px]'
/**
* Props for the ActionBar component
*/
@@ -87,9 +98,10 @@ export const ActionBar = memo(
const userPermissions = useUserPermissionsContext()
-const isStartBlock = blockType === 'starter' || blockType === 'start_trigger'
+const isStartBlock = isValidStartBlockType(blockType)
const isResponseBlock = blockType === 'response'
const isNoteBlock = blockType === 'note'
+const isSubflowBlock = blockType === 'loop' || blockType === 'parallel'
/**
* Get appropriate tooltip message based on disabled state
@@ -110,10 +122,12 @@ export const ActionBar = memo(
'-top-[46px] absolute right-0',
'flex flex-row items-center',
'opacity-0 transition-opacity duration-200 group-hover:opacity-100',
-'gap-[5px] rounded-[10px] bg-[var(--surface-4)] p-[5px]'
+'gap-[5px] rounded-[10px] p-[5px]',
+'border border-[var(--border)] bg-[var(--surface-2)]',
+'dark:border-transparent dark:bg-[var(--surface-4)]'
)}
>
-{!isNoteBlock && (
+{!isNoteBlock && !isSubflowBlock && (
<Tooltip.Root>
<Tooltip.Trigger asChild>
<Button
@@ -124,14 +138,10 @@ export const ActionBar = memo(
collaborativeBatchToggleBlockEnabled([blockId])
}
}}
-className='hover:!text-[var(--text-inverse)] h-[23px] w-[23px] rounded-[8px] bg-[var(--surface-7)] p-0 text-[var(--text-secondary)] hover:bg-[var(--brand-secondary)]'
+className={ACTION_BUTTON_STYLES}
disabled={disabled}
>
-{isEnabled ? (
-  <Circle className='h-[11px] w-[11px]' />
-) : (
-  <CircleOff className='h-[11px] w-[11px]' />
-)}
+{isEnabled ? <Circle className={ICON_SIZE} /> : <CircleOff className={ICON_SIZE} />}
</Button>
</Tooltip.Trigger>
<Tooltip.Content side='top'>
@@ -140,7 +150,7 @@ export const ActionBar = memo(
</Tooltip.Root>
)}
-{!isStartBlock && !isResponseBlock && (
+{!isStartBlock && !isResponseBlock && !isSubflowBlock && (
<Tooltip.Root>
<Tooltip.Trigger asChild>
<Button
@@ -151,17 +161,17 @@ export const ActionBar = memo(
handleDuplicateBlock()
}
}}
-className='hover:!text-[var(--text-inverse)] h-[23px] w-[23px] rounded-[8px] bg-[var(--surface-7)] p-0 text-[var(--text-secondary)] hover:bg-[var(--brand-secondary)]'
+className={ACTION_BUTTON_STYLES}
disabled={disabled}
>
-<Copy className='h-[11px] w-[11px]' />
+<Copy className={ICON_SIZE} />
</Button>
</Tooltip.Trigger>
<Tooltip.Content side='top'>{getTooltipMessage('Duplicate Block')}</Tooltip.Content>
</Tooltip.Root>
)}
-{!isNoteBlock && (
+{!isNoteBlock && !isSubflowBlock && (
<Tooltip.Root>
<Tooltip.Trigger asChild>
<Button
@@ -172,13 +182,13 @@ export const ActionBar = memo(
collaborativeBatchToggleBlockHandles([blockId])
}
}}
-className='hover:!text-[var(--text-inverse)] h-[23px] w-[23px] rounded-[8px] bg-[var(--surface-7)] p-0 text-[var(--text-secondary)] hover:bg-[var(--brand-secondary)]'
+className={ACTION_BUTTON_STYLES}
disabled={disabled}
>
{horizontalHandles ? (
-<ArrowLeftRight className='h-[11px] w-[11px]' />
+<ArrowLeftRight className={ICON_SIZE} />
) : (
-<ArrowUpDown className='h-[11px] w-[11px]' />
+<ArrowUpDown className={ICON_SIZE} />
)}
</Button>
</Tooltip.Trigger>
@@ -201,10 +211,10 @@ export const ActionBar = memo(
)
}
}}
-className='hover:!text-[var(--text-inverse)] h-[23px] w-[23px] rounded-[8px] bg-[var(--surface-7)] p-0 text-[var(--text-secondary)] hover:bg-[var(--brand-secondary)]'
+className={ACTION_BUTTON_STYLES}
disabled={disabled || !userPermissions.canEdit}
>
-<LogOut className='h-[11px] w-[11px]' />
+<LogOut className={ICON_SIZE} />
</Button>
</Tooltip.Trigger>
<Tooltip.Content side='top'>{getTooltipMessage('Remove from Subflow')}</Tooltip.Content>
@@ -221,10 +231,10 @@ export const ActionBar = memo(
collaborativeBatchRemoveBlocks([blockId])
}
}}
-className='hover:!text-[var(--text-inverse)] h-[23px] w-[23px] rounded-[8px] bg-[var(--surface-7)] p-0 text-[var(--text-secondary)] hover:bg-[var(--brand-secondary)]'
+className={ACTION_BUTTON_STYLES}
disabled={disabled}
>
-<Trash2 className='h-[11px] w-[11px]' />
+<Trash2 className={ICON_SIZE} />
</Button>
</Tooltip.Trigger>
<Tooltip.Content side='top'>{getTooltipMessage('Delete Block')}</Tooltip.Content>
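
Note: isValidStartBlockType replaces the inline 'starter' / 'start_trigger' comparisons in this file and in block-menu below. Its implementation is not shown in this diff; a minimal sketch grounded in the checks it replaces (additional start-block types, if any, are not visible here):

// Hypothetical sketch of the type guard, derived from the inline checks it replaces.
const START_BLOCK_TYPES = new Set(['starter', 'start_trigger'])

export function isValidStartBlockType(type: string | undefined): boolean {
  return type !== undefined && START_BLOCK_TYPES.has(type)
}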

View File

@@ -1,5 +1,6 @@
'use client'
+import type { RefObject } from 'react'
import {
Popover,
PopoverAnchor,
@@ -7,14 +8,49 @@ import {
PopoverDivider,
PopoverItem,
} from '@/components/emcn'
-import type { BlockContextMenuProps } from './types'
+import { isValidStartBlockType } from '@/lib/workflows/triggers/start-block-types'
+/**
+ * Block information for context menu actions
+ */
+export interface BlockInfo {
+  id: string
+  type: string
+  enabled: boolean
+  horizontalHandles: boolean
+  parentId?: string
+  parentType?: string
+}
+/**
+ * Props for BlockMenu component
+ */
+export interface BlockMenuProps {
+  isOpen: boolean
+  position: { x: number; y: number }
+  menuRef: RefObject<HTMLDivElement | null>
+  onClose: () => void
+  selectedBlocks: BlockInfo[]
+  onCopy: () => void
+  onPaste: () => void
+  onDuplicate: () => void
+  onDelete: () => void
+  onToggleEnabled: () => void
+  onToggleHandles: () => void
+  onRemoveFromSubflow: () => void
+  onOpenEditor: () => void
+  onRename: () => void
+  hasClipboard?: boolean
+  showRemoveFromSubflow?: boolean
+  disableEdit?: boolean
+}
/**
* Context menu for workflow block(s).
* Displays block-specific actions in a popover at right-click position.
* Supports multi-selection - actions apply to all selected blocks.
*/
-export function BlockContextMenu({
+export function BlockMenu({
isOpen,
position,
menuRef,
@@ -32,15 +68,13 @@ export function BlockContextMenu({
hasClipboard = false,
showRemoveFromSubflow = false,
disableEdit = false,
-}: BlockContextMenuProps) {
+}: BlockMenuProps) {
const isSingleBlock = selectedBlocks.length === 1
const allEnabled = selectedBlocks.every((b) => b.enabled)
const allDisabled = selectedBlocks.every((b) => !b.enabled)
-const hasStarterBlock = selectedBlocks.some(
-  (b) => b.type === 'starter' || b.type === 'start_trigger'
-)
+const hasStarterBlock = selectedBlocks.some((b) => isValidStartBlockType(b.type))
const allNoteBlocks = selectedBlocks.every((b) => b.type === 'note')
const isSubflow =
isSingleBlock && (selectedBlocks[0]?.type === 'loop' || selectedBlocks[0]?.type === 'parallel')

View File

@@ -0,0 +1,2 @@
+export type { BlockInfo, BlockMenuProps } from './block-menu'
+export { BlockMenu } from './block-menu'

View File

@@ -1,5 +1,6 @@
'use client'
+import type { RefObject } from 'react'
import {
Popover,
PopoverAnchor,
@@ -7,13 +8,40 @@ import {
PopoverDivider,
PopoverItem,
} from '@/components/emcn'
-import type { PaneContextMenuProps } from './types'
/**
- * Context menu for workflow canvas pane.
+ * Props for CanvasMenu component
+ */
+export interface CanvasMenuProps {
+  isOpen: boolean
+  position: { x: number; y: number }
+  menuRef: RefObject<HTMLDivElement | null>
+  onClose: () => void
+  onUndo: () => void
+  onRedo: () => void
+  onPaste: () => void
+  onAddBlock: () => void
+  onAutoLayout: () => void
+  onFitToView: () => void
+  onOpenLogs: () => void
+  onToggleVariables: () => void
+  onToggleChat: () => void
+  onInvite: () => void
+  isVariablesOpen?: boolean
+  isChatOpen?: boolean
+  hasClipboard?: boolean
+  disableEdit?: boolean
+  disableAdmin?: boolean
+  canUndo?: boolean
+  canRedo?: boolean
+  isInvitationsDisabled?: boolean
+}
+/**
+ * Context menu for workflow canvas.
* Displays canvas-level actions when right-clicking empty space.
*/
-export function PaneContextMenu({
+export function CanvasMenu({
isOpen,
position,
menuRef,
@@ -23,6 +51,7 @@ export function PaneContextMenu({
onPaste,
onAddBlock,
onAutoLayout,
+onFitToView,
onOpenLogs,
onToggleVariables,
onToggleChat,
@@ -35,7 +64,7 @@ export function PaneContextMenu({
canUndo = false,
canRedo = false,
isInvitationsDisabled = false,
-}: PaneContextMenuProps) {
+}: CanvasMenuProps) {
return (
<Popover
open={isOpen}
@@ -113,6 +142,14 @@ export function PaneContextMenu({
<span>Auto-layout</span>
<span className='ml-auto opacity-70 group-hover:opacity-100'>L</span>
</PopoverItem>
+<PopoverItem
+  onClick={() => {
+    onFitToView()
+    onClose()
+  }}
+>
+  Fit to View
+</PopoverItem>
{/* Navigation actions */}
<PopoverDivider />

View File

@@ -0,0 +1,2 @@
+export type { CanvasMenuProps } from './canvas-menu'
+export { CanvasMenu } from './canvas-menu'

Some files were not shown because too many files have changed in this diff.