Compare commits

...

361 Commits

Author SHA1 Message Date
Siddharth Ganesan
160aa7b664 Fix handler 2026-03-14 11:31:11 -07:00
Siddharth Ganesan
cd055842e3 Fix lint 2026-03-13 20:44:59 -07:00
Siddharth Ganesan
2c85dea590 Context tags 2026-03-13 20:44:35 -07:00
waleed
ab939eb7dd fix: replace image/* wildcard with explicit supported types in file picker
The image/* accept attribute allowed users to select BMP, TIFF, HEIC,
and other image types that are rejected server-side. Replace with the
exact set of supported image MIME types and extensions to match the
copilot upload validation.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-13 20:42:28 -07:00
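A minimal sketch of the explicit-allowlist approach this commit describes; the constant names and the exact set of types here are illustrative assumptions, not the repository's actual values.

```ts
// Hypothetical allowlist; the real supported set lives in the copilot upload validation.
const SUPPORTED_IMAGE_TYPES: Record<string, string[]> = {
  'image/png': ['.png'],
  'image/jpeg': ['.jpg', '.jpeg'],
  'image/gif': ['.gif'],
  'image/webp': ['.webp'],
}

// Build an explicit accept attribute instead of the overly broad "image/*",
// so the picker cannot offer BMP/TIFF/HEIC files the server would reject anyway.
export const IMAGE_ACCEPT_ATTRIBUTE = Object.entries(SUPPORTED_IMAGE_TYPES)
  .flatMap(([mime, extensions]) => [mime, ...extensions])
  .join(',')

// Usage: <input type="file" accept={IMAGE_ACCEPT_ATTRIBUTE} multiple />
```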
waleed
e0af69c2ef fix: SVG file support in mothership chat and file serving
- Send SVGs as document/text-xml to Claude instead of unsupported
  image/svg+xml, so the mothership can actually read SVG content
- Serve SVGs inline with proper content type and CSP sandbox so
  chat previews render correctly
- Add SVG preview support in file viewer (sandboxed iframe)
- Derive IMAGE_MIME_TYPES from MIME_TYPE_MAPPING to reduce duplication
- Add missing webp to contentTypeMap, SAFE_INLINE_TYPES, binaryExtensions
- Consolidate PREVIEWABLE_EXTENSIONS into preview-panel exports

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-13 20:39:58 -07:00
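A sketch of the inline SVG serving behavior described above, assuming a web-standard Response; the header values are illustrative, not the repository's exact route code.

```ts
// Sketch only: serve SVG markup inline with a sandboxing CSP so scripts embedded
// in the SVG cannot execute when the chat preview renders it.
export function serveSvgInline(svgMarkup: string): Response {
  return new Response(svgMarkup, {
    headers: {
      'Content-Type': 'image/svg+xml',
      'Content-Disposition': 'inline',
      // "sandbox" with no allow-* tokens disables script execution and plugins.
      'Content-Security-Policy': "sandbox; default-src 'none'; style-src 'unsafe-inline'",
    },
  })
}
```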
Theodore Li
7ad813b554 Add download file shortcut on mothership file view 2026-03-13 20:32:48 -07:00
Siddharth Ganesan
7501ab15bb Fix fast edit 2026-03-13 20:19:34 -07:00
waleed
d7a1353975 fix build, speed up tests by up to 40% 2026-03-13 20:04:55 -07:00
Emir Karabeg
7cc013e523 subagent thinking text 2026-03-13 20:04:45 -07:00
waleed
9953fda800 clamp logs panel 2026-03-13 19:44:52 -07:00
waleed
ed7ac935e4 React Query best practices, UI alignment in restore 2026-03-13 19:34:44 -07:00
Emir Karabeg
709f91fd29 improvements: ui/ux around mothership 2026-03-13 19:15:49 -07:00
Siddharth Ganesan
0c9ab10b12 Lint 2026-03-13 19:09:33 -07:00
Vikhyath Mondreti
39477d970b Merge branch 'feat/mothership-copilot' of github.com:simstudioai/sim into feat/mothership-copilot 2026-03-13 19:04:41 -07:00
Vikhyath Mondreti
afaad519d9 fix type errors 2026-03-13 19:04:28 -07:00
Theodore Li
f5ae4686a2 feat(restore) Add restore endpoints and ui (#3570)
* Add restore endpoints and ui

* Derive toast from notification

* Auth user if workspaceid not found

* Fix recently deleted ui

* Add restore error toast

* Fix deleted at timestamp mismatch

---------

Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-13 22:03:40 -04:00
waleed
22c2571661 standardize back buttons in settings 2026-03-13 18:43:35 -07:00
waleed
4f21ceb049 fix(settings): add spacing to Sim Keys toggle and replace Sim Mailer icon with Send
Add 24px top margin to the "Allow personal Sim keys" toggle so it doesn't
sit right below the empty state. Replace the Mail envelope icon for Sim
Mailer with a new Send (paper plane) icon matching the emcn icon style.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-13 18:41:03 -07:00
Vikhyath Mondreti
af434a121f Merge branch 'staging' into feat/mothership-copilot 2026-03-13 18:40:36 -07:00
Waleed
54e14f47f9 fix(notifications): polish modal styling, credential display, and trigger filters (#3571)
* fix(notifications): polish modal styling, credential display, and trigger filters

- Show credential display name instead of raw account ID in Slack account selector
- Fix label styling to use default Label component (text-primary) for consistency
- Fix modal body spacing with proper top padding after tab bar
- Replace list-card skeleton with form-field skeleton matching actual layout
- Replace custom "Select a Slack account first" box with disabled Combobox (dependsOn pattern)
- Use proper Label component in WorkflowSelector with consistent gap spacing
- Add overflow badge pattern (slice + +N) to level and trigger filter badges
- Use dynamic trigger options from getTriggerOptions() instead of hardcoded CORE_TRIGGER_TYPES
- Relax API validation to accept integration trigger types (z.string instead of z.enum)
- Deduplicate account rows from credential leftJoin in accounts API
- Extract getTriggerOptions() to module-level constants to avoid per-render calls

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* fix(notifications): address PR review feedback

- Restore accountId in displayName fallback chain (credentialDisplayName || accountId || providerId)
- Add .default([]) to triggerFilter in create schema to preserve backward compatibility
- Treat empty triggerFilter as "match all" in notification matching logic
- Remove unreachable overflow badge for levelFilter (only 2 possible values)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-13 18:20:19 -07:00
Vikhyath Mondreti
7e740e617b improvement(copilot): state persistence, subflow recreation, dynamic handle topologies (#3569)
* improvement(copilot): state persistence, subflow recreation, dynamic handle topologies

* address comments
2026-03-13 17:47:02 -07:00
waleed
cbe8fe300d fix(knowledge) use consistent empty state for documents page
Replace the centered "No documents yet" text with the standard Resource
table empty state (column headers + create row), matching all other
resource pages. Move "Upload documents" from header action to table
create row as "New documents".

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-13 17:03:44 -07:00
waleed
4948fc8af2 upgrade turbo 2026-03-13 16:49:55 -07:00
waleed
1d36de26a0 updated docs styling, added FAQs, updated content 2026-03-13 16:47:36 -07:00
Siddharth Ganesan
b2919baa8e Fix fast edit route 2026-03-13 15:33:37 -07:00
waleed
dd314decf8 update docs styling, add delete confirmation on inbox 2026-03-13 15:18:01 -07:00
Theodore Li
5ba7a7e53a fix(resource) handle resource deletion (#3568)
* Add handle dragging tab to input chat

* Add back delete tools

* Handle deletions properly with resources view

* Fix lint

* Add permissions checking

* Skip resource_added event when resource is deleted

* Pass workflow id as context

---------

Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-13 14:36:45 -07:00
waleed
0de4f73e75 added docs for sim mailer 2026-03-13 13:13:41 -07:00
waleed
f9db9c0e33 added agentmail domain for mailer 2026-03-13 13:07:42 -07:00
waleed
5ad18083cc fix(inbox): fetch real attachment binary from presigned URL and persist for chat display
The AgentMail attachment endpoint returns JSON metadata with a download_url,
not raw binary. We were base64-encoding the JSON text and sending it to the
LLM, causing provider rejection. Now we parse the metadata, fetch the actual
file from the presigned URL, upload it to copilot storage, and persist it on
the chat message so images render inline with previews.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-13 13:01:00 -07:00
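A sketch of the two-step fetch this commit describes; the metadata field names (notably download_url) come from the commit message, while the endpoint wiring and auth header are assumptions.

```ts
// Field names follow the commit message; the endpoint URL and auth header are assumptions.
interface AttachmentMetadata {
  download_url: string
  filename?: string
  content_type?: string
}

export async function fetchAttachmentBinary(metadataUrl: string, apiKey: string): Promise<Buffer> {
  // Step 1: the attachment endpoint returns JSON metadata, not the file bytes.
  const metaRes = await fetch(metadataUrl, { headers: { Authorization: `Bearer ${apiKey}` } })
  if (!metaRes.ok) throw new Error(`attachment metadata fetch failed: ${metaRes.status}`)
  const meta = (await metaRes.json()) as AttachmentMetadata

  // Step 2: download the real file from the presigned URL in the metadata.
  const fileRes = await fetch(meta.download_url)
  if (!fileRes.ok) throw new Error(`attachment download failed: ${fileRes.status}`)
  return Buffer.from(await fileRes.arrayBuffer())
}
```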
waleed
56ac18021b feat(connectors): add 8 knowledge base connectors — Zendesk, Intercom, ServiceNow, Google Sheets, Microsoft Teams, Discord, Google Calendar, Reddit
Each connector syncs documents into knowledge bases with configurable filtering:

- Zendesk: Help Center articles + support tickets with status/locale filters
- Intercom: Articles + conversations with state filtering
- ServiceNow: KB articles + incidents with state/priority/category filters
- Google Sheets: Spreadsheet tabs as LLM-friendly row-by-row documents
- Microsoft Teams: Channel messages (Slack-like pattern) via Graph API
- Discord: Channel messages with bot token auth
- Google Calendar: Events with date range presets and attendee metadata
- Reddit: Subreddit posts with top comments, sort/time filters

All connectors validated against official API docs with bug fixes applied.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-13 13:00:46 -07:00
waleed
41674dedf3 cleanup resource definition 2026-03-13 12:46:19 -07:00
waleed
f2a5694298 feat(connector): add Outlook knowledge base connector with conversation grouping and filtering
Syncs email conversations from Outlook/Office 365 via Microsoft Graph API.
Groups messages by conversationId into single documents. Configurable filters:
folder selection, date range presets, Focused Inbox, KQL search syntax, and
max conversation caps.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-13 12:38:39 -07:00
waleed
9171973c7e feat(connector): add Gmail knowledge base connector with thread-based sync and filtering
Syncs email threads from Gmail into knowledge bases with configurable filters:
label scoping, date range presets, promotions/social exclusion, Gmail search
syntax support, and max thread caps to keep KB size manageable.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-13 12:15:31 -07:00
waleed
2f7b90b117 fix(sim-mailer): download email attachments and pass to LLM as multimodal content
Attachments were only passed as metadata text in the email body. Now downloads
actual file bytes from AgentMail, converts via createFileContent (same path as
interactive chat), and sends as fileAttachments to the orchestrator. Also
parallelizes attachment fetching with workspace context loading, and downloads
multiple attachments concurrently via Promise.allSettled.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-13 12:12:44 -07:00
waleed
85c5da62ff refactor(resource): remove logs-specific escape hatches from Resource abstraction
Logs now composes ResourceHeader + ResourceOptionsBar + ResourceTable directly
instead of using Resource with contentOverride/overlay escape hatches. Removes
contentOverride, onLoadMore, hasMore, isLoadingMore from ResourceProps. Adds
ColumnOption to barrel export and fixes table.tsx internal import.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-13 12:08:53 -07:00
Siddharth Ganesan
f33cf83fc5 Kb args 2026-03-13 11:39:58 -07:00
Waleed
33bb01bccb feat(sim-mailer): email inbox for mothership with chat history and plan gating (#3558)
* feat(sim-mailer): email inbox for mothership with chat history and plan gating

* revert hardcoded ff

* fix(inbox): address PR review comments - plan enforcement, idempotency, webhook auth

- Enforce Max plan at API layer: hasInboxAccess() now checks subscription tier (>= 25k credits or enterprise)
- Add idempotency guard to executeInboxTask() to prevent duplicate emails on Trigger.dev retries
- Add AGENTMAIL_WEBHOOK_SECRET env var for webhook signature verification (Bearer token)

* improvement(inbox): harden security and efficiency from code audit

- Use crypto.timingSafeEqual for webhook secret comparison (prevents timing attacks)
- Atomic claim in executor: WHERE status='received' prevents duplicate processing on retries
- Parallelize hasInboxAccess + getUserEntityPermissions in all API routes (reduces latency)
- Truncate email body at webhook insertion (50k char limit, prevents unbounded DB storage)
- Harden escapeAttr with angle bracket and single quote escaping
- Rename use-inbox.ts to inbox.ts (matches hooks/queries/ naming convention)

* fix(inbox): replace Bearer token auth with proper Svix HMAC-SHA256 webhook verification

- Use per-workspace webhook secret from DB instead of global env var
- Verify AgentMail/Svix signatures: HMAC-SHA256 over svix-id.timestamp.body
- Timing-safe comparison via crypto.timingSafeEqual
- Replay protection via timestamp tolerance (5 min window)
- Join mothershipInboxWebhook in workspace lookup (zero additional DB calls)
- Remove dead AGENTMAIL_WEBHOOK_SECRET env var
- Select only needed workspace columns in webhook handler

* fix(inbox): require webhook secret — reject requests when secret is missing

Previously, if the webhook secret was missing from the DB (corrupted state),
the handler would skip verification entirely and process the request
unauthenticated. Now all three conditions are hard requirements: secret must
exist in DB, Svix headers must be present, and signature must verify.

* fix(inbox): address second round of PR review comments

- Exclude rejected tasks from rate limit count to prevent DoS via spam
- Strip raw HTML from LLM output before marked.parse to prevent XSS in emails
- Track responseSent flag to prevent duplicate emails when DB update fails after send

* fix(inbox): address third round of PR review comments

- Use dynamic isHosted from feature-flags instead of hardcoded true
- Atomic JSON append for chat message persistence (eliminates read-modify-write race)
- Handle cutIndex === 0 in stripQuotedReply (body starts with quote)
- Clean up orphan mothershipInboxWebhook row on enableInbox rollback
- Validate status query parameter against enum in tasks API

* fix(inbox): validate cursor param, preserve code blocks in HTML stripping

- Validate cursor date before using in query (return 400 for invalid)
- Split on fenced code blocks before stripping HTML tags to preserve
  code examples in email responses

* fix(inbox): return 500 on webhook server errors to enable Svix retries

* fix(inbox): remove isHosted guard from hasInboxAccess — feature flag is sufficient

* fix(inbox): prevent double-enable from deleting webhook secret row

* fix(inbox): null-safe stripThinkingTags, encode URL params, surface remove-sender errors

- Guard against null result.content in stripThinkingTags
- Use encodeURIComponent on all AgentMail API path parameters
- Surface handleRemoveSender errors to the user instead of swallowing

* improvement(inbox): remove unused types, narrow SELECT queries, fix optimistic ID collision

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(inbox): add keyboard accessibility to clickable task rows

* fix(inbox): use Svix library for webhook verification, fix responseSent flag, prevent inbox enumeration

- Replace manual HMAC-SHA256 verification with official Svix library per AgentMail docs
- Fix responseSent flag: only set true when email delivery actually succeeds
- Return consistent 401 for unknown inbox and bad signature to prevent enumeration
- Make AgentMailInbox.organization_id optional to match API docs

* chore(db): rebase inbox migration onto feat/mothership-copilot (0172 → 0173)

Sync schema with target branch and regenerate migration as 0173
to avoid conflicts with 0172_silky_magma on feat/mothership-copilot.

* fix(db): rebase inbox migration to 0173 after feat/mothership-copilot divergence

Target branch added 0172_silky_magma, so our inbox migration is now 0173_youthful_stryfe.

* fix(db): regenerate inbox migration after rebase on feat/mothership-copilot

* fix(inbox): case-insensitive email match and sanitize javascript: URIs in email HTML

- Use lower() in isSenderAllowed SQL to match workspace members regardless
  of email case stored by auth provider
- Strip javascript:, vbscript:, and data: URIs from marked HTML output to
  prevent XSS in outbound email responses

* fix(inbox): case-insensitive email match in resolveUserId

Consistent with the isSenderAllowed fix — uses lower() so mixed-case
stored emails match correctly, preventing silent fallback to workspace owner.

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-13 03:20:35 -07:00
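The inbox PR above lands on the official Svix library for webhook verification, with a missing secret treated as a hard rejection and an identical 401 for unknown inboxes and bad signatures. A minimal sketch of that flow follows; the handler shape and secret lookup are assumptions, but `new Webhook(secret).verify(rawBody, headers)` is the library's documented API.

```ts
import { Webhook } from 'svix'

export function verifyAgentMailWebhook(
  rawBody: string,
  headers: Record<string, string | null>,
  webhookSecret: string | null
): boolean {
  // Hard requirement: a missing secret means the request is rejected,
  // never processed unauthenticated.
  if (!webhookSecret) return false

  const svixId = headers['svix-id']
  const svixTimestamp = headers['svix-timestamp']
  const svixSignature = headers['svix-signature']
  if (!svixId || !svixTimestamp || !svixSignature) return false

  try {
    // Throws on a bad signature or a timestamp outside the replay-protection window.
    new Webhook(webhookSecret).verify(rawBody, {
      'svix-id': svixId,
      'svix-timestamp': svixTimestamp,
      'svix-signature': svixSignature,
    })
    return true
  } catch {
    // Callers should return the same 401 for unknown inbox and bad signature
    // to avoid inbox enumeration.
    return false
  }
}
```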
PlaneInABottle
92290029f0 fix(execution): queued execution finalization and async correlation (#3535)
* fix(execution): finalize runs before wrapper recovery

* fix(async): preserve execution correlation across queued runs

* fix(webhooks): pass correlation into preprocessing

* style(webhooks): normalize webhook executor formatting

* fix(async): avoid pre-starting queued execution logs

Let executeWorkflowCore own normal-path logging start so queued workflow and schedule executions persist the richer deployment and environment metadata instead of an earlier placeholder start record.

* fix(async): harden execution finalization guards

Prevent leaked core finalization markers from accumulating while keeping outer recovery paths idempotent. Preserve best-effort logging completion by reusing settled completion promises instead of reopening duplicate terminal writes.

* fix(async): preserve outcomes during cleanup

Keep execution finalization cleanup best-effort so cancellation cleanup failures do not overwrite successful or failed outcomes. Restore webhook processor formatting to the repository Biome style to avoid noisy formatter churn.

* fix(async): keep execution finalization state consistent

Retry minimal logging for early failures, only mark core finalization after a log row actually completes, and let paused completions fall back cleanly.

* fix(async): clean stale finalization guards

Scan all finalized execution ids during TTL cleanup so refreshed keys cannot keep expired guards alive, and cover the reused-id ordering regression.

* fix(async): retry failed error finalization

Allow error finalization to retry after a non-error completion and fallback both fail, and always persist failed/error semantics for completeWithError.

* fix(webhooks): reuse preprocessing execution ids

Thread preprocessing execution identity into queued webhook execution so both phases share the same correlation and logs.

---------

Co-authored-by: test <test@example.com>
2026-03-13 02:55:58 -07:00
Vikhyath Mondreti
6a71daf77f improvement(refactor): move to soft deletion of resources + reliability improvements (#3561)
* improvement(deletion): migrate to soft deletion of resources

* progress

* scoping fixes

* round of fixes

* deduplicated name on workflow import

* fix tests

* add migration

* cleanup dead code

* address bugbot comments

* optimize query
2026-03-13 02:34:45 -07:00
PlaneInABottle
d84cba6d19 chore(self-hosting): add health check endpoint (#3562)
Add a simple API health route for deployment platforms and container probes, with focused route coverage.

Co-authored-by: test <test@example.com>
2026-03-13 02:10:39 -07:00
Theodore Li
8dbdebd01b Feat(references) add @ to reference sim resources (#3560)
* feat(chat) add at sign

* Address bugbot issues

* Remove extra chatcontext defs

* Add table and file to schema

* Add icon to chip for files

---------

Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-13 04:32:37 -04:00
Theodore Li
2d8899b2ff feat(context) pass resource tab as context (#3555)
* feat(context) add currently open resource file to context for agent

* Simplify resource resolution

* Skip initialize vfs

* Restore ff

* Add back try catch

* Remove redundant code

* Remove json serialization/deserialization loop

---------

Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-13 01:08:31 -04:00
Emir Karabeg
7a1b0a99e6 reverted task logic 2026-03-12 21:09:00 -07:00
Emir Karabeg
0ba69d5992 improvement: notifications, terminal, globals 2026-03-12 21:03:06 -07:00
Emir Karabeg
3613a3aef6 improvement(ui): dropdown menus, icons, globals 2026-03-12 20:03:11 -07:00
Theodore Li
af35717a89 Fix new resource tab button not appearing on tasks 2026-03-12 19:58:06 -07:00
Theodore Li
295978a38b feat(tab) allow user to control resource tabs
* Make resources persist to backend

* Use colored squares for workflows

* Add click and drag functionality to resource

* Fix expanding panel logic

* Reduce duplication, reading resource also opens up resource panel

* Move resource dropdown to own file

* Handle renamed resources

* Clicking already open tab should just switch to tab

---------

Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-12 22:36:37 -04:00
Waleed
b7c76888c7 feat(logs): add workflow trigger type for sub-workflow executions (#3554)
* feat(logs): add workflow trigger type for sub-workflow executions

* fix(logs): align workflow filter color with blue-secondary badge variant
2026-03-12 18:32:15 -07:00
Waleed
7bd03cfb33 feat(mothership): server-persisted unread task indicators via SSE (#3549)
* feat(mothership): server-persisted unread task indicators via SSE

Replace fragile client-side polling + timer-based green flash with
server-persisted lastSeenAt semantics, real-time SSE push via Redis
pub/sub, and dot overlay UI on the Blimp icon.

- Add lastSeenAt column to copilotChats for server-persisted read state
- Add Redis/local pub/sub singleton for task status events (started,
  completed, created, deleted, renamed)
- Add SSE endpoint (GET /api/mothership/events) with heartbeat and
  workspace-scoped filtering
- Add mark-read endpoint (POST /api/mothership/chats/read)
- Publish SSE events from chat, rename, delete, and auto-title handlers
- Add useTaskEvents hook for client-side SSE subscription
- Add useMarkTaskRead mutation with optimistic update
- Replace timer logic in sidebar with TaskStatus state machine
  (running/unread/idle) and dot overlay using brand color variables
- Mark tasks read on mount and stream completion in home page
- Fix security: add userId check to delete WHERE clause
- Fix: bump updatedAt on stream completion
- Fix: set lastSeenAt on rename to prevent false-positive unread

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: address PR review feedback

- Return 404 when delete finds no matching chat (was silent no-op)
- Move log after ownership check so it only fires on actual deletion
- Publish completed SSE event from stop route so sidebar dot clears on abort

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: backfill last_seen_at in migration to prevent false unread dots

Existing rows would have last_seen_at = NULL after migration, causing
all past completed tasks to show as unread. Backfill sets last_seen_at
to updated_at for all existing rows.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: timestamp mismatch on task creation + wasSendingRef leak across navigation

- Pass updatedAt explicitly alongside lastSeenAt on chat creation so
  both use the same JS timestamp (DB defaultNow() ran later, causing
  updatedAt > lastSeenAt → false unread)
- Reset wasSendingRef when chatId changes to prevent a stale true
  from task A triggering a redundant markRead on task B

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: mark-read fires for inline-created chats + encode workspaceId in SSE URL

Expose resolvedChatId from useChat so home.tsx can mark-read even when
chatId prop stays undefined after replaceState URL update. Also
URL-encode workspaceId in EventSource URL as a defensive measure.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: auto-focus home input on initial view + fix sidebar task click handling

Auto-focus the textarea when the initial home view renders. Also fix
sidebar task click to always call onMultiSelectClick so selection state
stays consistent.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: auto-title sets lastSeenAt + move started event inside DB guard

Auto-title now sets both updatedAt and lastSeenAt (matching the rename
route pattern) to prevent false-positive unread dots. Also move the
'started' SSE event inside the if(updated) guard so it only fires when
the DB update actually matched a row.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* modified tasks multi select to be just like workflows

* fix

* refactor: extract generic pub/sub and SSE factories + fixes

- Extract createPubSubChannel factory (lib/events/pubsub.ts) to eliminate
  duplicated Redis/EventEmitter boilerplate between task and MCP pub/sub
- Extract createWorkspaceSSE factory (lib/events/sse-endpoint.ts) to share
  auth, heartbeat, and cleanup logic across SSE endpoints
- Fix auto-title race suppressing unread status by removing updatedAt/lastSeenAt
  from title-only DB update
- Fix wheel event listener leak in ResourceTabs (RefCallback cleanup was silently
  discarded)
- Fix getFullSelection() missing taskIds (inconsistent with hasAnySelection)
- Deduplicate SSE_RESPONSE_HEADERS to spread from shared SSE_HEADERS
- Hoist isSttAvailable to module-level constant to avoid per-render IIFE

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-12 18:13:15 -07:00
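On the client side, the unread-indicator PR above subscribes to the SSE endpoint and feeds events into the TaskStatus state machine. A sketch of that subscription follows, assuming the endpoint path from the commit message and an illustrative event payload shape.

```ts
// The payload shape and callback wiring are assumptions; only the SSE plumbing is the point.
type TaskEvent = {
  type: 'started' | 'completed' | 'created' | 'deleted' | 'renamed'
  chatId: string
}

export function subscribeToTaskEvents(
  workspaceId: string,
  onEvent: (event: TaskEvent) => void
): () => void {
  const source = new EventSource(
    `/api/mothership/events?workspaceId=${encodeURIComponent(workspaceId)}`
  )

  source.onmessage = (message) => {
    try {
      // Heartbeat or non-JSON frames are skipped rather than tearing down the stream.
      onEvent(JSON.parse(message.data) as TaskEvent)
    } catch {
      /* ignore malformed frames */
    }
  }

  // Return an unsubscribe function for useEffect cleanup.
  return () => source.close()
}
```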
Siddharth Ganesan
3ab5ca0596 Credential id field 2026-03-12 13:55:23 -07:00
Siddharth Ganesan
cc9399dfbf Credential tags 2026-03-12 13:53:04 -07:00
Vikhyath Mondreti
0dd70b78d9 fix(autolayout): targeted autolayout heuristic restored (#3536)
* fix(autolayout): targeted autolayout heuristic restored

* fix autolayout boundary cases

* more fixes

* address comments

* on conflict updates

* address more comments

* fix relative position scope

* fix type omission

* address bugbot comment
2026-03-12 13:43:37 -07:00
Theodore Li
6b3ca1f4c1 fix(stop) Add stop of mothership-run workflows, persist stop messages (#3538)
* Connect play stop workflow in embedded view to workflow

* Fix stop not actually stopping workflow

* Fix ui not showing stopped by user

* Lint fix

* Plumb cancellation through system

* Stopping mothership chat stops workflow

* Remove extra fluff

* Persist blocks on cancellation

* Add root level stopped by user

---------

Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-12 15:56:44 -04:00
Waleed
5d57faf050 fix(mothership): insert copilot-created workflows at top of list (#3537)
* feat(mothership): remove resource-level delete tools from copilot

Remove delete operations for workflows, folders, tables, and files
from the mothership copilot to prevent destructive actions via AI.
Row-level and column-level deletes are preserved.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(mothership): insert copilot-created workflows at top of list

* fix(mothership): server-side top-insertion sort order and deduplicate registry logic

* fix(mothership): include folder sort orders when computing top-insertion position

* fix(mothership): use getNextWorkflowColor instead of hardcoded color

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-12 12:46:50 -07:00
Waleed
0aeb860f6e fix: stop sidebar from auto-collapsing when resource panel appears (#3540)
The sidebar was forcibly collapsed whenever a resource (e.g. workflow)
first appeared in the resource panel during a task. This was disruptive
on larger screens where users want to keep both the sidebar and resource
panel visible simultaneously.

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-12 12:41:41 -07:00
waleed
bb944d6a54 feat(mothership): remove resource-level delete tools from copilot
Remove delete operations for workflows, folders, tables, and files
from the mothership copilot to prevent destructive actions via AI.
Row-level and column-level deletes are preserved.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-12 10:50:36 -07:00
Vikhyath Mondreti
1413d8a47c Merge branch 'feat/mothership-copilot' of github.com:simstudioai/sim into feat/mothership-copilot 2026-03-12 10:02:34 -07:00
Theodore Li
f7acc18690 fix(agent) subagent and main agent text being merged without spacing 2026-03-11 20:52:28 -07:00
Vikhyath Mondreti
60e326f043 edit existing workflow should bring up artifact 2026-03-11 19:01:58 -07:00
Emir Karabeg
4df7e78019 fix: chat scrollbar on sidebar collapse/open 2026-03-11 18:56:07 -07:00
Theodore Li
1266a66838 fix(resource): Hide resources that have been deleted (#3528)
* Hide resources that have been deleted

* Handle table, workflow not found

* Add animation to prevent flash when previous resource was deleted

* Fix animation playing on every switch

* Run workflows client side in mothership to transmit logs

* Fix race condition for animation

* Use shared workflow tool util file

---------

Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-11 21:51:06 -04:00
Theodore Li
68909e71d0 fix(import) fix missing file 2026-03-11 18:41:49 -07:00
Theodore Li
c2bf65fcf1 fix(logs) Run workflows client side in mothership to transmit logs (#3529)
* Run workflows client side in mothership to transmit logs

* Initialize set as constant, prevent duplicate execution

* Fix lint

---------

Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-11 21:28:32 -04:00
waleed
8927807398 feat(mothership): knowledge base resource extraction + Resource/ResourceTable refactor
- Extract KB resources from knowledge subagent respond format (knowledge_bases array)
- Add knowledge_base tool to RESOURCE_TOOL_NAMES and TOOL_UI_METADATA
- Extract ResourceTable as independently composable memoized component
- Move contentOverride/overlay to Resource shell level (not table primitive)
- Remove redundant disableHeaderSort and loadingRows props
- Rename internal sort state for clarity (sort → internalSort, sortOverride → externalSort)
- Export ResourceTable and ResourceTableProps from barrel

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-11 17:55:28 -07:00
waleed
38ee79da85 revert: remove inline rename UI from resource tabs
Keep the workspace_file rename tool for the mothership agent.
Only the UI-side inline rename (double-click tabs) is removed.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-11 17:47:20 -07:00
waleed
28c8afcb96 feat(mothership): inline rename for resource tabs + workspace_file rename tool
- Add double-click inline rename on file and table resource tabs
- Wire useInlineRename + useRenameWorkspaceFile/useRenameTable mutations
- Add rename operation to workspace_file copilot tool (schema, server, router)
- Add knowledge base resource support (type, extraction, rendering, actions)
- Accept optional className on InlineRenameInput for context-specific sizing

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-11 16:55:02 -07:00
waleed
511e3a9011 fix(schedules): release lastQueuedAt lock on all exit paths to prevent stuck schedules
Multiple error/early-return paths in executeScheduleJob and executeJobInline
were exiting without clearing lastQueuedAt, causing the dueFilter to permanently
skip those schedules — resulting in stale "X hours ago" display for nextRunAt.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-11 16:45:03 -07:00
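The fix above amounts to releasing the lastQueuedAt lock on every exit path. A sketch of the try/finally shape, with hypothetical helper names injected as dependencies rather than taken from the repository.

```ts
// Hypothetical helpers; only the control flow is the point.
interface ScheduleJobDeps {
  markQueued: (scheduleId: string) => Promise<void> // sets lastQueuedAt
  executeJob: (scheduleId: string) => Promise<void>
  clearLastQueuedAt: (scheduleId: string) => Promise<void>
}

export async function runScheduledJob(scheduleId: string, deps: ScheduleJobDeps): Promise<void> {
  await deps.markQueued(scheduleId)
  try {
    await deps.executeJob(scheduleId)
  } finally {
    // Runs on success, thrown errors, and early returns alike, so no exit path
    // can leave lastQueuedAt set and the schedule permanently skipped by the due filter.
    await deps.clearLastQueuedAt(scheduleId)
  }
}
```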
waleed
6fd871268e fix(settings): navigate back to origin page instead of always going home
Use sessionStorage to store the return URL when entering settings, and
use router.replace for tab switches so history doesn't accumulate.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-11 16:28:03 -07:00
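A sketch of the sessionStorage return-URL pattern this commit describes; the storage key and helper names are assumptions.

```ts
// Hypothetical storage key and helpers; the real implementation may differ.
const SETTINGS_RETURN_KEY = 'settings:return-url'

// Called when the user enters settings, capturing where they came from.
export function rememberSettingsOrigin(): void {
  sessionStorage.setItem(SETTINGS_RETURN_KEY, window.location.pathname + window.location.search)
}

// Called by the settings back button; falls back to home if nothing was stored.
export function getSettingsReturnUrl(fallback = '/'): string {
  return sessionStorage.getItem(SETTINGS_RETURN_KEY) ?? fallback
}
```

Tab switches inside settings would then use router.replace rather than router.push, so the back button returns to the origin in one step instead of stepping through each visited tab.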
Emir Karabeg
26d62fc176 improvement: schedules, auto-scroll 2026-03-11 15:01:00 -07:00
Theodore Li
1628ffea40 fix(download-file): render correct file download link for mothership (#3522)
* fix(download-file): render correct file download link for mothership

* Fix unnecessary call

* Use simple strip instead of db lookup and moving behavior

* Make regex strip more strict

---------

Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-11 17:36:28 -04:00
Emir Karabeg
95efa50f69 improvement: home, sidebar 2026-03-11 14:28:57 -07:00
Theodore Li
a29717e7aa fix(remove-speed-hosted-key) Remove maps speed limit hosted key, it's deprecated (#3521)
Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-11 16:24:51 -04:00
Theodore Li
f161c261ef feat(resource-tab-scroll): Allow vertical scrolling to scroll resource tab 2026-03-11 13:07:15 -07:00
Theodore Li
d347b8c4af Feat/add mothership manual workflow runs (#3520)
* Add run and open workflow buttons in workflow preview

* Send log request message after manual workflow run

* Make edges in embedded workflow non-editable

* Change chat to pass in log as additional context

* Revert "Change chat to pass in log as additional context"

This reverts commit e957dffb2f.

* Revert "Send log request message after manual workflow run"

This reverts commit 0fb92751f0.

* Move run and workflow icons to tab bar

* Simplify boolean condition

---------

Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-11 15:59:56 -04:00
Vikhyath Mondreti
10e8eeda67 fix plan display name 2026-03-11 11:11:27 -07:00
Vikhyath Mondreti
7bd2562c99 fix stale query 2026-03-11 11:04:02 -07:00
Vikhyath Mondreti
982e84cbd9 fix tests 2026-03-11 10:56:02 -07:00
Vikhyath Mondreti
767006b1e6 fix(mothership): lint (#3517)
* fix(mothership): lint

* fix typing
2026-03-11 10:46:25 -07:00
Theodore Li
69820a486a Fix workspace dropdown getting cut off when sidebar is collapsed 2026-03-11 10:06:01 -07:00
waleed
aed74b9573 added back integrations page, reverted secrets page back to old UI 2026-03-11 06:44:27 -07:00
waleed
dc17b9642f autofill fixes 2026-03-11 04:56:31 -07:00
waleed
6fed0195fd fix(home): prevent initial view from being scrollable
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-11 04:46:48 -07:00
waleed
95557bda79 fix(uploads): resolve .md file upload rejection and deduplicate file type utilities
Browsers report empty or application/octet-stream MIME types for .md files,
causing copilot uploads to be rejected. Added resolveFileType() utility that
falls back to extension-based MIME resolution at both client and server
boundaries. Consolidated duplicate MIME mappings into module-level constants,
removed duplicate isImageFileType from copilot module, and replaced hardcoded
ALLOWED_EXTENSIONS with composition from shared validation constants. Also
switched file attachment previews to use shared getDocumentIcon utility.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-11 04:45:02 -07:00
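A sketch of the extension-based fallback described above; the function name mirrors the commit message, but the mapping shown is an illustrative subset, not the shared validation constants themselves.

```ts
// Illustrative subset of an extension-to-MIME fallback; the real mapping is larger.
const EXTENSION_MIME_FALLBACK: Record<string, string> = {
  '.md': 'text/markdown',
  '.txt': 'text/plain',
  '.csv': 'text/csv',
  '.json': 'application/json',
}

export function resolveFileType(fileName: string, reportedMimeType: string): string {
  // Browsers often report '' or application/octet-stream for .md files, so only
  // trust the reported type when it is specific.
  if (reportedMimeType && reportedMimeType !== 'application/octet-stream') {
    return reportedMimeType
  }
  const dot = fileName.lastIndexOf('.')
  const extension = dot >= 0 ? fileName.slice(dot).toLowerCase() : ''
  return EXTENSION_MIME_FALLBACK[extension] ?? reportedMimeType
}
```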
Emir Karabeg
079c7caec3 feat(templates): create home templates 2026-03-11 04:15:01 -07:00
Emir Karabeg
f86e67dbf3 improvement: loading and file dropping 2026-03-11 03:15:19 -07:00
Emir Karabeg
c8098d38e3 improvement: chat 2026-03-11 02:35:36 -07:00
Emir Karabeg
00eb812365 improvement: panel, special tags 2026-03-10 23:43:25 -07:00
waleed
75d2dabafc fix(font): added back old font for emcn code editor 2026-03-10 23:41:06 -07:00
waleed
f5eb76c703 fix(diff-controls): fixed positioning for copilot diff controls 2026-03-10 23:37:17 -07:00
Siddharth Ganesan
b024d63dbf Update oauth cred tool 2026-03-10 23:35:10 -07:00
waleed
aa3be4b1d0 feat(workspace): add workspace color changing, consolidate update hooks, fix popover dismiss
- Add workspace color change via context menu, reusing workflow ColorGrid UI
- Consolidate useUpdateWorkspaceName + useUpdateWorkspaceColor into useUpdateWorkspace
- Fix popover hover submenu dismiss by using DismissableLayerBranch with pointerEvents
- Remove passthrough wrapper for export, reuse Workspace type for capturedWorkspaceRef
- Reorder log columns: workflow first, merge date+time into single column

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-10 23:25:57 -07:00
Vikhyath Mondreti
4509a75a02 improvement(usage): free plan to 1000 credits (#3516)
* improvement(billing): free plan to five dollars

* fix comment

* remove per month terminology from marketing

* generate migration

* remove migration

* add migration back
2026-03-10 23:10:37 -07:00
Emir Karabeg
b34bb643a5 improvement: search modal 2026-03-10 22:54:12 -07:00
waleed
1de25af341 feat(mothership): file attachment indicators, persistence, and chat input improvements
- Show image thumbnails and file-icon cards above user messages in mothership chat
- Persist file attachment metadata (key, filename, media_type, size) in DB with user messages
- Restore attachments from history via /api/files/serve/ URLs so they survive refresh/navigation
- Unify all chat file inputs to use shared CHAT_ACCEPT_ATTRIBUTE constant
- Fix file thumbnail overflow: use flex-wrap instead of hidden horizontal scroll
- Compact attachment cards in floating workflow chat messages

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-10 22:43:45 -07:00
waleed
5d308b3529 fix(mothership): fix hardcoded workflow color, tables drag line overflowing 2026-03-10 22:43:21 -07:00
Emir Karabeg
0bb756be34 ran migrations 2026-03-10 22:31:05 -07:00
Emir Karabeg
3c0da7671a improvement: modals 2026-03-10 22:30:35 -07:00
Theodore Li
a4ac7155f2 feat(email-footer) Add "sent with sim ai" for free users (#3515)
* Add "sent with sim ai" for free users

* Only add prompt injection on free tier

* Add try catch around billing info fetch

---------

Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-11 01:01:31 -04:00
Waleed
89dafb3b47 fix(random): optimized kb connector sync engine, rerenders in tables, files, editors, chat (#3513)
* optimized kb connector sync engine, rerenders in tables, files, editors, chat

* refactor(sidebar): rename onTaskClick to onMultiSelectClick for clarity

Made-with: Cursor

* ack comments, add docsFailed
2026-03-10 21:42:52 -07:00
Emir Karabeg
87e2910e19 improvement: thinking 2026-03-10 21:29:08 -07:00
Emir Karabeg
f780aaffc6 improvement(ux): streaming 2026-03-10 21:08:43 -07:00
Siddharth Ganesan
a4e5e30c1a Don't drop subagent text 2026-03-10 20:50:42 -07:00
Siddharth Ganesan
f448134618 Subagent tool call persistence 2026-03-10 20:34:58 -07:00
Emir Karabeg
7b53137d22 fix(sidebar): task navigation 2026-03-10 19:48:57 -07:00
Emir Karabeg
c0c139cccf fix(sidebar): workspace header collapse 2026-03-10 19:45:42 -07:00
Siddharth Ganesan
9854e43687 Plan prompt 2026-03-10 19:45:21 -07:00
Siddharth Ganesan
663524971c Usage limit 2026-03-10 19:40:10 -07:00
Emir Karabeg
80830f5311 improvement: sidebar, chat 2026-03-10 19:35:38 -07:00
Theodore Li
7151e81fff fix(api-key-reminder) Add reminder on hosted keys that API key isn't needed (#3512)
* Add reminder on hosted keys that API key isn't needed

* Fix test case

---------

Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-10 22:34:29 -04:00
Vikhyath Mondreti
5815d9f556 fix(credentials): autosync behaviour cross workspace (#3511)
* fix(credentials): autosync behaviour cross workspace

* address comments
2026-03-10 19:23:44 -07:00
waleed
e6c511a6f3 feat: add task multi-select, context menu, and subscription UI updates
Add shift-click range selection, cmd/ctrl-click toggle, and right-click
context menu for tasks in sidebar matching workflow/folder patterns.
Update subscription settings tab UI.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-10 19:02:17 -07:00
Siddharth Ganesan
a1f917d552 Fix tests 2026-03-10 18:49:19 -07:00
Siddharth Ganesan
3b7fc9a971 Fix tool call ordering 2026-03-10 18:34:10 -07:00
waleed
ab1205efec fix: horizontal scroll in embedded table by replacing overflow-hidden with overflow-clip
Cell content spans used Tailwind's `truncate` (overflow: hidden), creating
scroll containers that consumed trackpad wheel events on macOS without
propagating to the actual scroll ancestor. Replaced with overflow-clip
which clips identically but doesn't create a scroll container. Also moved
focus target from outer container to the scroll div for correctness.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-10 18:29:13 -07:00
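A before/after sketch of the class swap described above (the cell markup is illustrative): `overflow: hidden` establishes a scroll container that can swallow macOS trackpad wheel events, while `overflow: clip` clips identically without creating one.

```tsx
// Before: <span className="truncate">{value}</span>   // truncate => overflow: hidden
// After: clip without creating a scroll container; text-overflow still applies.
export function CellContent({ value }: { value: string }) {
  return <span className="overflow-clip text-ellipsis whitespace-nowrap">{value}</span>
}
```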
waleed
aa0ce77005 fix: manual table creation starts with 1 row, 1 column
Manual tables now create with a single 'name' column and 1 row instead
of 2 columns and 20 rows. Copilot tables remain at 0 rows, 0 columns.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-10 18:07:56 -07:00
Siddharth Ganesan
fdf4f033ad Fixes 2026-03-10 18:06:42 -07:00
Theodore Li
5a86b4e979 Fix schema mismatch (#3510)
Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-10 20:31:25 -04:00
Emir Karabeg
2587406b2a chat metadata 2026-03-10 17:27:48 -07:00
Emir Karabeg
18dacc53bf improvement: chat, workspace header 2026-03-10 16:49:25 -07:00
waleed
4a135aa871 revert: remove initialRowCount from copilot table creation
Copilot populates its own data after creating a table, so pre-creating
20 empty rows causes data to start at position 21 with empty rows above.
initialRowCount only makes sense for the manual UI creation flow.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-10 16:48:31 -07:00
Siddharth Ganesan
b5c2070baf Fix signaling 2026-03-10 16:47:57 -07:00
waleed
865108109f fix: unique constraint check crash and copilot table initial rows
- Fix TypeError in updateColumnConstraints: db.execute() returns a
  plain array with postgres-js, not { rows: [...] }. The .rows.length
  access always crashed, making "Set unique" completely broken.

- Add initialRowCount: 20 to copilot table creation so tables created
  via chat have the same empty rows as tables created from the UI.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-10 16:15:37 -07:00
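A sketch of the driver difference behind the unique-constraint crash; the query and table are hypothetical, but the result shape is the point: with postgres-js, `db.execute()` resolves to the row array itself, so reading `.rows.length` throws.

```ts
import { sql, type SQL } from 'drizzle-orm'

// Minimal database surface for the sketch; the real code uses the app's Drizzle instance.
type Db = { execute: (query: SQL) => Promise<Array<Record<string, unknown>>> }

// Hypothetical duplicate check for a "Set unique" operation on a JSONB data column.
export async function columnHasDuplicates(db: Db, tableId: string, columnKey: string): Promise<boolean> {
  const rows = await db.execute(sql`
    select 1
    from table_rows
    where table_id = ${tableId}
    group by data ->> ${columnKey}
    having count(*) > 1
    limit 1
  `)
  // postgres-js: db.execute() resolves to the rows themselves.
  // The buggy version read rows.rows.length, which always threw a TypeError.
  return rows.length > 0
}
```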
Vikhyath Mondreti
5362f7417f feat(autosave): files and chunk editor autosave with debounce + refetch (#3508)
* feat(files): debounced autosave while editing

* address review comments

* more comments
2026-03-10 15:36:00 -07:00
waleed
de36e332d7 fix dysfunctional unique operation in tables 2026-03-10 14:56:44 -07:00
Waleed
2afe917e4e improvement(tables): fix cell editing flash, batch API docs, and UI polish (#3507)
* fix: show text cursor in chunk editor and ensure textarea fills container

Add cursor-text to the editor wrapper so the whole area shows a text
cursor. Click on empty space focuses the textarea. Changed textarea from
h-full/w-full to flex-1/min-h-0 so it properly fills the flex container.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* improvement(tables): fix cell editing flash, add batch API docs, and UI polish

Fix stale-data flash when saving inline cell edits by using TanStack Query's
isPending+variables pattern instead of manual cache writes. Also adds OpenAPI
docs for batch table endpoints, DatePicker support in row modal, duplicate row
in context menu, and styling improvements.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: remove dead resolveColumnFromEvent callback

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: unify paste undo into single create-rows action

Batch-created rows from paste now push one `create-rows` undo entry
instead of N individual `create-row` entries, so a single Ctrl+Z
reverses the entire paste operation.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: validate dates in inline editor and displayToStorage

InlineDateEditor now validates computed values via Date.parse before
saving, preventing invalid strings like "hello" from being sent to the
server. displayToStorage now rejects out-of-range month/day values
(e.g. 13/32) instead of producing invalid YYYY-MM-DD strings.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: accept ISO date format in inline date editor

Fall back to raw draft input when displayToStorage returns null, so
valid ISO dates like "2024-03-15" pasted or typed directly are
accepted instead of silently discarded. Date.parse still validates
the final value.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: add ISO date support to displayToStorage and fix picker Escape

displayToStorage now recognizes YYYY-MM-DD input directly, so ISO
dates typed or pasted work correctly for both saving and picker sync.

DatePicker Escape now refocuses the input instead of saving, so the
user can press Escape again to cancel or Enter to confirm — matching
the expected cancel behavior.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: remove dead paste boundary check

The totalR guard in handlePaste could never trigger since totalR
included pasteRows.length, making targetRow always < totalR.
Remove the unused variable and simplify the selection focus calc.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* update openapi

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-10 14:37:49 -07:00
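A sketch of the TanStack Query isPending + variables pattern the PR above uses to avoid the stale-data flash on inline cell edits; the endpoint, query keys, and prop names are assumptions.

```ts
import { useMutation, useQueryClient } from '@tanstack/react-query'

type CellEdit = { rowId: string; columnKey: string; value: string }

// Hypothetical endpoint and query key; the pattern is what matters.
export function useUpdateCell(tableId: string) {
  const queryClient = useQueryClient()
  return useMutation({
    mutationFn: async (edit: CellEdit) => {
      const res = await fetch(`/api/tables/${tableId}/rows/${edit.rowId}`, {
        method: 'PATCH',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ [edit.columnKey]: edit.value }),
      })
      if (!res.ok) throw new Error('cell update failed')
    },
    // Refetch after the server confirms instead of writing the cache manually.
    onSettled: () => queryClient.invalidateQueries({ queryKey: ['table-rows', tableId] }),
  })
}

// While the mutation is in flight, render the submitted value rather than the
// stale cached one; that is what removes the flash.
export function displayedCellValue(
  cached: string,
  mutation: ReturnType<typeof useUpdateCell>,
  rowId: string,
  columnKey: string
): string {
  const { isPending, variables } = mutation
  if (isPending && variables && variables.rowId === rowId && variables.columnKey === columnKey) {
    return variables.value
  }
  return cached
}
```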
Siddharth Ganesan
6f694e5201 Update vfs to handle hosted keys 2026-03-10 14:23:39 -07:00
Theodore Li
fa1ae1398e feat(clean-hosted-keys) Remove eleven labs, browseruse. Tweak firecrawl and mistral key impl (#3503)
* Remove eleven labs, browseruse, and firecrawl

* Remove creditsUsed output

* Add back mistral hosting for mistral blocks

* Add back firecrawl since they queue up concurrent requests

* Fix price calculation, remove agent since it's super long-running and will clog up the queue

* Define hosting per tool

* Remove redundant token finding

---------

Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-10 16:53:56 -04:00
Siddharth Ganesan
6db8bc4934 Add piping 2026-03-10 12:36:28 -07:00
Vikhyath Mondreti
9ca539b626 improvement(billing): isAnnual metadata + docs updates (#3506)
* improvement(billing): on demand toggling and infinite limits

* store stripe metadata to distinguish annual vs monthly

* update docs

* address bugbot
2026-03-10 12:23:45 -07:00
Siddharth Ganesan
4751612b5f Make mothership block use long input instead of prompt input 2026-03-10 11:06:03 -07:00
Siddharth Ganesan
2e8e578ede Streaming fix -- need to test more 2026-03-10 11:01:14 -07:00
Waleed
8afa184c64 feat: inline chunk editor and table batch ops with undo/redo (#3504)
* feat: inline chunk editor and table batch operations with undo/redo

Replace modal-based chunk editing/creation with inline editor following
the files tab pattern (state-based view toggle with ResourceHeader).
Add batch update API endpoint, undo/redo support, and Popover-based
context menus for tables.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: remove icons from table context menu PopoverItems

Icons were incorrectly carried over from the DropdownMenu migration.
PopoverItems in this codebase use text-only labels.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: restore DropdownMenu for table context menu

The table-level context menu was incorrectly migrated to Popover during
conflict resolution. Only the row-level context menu uses Popover; the
table context menu should remain DropdownMenu with icons, matching the
base branch.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: bound cross-page chunk navigation polling to max 50 retries

Prevent indefinite polling if page data never loads during
chunk navigation across page boundaries.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: navigate to last page after chunk creation for multi-page documents

After creating a chunk, navigate to the last page (where new chunks
append) before selecting it. This prevents the editor from showing
"Loading chunk..." when the new chunk is not on the current page.
The loading state breadcrumb remains as an escape hatch for edge cases.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: add duplicate rowId validation to BatchUpdateByIdsSchema

Adds a .refine() check to reject duplicate rowIds in batch update
requests, consistent with the positions uniqueness check on batch insert.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: address PR review comments

- Fix disableEdit logic: use || instead of && so connector doc chunks
  cannot be edited from context menu (row click still opens viewer)
- Add uniqueness validation for rowIds in BatchUpdateByIdsSchema
- Fix inconsistent bg token: bg-background → bg-[var(--bg)] in Pagination

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: remove duplicate rowId uniqueness refine on BatchUpdateByIdsSchema

The refine was applied both on the inner updates array and the outer
object. Keep only the inner array refine which is cleaner.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: address additional PR review comments

- Fix stale rowId after create-row redo: patch undo stack with new row
  ID using patchUndoRowId so subsequent undo targets the correct row
- Fix text color tokens in Pagination: use CSS variable references
  (text-[var(--text-body)], text-[var(--text-secondary)]) instead of
  Tailwind semantic tokens for consistency with the rest of the file

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: remove dead code and fix type errors in table context menu

Remove unused `onAddData` prop and `isEmptyCell` variable from row context
menu (introduced in PR but never wired to JSX). Fix type errors in
optimistic update spreads by removing unnecessary `as Record<string, unknown>`
casts that lost the RowData type.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: prevent false "Saved" status on invalid content and mark fire-and-forget goToPage calls

ChunkEditor.handleSave now throws on empty/oversized content instead of
silently returning, so the parent's catch block correctly sets saveStatus
to 'error'. Also added explicit `void` to unawaited goToPage(1) calls
in filter handlers to signal intentional fire-and-forget.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: handle stale totalPages in handleChunkCreated for new-page edge case

When creating a chunk that spills onto a new page, totalPages in the
closure is stale. Now polls displayChunksRef for the new chunk, and if
not found, checks totalPagesRef for an updated page count and navigates
to the new last page before continuing to poll.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-10 01:55:54 -07:00
Emir Karabeg
9b10e4464e fix: copilot, improvement: tables, mothership 2026-03-09 21:52:57 -07:00
waleed
5c6797a0bd revert hardcoded ff 2026-03-09 21:09:29 -07:00
waleed
57f5f6e59a improvement(sidebar): match workspace switcher popover width to sidebar
Use Radix UI's built-in --radix-popover-trigger-width CSS variable
instead of hardcoded 160px so the popover matches the trigger width
and responds to sidebar resizing.
2026-03-09 21:08:24 -07:00
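A sketch of the Radix sizing trick described above; the switcher markup is illustrative, but `--radix-popover-trigger-width` is the CSS variable Radix sets on the popover content element.

```tsx
import * as Popover from '@radix-ui/react-popover'

// The content width tracks the trigger (here, the sidebar-width switcher) as it resizes.
export function WorkspaceSwitcher({ label }: { label: string }) {
  return (
    <Popover.Root>
      <Popover.Trigger className="w-full">{label}</Popover.Trigger>
      <Popover.Portal>
        <Popover.Content className="w-[var(--radix-popover-trigger-width)]" align="start">
          {/* workspace list goes here */}
        </Popover.Content>
      </Popover.Portal>
    </Popover.Root>
  )
}
```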
waleed
f26a375f3c chore: lint fixes 2026-03-09 20:59:46 -07:00
waleed
1cb8a28727 fix(settings): align skeleton layouts with actual component structures
- Fix list item gap from 12px to 8px across all skeletons (API keys, custom tools, credentials, MCP)
- Add OAuth icon placeholder to credential skeleton
- Fix credential button group gap from 8px to 4px
- Remove incorrect gap-[4px] from credential-sets text column
- Rebuild debug skeleton to match real layout (description + input/button row)
- Add scrollable wrapper to BYOK skeleton with more representative item count
2026-03-09 20:59:18 -07:00
waleed
f62fddfac5 improvement(settings): add search bar to skeleton loading states
Skeletons now include the search bar (and action button where applicable) so the layout matches the final component 1:1. Eliminates layout shift when the dynamic chunk loads — search bar area is already reserved by the skeleton.
2026-03-09 20:45:35 -07:00
waleed
5184580dbd Merge branch 'improvement/settings-perf' into feat/mothership-copilot 2026-03-09 20:35:39 -07:00
waleed
1aa9dc9ea7 fix(settings): use emcn Input for file input in general settings 2026-03-09 20:34:52 -07:00
waleed
37a1d66127 fix(byok): use ui Input for search bar to match other settings pages 2026-03-09 20:33:03 -07:00
waleed
5a5bf5ca7e fix(byok): use EMCN Input for search field instead of ui Input
Replace @/components/ui Input with the already-imported EmcnInput for design-system consistency.
2026-03-09 20:29:45 -07:00
waleed
4ccc1e5997 fix(settings): include theme sync in client-side prefetch queryFn
Hover-based prefetchGeneralSettings now calls syncThemeToNextThemes, matching the useGeneralSettings hook behavior so theme updates aren't missed when prefetch refreshes stale cache.
2026-03-09 20:29:06 -07:00
waleed
80f032b9be update byok page 2026-03-09 20:22:45 -07:00
waleed
63927e5afc fix(settings): extract shared response mappers to prevent server/client shape drift
Addresses PR review feedback — prefetch.ts duplicated response mapping logic from client hooks. Extracted mapGeneralSettingsResponse and mapUserProfileResponse as shared functions used by both client fetch and server prefetch.
2026-03-09 20:21:43 -07:00
waleed
ab61f5188c fix(settings): use emcn Skeleton in extracted skeleton files 2026-03-09 20:09:19 -07:00
waleed
94914b848e fix: bust browser cache for workspace file downloads
The downloadFile function was using a plain fetch() that honored the
aggressive cache headers, causing newly created files to download empty.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-09 20:07:27 -07:00
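A sketch of a cache-bypassing download along the lines of this fix; the serve URL shape echoes other commits in this log, and the helper name is an assumption.

```ts
// Hypothetical helper; the route path mirrors /api/files/serve/ mentioned elsewhere in this log.
export async function downloadFileFresh(fileKey: string, fileName: string): Promise<void> {
  // cache: 'no-store' bypasses the browser HTTP cache, so a just-created file is
  // fetched from the server instead of resolving to a stale (empty) cached body.
  const res = await fetch(`/api/files/serve/${encodeURIComponent(fileKey)}`, { cache: 'no-store' })
  if (!res.ok) throw new Error(`download failed: ${res.status}`)

  const blob = await res.blob()
  const objectUrl = URL.createObjectURL(blob)
  const anchor = document.createElement('a')
  anchor.href = objectUrl
  anchor.download = fileName
  anchor.click()
  URL.revokeObjectURL(objectUrl)
}
```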
waleed
4c7e63cf7a improvement(settings): SSR prefetch, code splitting, dedicated skeletons 2026-03-09 20:01:29 -07:00
Theodore Li
8fc75a6e9d feat(hosted-key-services) Add hosted key for multiple services (#3461)
* feat(hosted keys): Implement serper hosted key

* Handle required fields correctly for hosted keys

* Add rate limiting (3 tries, exponential backoff)

* Add custom pricing, switch to exa as first hosted key

* Add telemetry

* Consolidate byok type definitions

* Add warning comment if default calculation is used

* Record usage to user stats table

* Fix unit tests, use cost property

* Include more metadata in cost output

* Fix disabled tests

* Fix spacing

* Fix lint

* Move knowledge cost restructuring away from generic block handler

* Migrate knowledge unit tests

* Lint

* Fix broken tests

* Add user based hosted key throttling

* Refactor hosted key handling. Add optimistic handling of throttling for custom throttle rules.

* Remove research as hosted key. Recommend BYOK if throttling occurs

* Make adding api keys adjustable via env vars

* Remove vestigial fields from research

* Make billing actor id required for throttling

* Switch to round robin for api key distribution

* Add helper method for adding hosted key cost

* Strip leading double underscores to avoid breaking change

* Lint fix

* Remove falsy check in favor for explicit null check

* Add more detailed metrics for different throttling types

* Fix _costDollars field

* Handle hosted agent tool calls

* Fail loudly if cost field isn't found

* Remove any type

* Fix type error

* Fix lint

* Fix usage log double logging data

* Fix test

* Add browseruse hosted key

* Add firecrawl and serper hosted keys

* feat(hosted key): Add exa hosted key (#3221)

* feat(hosted keys): Implement serper hosted key

* Handle required fields correctly for hosted keys

* Add rate limiting (3 tries, exponential backoff)

* Add custom pricing, switch to exa as first hosted key

* Add telemetry

* Consolidate byok type definitions

* Add warning comment if default calculation is used

* Record usage to user stats table

* Fix unit tests, use cost property

* Include more metadata in cost output

* Fix disabled tests

* Fix spacing

* Fix lint

* Move knowledge cost restructuring away from generic block handler

* Migrate knowledge unit tests

* Lint

* Fix broken tests

* Add user based hosted key throttling

* Refactor hosted key handling. Add optimistic handling of throttling for custom throttle rules.

* Remove research as hosted key. Recommend BYOK if throttling occurs

* Make adding api keys adjustable via env vars

* Remove vestigial fields from research

* Make billing actor id required for throttling

* Switch to round robin for api key distribution

* Add helper method for adding hosted key cost

* Strip leading double underscores to avoid breaking change

* Lint fix

* Remove falsy check in favor for explicit null check

* Add more detailed metrics for different throttling types

* Fix _costDollars field

* Handle hosted agent tool calls

* Fail loudly if cost field isn't found

* Remove any type

* Fix type error

* Fix lint

* Fix usage log double logging data

* Fix test

---------

Co-authored-by: Theodore Li <teddy@zenobiapay.com>

* Fail fast on cost data not being found

* Add hosted key for google services

* Add hosting configuration and pricing logic for ElevenLabs TTS tools

* Add linkup hosted key

* Add jina hosted key

* Add hugging face hosted key

* Add perplexity hosting

* Add broader metrics for throttling

* Add skill for adding hosted key

* Lint, remove vestigial hosted keys not implemented

* Revert agent changes

* fail fast

* Fix build issue

* Fix build issues

* Fix type error

* Remove byok types that aren't implemented

* Address feedback

* Use default model when model id isn't provided

* Fix cost default issues

* Remove firecrawl error suppression

* Restore original behavior for hugging face

* Add mistral hosted key

* Remove hugging face hosted key

* Fix pricing mismatch in mistral and perplexity

* Add hosted keys for parallel and brand fetch

* Add brandfetch hosted key

* Update types

* Change byok name to parallel_ai

* Add telemetry on unknown models

---------

Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-09 22:56:45 -04:00
Vikhyath Mondreti
9400df6085 Merge branch 'feat/mothership-copilot' of github.com:simstudioai/sim into feat/mothership-copilot 2026-03-09 19:23:06 -07:00
Vikhyath Mondreti
d23afb97c5 fix(credentials): block usage at execution layer without perms + fix invites 2026-03-09 19:22:35 -07:00
Siddharth Ganesan
1ff89cd416 Table batch ops 2026-03-09 19:04:30 -07:00
waleed
b7a6fe574c small table rename bug, files updates not persisting 2026-03-09 18:38:40 -07:00
Siddharth Ganesan
8abe717b85 Fix table column delete 2026-03-09 18:25:07 -07:00
Emir Karabeg
d815568315 improvement: tables, chat 2026-03-09 18:23:52 -07:00
waleed
5dc026c72e upgrade turbo 2026-03-09 18:22:20 -07:00
waleed
86b67823ce update docs 2026-03-09 18:21:53 -07:00
Vikhyath Mondreti
65448766fc Merge branch 'feat/mothership-copilot' of github.com:simstudioai/sim into feat/mothership-copilot 2026-03-09 18:03:49 -07:00
Vikhyath Mondreti
9098f0b805 fix(credentials): exclude regular login methods from credential sync 2026-03-09 18:03:25 -07:00
waleed
78e3d840dd updated document icon 2026-03-09 18:00:26 -07:00
waleed
09af6fb33d improve resizer for file preview for html files 2026-03-09 17:48:01 -07:00
Emir Karabeg
523aff8ab0 improvements(tables): styling improvements 2026-03-09 17:46:27 -07:00
Emir Karabeg
4fe9509e70 styling alignment 2026-03-09 17:46:27 -07:00
Emir Karabeg
9a1cb10d7a improvement(ui): consistent styling 2026-03-09 17:46:27 -07:00
Theodore Li
898f8ce1c1 feat(exa-hosted-key): Restore exa hosted key (#3499)
Co-authored-by: Theodore Li <theo@sim.ai>
2026-03-09 20:40:54 -04:00
Waleed
39334bdf7d fix(tables): one small tables thing (#3497) 2026-03-09 16:43:15 -07:00
Waleed
a6d3b3a9ad improvement(tables): click-to-select navigation, inline rename, column resize (#3496)
* improvement(tables): click-to-select navigation, inline rename, column resize

* fix(tables): address PR review comments

- Add doneRef guard to useInlineRename preventing Enter+blur double-fire
- Fix PATCH error handler: return 500 for non-validation errors, fix unreachable logger.error
- Stop click propagation on breadcrumb rename input

* fix(tables): add rows-affected check in renameTable service

Prevents silent no-op when tableId doesn't match any record.

* fix(tables): useMemo deps + placeholder memo initialCharacter check

- Use primitive editingId/editValue in useMemo deps instead of whole
  useInlineRename object (which creates a new ref every render)
- Add initialCharacter comparison to placeholderPropsAreEqual, matching
  the existing pattern in dataRowPropsAreEqual

* fix(tables): address round 2 review comments

- Mirror name validation (regex + max length) in PatchTableSchema so
  validateTableName failures return 400 instead of 500
- Add .returning() + rows-affected check to renameWorkspaceFile,
  matching the renameTable pattern
- Check response.ok before parsing JSON in useRenameWorkspaceFile,
  matching the useRenameTable pattern

* refactor(tables): reuse InlineRenameInput in BreadcrumbSegment

Replace duplicated inline input markup with the shared component.
Eliminates redundant useRef, useEffect, and input boilerplate.

* fix(tables): set doneRef in cancelRename to prevent blur-triggered save

Escape → cancelRename → input unmounts → blur → submitRename would
save instead of canceling. Now cancelRename sets doneRef like
submitRename does, blocking the subsequent blur handler.

* fix(tables): pointercancel cleanup + typed FileConflictError

- Add pointercancel handler to column resize to prevent listener leaks
  when system interrupts the pointer (touch-action override, etc.)
- Replace stringly-typed error.message.includes('already exists') with
  FileConflictError class for refactor-safe 409 status detection

* fix(tables): stable useCallback dep + rename shadowed variable

- Use listRename.startRename (stable ref) instead of whole listRename
  object in handleContextMenuRename deps
- Rename inner 'target' to 'origin' in arrow-key handler to avoid
  shadowing the outer HTMLElement 'target'

* fix(tables): move class below imports, stable submitRename, clear editingCell

- Move FileConflictError below import statements (import-first convention)
- Make submitRename a stable useCallback([]) by reading editingId and
  editValue through refs (matches existing onSaveRef pattern)
- Add setEditingCell(null) to handleEmptyRowClick for symmetry with
  handleCellClick

* feat(tables): persist column widths in table metadata

Column widths now survive navigation and page reloads. On resize-end,
widths are debounced (500ms) and saved to the table's metadata field
via a new PUT /api/table/[tableId]/metadata endpoint. On load, widths
are seeded from the server once via React Query.

* fix type checking for file viewer

* fix(tables): address review feedback — 4 fixes

1. headerRename.onSave now uses the fileId parameter directly instead
   of the selectedFile closure, preventing rename-wrong-file race
2. updateMetadataMutation uses ref pattern matching mutateRef/createRef
3. Type-to-enter filters non-numeric chars for number columns, non-date
   chars for date columns
4. renameValue only passed to actively-renaming ColumnHeaderMenu,
   preserving React.memo for other columns

* fix(tables): position-based gap rows, insert above/below, consistency fixes

- Fix gap row insert shifting: only shift rows when target position is
  occupied, preventing unnecessary displacement of rows below
- Switch to position-based indexing throughout (positionMap, maxPosition)
  instead of array-index for correct sparse position handling
- Add insert row above/below to context menu
- Use CellContent for pending values in PositionGapRows (matching PlaceholderRows)
- Add belowHeader selection overlay logic to PositionGapRows
- Remove unnecessary 500ms debounce on column width persistence

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix cells nav w keyboard

* added preview panel for html, markdown rendering, completed table

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-09 16:37:48 -07:00
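The review fixes in this PR revolve around a `doneRef` guard so that Enter (or Escape) and the blur that follows don't both commit a rename. A rough sketch of the idea, assuming a `useInlineRename`-style hook; the real hook's shape in the codebase may differ.

```ts
import { useCallback, useRef, useState } from 'react'

// Sketch of the Enter+blur double-fire guard described above. When Enter
// submits (or Escape cancels), the input unmounts and fires a blur; doneRef
// marks the rename as already settled so the blur handler becomes a no-op.
export function useInlineRename(onSave: (value: string) => void) {
  const [editValue, setEditValue] = useState('')
  const [isEditing, setIsEditing] = useState(false)
  const doneRef = useRef(false)

  const startRename = useCallback((initial: string) => {
    doneRef.current = false
    setEditValue(initial)
    setIsEditing(true)
  }, [])

  const submitRename = useCallback(() => {
    if (doneRef.current) return // blur right after Enter: already handled
    doneRef.current = true
    setIsEditing(false)
    onSave(editValue)
  }, [editValue, onSave])

  const cancelRename = useCallback(() => {
    // Also set doneRef here, otherwise Escape -> unmount -> blur would save.
    doneRef.current = true
    setIsEditing(false)
  }, [])

  return { isEditing, editValue, setEditValue, startRename, submitRename, cancelRename }
}
```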
Emir Karabeg
8fd8b1a248 improvement(mothership): chat history and stability 2026-03-09 15:57:13 -07:00
Emir Karabeg
917af6d141 improvement(mothership): chat stability 2026-03-09 15:42:12 -07:00
Vikhyath Mondreti
fe5f809e1a Merge branch 'feat/mothership-copilot' of github.com:simstudioai/sim into feat/mothership-copilot 2026-03-09 15:28:07 -07:00
Vikhyath Mondreti
2f2c2b05e8 feat(templates): landing page templates workflow states 2026-03-09 15:22:14 -07:00
Siddharth Ganesan
0c44332172 File uploads to mothership 2026-03-09 15:19:56 -07:00
Siddharth Ganesan
7c0cd36936 Fix error status 2026-03-09 14:30:46 -07:00
Siddharth Ganesan
2788c68e45 Tool results 2026-03-09 14:24:44 -07:00
Siddharth Ganesan
cf9cc0377d Fix tool call persistence in chat 2026-03-09 14:24:44 -07:00
Emir Karabeg
a091149da4 improvement(mothership): workflow resource 2026-03-09 14:20:59 -07:00
Waleed
64cedfcff7 fix(streaming): smoother streaming with throttled rendering, ResizeObserver scroll, and batched updates (#3471)
* fix(streaming): smoother streaming with throttled rendering, ResizeObserver scroll, and batched updates

- Add useThrottledValue hook (100ms trailing-edge throttle) to gate DOM re-renders during streaming across all chat surfaces
- Replace 100ms setInterval scroll polling with ResizeObserver-based auto-scroll, programmatic scroll timestamp tracking, and nested [data-scrollable] region handling
- Extract processContentBuffer from inline content handler for cleaner code organization in copilot SSE handlers
- Add RAF-based update batching (50ms max interval) to floating chat and home chat streaming paths
- Add useProgressiveList hook for progressive rendering of long conversation histories via requestAnimationFrame

Made-with: Cursor

* ack PR comments

* fix search modal

* more comments

* ack comments

* count

* ack comments

* ack comment
2026-03-09 13:27:33 -07:00
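A minimal sketch of a trailing-edge throttle hook like the `useThrottledValue` described in this PR, assuming a 100ms default interval; it illustrates the technique rather than reproducing the PR's code.

```ts
import { useEffect, useRef, useState } from 'react'

// Trailing-edge throttle: re-render with the latest value at most once per
// `intervalMs`, and always emit the final value once updates stop, so DOM
// re-renders are gated while streamed text arrives faster than it can paint.
export function useThrottledValue<T>(value: T, intervalMs = 100): T {
  const [throttled, setThrottled] = useState(value)
  const lastEmit = useRef(0)
  const timeout = useRef<ReturnType<typeof setTimeout> | null>(null)

  useEffect(() => {
    const now = Date.now()
    const elapsed = now - lastEmit.current

    if (elapsed >= intervalMs) {
      lastEmit.current = now
      setThrottled(value)
      return
    }

    // Not time yet: schedule a trailing emit so the last chunk is not dropped.
    timeout.current = setTimeout(() => {
      lastEmit.current = Date.now()
      setThrottled(value)
    }, intervalMs - elapsed)

    return () => {
      if (timeout.current) clearTimeout(timeout.current)
    }
  }, [value, intervalMs])

  return throttled
}
```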
Vikhyath Mondreti
15db69231f fix tests 2026-03-09 12:22:40 -07:00
Vikhyath Mondreti
7b43091984 Merge branch 'feat/mothership-copilot' of github.com:simstudioai/sim into feat/mothership-copilot 2026-03-09 12:13:34 -07:00
Emir Karabeg
48f280427e feat(mothership): resource viewer 2026-03-09 12:07:06 -07:00
Vikhyath Mondreti
1430eb66de fix(landing): wire agent input to mothership 2026-03-09 11:58:16 -07:00
Siddharth Ganesan
2ace7252f9 Store tool call results 2026-03-09 11:35:20 -07:00
Siddharth Ganesan
bcdfc85ccb Tool updates 2026-03-09 11:28:22 -07:00
Vikhyath Mondreti
e921448bf2 fix(selections): more nested folder inaccuracies 2026-03-09 11:17:43 -07:00
Vikhyath Mondreti
71d8e227bd improvement(folder-selection): folder deselection + selection order should match visual 2026-03-09 11:00:22 -07:00
Siddharth Ganesan
4593a8a471 Table tools 2026-03-09 10:21:12 -07:00
Emir Karabeg
301fdb94ff improvement(tables): multi-select and efficiencies 2026-03-09 10:07:21 -07:00
Emir Karabeg
4afc3bbff8 improvement: logs 2026-03-09 09:13:01 -07:00
waleed
76981c356f update schedule creation ui and run lint 2026-03-09 02:18:43 -07:00
Waleed
4c562c8e04 feat(tables): column operations, row ordering, V1 API (#3488)
* feat(tables): add column operations, row ordering, V1 columns API, and OpenAPI spec

Adds column rename/delete/type change/constraint updates to the tables module,
row ordering via position column, UI metadata schema, V1 public API for column
operations with rate limiting and audit logging, and OpenAPI documentation.

Key changes:
- Service-layer column operations with validation (name pattern, type compatibility, unique/required constraints)
- Position column on user_table_rows with composite index for efficient ordering
- V1 /api/v1/tables/{tableId}/columns endpoint (POST/PATCH/DELETE) with rate limiting and audit
- Shared Zod schemas extracted to table/utils.ts using COLUMN_TYPES constant
- Targeted React Query invalidation (row vs schema mutations) with consistent onSettled usage
- OpenAPI 3.1.0 spec for columns endpoint with code samples
- Position field added to all row response mappings for consistency
- Sort fallback to position ordering when buildSortClause returns null

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(tables): use specific error prefixes instead of broad "Cannot" match

Prevents internal TypeErrors (e.g. "Cannot read properties of undefined")
from leaking as 400 responses. Now matches only domain-specific errors:
"Cannot delete the last column" and "Cannot set column".

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(tables): reject Infinity and NaN in number type compatibility check

Number.isFinite rejects Infinity, -Infinity, and NaN, preventing
non-finite values from passing column type validation.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(tables): invalidate table list on row create/delete for stale rowCount

Row create and delete mutations now invalidate the table list cache since
it includes a computed rowCount. Row updates (which don't change count)
continue to only invalidate row queries.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(tables): add column name length check, deduplicate name gen, reset pagination on clear

- Add MAX_COLUMN_NAME_LENGTH validation to addTableColumn (was missing,
  renameColumn already had it)
- Extract generateColumnName helper to eliminate triplicated logic across
  handleAddColumn, handleInsertColumnLeft, handleInsertColumnRight
- Reset pagination to page 0 when clearing sort/filter to prevent showing
  empty pages after narrowing filters are removed

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: hoist tableId above try block in V1 columns route, add detail invalidation to invalidateRowCount

- V1 columns route: `tableId` was declared inside `try` but referenced in
  `catch` logger.error, causing undefined in error logs. Hoisted `await params`
  above try in all three handlers (POST, PATCH, DELETE).
- invalidateRowCount: added `tableKeys.detail(tableId)` invalidation since the
  single-table GET response includes `rowCount`, which becomes stale after
  row create/delete without this.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: add position to all row mutation responses, remove dead filter code

- Add `position` field to POST (single + batch) and PATCH row responses
  across both internal and V1 routes, matching GET responses and OpenAPI spec.
- Remove unused `filterConfig`, `handleFilterToggle`, `handleFilterClear`,
  and `activeFilters` — dead code left over from merge conflict resolution.
  `handleFilterApply` (the one actually wired to JSX) is preserved.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: invalidateTableSchema now also invalidates table list cache

Column add/rename/delete/update mutations now invalidate tableKeys.list()
since the list endpoint returns schema.columns for each table. Without this,
the sidebar table list would show stale column schemas until staleTime expires.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: replace window.prompt/confirm with emcn Modal dialogs

Replace non-standard browser dialogs with proper emcn Modal components
to match the existing codebase pattern (e.g. delete table confirmation).

- Column rename: Modal with Input field + Enter key support
- Column delete: Modal with destructive confirmation

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-09 02:14:38 -07:00
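One fix above hoists `await params` above the `try` block so the `catch` logger can still reference `tableId`. A hedged sketch of that pattern in a Next.js App Router handler (Next.js 15 passes `params` as a Promise); the route body and logging here are placeholders.

```ts
import { NextResponse } from 'next/server'

// Hypothetical handler shape for illustration. Resolving tableId before the
// try block means the catch logger can reference it instead of logging
// `undefined` when the request fails.
export async function DELETE(
  request: Request,
  { params }: { params: Promise<{ tableId: string }> }
) {
  const { tableId } = await params // hoisted above try on purpose

  try {
    // ... authorize, perform the column delete, etc.
    return NextResponse.json({ success: true })
  } catch (error) {
    console.error(`Failed to delete column for table ${tableId}`, error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
```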
Emir Karabeg
d4eb25df91 fix(files): icon 2026-03-09 02:12:27 -07:00
Emir Karabeg
ac2af53884 improvement: icons 2026-03-09 02:10:54 -07:00
Emir Karabeg
016d353baf improvement: icon, resource header options 2026-03-09 01:47:08 -07:00
Emir Karabeg
d9c1a53cad improvement(resource): layout 2026-03-09 01:20:50 -07:00
Waleed
2bdc073d7b fix(docs): use named grid lines instead of numeric column indices (#3487)
Root cause: the fumadocs grid template has 3 columns in production but
5 columns in local dev. Our CSS used `grid-column: 3 / span 2` which
targeted the wrong column in the 3-column grid, placing content in
the near-zero-width TOC column instead of the main content column.

Fix: use `grid-column: main-start / toc-end` which uses CSS named grid
lines from grid-template-areas, working regardless of column count.

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-09 00:50:28 -07:00
Waleed
13d2a134d0 improvement(docs): align sidebar method badges and polish API reference styling (#3484)
* improvement(docs): align sidebar method badges and polish API reference styling

* fix(docs): revert className prop on DocsPage for CI compatibility

* fix(docs): restore oneOf schema for delete rows and use rem units in CSS

* fix(docs): replace :has() selectors with direct className for reliable prod layout

The API docs layout was intermittently narrow in production because CSS
:has(.api-page-header) selectors are unreliable in Tailwind v4 production
builds. Apply className="openapi-page" directly to DocsPage and replace
all 64 :has() selectors with .openapi-page class targeting.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(docs): bypass TypeScript check for className prop on DocsPage

Use spread with type assertion to pass className to DocsPage, working
around a CI type resolution issue where the prop exists at runtime but
is not recognized by TypeScript in the Vercel build environment.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(docs): use inline style tag for grid layout, revert CSS to :has() selectors

The className prop on DocsPage doesn't exist in the fumadocs-ui version
resolved on Vercel, so .openapi-page was never applied and all 64 CSS
rules broke. Revert to :has(.api-page-header) selectors for styling and
use an inline <style> tag for the critical grid-column layout override,
which is SSR'd and doesn't depend on any CSS selector matching.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(docs): add pill styling to footer navigation method badges

The footer nav badges (POST, GET, etc.) had color from data-method rules
but lacked the structural pill styling (padding, border-radius, font-size).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-09 00:22:09 -07:00
Emir Karabeg
a61dc23d43 improvement: tables, dropdown 2026-03-09 00:17:52 -07:00
Waleed
12c1ede336 feat(files): inline file viewer with text editing (#3475)
* feat(files): add inline file viewer with text editing and create file modal

Add file preview/edit functionality to the workspace files page. Text files
(md, json, txt, yaml, etc.) open in an editable textarea with Cmd/Ctrl+S save.
PDFs render in an iframe. New file button creates empty .md files via a modal.
Uses ResourceHeader breadcrumbs and ResourceOptionsBar for save/download/delete.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* improvement(files): add UX polish, PR review fixes, and context menu

- Add unsaved changes guard modal (matching credentials manager pattern)
- Add delete confirmation modal for both viewer and context menu
- Add save status feedback (Save → Saving... → Saved)
- Add right-click context menu with Open, Download, Delete actions
- Add 50MB file size limit on content update API
- Add storage quota check before content updates
- Add response.ok guard on download to prevent corrupt files
- Add skeleton loading for pending file selection (prevents flicker)
- Fix updateContent in handleSave dependency array

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(files): propagate save errors and remove redundant sizeDiff

- Remove try/catch in TextEditor.handleSave so errors propagate to
  parent, which correctly shows save failure status
- Remove redundant inner sizeDiff declaration that shadowed outer scope

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(files): remove unused textareaRef

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(files): move Cmd+S to parent, add save error feedback, hide save for non-text files

- Move Cmd+S keyboard handler from TextEditor to Files so it goes
  through the parent handleSave with proper status management
- Add 'error' save status with red "Save failed" label that auto-resets
- Only show Save button for text-editable file types (md, txt, json, etc.)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* improvement(files): add save tooltip, deduplicate text-editable extensions

- Add Tooltip on Save button showing Cmd+S / Ctrl+S shortcut
- Export TEXT_EDITABLE_EXTENSIONS from file-viewer and reuse in files.tsx
  instead of duplicating the list inline

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* refactor: extract isMacPlatform to shared utility

Move isMacPlatform() from global-commands-provider.tsx to
lib/core/utils/platform.ts so it can be reused by files.tsx tooltip
without duplication.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* refactor(files): deduplicate delete modal, use shared formatFileSize

- Extract DeleteConfirmModal component to eliminate duplicate modal
  markup between viewer and list modes
- Replace local formatFileSize with shared utility from file-utils.ts

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(files): fix a11y label lint error and remove mutation object from useCallback deps

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(files): add isDirty guard on handleSave, return proper HTTP status codes

Prevents "Saving → Saved" flash when pressing Cmd+S with no changes.
Returns 404 for file-not-found and 402 for quota-exceeded instead of 500.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(files): reset isDirty/saveStatus on delete and discard, remove deprecated navigator.platform

- Clear isDirty and saveStatus when deleting the currently-viewed file to
  prevent spurious beforeunload prompts
- Reset saveStatus on discard to prevent stale "Save failed" when opening
  another file
- Remove deprecated navigator.platform, userAgent fallback covers all cases

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(files): prevent concurrent saves on rapid Cmd+S, add YAML MIME types

- Add saveStatus === 'saving' guard to handleSave to prevent duplicate
  concurrent PUT requests from rapid keyboard shortcuts
- Add yaml/yml MIME type mappings to getMimeTypeFromExtension

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* refactor(files): reuse shared extension constants, parallelize cancelQueries

- Replace hand-rolled SUPPORTED_EXTENSIONS with composition from existing
  SUPPORTED_DOCUMENT/AUDIO/VIDEO_EXTENSIONS in validation.ts
- Parallelize sequential cancelQueries calls in delete mutation onMutate

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(files): guard handleCreate against duplicate calls while pending

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(files): show upload progress on the Upload button, not New file

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(files): use ref-based guard for create pending state to avoid stale closure

The uploadFile.isPending check was stale because the mutation object
is excluded from useCallback deps (per codebase convention). Using a
ref ensures the guard works correctly across rapid Enter key presses.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* cleanup(files): use shared icon import, remove no-op props, wrap handler in useCallback

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-09 00:07:35 -07:00
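The last few commits in this PR rely on a ref-based pending guard: the mutation object is deliberately left out of `useCallback` deps, so reading `isPending` inside the callback would be stale, and a ref carries the live in-flight state instead. A small sketch under those assumptions; hook and mutation names are illustrative.

```ts
import { useCallback, useRef } from 'react'
import { useMutation } from '@tanstack/react-query'

// Sketch of the ref-based pending guard described above. A ref always
// reflects the latest in-flight state, even across rapid Enter presses,
// whereas `uploadFile.isPending` captured in the callback would be stale.
export function useCreateFile(createFile: (name: string) => Promise<void>) {
  const pendingRef = useRef(false)

  const uploadFile = useMutation({
    mutationFn: createFile,
    onSettled: () => {
      pendingRef.current = false
    },
  })

  const handleCreate = useCallback(
    (name: string) => {
      if (pendingRef.current) return // ignore duplicate submits while pending
      pendingRef.current = true
      uploadFile.mutate(name)
    },
    // mutation object intentionally omitted from deps; .mutate is stable in v5
    []
  )

  return { handleCreate, isPending: uploadFile.isPending }
}
```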
Emir Karabeg
627eaaf343 improvement: tables, favicon 2026-03-08 19:21:21 -07:00
Waleed Latif
1dbfaa4d23 style(schedules): apply linter formatting 2026-03-08 18:41:29 -07:00
Waleed Latif
4946571922 feat(schedules): add edit support with context menu for standalone jobs 2026-03-08 18:41:17 -07:00
Waleed Latif
6295fd1a11 feat(schedules): add schedule creator modal for standalone jobs
Add modal to create standalone scheduled jobs from the Schedules page.
Includes POST API endpoint, useCreateSchedule mutation hook, and full
modal with schedule type selection, timezone, lifecycle, and live preview.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-08 18:17:11 -07:00
Emir Karabeg
de5faa5265 improvement(tables): consolidation 2026-03-08 17:31:58 -07:00
Waleed
7d360649e9 fix(sidebar): restore drag-and-drop for workflows and folders (#3470)
* fix(sidebar): restore drag-and-drop for workflows and folders

Made-with: Cursor

* update docs, unrelated
2026-03-08 16:16:14 -07:00
Vikhyath Mondreti
1d955fc43a feat(mothership): billing (#3464)
* Billing update

* more billing improvements

* credits UI

* credit purchase safety

* progress

* ui improvements

* fix cancel sub

* fix types

* fix daily refresh for teams

* make max features differentiated

* address bugbot comments

* address greptile comments

* revert isHosted

* address more comments

* fix org refresh bar

* fix ui rounding

* fix minor rounding

* fix upgrade issue for legacy plans

* fix formatPlanName

* fix email display names

* fix legacy team reference bugs

* referral bonus in credits

* fix org upgrade bug

* improve logs

* respect toggle for paid users

* fix landing page pro features and usage limit checks

* fixed query and usage

* add unit test

* address more comments

* enterprise guard

* fix limits bug

* pass period start/end for overage
2026-03-08 03:37:54 -07:00
Waleed
1def94392b improvement(settings): fix mcp modal, add option to edit JSON and add Sim as an MCP client (#3467)
* improvement(settings): fix mcp modal, add option to edit JSON and add Sim as an MCP client

* added docs link in sidebar

* ack comments

* ack comments

* fixed error msg
2026-03-07 22:59:29 -08:00
Emir Karabeg
77bd2553f2 fix(resource): sorting 2026-03-07 21:26:54 -08:00
Emir Karabeg
8170488488 improvement(resource): sorting and icons 2026-03-07 21:19:04 -08:00
Waleed
0b42e26f10 fix(execution): ensure background tasks await post-execution DB status updates (#3466)
The fire-and-forget IIFE in execution-core.ts for post-execution logging could be abandoned when trigger.dev tasks exit, leaving executions permanently stuck in "running" status. Store the promise on LoggingSession so background tasks can optionally await it before returning.
2026-03-07 21:09:31 -08:00
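A rough sketch of the pattern this fix describes: keep a handle to the post-execution promise on the session so a background task can await it before returning. The `LoggingSession` members shown here are assumptions for illustration.

```ts
// Sketch of the "store the promise instead of fire-and-forget" pattern.
// The real LoggingSession carries more state; only the relevant part is shown.
class LoggingSession {
  private completionPromise: Promise<void> | null = null

  /** Kick off post-execution persistence without blocking the caller. */
  completeInBackground(persist: () => Promise<void>): void {
    // Keep a handle instead of an anonymous fire-and-forget IIFE, so the
    // background task can await it before the process is torn down.
    this.completionPromise = persist().catch((error) => {
      console.error('Post-execution logging failed', error)
    })
  }

  /** Background tasks call this before returning to avoid stuck "running" rows. */
  async waitForCompletion(): Promise<void> {
    if (this.completionPromise) await this.completionPromise
  }
}

// Illustrative usage inside a background task:
//   session.completeInBackground(() => updateExecutionStatus('completed'))
//   await session.waitForCompletion()
```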
Emir Karabeg
4b7a9b20c4 improvement(resources): segmented API 2026-03-07 20:48:08 -08:00
Waleed
76486ebcc8 feat(knowledge): add v1 knowledge base API, Obsidian/Evernote connectors, and docs (#3465)
* feat(knowledge): add v1 knowledge base API, Obsidian/Evernote connectors, and docs

- Add v1 REST API for knowledge bases (CRUD, document management, vector search)
- Add Obsidian and Evernote knowledge base connectors
- Add file type validation to v1 file and document upload endpoints
- Update OpenAPI spec with knowledge base endpoints and schemas
- Add connectors documentation page
- Apply query hook formatting improvements

* fix(knowledge): address PR review feedback

- Remove validateFileType from v1/files route (general file upload, not document-only)
- Reject tag filters when searching multiple KBs (tag defs are KB-specific)
- Cache tag definitions to avoid duplicate getDocumentTagDefinitions call
- Fix Obsidian connector silent empty results when syncContext is undefined

* improvement(connectors): add syncContext to getDocument, clean up caching

- Update docs to say 20+ connectors
- Add syncContext param to ConnectorConfig.getDocument interface
- Use syncContext in Evernote getDocument to cache tag/notebook maps
- Replace index-based cache check with Map keyed by KB ID in search route

* fix(knowledge): address second round of PR review feedback

- Fix Zod .default('text') overriding tag definition's actual fieldType
- Fix encodeURIComponent breaking multi-level folder paths in Obsidian
- Use 413 instead of 400 for file-too-large in document upload
- Add knowledge-bases to API reference docs navigation

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(knowledge): prevent cross-workspace KB access in search

Filter accessible KBs by matching workspaceId from the request,
preventing users from querying KBs in other workspaces they have
access to but didn't specify.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(knowledge): audit resourceId, SSRF protection, recursion depth limit

- Fix recordAudit using knowledgeBaseId instead of newDocument.id
- Add SSRF validation to Obsidian connector (reject private/loopback URLs)
- Add max recursion depth (20) to listVaultFiles

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(obsidian): remove SSRF check that blocks localhost usage

The Obsidian connector is designed to connect to the Local REST API
plugin running on localhost (127.0.0.1:27124). The SSRF check was
incorrectly blocking this primary use case.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-07 20:00:38 -08:00
Emir Karabeg
13d49da8bd improvement(resources): layout and items 2026-03-07 18:01:29 -08:00
Waleed
05b8481a89 fix(knowledge): compute KB tokenCount from documents instead of stale column (#3463)
The knowledge_base.token_count column was initialized to 0 and never
updated. Replace with COALESCE(SUM(document.token_count), 0) in all
read queries, which already JOIN on documents with GROUP BY.
2026-03-07 16:55:06 -08:00
Emir Karabeg
6690c55721 improvement(resource): layout 2026-03-07 16:25:53 -08:00
Siddharth Ganesan
88a8c5f4a1 Update mothership to match copilot in logs 2026-03-07 16:18:34 -08:00
Siddharth Ganesan
91ca6a531e Fix tables row count 2026-03-07 16:04:36 -08:00
Emir Karabeg
2f45f935e4 ran lint 2026-03-07 15:47:53 -08:00
Waleed
2cb12de546 refactor(queries): comprehensive TanStack Query best practices audit (#3460)
* refactor: comprehensive TanStack Query best practices audit and migration

- Add AbortSignal forwarding to all 41 queryFn implementations for proper request cancellation
- Migrate manual fetch patterns to useMutation hooks (useResetPassword, useRedeemReferralCode, usePurchaseCredits, useImportWorkflow, useOpenBillingPortal, useAllowedMcpDomains)
- Migrate standalone hooks to TanStack Query (use-next-available-slot, use-mcp-server-test, use-webhook-management, use-referral-attribution)
- Fix query key factories: add missing `all` keys, replace inline keys with factory methods
- Fix optimistic mutations: use onSettled instead of onSuccess for cache reconciliation
- Replace overly broad cache invalidations with targeted key invalidation
- Remove keepPreviousData from static-key queries where it provides no benefit
- Add staleTime to queries missing explicit cache duration
- Fix `any` type in UpdateSettingParams with proper GeneralSettings typing
- Remove dead code: loadingWebhooks/checkedWebhooks from subblock store, unused helper functions
- Update settings components (general, debug, referral-code, credit-balance, subscription, mcp) to use mutation state instead of manual useState for loading/error/success

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: remove unstable mutation object from useCallback deps

openBillingPortal mutation object is not referentially stable,
but .mutate() is stable in TanStack Query v5. Remove from deps
to prevent unnecessary handleBadgeClick recreations.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: add missing byWorkflows invalidation to useUpdateTemplate

The onSettled handler was missing the byWorkflows() invalidation
that was dropped during the onSuccess→onSettled migration. Without
this, the deploy modal (useTemplateByWorkflow) would show stale data
after a template update.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* docs: add TanStack Query best practices to CLAUDE.md and cursor rules

Add comprehensive React Query best practices covering:
- Hierarchical query key factories with intermediate plural keys
- AbortSignal forwarding in all queryFn implementations
- Targeted cache invalidation over broad .all invalidation
- onSettled for optimistic mutation cache reconciliation
- keepPreviousData only on variable-key queries
- No manual fetch in components rule
- Stable mutation references in useCallback deps

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: address PR review feedback

- Fix syncedRef regression in use-webhook-management: only set
  syncedRef.current=true when webhook is found, so re-sync works
  after webhook creation (e.g., post-deploy)
- Remove redundant detail(id) invalidation from useUpdateTemplate
  onSettled since onSuccess already populates cache via setQueryData

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: address second round of PR review feedback

- Reset syncedRef when blockId changes in use-webhook-management so
  component reuse with a different block syncs the new webhook
- Add response.ok check in postAttribution so non-2xx responses
  throw and trigger TanStack Query retry logic

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: use lists() prefix invalidation in useCreateWorkspaceCredential

Use workspaceCredentialKeys.lists() instead of .list(workspaceId) so
filtered list queries are also invalidated on credential creation,
matching the pattern used by update and delete mutations.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: address third round of PR review feedback

- Add nullish coalescing fallback for bonusAmount in referral-code
  to prevent rendering "undefined" when server omits the field
- Reset syncedRef when queryEnabled becomes false so webhook data
  re-syncs when the query is re-enabled without component remount

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: address fourth round of PR review feedback

- Add AbortSignal to testMcpServerConnection for consistency
- Wrap handleTestConnection in try/catch for mutateAsync error handling
- Replace broad subscriptionKeys.all with targeted users()/usage() invalidation
- Add intermediate users() key to subscription key factory for prefix matching
- Add comment documenting syncedRef null-webhook behavior
- Fix api-keys.ts silent error swallowing on non-ok responses
- Move deployments.ts cache invalidation from onSuccess to onSettled

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: achieve full TanStack Query best practices compliance

- Add intermediate plural keys to api-keys, deployments, and schedules
  key factories for prefix-based invalidation support
- Change copilot-keys from refetchQueries to invalidateQueries
- Add signal parameter to organization.ts fetch functions (better-auth
  client does not support AbortSignal, documented accordingly)
- Move useCreateMcpServer invalidation from onSuccess to onSettled

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-07 15:15:10 -08:00
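A condensed sketch of the conventions this audit enforces: a hierarchical query key factory with intermediate plural keys, AbortSignal forwarding in the `queryFn`, and `onSettled`-based invalidation for mutations. Endpoint paths and entity names are placeholders, not the repository's actual hooks.

```ts
import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query'

// Hierarchical key factory with intermediate plural keys so prefix
// invalidation (e.g. lists()) also hits filtered list queries.
export const tableKeys = {
  all: ['tables'] as const,
  lists: () => [...tableKeys.all, 'list'] as const,
  list: (workspaceId: string) => [...tableKeys.lists(), workspaceId] as const,
  detail: (tableId: string) => [...tableKeys.all, 'detail', tableId] as const,
}

export function useTables(workspaceId: string) {
  return useQuery({
    queryKey: tableKeys.list(workspaceId),
    // Forward the AbortSignal so superseded requests are actually cancelled.
    queryFn: async ({ signal }) => {
      const res = await fetch(`/api/table?workspaceId=${workspaceId}`, { signal })
      if (!res.ok) throw new Error(`Failed to fetch tables: ${res.status}`)
      return res.json()
    },
    staleTime: 30_000,
  })
}

export function useRenameTable() {
  const queryClient = useQueryClient()
  return useMutation({
    mutationFn: async ({ tableId, name }: { tableId: string; name: string }) => {
      const res = await fetch(`/api/table/${tableId}`, {
        method: 'PATCH',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ name }),
      })
      if (!res.ok) throw new Error(`Rename failed: ${res.status}`)
      return res.json()
    },
    // onSettled (not onSuccess) so the cache reconciles on error paths too.
    onSettled: (_data, _error, { tableId }) => {
      queryClient.invalidateQueries({ queryKey: tableKeys.lists() })
      queryClient.invalidateQueries({ queryKey: tableKeys.detail(tableId) })
    },
  })
}
```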
Emir Karabeg
de32644940 improvement(resources): all outer page structure complete 2026-03-07 14:42:11 -08:00
Emir Karabeg
8ff93fe842 improvement(resource): tables, files 2026-03-07 13:42:22 -08:00
Waleed
0d9e04181f improvement(perf): apply react and js performance optimizations across codebase (#3459)
* improvement(perf): apply react and js performance optimizations across codebase

- Parallelize independent DB queries with Promise.all in API routes
- Defer PostHog and OneDollarStats via dynamic import() to reduce bundle size
- Use functional setState in countdown timers to prevent stale closures
- Replace O(n*m) .filter().find() with Set-based O(n) lookups in undo-redo
- Use .toSorted() instead of .sort() for immutable state operations
- Use lazy initializers for useState(new Set()) across 20 components
- Remove useMemo wrapping trivially cheap expressions (typeof, ternary, template strings)
- Add passive: true to scroll event listener

* fix(perf): address PR review feedback

- Extract IIFE Set patterns to named consts for readability in use-undo-redo
- Hoist Set construction above loops in BATCH_UPDATE_PARENT cases
- Add .catch() error handler to PostHog dynamic import
- Convert session-provider posthog import to dynamic import() to complete bundle split

* fix(analytics): add .catch() to onedollarstats dynamic import
2026-03-07 13:08:26 -08:00
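Two of the micro-optimizations above, shown in isolation as a hedged sketch: a lazy `useState` initializer so the Set is only allocated once, and a Set-based lookup replacing an O(n*m) `.filter().find()` scan. Names are illustrative.

```ts
import { useState } from 'react'

// 1) Lazy initializer: `new Set()` runs only on the first render instead of
//    allocating (and discarding) a fresh Set on every render.
export function useSelectedIds() {
  const [selected, setSelected] = useState<Set<string>>(() => new Set())
  return [selected, setSelected] as const
}

// 2) Set-based O(n) lookup instead of scanning one array per item.
export function pickByIds<T extends { id: string }>(items: T[], ids: string[]): T[] {
  const idSet = new Set(ids)
  return items.filter((item) => idSet.has(item.id))
}
```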
Waleed
1324987def improvement(turbo): align turborepo config with best practices (#3458)
* improvement(turbo): align turborepo config with best practices

* fix(turbo): address PR review feedback

* fix(turbo): add lint:check task for read-only lint+format CI checks

lint:check previously delegated to format:check which only checked
formatting. Now it runs biome check (no --write) which enforces both
lint rules and formatting without mutating files.

* upgrade turbo
2026-03-07 12:38:46 -08:00
Waleed
9c4abf7b9b fix(connectors): add rate limiting, concurrency controls, and bug fixes (#3457)
* fix(connectors): add rate limiting, concurrency controls, and bug fixes across knowledge connectors

- Add Retry-After header support to fetchWithRetry for all 18 connectors
- Batch concurrent API calls (concurrency 5) in Dropbox, Google Docs, Google Drive, OneDrive, SharePoint
- Batch concurrent API calls (concurrency 3) in Notion to match 3 req/s limit
- Cache GitHub tree in syncContext to avoid re-fetching on every pagination page
- Batch GitHub blob fetches with concurrency 5
- Fix GitHub base64 decoding: atob() → Buffer.from() for UTF-8 safety
- Fix HubSpot OAuth scope: 'tickets' → 'crm.objects.tickets.read' (v3 API)
- Fix HubSpot syncContext key: totalFetched → totalDocsFetched for consistency
- Add jitter to nextSyncAt (10% of interval, capped at 5min) to prevent thundering herd
- Fix Date consistency in connector DELETE route

* fix(connectors): address PR review feedback on retry and SharePoint batching

- Remove 120s cap on Retry-After — pass all values through to retry loop
- Add maxDelayMs guard: if Retry-After exceeds maxDelayMs, throw immediately
  instead of hammering with shorter intervals (addresses validate timeout concern)
- Add early exit in SharePoint batch loop when maxFiles limit is reached
  to avoid unnecessary API calls

* fix(connectors): cap Retry-After at maxDelayMs instead of aborting

Match Google Cloud SDK behavior: when Retry-After exceeds maxDelayMs,
cap the wait to maxDelayMs and log a warning, rather than throwing
immediately. This ensures retries are bounded in duration while still
respecting server guidance within the configured limit.

* fix(connectors): add early-exit guard to Dropbox, Google Docs, OneDrive batch loops

Match the SharePoint fix — skip remaining batches once maxFiles limit
is reached to avoid unnecessary API calls.
2026-03-07 12:12:15 -08:00
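A sketch of the Retry-After handling this PR settles on: honor the server's hint but cap the wait at `maxDelayMs`, falling back to exponential backoff when the header is absent. The signature and defaults mirror the description rather than the connector utility itself, and this sketch only parses the seconds form of Retry-After.

```ts
interface RetryOptions {
  maxRetries?: number
  baseDelayMs?: number
  maxDelayMs?: number
}

// Illustrative fetch-with-retry that respects Retry-After within a bound.
export async function fetchWithRetry(
  url: string,
  init: RequestInit = {},
  { maxRetries = 3, baseDelayMs = 1000, maxDelayMs = 60_000 }: RetryOptions = {}
): Promise<Response> {
  for (let attempt = 0; ; attempt++) {
    const response = await fetch(url, init)

    // Only retry throttling and transient server errors.
    if (response.status !== 429 && response.status < 500) return response
    if (attempt >= maxRetries) return response

    const retryAfterSeconds = Number(response.headers.get('Retry-After'))
    let delayMs =
      Number.isFinite(retryAfterSeconds) && retryAfterSeconds > 0
        ? retryAfterSeconds * 1000
        : baseDelayMs * 2 ** attempt

    if (delayMs > maxDelayMs) {
      // Cap rather than abort, so retries stay bounded in duration while
      // still respecting server guidance within the configured limit.
      console.warn(`Retry-After ${delayMs}ms exceeds cap; waiting ${maxDelayMs}ms instead`)
      delayMs = maxDelayMs
    }

    await new Promise((resolve) => setTimeout(resolve, delayMs))
  }
}
```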
Siddharth Ganesan
00c9b72bdd Fix lint 2026-03-07 12:06:12 -08:00
Siddharth Ganesan
386df7a062 Fix 2026-03-07 11:47:53 -08:00
Siddharth Ganesan
0967755ad4 Clean vfs 2026-03-07 11:29:20 -08:00
Siddharth Ganesan
b50ccdf314 Fixes 2026-03-07 11:14:13 -08:00
Siddharth Ganesan
7247a5f4d8 Fixes 2026-03-07 10:43:41 -08:00
Waleed Latif
875498c9aa fix: resolve post-merge test and lint failures
- airtable: sync tableSelector condition with tableId (add getSchema)
- backfillCanonicalModes test: add documentId mode to prevent false backfill
- schedule PUT test: use invalid action string now that disable is valid
- schedule execute tests: add ne mock, sourceType field, use
  mockReturnValueOnce for two db.update calls
- knowledge tools: fix biome formatting (single-line arrow functions)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-07 09:49:57 -08:00
Waleed Latif
3c196d180f lint 2026-03-07 01:45:56 -08:00
Waleed Latif
2940de946c fix: correct knowledge block canonical pair pattern and subblock migration
- Rename manualDocumentId to documentId (advanced subblock ID should match
  canonicalParamId, consistent with airtable/gmail patterns)
- Fix documentSelector.dependsOn to reference knowledgeBaseSelector (basic
  depends on basic, not advanced)
- Remove unnecessary documentId migration (ID unchanged from main)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-07 01:38:05 -08:00
Waleed Latif
212f912827 Merge staging into feat/mothership-copilot
Resolved conflicts:
- oauth-required-modal.tsx: removed local SCOPE_DESCRIPTIONS (moved to lib/oauth/utils)
- credential-selector.tsx (2 files): kept useSettingsNavigation import, removed duplicate getMissingRequiredScopes
- airtable.ts: combined HEAD's dependsOn/getSchema with staging's mode:'advanced'

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-07 01:25:48 -08:00
Emir Karabeg
9a505919b0 refactor, improvement 2026-03-06 23:44:34 -08:00
Waleed
e6ca3b3311 feat(knowledge): add connector tools and expand document metadata (#3452)
* feat(knowledge): add connector tools and expand document metadata

* fix(knowledge): address PR review feedback on new tools

* fix(knowledge): remove unused params from get_document transform
2026-03-06 17:58:33 -08:00
Waleed
b93c87c521 fix(fireflies): correct types from live API validation (#3450)
* fix(fireflies): correct types from live API validation

- speakers.id is number, not string (API returns 0, 1, 2...)
- summary.action_items is a single string, not string[]
- Update formatTranscriptContent to handle action_items as string

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(fireflies): correct tool types from live API validation

- FirefliesSpeaker.id: string -> number
- FirefliesSentence.speaker_id: string -> number
- FirefliesSpeakerAnalytics.speaker_id: string -> number
- FirefliesSummary.action_items: string[] -> string
- FirefliesSummary.outline: string[] -> string
- FirefliesSummary.shorthand_bullet: string[] -> string
- FirefliesSummary.bullet_gist: string[] -> string
- FirefliesSummary.topics_discussed: string[] -> string

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-06 17:46:42 -08:00
Siddharth Ganesan
06a4a8162a Add context 2026-03-06 17:11:18 -08:00
Waleed
96c2ae2c39 feat(connectors): add Fireflies connector and API key auth support (#3448)
* feat(connectors): add Fireflies connector and API key auth support

Extend the connector system to support both OAuth and API key authentication
via a discriminated union (`ConnectorAuthConfig`). Add Fireflies as the first
API key connector, syncing meeting transcripts via the Fireflies GraphQL API.

Schema changes:
- Make `credentialId` nullable (null for API key connectors)
- Add `encryptedApiKey` column (AES-256-GCM encrypted, null for OAuth)

This eliminates the `'_apikey_'` sentinel and inline `sourceConfig._encryptedApiKey`
patterns, giving each auth mode its own clean column.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(fireflies): allow 0 for maxTranscripts (means unlimited)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-06 16:48:39 -08:00
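The discriminated union mentioned above lets each connector declare OAuth or API key auth without sentinel values. A hedged sketch of the shape; field names beyond what the commit message states are assumptions.

```ts
// Sketch of the ConnectorAuthConfig discriminated union described above.
// Only the discriminant and the fields the commit message mentions are shown.
type ConnectorAuthConfig =
  | { type: 'oauth'; providerId: string; requiredScopes: string[] }
  | { type: 'api_key'; keyLabel: string; docsUrl?: string }

interface ConnectorRecord {
  id: string
  // Nullable for API key connectors, per the schema change above.
  credentialId: string | null
  // AES-256-GCM encrypted key, null for OAuth connectors.
  encryptedApiKey: string | null
}

function resolveAuth(auth: ConnectorAuthConfig, record: ConnectorRecord) {
  switch (auth.type) {
    case 'oauth':
      if (!record.credentialId) throw new Error('OAuth connector missing credential')
      return { credentialId: record.credentialId }
    case 'api_key':
      if (!record.encryptedApiKey) throw new Error('API key connector missing key')
      return { encryptedApiKey: record.encryptedApiKey }
  }
}
```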
Theodore Li
1e53d5748a Fix oauth link callback from mothership task 2026-03-06 13:46:08 -08:00
Waleed Latif
6d803bcde2 fix(knowledge): pass workspaceId to useOAuthCredentials in connector card
The ConnectorCard was calling useOAuthCredentials(providerId) without
a workspaceId, causing the credentials API to return an empty array.
This meant the credential lookup always failed, getMissingRequiredScopes
received undefined, and the "Update access" banner always appeared.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-06 13:08:14 -08:00
Waleed Latif
bca131d597 fix(connectors): restore Linear connector requiredScopes
Linear OAuth does return scopes in the token response. The previous
fix of emptying requiredScopes was based on an incorrect assumption.
Restoring requiredScopes: ['read'] as it should work correctly.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-06 12:55:06 -08:00
Waleed Latif
0202c60d26 Revert "fix(connectors): remove legacy requiredScopes from Jira and Confluence connectors"
This reverts commit a0be3ff414.
2026-03-06 12:52:29 -08:00
Waleed
82ba3d7dd1 feat(tasks): add rename to task context menu (#3442) 2026-03-06 12:49:32 -08:00
Waleed Latif
a0be3ff414 fix(connectors): remove legacy requiredScopes from Jira and Confluence connectors
Jira and Confluence OAuth tokens don't return legacy scope names like
read:jira-work or read:confluence-content.all, causing the 'Update access'
banner to always appear. Set requiredScopes to empty array like Linear.
2026-03-06 12:44:46 -08:00
Waleed Latif
6cda9e60e8 fix(connectors): remove unverifiable requiredScopes for Linear connector 2026-03-06 12:40:15 -08:00
Siddharth Ganesan
a6abb9da67 Job logs 2026-03-06 12:30:33 -08:00
Siddharth Ganesan
777e4f57de Job execution logs 2026-03-06 11:34:48 -08:00
Waleed
695628de75 improvement(knowledge): make connector-synced document chunks readonly (#3440)
* improvement(knowledge): make connector-synced document chunks readonly

* fix(knowledge): enforce connector chunk readonly on server side

* fix(knowledge): disable toggle and delete actions for connector-synced chunks
2026-03-06 11:29:28 -08:00
Siddharth Ganesan
d71e4e51ea Fix mothership block logs 2026-03-06 10:57:16 -08:00
Siddharth Ganesan
576d9d3025 Mothership block logs 2026-03-06 10:46:30 -08:00
Waleed
43509374a2 fix(sidebar): use client-generated UUIDs for stable optimistic updates (#3439)
* fix(sidebar): use client-generated UUIDs for stable optimistic updates

* fix(folders): use zod schema validation for folder create API

Replace inline UUID regex with zod schema validation for consistency
with other API routes. Update test expectations accordingly.

* fix(sidebar): add client UUID to single workflow duplicate hook

The useDuplicateWorkflow hook was missing newId: crypto.randomUUID(),
causing the same temp-ID-swap issue for single workflow duplication
from the context menu.

* fix(folders): avoid unnecessary Set re-creation in replaceOptimisticEntry

Only create new expandedFolders/selectedFolders Sets when tempId
differs from data.id. In the common happy path (client-generated UUIDs),
this avoids unnecessary Zustand state reference changes and re-renders.
2026-03-06 06:35:19 -08:00
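A sketch of the client-generated-UUID optimistic create this PR describes: because the client picks the id with `crypto.randomUUID()` and sends it to the server, the optimistic cache entry never needs a temp-ID swap. Endpoint, key shape, and folder type are illustrative.

```ts
import { useMutation, useQueryClient } from '@tanstack/react-query'

interface Folder {
  id: string
  name: string
}

export function useCreateFolder(workspaceId: string) {
  const queryClient = useQueryClient()
  const listKey = ['folders', workspaceId] as const

  return useMutation({
    mutationFn: async (folder: Folder) => {
      const res = await fetch('/api/folders', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ ...folder, workspaceId }),
      })
      if (!res.ok) throw new Error(`Create failed: ${res.status}`)
      return res.json() as Promise<Folder>
    },
    // Optimistically append the folder under its final id; no swap needed
    // when the server echoes the same id back.
    onMutate: async (folder) => {
      await queryClient.cancelQueries({ queryKey: listKey })
      const previous = queryClient.getQueryData<Folder[]>(listKey)
      queryClient.setQueryData<Folder[]>(listKey, (old = []) => [...old, folder])
      return { previous }
    },
    onError: (_err, _folder, context) => {
      if (context?.previous) queryClient.setQueryData(listKey, context.previous)
    },
    onSettled: () => queryClient.invalidateQueries({ queryKey: listKey }),
  })
}

// Usage: createFolder.mutate({ id: crypto.randomUUID(), name: 'New folder' })
```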
Emir Karabeg
0e7c719e82 improvement(sidebar): loading 2026-03-06 02:35:24 -08:00
Siddharth Ganesan
226a3f64fb Fix lint 2026-03-05 22:10:27 -08:00
Siddharth Ganesan
6c6b3579c9 Triggers in the vfs 2026-03-05 20:57:58 -08:00
Siddharth Ganesan
a5b148e19e Native kb connectors 2026-03-05 20:17:23 -08:00
Emir Karabeg
9665f49492 fix(workflow): editor visible 2026-03-05 20:07:44 -08:00
Waleed Latif
ff4b2f8c6a lint 2026-03-05 19:38:51 -08:00
Waleed Latif
4735245c8f feat(tables): inline cell editing with optimistic updates 2026-03-05 19:37:06 -08:00
Waleed
aac9e74283 feat(knowledge): add 10 new knowledge base connectors (#3430)
* feat(knowledge): add 10 new knowledge base connectors

Add connectors for Dropbox, OneDrive, SharePoint, Slack, Google Docs,
Asana, HubSpot, Salesforce, WordPress, and Webflow. Each connector
implements listDocuments, getDocument, validateConfig with proper
pagination, content hashing, and tag definitions.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(connectors): address audit findings across 5 connectors

OneDrive: fix encodeURIComponent breaking folder paths with slashes,
add recursive folder traversal via folder queue in cursor state.
Slack: add missing requiredScopes.
Asana: pass retryOptions as 3rd arg to fetchWithRetry instead of
spreading into RequestInit; add missing requiredScopes.
HubSpot: add missing requiredScopes; fix sort property to use
hs_lastmodifieddate for non-contact object types.
Google Docs: remove orphaned title tag that was never populated.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(connectors): add missing requiredScopes to OneDrive and HubSpot

OneDrive: add requiredScopes: ['Files.Read']
HubSpot: add missing crm.objects.tickets.read scope

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* chore(connectors): lint fixes

* fix(connectors): slice documents to respect max limit on last page

* fix(connectors): use per-segment encodeURIComponent for SharePoint folder paths

encodeURI does not encode #, ?, &, + or = which are valid in folder
names but break the Microsoft Graph URL. Apply the same per-segment
encoding fix already used in the OneDrive connector.

* fix(connectors): address PR review findings

- Slack: remove private_channel from conversations.list types param
  since requiredScopes only cover public channels (channels:read,
  channels:history). Adding groups:read/groups:history would force
  all users to grant private channel access unnecessarily.
- OneDrive/SharePoint: add .htm to supported extensions and handle
  it in content processing (htmlToPlainText), matching Dropbox.
- Salesforce: guard getDocument for KnowledgeArticleVersion to skip
  records that are no longer PublishStatus='Online', preventing
  un-published articles from being re-synced.

* fix(connectors): pre-download size check and remove dead parameter

- OneDrive/SharePoint: add file size check against MAX_FILE_SIZE before
  downloading, matching Dropbox's behavior. Prevents OOM on large files.
- Slack: remove unused syncContext parameter from fetchChannelMessages.

* fix(connectors): slack getDocument user cache & wordpress scope reduction

- Slack: pass a local syncContext to formatMessages in getDocument so
  resolveUserName caches user lookups across messages. Without this,
  every message triggered a fresh users.info API call.
- WordPress: replace 'global' scope with 'posts' and 'sites' following
  principle of least privilege. The connector only reads posts and
  validates site existence.

* fix(connectors): revert wordpress scope and slack local cache changes

- WordPress: revert requiredScopes to ['global'] — the scope check
  does literal string matching, so ['posts', 'sites'] would always
  fail since auth.ts requests 'global' from WordPress.com OAuth.
  Reducing scope requires changing both auth.ts and the connector.
- Slack: remove local syncContext from getDocument — the perf impact
  of uncached users.info calls is negligible for typical channels
  (bounded by unique users, not message count).

* fix(connectors): align requiredScopes with auth.ts registrations

The scope check in getMissingRequiredScopes does literal string matching
against the OAuth token's granted scopes. requiredScopes must match what
auth.ts actually requests (since that's what the provider returns).

- HubSpot: use 'tickets' (legacy scope in auth.ts) instead of
  'crm.objects.tickets.read' (v3 granular scope not requested)
- Google Docs: use 'drive' (what auth.ts requests) instead of
  'documents.readonly' and 'drive.readonly' (never requested,
  so never in the granted set)

* fix(connectors): align Google Drive requiredScopes with auth.ts

Google Drive connector required 'drive.readonly' but auth.ts requests
'drive' (the superset). Since scope validation does literal matching,
this caused a spurious 'Additional permissions required' warning.

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-05 19:31:17 -08:00
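A tiny sketch of the per-segment encoding fix applied to the OneDrive and SharePoint paths above; the helper name is made up for illustration.

```ts
// encodeURI leaves '#', '?', '&', '+' and '=' alone, which breaks Microsoft
// Graph URLs when a folder name contains them. Encoding each path segment
// separately keeps the '/' separators while escaping everything inside.
function encodeDrivePath(folderPath: string): string {
  return folderPath
    .split('/')
    .filter(Boolean)
    .map(encodeURIComponent)
    .join('/')
}

// encodeDrivePath('Reports/Q1 #final/P&L') -> 'Reports/Q1%20%23final/P%26L'
```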
Siddharth Ganesan
d6b97fee08 Fix lint 2026-03-05 17:37:41 -08:00
Siddharth Ganesan
280ac30d55 Jobs 2026-03-05 17:36:24 -08:00
Siddharth Ganesan
5c24d2422e Jobs 2026-03-05 17:35:38 -08:00
Siddharth Ganesan
9d001eaf70 Jobs 2026-03-05 17:32:36 -08:00
Vikhyath Mondreti
3ce947566d v0.5.106: condition block and legacy kbs fixes, GPT 5.4 2026-03-05 17:30:05 -08:00
Siddharth Ganesan
17e1bb5331 Nuke migrations 2026-03-05 17:22:40 -08:00
Siddharth Ganesan
443e15eb01 Jobs 2026-03-05 16:43:48 -08:00
Waleed
dbef14ba26 feat(knowledge): connectors, user exclusions, expanded tools & airtable integration (#3230)
* feat(knowledge): connectors, user exclusions, expanded tools & airtable integration

* improvements

* removed redundant util

* ack PR comments

* remove module level cache, use syncContext between paginated calls to avoid redundant schema fetches

* regen migrations, ack PR comments

* ack PR comment

* added tests

* ack comments

* ack comments

* feat(db): add knowledge connector migration after merge

Generated migration 0162 for knowledge_connector and
knowledge_connector_sync_log tables after resolving merge
conflicts with feat/mothership-copilot.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(connectors): audit fixes for sync engine, connectors, and knowledge tools

- Extract shared computeContentHash to connectors/utils.ts (dedup across 7 connectors)
- Include error'd connectors in cron auto-retry query
- Add syncContext caching for Confluence (cloudId, spaceId)
- Batch Confluence label fetches with concurrency limit of 10
- Enforce maxPages in Confluence v2 path
- Clean up stale storage files on document update
- Retry stuck documents (pending/failed) after sync completes
- Soft-delete documents and reclaim tag slots on connector deletion
- Add incremental sync support to ConnectorConfig interface
- Fix offset:0 falsy check in list_documents tool

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* perf(connectors): deep audit — extract shared utils, fix pagination, optimize API calls

- Extract shared htmlToPlainText to connectors/utils.ts (dedup Confluence + Google Drive)
- Add syncContext caching for Jira cloudId, Notion/Linear/Google Drive cumulative limits
- Fix cumulative maxPages/maxIssues/maxFiles enforcement across pagination pages
- Bump Notion page_size from 20 to 100 (5x fewer API round-trips)
- Batch Notion child page fetching with concurrency=5 (was serial N+1)
- Bump Confluence v2 limit from 50 to 250 (v2 API supports it)
- Pass syncContext through Confluence CQL path for cumulative tracking
- Upgrade GitHub tree truncation warning to error level
- Fix sync-engine test mock to include inArray export

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* refactor(connectors): extract tag helpers, fix Notion maxPages, rewrite broken tests

- Add parseTagDate and joinTagArray helpers to connectors/utils.ts
- Update all 7 connectors to use shared tag mapping helpers (removes 12+ duplication instances)
- Fix Notion listFromParentPage cumulative maxPages check (was using local count)
- Rewrite 3 broken connector route test files to use vi.hoisted() + static vi.mock()
  pattern instead of deprecated vi.doMock/vi.resetModules (all 86 tests now pass)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(connectors): add loading skeletons, delete pending state, and pause feedback

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(knowledge): escape LIKE wildcards, guard restore from un-deleting, fix offset=0

- Escape %, _, \ in tag filter LIKE patterns to prevent incorrect matches
- Add isNull(deletedAt) guard to restore operation to prevent un-deleting soft-deleted docs
- Change offset check from falsy to != null so offset=0 is not dropped

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-05 15:40:00 -08:00
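A minimal sketch of the LIKE-wildcard escaping described above; the helper name and usage note are illustrative.

```ts
// '%' and '_' are pattern metacharacters in SQL LIKE, and '\' is the escape
// character, so all three must be escaped before building a LIKE pattern
// from user input.
function escapeLikePattern(value: string): string {
  return value.replace(/[\\%_]/g, (char) => `\\${char}`)
}

// escapeLikePattern('50%_off') -> '50\\%\\_off', typically used as
// `%${escaped}%` together with an ESCAPE clause where the dialect needs one.
```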
Siddharth Ganesan
7140867ff9 Jobs 2026-03-05 15:14:45 -08:00
Siddharth Ganesan
73cd10ca21 Jobs 2 2026-03-05 14:40:49 -08:00
Waleed
a368827f1e feat(api): add tables and files v1 REST API with OpenAPI docs (#3422)
* feat(api): add tables and files v1 REST API with OpenAPI docs

* fix(api): address review feedback for tables/files REST API

* fix(api): reject empty filters, consolidate PUT/DELETE into service helpers

* fix(api): upsert unique constraints, POST response fields, uploadedAt timestamp

* fix(api): stop leaking internal fields in list tables, fix deleteTable requestId

* fix(api): atomic table-count limit in createTable, stop leaking internal fields

* fix(api): error classification in PATCH, z.coerce→preprocess, requestId in logs

* fix(api): audit logging, PATCH service consolidation, Content-Disposition encoding

- Add TABLE_CREATED/TABLE_DELETED audit events to v1 table routes
- Consolidate PATCH handlers to use updateRow service function
- Fix Content-Disposition header with RFC 5987 dual-parameter form
- Normalize schema in POST /tables response with normalizeColumn

* lint

* fix(api): upsert unique constraint 400, guard request.json() parse errors

- Add 'Unique constraint violation' to upsert error classification
- Wrap PUT/DELETE request.json() in try/catch to return 400 on malformed body
- Apply fixes to both v1 and internal routes

* fix(api): guard PATCH request.json(), accurate deleteRowsByIds count

- Wrap PATCH request.json() in try/catch for both v1 and internal routes
- Rewrite deleteRowsByIds to use .returning() for accurate deletedCount
  under concurrent requests (eliminates SELECT-then-DELETE race)

* fix(api): guard all remaining request.json() calls in table routes

- Wrap POST handler request.json() in try/catch across all table routes
- Also fix internal DELETE single-row handler
- Every request.json() in table routes now returns 400 on malformed body

* fix(api): safe type check on formData workspaceId in file upload

- Replace unsafe `as string | null` cast with typeof check
- Prevents File object from bypassing workspaceId validation

* fix(api): safe File cast in upload, validate column name before sql.raw()

- Use instanceof File check instead of unsafe `as File | null` cast
- Add regex validation on column name before sql.raw() interpolation

* fix(api): comprehensive hardening pass across all table/file routes

- Guard request.formData() with try/catch in file upload
- Guard all .toISOString() calls with instanceof Date checks
- Replace verifyTableWorkspace double-fetch with direct comparison
- Fix relative imports to absolute (@/app/api/table/utils)
- Fix internal list tables leaking fields via ...t spread
- Normalize schema in internal POST create table response
- Remove redundant pre-check in internal create (service handles atomically)
- Make 'maximum table limit' return 403 consistently (was 400 in internal)
- Add 'Row not found' → 404 classification in PATCH handlers
- Add NAME_PATTERN validation before sql.raw() in validation.ts

* chore: lint fixes
2026-03-05 13:16:13 -08:00
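One of the hardening items above is the RFC 5987 dual-parameter `Content-Disposition` header. A hedged sketch of that encoding; the helper is illustrative, and `encodeURIComponent` is only an approximation of full RFC 5987 escaping (it leaves a few characters such as `'` and `*` unescaped).

```ts
// Plain ASCII `filename` fallback for old clients plus a UTF-8 `filename*`
// parameter for everything else, so non-ASCII file names download intact.
function contentDispositionHeader(fileName: string): string {
  const asciiFallback = fileName.replace(/[^\x20-\x7e]/g, '_').replace(/"/g, "'")
  // Approximation: encodeURIComponent covers most of RFC 5987's escaping.
  const utf8Encoded = encodeURIComponent(fileName)
  return `attachment; filename="${asciiFallback}"; filename*=UTF-8''${utf8Encoded}`
}

// contentDispositionHeader('résumé.pdf')
// -> attachment; filename="r_sum_.pdf"; filename*=UTF-8''r%C3%A9sum%C3%A9.pdf
```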
Siddharth Ganesan
eac8aca0c0 Schedules page for workflows 2026-03-05 10:31:01 -08:00
Waleed
70c36cb7aa v0.5.105: slack remove reaction, nested subflow locks fix, servicenow pagination, memory improvements 2026-03-04 22:38:26 -08:00
Siddharth Ganesan
337154054e Oauth link 2026-03-04 17:35:32 -08:00
Siddharth Ganesan
c6ac0b4445 Agent subdir 2026-03-04 16:50:24 -08:00
Waleed
b07925fcc0 feat(settings): migrate settings from modal to route-based pages (#3413) 2026-03-04 15:20:52 -08:00
Siddharth Ganesan
08fb8c1651 Tool perms 2026-03-04 13:44:46 -08:00
Siddharth Ganesan
37337aece5 Scope perms 2026-03-04 12:44:04 -08:00
Siddharth Ganesan
da349176ab Fix merge conflicts 2026-03-04 11:17:01 -08:00
Siddharth Ganesan
6f3559ce8f Fix merge conflicts 2026-03-04 11:15:43 -08:00
Siddharth Ganesan
9a7b5ffe64 Fix merge conflicts 2026-03-04 11:13:42 -08:00
Siddharth Ganesan
4ede071ecb Fix merge conflicts 2026-03-04 11:12:51 -08:00
Siddharth Ganesan
161fb37244 Remove migrations 2026-03-04 10:48:12 -08:00
Emir Karabeg
d1575927a2 improvement(theme): system default 2026-03-04 01:29:47 -08:00
Waleed
f1ec5fe824 v0.5.104: memory improvements, nested subflows, careers page redirect, brandfetch, google meet 2026-03-03 23:45:29 -08:00
Emir Karabeg
a3b19fb32a improvement(user-input): ui, files 2026-03-03 22:56:07 -08:00
Emir Karabeg
21404d17e8 fix: message stream pickup and task ordering 2026-03-03 17:29:36 -08:00
Siddharth Ganesan
df7e731c9c Add payload 2026-03-03 16:15:01 -08:00
Emir Karabeg
4f4191fe1b fix: task ordering 2026-03-03 15:45:15 -08:00
Emir Karabeg
b57636e5b1 finalized task navigation 2026-03-03 15:24:31 -08:00
Emir Karabeg
38c9ecd259 resolved merge conflicts 2026-03-03 14:49:56 -08:00
Emir Karabeg
fadda6aaef improvement: task routing optimizations 2026-03-03 14:48:43 -08:00
Emir Karabeg
82f541e9de improvement: ui 2026-03-03 14:46:09 -08:00
Siddharth Ganesan
1339915957 Task vfs 2026-03-03 13:12:13 -08:00
Siddharth Ganesan
7fafc00a07 Task management 2026-03-03 12:00:03 -08:00
Emir Karabeg
fe5ab8aee8 improved streaming 2026-03-03 11:40:42 -08:00
Siddharth Ganesan
b3a639a693 Logs 2026-03-03 11:38:06 -08:00
Siddharth Ganesan
0249ca1480 Fix files 2026-03-03 10:49:59 -08:00
Siddharth Ganesan
553c376289 Fix routes 2026-03-03 10:23:11 -08:00
Emir Karabeg
4622966643 improvement(home): interactions 2026-03-02 17:25:32 -08:00
Siddharth Ganesan
e9550c624d Wand 2026-03-02 15:12:59 -08:00
Siddharth Ganesan
1d48289c53 Mothership block update 2026-03-02 15:05:56 -08:00
Siddharth Ganesan
fce10241a5 Mothership block 2026-03-02 14:55:04 -08:00
Emir Karabeg
ae080f125c Merge branch 'feat/landing' into feat/mothership-copilot 2026-03-02 13:44:12 -08:00
Emir Karabeg
0fb840c8fd Cleaned up home 2026-03-02 13:39:34 -08:00
Emir Karabeg
2c20519bbd improvement: ui/ux 2026-03-02 12:36:32 -08:00
Siddharth Ganesan
f3474b0c90 Tool call loop 2026-03-02 11:15:17 -08:00
Waleed
e07e3c34cc v0.5.103: memory util instrumentation, API docs, amplitude, google pagespeed insights, pagerduty 2026-03-01 23:27:02 -08:00
Siddharth Ganesan
b2cc5b6738 Billing 2026-02-28 17:51:26 -08:00
Waleed
0d2e6ff31d v0.5.102: new integrations, new tools, ci speedups, memory leak instrumentation 2026-02-28 12:48:10 -08:00
Siddharth Ganesan
d49a2c1c25 Fixes 2026-02-27 15:56:04 -08:00
Siddharth Ganesan
8fa4745893 MCP commented out 2026-02-27 11:18:38 -08:00
Siddharth Ganesan
c168e36a05 Fix 2026-02-26 17:48:53 -08:00
Siddharth Ganesan
9cc46ffa43 Edit subagents 2026-02-26 15:53:58 -08:00
Waleed
4fd0989264 v0.5.101: circular dependency mitigation, confluence enhancements, google tasks and bigquery integrations, workflow lock 2026-02-26 15:04:53 -08:00
Siddharth Ganesan
cc5e592c46 Kb checkpoint 2026-02-26 14:59:56 -08:00
Siddharth Ganesan
7276136398 Piping 2026-02-26 12:32:09 -08:00
Siddharth Ganesan
3ad7af4b97 File creation 2026-02-25 19:23:24 -08:00
Siddharth Ganesan
3cb1768a44 Move files to separate resource 2026-02-25 18:33:07 -08:00
Siddharth Ganesan
11e6387a7d Fix run workflow 2026-02-25 18:12:13 -08:00
Siddharth Ganesan
57a91027de Fix condition edges 2026-02-25 17:48:33 -08:00
Emir Karabeg
49c29d5f7d feat: pricing, collaboration improvement, features skeleton 2026-02-25 16:28:56 -08:00
Emir Karabeg
843af915bc feat: integrations skeleton, realtime complete 2026-02-25 16:28:56 -08:00
Emir Karabeg
bb3e899f74 feat(landing): template, generic workflow 2026-02-25 16:28:56 -08:00
Emir Karabeg
e47dcdcc43 feat(landing): navbar, metadata, hero, templates header 2026-02-25 16:28:55 -08:00
Emir Karabeg
3e6cf24762 feat(landing): structure 2026-02-25 16:28:55 -08:00
Siddharth Ganesan
90a12546b2 Fix lint 2026-02-25 12:56:58 -08:00
Siddharth Ganesan
b6f8439267 Remove dead code 2026-02-25 12:55:50 -08:00
Siddharth Ganesan
4f74a8b845 Checkpoint 2026-02-25 12:45:55 -08:00
Siddharth Ganesan
f12d8f631f Split 2026-02-25 12:37:23 -08:00
Siddharth Ganesan
41f0957ccc Separation of route 2026-02-25 12:19:26 -08:00
Siddharth Ganesan
7b813be1dd Fix truncation 2026-02-25 11:09:04 -08:00
Siddharth Ganesan
704fa16bb4 run workflow checkpoint 2026-02-25 11:08:44 -08:00
Waleed
67f8a687f6 v0.5.100: multiple credentials, 40% speedup, gong, attio, audit log improvements 2026-02-25 00:28:25 -08:00
Siddharth Ganesan
eccad2a8ce Remove dup code from tool calls 2026-02-24 16:59:40 -08:00
Siddharth Ganesan
87f5c464d9 Consolidation 2026-02-24 14:55:35 -08:00
Siddharth Ganesan
724aaa1432 table tools 2026-02-24 14:32:55 -08:00
Siddharth Ganesan
3de3ef4786 Readd migration 2026-02-24 14:03:30 -08:00
Siddharth Ganesan
743f048442 Merge with origin staging 2026-02-24 14:02:59 -08:00
Siddharth Ganesan
bbcf346df0 Nuke migration 2026-02-24 13:57:31 -08:00
Siddharth Ganesan
b9c3c2f78f Checkpoint interface consolidation 2026-02-24 13:55:50 -08:00
Siddharth Ganesan
d333307a17 Checkpoint 2026-02-24 13:47:29 -08:00
Siddharth Ganesan
134c4c4f2a Checkpoint 2026-02-24 12:22:19 -08:00
Waleed
af592349d3 v0.5.99: local dev improvements, live workflow logs in terminal 2026-02-23 00:24:49 -08:00
Waleed
0d86ea01f0 v0.5.98: change detection improvements, rate limit and code execution fixes, removed retired models, hex integration 2026-02-21 18:07:40 -08:00
Waleed
115f04e989 v0.5.97: oidc discovery for copilot mcp 2026-02-21 02:06:25 -08:00
Waleed
34d92fae89 v0.5.96: sim oauth provider, slack ephemeral message tool and blockkit support 2026-02-20 18:22:20 -08:00
Waleed
67aa4bb332 v0.5.95: gemini 3.1 pro, cloudflare, dataverse, revenuecat, redis, upstash, algolia tools; isolated-vm robustness improvements, tables backend (#3271)
* feat(tools): advanced fields for youtube, vercel; added cloudflare and dataverse tools (#3257)

* refactor(vercel): mark optional fields as advanced mode

Move optional/power-user fields behind the advanced toggle:
- List Deployments: project filter, target, state
- Create Deployment: project ID override, redeploy from, target
- List Projects: search
- Create/Update Project: framework, build/output/install commands
- Env Vars: variable type
- Webhooks: project IDs filter
- Checks: path, details URL
- Team Members: role filter
- All operations: team ID scope

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* style(youtube): mark optional params as advanced mode

Hide pagination, sort order, and filter fields behind the advanced
toggle for a cleaner default UX across all YouTube operations.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* added advanced fields for vercel and youtube, added cloudflare and dataverse block

* added desc for dataverse

* add more tools

* ack comment

* more

* ops

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>

* feat(tables): added tables (#2867)

* updates

* required

* trashy table viewer

* updates

* updates

* filtering ui

* updates

* updates

* updates

* one input mode

* format

* fix lints

* improved errors

* updates

* updates

* changes

* doc strings

* breaking down file

* update comments with ai

* updates

* comments

* changes

* revert

* updates

* dedupe

* updates

* updates

* updates

* refactoring

* renames & refactors

* refactoring

* updates

* undo

* update db

* wand

* updates

* fix comments

* fixes

* simplify comments

* updates

* renames

* better comments

* validation

* updates

* updates

* updates

* fix sorting

* fix appearance

* updating prompt to make it user sort

* rm

* updates

* rename

* comments

* clean comments

* simplification

* updates

* updates

* refactor

* reduced type confusion

* undo

* rename

* undo changes

* undo

* simplify

* updates

* updates

* revert

* updates

* db updates

* type fix

* fix

* fix error handling

* updates

* docs

* docs

* updates

* rename

* dedupe

* revert

* uncook

* updates

* fix

* fix

* fix

* fix

* prepare merge

* readd migrations

* add back missed code

* migrate enrichment logic to general abstraction

* address bugbot concerns

* adhere to size limits for tables

* remove conflicting migration

* add back migrations

* fix tables auth

* fix permissive auth

* fix lint

* reran migrations

* migrate to use tanstack query for all server state

* update table-selector

* update names

* added tables to permission groups, updated subblock types

---------

Co-authored-by: Vikhyath Mondreti <vikhyath@simstudio.ai>
Co-authored-by: waleed <walif6@gmail.com>

* fix(snapshot): changed insert to upsert when concurrent identical child workflows are running (#3259)

* fix(snapshot): changed insert to upsert when concurrent identical child workflows are running

* fixed ci tests failing

* fix(workflows): disallow duplicate workflow names at the same folder level (#3260)

* feat(tools): added redis, upstash, algolia, and revenuecat (#3261)

* feat(tools): added redis, upstash, algolia, and revenuecat

* ack comment

* feat(models): add gemini-3.1-pro-preview and update gemini-3-pro thinking levels (#3263)

* fix(audit-log): lazily resolve actor name/email when missing (#3262)

* fix(blocks): move type coercions from tools.config.tool to tools.config.params (#3264)

* fix(blocks): move type coercions from tools.config.tool to tools.config.params

Number() coercions in tools.config.tool ran at serialization time before
variable resolution, destroying dynamic references like <block.result.count>
by converting them to NaN/null. Moved all coercions to tools.config.params
which runs at execution time after variables are resolved.

Fixed in 15 blocks: exa, arxiv, sentry, incidentio, wikipedia, ahrefs,
posthog, elasticsearch, dropbox, hunter, lemlist, spotify, youtube, grafana,
parallel. Also added mode: 'advanced' to optional exa fields.

Closes #3258

* fix(blocks): address PR review — move remaining param mutations from tool() to params()

- Moved field mappings from tool() to params() in grafana, posthog,
  lemlist, spotify, dropbox (same dynamic reference bug)
- Fixed parallel.ts excerpts/full_content boolean logic
- Fixed parallel.ts search_queries empty case (must set undefined)
- Fixed elasticsearch.ts timeout not included when already ends with 's'
- Restored dropbox.ts tool() switch for proper default fallback

* fix(blocks): restore field renames to tool() for serialization-time validation

Field renames (e.g. personalApiKey→apiKey) must be in tool() because
validateRequiredFieldsBeforeExecution calls selectToolId()→tool() then
checks renamed field names on params. Only type coercions (Number(),
boolean) stay in params() to avoid destroying dynamic variable references.

* improvement(resolver): resolved empty sentinel to not pass through unexecuted valid refs to text inputs (#3266)

* fix(blocks): add required constraint for serviceDeskId in JSM block (#3268)

* fix(blocks): add required constraint for serviceDeskId in JSM block

* fix(blocks): rename custom field values to request field values in JSM create request

* fix(trigger): add isolated-vm support to trigger.dev container builds (#3269)

Scheduled workflow executions running in trigger.dev containers were
failing to spawn isolated-vm workers because the native module wasn't
available in the container. This caused loop condition evaluation to
silently fail and exit after one iteration.

- Add isolated-vm to build.external and additionalPackages in trigger config
- Include isolated-vm-worker.cjs via additionalFiles for child process spawning
- Add fallback path resolution for worker file in trigger.dev environment

* fix(tables): hide tables from sidebar and block registry (#3270)

* fix(tables): hide tables from sidebar and block registry

* fix(trigger): add isolated-vm support to trigger.dev container builds (#3269)

Scheduled workflow executions running in trigger.dev containers were
failing to spawn isolated-vm workers because the native module wasn't
available in the container. This caused loop condition evaluation to
silently fail and exit after one iteration.

- Add isolated-vm to build.external and additionalPackages in trigger config
- Include isolated-vm-worker.cjs via additionalFiles for child process spawning
- Add fallback path resolution for worker file in trigger.dev environment

* lint

* fix(trigger): update node version to align with main app (#3272)

* fix(build): fix corrupted sticky disk cache on blacksmith (#3273)

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Co-authored-by: Lakee Sivaraya <71339072+lakeesiv@users.noreply.github.com>
Co-authored-by: Vikhyath Mondreti <vikhyath@simstudio.ai>
Co-authored-by: Vikhyath Mondreti <vikhyathvikku@gmail.com>
2026-02-20 13:43:07 -08:00
Siddharth Ganesan
03908edcbb Checkpoint 2026-02-19 14:47:57 -08:00
Siddharth Ganesan
3112485c31 Checkpoint 2026-02-19 11:08:32 -08:00
Siddharth Ganesan
459c2930ae Checkpoint 2026-02-19 10:14:24 -08:00
Siddharth Ganesan
3338b25c30 Checkpoint 2026-02-18 18:55:10 -08:00
Siddharth Ganesan
4c3002f97d Checkpoint 2026-02-18 18:38:37 -08:00
Waleed
15ace5e63f v0.5.94: vercel integration, folder insertion, migrated tracking redirects to rewrites 2026-02-18 16:53:34 -08:00
Siddharth Ganesan
632e0e0762 Checkpoint 2026-02-18 15:29:58 -08:00
Waleed
fdca73679d v0.5.93: NextJS config changes, MCP and Blocks whitelisting, copilot keyboard shortcuts, audit logs 2026-02-18 12:10:05 -08:00
Siddharth Ganesan
7599774974 Checkpoint 2026-02-17 18:54:15 -08:00
Siddharth Ganesan
471e58a2d0 Checkpoint 2026-02-17 17:04:34 -08:00
Siddharth Ganesan
231ddc59a0 V0 2026-02-17 16:07:55 -08:00
Siddharth Ganesan
b197f68828 v0 2026-02-17 15:28:23 -08:00
Waleed
da46a387c9 v0.5.92: shortlinks, copilot scrolling stickiness, pagination 2026-02-17 15:13:21 -08:00
Waleed
b7e377ec4b v0.5.91: docs i18n, turborepo upgrade 2026-02-16 00:36:05 -08:00
1405 changed files with 290026 additions and 29373 deletions

View File

@@ -0,0 +1,299 @@
---
description: Add a knowledge base connector for syncing documents from an external source
argument-hint: <service-name> [api-docs-url]
---
# Add Connector Skill
You are an expert at adding knowledge base connectors to Sim. A connector syncs documents from an external source (Confluence, Google Drive, Notion, etc.) into a knowledge base.
## Your Task
When the user asks you to create a connector:
1. Use Context7 or WebFetch to read the service's API documentation
2. Determine the auth mode: **OAuth** (if Sim already has an OAuth provider for the service) or **API key** (if the service uses API key / Bearer token auth)
3. Create the connector directory and config
4. Register it in the connector registry
## Directory Structure
Create files in `apps/sim/connectors/{service}/`:
```
connectors/{service}/
├── index.ts # Barrel export
└── {service}.ts # ConnectorConfig definition
```
## Authentication
Connectors use a discriminated union for auth config (`ConnectorAuthConfig` in `connectors/types.ts`):
```typescript
type ConnectorAuthConfig =
| { mode: 'oauth'; provider: OAuthService; requiredScopes?: string[] }
| { mode: 'apiKey'; label?: string; placeholder?: string }
```
### OAuth mode
For services with existing OAuth providers in `apps/sim/lib/oauth/types.ts`. The `provider` must match an `OAuthService`. The modal shows a credential picker and handles token refresh automatically.
### API key mode
For services that use API key / Bearer token auth. The modal shows a password input with the configured `label` and `placeholder`. The API key is encrypted at rest using AES-256-GCM and stored in a dedicated `encryptedApiKey` column on the connector record. The sync engine decrypts it automatically — connectors receive the raw access token in `listDocuments`, `getDocument`, and `validateConfig`.
## ConnectorConfig Structure
### OAuth connector example
```typescript
import { createLogger } from '@sim/logger'
import { {Service}Icon } from '@/components/icons'
import { fetchWithRetry } from '@/lib/knowledge/documents/utils'
import type { ConnectorConfig, ExternalDocument, ExternalDocumentList } from '@/connectors/types'
const logger = createLogger('{Service}Connector')
export const {service}Connector: ConnectorConfig = {
id: '{service}',
name: '{Service}',
description: 'Sync documents from {Service} into your knowledge base',
version: '1.0.0',
icon: {Service}Icon,
auth: {
mode: 'oauth',
provider: '{service}', // Must match OAuthService in lib/oauth/types.ts
requiredScopes: ['read:...'],
},
configFields: [
// Rendered dynamically by the add-connector modal UI
// Supports 'short-input' and 'dropdown' types
],
listDocuments: async (accessToken, sourceConfig, cursor) => {
// Paginate via cursor, extract text, compute SHA-256 hash
// Return { documents: ExternalDocument[], nextCursor?, hasMore }
},
getDocument: async (accessToken, sourceConfig, externalId) => {
// Return ExternalDocument or null
},
validateConfig: async (accessToken, sourceConfig) => {
// Return { valid: true } or { valid: false, error: 'message' }
},
// Optional: map source metadata to semantic tag keys (translated to slots by sync engine)
mapTags: (metadata) => {
// Return Record<string, unknown> with keys matching tagDefinitions[].id
},
}
```
### API key connector example
```typescript
export const {service}Connector: ConnectorConfig = {
id: '{service}',
name: '{Service}',
description: 'Sync documents from {Service} into your knowledge base',
version: '1.0.0',
icon: {Service}Icon,
auth: {
mode: 'apiKey',
label: 'API Key', // Shown above the input field
placeholder: 'Enter your {Service} API key', // Input placeholder
},
configFields: [ /* ... */ ],
listDocuments: async (accessToken, sourceConfig, cursor) => { /* ... */ },
getDocument: async (accessToken, sourceConfig, externalId) => { /* ... */ },
validateConfig: async (accessToken, sourceConfig) => { /* ... */ },
}
```
## ConfigField Types
The add-connector modal renders these automatically — no custom UI needed.
```typescript
// Text input
{
id: 'domain',
title: 'Domain',
type: 'short-input',
placeholder: 'yoursite.example.com',
required: true,
}
// Dropdown (static options)
{
id: 'contentType',
title: 'Content Type',
type: 'dropdown',
required: false,
options: [
{ label: 'Pages only', id: 'page' },
{ label: 'Blog posts only', id: 'blogpost' },
{ label: 'All content', id: 'all' },
],
}
```
## ExternalDocument Shape
Every document returned from `listDocuments`/`getDocument` must include:
```typescript
{
externalId: string // Source-specific unique ID
title: string // Document title
content: string // Extracted plain text
mimeType: 'text/plain' // Always text/plain (content is extracted)
contentHash: string // SHA-256 of content (change detection)
sourceUrl?: string // Link back to original (stored on document record)
metadata?: Record<string, unknown> // Source-specific data (fed to mapTags)
}
```
## Content Hashing (Required)
The sync engine uses content hashes for change detection:
```typescript
async function computeContentHash(content: string): Promise<string> {
const data = new TextEncoder().encode(content)
const hashBuffer = await crypto.subtle.digest('SHA-256', data)
return Array.from(new Uint8Array(hashBuffer)).map(b => b.toString(16).padStart(2, '0')).join('')
}
```
## tagDefinitions — Declared Tag Definitions
Declare which tags the connector populates using semantic IDs. Shown in the add-connector modal as opt-out checkboxes.
On connector creation, slots are **dynamically assigned** via `getNextAvailableSlot` — connectors never hardcode slot names.
```typescript
tagDefinitions: [
{ id: 'labels', displayName: 'Labels', fieldType: 'text' },
{ id: 'version', displayName: 'Version', fieldType: 'number' },
{ id: 'lastModified', displayName: 'Last Modified', fieldType: 'date' },
],
```
Each entry has:
- `id`: Semantic key matching a key returned by `mapTags` (e.g. `'labels'`, `'version'`)
- `displayName`: Human-readable name shown in the UI (e.g. "Labels", "Last Modified")
- `fieldType`: `'text'` | `'number'` | `'date'` | `'boolean'` — determines which slot pool to draw from
Users can opt out of specific tags in the modal. Disabled IDs are stored in `sourceConfig.disabledTagIds`.
The assigned mapping (`semantic id → slot`) is stored in `sourceConfig.tagSlotMapping`.
## mapTags — Metadata to Semantic Keys
Maps source metadata to semantic tag keys. Required if `tagDefinitions` is set.
The sync engine calls this automatically and translates semantic keys to actual DB slots
using the `tagSlotMapping` stored on the connector.
Return keys must match the `id` values declared in `tagDefinitions`.
```typescript
mapTags: (metadata: Record<string, unknown>): Record<string, unknown> => {
const result: Record<string, unknown> = {}
// Validate arrays before casting — metadata may be malformed
const labels = Array.isArray(metadata.labels) ? (metadata.labels as string[]) : []
if (labels.length > 0) result.labels = labels.join(', ')
// Validate numbers — guard against NaN
if (metadata.version != null) {
const num = Number(metadata.version)
if (!Number.isNaN(num)) result.version = num
}
// Validate dates — guard against Invalid Date
if (typeof metadata.lastModified === 'string') {
const date = new Date(metadata.lastModified)
if (!Number.isNaN(date.getTime())) result.lastModified = date
}
return result
}
```
## External API Calls — Use `fetchWithRetry`
All external API calls must use `fetchWithRetry` from `@/lib/knowledge/documents/utils` instead of raw `fetch()`. This provides exponential backoff with retries on 429/502/503/504 errors. It returns a standard `Response` — all `.ok`, `.json()`, `.text()` checks work unchanged.
For `validateConfig` (user-facing, called on save), pass `VALIDATE_RETRY_OPTIONS` to cap wait time at ~7s. Background operations (`listDocuments`, `getDocument`) use the built-in defaults (5 retries, ~31s max).
```typescript
import { VALIDATE_RETRY_OPTIONS, fetchWithRetry } from '@/lib/knowledge/documents/utils'
// Background sync — use defaults
const response = await fetchWithRetry(url, {
method: 'GET',
headers: { Authorization: `Bearer ${accessToken}` },
})
// validateConfig — tighter retry budget
const response = await fetchWithRetry(url, { ... }, VALIDATE_RETRY_OPTIONS)
```
## sourceUrl
If `ExternalDocument.sourceUrl` is set, the sync engine stores it on the document record. Always construct the full URL (not a relative path).
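A minimal sketch of building that URL, assuming a `domain` config field and a page-style external ID (both names are illustrative, not part of the connector contract):
```typescript
// Illustrative only: the `domain` config field and URL path shape are assumptions for this sketch.
function buildSourceUrl(sourceConfig: Record<string, unknown>, externalId: string): string {
  // Always return a full absolute URL, never a relative path
  return `https://${sourceConfig.domain}/wiki/pages/${externalId}`
}
```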
## Sync Engine Behavior (Do Not Modify)
The sync engine (`lib/knowledge/connectors/sync-engine.ts`) is connector-agnostic. It:
1. Calls `listDocuments` with pagination until `hasMore` is false
2. Compares `contentHash` to detect new/changed/unchanged documents
3. Stores `sourceUrl` and calls `mapTags` on insert/update automatically
4. Handles soft-delete of removed documents
5. Resolves access tokens automatically — OAuth tokens are refreshed, API keys are decrypted from the `encryptedApiKey` column
You never need to modify the sync engine when adding a connector; the sketch below is only for intuition.
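For intuition only, the pagination loop looks roughly like this (a simplified sketch based on the contract above, not the actual `sync-engine.ts` code):
```typescript
import type { ConnectorConfig } from '@/connectors/types'
// Simplified sketch of how the engine drives a connector; not the real implementation.
async function syncSketch(
  connector: ConnectorConfig,
  accessToken: string,
  sourceConfig: Record<string, unknown>
) {
  let cursor: string | undefined
  let hasMore = true
  while (hasMore) {
    const page = await connector.listDocuments(accessToken, sourceConfig, cursor)
    // contentHash comparison decides whether each returned document is new, changed, or unchanged
    cursor = page.nextCursor
    hasMore = page.hasMore
  }
  // Documents no longer returned by the source are soft-deleted afterwards
}
```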
## Icon
The `icon` field on `ConnectorConfig` is used throughout the UI — in the connector list, the add-connector modal, and as the document icon in the knowledge base table (replacing the generic file type icon for connector-sourced documents). The icon is read from `CONNECTOR_REGISTRY[connectorType].icon` at runtime — no separate icon map to maintain.
If the service already has an icon in `apps/sim/components/icons.tsx` (from a tool integration), reuse it. Otherwise, ask the user to provide the SVG.
## Registering
Add one line to `apps/sim/connectors/registry.ts`:
```typescript
import { {service}Connector } from '@/connectors/{service}'
export const CONNECTOR_REGISTRY: ConnectorRegistry = {
// ... existing connectors ...
{service}: {service}Connector,
}
```
## Reference Implementations
- **OAuth**: `apps/sim/connectors/confluence/confluence.ts` — multiple config field types, `mapTags`, label fetching
- **API key**: `apps/sim/connectors/fireflies/fireflies.ts` — GraphQL API with Bearer token auth
## Checklist
- [ ] Created `connectors/{service}/{service}.ts` with full ConnectorConfig
- [ ] Created `connectors/{service}/index.ts` barrel export
- [ ] **Auth configured correctly:**
- OAuth: `auth.provider` matches an existing `OAuthService` in `lib/oauth/types.ts`
- API key: `auth.label` and `auth.placeholder` set appropriately
- [ ] `listDocuments` handles pagination and computes content hashes
- [ ] `sourceUrl` set on each ExternalDocument (full URL, not relative)
- [ ] `metadata` includes source-specific data for tag mapping
- [ ] `tagDefinitions` declared for each semantic key returned by `mapTags`
- [ ] `mapTags` implemented if source has useful metadata (labels, dates, versions)
- [ ] `validateConfig` verifies the source is accessible
- [ ] All external API calls use `fetchWithRetry` (not raw `fetch`)
- [ ] All optional config fields validated in `validateConfig`
- [ ] Icon exists in `components/icons.tsx` (or asked user to provide SVG)
- [ ] Registered in `connectors/registry.ts`

View File

@@ -0,0 +1,26 @@
---
description: SEO and GEO guidelines for the landing page
globs: ["apps/sim/app/(home)/**/*.tsx"]
---
# Landing Page — SEO / GEO
## SEO
- One `<h1>` per page, in Hero only — never add another.
- Strict heading hierarchy: H1 (Hero) → H2 (section titles) → H3 (feature names).
- Every section: `<section id="…" aria-labelledby="…-heading">` (see the sketch at the end of this file).
- Decorative/animated elements: `aria-hidden="true"`.
- All internal routes use Next.js `<Link>` (crawlable). External links get `rel="noopener noreferrer"`.
- Navbar is a Server Component (no `'use client'`) for immediate crawlability. Logo `<Image>` has `priority` (LCP element).
- Navbar `<nav>` carries `SiteNavigationElement` schema.org markup.
- Feature lists must stay in sync with `WebApplication.featureList` in `structured-data.tsx`.
## GEO (Generative Engine Optimisation)
- **Answer-first pattern**: each section's H2 + subtitle should directly answer a user question (e.g. "What is Sim?", "How fast can I deploy?").
- **Atomic answer blocks**: each feature / template card should be independently extractable by an AI summariser.
- **Entity consistency**: always write "Sim" by name — never "the platform" or "our tool".
- **Keyword density**: first 150 visible chars of Hero must name "Sim", "AI agents", "agentic workflows".
- **sr-only summaries**: Hero and Templates each have a `<p className="sr-only">` (~50 words) as an atomic product/catalog summary for AI citation.
- **Specific numbers**: prefer concrete figures ("1,000+ integrations", "15+ AI providers") over vague claims.
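A minimal sketch of a section following these rules (ids, headings, and copy are placeholders, not the real landing page markup):
```tsx
// Illustrative only: ids, copy, and structure are placeholders for the conventions above.
export function WhatIsSimSection() {
  return (
    <section id="what-is-sim" aria-labelledby="what-is-sim-heading">
      {/* Answer-first H2: directly answers "What is Sim?" */}
      <h2 id="what-is-sim-heading">What is Sim?</h2>
      <p>Sim is the open-source platform to build AI agents and run agentic workflows.</p>
      {/* sr-only atomic summary (~50 words in the real page) for AI citation */}
      <p className="sr-only">
        Sim lets teams build, deploy, and orchestrate AI agents with 1,000+ integrations.
      </p>
      {/* Decorative animation: hidden from assistive tech and crawlers */}
      <div aria-hidden="true" />
    </section>
  )
}
```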

View File

@@ -5,62 +5,122 @@ globs: ["apps/sim/hooks/queries/**/*.ts"]
# React Query Patterns
All React Query hooks live in `hooks/queries/`.
All React Query hooks live in `hooks/queries/`. All server state must go through React Query — never use `useState` + `fetch` in components for data fetching or mutations.
## Query Key Factory
Every query file defines a keys factory:
Every query file defines a hierarchical keys factory with an `all` root key and intermediate plural keys for prefix-level invalidation:
```typescript
export const entityKeys = {
all: ['entity'] as const,
list: (workspaceId?: string) => [...entityKeys.all, 'list', workspaceId ?? ''] as const,
detail: (id?: string) => [...entityKeys.all, 'detail', id ?? ''] as const,
lists: () => [...entityKeys.all, 'list'] as const,
list: (workspaceId?: string) => [...entityKeys.lists(), workspaceId ?? ''] as const,
details: () => [...entityKeys.all, 'detail'] as const,
detail: (id?: string) => [...entityKeys.details(), id ?? ''] as const,
}
```
Never use inline query keys — always use the factory.
## File Structure
```typescript
// 1. Query keys factory
// 2. Types (if needed)
// 3. Private fetch functions
// 3. Private fetch functions (accept signal parameter)
// 4. Exported hooks
```
## Query Hook
- Every `queryFn` must destructure and forward `signal` for request cancellation
- Every query must have an explicit `staleTime`
- Use `keepPreviousData` only on variable-key queries (where params change), never on static keys
```typescript
async function fetchEntities(workspaceId: string, signal?: AbortSignal) {
const response = await fetch(`/api/entities?workspaceId=${workspaceId}`, { signal })
if (!response.ok) throw new Error('Failed to fetch entities')
return response.json()
}
export function useEntityList(workspaceId?: string, options?: { enabled?: boolean }) {
return useQuery({
queryKey: entityKeys.list(workspaceId),
queryFn: () => fetchEntities(workspaceId as string),
queryFn: ({ signal }) => fetchEntities(workspaceId as string, signal),
enabled: Boolean(workspaceId) && (options?.enabled ?? true),
staleTime: 60 * 1000,
placeholderData: keepPreviousData,
placeholderData: keepPreviousData, // OK: workspaceId varies
})
}
```
## Mutation Hook
- Use targeted invalidation (`entityKeys.lists()`) not broad (`entityKeys.all`) when possible
- Invalidation must cover all affected query key prefixes (lists, details, related views)
```typescript
export function useCreateEntity() {
const queryClient = useQueryClient()
return useMutation({
mutationFn: async (variables) => { /* fetch POST */ },
onSuccess: () => queryClient.invalidateQueries({ queryKey: entityKeys.all }),
onSuccess: () => {
queryClient.invalidateQueries({ queryKey: entityKeys.lists() })
},
})
}
```
## Optimistic Updates
For optimistic mutations, use `onSettled` (not `onSuccess`) for cache reconciliation — `onSettled` fires on both success and error, ensuring the cache is always reconciled with the server.
```typescript
export function useUpdateEntity() {
const queryClient = useQueryClient()
return useMutation({
mutationFn: async (variables) => { /* ... */ },
onMutate: async (variables) => {
await queryClient.cancelQueries({ queryKey: entityKeys.detail(variables.id) })
const previous = queryClient.getQueryData(entityKeys.detail(variables.id))
queryClient.setQueryData(entityKeys.detail(variables.id), /* optimistic value */)
return { previous }
},
onError: (_err, variables, context) => {
queryClient.setQueryData(entityKeys.detail(variables.id), context?.previous)
},
onSettled: (_data, _error, variables) => {
queryClient.invalidateQueries({ queryKey: entityKeys.lists() })
queryClient.invalidateQueries({ queryKey: entityKeys.detail(variables.id) })
},
})
}
```
For optimistic mutations syncing with Zustand, use `createOptimisticMutationHandlers` from `@/hooks/queries/utils/optimistic-mutation`.
## useCallback Dependencies
Never include mutation objects (e.g., `createEntity`) in `useCallback` dependency arrays — the mutation object is not referentially stable and changes on every state update. The `.mutate()` and `.mutateAsync()` functions are stable in TanStack Query v5.
```typescript
// ✗ Bad — causes unnecessary recreations
const handler = useCallback(() => {
createEntity.mutate(data)
}, [createEntity]) // unstable reference
// ✓ Good — omit from deps, mutate is stable
const handler = useCallback(() => {
createEntity.mutate(data)
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [data])
```
## Naming
- **Keys**: `entityKeys`
- **Query hooks**: `useEntity`, `useEntityList`
- **Mutation hooks**: `useCreateEntity`, `useUpdateEntity`
- **Fetch functions**: `fetchEntity` (private)
- **Mutation hooks**: `useCreateEntity`, `useUpdateEntity`, `useDeleteEntity`
- **Fetch functions**: `fetchEntity`, `fetchEntities` (private)

View File

@@ -0,0 +1,296 @@
---
name: add-hosted-key
description: Add hosted API key support to a tool so Sim provides the key when users don't bring their own. Use when adding hosted keys, BYOK support, hideWhenHosted, or hosted key pricing to a tool or block.
---
# Adding Hosted Key Support to a Tool
When a tool has hosted key support, Sim provides its own API key if the user hasn't configured one (via BYOK or env var). Usage is metered and billed to the workspace.
## Overview
| Step | What | Where |
|------|------|-------|
| 1 | Register BYOK provider ID | `tools/types.ts`, `app/api/workspaces/[id]/byok-keys/route.ts` |
| 2 | Research the API's pricing and rate limits | API docs / pricing page (before writing any code) |
| 3 | Add `hosting` config to the tool | `tools/{service}/{action}.ts` |
| 4 | Hide API key field when hosted | `blocks/blocks/{service}.ts` |
| 5 | Add to BYOK settings UI | BYOK settings component (`byok.tsx`) |
| 6 | Summarize pricing and throttling comparison | Output to user (after all code changes) |
## Step 1: Register the BYOK Provider ID
Add the new provider to the `BYOKProviderId` union in `tools/types.ts`:
```typescript
export type BYOKProviderId =
| 'openai'
| 'anthropic'
// ...existing providers
| 'your_service'
```
Then add it to `VALID_PROVIDERS` in `app/api/workspaces/[id]/byok-keys/route.ts`:
```typescript
const VALID_PROVIDERS = ['openai', 'anthropic', 'google', 'mistral', 'your_service'] as const
```
## Step 2: Research the API's Pricing Model and Rate Limits
**Before writing any `getCost` or `rateLimit` code**, look up the service's official documentation for both pricing and rate limits. You need to understand:
### Pricing
1. **How the API charges** — per request, per credit, per token, per step, per minute, etc.
2. **Whether the API reports cost in its response** — look for fields like `creditsUsed`, `costDollars`, `tokensUsed`, or similar in the response body or headers
3. **Whether cost varies by endpoint/options** — some APIs charge more for certain features (e.g., Firecrawl charges 1 credit/page base but +4 for JSON format, +4 for enhanced mode)
4. **The dollar-per-unit rate** — what each credit/token/unit costs in dollars on our plan
### Rate Limits
1. **What rate limits the API enforces** — requests per minute/second, tokens per minute, concurrent requests, etc.
2. **Whether limits vary by plan tier** — free vs paid vs enterprise often have different ceilings
3. **Whether limits are per-key or per-account** — determines whether adding more hosted keys actually increases total throughput
4. **What the API returns when rate limited** — HTTP 429, `Retry-After` header, error body format, etc.
5. **Whether there are multiple dimensions** — some APIs limit both requests/min AND tokens/min independently
Search the API's docs/pricing page (use WebSearch/WebFetch). Capture the pricing model as a comment in `getCost` so future maintainers know the source of truth.
### Setting Our Rate Limits
Our rate limiter (`lib/core/rate-limiter/hosted-key/`) uses a token-bucket algorithm applied **per billing actor** (workspace). It supports two modes:
- **`per_request`** — simple; just `requestsPerMinute`. Good when the API charges flat per-request or cost doesn't vary much.
- **`custom`** — `requestsPerMinute` plus additional `dimensions` (e.g., `tokens`, `search_units`). Each dimension has its own `limitPerMinute` and an `extractUsage` function that reads actual usage from the response. Use when the API charges on a variable metric (tokens, credits) and you want to cap that metric too (a sketch follows the list below).
When choosing values for `requestsPerMinute` and any dimension limits:
- **Stay well below the API's per-key limit** — our keys are shared across all workspaces. If the API allows 60 RPM per key and we have 3 keys, the global ceiling is ~180 RPM. Set the per-workspace limit low enough (e.g., 20-60 RPM) that many workspaces can coexist without collectively hitting the API's ceiling.
- **Account for key pooling** — our round-robin distributes requests across `N` hosted keys, so the effective API-side rate per key is `(total requests) / N`. But per-workspace limits are enforced *before* key selection, so they apply regardless of key count.
- **Prefer conservative defaults** — it's easy to raise limits later but hard to claw back after users depend on high throughput.
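A sketch of the `custom` mode shape described above; the exact `dimensions`/`extractUsage` signature is an assumption here and should be checked against `lib/core/rate-limiter/hosted-key/`:
```typescript
rateLimit: {
  mode: 'custom',
  requestsPerMinute: 30, // conservative per-workspace ceiling, well below the API's per-key limit
  dimensions: [
    {
      id: 'tokens',
      limitPerMinute: 100_000,
      // Reads actual usage from the tool output so the bucket tracks the metered metric
      extractUsage: (output: Record<string, unknown>) => Number(output.totalTokens ?? 0),
    },
  ],
},
```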
## Step 3: Add `hosting` Config to the Tool
Add a `hosting` object to the tool's `ToolConfig`. This tells the execution layer how to acquire hosted keys, calculate cost, and rate-limit.
```typescript
hosting: {
envKeyPrefix: 'YOUR_SERVICE_API_KEY',
apiKeyParam: 'apiKey',
byokProviderId: 'your_service',
pricing: {
type: 'custom',
getCost: (_params, output) => {
if (output.creditsUsed == null) {
throw new Error('Response missing creditsUsed field')
}
const creditsUsed = output.creditsUsed as number
const cost = creditsUsed * 0.001 // dollars per credit
return { cost, metadata: { creditsUsed } }
},
},
rateLimit: {
mode: 'per_request',
requestsPerMinute: 100,
},
},
```
### Hosted Key Env Var Convention
Keys use a numbered naming pattern driven by a count env var:
```
YOUR_SERVICE_API_KEY_COUNT=3
YOUR_SERVICE_API_KEY_1=sk-...
YOUR_SERVICE_API_KEY_2=sk-...
YOUR_SERVICE_API_KEY_3=sk-...
```
The `envKeyPrefix` value (`YOUR_SERVICE_API_KEY`) determines which env vars are read at runtime. Adding more keys only requires bumping the count and adding the new env var.
### Pricing: Prefer API-Reported Cost
Always prefer using cost data returned by the API (e.g., `creditsUsed`, `costDollars`). This is the most accurate because it accounts for variable pricing tiers, feature modifiers, and plan-level discounts.
**When the API reports cost** — use it directly and throw if missing:
```typescript
pricing: {
type: 'custom',
getCost: (params, output) => {
if (output.creditsUsed == null) {
throw new Error('Response missing creditsUsed field')
}
// $0.001 per credit — from https://example.com/pricing
const cost = (output.creditsUsed as number) * 0.001
return { cost, metadata: { creditsUsed: output.creditsUsed } }
},
},
```
**When the API does NOT report cost** — compute it from params/output based on the pricing docs, but still validate the data you depend on:
```typescript
pricing: {
type: 'custom',
getCost: (params, output) => {
if (!Array.isArray(output.searchResults)) {
throw new Error('Response missing searchResults, cannot determine cost')
}
// Serper: 1 credit for <=10 results, 2 credits for >10 — from https://serper.dev/pricing
const credits = Number(params.num) > 10 ? 2 : 1
return { cost: credits * 0.001, metadata: { credits } }
},
},
```
**`getCost` must always throw** if it cannot determine cost. Never silently fall back to a default — this would hide billing inaccuracies.
### Capturing Cost Data from the API
If the API returns cost info, capture it in `transformResponse` so `getCost` can read it from the output:
```typescript
transformResponse: async (response: Response) => {
const data = await response.json()
return {
success: true,
output: {
results: data.results,
creditsUsed: data.creditsUsed, // pass through for getCost
},
}
},
```
For async/polling tools, capture it in `postProcess` when the job completes:
```typescript
if (jobData.status === 'completed') {
result.output = {
data: jobData.data,
creditsUsed: jobData.creditsUsed,
}
}
```
## Step 4: Hide the API Key Field When Hosted
In the block config (`blocks/blocks/{service}.ts`), add `hideWhenHosted: true` to the API key subblock. This hides the field on hosted Sim since the platform provides the key:
```typescript
{
id: 'apiKey',
title: 'API Key',
type: 'short-input',
placeholder: 'Enter your API key',
password: true,
required: true,
hideWhenHosted: true,
},
```
The visibility is controlled by `isSubBlockHiddenByHostedKey()` in `lib/workflows/subblocks/visibility.ts`, which checks the `isHosted` feature flag.
### Excluding Specific Operations from Hosted Key Support
When a block has multiple operations but some operations should **not** use a hosted key (e.g., the underlying API is deprecated, unsupported, or too expensive), use the **duplicate apiKey subblock** pattern. This is the same pattern Exa uses for its `research` operation:
1. **Remove the `hosting` config** from the tool definition for that operation — it must not have a `hosting` object at all.
2. **Duplicate the `apiKey` subblock** in the block config with opposing conditions:
```typescript
// API Key — hidden when hosted for operations with hosted key support
{
id: 'apiKey',
title: 'API Key',
type: 'short-input',
placeholder: 'Enter your API key',
password: true,
required: true,
hideWhenHosted: true,
condition: { field: 'operation', value: 'unsupported_op', not: true },
},
// API Key — always visible for unsupported_op (no hosted key support)
{
id: 'apiKey',
title: 'API Key',
type: 'short-input',
placeholder: 'Enter your API key',
password: true,
required: true,
condition: { field: 'operation', value: 'unsupported_op' },
},
```
Both subblocks share the same `id: 'apiKey'`, so the same value flows to the tool. The conditions ensure only one is visible at a time. The first has `hideWhenHosted: true` and shows for all hosted operations; the second has no `hideWhenHosted` and shows only for the excluded operation — meaning users must always provide their own key for that operation.
To exclude multiple operations, use an array: `{ field: 'operation', value: ['op_a', 'op_b'] }`.
**Reference implementations:**
- **Exa** (`blocks/blocks/exa.ts`): `research` operation excluded from hosting — lines 309-329
- **Google Maps** (`blocks/blocks/google_maps.ts`): `speed_limits` operation excluded from hosting (deprecated Roads API)
## Step 5: Add to the BYOK Settings UI
Add an entry to the `PROVIDERS` array in the BYOK settings component so users can bring their own key. You need the service icon from `components/icons.tsx`:
```typescript
{
id: 'your_service',
name: 'Your Service',
icon: YourServiceIcon,
description: 'What this service does',
placeholder: 'Enter your API key',
},
```
## Step 6: Summarize Pricing and Throttling Comparison
After all code changes are complete, output a detailed summary to the user covering:
### What to include
1. **API's pricing model** — how the service charges (per token, per credit, per request, etc.), the specific rates found in docs, and whether the API reports cost in responses.
2. **Our `getCost` approach** — how we calculate cost, what fields we depend on, and any assumptions or estimates (especially when the API doesn't report exact dollar cost).
3. **API's rate limits** — the documented limits (RPM, TPM, concurrent, etc.), which plan tier they apply to, and whether they're per-key or per-account.
4. **Our `rateLimit` config** — what we set for `requestsPerMinute` (and dimensions if custom mode), why we chose those values, and how they compare to the API's limits.
5. **Key pooling impact** — how many hosted keys we expect, and how round-robin distribution affects the effective per-key rate at the API.
6. **Gaps or risks** — anything the API charges for that we don't meter, rate limit dimensions we chose not to enforce, or pricing that may be inaccurate due to variable model/tier costs.
### Format
Present this as a structured summary with clear headings. Example:
```
### Pricing
- **API charges**: $X per 1M tokens (input), $Y per 1M tokens (output) — varies by model
- **Response reports cost?**: No — only token counts in `usage` field
- **Our getCost**: Estimates cost at $Z per 1M total tokens based on median model pricing
- **Risk**: Actual cost varies by model; our estimate may over/undercharge for cheap/expensive models
### Throttling
- **API limits**: 300 RPM per key (paid tier), 60 RPM (free tier)
- **Per-key or per-account**: Per key — more keys = more throughput
- **Our config**: 60 RPM per workspace (per_request mode)
- **With N keys**: Effective per-key rate is (total RPM across workspaces) / N
- **Headroom**: Comfortable — even 10 active workspaces at full rate = 600 RPM / 3 keys = 200 RPM per key, under the 300 RPM API limit
```
This summary helps reviewers verify that the pricing and rate limiting are well-calibrated and surfaces any risks that need monitoring.
## Checklist
- [ ] Provider added to `BYOKProviderId` in `tools/types.ts`
- [ ] Provider added to `VALID_PROVIDERS` in the BYOK keys API route
- [ ] API pricing docs researched — understand per-unit cost and whether the API reports cost in responses
- [ ] API rate limits researched — understand RPM/TPM limits, per-key vs per-account, and plan tiers
- [ ] `hosting` config added to the tool with `envKeyPrefix`, `apiKeyParam`, `byokProviderId`, `pricing`, and `rateLimit`
- [ ] `getCost` throws if required cost data is missing from the response
- [ ] Cost data captured in `transformResponse` or `postProcess` if API provides it
- [ ] `hideWhenHosted: true` added to the API key subblock in the block config
- [ ] Provider entry added to the BYOK settings UI with icon and description
- [ ] Env vars documented: `{PREFIX}_COUNT` and `{PREFIX}_1..N`
- [ ] Pricing and throttling summary provided to reviewer

3
.gitignore vendored
View File

@@ -26,6 +26,9 @@ bun-debug.log*
**/standalone/
sim-standalone.tar.gz
# redis
dump.rdb
# misc
.DS_Store
*.pem

View File

@@ -134,21 +134,64 @@ Use `devtools` middleware. Use `persist` only when data should survive reload wi
## React Query
All React Query hooks live in `hooks/queries/`.
All React Query hooks live in `hooks/queries/`. All server state must go through React Query — never use `useState` + `fetch` in components for data fetching or mutations.
### Query Key Factory
Every file must have a hierarchical key factory with an `all` root key and intermediate plural keys for prefix invalidation:
```typescript
export const entityKeys = {
all: ['entity'] as const,
list: (workspaceId?: string) => [...entityKeys.all, 'list', workspaceId ?? ''] as const,
lists: () => [...entityKeys.all, 'list'] as const,
list: (workspaceId?: string) => [...entityKeys.lists(), workspaceId ?? ''] as const,
details: () => [...entityKeys.all, 'detail'] as const,
detail: (id?: string) => [...entityKeys.details(), id ?? ''] as const,
}
```
### Query Hooks
- Every `queryFn` must forward `signal` for request cancellation
- Every query must have an explicit `staleTime`
- Use `keepPreviousData` only on variable-key queries (where params change), never on static keys
```typescript
export function useEntityList(workspaceId?: string) {
return useQuery({
queryKey: entityKeys.list(workspaceId),
queryFn: () => fetchEntities(workspaceId as string),
queryFn: ({ signal }) => fetchEntities(workspaceId as string, signal),
enabled: Boolean(workspaceId),
staleTime: 60 * 1000,
placeholderData: keepPreviousData,
placeholderData: keepPreviousData, // OK: workspaceId varies
})
}
```
### Mutation Hooks
- Use targeted invalidation (`entityKeys.lists()`) not broad (`entityKeys.all`) when possible
- For optimistic updates: use `onSettled` (not `onSuccess`) for cache reconciliation — `onSettled` fires on both success and error
- Don't include mutation objects in `useCallback` deps — `.mutate()` is stable in TanStack Query v5
```typescript
export function useUpdateEntity() {
const queryClient = useQueryClient()
return useMutation({
mutationFn: async (variables) => { /* ... */ },
onMutate: async (variables) => {
await queryClient.cancelQueries({ queryKey: entityKeys.detail(variables.id) })
const previous = queryClient.getQueryData(entityKeys.detail(variables.id))
queryClient.setQueryData(entityKeys.detail(variables.id), /* optimistic */)
return { previous }
},
onError: (_err, variables, context) => {
queryClient.setQueryData(entityKeys.detail(variables.id), context?.previous)
},
onSettled: (_data, _error, variables) => {
queryClient.invalidateQueries({ queryKey: entityKeys.lists() })
queryClient.invalidateQueries({ queryKey: entityKeys.detail(variables.id) })
},
})
}
```

View File

@@ -4,7 +4,7 @@
</a>
</p>
<p align="center">Build and deploy AI agent workflows in minutes.</p>
<p align="center">The open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to orchestrate agentic workflows.</p>
<p align="center">
<a href="https://sim.ai" target="_blank" rel="noopener noreferrer"><img src="https://img.shields.io/badge/sim.ai-6F3DFA" alt="Sim.ai"></a>

View File

@@ -233,6 +233,7 @@ export default async function Page(props: { params: Promise<{ slug?: string[]; l
lang={lang}
breadcrumb={breadcrumbs}
/>
<style>{`#nd-page { grid-column: main-start / toc-end !important; max-width: 1400px !important; }`}</style>
<DocsPage
toc={data.toc}
breadcrumb={{
@@ -367,15 +368,17 @@ export async function generateMetadata(props: {
return {
title: data.title,
description:
data.description || 'Sim visual workflow builder for AI applications documentation',
data.description ||
'Documentation for Sim — the open-source platform to build AI agents and run your agentic workforce.',
keywords: [
'AI workflow builder',
'visual workflow editor',
'AI automation',
'workflow automation',
'AI agents',
'no-code AI',
'drag and drop workflows',
'agentic workforce',
'AI agent platform',
'agentic workflows',
'LLM orchestration',
'AI automation',
'knowledge base',
'AI integrations',
data.title?.toLowerCase().split(' '),
]
.flat()
@@ -385,7 +388,8 @@ export async function generateMetadata(props: {
openGraph: {
title: data.title,
description:
data.description || 'Sim visual workflow builder for AI applications documentation',
data.description ||
'Documentation for Sim — the open-source platform to build AI agents and run your agentic workforce.',
url: fullUrl,
siteName: 'Sim Documentation',
type: 'article',
@@ -406,7 +410,8 @@ export async function generateMetadata(props: {
card: 'summary_large_image',
title: data.title,
description:
data.description || 'Sim visual workflow builder for AI applications documentation',
data.description ||
'Documentation for Sim — the open-source platform to build AI agents and run your agentic workforce.',
images: [ogImageUrl],
creator: '@simdotai',
site: '@simdotai',

View File

@@ -10,6 +10,7 @@ import {
SidebarSeparator,
} from '@/components/docs-layout/sidebar-components'
import { Navbar } from '@/components/navbar/navbar'
import { AnimatedBlocks } from '@/components/ui/animated-blocks'
import { SimLogoFull } from '@/components/ui/sim-logo'
import { i18n } from '@/lib/i18n'
import { source } from '@/lib/source'
@@ -66,7 +67,7 @@ export default async function Layout({ children, params }: LayoutProps) {
'@type': 'WebSite',
name: 'Sim Documentation',
description:
'Comprehensive documentation for Sim - the visual workflow builder for AI Agent Workflows.',
'Documentation for Sim the open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to deploy and orchestrate agentic workflows.',
url: 'https://docs.sim.ai',
publisher: {
'@type': 'Organization',
@@ -102,6 +103,7 @@ export default async function Layout({ children, params }: LayoutProps) {
</head>
<body className='flex min-h-screen flex-col font-sans'>
<Script src='https://assets.onedollarstats.com/stonks.js' strategy='lazyOnload' />
<AnimatedBlocks />
<RootProvider i18n={provider(lang)}>
<Navbar />
<DocsLayout

View File

@@ -9,13 +9,24 @@ body {
overscroll-behavior: none;
}
/* Reserve scrollbar space to prevent layout jitter between pages */
html {
scrollbar-gutter: stable;
/* Prevent modals/dialogs from shifting layout via scroll-lock compensation */
html,
body {
padding-right: 0 !important;
margin-right: 0 !important;
}
@theme {
--color-fd-primary: #33c482;
--color-fd-primary: var(--color-fd-foreground);
}
/* Match landing page dark background (#1b1b1b) */
.dark {
--color-fd-background: hsl(0, 0%, 10.6%) !important;
--color-fd-card: hsl(0, 0%, 13%) !important;
--color-fd-popover: hsl(0, 0%, 14%) !important;
--color-fd-secondary: hsl(0, 0%, 15.5%) !important;
--color-fd-muted: hsl(0, 0%, 18%) !important;
}
/* Font family utilities */
@@ -32,7 +43,7 @@ html {
:root {
--fd-border: transparent !important;
--fd-border-sidebar: transparent !important;
--fd-nav-height: 65px; /* Custom navbar height (h-16 = 64px + 1px border) */
--fd-nav-height: 93px; /* Custom navbar height (52px top + 1px divider + 40px tabs) */
/* Content container width used to center main content */
--spacing-fd-container: 1400px;
/* Edge gutter = leftover space on each side of centered container */
@@ -48,27 +59,25 @@ html {
--content-gap: 1.75rem;
}
/* Light mode navbar and search styling */
/* Light mode navbar background */
:root:not(.dark) nav {
background-color: hsla(0, 0%, 96%, 0.85) !important;
}
:root:not(.dark) nav button[type="button"] {
background-color: hsla(0, 0%, 93%, 0.85) !important;
/* Dark mode navbar background */
:root.dark nav {
background-color: hsla(0, 0%, 10.6%, 0.92) !important;
}
:root.dark nav button[type="button"] {
background-color: hsla(0, 0%, 15%, 0.85) !important;
backdrop-filter: blur(33px) saturate(180%) !important;
-webkit-backdrop-filter: blur(33px) saturate(180%) !important;
color: rgba(0, 0, 0, 0.6) !important;
color: rgba(255, 255, 255, 0.5) !important;
}
:root:not(.dark) nav button[type="button"] kbd {
color: rgba(0, 0, 0, 0.6) !important;
}
/* Dark mode navbar and search styling */
:root.dark nav {
background-color: hsla(0, 0%, 7.04%, 0.92) !important;
backdrop-filter: blur(25px) saturate(180%) brightness(0.6) !important;
-webkit-backdrop-filter: blur(25px) saturate(180%) brightness(0.6) !important;
:root.dark nav button[type="button"] kbd {
color: rgba(255, 255, 255, 0.4) !important;
}
/* Floating sidebar appearance - remove background */
@@ -97,7 +106,7 @@ aside#nd-sidebar {
display: none !important;
}
/* Mobile only: Reduce gap between navbar and content */
/* Mobile only: Reduce gap between navbar and content (custom navbar hidden on mobile) */
@media (max-width: 1023px) {
#nd-docs-layout {
margin-top: -25px;
@@ -121,11 +130,11 @@ aside#nd-sidebar {
/* On mobile, let fumadocs handle the layout natively */
@media (min-width: 1024px) {
:root {
--fd-banner-height: 65px !important; /* 64px navbar + 1px border */
--fd-banner-height: 93px !important; /* 52px top + 1px divider + 40px tabs */
}
#nd-docs-layout {
--fd-docs-height: calc(100dvh - 65px) !important; /* 64px navbar + 1px border */
--fd-docs-height: calc(100dvh - 93px) !important; /* 52px top + 1px divider + 40px tabs */
--fd-sidebar-width: 300px !important;
margin-left: var(--sidebar-offset) !important;
margin-right: var(--toc-offset) !important;
@@ -552,16 +561,15 @@ video {
/* API Reference Pages — Mintlify-style overrides */
/* OpenAPI pages: span main + TOC grid columns for wide two-column layout.
The grid has columns: spacer | sidebar | main | toc | spacer.
By spanning columns 3-4, the article fills both main and toc areas,
while the grid structure stays identical to non-OpenAPI pages (no jitter). */
Use named grid lines from grid-template-areas so this works regardless
of whether the grid has 3 columns (production) or 5 columns (local dev). */
#nd-page:has(.api-page-header) {
grid-column: 3 / span 2 !important;
grid-column: main-start / toc-end !important;
max-width: 1400px !important;
}
/* Hide the empty TOC aside on OpenAPI pages so it doesn't overlay content */
#nd-docs-layout:has(#nd-page .api-page-header) #nd-toc {
#nd-docs-layout:has(#nd-page:has(.api-page-header)) #nd-toc {
display: none;
}
@@ -590,44 +598,39 @@ video {
"Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
}
/* Method badge pills in page content — colored background pills */
#nd-page span.font-mono.font-medium[class*="text-green"] {
background-color: rgb(220 252 231 / 0.6);
padding: 0.125rem 0.5rem;
border-radius: 0.375rem;
font-size: 0.75rem;
/* Method badge pills — shared background colors (page + sidebar) */
span.font-mono.font-medium[data-method="get"],
span.font-mono.font-medium[data-method="head"],
span.font-mono.font-medium[data-method="options"] {
background-color: rgb(220 252 231 / 0.85);
}
html.dark #nd-page span.font-mono.font-medium[class*="text-green"] {
html.dark span.font-mono.font-medium[data-method="get"],
html.dark span.font-mono.font-medium[data-method="head"],
html.dark span.font-mono.font-medium[data-method="options"] {
background-color: rgb(34 197 94 / 0.15);
}
#nd-page span.font-mono.font-medium[class*="text-blue"] {
background-color: rgb(219 234 254 / 0.6);
padding: 0.125rem 0.5rem;
border-radius: 0.375rem;
font-size: 0.75rem;
span.font-mono.font-medium[data-method="post"] {
background-color: rgb(219 234 254 / 0.85);
}
html.dark #nd-page span.font-mono.font-medium[class*="text-blue"] {
html.dark span.font-mono.font-medium[data-method="post"] {
background-color: rgb(59 130 246 / 0.15);
}
#nd-page span.font-mono.font-medium[class*="text-orange"] {
background-color: rgb(255 237 213 / 0.6);
padding: 0.125rem 0.5rem;
border-radius: 0.375rem;
font-size: 0.75rem;
span.font-mono.font-medium[data-method="put"] {
background-color: rgb(254 249 195 / 0.85);
}
html.dark #nd-page span.font-mono.font-medium[class*="text-orange"] {
html.dark span.font-mono.font-medium[data-method="put"] {
background-color: rgb(234 179 8 / 0.15);
}
span.font-mono.font-medium[data-method="patch"] {
background-color: rgb(255 237 213 / 0.85);
}
html.dark span.font-mono.font-medium[data-method="patch"] {
background-color: rgb(249 115 22 / 0.15);
}
#nd-page span.font-mono.font-medium[class*="text-red"] {
background-color: rgb(254 226 226 / 0.6);
padding: 0.125rem 0.5rem;
border-radius: 0.375rem;
font-size: 0.75rem;
span.font-mono.font-medium[data-method="delete"] {
background-color: rgb(254 226 226 / 0.85);
}
html.dark #nd-page span.font-mono.font-medium[class*="text-red"] {
html.dark span.font-mono.font-medium[data-method="delete"] {
background-color: rgb(239 68 68 / 0.15);
}
@@ -635,52 +638,31 @@ html.dark #nd-page span.font-mono.font-medium[class*="text-red"] {
#nd-sidebar a:has(span.font-mono.font-medium) {
display: flex !important;
align-items: center !important;
gap: 6px;
gap: 0.375rem;
}
/* Sidebar method badges — ensure proper inline flex display */
/* Sidebar method badges — fixed-width for right-aligned labels */
#nd-sidebar a span.font-mono.font-medium {
display: inline-flex;
align-items: center;
justify-content: center;
min-width: 2.25rem;
font-size: 10px !important;
width: 2.625rem;
font-size: 0.625rem !important;
line-height: 1 !important;
padding: 2.5px 4px;
border-radius: 3px;
padding: 0.15625rem 0.25rem;
border-radius: 0.1875rem;
flex-shrink: 0;
}
/* Sidebar GET badges */
#nd-sidebar a span.font-mono.font-medium[class*="text-green"] {
background-color: rgb(220 252 231 / 0.6);
}
html.dark #nd-sidebar a span.font-mono.font-medium[class*="text-green"] {
background-color: rgb(34 197 94 / 0.15);
}
/* Sidebar POST badges */
#nd-sidebar a span.font-mono.font-medium[class*="text-blue"] {
background-color: rgb(219 234 254 / 0.6);
}
html.dark #nd-sidebar a span.font-mono.font-medium[class*="text-blue"] {
background-color: rgb(59 130 246 / 0.15);
}
/* Sidebar PUT badges */
#nd-sidebar a span.font-mono.font-medium[class*="text-orange"] {
background-color: rgb(255 237 213 / 0.6);
}
html.dark #nd-sidebar a span.font-mono.font-medium[class*="text-orange"] {
background-color: rgb(249 115 22 / 0.15);
}
/* Sidebar DELETE badges */
#nd-sidebar a span.font-mono.font-medium[class*="text-red"] {
background-color: rgb(254 226 226 / 0.6);
}
html.dark #nd-sidebar a span.font-mono.font-medium[class*="text-red"] {
background-color: rgb(239 68 68 / 0.15);
/* Footer navigation method badges — pill styling to match sidebar */
#nd-page span.font-mono.font-medium[data-method] {
display: inline-flex;
align-items: center;
justify-content: center;
font-size: 0.625rem !important;
line-height: 1 !important;
padding: 0.15625rem 0.375rem;
border-radius: 0.1875rem;
}
/* Code block containers — match regular docs styling */
@@ -740,8 +722,25 @@ html.dark
font-size: 0.6875rem !important;
letter-spacing: 0.025em;
text-transform: uppercase;
padding: 0.125rem 0.5rem !important;
border-radius: 0.375rem !important;
}
/* POST — softer blue */
/* Path bar per-method colors (fumadocs renders these, so we match by class) */
/* GET */
#nd-page:has(.api-page-header)
div.flex.flex-row.items-center.rounded-xl.border.not-prose
span.font-mono.font-medium[class*="text-green"] {
color: rgb(22 163 74) !important;
background-color: rgb(220 252 231 / 0.7) !important;
}
html.dark
#nd-page:has(.api-page-header)
div.flex.flex-row.items-center.rounded-xl.border.not-prose
span.font-mono.font-medium[class*="text-green"] {
color: rgb(74 222 128) !important;
background-color: rgb(34 197 94 / 0.15) !important;
}
/* POST */
#nd-page:has(.api-page-header)
div.flex.flex-row.items-center.rounded-xl.border.not-prose
span.font-mono.font-medium[class*="text-blue"] {
@@ -755,19 +754,47 @@ html.dark
color: rgb(96 165 250) !important;
background-color: rgb(59 130 246 / 0.15) !important;
}
/* GET — softer green */
/* PUT */
#nd-page:has(.api-page-header)
div.flex.flex-row.items-center.rounded-xl.border.not-prose
span.font-mono.font-medium[class*="text-green"] {
color: rgb(22 163 74) !important;
background-color: rgb(220 252 231 / 0.7) !important;
span.font-mono.font-medium[class*="text-yellow"] {
color: rgb(161 98 7) !important;
background-color: rgb(254 249 195 / 0.7) !important;
}
html.dark
#nd-page:has(.api-page-header)
div.flex.flex-row.items-center.rounded-xl.border.not-prose
span.font-mono.font-medium[class*="text-green"] {
color: rgb(74 222 128) !important;
background-color: rgb(34 197 94 / 0.15) !important;
span.font-mono.font-medium[class*="text-yellow"] {
color: rgb(250 204 21) !important;
background-color: rgb(234 179 8 / 0.15) !important;
}
/* PATCH */
#nd-page:has(.api-page-header)
div.flex.flex-row.items-center.rounded-xl.border.not-prose
span.font-mono.font-medium[class*="text-orange"] {
color: rgb(194 65 12) !important;
background-color: rgb(255 237 213 / 0.7) !important;
}
html.dark
#nd-page:has(.api-page-header)
div.flex.flex-row.items-center.rounded-xl.border.not-prose
span.font-mono.font-medium[class*="text-orange"] {
color: rgb(251 146 60) !important;
background-color: rgb(249 115 22 / 0.15) !important;
}
/* DELETE */
#nd-page:has(.api-page-header)
div.flex.flex-row.items-center.rounded-xl.border.not-prose
span.font-mono.font-medium[class*="text-red"] {
color: rgb(185 28 28) !important;
background-color: rgb(254 226 226 / 0.7) !important;
}
html.dark
#nd-page:has(.api-page-header)
div.flex.flex-row.items-center.rounded-xl.border.not-prose
span.font-mono.font-medium[class*="text-red"] {
color: rgb(248 113 113) !important;
background-color: rgb(239 68 68 / 0.15) !important;
}
/* Path text inside method+path bar — monospace, bright like Gumloop */
@@ -966,17 +993,17 @@ html.dark .response-section-dropdown-item:hover {
order: 1;
}
/* Type badge — order 2, grey pill like Mintlify */
/* Type badge — order 2, grey pill */
#nd-page:has(.api-page-header)
.flex.flex-wrap.items-center.gap-3.not-prose
> span.text-sm.font-mono.text-fd-muted-foreground {
order: 2;
background-color: rgb(240 240 243);
color: rgb(100 100 110);
padding: 0.125rem 0.5rem;
background-color: rgb(241 245 249);
color: rgb(71 85 105);
padding: 0.1875rem 0.5rem;
border-radius: 0.375rem;
font-size: 0.6875rem;
line-height: 1.25rem;
line-height: 1.125rem;
font-weight: 500;
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif;
}
@@ -984,8 +1011,8 @@ html.dark
#nd-page:has(.api-page-header)
.flex.flex-wrap.items-center.gap-3.not-prose
> span.text-sm.font-mono.text-fd-muted-foreground {
background-color: rgb(39 39 42);
color: rgb(212 212 216);
background-color: rgb(51 51 56);
color: rgb(212 212 220);
}
/* Hide the "*" inside the name span — we'll add "required" as a ::after on the flex row */
@@ -993,26 +1020,26 @@ html.dark
display: none;
}
/* Required badge — order 3, light red pill */
/* Required badge — order 3, red pill */
#nd-page:has(.api-page-header)
.flex.flex-wrap.items-center.gap-3.not-prose:has(span.text-red-400)::after {
content: "required";
order: 3;
display: inline-flex;
align-items: center;
background-color: rgb(254 235 235);
color: rgb(220 38 38);
padding: 0.125rem 0.5rem;
background-color: rgb(254 226 226);
color: rgb(185 28 28);
padding: 0.1875rem 0.5rem;
border-radius: 0.375rem;
font-size: 0.6875rem;
line-height: 1.25rem;
line-height: 1.125rem;
font-weight: 500;
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif;
}
html.dark
#nd-page:has(.api-page-header)
.flex.flex-wrap.items-center.gap-3.not-prose:has(span.text-red-400)::after {
background-color: rgb(127 29 29 / 0.2);
background-color: rgb(153 27 27 / 0.3);
color: rgb(252 165 165);
}
@@ -1054,12 +1081,12 @@ html.dark
> span.text-sm.font-mono.text-fd-muted-foreground::after {
content: "string";
font-size: 0.6875rem;
line-height: 1.25rem;
line-height: 1.125rem;
font-weight: 500;
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif;
background-color: rgb(240 240 243);
color: rgb(100 100 110);
padding: 0.125rem 0.5rem;
background-color: rgb(241 245 249);
color: rgb(71 85 105);
padding: 0.1875rem 0.5rem;
border-radius: 0.375rem;
display: inline-flex;
align-items: center;
@@ -1069,8 +1096,8 @@ html.dark
div.my-4
> .flex.flex-wrap.items-center.gap-3.not-prose
> span.text-sm.font-mono.text-fd-muted-foreground::after {
background-color: rgb(39 39 42);
color: rgb(212 212 216);
background-color: rgb(51 51 56);
color: rgb(212 212 220);
}
/* "header" badge via ::before on the auth flex row */
@@ -1079,12 +1106,12 @@ html.dark
order: 3;
display: inline-flex;
align-items: center;
background-color: rgb(240 240 243);
color: rgb(100 100 110);
padding: 0.125rem 0.5rem;
background-color: rgb(241 245 249);
color: rgb(71 85 105);
padding: 0.1875rem 0.5rem;
border-radius: 0.375rem;
font-size: 0.6875rem;
line-height: 1.25rem;
line-height: 1.125rem;
font-weight: 500;
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif;
}
@@ -1092,22 +1119,22 @@ html.dark
#nd-page:has(.api-page-header)
div.my-4
> .flex.flex-wrap.items-center.gap-3.not-prose::before {
background-color: rgb(39 39 42);
color: rgb(212 212 216);
background-color: rgb(51 51 56);
color: rgb(212 212 220);
}
/* "required" badge via ::after on the auth flex row — light red pill */
/* "required" badge via ::after on the auth flex row — red pill */
#nd-page:has(.api-page-header) div.my-4 > .flex.flex-wrap.items-center.gap-3.not-prose::after {
content: "required";
order: 4;
display: inline-flex;
align-items: center;
background-color: rgb(254 235 235);
color: rgb(220 38 38);
padding: 0.125rem 0.5rem;
background-color: rgb(254 226 226);
color: rgb(185 28 28);
padding: 0.1875rem 0.5rem;
border-radius: 0.375rem;
font-size: 0.6875rem;
line-height: 1.25rem;
line-height: 1.125rem;
font-weight: 500;
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif;
}
@@ -1115,7 +1142,7 @@ html.dark
#nd-page:has(.api-page-header)
div.my-4
> .flex.flex-wrap.items-center.gap-3.not-prose::after {
background-color: rgb(127 29 29 / 0.2);
background-color: rgb(153 27 27 / 0.3);
color: rgb(252 165 165);
}
@@ -1168,12 +1195,12 @@ html.dark #nd-page:has(.api-page-header) .text-sm.border-t {
#nd-page:has(.api-page-header) .flex.flex-wrap.items-center.gap-3.not-prose > button,
#nd-page:has(.api-page-header) .flex.flex-wrap.items-center.gap-3.not-prose > span:has(> button) {
order: 2;
background-color: rgb(240 240 243);
color: rgb(100 100 110);
padding: 0.125rem 0.5rem;
background-color: rgb(241 245 249);
color: rgb(71 85 105);
padding: 0.1875rem 0.5rem;
border-radius: 0.375rem;
font-size: 0.6875rem;
line-height: 1.25rem;
line-height: 1.125rem;
font-weight: 500;
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif;
}
@@ -1182,8 +1209,8 @@ html.dark
#nd-page:has(.api-page-header)
.flex.flex-wrap.items-center.gap-3.not-prose
> span:has(> button) {
background-color: rgb(39 39 42);
color: rgb(212 212 216);
background-color: rgb(51 51 56);
color: rgb(212 212 220);
}
/* Section headings (Authorization, Path Parameters, etc.) — consistent top spacing */

View File

@@ -7,26 +7,27 @@ export default function RootLayout({ children }: { children: ReactNode }) {
export const metadata = {
metadataBase: new URL('https://docs.sim.ai'),
title: {
default: 'Sim Documentation - Visual Workflow Builder for AI Applications',
default: 'Sim Documentation — Build AI Agents & Run Your Agentic Workforce',
template: '%s',
},
description:
'Comprehensive documentation for Sim - the visual workflow builder for AI applications. Create powerful AI agents, automation workflows, and data processing pipelines by connecting blocks on a canvas—no coding required.',
'Documentation for Sim — the open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to deploy and orchestrate agentic workflows.',
keywords: [
'AI workflow builder',
'visual workflow editor',
'AI automation',
'workflow automation',
'AI agents',
'no-code AI',
'drag and drop workflows',
'agentic workforce',
'AI agent platform',
'open-source AI agents',
'agentic workflows',
'LLM orchestration',
'AI integrations',
'workflow canvas',
'AI Agent Workflow Builder',
'workflow orchestration',
'agent builder',
'AI workflow automation',
'visual programming',
'knowledge base',
'AI automation',
'workflow builder',
'AI workflow orchestration',
'enterprise AI',
'AI agent deployment',
'intelligent automation',
'AI tools',
],
authors: [{ name: 'Sim Team', url: 'https://sim.ai' }],
creator: 'Sim',
@@ -53,9 +54,9 @@ export const metadata = {
alternateLocale: ['es_ES', 'fr_FR', 'de_DE', 'ja_JP', 'zh_CN'],
url: 'https://docs.sim.ai',
siteName: 'Sim Documentation',
title: 'Sim Documentation - Visual Workflow Builder for AI Applications',
title: 'Sim Documentation — Build AI Agents & Run Your Agentic Workforce',
description:
'Comprehensive documentation for Sim - the visual workflow builder for AI applications. Create powerful AI agents, automation workflows, and data processing pipelines.',
'Documentation for Sim — the open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to deploy and orchestrate agentic workflows.',
images: [
{
url: 'https://docs.sim.ai/api/og?title=Sim%20Documentation',
@@ -67,9 +68,9 @@ export const metadata = {
},
twitter: {
card: 'summary_large_image',
title: 'Sim Documentation - Visual Workflow Builder for AI Applications',
title: 'Sim Documentation — Build AI Agents & Run Your Agentic Workforce',
description:
'Comprehensive documentation for Sim - the visual workflow builder for AI applications.',
'Documentation for Sim — the open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to deploy and orchestrate agentic workflows.',
creator: '@simdotai',
site: '@simdotai',
images: ['https://docs.sim.ai/api/og?title=Sim%20Documentation'],

View File

@@ -37,9 +37,9 @@ export async function GET() {
const manifest = `# Sim Documentation
> Visual Workflow Builder for AI Applications
> The open-source platform to build AI agents and run your agentic workforce.
Sim is a visual workflow builder for AI applications that lets you build AI agent workflows visually. Create powerful AI agents, automation workflows, and data processing pipelines by connecting blocks on a canvas—no coding required.
Sim is the open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to deploy and orchestrate agentic workflows. Create agents, workflows, knowledge bases, tables, and docs. Trusted by over 100,000 builders.
## Documentation Overview

View File

@@ -10,15 +10,15 @@ export function TOCFooter() {
<div className='text-balance font-semibold text-base leading-tight'>
Start building today
</div>
<div className='text-muted-foreground'>Trusted by over 70,000 builders.</div>
<div className='text-muted-foreground'>Trusted by over 100,000 builders.</div>
<div className='text-muted-foreground'>
Build Agentic workflows visually on a drag-and-drop canvas or with natural language.
The open-source platform to build AI agents and run your agentic workforce.
</div>
<Link
href='https://sim.ai/signup'
target='_blank'
rel='noopener noreferrer'
className='group mt-2 inline-flex h-8 w-fit items-center justify-center gap-1 whitespace-nowrap rounded-[10px] border border-[#2AAD6C] bg-gradient-to-b from-[#3ED990] to-[#2AAD6C] px-3 pr-[10px] pl-[12px] font-medium text-sm text-white shadow-[inset_0_2px_4px_0_#5EE8A8] outline-none transition-all hover:shadow-lg focus-visible:border-ring focus-visible:ring-[3px] focus-visible:ring-ring/50'
className='group mt-2 inline-flex h-8 w-fit items-center justify-center gap-2 whitespace-nowrap rounded-[5px] border border-[#33C482] bg-[#33C482] px-[10px] font-medium text-black text-sm outline-none transition-[filter] hover:brightness-110 focus-visible:border-ring focus-visible:ring-[3px] focus-visible:ring-ring/50'
aria-label='Get started with Sim - Sign up for free'
>
<span>Get started</span>

View File

@@ -548,6 +548,34 @@ export function GithubIcon(props: SVGProps<SVGSVGElement>) {
)
}
export function GithubOutlineIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg
{...props}
width='24'
height='24'
viewBox='0 0 24 24'
fill='none'
xmlns='http://www.w3.org/2000/svg'
>
<path
d='M15 21C15 21 15 18.73 15 18C15 17.37 15.15 16.04 14.5 15.5C15.89 15.37 16.98 14.92 18 14C19.02 13.08 19.5 11.69 19.5 9.5C19.5 8 19.25 7 18.5 6C18.79 5.22 18.84 4 18.5 3C16.94 3 15.53 4.07 15 4.5C14.61 4.4 13.67 4 12 4C10.33 4 9.39 4.4 9 4.5C8.47 4.07 7.06 3 5.5 3C5.16 4 5.21 5.22 5.5 6C4.75 7 4.5 8 4.5 9.5C4.5 11.69 4.98 13.08 6 14C7.02 14.92 8.11 15.37 9.5 15.5C8.85 16.04 9 17.37 9 18C9 18.73 9 21 9 21'
stroke='currentColor'
strokeWidth='2'
strokeLinecap='round'
strokeLinejoin='round'
/>
<path
d='M9 19C7.59 19 6.16 18.44 5.31 17.81C4.47 17.18 4.22 16.15 3 15.5'
stroke='currentColor'
strokeWidth='2'
strokeLinecap='round'
strokeLinejoin='round'
/>
</svg>
)
}
export function GitLabIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg {...props} width='24' height='24' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'>
@@ -2068,7 +2096,7 @@ export function LangsmithIcon(props: SVGProps<SVGSVGElement>) {
export function LemlistIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 180 181' fill='none'>
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='24 24.92 132 132' fill='none'>
<path
fillRule='evenodd'
clipRule='evenodd'

View File

@@ -8,65 +8,88 @@ import { SimLogoFull } from '@/components/ui/sim-logo'
import { ThemeToggle } from '@/components/ui/theme-toggle'
import { cn } from '@/lib/utils'
const NAV_TABS = [
{
label: 'Documentation',
href: '/introduction',
match: (p: string) => !p.includes('/api-reference'),
external: false,
},
{
label: 'API Reference',
href: '/api-reference/getting-started',
match: (p: string) => p.includes('/api-reference'),
external: false,
},
{ label: 'Mothership', href: 'https://sim.ai', external: true },
] as const
export function Navbar() {
const pathname = usePathname()
const isApiReference = pathname.includes('/api-reference')
return (
<nav className='sticky top-0 z-50 border-border/50 border-b bg-background/80 backdrop-blur-md backdrop-saturate-150'>
{/* Desktop: Single row layout */}
<div className='hidden h-16 w-full items-center lg:flex'>
<nav className='sticky top-0 z-50 bg-background/80 backdrop-blur-md backdrop-saturate-150'>
<div className='hidden w-full flex-col lg:flex'>
{/* Top row: logo, search, controls */}
<div
className='relative flex w-full items-center justify-between'
className='relative flex h-[52px] w-full items-center justify-between'
style={{
paddingLeft: 'calc(var(--sidebar-offset) + 32px)',
paddingRight: 'calc(var(--toc-offset) + 60px)',
}}
>
{/* Left cluster: logo */}
<div className='flex items-center'>
<Link href='/' className='flex min-w-[100px] items-center'>
<SimLogoFull className='h-7 w-auto' />
</Link>
</div>
<Link href='/' className='flex min-w-[100px] items-center'>
<SimLogoFull className='h-7 w-auto' />
</Link>
{/* Center cluster: search - absolutely positioned to center */}
<div className='-translate-x-1/2 absolute left-1/2 flex items-center justify-center'>
<SearchTrigger />
</div>
{/* Right cluster aligns with TOC edge */}
<div className='flex items-center gap-1'>
<Link
href='/introduction'
className={cn(
'rounded-xl px-3 py-2 font-normal text-[0.9375rem] leading-[1.4] transition-colors hover:bg-foreground/8 hover:text-foreground',
!isApiReference ? 'text-foreground' : 'text-foreground/60'
)}
>
Documentation
</Link>
<Link
href='/api-reference/getting-started'
className={cn(
'rounded-xl px-3 py-2 font-normal text-[0.9375rem] leading-[1.4] transition-colors hover:bg-foreground/8 hover:text-foreground',
isApiReference ? 'text-foreground' : 'text-foreground/60'
)}
>
API
</Link>
<Link
href='https://sim.ai'
target='_blank'
rel='noopener noreferrer'
className='rounded-xl px-3 py-2 font-normal text-[0.9375rem] text-foreground/60 leading-[1.4] transition-colors hover:bg-foreground/8 hover:text-foreground'
>
Platform
</Link>
<LanguageDropdown />
<ThemeToggle />
</div>
</div>
{/* Divider — only spans content width */}
<div
className='border-b'
style={{
marginLeft: 'calc(var(--sidebar-offset) + 32px)',
marginRight: 'calc(var(--toc-offset) + 60px)',
borderColor: 'rgba(128, 128, 128, 0.1)',
}}
/>
{/* Bottom row: navigation tabs — border on row, tabs overlap it */}
<div
className='flex h-[40px] items-stretch gap-6 border-border/20 border-b'
style={{
paddingLeft: 'calc(var(--sidebar-offset) + 32px)',
}}
>
{NAV_TABS.map((tab) => {
const isActive = !tab.external && tab.match(pathname)
return (
<Link
key={tab.label}
href={tab.href}
{...(tab.external ? { target: '_blank', rel: 'noopener noreferrer' } : {})}
className={cn(
'-mb-px relative flex items-center border-b text-[14px] tracking-[-0.01em] transition-colors',
isActive
? 'border-neutral-400 font-[550] text-neutral-800 dark:border-neutral-500 dark:text-neutral-200'
: 'border-transparent font-medium text-fd-muted-foreground hover:border-neutral-300 hover:text-neutral-600 dark:hover:border-neutral-600 dark:hover:text-neutral-400'
)}
>
{/* Invisible bold text reserves width to prevent layout shift */}
<span className='invisible font-[550]'>{tab.label}</span>
<span className='absolute'>{tab.label}</span>
</Link>
)
})}
</div>
</div>
</nav>
)

View File

@@ -74,7 +74,7 @@ export function StructuredData({
name: 'Sim Documentation',
url: baseUrl,
description:
'Comprehensive documentation for Sim visual workflow builder for AI applications. Create powerful AI agents, automation workflows, and data processing pipelines.',
'Documentation for Sim — the open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to deploy and orchestrate agentic workflows.',
publisher: {
'@type': 'Organization',
name: 'Sim',
@@ -98,7 +98,7 @@ export function StructuredData({
applicationCategory: 'DeveloperApplication',
operatingSystem: 'Any',
description:
'Visual workflow builder for AI applications. Create powerful AI agents, automation workflows, and data processing pipelines by connecting blocks on a canvas—no coding required.',
'Sim is the open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to deploy and orchestrate agentic workflows. Create agents, workflows, knowledge bases, tables, and docs.',
url: baseUrl,
author: {
'@type': 'Organization',
@@ -109,12 +109,13 @@ export function StructuredData({
category: 'Developer Tools',
},
featureList: [
'Visual workflow builder with drag-and-drop interface',
'AI agent creation and automation',
'80+ built-in integrations',
'Real-time team collaboration',
'Multiple deployment options',
'Custom integrations via MCP protocol',
'AI agent creation',
'Agentic workflow orchestration',
'1,000+ integrations',
'LLM orchestration (OpenAI, Anthropic, Google, xAI, Mistral, Perplexity)',
'Knowledge base creation',
'Table creation',
'Document creation',
],
}

View File

@@ -0,0 +1,371 @@
'use client'
import { memo, useEffect, useState } from 'react'
/** Shared corner radius from Figma export for all decorative rects. */
const RX = '2.59574'
const ENTER_STAGGER = 0.06
const ENTER_DURATION = 0.3
const EXIT_STAGGER = 0.12
const EXIT_DURATION = 0.5
const INITIAL_HOLD = 3000
const HOLD_BETWEEN = 3000
const TRANSITION_PAUSE = 400
interface BlockRect {
opacity: number
width: string
height: string
fill: string
x?: string
y?: string
transform?: string
}
type AnimState = 'visible' | 'exiting' | 'hidden'
const RECTS = {
topRight: [
{ opacity: 1, x: '0', y: '0', width: '16.8626', height: '33.7252', fill: '#2ABBF8' },
{ opacity: 0.6, x: '0', y: '0', width: '85.3433', height: '16.8626', fill: '#2ABBF8' },
{ opacity: 1, x: '0', y: '0', width: '16.8626', height: '16.8626', fill: '#2ABBF8' },
{ opacity: 0.6, x: '34.2403', y: '0', width: '34.2403', height: '33.7252', fill: '#2ABBF8' },
{ opacity: 1, x: '34.2403', y: '0', width: '16.8626', height: '16.8626', fill: '#2ABBF8' },
{
opacity: 1,
x: '51.6188',
y: '16.8626',
width: '16.8626',
height: '16.8626',
fill: '#2ABBF8',
},
{ opacity: 1, x: '68.4812', y: '0', width: '54.6502', height: '16.8626', fill: '#00F701' },
{ opacity: 0.6, x: '106.268', y: '0', width: '34.2403', height: '33.7252', fill: '#00F701' },
{ opacity: 0.6, x: '106.268', y: '0', width: '51.103', height: '16.8626', fill: '#00F701' },
{
opacity: 1,
x: '123.6484',
y: '16.8626',
width: '16.8626',
height: '16.8626',
fill: '#00F701',
},
{ opacity: 0.6, x: '157.371', y: '0', width: '34.2403', height: '16.8626', fill: '#FFCC02' },
{ opacity: 1, x: '157.371', y: '0', width: '16.8626', height: '16.8626', fill: '#FFCC02' },
{ opacity: 0.6, x: '208.993', y: '0', width: '68.4805', height: '16.8626', fill: '#FA4EDF' },
{ opacity: 0.6, x: '209.137', y: '0', width: '16.8626', height: '33.7252', fill: '#FA4EDF' },
{ opacity: 0.6, x: '243.233', y: '0', width: '34.2403', height: '33.7252', fill: '#FA4EDF' },
{ opacity: 1, x: '243.233', y: '0', width: '16.8626', height: '16.8626', fill: '#FA4EDF' },
{ opacity: 0.6, x: '260.096', y: '0', width: '34.04', height: '16.8626', fill: '#FA4EDF' },
{
opacity: 1,
x: '260.611',
y: '16.8626',
width: '16.8626',
height: '16.8626',
fill: '#FA4EDF',
},
],
left: [
{
opacity: 0.6,
width: '34.240',
height: '33.725',
fill: '#FA4EDF',
transform: 'matrix(0 1 1 0 0 0)',
},
{
opacity: 0.6,
width: '16.8626',
height: '68.480',
fill: '#FA4EDF',
transform: 'matrix(-1 0 0 1 33.727 0)',
},
{
opacity: 1,
width: '16.8626',
height: '16.8626',
fill: '#FA4EDF',
transform: 'matrix(-1 0 0 1 33.727 17.378)',
},
{
opacity: 0.6,
width: '16.8626',
height: '33.986',
fill: '#FA4EDF',
transform: 'matrix(0 1 1 0 0 51.616)',
},
{
opacity: 0.6,
width: '16.8626',
height: '140.507',
fill: '#00F701',
transform: 'matrix(-1 0 0 1 33.986 85.335)',
},
{
opacity: 0.4,
x: '17.119',
y: '136.962',
width: '34.240',
height: '16.8626',
fill: '#FFCC02',
transform: 'rotate(-90 17.119 136.962)',
},
{
opacity: 1,
x: '17.119',
y: '136.962',
width: '16.8626',
height: '16.8626',
fill: '#FFCC02',
transform: 'rotate(-90 17.119 136.962)',
},
{
opacity: 0.5,
width: '34.240',
height: '33.725',
fill: '#00F701',
transform: 'matrix(0 1 1 0 0.257 153.825)',
},
{
opacity: 1,
width: '16.8626',
height: '16.8626',
fill: '#00F701',
transform: 'matrix(0 1 1 0 0.257 153.825)',
},
],
right: [
{
opacity: 0.6,
width: '16.8626',
height: '33.726',
fill: '#FA4EDF',
transform: 'matrix(0 1 1 0 0 0)',
},
{
opacity: 0.6,
width: '34.241',
height: '16.8626',
fill: '#FA4EDF',
transform: 'matrix(0 1 1 0 16.891 0)',
},
{
opacity: 0.6,
width: '16.8626',
height: '68.482',
fill: '#FA4EDF',
transform: 'matrix(-1 0 0 1 33.739 16.888)',
},
{
opacity: 0.6,
width: '16.8626',
height: '33.726',
fill: '#FA4EDF',
transform: 'matrix(0 1 1 0 0 33.776)',
},
{
opacity: 1,
width: '16.8626',
height: '16.8626',
fill: '#FA4EDF',
transform: 'matrix(-1 0 0 1 33.739 34.272)',
},
{
opacity: 0.6,
width: '16.8626',
height: '33.726',
fill: '#FA4EDF',
transform: 'matrix(0 1 1 0 0.012 68.510)',
},
{
opacity: 0.6,
width: '16.8626',
height: '102.384',
fill: '#2ABBF8',
transform: 'matrix(-1 0 0 1 33.787 102.384)',
},
{
opacity: 0.4,
x: '17.131',
y: '153.859',
width: '34.241',
height: '16.8626',
fill: '#00F701',
transform: 'rotate(-90 17.131 153.859)',
},
{
opacity: 1,
x: '17.131',
y: '153.859',
width: '16.8626',
height: '16.8626',
fill: '#00F701',
transform: 'rotate(-90 17.131 153.859)',
},
],
} as const satisfies Record<string, readonly BlockRect[]>
type Position = keyof typeof RECTS
function enterTime(pos: Position): number {
return (RECTS[pos].length - 1) * ENTER_STAGGER + ENTER_DURATION
}
function exitTime(pos: Position): number {
return (RECTS[pos].length - 1) * EXIT_STAGGER + EXIT_DURATION
}
interface BlockGroupProps {
width: number
height: number
viewBox: string
rects: readonly BlockRect[]
animState: AnimState
globalOpacity: number
}
const BlockGroup = memo(function BlockGroup({
width,
height,
viewBox,
rects,
animState,
globalOpacity,
}: BlockGroupProps) {
const isVisible = animState === 'visible'
const isExiting = animState === 'exiting'
return (
<svg
width={width}
height={height}
viewBox={viewBox}
fill='none'
xmlns='http://www.w3.org/2000/svg'
className='h-auto w-full'
style={{ opacity: globalOpacity }}
>
{rects.map((r, i) => (
<rect
key={i}
x={r.x}
y={r.y}
width={r.width}
height={r.height}
rx={RX}
fill={r.fill}
transform={r.transform}
style={{
opacity: isVisible ? r.opacity : 0,
transition: `opacity ${isExiting ? EXIT_DURATION : ENTER_DURATION}s ease ${
isVisible ? i * ENTER_STAGGER : isExiting ? i * EXIT_STAGGER : 0
}s`,
}}
/>
))}
</svg>
)
})
function useGroupState(): [AnimState, (s: AnimState) => void] {
return useState<AnimState>('visible')
}
function useBlockCycle() {
const [topRight, setTopRight] = useGroupState()
const [left, setLeft] = useGroupState()
const [right, setRight] = useGroupState()
useEffect(() => {
if (typeof window !== 'undefined' && !window.matchMedia('(min-width: 1024px)').matches) return
const cancelled = { current: false }
const wait = (ms: number) => new Promise<void>((r) => setTimeout(r, ms))
async function exit(setter: (s: AnimState) => void, pos: Position, pauseAfter: number) {
if (cancelled.current) return
setter('exiting')
await wait(exitTime(pos) * 1000)
if (cancelled.current) return
setter('hidden')
await wait(pauseAfter)
}
async function enter(setter: (s: AnimState) => void, pos: Position, pauseAfter: number) {
if (cancelled.current) return
setter('visible')
await wait(enterTime(pos) * 1000 + pauseAfter)
}
const run = async () => {
await wait(INITIAL_HOLD)
while (!cancelled.current) {
await exit(setTopRight, 'topRight', TRANSITION_PAUSE)
await exit(setLeft, 'left', HOLD_BETWEEN)
await enter(setLeft, 'left', TRANSITION_PAUSE)
await enter(setTopRight, 'topRight', TRANSITION_PAUSE)
await exit(setRight, 'right', HOLD_BETWEEN)
await enter(setRight, 'right', HOLD_BETWEEN)
}
}
run()
return () => {
cancelled.current = true
}
}, [])
return { topRight, left, right } as const
}
/**
* Ambient animated block decorations for the docs layout.
* Adapts the landing page's colorful block patterns with slightly reduced
* opacity and the same staggered enter/exit animation cycle.
*/
export function AnimatedBlocks() {
const states = useBlockCycle()
return (
<div
className='pointer-events-none fixed inset-0 z-0 hidden overflow-hidden lg:block'
aria-hidden='true'
>
<div className='absolute top-[93px] right-0 w-[calc(140px+10.76vw)] max-w-[295px]'>
<BlockGroup
width={295}
height={34}
viewBox='0 0 295 34'
rects={RECTS.topRight}
animState={states.topRight}
globalOpacity={0.75}
/>
</div>
<div className='-translate-y-1/2 absolute top-[50%] left-0 w-[calc(16px+1.25vw)] max-w-[34px] scale-x-[-1]'>
<BlockGroup
width={34}
height={226}
viewBox='0 0 34 226.021'
rects={RECTS.left}
animState={states.left}
globalOpacity={0.75}
/>
</div>
<div className='-translate-y-1/2 absolute top-[50%] right-0 w-[calc(16px+1.25vw)] max-w-[34px]'>
<BlockGroup
width={34}
height={205}
viewBox='0 0 34 204.769'
rects={RECTS.right}
animState={states.right}
globalOpacity={0.75}
/>
</div>
</div>
)
}

View File

@@ -0,0 +1,73 @@
'use client'
import * as React from 'react'
import * as DropdownMenuPrimitive from '@radix-ui/react-dropdown-menu'
import { Check } from 'lucide-react'
import { cn } from '@/lib/utils'
const DropdownMenu = DropdownMenuPrimitive.Root
const DropdownMenuTrigger = DropdownMenuPrimitive.Trigger
const DropdownMenuContent = React.forwardRef<
React.ElementRef<typeof DropdownMenuPrimitive.Content>,
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Content>
>(({ className, sideOffset = 4, ...props }, ref) => (
<DropdownMenuPrimitive.Portal>
<DropdownMenuPrimitive.Content
ref={ref}
sideOffset={sideOffset}
className={cn(
'data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 max-h-[var(--radix-dropdown-menu-content-available-height)] min-w-[8rem] origin-[--radix-dropdown-menu-content-transform-origin] overflow-y-auto overflow-x-hidden rounded-md border bg-popover p-1 text-popover-foreground shadow-md data-[state=closed]:animate-out data-[state=open]:animate-in',
className
)}
{...props}
/>
</DropdownMenuPrimitive.Portal>
))
DropdownMenuContent.displayName = DropdownMenuPrimitive.Content.displayName
const DropdownMenuItem = React.forwardRef<
React.ElementRef<typeof DropdownMenuPrimitive.Item>,
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Item>
>(({ className, ...props }, ref) => (
<DropdownMenuPrimitive.Item
ref={ref}
className={cn(
'relative flex cursor-default select-none items-center gap-2 rounded-sm px-2 py-1.5 text-sm outline-none transition-colors focus:bg-accent focus:text-accent-foreground data-[disabled]:pointer-events-none data-[disabled]:opacity-50',
className
)}
{...props}
/>
))
DropdownMenuItem.displayName = DropdownMenuPrimitive.Item.displayName
const DropdownMenuCheckboxItem = React.forwardRef<
React.ElementRef<typeof DropdownMenuPrimitive.CheckboxItem>,
React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.CheckboxItem>
>(({ className, children, checked, ...props }, ref) => (
<DropdownMenuPrimitive.CheckboxItem
ref={ref}
className={cn(
'relative flex cursor-default select-none items-center rounded-sm py-1.5 pr-2 pl-8 text-sm outline-none transition-colors focus:bg-accent focus:text-accent-foreground data-[disabled]:pointer-events-none data-[disabled]:opacity-50',
className
)}
checked={checked}
{...props}
>
<span className='absolute left-2 flex h-3.5 w-3.5 items-center justify-center'>
<DropdownMenuPrimitive.ItemIndicator>
<Check className='h-4 w-4' />
</DropdownMenuPrimitive.ItemIndicator>
</span>
{children}
</DropdownMenuPrimitive.CheckboxItem>
))
DropdownMenuCheckboxItem.displayName = DropdownMenuPrimitive.CheckboxItem.displayName
export {
DropdownMenu,
DropdownMenuTrigger,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuCheckboxItem,
}

View File

@@ -0,0 +1,47 @@
'use client'
import { useState } from 'react'
import { ChevronRight } from 'lucide-react'
interface FAQItem {
question: string
answer: string
}
interface FAQProps {
items: FAQItem[]
title?: string
}
export function FAQ({ items, title = 'Common Questions' }: FAQProps) {
const [openIndex, setOpenIndex] = useState<number | null>(null)
return (
<div className='mt-12'>
<h2 className='mb-4 font-bold text-xl'>{title}</h2>
<div className='rounded-xl border border-border'>
{items.map((item, index) => (
<div key={index} className={index !== items.length - 1 ? 'border-border border-b' : ''}>
<button
type='button'
onClick={() => setOpenIndex(openIndex === index ? null : index)}
className='flex w-full cursor-pointer items-center gap-3 px-5 py-4 text-left font-medium text-[0.9375rem]'
>
<ChevronRight
className={`h-4 w-4 shrink-0 text-fd-muted-foreground transition-transform duration-200 ${
openIndex === index ? 'rotate-90' : ''
}`}
/>
{item.question}
</button>
{openIndex === index && (
<div className='px-5 pb-4 pl-12 text-[0.9375rem] text-fd-muted-foreground leading-relaxed'>
{item.answer}
</div>
)}
</div>
))}
</div>
</div>
)
}

View File

@@ -30,7 +30,7 @@ export function Image({
<NextImage
className={cn(
'overflow-hidden rounded-xl border border-border object-cover shadow-sm',
enableLightbox && 'cursor-pointer transition-opacity hover:opacity-90',
enableLightbox && 'cursor-pointer transition-opacity hover:opacity-95',
className
)}
alt={alt}

View File

@@ -55,8 +55,9 @@ export function Lightbox({ isOpen, onClose, src, alt, type }: LightboxProps) {
<img
src={src}
alt={alt}
className='max-h-[calc(100vh-6rem)] max-w-[calc(100vw-6rem)] rounded-xl object-contain'
className='max-h-[75vh] max-w-[75vw] cursor-pointer rounded-xl object-contain'
loading='lazy'
onClick={onClose}
/>
) : (
<video
@@ -65,7 +66,8 @@ export function Lightbox({ isOpen, onClose, src, alt, type }: LightboxProps) {
loop
muted
playsInline
className='max-h-[calc(100vh-6rem)] max-w-[calc(100vw-6rem)] rounded-xl outline-none focus:outline-none'
className='max-h-[75vh] max-w-[75vw] cursor-pointer rounded-xl outline-none focus:outline-none'
onClick={onClose}
/>
)}
</div>

View File

@@ -15,23 +15,14 @@ export function SearchTrigger() {
return (
<button
type='button'
className='flex h-10 w-[460px] cursor-pointer items-center gap-2 rounded-xl border border-border/50 px-3 py-2 text-sm backdrop-blur-xl transition-colors hover:border-border'
style={{
backgroundColor: 'hsla(0, 0%, 5%, 0.85)',
backdropFilter: 'blur(33px) saturate(180%)',
WebkitBackdropFilter: 'blur(33px) saturate(180%)',
color: 'rgba(255, 255, 255, 0.6)',
}}
className='flex h-9 w-[360px] cursor-pointer items-center gap-2 rounded-lg border border-border/50 bg-fd-muted/50 px-3 text-[13px] text-fd-muted-foreground transition-colors hover:border-border hover:text-fd-foreground'
onClick={handleClick}
>
<Search className='h-4 w-4' />
<Search className='h-3.5 w-3.5' />
<span>Search...</span>
<kbd
className='ml-auto flex items-center gap-0.5 font-medium'
style={{ color: 'rgba(255, 255, 255, 0.6)' }}
>
<span style={{ fontSize: '15px', lineHeight: '1' }}>⌘</span>
<span style={{ fontSize: '13px', lineHeight: '1' }}>K</span>
<kbd className='ml-auto flex items-center font-medium'>
<span className='text-[15px]'>⌘</span>
<span className='text-[12px]'>K</span>
</kbd>
</button>
)

View File

@@ -38,7 +38,7 @@ export function Video({
loop={loop}
muted={muted}
playsInline={playsInline}
className={`${className} ${enableLightbox ? 'cursor-pointer transition-opacity hover:opacity-90' : ''}`}
className={`${className} ${enableLightbox ? 'cursor-pointer transition-opacity hover:opacity-95' : ''}`}
src={getAssetUrl(src)}
onClick={handleVideoClick}
/>

View File

@@ -11,6 +11,8 @@
"(generated)/workflows",
"(generated)/logs",
"(generated)/usage",
"(generated)/audit-logs"
"(generated)/audit-logs",
"(generated)/tables",
"(generated)/files"
]
}

View File

@@ -190,13 +190,8 @@ console.log(`${processedItems} gültige Elemente verarbeitet`);
### Einschränkungen
<Callout type="warning">
Container-Blöcke (Schleifen und Parallele) können nicht ineinander verschachtelt werden. Das bedeutet:
- Du kannst keinen Schleifenblock in einen anderen Schleifenblock platzieren
- Du kannst keinen Parallel-Block in einen Schleifenblock platzieren
- Du kannst keinen Container-Block in einen anderen Container-Block platzieren
Wenn du mehrdimensionale Iterationen benötigst, erwäge eine Umstrukturierung deines Workflows, um sequentielle Schleifen zu verwenden oder Daten in Stufen zu verarbeiten.
<Callout type="info">
Container-Blöcke (Schleifen und Parallele) unterstützen Verschachtelung. Du kannst Schleifen in Schleifen, Parallele in Schleifen und jede Kombination von Container-Blöcken platzieren, um komplexe mehrdimensionale Workflows zu erstellen.
</Callout>
<Callout type="info">

View File

@@ -142,11 +142,8 @@ Jede parallele Instanz läuft unabhängig:
### Einschränkungen
<Callout type="warning">
Container-Blöcke (Schleifen und Parallele) können nicht ineinander verschachtelt werden. Das bedeutet:
- Sie können keinen Schleifenblock in einen Parallelblock platzieren
- Sie können keinen weiteren Parallelblock in einen Parallelblock platzieren
- Sie können keinen Container-Block in einen anderen Container-Block platzieren
<Callout type="info">
Container-Blöcke (Schleifen und Parallele) unterstützen Verschachtelung. Sie können Parallele in Parallele, Schleifen in Parallele und jede Kombination von Container-Blöcken platzieren, um komplexe mehrdimensionale Workflows zu erstellen.
</Callout>
<Callout type="info">

View File

@@ -11,6 +11,9 @@
"(generated)/workflows",
"(generated)/logs",
"(generated)/usage",
"(generated)/audit-logs"
"(generated)/audit-logs",
"(generated)/tables",
"(generated)/files",
"(generated)/knowledge-bases"
]
}

View File

@@ -5,6 +5,7 @@ title: Agent
import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
The Agent block connects your workflow to Large Language Models (LLMs). It processes natural language inputs, calls external tools, and generates structured or unstructured outputs.
@@ -58,7 +59,7 @@ Controls response randomness and creativity:
### Max Output Tokens
Controls the maximum length of the model's response. For Anthropic models, Sim uses reliable defaults: streaming executions use the model's full capacity (e.g. 64,000 tokens for Claude 4.5), while non-streaming executions default to 8,192 to avoid timeout issues. When using tools with Anthropic models, intermediate tool-calling requests use a capped limit of 8,192 tokens to avoid SDK timeout errors, regardless of your configured max tokens—the final streaming response uses your full configured limit. This only affects Anthropic's direct API; AWS Bedrock handles this automatically. For long-form content generation via API, explicitly set a higher value.
Controls the maximum length of the model's response. Each model defaults to its full max output token limit (e.g., 64,000 tokens for Claude Sonnet 4.5). You can override this with a custom value using the Max Tokens setting. For Anthropic models, when non-streaming requests exceed the SDK's internal threshold, the provider automatically uses internal streaming to avoid timeouts.
### API Key
@@ -78,7 +79,7 @@ Extend agent capabilities with external integrations. Select from 60+ pre-built
**Execution Modes:**
- **Auto**: Model decides when to use tools based on context
- **Required**: Tool must be called in every request
- **None**: Tool available but not suggested to model
- **None**: Tool is completely filtered out and not sent to the model — effectively disables the tool
### Response Format
@@ -113,7 +114,7 @@ After an agent completes, you can access its outputs:
- **`<agent.content>`**: The agent's response text or structured data
- **`<agent.tokens>`**: Token usage statistics (prompt, completion, total)
- **`<agent.tool_calls>`**: Details of any tools the agent used during execution
- **`<agent.toolCalls>`**: Details of any tools the agent used during execution
- **`<agent.cost>`**: Estimated cost of the API call (if available)
## Advanced Features
@@ -131,8 +132,9 @@ See the [`Memory`](/tools/memory) block reference for details.
## Outputs
- **`<agent.content>`**: Agent's response text
- **`<agent.model>`**: Model identifier used for the request
- **`<agent.tokens>`**: Token usage statistics
- **`<agent.tool_calls>`**: Tool execution details
- **`<agent.toolCalls>`**: Tool execution details
- **`<agent.cost>`**: Estimated API call cost
## Example Use Cases
@@ -157,3 +159,13 @@ Input → Agent (Google Search, Notion) → Function (Compile Report)
- **Be specific in system prompts**: Clearly define the agent's role, tone, and limitations. The more specific your instructions are, the better the agent will be able to fulfill its intended purpose.
- **Choose the right temperature setting**: Use lower temperature settings (0-0.3) when accuracy is important, or increase temperature (0.7-2.0) for more creative or varied responses
- **Leverage tools effectively**: Integrate tools that complement the agent's purpose and enhance its capabilities. Be selective about which tools you provide to avoid overwhelming the agent. For tasks with little overlap, use another Agent block for the best results.
<FAQ items={[
{ question: "What LLM providers does the Agent block support?", answer: "The Agent block supports OpenAI, Anthropic, Google (Gemini), xAI (Grok), DeepSeek, Groq, Cerebras, Azure OpenAI, Azure Anthropic, Google Vertex AI, AWS Bedrock, OpenRouter, and local models via Ollama or VLLM. You can type or select any supported model from the model combobox." },
{ question: "What are the memory options for the Agent block?", answer: "The Agent block has four memory modes: None (no memory, each request is independent), Conversation (full conversation history keyed by a conversation ID), Sliding Window by messages (keeps the N most recent messages), and Sliding Window by tokens (keeps messages up to a token limit). Memory requires a conversation ID to persist across runs." },
{ question: "What is the difference between the tool execution modes (Auto, Required, None)?", answer: "In Auto mode, the model decides when to call a tool based on context. In Required mode, the model must call the tool on every request. In None mode, the tool is completely filtered out and never sent to the model — it effectively disables that tool without removing it from the configuration." },
{ question: "How does the Response Format work?", answer: "Response Format enforces structured output by providing a JSON Schema. When set, the model's response is constrained to match the schema exactly. Fields from the structured response can be accessed directly by downstream blocks using <agent.fieldName> syntax. If no response format is set, the agent returns its standard outputs: content, model, tokens, and toolCalls." },
{ question: "What does the Reasoning Effort / Thinking Level setting do?", answer: "These are advanced settings that appear only for models that support extended reasoning. Reasoning Effort (for OpenAI o-series and GPT-5 models) and Thinking Level (for Anthropic Claude and Gemini models with thinking) control how much compute the model spends reasoning before responding. Higher levels produce more thorough answers but cost more tokens and take longer." },
{ question: "How does max output tokens work with Anthropic models specifically?", answer: "The Agent block uses each Anthropic model's full max output token limit by default (e.g., 64,000 for Claude Sonnet 4.5). You can override this with the Max Tokens setting. For non-streaming requests that exceed the SDK's internal threshold, the provider automatically uses internal streaming to avoid timeouts." },
{ question: "Can I use the Agent block with a custom or self-hosted model?", answer: "Yes. You can use any Ollama or VLLM-compatible model by typing the model name directly into the model combobox. This lets you connect to locally hosted or custom-deployed models as long as they expose a compatible API endpoint." },
]} />
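As a concrete companion to the Response Format answer above, here is a minimal sketch of a JSON Schema an Agent block might be given; the field names and the exact wrapper shape are illustrative assumptions rather than Sim defaults.

```ts
// Hypothetical Response Format schema for an Agent block (illustrative only).
const responseSchema = {
  type: 'object',
  properties: {
    sentiment: { type: 'string', enum: ['positive', 'neutral', 'negative'] },
    confidence: { type: 'number', minimum: 0, maximum: 1 },
  },
  required: ['sentiment', 'confidence'],
  additionalProperties: false,
} as const

// Per the FAQ above, downstream blocks could then read <agent.sentiment> and
// <agent.confidence> directly instead of parsing <agent.content>.
```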

View File

@@ -5,6 +5,7 @@ title: API
import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
The API block connects your workflow to external services through HTTP requests. Supports GET, POST, PUT, DELETE, and PATCH methods for interacting with REST APIs.
@@ -79,7 +80,6 @@ After an API request completes, you can access its outputs:
- **`<api.data>`**: The response body data from the API
- **`<api.status>`**: HTTP status code (200, 404, 500, etc.)
- **`<api.headers>`**: Response headers from the server
- **`<api.error>`**: Error details if the request failed
## Advanced Features
@@ -127,7 +127,6 @@ if (<api.status> === 200) {
- **`<api.data>`**: Response body data from the API
- **`<api.status>`**: HTTP status code
- **`<api.headers>`**: Response headers
- **`<api.error>`**: Error details if request failed
## Example Use Cases
@@ -147,3 +146,12 @@ Function (Validate) → API (Stripe) → Condition (Success) → Supabase (Updat
- **Handle errors gracefully**: Connect error handling logic for failed requests
- **Validate responses**: Check status codes and response formats before processing data
- **Respect rate limits**: Be mindful of API rate limits and implement appropriate throttling
<FAQ items={[
{ question: "What is the default request timeout?", answer: "The default timeout is 300,000 milliseconds (5 minutes). You can configure it up to a maximum of 600,000 milliseconds (10 minutes) in the block's Advanced settings." },
{ question: "Which HTTP errors trigger automatic retries?", answer: "Retries are attempted for network/connection failures, timeouts, rate limit responses (HTTP 429), and server errors (5xx). Client errors like 400 or 404 are not retried." },
{ question: "How does retry backoff work?", answer: "Retries use exponential backoff starting from the configured retry delay (default 500ms). Each subsequent retry doubles the delay, up to the maximum retry delay (default 30,000ms)." },
{ question: "Are POST and PATCH requests retried by default?", answer: "No. POST and PATCH are non-idempotent methods, so retries are disabled for them by default to avoid creating duplicate resources. You can enable retries for these methods with the 'Retry non-idempotent methods' toggle in Advanced settings, but be aware this may cause duplicate requests." },
{ question: "What headers are included automatically?", answer: "Standard headers such as User-Agent, Accept, and Cache-Control are added automatically. Any custom headers you configure will be merged with these defaults, and your custom values will override automatic headers with the same name." },
{ question: "Can I send form data or file uploads?", answer: "The API block primarily sends JSON request bodies through the UI. The underlying HTTP tool also supports form data natively — if you pass form data parameters, it will construct a proper multipart/form-data request automatically. For most use cases, the JSON body field in the block UI is sufficient." },
]} />
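The retry backoff answer above can be made concrete with a small sketch. This is only an illustration of the described schedule (500 ms initial delay, doubling per attempt, capped at 30,000 ms), not Sim's actual implementation.

```ts
// Exponential backoff schedule as described in the FAQ above (assumed defaults).
function backoffDelayMs(attempt: number, initialMs = 500, maxMs = 30_000): number {
  return Math.min(initialMs * 2 ** attempt, maxMs)
}

// Attempts 0..6 produce 500, 1000, 2000, 4000, 8000, 16000, 30000 (32,000 capped).
const schedule = Array.from({ length: 7 }, (_, attempt) => backoffDelayMs(attempt))
console.log(schedule)
```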

View File

@@ -5,6 +5,7 @@ title: Condition
import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
The Condition block branches workflow execution based on boolean expressions. Evaluate conditions using previous block outputs and route to different paths without requiring an LLM.
@@ -60,10 +61,9 @@ Conditions use JavaScript syntax and can reference input values from previous bl
After a condition evaluates, you can access its outputs:
- **`<condition.result>`**: Boolean result of the condition evaluation
- **`<condition.matched_condition>`**: ID of the condition that was matched
- **`<condition.content>`**: Description of the evaluation result
- **`<condition.path>`**: Details of the chosen routing destination
- **`<condition.conditionResult>`**: Boolean result of the condition evaluation
- **`<condition.selectedOption>`**: ID of the condition that was matched
- **`<condition.selectedPath>`**: Details of the chosen routing destination
## Advanced Features
@@ -102,18 +102,13 @@ true
### Error Handling
Conditions automatically handle:
- Undefined or null values with safe evaluation
- Type mismatches with appropriate fallbacks
- Invalid expressions with error logging
- Missing variables with default values
If a condition expression references an undefined variable or throws a runtime error, the block will throw an error and the execution will fail (or follow the error path if one is connected). Use optional chaining (`?.`) or explicit null checks in your expressions to handle missing values safely.
## Outputs
- **`<condition.result>`**: Boolean result of the evaluation
- **`<condition.matched_condition>`**: ID of the matched condition
- **`<condition.content>`**: Description of the evaluation result
- **`<condition.path>`**: Details of the chosen routing destination
- **`<condition.conditionResult>`**: Boolean result of the evaluation
- **`<condition.selectedOption>`**: ID of the matched condition
- **`<condition.selectedPath>`**: Details of the chosen routing destination
## Example Use Cases
@@ -139,3 +134,11 @@ Function (Process) → Condition (account_type === 'enterprise') → Advanced or
- **Keep expressions simple**: Use clear, straightforward boolean expressions for better readability and easier debugging
- **Document your conditions**: Add descriptions to explain the purpose of each condition for better team collaboration and maintenance
- **Test edge cases**: Verify conditions handle boundary values correctly by testing with values at the edges of your condition ranges
<FAQ items={[
{ question: "Does the Condition block use an LLM?", answer: "No. The Condition block evaluates boolean expressions using JavaScript syntax directly — it does not call any AI model. This makes it fast, deterministic, and free of API costs. If you need AI-powered routing decisions, use the Router block instead." },
{ question: "What happens if no condition matches?", answer: "If none of your defined conditions evaluate to true, the workflow follows the else branch. If the else branch is not connected to any downstream block, that workflow path ends gracefully without an error. Add a fallback condition of simply true as the last condition to guarantee a match." },
{ question: "In what order are conditions evaluated?", answer: "Conditions are evaluated from top to bottom in the order they are defined. The first condition that evaluates to true determines the execution path. Subsequent conditions are not evaluated after a match is found, so place more specific conditions before general ones." },
{ question: "What JavaScript features can I use in condition expressions?", answer: "You can use standard JavaScript operators and methods including comparison operators (===, !==, >, <), logical operators (&&, ||, !), string methods (.includes(), .endsWith(), .toLowerCase()), array methods (.includes(), .length), mathematical operations, and Date comparisons. Reference block outputs using <blockName.output> syntax." },
{ question: "How does the Condition block handle null or undefined values?", answer: "If a condition expression references an undefined variable or throws a runtime error, the Condition block will throw an error and the execution will fail (or follow the error path if one is connected). Use optional chaining (?.) or explicit null checks in your expressions to handle missing values safely." },
]} />
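To illustrate the expression style described above, here is a minimal sketch of how such a condition evaluates once block outputs are plugged in; the block values shown are placeholders, not real workflow data.

```ts
// Placeholder block outputs standing in for substituted <api.*> references.
const api = { status: 200, data: { items: [{ id: 1 }] } }

// Plain JavaScript comparison and logical operators, with optional chaining to
// guard against missing values as recommended in the error-handling note above.
const matched = api.status === 200 && (api.data?.items?.length ?? 0) > 0
console.log(matched) // true
```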

View File

@@ -5,6 +5,7 @@ title: Evaluator
import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
The Evaluator block uses AI to score and assess content quality against custom metrics. Perfect for quality control, A/B testing, and ensuring AI outputs meet specific standards.
@@ -49,12 +50,12 @@ The content to be evaluated. This can be:
Choose an AI model to perform the evaluation:
- **OpenAI**: GPT-4o, o1, o3, o4-mini, gpt-4.1
- **Anthropic**: Claude 3.7 Sonnet
- **Anthropic**: Claude Sonnet 4.5
- **Google**: Gemini 2.5 Pro, Gemini 2.0 Flash
- **Other Providers**: Groq, Cerebras, xAI, DeepSeek
- **Local Models**: Ollama or VLLM compatible models
Use models with strong reasoning capabilities like GPT-4o or Claude 3.7 Sonnet for best results.
Use models with strong reasoning capabilities like GPT-4o or Claude Sonnet 4.5 for best results.
### API Key
@@ -91,3 +92,12 @@ Agent (Support Response) → Evaluator (Score) → Function (Log) → Condition
- **Connect with Agent blocks**: Use Evaluator blocks to assess Agent block outputs and create feedback loops
- **Use consistent metrics**: For comparative analysis, maintain consistent metrics across similar evaluations
- **Combine multiple metrics**: Use several metrics to get a comprehensive evaluation
<FAQ items={[
{ question: "What format does the Evaluator return scores in?", answer: "The Evaluator returns a JSON object where each key is the lowercase version of your metric name and the value is a numeric score within the range you defined. For example, a metric named 'Accuracy' with range 1-5 would appear as { \"accuracy\": 4 } in the output." },
{ question: "Which models work best for evaluation?", answer: "Models with strong reasoning capabilities produce the most consistent evaluations. GPT-4o and Claude Sonnet are recommended. The default model is Claude Sonnet 4.5." },
{ question: "Can I evaluate non-text content?", answer: "The content field accepts any string input. If you pass JSON or structured data, the Evaluator will automatically detect and format it before evaluation. However, the evaluation is text-based — it cannot directly evaluate images or audio." },
{ question: "What happens if a metric name is invalid or incomplete?", answer: "Metrics missing a name or range are automatically filtered out. The Evaluator only scores metrics that have both a valid name and a defined min/max range." },
{ question: "Does the Evaluator use structured output?", answer: "Yes. The Evaluator generates a JSON Schema response format based on your metrics and enforces strict mode, so the LLM is constrained to return only the expected metric scores as numbers — no extra text or explanations." },
{ question: "How are evaluation costs calculated?", answer: "Costs are based on the token usage of the underlying LLM call. The Evaluator outputs include token counts (prompt, completion, total) and cost breakdown (input, output, total) so you can track spending per evaluation." },
]} />
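For reference, the score object described in the first FAQ answer above would look like the following; the metric names are illustrative, not defaults.

```ts
// Evaluator output shape per the FAQ above: lowercase metric names mapped to
// numeric scores inside each metric's defined range.
const evaluatorScores: Record<string, number> = {
  accuracy: 4, // metric "Accuracy", range 1-5
  clarity: 5, // metric "Clarity", range 1-5
}
```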

View File

@@ -3,6 +3,7 @@ title: Function
---
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
The Function block executes custom JavaScript or TypeScript code in your workflows. Transform data, perform calculations, or implement custom logic.
@@ -71,3 +72,12 @@ return {
- **Test edge cases**: Ensure your code handles unusual inputs, null values, and boundary conditions correctly
- **Optimize for performance**: Be mindful of computational complexity and memory usage for large datasets
- **Use console.log() for debugging**: Leverage stdout output to debug and monitor function execution
<FAQ items={[
{ question: "What languages does the Function block support?", answer: "The Function block supports JavaScript and Python. JavaScript is the default. Python support requires the E2B feature to be enabled, as Python code always runs in a secure E2B sandbox environment." },
{ question: "When does code run locally vs. in a sandbox?", answer: "JavaScript code without external imports runs in a local isolated VM for fast execution. JavaScript code that uses import or require statements requires E2B and runs in a secure sandbox. Python code always runs in the E2B sandbox regardless of whether it has imports." },
{ question: "How do I reference outputs from other blocks inside my code?", answer: "Use the angle bracket syntax directly in your code, like <agent.content> or <api.data>. Do not wrap these references in quotes — the system replaces them with actual values before execution. For environment variables, use double curly braces: {{API_KEY}}." },
{ question: "What does the Function block return?", answer: "The Function block has two outputs: result (the return value of your code, accessed via <function.result>) and stdout (anything logged with console.log(), accessed via <function.stdout>). Make sure your code includes a return statement if you need to pass data to downstream blocks." },
{ question: "Can I make HTTP requests from a Function block?", answer: "Yes. The fetch() API is available in the JavaScript execution environment. You can use async/await with fetch to call external APIs. However, you cannot use libraries like axios or request — only the built-in fetch is supported. Your code runs inside an async context automatically, so you can use await directly." },
{ question: "Is there a timeout for Function block execution?", answer: "Yes. Function blocks have a configurable execution timeout. If your code exceeds the timeout, the execution is terminated and the block reports an error. Keep this in mind when making external API calls or processing large datasets." },
]} />
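Putting the answers above together, a minimal Function block script might look like the sketch below. The upstream block name (agent), the {{API_KEY}} environment variable, and the endpoint URL are placeholders for illustration.

```javascript
// Tag references are replaced with real values before execution, so do not quote them.
const summary = <agent.content>;

// Environment variables use double curly braces.
const apiKey = {{API_KEY}};

// The code runs in an async context, so await works with the built-in fetch().
const response = await fetch('https://api.example.com/notify', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiKey}` },
  body: JSON.stringify({ summary }),
});

console.log('notify status:', response.status); // captured in <function.stdout>

// The returned value becomes <function.result> for downstream blocks.
return { notified: response.ok, summary };
```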


@@ -5,6 +5,7 @@ title: Guardrails
import { Callout } from 'fumadocs-ui/components/callout'
import { Image } from '@/components/ui/image'
import { Video } from '@/components/ui/video'
import { FAQ } from '@/components/ui/faq'
The Guardrails block validates and protects your AI workflows by checking content against multiple validation types. Ensure data quality, prevent hallucinations, detect PII, and enforce format requirements before content moves through your workflow.
@@ -66,8 +67,8 @@ Uses Retrieval-Augmented Generation (RAG) with LLM scoring to detect when AI-gen
- **Knowledge Base**: Select from your existing knowledge bases
- **Model**: Choose LLM for scoring (requires strong reasoning - GPT-4o, Claude 3.7 Sonnet recommended)
- **API Key**: Authentication for selected LLM provider (auto-hidden for hosted/Ollama or VLLM compatible models)
- **Confidence Threshold**: Minimum score to pass (0-10, default: 3)
- **Top K** (Advanced): Number of knowledge base chunks to retrieve (default: 10)
- **Confidence**: Minimum score to pass (0-10, default: 3)
- **Top K** (Advanced): Number of knowledge base chunks to retrieve (default: 5)
**Output:**
- `passed`: `true` if confidence score ≥ threshold
@@ -83,7 +84,7 @@ Uses Retrieval-Augmented Generation (RAG) with LLM scoring to detect when AI-gen
### PII Detection
Detects personally identifiable information using Microsoft Presidio. Supports 40+ entity types across multiple countries and languages.
Detects personally identifiable information using Microsoft Presidio. Supports over 30 entity types across multiple countries and languages.
<div className="flex justify-center">
<Image
@@ -98,7 +99,7 @@ Detects personally identifiable information using Microsoft Presidio. Supports 4
**How It Works:**
1. Pass content to validate (e.g., `<agent1.content>`)
2. Select PII types to detect using the modal selector
3. Choose detection mode (Detect or Mask)
3. Choose the action (Block Request or Mask PII)
4. Content is scanned for matching PII entities
5. Returns detection results and optionally masked text
@@ -109,17 +110,17 @@ Detects personally identifiable information using Microsoft Presidio. Supports 4
**Configuration:**
- **PII Types to Detect**: Select from grouped categories via modal selector
- **Common**: Person name, Email, Phone, Credit card, IP address, etc.
- **USA**: SSN, Driver's license, Passport, etc.
- **USA**: SSN, Driver's license, Passport, Bank account number, ITIN
- **UK**: NHS number, National insurance number
- **Spain**: NIF, NIE, CIF
- **Italy**: Fiscal code, Driver's license, VAT code
- **Poland**: PESEL, NIP, REGON
- **Singapore**: NRIC/FIN, UEN
- **Spain**: NIF, NIE
- **Italy**: Fiscal code, Driver's license, Identity card, Passport
- **Poland**: PESEL
- **Singapore**: NRIC/FIN
- **Australia**: ABN, ACN, TFN, Medicare
- **India**: Aadhaar, PAN, Passport, Voter number
- **Mode**:
- **Detect**: Only identify PII (default)
- **Mask**: Replace detected PII with masked values
- **India**: Aadhaar, PAN, Vehicle registration, Voter number, Passport
- **Action**:
- **Block Request**: Detect PII and fail validation if any selected types are found (default)
- **Mask PII**: Replace detected PII with masked values
- **Language**: Detection language (default: English)
**Output:**
@@ -140,7 +141,7 @@ Detects personally identifiable information using Microsoft Presidio. Supports 4
The input content to validate. This typically comes from:
- Agent block outputs: `<agent.content>`
- Function block results: `<function.output>`
- Function block results: `<function.result>`
- API responses: `<api.output>`
- Any other block output
@@ -203,3 +204,13 @@ Input → Guardrails (Detect PII) → Condition (No PII) → Process or Reject
Guardrails validation happens synchronously in your workflow. For hallucination detection, choose faster models (like GPT-4o-mini) if latency is critical.
</Callout>
<FAQ items={[
{ question: "Can I run multiple validation types on the same content?", answer: "Each Guardrails block performs one validation type at a time. To apply multiple validations, chain several Guardrails blocks in sequence — for example, first validate JSON format, then check for PII." },
{ question: "What does the hallucination confidence score mean?", answer: "The score ranges from 0 to 10. A score of 0 means the content is completely ungrounded (full hallucination), and 10 means it is fully supported by the knowledge base. Validation passes when the score meets or exceeds your configured threshold (default: 3)." },
{ question: "How many knowledge base chunks are retrieved for hallucination detection?", answer: "By default, 5 chunks are retrieved. You can adjust this up to 20 in the Advanced settings using the 'Number of Chunks to Retrieve' slider. More chunks provide broader context but increase latency and token usage." },
{ question: "What PII detection engine is used?", answer: "PII detection is powered by Microsoft Presidio. It supports over 30 entity types across multiple countries including the US, UK, Spain, Italy, Poland, Singapore, Australia, and India." },
{ question: "What is the difference between Block and Mask modes for PII?", answer: "Block mode fails the validation (passed = false) if any selected PII types are detected. Mask mode also detects PII but replaces it with masked values in the output, making the content safe to use downstream. Both modes return the list of detected entities." },
{ question: "Which languages does PII detection support?", answer: "PII detection supports English, Spanish, Italian, Polish, and Finnish. The language setting affects the NLP models used for entity recognition, so selecting the correct language improves detection accuracy." },
{ question: "Does JSON validation check schema structure or just syntax?", answer: "JSON validation only checks that the content is syntactically valid JSON (i.e., it can be parsed without errors). It does not validate against a specific schema. For schema validation, use a Function block after the Guardrails check." },
]} />
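As the last FAQ item notes, JSON validation only checks syntax; for schema checks, a Function block can follow the Guardrails block. A minimal sketch, with hypothetical required field names:

```javascript
// <agent.content> has already passed Guardrails JSON validation, so parsing should succeed.
const data = JSON.parse(<agent.content>);

// Hypothetical fields this workflow requires.
const requiredFields = ['customerId', 'amount'];
const missing = requiredFields.filter((field) => !(field in data));

return { valid: missing.length === 0, missing };
```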


@@ -6,6 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { Video } from '@/components/ui/video'
import { FAQ } from '@/components/ui/faq'
The Human in the Loop block pauses workflow execution and waits for human intervention before continuing. Use it to add approval gates, collect feedback, or gather additional input at critical decision points.
@@ -33,7 +34,7 @@ When execution reaches this block, the workflow pauses indefinitely until a huma
## Configuration Options
### Paused Output
### Display Data
Defines what data is displayed to the approver. This is the context shown in the approval portal to help them make an informed decision.
@@ -60,7 +61,7 @@ Configures how approvers are alerted when approval is needed. Supported channels
Include the approval URL (`<blockId.url>`) in your notification messages so approvers can access the portal.
### Resume Input
### Resume Form
Defines the fields approvers fill in when responding. This data becomes available to downstream blocks after the workflow resumes.
@@ -136,8 +137,12 @@ Agent (Generate) → Human in the Loop (QA) → Gmail (Send)
## Block Outputs
**`url`** - Unique URL for the approval portal
**`resumeInput.*`** - All fields defined in Resume Input become available after the workflow resumes
**`url`** - Unique URL for the approval portal
**`resumeEndpoint`** - Resume API endpoint URL
**`response`** - Display data shown to the approver (json)
**`submission`** - Form submission data from the approver (json)
**`submittedAt`** - ISO timestamp when the workflow was resumed
**`resumeInput.*`** - All fields defined in Resume Form become available after the workflow resumes
Access using `<blockId.resumeInput.fieldName>`.
@@ -176,3 +181,12 @@ The example below shows an approval portal as seen by an approver after the work
- **[Condition](/blocks/condition)** - Branch based on approval decisions
- **[Variables](/blocks/variables)** - Store approval history and metadata
- **[Response](/blocks/response)** - Return workflow results to API callers
<FAQ items={[
{ question: "How long does the workflow stay paused?", answer: "The workflow pauses indefinitely until a human provides input through the approval portal, the REST API, or a webhook. There is no automatic timeout — it will wait until someone responds." },
{ question: "What notification channels can I use to alert approvers?", answer: "You can configure notifications through Slack, Gmail, Microsoft Teams, SMS (via Twilio), or custom webhooks. Include the approval URL in your notification message so approvers can access the portal directly." },
{ question: "How do I access the approver's input in downstream blocks?", answer: "Use the syntax <blockId.resumeInput.fieldName> to reference specific fields from the resume form. For example, if your block ID is 'approval1' and the form has an 'approved' field, use <approval1.resumeInput.approved>." },
{ question: "Can I chain multiple Human in the Loop blocks for multi-stage approvals?", answer: "Yes. You can place multiple Human in the Loop blocks in sequence to create multi-stage approval workflows. Each block pauses independently and can have its own notification configuration and resume form fields." },
{ question: "Can I resume the workflow programmatically without the portal?", answer: "Yes. Each block exposes a resume API endpoint that you can call with a POST request containing the form data as JSON. This lets you build custom approval UIs or integrate with existing systems like Jira or ServiceNow." },
{ question: "What outputs are available after the workflow resumes?", answer: "The block outputs include the approval portal URL, the resume API endpoint URL, the display data shown to the approver, the form submission data, the raw resume input, and an ISO timestamp of when the workflow was resumed." },
]} />
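A minimal sketch of resuming programmatically from an external system. The endpoint URL comes from the block's `resumeEndpoint` output, the body fields must match your Resume Form (approved and comments are hypothetical here), and any authentication your deployment requires is omitted.

```javascript
// Resume a paused workflow by POSTing the Resume Form data as JSON.
async function resumeWorkflow(resumeEndpoint) {
  const response = await fetch(resumeEndpoint, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ approved: true, comments: 'Looks good' }), // hypothetical Resume Form fields
  });
  if (!response.ok) {
    throw new Error(`Resume failed with status ${response.status}`);
  }
}
```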


@@ -7,6 +7,7 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Video } from '@/components/ui/video'
import { FAQ } from '@/components/ui/faq'
Blocks are the building components you connect together to create AI workflows. Think of them as specialized modules that each handle a specific task—from chatting with AI models to making API calls or processing data.
@@ -138,3 +139,12 @@ Each block type has specific configuration options:
Pause workflow execution for specified time delays
</Card>
</Cards>
<FAQ items={[
{ question: "How many block types are available in Sim?", answer: "Sim has over 200 blocks in its registry, spanning core workflow blocks (Agent, Function, Condition, Router, etc.), integration blocks for third-party services (Gmail, Slack, GitHub, Notion, and many more), and trigger blocks that start workflows from external events like webhooks or schedules. Loop and parallel execution are built into the execution engine as container constructs on the canvas, rather than being standalone registry blocks." },
{ question: "Can one block's output connect to multiple downstream blocks?", answer: "Yes. A single output port can connect to multiple input ports on different blocks. This lets you fan out data from one processing step to several parallel paths without needing to duplicate the block." },
{ question: "What happens if a block in the middle of a workflow fails?", answer: "When a block encounters an error, the workflow stops executing along that path. Blocks that support error handling (like the Router) can route to an error path so you can handle failures gracefully instead of halting the entire workflow." },
{ question: "What is the difference between Processing blocks and Logic blocks?", answer: "Processing blocks (Agent, Function, API) transform or generate data — they do the actual work. Logic blocks (Condition, Router, Evaluator) make decisions about which path the workflow should take based on the data, without modifying it themselves." },
{ question: "Can I use blocks from different categories together in one workflow?", answer: "Absolutely. A typical workflow combines blocks from multiple categories. For example, you might use a trigger block to start the workflow, an Agent block to process input, a Condition block to branch logic, and an integration block like Gmail to send results." },
{ question: "Are there container blocks that can hold other blocks inside them?", answer: "Yes. Loop and Parallel are execution engine constructs that appear as container regions on the canvas. You drag other blocks inside them. Loop containers execute their contained blocks repeatedly, while Parallel containers execute their contained blocks concurrently across multiple branches. Unlike registry blocks, these are handled directly by the execution engine." },
]} />


@@ -5,6 +5,7 @@ title: Loop
import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
The Loop block is a container that executes blocks repeatedly. Iterate over collections, repeat operations a fixed number of times, or continue while a condition is met.
@@ -184,13 +185,8 @@ Variables (i=0) → Loop (While i<10) → Agent (Process) → Variables (i++)
### Limitations
<Callout type="warning">
Container blocks (Loops and Parallels) cannot be nested inside each other. This means:
- You cannot place a Loop block inside another Loop block
- You cannot place a Parallel block inside a Loop block
- You cannot place any container block inside another container block
If you need multi-dimensional iteration, consider restructuring your workflow to use sequential loops or process data in stages.
<Callout type="info">
Container blocks (Loops and Parallels) support nesting. You can place loops inside loops, parallels inside loops, and any combination of container blocks to build complex multi-dimensional workflows.
</Callout>
<Callout type="info">
@@ -250,3 +246,13 @@ Variables (i=0) → Loop (While i<10) → Agent (Process) → Variables (i++)
- **Set reasonable limits**: Keep iteration counts reasonable to avoid long execution times
- **Use ForEach for collections**: When processing arrays or objects, use ForEach instead of For loops
- **Handle errors gracefully**: Consider adding error handling inside loops for robust workflows
<FAQ items={[
{ question: "What is the maximum number of iterations a loop can run?", answer: "For loops (fixed count) and ForEach loops are capped at 1,000 iterations/items (from executor constants). While loops and Do-While loops with a condition have no hard iteration cap — they run until the condition evaluates to false. Do-While loops without a condition fall back to a fixed iteration count, which is capped at 1,000. Always ensure your While/Do-While conditions will eventually become false to avoid infinite loops." },
{ question: "Do loops execute iterations in parallel or sequentially?", answer: "Loops execute all iterations sequentially, one after another. If you need concurrent execution across items, use the Parallel block instead. You can also nest a Parallel block inside a Loop if you need both iteration patterns." },
{ question: "How do I access the current item inside a ForEach loop?", answer: "Inside the loop, use <loop.currentItem> to get the current item being processed and <loop.index> for the zero-based iteration number. These references are only available to blocks placed inside the loop container — blocks outside the loop cannot access them." },
{ question: "How do I access loop results after it finishes?", answer: "After the loop completes, reference the loop block by its normalized name (lowercase, no spaces) using <blockname.results>. This returns an array of all iteration results in order. For example, if your loop block is named 'Process Items', use <processitems.results>. Do not use <loop.> syntax outside the loop — that only works inside." },
{ question: "Can I nest loops inside each other?", answer: "Yes. Container blocks (Loops and Parallels) fully support nesting. You can place loops inside loops, parallels inside loops, loops inside parallels, and any combination. Each nested container maintains its own scope and iteration context independently." },
{ question: "What is the difference between a While loop and a Do-While loop?", answer: "A While loop checks its condition before each iteration, so it may execute zero times if the condition is false initially. A Do-While loop executes its body at least once, then checks the condition after each iteration to decide whether to continue. Use Do-While when you need guaranteed first execution." },
{ question: "What happens if a ForEach loop receives an empty collection?", answer: "If the ForEach loop's collection is empty, the loop body is skipped entirely and the loop outputs an empty results array. The workflow continues normally to any blocks connected after the loop." },
]} />
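To make the tag references above concrete, here is a sketch of a Function block placed inside a ForEach loop. The loop block name (Process Items) and the item shape are hypothetical.

```javascript
// Inside the loop container:
const item = <loop.currentItem>; // current element of the collection
const position = <loop.index>;   // zero-based iteration number

return { position, id: item.id, processed: true };
```

After the loop finishes, a downstream block would read the aggregated array as `<processitems.results>`.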


@@ -5,6 +5,7 @@ title: Parallel
import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
The Parallel block is a container that executes multiple instances concurrently for faster workflow processing. Process items simultaneously instead of sequentially.
@@ -148,11 +149,8 @@ Each parallel instance runs independently:
### Limitations
<Callout type="warning">
Container blocks (Loops and Parallels) cannot be nested inside each other. This means:
- You cannot place a Loop block inside a Parallel block
- You cannot place another Parallel block inside a Parallel block
- You cannot place any container block inside another container block
<Callout type="info">
Container blocks (Loops and Parallels) support nesting. You can place parallels inside parallels, loops inside parallels, and any combination of container blocks to build complex multi-dimensional workflows.
</Callout>
<Callout type="info">
@@ -221,3 +219,13 @@ Understanding when to use each:
- **Independent operations only**: Ensure operations don't depend on each other
- **Handle rate limits**: Add delays or throttling for API-heavy workflows
- **Error handling**: Each instance should handle its own errors gracefully
<FAQ items={[
{ question: "What is the maximum number of concurrent instances?", answer: "The maximum is 20 concurrent instances. This limit exists to prevent resource exhaustion and ensure stable execution." },
{ question: "Can parallel instances share state with each other?", answer: "No. Each parallel instance runs in complete isolation with its own variable scope. There is no shared state between instances, and one instance cannot read or write data from another during execution." },
{ question: "What happens if one parallel instance fails?", answer: "Failures in one instance do not affect other instances. Each instance runs independently, so the remaining instances will continue to execute normally." },
{ question: "Can I nest Parallel blocks inside other Parallel or Loop blocks?", answer: "Yes. Container blocks (Parallels and Loops) support nesting. You can place parallels inside parallels, loops inside parallels, and any combination to build multi-dimensional workflows." },
{ question: "How do I access results after the Parallel block completes?", answer: "Use <blockname.results> where blockname is the normalized name of your Parallel block (lowercase, no spaces). This returns an array containing the results from all instances." },
{ question: "Is the order of results guaranteed?", answer: "No. Because instances execute concurrently, the order of results in the output array is not guaranteed to match the input order. If ordering matters, include an index or identifier in each instance's output." },
{ question: "What is the difference between count-based and collection-based parallel?", answer: "Count-based runs a fixed number of identical instances (e.g., run 5 times). Collection-based distributes items from an array or object across instances, with each instance processing one item. Use count-based for repeated operations and collection-based for batch processing." },
]} />


@@ -5,6 +5,7 @@ title: Response
import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
The Response block formats and sends structured HTTP responses back to API callers. Use it to return workflow results with proper status codes and headers.
@@ -35,23 +36,15 @@ The response data is the main content that will be sent back to the API caller.
### Status Code
Set the HTTP status code for the response (defaults to 200):
A free-text input field where you can enter any valid HTTP status code (the default placeholder is 200). Common examples include:
**Success (2xx):**
- **200**: OK - Standard success response
- **201**: Created - Resource successfully created
- **204**: No Content - Success with no response body
**Client Error (4xx):**
- **400**: Bad Request - Invalid request parameters
- **401**: Unauthorized - Authentication required
- **404**: Not Found - Resource doesn't exist
- **422**: Unprocessable Entity - Validation errors
**Server Error (5xx):**
- **500**: Internal Server Error - Server-side error
- **502**: Bad Gateway - External service error
- **503**: Service Unavailable - Service temporarily down
Any valid HTTP status code can be entered directly into the field.
### Response Headers
@@ -84,7 +77,7 @@ Condition (Error Detected) → Router → Response (400/500, Error Details)
## Outputs
Response blocks are terminal - they end workflow execution and send the HTTP response to the API caller. No outputs are available to downstream blocks.
Response blocks are terminal — no downstream blocks execute after them. However, the block does define outputs (`data`, `status`, `headers`) which are used to construct the HTTP response sent back to the API caller.
## Variable References
@@ -116,3 +109,11 @@ Use the `<variable.name>` syntax to dynamically insert workflow variables into y
- **Handle errors gracefully**: Use conditional logic in your workflow to set appropriate error responses with descriptive messages
- **Validate variable references**: Ensure all referenced variables exist and contain the expected data types before the Response block executes
<FAQ items={[
{ question: "Can I have multiple Response blocks in a workflow?", answer: "No. The Response block is a single-instance block — only one is allowed per workflow. If you need different responses for different conditions, use a Condition or Router block upstream to determine what data reaches the single Response block." },
{ question: "What triggers require a Response block?", answer: "The Response block is designed for use with the API Trigger. When your workflow is invoked via the API, the Response block sends the structured HTTP response back to the caller. Other trigger types (like webhooks or schedules) do not require a Response block." },
{ question: "What is the difference between Builder and Editor mode?", answer: "Builder mode provides a visual interface for constructing your response structure with fields and types. Editor mode gives you a raw JSON code editor where you can write the response body directly. Builder mode is recommended for most use cases." },
{ question: "What is the default status code?", answer: "If you do not specify a status code, the Response block defaults to 200 (OK). You can set any valid HTTP status code including error codes like 400, 404, or 500." },
{ question: "Can the Response block connect to downstream blocks?", answer: "No. Response blocks are terminal — they end workflow execution and send the HTTP response. No further blocks can be connected after a Response block." },
]} />


@@ -4,6 +4,7 @@ title: Router
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
The Router block uses AI to intelligently route workflows based on content analysis. Unlike Condition blocks that use simple rules, Routers understand context and intent. Each route you define creates a separate output port, allowing you to connect different paths to different downstream blocks.
@@ -54,12 +55,12 @@ Each route you add creates a **separate output port** on the Router block. Conne
Choose an AI model to power the routing decision:
- **OpenAI**: GPT-4o, o1, o3, o4-mini, gpt-4.1
- **Anthropic**: Claude 3.7 Sonnet
- **Anthropic**: Claude Sonnet 4.5
- **Google**: Gemini 2.5 Pro, Gemini 2.0 Flash
- **Other Providers**: Groq, Cerebras, xAI, DeepSeek
- **Local Models**: Ollama or VLLM compatible models
Use models with strong reasoning capabilities like GPT-4o or Claude 3.7 Sonnet for best results.
Use models with strong reasoning capabilities like GPT-4o or Claude Sonnet 4.5 for best results.
### API Key
@@ -68,11 +69,12 @@ Your API key for the selected LLM provider. This is securely stored and used for
## Outputs
- **`<router.context>`**: The context that was analyzed
- **`<router.selectedRoute>`**: The ID of the selected route
- **`<router.selected_path>`**: Details of the chosen destination block
- **`<router.model>`**: Model used for decision-making
- **`<router.tokens>`**: Token usage statistics
- **`<router.cost>`**: Estimated routing cost
- **`<router.model>`**: Model used for decision-making
- **`<router.selectedRoute>`**: The ID of the selected route
- **`<router.reasoning>`**: Explanation of why this route was chosen
- **`<router.selectedPath>`**: Details of the chosen destination block
## Example Use Cases
@@ -117,3 +119,12 @@ When the Router cannot determine an appropriate route for the given context, it
- **Test with diverse inputs**: Ensure the Router handles various input types, edge cases, and unexpected content.
- **Monitor routing performance**: Review routing decisions regularly and refine route descriptions based on actual usage patterns.
- **Choose appropriate models**: Use models with strong reasoning capabilities for complex routing decisions.
<FAQ items={[
{ question: "How does the Router decide which route to take?", answer: "The Router sends your context and all route descriptions to an LLM, which analyzes the input and selects the route whose description best matches. The LLM is prompted to be deterministic: it always prefers selecting a route over returning no match, and only reports NO_MATCH if the context is completely unrelated to every route description." },
{ question: "What happens when the Router cannot match any route?", answer: "When the LLM determines that the context does not match any defined route, it returns NO_MATCH and the Router directs execution to the error path. Connect an error handler to this path for graceful fallback behavior rather than letting the workflow fail silently." },
{ question: "Does the Router cost money to run?", answer: "Yes. The Router uses an LLM API call for every routing decision, which consumes tokens and incurs costs. You can monitor this via the <router.tokens> and <router.cost> outputs. If your routing logic can be expressed as simple boolean conditions, use the Condition block instead — it is free and faster." },
{ question: "Can I see why the Router chose a particular route?", answer: "Yes. The Router V2 block outputs a reasoning field (<router.reasoning>) that contains a brief 1-2 sentence explanation of why the selected route was chosen. This is useful for debugging and understanding routing decisions." },
{ question: "What models work best for routing?", answer: "Models with strong reasoning capabilities like GPT-4o or Claude Sonnet 4.5 tend to produce the most accurate routing decisions. For simpler routing scenarios with clearly distinct routes, a faster and cheaper model like GPT-4o-mini or Gemini Flash may be sufficient." },
{ question: "How many routes can I define?", answer: "There is no hard limit on the number of routes. Each route you define creates a separate output port on the block. However, keep in mind that more routes with overlapping descriptions make it harder for the LLM to distinguish between them, so aim for clear, mutually exclusive route descriptions." },
]} />


@@ -5,6 +5,7 @@ title: Variables
import { Callout } from 'fumadocs-ui/components/callout'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
The Variables block updates workflow variables during execution. Variables must first be initialized in your workflow's Variables section, then you can use this block to update their values as your workflow runs.
@@ -73,7 +74,7 @@ API (Fetch Profile) → Variables (userId, userTier) → Agent (Personalize)
## Outputs
- **`<variables.assignments>`**: JSON object with all variable assignments from this block
The Variables block does not produce traditional block outputs. Variables are accessed globally via `<variable.variableName>` syntax from any block in the workflow, not through block output connections.
## Best Practices
@@ -81,3 +82,12 @@ API (Fetch Profile) → Variables (userId, userTier) → Agent (Personalize)
- **Update dynamically**: Use Variables blocks to update values based on block outputs or calculations
- **Use in loops**: Perfect for tracking state across iterations
- **Name descriptively**: Use clear names like `currentIndex`, `totalProcessed`, or `lastError`
<FAQ items={[
{ question: "Do variables persist between workflow executions?", answer: "No. Variables are workflow-scoped and only persist for the duration of a single execution. Each new execution starts with the initial values defined in your workflow's Variables section." },
{ question: "Can multiple Variables blocks update the same variable?", answer: "Yes. All Variables blocks share the same namespace. A later Variables block can overwrite a value set by an earlier one by using the same variable name." },
{ question: "How do I reference a variable in other blocks?", answer: "Use the <variable.variableName> syntax in any block's input field. For example, <variable.retryCount> or <variable.customerEmail>." },
{ question: "Do Variables blocks produce outputs I can wire to other blocks?", answer: "Variables do not appear as traditional block outputs in the connection UI. Instead, they are accessed globally via the <variable.variableName> prefix from any block in the workflow." },
{ question: "What naming convention should I use for variables?", answer: "Use descriptive names in camelCase or snake_case. Variable names are case-sensitive, so 'retryCount' and 'RetryCount' are treated as different variables." },
{ question: "Can I use block outputs to set variable values?", answer: "Yes. You can reference any block output when setting a variable value, such as setting customerEmail to <api.email> or incrementing a counter based on previous values." },
]} />
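As a small illustration of the global `<variable.name>` syntax, a Function block can read a variable and compute the next value, which a Variables block can then assign back. The retryCount variable is hypothetical and must first be initialized in the workflow's Variables section.

```javascript
// Read the current value of a workflow variable inside a Function block.
const retries = <variable.retryCount>;

// A downstream Variables block can assign <function.result.next> back to retryCount.
return { next: retries + 1 };
```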


@@ -5,6 +5,7 @@ title: Wait
import { Callout } from 'fumadocs-ui/components/callout'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
The Wait block pauses your workflow for a specified amount of time before continuing to the next block. Use it to add delays between actions, respect API rate limits, or space out operations.
@@ -62,3 +63,11 @@ API (Trigger Job) → Wait (30s) → API (Check Status)
- **Keep waits reasonable**: Use Wait for delays up to 10 minutes. For longer delays, consider scheduled workflows
- **Monitor execution time**: Remember that waits extend total workflow duration
<FAQ items={[
{ question: "What is the maximum wait time?", answer: "The maximum wait time is 600 seconds (10 minutes). You can specify the duration in either seconds or minutes." },
{ question: "Can a Wait block be cancelled?", answer: "Yes. Wait blocks are interruptible via workflow cancellation. If the workflow is stopped while a Wait block is active, the wait is cancelled and the status output will reflect 'cancelled'." },
{ question: "What happens if I enter a value exceeding the maximum?", answer: "The wait is capped at 600 seconds. If you enter a value greater than 600 seconds (or greater than 10 minutes), it will be limited to the maximum allowed duration." },
{ question: "Does the Wait block consume resources while paused?", answer: "The Wait block performs a simple sleep for the configured duration. It does not actively consume compute resources during the wait, but the workflow execution remains open until the wait completes." },
{ question: "What outputs does the Wait block provide?", answer: "The Wait block outputs the wait duration in milliseconds (waitDuration) and a status string that indicates whether the wait is 'waiting', 'completed', or 'cancelled'." },
]} />


@@ -4,6 +4,7 @@ title: Webhook
import { Callout } from 'fumadocs-ui/components/callout'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
The Webhook block sends HTTP POST requests to external webhook endpoints with automatic webhook headers and optional HMAC signing.
@@ -85,3 +86,11 @@ Condition (check) → Webhook (trigger) → Response
<Callout>
The Webhook block always uses POST. For other HTTP methods or more control, use the [API block](/blocks/api).
</Callout>
<FAQ items={[
{ question: "Can I use HTTP methods other than POST?", answer: "No. The Webhook block always sends POST requests. If you need GET, PUT, DELETE, or PATCH, use the API block instead, which supports all standard HTTP methods." },
{ question: "How does HMAC payload signing work?", answer: "When you provide a signing secret, the block generates an HMAC-SHA256 signature of the payload and includes it in the X-Webhook-Signature header in the format t=timestamp,v1=signature. The receiver can verify by computing HMAC-SHA256(secret, \"timestamp.body\") and comparing with the v1 value." },
{ question: "What headers are added automatically?", answer: "Every webhook request automatically includes Content-Type (application/json), X-Webhook-Timestamp (Unix timestamp in milliseconds), X-Delivery-ID (unique UUID), and Idempotency-Key (same as X-Delivery-ID for deduplication)." },
{ question: "Can custom headers override the automatic ones?", answer: "Yes. Any custom headers you define in the Additional Headers section will override automatic headers that share the same name." },
{ question: "How is the Webhook block different from the API block?", answer: "The Webhook block is purpose-built for webhook delivery: it is POST-only, automatically adds webhook-specific headers (timestamp, delivery ID, idempotency key), and supports optional HMAC signing. The API block is more general-purpose with support for all HTTP methods, query parameters, and configurable retries." },
]} />
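Based on the signature format described above (an X-Webhook-Signature header of the form t=timestamp,v1=signature, computed as HMAC-SHA256 over "timestamp.body"), receiver-side verification might look like the Node.js sketch below. Treat it as illustrative rather than a drop-in, and make sure you verify against the exact raw request body your framework received.

```javascript
const crypto = require('node:crypto');

// signatureHeader: value of X-Webhook-Signature, e.g. "t=1718000000000,v1=abc123..."
// rawBody: the exact request body string that was signed
function verifyWebhookSignature(secret, signatureHeader, rawBody) {
  const parts = Object.fromEntries(
    signatureHeader.split(',').map((pair) => pair.split('='))
  );
  const expected = crypto
    .createHmac('sha256', secret)
    .update(`${parts.t}.${rawBody}`)
    .digest('hex');
  // Constant-time comparison to avoid timing attacks.
  return (
    typeof parts.v1 === 'string' &&
    parts.v1.length === expected.length &&
    crypto.timingSafeEqual(Buffer.from(parts.v1), Buffer.from(expected))
  );
}
```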


@@ -39,6 +39,8 @@ Drop a Workflow block when you want to call a child workflow as part of a larger
- `result` the child workflow's final response
- `success` whether it ran without errors
- `error` message when the run fails
- `childWorkflowName` the name of the child workflow (string)
- `childWorkflowId` the ID of the child workflow (string)
## Deployment Status Badge
@@ -61,3 +63,14 @@ The Workflow block always executes the most recent deployed version of the child
<Callout>
Keep child workflows focused. Small, reusable flows make it easier to combine them without creating deep nesting.
</Callout>
import { FAQ } from '@/components/ui/faq'
<FAQ items={[
{ question: "Can a workflow call itself recursively?", answer: "No. The workflow selector blocks self-references to prevent infinite loops. Additionally, Sim tracks the call chain across nested executions using an internal header and enforces a maximum call chain depth of 25 hops. If the limit is exceeded, the execution is rejected with a 409 error." },
{ question: "What is the maximum nesting depth for sub-workflows?", answer: "The maximum call chain depth is 25. This means workflow A can call B, which calls C, and so on up to 25 levels deep. This limit applies across all chained calls, not just direct parent-child relationships." },
{ question: "Does the Workflow block use the deployed or draft version of the child workflow?", answer: "The child workflow inherits the execution context of the parent. If the parent is running in a deployed context (API, schedule, webhook), the child also uses its deployed version. If the parent is running in draft mode (manual run from the editor), the child also uses its draft state. This lets you test nested workflows end-to-end before deploying." },
{ question: "How do I pass data to a child workflow?", answer: "Use the Inputs field on the Workflow block. If the child workflow has an Input Form trigger, each field appears in the block configuration and you can map parent variables to them. The mapped values are available as start.input in the child workflow." },
{ question: "What outputs does the Workflow block return?", answer: "The block returns a success boolean, the child workflow's result (its final response output), the child workflow's name and ID, and an error message if the run failed. You can reference these outputs from downstream blocks using the tag syntax." },
{ question: "What happens if the child workflow fails?", answer: "The Workflow block raises an error that propagates to the parent workflow. If you need to handle failures gracefully, connect an error path from the Workflow block to a downstream block that processes the error." },
]} />


@@ -5,6 +5,7 @@ title: Basics
import { Callout } from 'fumadocs-ui/components/callout'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { Video } from '@/components/ui/video'
import { FAQ } from '@/components/ui/faq'
## How Connections Work
@@ -47,3 +48,13 @@ The flow of data through connections follows these principles:
Deleting a connection will immediately stop data flow between the blocks. Make sure this is
intended before removing connections.
</Callout>
<FAQ items={[
{ question: "How does Sim determine the order blocks execute in?", answer: "Sim builds a directed acyclic graph (DAG) from your connections. Blocks with no unresolved incoming edges execute first. Once a block completes, the engine removes its edge from downstream blocks and queues any block whose incoming edges are all satisfied. This means execution order is entirely determined by how you wire your connections." },
{ question: "Can a block have multiple incoming connections?", answer: "Yes. A block with multiple incoming connections will wait until all source blocks have completed before it executes. The engine tracks incoming edges and only marks a block as ready when every incoming edge has been resolved." },
{ question: "Can a block send its output to multiple downstream blocks?", answer: "Yes. A single block can have outgoing connections to multiple destination blocks. When the source block completes, all connected downstream blocks that are ready (all their other incoming edges are satisfied) will be queued for execution." },
{ question: "What happens to downstream blocks when a Condition or Router block picks a specific path?", answer: "The engine activates only the edge matching the selected condition or route. Edges on unselected paths are deactivated, and any blocks reachable only through those deactivated edges are cascadingly skipped for that execution." },
{ question: "Are connections between blocks inside a Loop or Parallel block handled differently?", answer: "Yes. The engine inserts sentinel nodes (start and end) around Loop and Parallel subflows. Connections that cross a loop boundary are redirected through these sentinels, and Loop back-edges are wired automatically so blocks inside the loop re-execute on each iteration." },
{ question: "Is there an error-handling path for connections?", answer: "Yes. Connections can use an error source handle. If a block produces an error, only edges marked with the error handle are activated, while the normal source edges are deactivated. This lets you route errors to a dedicated error-handling branch in your workflow." },
]} />
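The scheduling behaviour described in the first two answers is essentially topological ordering over the connection graph. The sketch below illustrates the idea; it is not Sim's actual executor code and ignores conditional paths, error edges, and loop sentinels.

```javascript
// Toy scheduler: a block runs once all of its incoming edges are satisfied.
// connections is an array of { from, to } block ids forming a DAG.
function executionOrder(blocks, connections) {
  const remainingInputs = new Map(blocks.map((id) => [id, 0]));
  for (const { to } of connections) {
    remainingInputs.set(to, remainingInputs.get(to) + 1);
  }

  const ready = blocks.filter((id) => remainingInputs.get(id) === 0);
  const order = [];

  while (ready.length > 0) {
    const current = ready.shift();
    order.push(current);
    for (const { from, to } of connections) {
      if (from !== current) continue;
      remainingInputs.set(to, remainingInputs.get(to) - 1);
      if (remainingInputs.get(to) === 0) ready.push(to);
    }
  }
  return order; // one valid execution order
}

// Example: Start -> Agent -> Gmail
executionOrder(
  ['start', 'agent', 'gmail'],
  [{ from: 'start', to: 'agent' }, { from: 'agent', to: 'gmail' }]
); // ['start', 'agent', 'gmail']
```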


@@ -4,6 +4,7 @@ title: Data Structure
import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { FAQ } from '@/components/ui/faq'
When you connect blocks, it is important to understand each block's output data structure, because the source block's output determines which values are available in the destination block. Each block type produces a specific output structure that you can reference in downstream blocks.
@@ -185,3 +186,13 @@ For example:
- `<agent1.tokens.total>` - Access the total tokens from an Agent block
- `<api1.data.results[0].id>` - Access the ID of the first result from an API response
- `<function1.result.calculations.total>` - Access a nested field in a Function block's result
<FAQ items={[
{ question: "What output fields does an Agent block produce?", answer: "An Agent block outputs content (the text response), model (the model used, e.g. gpt-4o), tokens (an object with prompt, completion, and total counts), and optionally toolCalls, cost, and usage arrays when tools are invoked." },
{ question: "What does the API block output look like?", answer: "The API block returns data (the response body, which can be any type), status (the HTTP status code as a number), and headers (an object containing the response HTTP headers)." },
{ question: "What does a Function block return?", answer: "A Function block outputs result (the return value of your function, which can be any type) and stdout (any console output captured during execution)." },
{ question: "How does the Condition block output differ from the Router block?", answer: "The Condition block outputs conditionResult (a boolean), selectedPath (with blockId, blockType, and blockTitle of the next block), and selectedOption (the ID of the matched condition). The Router block outputs content (the routing decision text), model, tokens, and selectedPath, but does not include conditionResult or selectedOption." },
{ question: "What happens to the Agent block output when a response format schema is configured?", answer: "When you define a response format on an Agent block, the output structure matches your defined schema instead of the standard content/model/tokens structure. Always verify the actual output shape when using response formats." },
{ question: "How do I access deeply nested data from an API response?", answer: "Use dot notation with bracket indices in your connection tags. For example, <api1.data.results[0].id> navigates into the data field, then into the results array at index 0, and retrieves the id property." },
]} />
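As a concrete illustration of the shapes described above, here is an invented Agent block output and the tags that reach into it. Field names follow the docs; the values are made up.

```javascript
// Illustrative Agent block output.
const agentOutput = {
  content: 'Here is the summary you asked for...',
  model: 'gpt-4o',
  tokens: { prompt: 520, completion: 180, total: 700 },
};

// Corresponding connection tags in a downstream block:
//   <agent1.content>       -> "Here is the summary you asked for..."
//   <agent1.tokens.total>  -> 700
```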


@@ -7,6 +7,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
import { Card, Cards } from 'fumadocs-ui/components/card'
import { ConnectIcon } from '@/components/icons'
import { Video } from '@/components/ui/video'
import { FAQ } from '@/components/ui/faq'
Connections are the pathways that allow data to flow between blocks in your workflow. They define how information is passed from one block to another, enabling you to create sophisticated, multi-step processes.
@@ -40,3 +41,12 @@ Sim supports different types of connections that enable various workflow pattern
Follow recommended patterns for effective connection management
</Card>
</Cards>
<FAQ items={[
{ question: "How does data actually flow between connected blocks?", answer: "The execution engine builds a directed acyclic graph (DAG) from your connections and processes blocks in dependency order. When a block finishes, its output is stored in the execution context. Downstream blocks reference that output using connection tags like <BlockName.response>, which the variable resolver replaces with the actual data at execution time." },
{ question: "Can a block receive input from multiple upstream blocks?", answer: "Yes. A block waits until all of its incoming connections have been fulfilled before it executes. The engine tracks incoming edges for each node and only marks a block as ready when every upstream dependency has completed. You can reference outputs from any connected block using their respective connection tags." },
{ question: "What happens if an upstream block fails?", answer: "If a block errors, the engine activates the error edge (if one exists) and deactivates the normal output edge. Downstream blocks on the success path will not execute. You can connect an error handle to a separate block to build fallback or recovery logic." },
{ question: "Do connections support conditional branching?", answer: "Yes. Router and Condition blocks produce a selected route or option that determines which outgoing edge is activated. Only the blocks on the chosen path will execute. Edges on unselected paths are deactivated and their entire downstream subgraph is skipped." },
{ question: "Can blocks in parallel branches share data with each other?", answer: "Blocks within the same parallel branch cannot directly reference blocks in a sibling branch because branches execute independently. However, once all branches complete and the parallel block exits, downstream blocks can access the aggregated results from all branches." },
{ question: "How are connection tags formatted?", answer: "Connection tags use angle-bracket syntax: <BlockName.property>. For nested data you can chain dot notation, such as <BlockName.response.items[0].name>. The resolver walks the object path at execution time and substitutes the resolved value into your input field." },
]} />


@@ -4,6 +4,7 @@ title: Tags
import { Callout } from 'fumadocs-ui/components/callout'
import { Video } from '@/components/ui/video'
import { FAQ } from '@/components/ui/faq'
Connection tags are visual representations of the data available from connected blocks, providing an easy way to reference outputs from previous blocks elsewhere in your workflow.
@@ -107,3 +108,14 @@ const total = <apiBlock.data.total> * 1.1; // Add 10% tax
When using connection tags in numeric contexts, make sure the referenced data is actually a number
to avoid type conversion issues.
</Callout>
<FAQ items={[
{ question: "How are tag references resolved at runtime?", answer: "The executor uses a chain of resolvers. Each reference like <blockName.path> is matched against resolvers in order: loop references, parallel references, workflow variables, environment variables, and finally block output references. The first resolver that recognizes the reference handles it." },
{ question: "Does the block name in a tag reference need to match exactly?", answer: "Block names are normalized by converting to lowercase and removing spaces before matching. So <My Agent.content> and <myagent.content> resolve to the same block. However, the field path after the block name (e.g., content, data.users) is case-sensitive." },
{ question: "Can I reference environment variables in tag syntax?", answer: "Yes, but environment variables use double-brace syntax instead of angle brackets: {{MY_VAR}}. These are resolved by a dedicated environment variable resolver during execution." },
{ question: "What happens if I reference a block that did not execute on the current path?", answer: "If the referenced block exists in the workflow but did not produce output (for example, it was on an unselected condition branch), the reference resolves to an empty value. In most blocks this becomes an empty string; in Function blocks it becomes null." },
{ question: "Can I access array elements inside a tag reference?", answer: "Yes. Use bracket notation for array indices within the dot path, for example <api1.data.users[0].name>. The resolver supports multiple levels of array indexing like items[0][1] as well." },
{ question: "How are tag values formatted inside Function blocks versus other blocks?", answer: "In Function blocks, resolved values are formatted as code literals (strings are quoted, objects are JSON, null stays as null) so they can be used directly in JavaScript or Python code. In other block types, objects are JSON-stringified and primitives are converted to plain strings." },
{ question: "Can I mix static text with tag references?", answer: "Yes. You can embed tag references anywhere in a text string, such as \"Hello, <agent1.content>! Your order total is <api1.data.total>.\" The resolver replaces each tag in place while leaving the surrounding text intact." },
]} />
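A toy version of the name normalization and path walking described above. This is not the actual resolver, which also handles loop, parallel, variable, and environment references.

```javascript
// Normalize a block name the way tag matching does: lowercase, spaces removed.
const normalizeBlockName = (name) => name.toLowerCase().replace(/\s+/g, '');

// Walk a dot/bracket path like "data.users[0].name" through an output object.
function resolvePath(output, path) {
  const segments = path.replace(/\[(\d+)\]/g, '.$1').split('.');
  return segments.reduce((value, key) => (value == null ? value : value[key]), output);
}

normalizeBlockName('My Agent'); // "myagent", so <My Agent.content> and <myagent.content> match the same block
resolvePath({ data: { users: [{ name: 'Ada' }] } }, 'data.users[0].name'); // "Ada"
```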


@@ -5,7 +5,7 @@ title: Copilot
import { Callout } from 'fumadocs-ui/components/callout'
import { Card, Cards } from 'fumadocs-ui/components/card'
import { Image } from '@/components/ui/image'
import { MessageCircle, Hammer, Zap, Globe, Paperclip, History, RotateCcw, Brain } from 'lucide-react'
import { MessageCircle, Hammer, ListChecks, Zap, Globe, Paperclip, History, RotateCcw, Brain } from 'lucide-react'
Copilot is your in-editor assistant that helps you build and edit workflows. It can:
@@ -49,6 +49,18 @@ Switch between modes using the mode selector at the bottom of the input area.
Workflow building mode. Copilot can add blocks, wire connections, edit configurations, and debug issues.
</div>
</Card>
<Card
title={
<span className="inline-flex items-center gap-2">
<ListChecks className="h-4 w-4 text-muted-foreground" />
Plan
</span>
}
>
<div className="m-0 text-sm">
Creates a step-by-step implementation plan for your workflow without making any changes. Helps you think through the approach before building.
</div>
</Card>
</Cards>
## Models
@@ -185,10 +197,10 @@ Selected options are highlighted; unselected options appear struck through.
## Usage Limits
Copilot usage is billed per token from the underlying LLM. If you reach your usage limit, Copilot will prompt you to increase your limit. You can add usage in increments ($50, $100) from your current base.
Copilot usage is billed per token from the underlying LLM and counts toward your plan's credit usage. If you reach your usage limit, enable on-demand billing from Settings → Subscription to continue using Copilot beyond your plan's included credits.
<Callout type="info">
See the [Cost Calculation page](/execution/costs) for billing details.
See the [Cost Calculation page](/execution/costs) for billing and plan details.
</Callout>
## Copilot MCP
@@ -286,3 +298,15 @@ Replace `YOUR_COPILOT_API_KEY` with your key.
For self-hosted deployments, replace `https://www.sim.ai` with your self-hosted Sim URL.
</Callout>
import { FAQ } from '@/components/ui/faq'
<FAQ items={[
{ question: "What is the difference between Ask, Build, and Plan mode?", answer: "Copilot has three modes. Ask mode is a read-only Q&A mode for explanations, guidance, and suggestions without making any changes to your workflow. Build mode allows Copilot to actively modify your workflow by adding blocks, wiring connections, editing configurations, and debugging issues. Plan mode creates a step-by-step implementation plan for your request without making any changes, so you can review the approach before committing. Use Ask when you want to learn or explore ideas, Plan when you want to see a proposed approach first, and Build when you want Copilot to make changes directly." },
{ question: "Does Copilot have access to my full workflow when answering questions?", answer: "Copilot has access to the workflow you are currently editing as context. You can also use the @ context menu to reference other workflows, previous chats, execution logs, knowledge bases, documentation, and templates to give Copilot additional context for your request." },
{ question: "How do I use Copilot from an external editor like Cursor or VS Code?", answer: "You can use Copilot as an MCP server from external editors. First, generate a Copilot API key from Settings > Copilot on sim.ai. Then add the MCP server configuration to your editor using the endpoint https://www.sim.ai/api/mcp/copilot with your API key in the X-API-Key header. Configuration examples are available for Cursor, Claude Code, Claude Desktop, and VS Code." },
{ question: "Can I revert changes that Copilot made to my workflow?", answer: "Yes. When Copilot makes changes in Build mode, it saves checkpoints of your workflow state. You can hover over a Copilot message and click the checkpoints icon to see saved states, then click Revert on any checkpoint to restore your workflow. Note that reverting cannot be undone, so review the checkpoint before confirming." },
{ question: "How does Copilot billing work?", answer: "Copilot usage is billed per token from the underlying LLM and counts toward your plan's credit usage. More capable models like Claude Opus cost more per token than lighter models like Haiku. If you reach your usage limit, you can enable on-demand billing from Settings > Subscription to continue using Copilot." },
{ question: "What do the slash commands like /research and /search do?", answer: "Slash commands trigger specialized behaviors. /fast enables fast mode execution, /research activates a research and exploration mode, and /actions executes agent actions. Web commands like /search, /read, /scrape, and /crawl let Copilot interact with the web to search for information, read URLs, scrape page content, or crawl multiple pages to gather context for your request." },
{ question: "How do I set up Copilot for a self-hosted deployment?", answer: "For self-hosted deployments, go to sim.ai > Settings > Copilot and generate a Copilot API key. Then set the COPILOT_API_KEY environment variable in your self-hosted environment. Copilot is a Sim-managed service, so the self-hosted instance communicates with Sim's servers to process requests." },
]} />
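A hypothetical MCP configuration following the description above: an HTTP MCP server at https://www.sim.ai/api/mcp/copilot authenticated with an X-API-Key header. The exact schema varies by editor, so treat the keys below as a sketch and follow your editor's MCP documentation.

```json
{
  "mcpServers": {
    "sim-copilot": {
      "url": "https://www.sim.ai/api/mcp/copilot",
      "headers": {
        "X-API-Key": "YOUR_COPILOT_API_KEY"
      }
    }
  }
}
```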


@@ -6,6 +6,7 @@ description: Manage secrets, API keys, and OAuth connections for your workflows
import { Callout } from 'fumadocs-ui/components/callout'
import { Image } from '@/components/ui/image'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { FAQ } from '@/components/ui/faq'
Credentials provide a secure way to manage API keys, tokens, and third-party service connections across your workflows. Instead of hardcoding sensitive values into your workflow, you store them as credentials and reference them at runtime.
@@ -190,3 +191,13 @@ To share a credential with specific team members:
- **Name keys descriptively** — `STRIPE_SECRET_KEY` over `KEY1`
- **Connect multiple OAuth accounts** when you need different permissions or identities per workflow
- **Never hardcode secrets** in workflow input fields — always use `{{KEY}}` references
<FAQ items={[
{ question: "Are my secrets encrypted at rest?", answer: "Yes. Secret values and OAuth tokens are encrypted before being stored in the database. The platform uses server-side encryption so that raw secret values are never persisted in plaintext. Secret values are also never exposed in the workflow editor, logs, or API responses." },
{ question: "What happens if both a workspace secret and a personal secret have the same key name?", answer: "The workspace secret takes precedence. During execution, the resolver checks workspace secrets first and uses personal secrets only as a fallback. This ensures that production workflows use the shared, team-managed value." },
{ question: "Who determines which personal secret is used for automated runs?", answer: "For manual runs, the personal secrets of the user who clicked Run are used as fallback. For automated runs triggered by API, webhook, or schedule, the personal secrets of the workflow owner are used instead." },
{ question: "Does Sim handle OAuth token refresh automatically?", answer: "Yes. When an OAuth token is used during execution, the platform checks whether the access token has expired and automatically refreshes it using the stored refresh token before making the API call. You do not need to handle token refresh manually." },
{ question: "Can I connect multiple OAuth accounts for the same provider?", answer: "Yes. You can connect multiple accounts per provider (for example, two separate Gmail accounts). Each block that requires OAuth lets you select which specific account to use from the credential dropdown. This is useful when different workflows or blocks need different permissions or identities." },
{ question: "What happens if I delete a credential that is used in a workflow?", answer: "If a block references a deleted credential, the workflow will fail at that block during execution because the credential cannot be resolved. Make sure to update any blocks that reference a credential before deleting it." },
{ question: "Can I import secrets from a .env file?", answer: "Yes. The bulk import feature lets you paste .env-style content in KEY=VALUE format. The parser supports quoted values, comments (lines starting with #), and blank lines. All imported secrets are created with the scope you choose (workspace or personal)." },
]} />
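As a quick illustration of the bulk import format described above, the parser accepts standard .env-style content; the keys and values below are placeholders:

```
# Comments and blank lines are ignored
STRIPE_SECRET_KEY=sk_live_placeholder
OPENAI_API_KEY="sk-placeholder-value"

SLACK_WEBHOOK_URL=https://hooks.slack.com/services/T000/B000/XXXX
```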

View File

@@ -4,6 +4,7 @@ description: Enterprise features for business organizations
---
import { Callout } from 'fumadocs-ui/components/callout'
import { FAQ } from '@/components/ui/faq'
Sim Enterprise provides advanced features for organizations with enhanced security, compliance, and management requirements.
@@ -108,3 +109,14 @@ curl -X DELETE "https://your-instance/api/v1/admin/workspaces/{workspaceId}/memb
- Enabling `ACCESS_CONTROL_ENABLED` automatically enables organizations, as access control requires organization membership.
- When `DISABLE_INVITATIONS` is set, users cannot send invitations. Use the Admin API to manage workspace and organization memberships instead.
<FAQ items={[
{ question: "What are the minimum requirements to self-host Sim?", answer: "The Docker Compose production setup includes the Sim application (8 GB memory limit), a realtime collaboration server (1 GB memory limit), and a PostgreSQL database with pgvector. A machine with at least 16 GB of RAM and 4 CPU cores is recommended. You will also need Docker and Docker Compose installed." },
{ question: "Can I run Sim completely offline with local AI models?", answer: "Yes. Sim supports Ollama and VLLM for running local AI models. A separate Docker Compose configuration (docker-compose.ollama.yml) is available for deploying with Ollama. This lets you run workflows without any external API calls, keeping all data on your infrastructure." },
{ question: "How does data privacy work with self-hosted deployments?", answer: "When self-hosted, all data stays on your infrastructure. Workflow definitions, execution logs, credentials, and user data are stored in your PostgreSQL database. If you use local AI models through Ollama or VLLM, no data leaves your network. When using external AI providers, only the data sent in prompts goes to those providers." },
{ question: "Do I need a paid license to self-host Sim?", answer: "The core Sim platform is open source under Apache 2.0 and can be self-hosted for free. Enterprise features like SSO (SAML/OIDC), access control with permission groups, and organization management require an Enterprise subscription for production use. These features can be enabled via environment variables for development and evaluation without a license." },
{ question: "Which SSO providers are supported?", answer: "Sim supports SAML 2.0 and OIDC protocols, which means it works with virtually any enterprise identity provider including Okta, Azure AD (Entra ID), Google Workspace, and OneLogin. Configuration is done through Settings in the workspace UI." },
{ question: "How do I manage users when invitations are disabled?", answer: "Use the Admin API with your admin API key. You can create organizations, add members to organizations with specific roles, add users to workspaces with defined permissions, and remove users. All management is done through REST API calls authenticated with the x-admin-key header." },
{ question: "Can I scale Sim horizontally for high availability?", answer: "The Docker Compose setup is designed for single-node deployments. For production scaling, you can deploy on Kubernetes with multiple application replicas behind a load balancer. The database can be scaled independently using managed PostgreSQL services. Redis can be configured for session and cache management across multiple instances." },
{ question: "How do access control permission groups work?", answer: "Permission groups let you restrict which AI providers, workflow blocks, and platform features are available to specific team members. Users not assigned to any group have full access. Restrictions are enforced at both the UI level (hiding restricted options) and at execution time (blocking unauthorized operations). Enabling access control automatically enables organization management." },
]} />

View File

@@ -592,3 +592,15 @@ app.listen(3000, () => {
console.log('Webhook server listening on port 3000');
});
```
import { FAQ } from '@/components/ui/faq'
<FAQ items={[
{ question: "How do I trigger async execution via the API?", answer: "Set the X-Execution-Mode header to 'async' on your POST request to /api/workflows/{id}/execute. The API returns a 202 response with a jobId, executionId, and a statusUrl you can poll to check when the job completes. Async mode does not support draft state, workflow overrides, or selective output options." },
{ question: "What authentication methods does the API support?", answer: "The API supports two authentication methods: API keys passed in the x-api-key header, and session-based authentication for logged-in users. API keys can be generated from Settings > Sim Keys in the platform. Workflows with public API access enabled can also be called without authentication." },
{ question: "How does the webhook retry policy work?", answer: "Failed webhook deliveries are retried up to 5 times with exponential backoff: 5 seconds, 15 seconds, 1 minute, 3 minutes, and 10 minutes, plus up to 10% jitter. Only HTTP 5xx and 429 responses trigger retries. Each delivery times out after 30 seconds." },
{ question: "What rate limits apply to the Logs API?", answer: "Rate limits use a token bucket algorithm. Free plans get 30 requests/minute with 60 burst capacity, Pro gets 100/200, Team gets 200/400, and Enterprise gets 500/1000. These are separate from workflow execution rate limits, which are shown in the response body." },
{ question: "How do I verify that a webhook is from Sim?", answer: "Configure a webhook secret when setting up notifications. Sim signs each delivery with HMAC-SHA256 using the format 't={timestamp},v1={signature}' in the sim-signature header. Compute the HMAC of '{timestamp}.{body}' with your secret and compare it to the signature value." },
{ question: "What alert rules are available for notifications?", answer: "You can configure alerts for consecutive failures, failure rate thresholds, latency thresholds, latency spikes (percentage above average), cost thresholds, no-activity periods, and error counts within a time window. All alert types include a 1-hour cooldown to prevent notification spam." },
{ question: "Can I filter which executions trigger notifications?", answer: "Yes. You can filter notifications by specific workflows (or select all), log level (info or error), and trigger type (api, webhook, schedule, manual, chat). You can also choose whether to include final output, trace spans, rate limits, and usage data in the notification payload." },
]} />
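For the signature verification question above, here is a minimal Node.js sketch. It assumes you have the raw (unparsed) request body as a string and that the signature is hex-encoded; the function and variable names are illustrative.

```javascript
import crypto from 'node:crypto'

// Verify a delivery signed as "t={timestamp},v1={signature}" in the sim-signature header.
// Hex encoding of the signature value is assumed here.
function verifySimSignature(rawBody, signatureHeader, secret) {
  // Parse "t=...,v1=..." into { t, v1 }
  const parts = Object.fromEntries(signatureHeader.split(',').map((kv) => kv.split('=')))
  const expected = crypto
    .createHmac('sha256', secret)
    .update(`${parts.t}.${rawBody}`)
    .digest('hex')
  // Constant-time comparison avoids leaking information through timing differences
  return (
    typeof parts.v1 === 'string' &&
    expected.length === parts.v1.length &&
    crypto.timingSafeEqual(Buffer.from(expected), Buffer.from(parts.v1))
  )
}
```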

View File

@@ -8,16 +8,22 @@ import { Image } from '@/components/ui/image'
Sim automatically calculates costs for all workflow executions, providing transparent pricing based on AI model usage and execution charges. Understanding these costs helps you optimize workflows and manage your budget effectively.
## Credits
Sim uses **credits** as the unit of measurement for all usage. **1 credit = $0.005**.
All plan limits, usage meters, and billing thresholds are displayed in credits throughout the Sim UI. Dollar amounts in this documentation are provided for reference.
## How Costs Are Calculated
Every workflow execution includes two cost components:
**Base Execution Charge**: $0.005 per execution
**Base Execution Charge**: 1 credit ($0.005) per execution
**AI Model Usage**: Variable cost based on token consumption
```javascript
modelCost = (inputTokens × inputPrice + outputTokens × outputPrice) / 1,000,000
totalCost = baseExecutionCharge + modelCost
totalCredits = baseExecutionCharge + modelCost × 200
```
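To make the conversion concrete, here is a worked example with illustrative per-million-token prices (placeholders, not actual Sim model rates):

```javascript
// Illustrative prices — placeholders, not real model rates
const inputTokens = 2000
const outputTokens = 500
const inputPrice = 3.0    // $ per 1M input tokens
const outputPrice = 15.0  // $ per 1M output tokens

const modelCost = (inputTokens * inputPrice + outputTokens * outputPrice) / 1_000_000 // $0.0135
// 1 base credit per execution, plus model cost converted to credits (1 credit = $0.005, so ×200)
const totalCredits = 1 + modelCost * 200 // 3.7 credits ≈ $0.0185
```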
<Callout type="info">
@@ -129,22 +135,131 @@ Use your own API keys for AI model providers instead of Sim's hosted keys to pay
When configured, workflows use your key instead of Sim's hosted keys. If removed, workflows automatically fall back to hosted keys with the multiplier.
## Cost Optimization Strategies
## Plans
- **Model Selection**: Choose models based on task complexity. Simple tasks can use GPT-4.1-nano while complex reasoning might need o1 or Claude Opus.
- **Prompt Engineering**: Well-structured, concise prompts reduce token usage without sacrificing quality.
- **Local Models**: Use Ollama or VLLM for non-critical tasks to eliminate API costs entirely.
- **Caching and Reuse**: Store frequently used results in variables or files to avoid repeated AI model calls.
- **Batch Processing**: Process multiple items in a single AI request rather than making individual calls.
Sim has two paid plan tiers — **Pro** and **Max**. Either can be used individually or with a team. Team plans pool credits across all seats in the organization.
| Plan | Price | Credits Included | Daily Refresh |
|------|-------|------------------|---------------|
| **Community** | $0 | 1,000 (one-time) | — |
| **Pro** | $25/mo | 6,000/mo | +50/day |
| **Max** | $100/mo | 25,000/mo | +200/day |
| **Enterprise** | Custom | Custom | — |
To use Pro or Max with a team, select **Get For Team** in subscription settings and choose the tier and number of seats. Credits are pooled across the organization at the per-seat rate (e.g. Max for Teams with 3 seats = 75,000 credits/mo pooled).
### Daily Refresh Credits
Paid plans include a small daily credit allowance that does not count toward your plan limit. Each day, usage up to the daily refresh amount is excluded from billable usage. This allowance resets every 24 hours and does not carry over — use it or lose it.
| Plan | Daily Refresh |
|------|---------------|
| **Pro** | 50 credits/day ($0.25) |
| **Max** | 200 credits/day ($1.00) |
For team plans, the daily refresh scales with seats (e.g. Max for Teams with 3 seats = 600 credits/day).
### Annual Billing
All paid plans are available with annual billing at a **15% discount**. Switch between monthly and annual billing in Settings → Subscription.
| Plan | Monthly | Annual (per month) | Annual Total |
|------|---------|-------------------|--------------|
| **Pro** | $25/mo | $21.25/mo | $255/yr |
| **Max** | $100/mo | $85/mo | $1,020/yr |
Team plans follow the same pricing per seat.
### On-Demand Billing
By default, your usage is capped at the credits included in your plan. To allow usage beyond your plan's included amount, you can either enable **on-demand billing** or manually edit your usage limit to any value above your plan's minimum.
- **Enable On-Demand**: Removes the usage cap entirely. You pay for any overage at the end of the billing period.
- **Edit Usage Limit**: Set a specific cap above your plan's included amount to control how much overage you're willing to allow.
- **Disable On-Demand**: Resets your usage limit back to the plan's included amount (only available if your current usage hasn't already exceeded it).
<Callout type="info">
On-demand billing is managed by workspace admins for team plans. Non-admin team members cannot toggle on-demand billing.
</Callout>
## Plan Limits
### Rate Limits
| Plan | Sync (req/min) | Async (req/min) |
|------|----------------|-----------------|
| **Free** | 50 | 200 |
| **Pro** | 150 | 1,000 |
| **Max** | 300 | 2,500 |
| **Enterprise** | 600 | 5,000 |
Team plans (Pro or Max for Teams) use the Max-tier rate limits, so an individual Max plan and any team plan share the same limits.
### File Storage
| Plan | Storage |
|------|---------|
| **Free** | 5 GB |
| **Pro** | 50 GB |
| **Max** | 500 GB |
| **Enterprise** | 500 GB (customizable) |
Team plans (Pro or Max for Teams) use 500 GB.
### Execution Time Limits
| Plan | Sync | Async |
|------|------|-------|
| **Free** | 5 minutes | 90 minutes |
| **Pro / Max / Team / Enterprise** | 50 minutes | 90 minutes |
**Sync executions** run immediately and return results directly. These are triggered via the API with `async: false` (default) or through the UI.
**Async executions** (triggered via API with `async: true`, webhooks, or schedules) run in the background.
<Callout type="info">
If a workflow exceeds its time limit, it will be terminated and marked as failed with a timeout error. Design long-running workflows to use async execution or break them into smaller workflows.
</Callout>
## Billing Model
Sim uses a **base subscription + overage** billing model:
### How It Works
**Pro Plan ($25/month — 6,000 credits):**
- Monthly subscription includes 6,000 credits of usage
- Usage under 6,000 credits → No additional charges
- Usage over 6,000 credits (with on-demand enabled) → Pay the overage at month end
- Example: 7,000 credits used = $25 (subscription) + $5 (overage for 1,000 extra credits at $0.005/credit)
**Team Plans:**
- Usage is pooled across all team members in the organization
- Overage is calculated from total team usage against the pooled limit
- Organization owner receives one bill
**Enterprise Plans:**
- Fixed monthly price, no overages
- Custom usage limits per agreement
### Threshold Billing
When on-demand is enabled and unbilled overage reaches $50, Sim automatically bills the full unbilled amount.
**Example:**
- Day 10: $70 overage → Bill $70 immediately
- Day 15: Additional $35 usage ($105 total) → Already billed, no action
- Day 20: Another $50 usage ($155 total, $85 unbilled) → Bill $85 immediately
This spreads large overage charges throughout the month instead of one large bill at period end.
## Usage Monitoring
Monitor your usage and billing in Settings → Subscription:
- **Current Usage**: Real-time usage and costs for the current period
- **Usage Limits**: Plan limits with visual progress indicators
- **Billing Details**: Projected charges and minimum commitments
- **Plan Management**: Upgrade options and billing history
- **Current Usage**: Real-time credit usage for the current billing period
- **Usage Limits**: Plan limits with a visual progress bar
- **On-Demand Billing**: Toggle on-demand billing to allow usage beyond your plan's included credits
- **Plan Management**: Upgrade, downgrade, or switch between monthly and annual billing
### Programmatic Usage Tracking
@@ -187,7 +302,7 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt
"usage": {
"currentPeriodCost": 12.34,
"limit": 100,
"plan": "pro"
"plan": "pro_6000"
}
}
```
@@ -198,83 +313,33 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt
- `remaining`: Current tokens available (can be up to `maxBurst`)
**Response Fields:**
- `currentPeriodCost` reflects usage in the current billing period
- `limit` is derived from individual limits (Free/Pro) or pooled organization limits (Team/Enterprise)
- `currentPeriodCost` reflects usage in the current billing period (in dollars)
- `limit` is derived from individual limits (Free/Pro/Max) or pooled organization limits (Team/Enterprise)
- `plan` is the highest-priority active plan associated with your user
## Plan Limits
## Cost Optimization Strategies
Different subscription plans have different usage limits:
| Plan | Monthly Usage Included | Rate Limits (per minute) |
|------|------------------------|-------------------------|
| **Free** | $20 | 50 sync, 200 async |
| **Pro** | $20 (adjustable) | 150 sync, 1,000 async |
| **Team** | $40/seat (pooled, adjustable) | 300 sync, 2,500 async |
| **Enterprise** | Custom | Custom |
## Execution Time Limits
Workflows have maximum execution time limits based on your subscription plan:
| Plan | Sync Execution | Async Execution |
|------|----------------|-----------------|
| **Free** | 5 minutes | 10 minutes |
| **Pro** | 50 minutes | 90 minutes |
| **Team** | 50 minutes | 90 minutes |
| **Enterprise** | 50 minutes | 90 minutes |
**Sync executions** run immediately and return results directly. These are triggered via the API with `async: false` (default) or through the UI.
**Async executions** (triggered via API with `async: true`, webhooks, or schedules) run in the background. Async time limits are up to 2x the sync limit, capped at 90 minutes.
<Callout type="info">
If a workflow exceeds its time limit, it will be terminated and marked as failed with a timeout error. Design long-running workflows to use async execution or break them into smaller workflows.
</Callout>
## Billing Model
Sim uses a **base subscription + overage** billing model:
### How It Works
**Pro Plan ($20/month):**
- Monthly subscription includes $20 of usage
- Usage under $20 → No additional charges
- Usage over $20 → Pay the overage at month end
- Example: $35 usage = $20 (subscription) + $15 (overage)
**Team Plan ($40/seat/month):**
- Pooled usage across all team members
- Overage calculated from total team usage
- Organization owner receives one bill
**Enterprise Plans:**
- Fixed monthly price, no overages
- Custom usage limits per agreement
### Threshold Billing
When unbilled overage reaches $50, Sim automatically bills the full unbilled amount.
**Example:**
- Day 10: $70 overage → Bill $70 immediately
- Day 15: Additional $35 usage ($105 total) → Already billed, no action
- Day 20: Another $50 usage ($155 total, $85 unbilled) → Bill $85 immediately
This spreads large overage charges throughout the month instead of one large bill at period end.
## Cost Management Best Practices
1. **Monitor Regularly**: Check your usage dashboard frequently to avoid surprises
2. **Set Budgets**: Use plan limits as guardrails for your spending
3. **Optimize Workflows**: Review high-cost executions and optimize prompts or model selection
4. **Use Appropriate Models**: Match model complexity to task requirements
5. **Batch Similar Tasks**: Combine multiple requests when possible to reduce overhead
- **Model Selection**: Choose models based on task complexity. Simple tasks can use GPT-4.1-nano while complex reasoning might need o1 or Claude Opus.
- **Prompt Engineering**: Well-structured, concise prompts reduce token usage without sacrificing quality.
- **Local Models**: Use Ollama or VLLM for non-critical tasks to eliminate API costs entirely.
- **Caching and Reuse**: Store frequently used results in variables or files to avoid repeated AI model calls.
- **Batch Processing**: Process multiple items in a single AI request rather than making individual calls.
## Next Steps
- Review your current usage in [Settings → Subscription](https://sim.ai/settings/subscription)
- Learn about [Logging](/execution/logging) to track execution details
- Explore the [External API](/execution/api) for programmatic cost monitoring
- Check out [workflow optimization techniques](/blocks) to reduce costs
- Check out [workflow optimization techniques](/blocks) to reduce costs
import { FAQ } from '@/components/ui/faq'
<FAQ items={[
{ question: "How much does a single workflow execution cost?", answer: "Every execution incurs a base charge of 1 credit ($0.005). On top of that, any AI model usage is billed based on token consumption. Workflows that do not use AI blocks only pay the base execution charge." },
{ question: "What is the credit-to-dollar conversion rate?", answer: "1 credit equals $0.005. All plan limits, usage meters, and billing thresholds in the Sim UI are displayed in credits." },
{ question: "Do unused daily refresh credits carry over?", answer: "No. Daily refresh credits reset every 24 hours and do not accumulate. If you do not use them within the day, they are lost." },
{ question: "What happens when I exceed my plan's credit limit?", answer: "By default, your usage is capped at your plan's included credits and executions will stop. If you enable on-demand billing or manually raise your usage limit in Settings, you can continue running workflows and pay for the overage at the end of the billing period." },
{ question: "How does the 1.1x hosted model multiplier work?", answer: "When you use Sim's hosted API keys (instead of bringing your own), a 1.1x multiplier is applied to the base model pricing for Agent blocks. This covers infrastructure and API management costs. You can avoid this multiplier by using your own API keys via the BYOK feature." },
{ question: "Are there any free options for AI models?", answer: "Yes. If you run local models through Ollama or VLLM, there are no API costs for those model calls. You still pay the base execution charge of 1 credit per execution." },
{ question: "When does threshold billing trigger?", answer: "When on-demand billing is enabled and your unbilled overage reaches $50, Sim automatically bills the full unbilled amount. This spreads large charges throughout the month instead of accumulating one large bill at period end." },
]} />

View File

@@ -166,3 +166,14 @@ Use `url` for direct downloads or `base64` for inline processing.
2. **Check file types** - Ensure the file type matches what the receiving block expects. The Vision block needs images, the File block handles documents.
3. **Consider file size** - Large files increase execution time. For very large files, consider using storage blocks (S3, Supabase) for intermediate storage.
import { FAQ } from '@/components/ui/faq'
<FAQ items={[
{ question: "What is the maximum file size for uploads?", answer: "The maximum file size for files processed during workflow execution is 20 MB. Files exceeding this limit will be rejected with an error indicating the actual file size. For larger files, use storage blocks like S3 or Supabase for intermediate storage." },
{ question: "What file input formats are supported via the API?", answer: "When triggering a workflow via API, you can send files as base64-encoded data (using a data URI with the format 'data:{mime};base64,{data}') or as a URL pointing to a publicly accessible file. In both cases, include the file name and MIME type in the request." },
{ question: "How are files passed between blocks internally?", answer: "Files are represented as standardized UserFile objects with name, url, base64, type, and size properties. Most blocks accept the full file object and extract what they need automatically, so you typically pass the entire object rather than individual properties." },
{ question: "Which blocks can output files?", answer: "Gmail outputs email attachments, Slack outputs downloaded files, TTS generates audio files, Video Generator and Image Generator produce media files. Storage blocks like S3, Supabase, Google Drive, and Dropbox can also retrieve files for use in downstream blocks." },
{ question: "Do I need to extract base64 or URL from file objects manually?", answer: "No. Most blocks accept the full file object and handle the format conversion automatically. Simply pass the entire file reference (e.g., <gmail.attachments[0]>) and the receiving block will extract the data it needs." },
{ question: "How do file fields work in the Start block's input format?", answer: "When you define a field with type 'file[]' in the Start block's input format, the execution engine automatically processes incoming file data (base64 or URL) and uploads it to storage, converting it into UserFile objects before the workflow runs." },
]} />
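Building on the API file input answer above, here is a hedged request sketch. The host and route follow the execute endpoint mentioned in the API docs; the input field name (`documents`) stands in for whatever you defined in the Start block's input format, and the file property names are illustrative rather than a confirmed schema.

```javascript
// Hypothetical request — the field and property names below are illustrative placeholders.
const res = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
  method: 'POST',
  headers: {
    'x-api-key': process.env.SIM_API_KEY,
    'Content-Type': 'application/json',
  },
  body: JSON.stringify({
    // "documents" represents the file[] field defined in your Start block's input format
    documents: [
      {
        name: 'report.pdf',
        type: 'application/pdf',
        // either a base64 data URI or a publicly accessible URL
        data: 'data:application/pdf;base64,<BASE64_CONTENT>',
      },
    ],
  }),
})
const result = await res.json()
```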

View File

@@ -5,6 +5,7 @@ title: Overview
import { Callout } from 'fumadocs-ui/components/callout'
import { Card, Cards } from 'fumadocs-ui/components/card'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
Sim's execution engine brings your workflows to life by processing blocks in the correct order, managing data flow, and handling errors gracefully. This overview explains exactly how workflows are executed in Sim.
@@ -55,7 +56,7 @@ Each workflow maintains a rich context during execution containing:
## Deployment Snapshots
All public entry points—API, Chat, Schedule, Webhook, and Manual runs—execute the workflows active deployment snapshot. Publish a new deployment whenever you change the canvas so every trigger uses the updated version.
API, Chat, Schedule, and Webhook executions run against the workflow's active deployment snapshot. Manual runs from the editor execute the current draft canvas state, letting you test changes before deploying. Publish a new deployment whenever you change the canvas so every trigger uses the updated version.
<div className='flex justify-center my-6'>
<Image
@@ -114,3 +115,13 @@ const result = await client.executeWorkflow('workflow-id', {
## What's Next?
Start with [Execution Basics](/execution/basics) to understand how workflows run, then explore [Logging](/execution/logging) to monitor your executions and [Cost Calculation](/execution/costs) to optimize your spending.
<FAQ items={[
{ question: "What are the execution timeout limits?", answer: "Synchronous executions (API, chat) have a default timeout of 5 minutes on the Free plan and 50 minutes on Pro, Team, and Enterprise plans. Asynchronous executions (schedules, webhooks) allow up to 90 minutes across all plans. These limits are configurable by the platform administrator." },
{ question: "How does parallel execution work?", answer: "The engine identifies layers of blocks with no dependencies on each other and runs them concurrently. Within loops and parallel blocks, the engine supports up to 20 parallel branches by default and up to 1,000 loop iterations. Nested subflows (loops inside parallels, or vice versa) are supported up to 10 levels deep." },
{ question: "Can I cancel a running execution?", answer: "Yes. The engine supports cancellation through an abort signal mechanism. When you cancel an execution, the engine checks for cancellation between block executions (at roughly 500ms intervals when using Redis-backed cancellation). Any in-progress blocks complete, and the execution returns with a cancelled status." },
{ question: "What is a deployment snapshot?", answer: "A deployment snapshot is a frozen copy of your workflow at the time you click Deploy. Trigger-based executions (API, chat, schedule, webhook) run against the active snapshot, not your draft canvas. Manual runs from the editor execute the current draft canvas state, so you can test changes before deploying. You can view, compare, and roll back snapshots from the Deploy modal." },
{ question: "How are execution costs calculated?", answer: "Costs are tracked per block based on the AI model used. Each block log records input tokens, output tokens, and the computed cost using the model's pricing. The total workflow cost is the sum of all block-level costs for that execution. You can review costs in the execution logs." },
{ question: "What happens when a block fails during execution?", answer: "When a block throws an error, the engine captures the error message in the block log, finalizes any incomplete logs with timing data, and halts the execution with a failure status. If the failing block has an error output handle connected to another block, that error path is followed instead of halting entirely." },
{ question: "Can I re-run part of a workflow without starting from scratch?", answer: "Yes. The run-from-block feature lets you select a specific block and re-execute from that point. The engine computes which upstream blocks need to be re-run (the dirty set) and preserves cached outputs from blocks that have not changed, so only the affected portion of the workflow re-executes." },
]} />

View File

@@ -121,10 +121,8 @@ The snapshot provides:
## Log Retention
- **Free Plan**: 7 days of log retention
- **Pro Plan**: 30 days of log retention
- **Team Plan**: 90 days of log retention
- **Enterprise Plan**: Custom retention periods available
- **Free Plan**: 7 days of log retention (logs are archived to cloud storage and then deleted)
- **Pro / Team / Enterprise Plans**: Logs are retained indefinitely (no automatic cleanup)
## Best Practices
@@ -147,4 +145,15 @@ The snapshot provides:
- Learn about [Cost Calculation](/execution/costs) to understand workflow pricing
- Explore the [External API](/execution/api) for programmatic log access
- Set up [Notifications](/execution/api#notifications) for real-time alerts via webhook, email, or Slack
- Set up [Notifications](/execution/api#notifications) for real-time alerts via webhook, email, or Slack
import { FAQ } from '@/components/ui/faq'
<FAQ items={[
{ question: "How long are execution logs retained?", answer: "Free plans retain logs for 7 days — after that, logs are archived to cloud storage and deleted from the database. Pro, Team, and Enterprise plans retain logs indefinitely with no automatic cleanup." },
{ question: "What data is captured in each execution log?", answer: "Each log entry includes the execution ID, workflow ID, trigger type, start and end timestamps, total duration in milliseconds, cost breakdown (total cost, token counts, and per-model breakdowns), execution data with trace spans, final output, and any associated files. The log details sidebar lets you inspect block-level inputs and outputs." },
{ question: "Are API keys visible in the logs?", answer: "No. API keys and credentials are automatically redacted in the log input tab for security. You can safely inspect block inputs without exposing sensitive values." },
{ question: "What is a workflow snapshot?", answer: "A workflow snapshot is a frozen copy of the workflow's structure (blocks, connections, and configuration) captured at execution time. It lets you see the exact state of the workflow when a particular execution ran, which is useful for debugging workflows that have been modified since the execution." },
{ question: "Can I access logs programmatically?", answer: "Yes. The External API provides endpoints to query logs with filtering by workflow, time range, trigger type, duration, cost, and model. You can also set up webhook, email, or Slack notifications for real-time alerts when executions complete." },
{ question: "What does Live mode do on the Logs page?", answer: "Live mode automatically refreshes the Logs page in real-time so new execution entries appear as they are logged, without requiring manual page refreshes. This is useful during deployments or when monitoring active workflows." },
]} />

View File

@@ -23,6 +23,7 @@ import {
} from '@/components/icons'
import { Video } from '@/components/ui/video'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
Build your first AI workflow in 10 minutes. In this tutorial, you'll create a people research agent that uses advanced LLM-powered search tools to extract and structure information about individuals.
@@ -177,7 +178,7 @@ Build, test, and refine workflows quickly with immediate feedback
Discover API, Function, Condition, and other workflow blocks
</Card>
<Card title="Browse Integrations" href="/tools">
Connect 80+ services including Gmail, Slack, Notion, and more
Connect 160+ services including Gmail, Slack, Notion, and more
</Card>
<Card title="Add Custom Logic" href="/blocks/function">
Write custom functions for advanced data processing
@@ -191,6 +192,16 @@ Build, test, and refine workflows quickly with immediate feedback
**Need detailed explanations?** Visit the [Blocks documentation](/blocks) for comprehensive guides on each component.
**Looking for integrations?** Explore the [Tools documentation](/tools) to see all 80+ available integrations.
**Looking for integrations?** Explore the [Tools documentation](/tools) to see all 160+ available integrations.
**Ready to go live?** Learn about [Execution and Deployment](/execution) to make your workflows production-ready.
<FAQ items={[
{ question: "How long does the getting started tutorial take?", answer: "About 10 minutes. The tutorial walks you through creating a people research agent with web search tools and structured output. You will have a fully working workflow by the end." },
{ question: "Do I need API keys to follow this tutorial?", answer: "You need API keys for the search tools (Exa and Linkup) used in this tutorial. For the AI model, you can either use Sim's hosted keys (included with your plan credits) or bring your own OpenAI API key. If you prefer not to set up search tool keys, you can still build a basic agent workflow without them." },
{ question: "Do I need coding experience to complete this tutorial?", answer: "No. The entire tutorial uses the visual drag-and-drop interface. You will connect blocks, configure settings, and test through the chat panel without writing any code." },
{ question: "Can I use a different AI model instead of GPT-4o?", answer: "Yes. The Agent block supports models from OpenAI, Anthropic, Google, Groq, Cerebras, DeepSeek, Mistral, xAI, and more. You can select any available model from the dropdown. If you self-host, you can also use local models through Ollama." },
{ question: "Can I import workflows from other tools?", answer: "Sim does not currently support importing workflows from other automation platforms. However, you can use the Copilot feature to describe what you want in natural language and have it build the workflow for you, which is often faster than manual recreation." },
{ question: "What if my workflow does not produce the expected output?", answer: "Use the Chat panel to test iteratively and inspect outputs from each block. You can click the dropdown to view different block outputs and pinpoint where the issue is. The execution logs (accessible from the Logs tab) show detailed information about each step including token usage, costs, and any errors." },
{ question: "Where do I go after completing this tutorial?", answer: "Explore the Blocks documentation to learn about Condition, Router, Function, and API blocks. Browse the Tools section to discover 160+ integrations you can add to your agents. When you are ready to deploy, check the Execution docs for REST API, webhook, and scheduled trigger options." },
]} />

View File

@@ -6,6 +6,7 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
import { Callout } from 'fumadocs-ui/components/callout'
import { Image } from '@/components/ui/image'
import { Video } from '@/components/ui/video'
import { FAQ } from '@/components/ui/faq'
Sim is an open-source visual workflow builder for building and deploying AI agent workflows. Design intelligent automation systems using a no-code interface—connect AI models, databases, APIs, and business tools through an intuitive drag-and-drop canvas. Whether you're building chatbots, automating business processes, or orchestrating complex data pipelines, Sim provides the tools to bring your AI workflows to life.
@@ -57,11 +58,11 @@ Enable your team to build together. Multiple users can edit workflows simultaneo
## Integrations
Sim provides native integrations with 80+ services across multiple categories:
Sim provides native integrations with 160+ services across multiple categories:
- **AI Models**: OpenAI, Anthropic, Google Gemini, Groq, Cerebras, local models via Ollama or VLLM
- **Communication**: Gmail, Slack, Microsoft Teams, Telegram, WhatsApp
- **Productivity**: Notion, Google Workspace, Airtable, Monday.com
- **Productivity**: Notion, Google Workspace, Airtable
- **Development**: GitHub, Jira, Linear, automated browser testing
- **Search & Data**: Google Search, Perplexity, Firecrawl, Exa AI
- **Databases**: PostgreSQL, MySQL, Supabase, Pinecone, Qdrant
@@ -109,9 +110,20 @@ Ready to build your first AI workflow?
Learn about the building blocks
</Card>
<Card title="Tools & Integrations" href="/tools">
Explore 80+ built-in integrations
Explore 160+ built-in integrations
</Card>
<Card title="Team Permissions" href="/permissions/roles-and-permissions">
Set up workspace roles and permissions
</Card>
</Cards>
<FAQ items={[
{ question: "Is Sim free to use?", answer: "Sim offers a free Community plan with 1,000 one-time credits to get started. Paid plans start at $25/month (Pro) with 5,000 credits and go up to $100/month (Max) with 20,000 credits. Annual billing is available at a 15% discount. You can also self-host Sim for free on your own infrastructure." },
{ question: "Is Sim open source?", answer: "Yes. Sim is open source under the Apache 2.0 license. The full source code is available on GitHub and you can self-host it, contribute to development, or modify it for your own needs. Enterprise features (SSO, access control) have a separate license that requires a subscription for production use." },
{ question: "Which AI models and providers are supported?", answer: "Sim supports 15+ providers including OpenAI, Anthropic, Google Gemini, Groq, Cerebras, DeepSeek, Mistral, xAI, and OpenRouter. You can also run local models through Ollama or VLLM at no API cost. Bring Your Own Key (BYOK) is supported so you can use your own API keys at base provider pricing with no markup." },
{ question: "Do I need coding experience to use Sim?", answer: "No. Sim is a no-code visual builder where you design workflows by dragging blocks onto a canvas and connecting them. For advanced use cases, the Function block lets you write custom JavaScript, but it is entirely optional." },
{ question: "Can I self-host Sim?", answer: "Yes. Sim provides Docker Compose configurations for self-hosted deployments. The stack includes the Sim application, a PostgreSQL database with pgvector, and a realtime collaboration server. You can also integrate local AI models via Ollama for a fully offline setup." },
{ question: "Is there a limit on how many workflows I can create?", answer: "There is no limit on the number of workflows you can create on any plan. Usage limits apply to execution credits, rate limits, and file storage, which vary by plan tier." },
{ question: "What integrations are available?", answer: "Sim offers 160+ native integrations across categories including AI models, communication tools (Gmail, Slack, Teams, Telegram), productivity apps (Notion, Google Workspace, Airtable), development tools (GitHub, Jira, Linear), search services (Google Search, Perplexity, Exa), and databases (PostgreSQL, Supabase, Pinecone). For anything not built in, you can use the MCP (Model Context Protocol) support to connect custom services." },
{ question: "How does Sim compare to other workflow automation tools?", answer: "Sim is purpose-built for AI agent workflows rather than general task automation. It provides a visual canvas for orchestrating LLM-powered agents with built-in support for tool use, structured outputs, conditional branching, and real-time collaboration. The Copilot feature also lets you build and modify workflows using natural language." },
]} />

View File

@@ -0,0 +1,156 @@
---
title: Connectors
description: Automatically sync documents from external sources into your knowledge base
---
import { Callout } from 'fumadocs-ui/components/callout'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { FAQ } from '@/components/ui/faq'
Connectors let you pull documents directly from external services into your knowledge base. Instead of manually uploading files, a connector continuously syncs content from sources like Notion, Google Drive, GitHub, Slack, and more — keeping your knowledge base up to date automatically.
## Available Connectors
Sim ships with 30 built-in connectors spanning productivity tools, cloud storage, development platforms, and more.
| Category | Connectors |
|----------|-----------|
| **Productivity** | Notion, Confluence, Asana, Linear, Jira, Google Calendar, Google Sheets |
| **Cloud Storage** | Google Drive, Dropbox, OneDrive, SharePoint |
| **Documents** | Google Docs, WordPress, Webflow |
| **Development** | GitHub |
| **Communication** | Slack, Discord, Microsoft Teams, Reddit |
| **Email** | Gmail, Outlook |
| **CRM** | HubSpot, Salesforce |
| **Support** | Intercom, ServiceNow, Zendesk |
| **Data** | Airtable |
| **Note-taking** | Evernote, Obsidian |
| **Meetings** | Fireflies |
## Adding a Connector
<Steps>
<Step>
### Select a source
Open a knowledge base and click **Add Connector**. You'll see the full list of available connectors — pick the service you want to sync from.
</Step>
<Step>
### Authenticate
Most connectors use **OAuth** — select an existing credential from the dropdown, or click **Connect new account** to authorize through the service's login flow. Tokens are refreshed automatically, so you won't need to re-authenticate unless you revoke access.
A few connectors (Evernote, Obsidian, Fireflies) use **API keys** instead. Paste your key or developer token directly, and it will be stored securely.
<Callout type="info">
If you rotate an API key in the external service, you'll need to update it in Sim as well. OAuth tokens are refreshed automatically, but API keys are not.
</Callout>
</Step>
<Step>
### Configure
Each connector has its own configuration fields that control what gets synced. Some examples:
- **Notion**: Choose between syncing an entire workspace, a specific database, or a single page tree
- **GitHub**: Specify a repository, branch, and optional file extension filter
- **Confluence**: Enter your Atlassian domain and optionally filter by space key or content type
- **Obsidian**: Provide your vault URL and optionally restrict to a folder path
All configuration is validated when you save — if a repository doesn't exist or a domain is unreachable, you'll get an immediate error.
</Step>
<Step>
### Choose sync frequency
Select how often the connector should re-sync:
| Frequency | Description |
|-----------|-------------|
| Every hour | Best for fast-moving sources |
| Every 6 hours | Good balance for most use cases |
| **Daily** (default) | Suitable for content that changes infrequently |
| Weekly | For stable, rarely-updated sources |
| Manual only | Sync only when you trigger it |
</Step>
<Step>
### Configure metadata tags (optional)
If the connector supports metadata tags, you'll see checkboxes for each tag type (e.g., Labels, Last Modified, Notebook). All are enabled by default — uncheck any you don't need.
See the [Metadata Tags](#metadata-tags) section below for details.
</Step>
<Step>
### Connect & Sync
Click **Connect & Sync** to save the connector and trigger the first sync immediately. Documents will begin appearing in your knowledge base as they are processed.
</Step>
</Steps>
## How Syncing Works
On each sync, the connector fetches documents from the external service and compares them against what's already in your knowledge base. Only documents that have actually changed are reprocessed — new content is added, updated content is re-chunked and re-embedded, and documents that no longer exist in the source are removed.
This means syncing is efficient even for large document sets. A connector with thousands of documents will only do meaningful work when something changes.
### Handling Failures
If a single document fails to fetch (e.g., due to a permission issue or timeout), the sync continues with the remaining documents. The failed document will be retried on the next sync cycle.
If an entire sync fails (e.g., the service is down or credentials expired), the connector automatically backs off and retries later. The backoff resets as soon as a sync succeeds.
## Metadata Tags
Connectors can automatically populate [tags](/knowledgebase/tags) with metadata from the source, letting you filter documents in the Knowledge block based on information from the external service.
For example, a Notion connector might tag documents with their **Labels**, **Last Modified** date, and **Created** date. A GitHub connector might tag documents with their **Repository** and **File Path**. This metadata becomes available for [tag-based filtering](/knowledgebase/tags) in your workflows.
### Opting Out
You can disable specific metadata tags during connector setup. Disabled tags won't be populated, leaving those tag slots available for other connectors or manual tagging.
<Callout type="info">
Tag slots are shared across all documents in a knowledge base. If you have multiple connectors, each one's metadata tags draw from the same pool of available slots.
</Callout>
## Excluding Documents
You can manually exclude specific documents from a connector's sync. Excluded documents are skipped on every subsequent sync, even if they change in the source. This is useful for filtering out templates, drafts, or other content you don't want in your knowledge base.
## Source Links
Every synced document retains a link back to the original in the external service. This lets you trace any knowledge base document to its source — whether that's a Notion page, a GitHub file, a Confluence article, or a Slack conversation.
## Multiple Connectors
You can add multiple connectors to a single knowledge base. For example, you might sync internal documentation from Confluence alongside code from GitHub and meeting notes from Fireflies — all searchable together through the Knowledge block.
Each connector manages its own documents independently. Metadata tag slots are shared across the knowledge base, so keep an eye on slot usage if you're combining several connectors that each populate tags.
## Common Use Cases
- **Internal knowledge base**: Sync your team's Notion workspace and Confluence spaces so AI agents can answer questions about internal processes, policies, and documentation
- **Customer support**: Connect HubSpot or Salesforce alongside your help docs from WordPress or Google Docs to give support agents full context on customers and product information
- **Engineering assistant**: Sync a GitHub repository and Jira or Linear issues so an AI agent can reference code, specs, and ticket history when answering developer questions
- **Meeting intelligence**: Pull in Fireflies transcripts alongside Slack conversations to build a searchable archive of decisions and discussions
- **Research and notes**: Sync Evernote notebooks or an Obsidian vault to make your personal notes available to AI workflows
<FAQ items={[
{ question: "How often do connectors sync?", answer: "You can choose from hourly, every 6 hours, daily (default), weekly, or manual-only sync frequencies. Each connector can have its own schedule." },
{ question: "What happens if a source document is deleted?", answer: "On the next sync, the connector detects that the document no longer exists in the source and removes it from your knowledge base automatically." },
{ question: "Can I connect multiple services to one knowledge base?", answer: "Yes. You can add as many connectors as you need to a single knowledge base. Each connector manages its documents independently." },
{ question: "Do I need to re-authenticate connectors?", answer: "OAuth-based connectors refresh tokens automatically. API key-based connectors (Evernote, Obsidian, Fireflies) need manual updates if you rotate the key." },
{ question: "What if a connector sync fails?", answer: "If a single document fails, the rest of the sync continues. If the entire sync fails (e.g., service is down), the connector backs off and retries automatically." },
{ question: "Can I exclude specific documents from syncing?", answer: "Yes. You can manually exclude documents from any connector. Excluded documents are skipped on every subsequent sync, even if they change in the source." },
{ question: "Do metadata tags count against a limit?", answer: "Tag slots are shared across all documents in a knowledge base. If you have multiple connectors, their metadata tags draw from the same pool of available slots." },
]} />

View File

@@ -5,6 +5,7 @@ description: Upload, process, and search through your documents with intelligent
import { Video } from '@/components/ui/video'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
The knowledgebase allows you to upload, process, and search through your documents with intelligent vector search and chunking. Documents of various types are automatically processed, embedded, and made searchable. Your documents are intelligently chunked, and you can view, edit, and search through them using natural language queries.
@@ -25,7 +26,7 @@ The system handles the entire processing pipeline for you:
## Supported File Types
Sim supports PDF, Word (DOC/DOCX), plain text (TXT), Markdown (MD), HTML, Excel (XLS/XLSX), PowerPoint (PPT/PPTX), and CSV files. Files can be up to 100MB each, with optimal performance for files under 50MB. You can upload multiple documents simultaneously, and PDF files include OCR processing for scanned documents.
Sim supports PDF, Word (DOC/DOCX), plain text (TXT), Markdown (MD), HTML, HTM, Excel (XLS/XLSX), PowerPoint (PPT/PPTX), CSV, JSON, and YAML/YML files. Files can be up to 100MB each, with optimal performance for files under 50MB. You can upload multiple documents simultaneously, and PDF files include OCR processing for scanned documents.
## Viewing and Editing Chunks
@@ -40,8 +41,8 @@ When creating a knowledge base, you can configure how documents are split into c
| Setting | Unit | Default | Range | Description |
|---------|------|---------|-------|-------------|
| **Max Chunk Size** | tokens | 1,024 | 100-4,000 | Maximum size of each chunk (1 token ≈ 4 characters) |
| **Min Chunk Size** | characters | 1 | 1-2,000 | Minimum chunk size to avoid tiny fragments |
| **Overlap** | characters | 200 | 0-500 | Context overlap between consecutive chunks |
| **Min Chunk Size** | characters | 100 | 100-2,000 | Minimum chunk size to avoid tiny fragments |
| **Overlap** | tokens | 200 | 0-500 | Context overlap between consecutive chunks |
- **Hierarchical splitting**: Respects document structure (sections, paragraphs, sentences)
@@ -117,4 +118,14 @@ Sim uses vector search powered by [pgvector](https://github.com/pgvector/pgvecto
4. **Explore chunks**: View and edit the processed content
5. **Add to workflows**: Use the Knowledge block to integrate with your AI agents
The knowledgebase transforms your static documents into an intelligent, searchable resource that your AI workflows can leverage for more informed and contextual responses.
The knowledgebase transforms your static documents into an intelligent, searchable resource that your AI workflows can leverage for more informed and contextual responses.
<FAQ items={[
{ question: "What file types are supported?", answer: "PDF, Word (DOC/DOCX), plain text (TXT), Markdown (MD), HTML, HTM, Excel (XLS/XLSX), PowerPoint (PPT/PPTX), CSV, JSON, and YAML/YML files." },
{ question: "Is there a file size limit?", answer: "Files can be up to 100 MB each, with optimal performance for files under 50 MB." },
{ question: "Can I edit chunks after processing?", answer: "Yes. You can view, edit, merge, split, and add metadata to individual chunks after documents are processed." },
{ question: "How does semantic search work?", answer: "Documents are embedded as vectors using AI models. When you search, your query is also embedded and compared against document vectors to find conceptually similar content — even without exact keyword matches." },
{ question: "Does it support scanned PDFs?", answer: "Yes. When configured with Azure or Mistral OCR, Sim can extract text from image-based and scanned PDF documents." },
{ question: "Can I search across multiple knowledge bases?", answer: "Each Knowledge block targets a specific knowledge base. You can use multiple Knowledge blocks in a workflow to search across different knowledge bases." },
{ question: "How do I control chunk size?", answer: "When creating a knowledge base, you can configure max chunk size (100-4,000 tokens), min chunk size (100-2,000 characters), and overlap (0-500 tokens)." },
]} />

View File

@@ -1,4 +1,4 @@
{
"title": "Knowledgebase",
"pages": ["index", "tags"]
"pages": ["index", "connectors", "tags"]
}

View File

@@ -3,6 +3,7 @@ title: Tags and Filtering
---
import { Video } from '@/components/ui/video'
import { FAQ } from '@/components/ui/faq'
Tags provide a powerful way to organize your documents and create precise filtering for your vector searches. By combining tag-based filtering with semantic search, you can retrieve exactly the content you need from your knowledgebase.
@@ -16,7 +17,7 @@ You can add custom tags to any document in your knowledgebase to organize and ca
### Tag Management
- **Custom tags**: Create your own tag system that fits your workflow
- **Multiple tags per document**: Apply as many tags as needed to each document, there are 7 tag slots available per knowledgebase that are shared by all documents in the knowledgebase
- **Multiple tags per document**: Apply as many tags as needed to each document. Each knowledgebase has 17 tag slots total: 7 text, 5 number, 2 date, and 3 boolean slots, shared by all documents in the knowledgebase
- **Tag organization**: Group related documents with consistent tagging
### Tag Best Practices
@@ -65,10 +66,10 @@ When you **provide both tags and a search query**:
### Search Configuration
#### Tag Filtering
- **Multiple tags**: Use multiple tags for OR logic (document must have one or more of the tags)
- **Multiple tags**: Use multiple tags with AND or OR logic to control whether documents must match all or any of the specified tags
- **Tag combinations**: Mix different tag types for precise filtering
- **Case sensitivity**: Tag matching is case-insensitive
- **Partial matching**: Exact tag name matching required
- **Partial matching**: Text fields support partial matching operators such as contains, starts_with, and ends_with in addition to exact matching
#### Vector Search Parameters
- **Query complexity**: Natural language questions work best
@@ -105,4 +106,13 @@ When you **provide both tags and a search query**:
4. **Integrate into workflows**: Use the Knowledge block with your tagging strategy
5. **Refine over time**: Adjust your tagging approach based on search results
Tags transform your knowledgebase from a simple document store into a precisely organized, searchable intelligence system that your AI workflows can navigate with surgical precision.
Tags transform your knowledgebase from a simple document store into a precisely organized, searchable intelligence system that your AI workflows can navigate with surgical precision.
<FAQ items={[
{ question: "How many tag slots are available per knowledgebase?", answer: "Each knowledgebase supports up to 17 tag slots total across four field types: 7 text slots, 5 number slots, 2 date slots, and 3 boolean slots. These slots are shared across all documents in the knowledgebase." },
{ question: "What tag field types are supported?", answer: "Four field types are supported: text (free-form string values), number (numeric values), date (date values in YYYY-MM-DD format), and boolean (true/false values). Each type has its own pool of available slots." },
{ question: "Is tag matching case-sensitive?", answer: "No, tag matching is case-insensitive. You can use any capitalization when filtering by tags and it will match regardless of how the tag value was originally entered." },
{ question: "How does combined tag and vector search work?", answer: "When you provide both tags and a search query, tag filtering is applied first to narrow down the document set, then vector search runs within that filtered subset. This approach is more efficient because it reduces the number of vectors that need similarity comparison." },
{ question: "What is the default number of results returned from a knowledge search?", answer: "The default is 10 results. You can configure this with the topK parameter, which accepts values from 1 to 100." },
{ question: "What embedding model does Sim use for knowledge base search?", answer: "Sim uses OpenAI's text-embedding-3-small model with 1536 dimensions for generating document embeddings and performing vector similarity search." },
]} />
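For intuition, here is a conceptual sketch of the tag-first, vector-second flow described in the FAQ above. It is not Sim's implementation; the document structure, field names, and helper functions are illustrative only.

```python
# Conceptual sketch of tag-filtered vector search: restrict the candidate set
# by tag values first, then rank only the survivors by cosine similarity.
# Document structure and helper names are illustrative, not Sim's API.
import math

def cosine(a: list[float], b: list[float]) -> float:
    dot = sum(x * y for x, y in zip(a, b))
    norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(x * x for x in b))
    return dot / norm if norm else 0.0

def search(docs: list[dict], query_vec: list[float], tag_filters: dict, top_k: int = 10):
    # Stage 1: tag filtering (case-insensitive exact match on text tags).
    candidates = [
        d for d in docs
        if all(str(d["tags"].get(k, "")).lower() == str(v).lower() for k, v in tag_filters.items())
    ]
    # Stage 2: vector similarity computed only over the filtered subset.
    ranked = sorted(candidates, key=lambda d: cosine(d["embedding"], query_vec), reverse=True)
    return ranked[:top_k]

docs = [
    {"id": "a", "tags": {"team": "Support"}, "embedding": [0.9, 0.1]},
    {"id": "b", "tags": {"team": "Sales"}, "embedding": [0.8, 0.2]},
]
print(search(docs, [1.0, 0.0], {"team": "support"}))
```

Because filtering happens before similarity scoring, only the documents that survive the tag filter incur the cost of a vector comparison, which is why the combined search is more efficient than scoring the whole knowledgebase.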

View File

@@ -0,0 +1,76 @@
---
title: Sim Mailer
description: Send emails to your workspace and let Sim handle them as tasks.
---
import { Callout } from 'fumadocs-ui/components/callout'
import { FAQ } from '@/components/ui/faq'
Sim Mailer gives your workspace a dedicated email address. Forward or send emails to it and Sim will process them as tasks — reading the subject, body, and any attachments, then replying to the thread with the result.
This means you can interact with Sim directly from your email client without switching apps.
## Getting Started
1. Navigate to **Settings** → **Inbox**
2. Toggle the inbox on
3. Optionally choose a custom address prefix (e.g., `acme` → `acme@mothership.sim.ai`)
4. Copy your inbox address and start sending emails
If you skip the custom prefix, one is generated automatically.
<Callout type="info">
Changing your address creates a new inbox. The old address stops working immediately.
</Callout>
## What You Can Send
Write your email like you would to a colleague. The subject and body become the task prompt.
**Attachments are fully supported.** Images, PDFs, and documents (up to 10 MB each) are read by Sim and displayed inline in the conversation — image attachments show as previews, just like when you upload them directly in the chat.
| Good email | Why it works |
|------------|-------------|
| "Summarize the attached PDF and list action items" | Clear task with an attachment |
| "What's in this image?" with a photo attached | Sim reads and describes the image |
| "Draft a reply to this forwarded thread" | Uses the email body as context |
## Allowed Senders
Only authorized senders can create tasks. Emails from anyone else are automatically rejected.
- **Workspace members** are allowed by default — no setup needed
- **External senders** can be added manually with an optional label for easy identification
Manage your allowed senders list in **Settings** → **Inbox** → **Allowed Senders**.
## Tracking Tasks
Every email becomes a task you can track in **Settings** → **Inbox**:
- **Search** by subject, sender, or body content
- **Filter** by status to find what you need
- **Click** any completed or failed task to jump to the full conversation
### Task Statuses
| Status | Meaning |
|--------|---------|
| **Received** | Email accepted, queued for processing |
| **Processing** | Sim is actively working on it |
| **Completed** | Done — the result was sent as an email reply |
| **Failed** | Something went wrong during execution |
| **Rejected** | Email blocked (sender not allowed, automated sender, or rate limit exceeded) |
## Conversations
Each email task creates a conversation in your workspace. You can continue the conversation from the Sim chat interface, and any follow-up emails in the same thread are linked to the same conversation.
<FAQ items={[
{ question: "Can I use my own email domain?", answer: "Not currently. All inbox addresses use the @mothership.sim.ai domain. You can customize the prefix (e.g., acme@mothership.sim.ai) but not the domain itself." },
{ question: "What happens if I send from an unauthorized email?", answer: "The email is automatically rejected. Only workspace members and manually added external senders can create tasks." },
{ question: "Is there a size limit for attachments?", answer: "Yes, each attachment can be up to 10 MB. Images, PDFs, and common document formats are supported." },
{ question: "Can I reply to Sim's email responses?", answer: "Yes. Replies in the same email thread are linked to the original conversation, so you can continue the interaction from your email client." },
{ question: "How long does it take to process an email?", answer: "Most emails are processed within a few seconds. Emails with large attachments or complex tasks may take slightly longer." },
{ question: "Can multiple people in my workspace use the same inbox?", answer: "Yes. All workspace members can send to the shared inbox address. Each email creates its own task and conversation." },
]} />

View File

@@ -5,6 +5,7 @@ description: Expose your workflows as MCP tools for external AI assistants and a
import { Video } from '@/components/ui/video'
import { Callout } from 'fumadocs-ui/components/callout'
import { FAQ } from '@/components/ui/faq'
Deploy your workflows as MCP tools to make them accessible to external AI assistants like Claude Desktop, Cursor, and other MCP-compatible clients. This turns your workflows into callable tools that can be invoked from anywhere.
@@ -101,8 +102,18 @@ Workflows execute using the same deployment version as API calls, ensuring consi
| Action | Required Permission |
|--------|-------------------|
| Create MCP servers | **Admin** |
| Create MCP servers | **Write** or **Admin** |
| Add workflows to servers | **Write** or **Admin** |
| View MCP servers | **Read**, **Write**, or **Admin** |
| Delete MCP servers | **Admin** |
<FAQ items={[
{ question: "Does my workflow need to be deployed before adding it as an MCP tool?", answer: "Yes. Only deployed workflows can be added as MCP tools. The MCP tool executes the same deployment version as API calls, ensuring consistent behavior between both access methods." },
{ question: "What MCP protocol does Sim implement?", answer: "Sim implements the standard MCP protocol using the official @modelcontextprotocol/sdk types, supporting JSON-RPC 2.0 messages including tools/list and tools/call methods. It handles both requests and notifications per the MCP specification." },
{ question: "How do I authenticate MCP client connections?", answer: "Include your API key in the X-API-Key header when connecting via mcp-remote or other HTTP-based MCP transports. The server validates authentication using hybrid auth that supports both session-based and API key-based access." },
{ question: "Can I add the same workflow to multiple MCP servers?", answer: "Yes. When configuring a workflow as an MCP tool, you can select multiple MCP servers to add it to. Each server exposes its own URL, so you can organize tools into different servers for different use cases or clients." },
{ question: "What naming conventions should I follow for tool names?", answer: "Use lowercase letters, numbers, and underscores only. The name should be descriptive and follow MCP naming conventions, such as search_documents or send_email. This helps AI assistants understand and correctly invoke your tools." },
{ question: "How are workflow inputs mapped to MCP tool parameters?", answer: "Your workflow's input format fields automatically become MCP tool parameters. You can add descriptions to each parameter in the MCP configuration to help AI assistants understand what values to provide." },
]} />
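As a quick sanity check of an MCP server from code, the following sketch sends a JSON-RPC 2.0 `tools/list` request with the API key in the `X-API-Key` header, as described above. The server URL here is a placeholder, and the response handling assumes the standard MCP `tools/list` result shape.

```python
# Minimal sketch: list the tools exposed by a Sim MCP server over HTTP.
# Requires the `requests` package. The URL below is a placeholder.
import requests

MCP_SERVER_URL = "https://sim.example.com/mcp/your-server-id"  # placeholder URL
API_KEY = "sim_xxx"  # your Sim API key

payload = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "tools/list",
    "params": {},
}

resp = requests.post(
    MCP_SERVER_URL,
    json=payload,
    headers={"X-API-Key": API_KEY, "Content-Type": "application/json"},
    timeout=30,
)
resp.raise_for_status()

# Per the MCP spec, the result contains a "tools" array of name/description entries.
for tool in resp.json().get("result", {}).get("tools", []):
    print(tool["name"], "-", tool.get("description", ""))
```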

View File

@@ -97,10 +97,11 @@ MCP functionality requires specific workspace permissions:
| Action | Required Permission |
|--------|-------------------|
| Configure MCP servers in settings | **Admin** |
| Create or update MCP servers | **Write** or **Admin** |
| Delete MCP servers | **Admin** |
| Use MCP tools in agents | **Write** or **Admin** |
| View available MCP tools | **Read**, **Write**, or **Admin** |
| Execute MCP Tool blocks | **Write** or **Admin** |
| Execute MCP Tool blocks | **Read**, **Write**, or **Admin** |
## Common Use Cases
@@ -141,4 +142,15 @@ Fetch live data from external systems during workflow execution.
### Permission Errors
- Confirm your workspace permission level
- Check if the MCP server requires additional authentication
- Verify the server is properly configured for your workspace
import { FAQ } from '@/components/ui/faq'
<FAQ items={[
{ question: "What is the difference between using MCP tools in an Agent block vs. the standalone MCP Tool block?", answer: "When you add MCP tools to an Agent block, the AI decides which tools to use based on the conversation context and its reasoning. This is best for dynamic, conversational workflows. The standalone MCP Tool block executes a specific tool with explicit parameters every time, giving you deterministic, predictable execution. Use Agent blocks for flexible reasoning and MCP Tool blocks for structured, repeatable steps." },
{ question: "Who can configure MCP servers in a workspace?", answer: "Users with Write permission can configure (add and update) MCP servers in workspace settings. Only Admin permission is required to delete MCP servers. Users with Read permission can view available MCP tools and execute them in agents and MCP Tool blocks. This means all workspace members with at least Read access can use MCP tools in their workflows." },
{ question: "Can I use MCP servers from multiple workspaces?", answer: "MCP servers are configured per workspace. Each workspace maintains its own set of MCP server connections. If you need the same MCP server in multiple workspaces, you need to configure it separately in each workspace's settings." },
{ question: "How do I update MCP tool schemas after a server changes its available tools?", answer: "Click the Refresh button on the MCP server in your workspace settings. This fetches the latest tool schemas from the server and automatically updates any agent blocks that use those tools with the new parameter definitions." },
{ question: "Can permission groups restrict access to MCP tools?", answer: "Yes. Organization admins can create permission groups that disable MCP tools for specific members using the disableMcpTools configuration option. When this is enabled, affected users will not be able to add or use MCP tools in their workflows." },
{ question: "What happens if an MCP server goes offline during workflow execution?", answer: "If the MCP server is unreachable during execution, the tool call will fail and return an error. In an Agent block, the AI may attempt to handle the failure gracefully. In a standalone MCP Tool block, the workflow step will fail. Check MCP server logs and verify the server is running and accessible to troubleshoot connectivity issues." },
]} />

View File

@@ -10,6 +10,7 @@
"connections",
"mcp",
"copilot",
"mailer",
"skills",
"knowledgebase",
"variables",

View File

@@ -154,8 +154,19 @@ When inviting someone to your organization, you can assign one of two roles:
- Manage billing and subscription settings
- Access all workspaces within the organization
### Organization Member
**What they can do:**
- Access workspaces they've been specifically invited to
- View the list of organization members
- Cannot invite new people or manage organization settings
import { FAQ } from '@/components/ui/faq'
<FAQ items={[
{ question: "What is the difference between organization roles and workspace permissions?", answer: "Organization roles (Admin or Member) control who can manage the organization itself, including inviting people, creating workspaces, and handling billing. Workspace permissions (Read, Write, Admin) control what a user can do within a specific workspace, such as viewing, editing, or managing workflows. A user needs both an organization role and a workspace permission to work within a workspace." },
{ question: "Can I restrict which integrations or model providers a team member can use?", answer: "Yes. Organization admins can create permission groups with fine-grained controls, including restricting allowed integrations and allowed model providers to specific lists. You can also disable access to MCP tools, custom tools, skills, and various platform features like the knowledge base, API keys, or Copilot on a per-group basis." },
{ question: "What happens when a personal environment variable has the same name as a workspace variable?", answer: "The personal environment variable takes priority. When a workflow runs, if both a personal and workspace variable share the same name, the personal value is used. This allows individual users to override shared workspace configuration when needed." },
{ question: "Can an Admin remove the workspace owner?", answer: "No. The workspace owner cannot be removed from the workspace by anyone. Only the workspace owner can delete the workspace or transfer ownership to another user. Admins can do everything else, including inviting and removing other users and managing workspace settings." },
{ question: "What are permission groups and how do they work?", answer: "Permission groups are an advanced access control feature that lets organization admins define granular restrictions beyond the standard Read/Write/Admin roles. A permission group can hide UI sections (like trace spans, knowledge base, API keys, or deployment options), disable features (MCP tools, custom tools, skills, invitations), and restrict which integrations and model providers members can access. Members can be assigned to groups, and new members can be auto-added." },
{ question: "How should I set up permissions for a new team member?", answer: "Start with the lowest permission level they need. Invite them to the organization as a Member, then add them to the relevant workspace with Read permission if they only need visibility, Write if they need to create and run workflows, or Admin if they need to manage the workspace and its users. You can always increase permissions later." },
]} />

View File

@@ -65,14 +65,14 @@ Execute a workflow with optional input data.
```python
result = client.execute_workflow(
"workflow-id",
input_data={"message": "Hello, world!"},
input={"message": "Hello, world!"},
timeout=30.0 # 30 seconds
)
```
**Parameters:**
- `workflow_id` (str): The ID of the workflow to execute
- `input_data` (dict, optional): Input data to pass to the workflow
- `input` (dict, optional): Input data to pass to the workflow
- `timeout` (float, optional): Timeout in seconds (default: 30.0)
- `stream` (bool, optional): Enable streaming responses (default: False)
- `selected_outputs` (list[str], optional): Block outputs to stream in `blockName.attribute` format (e.g., `["agent1.content"]`)
@@ -144,7 +144,7 @@ Execute a workflow with automatic retry on rate limit errors using exponential b
```python
result = client.execute_with_retry(
"workflow-id",
input_data={"message": "Hello"},
input={"message": "Hello"},
timeout=30.0,
max_retries=3, # Maximum number of retries
initial_delay=1.0, # Initial delay in seconds
@@ -155,7 +155,7 @@ result = client.execute_with_retry(
**Parameters:**
- `workflow_id` (str): The ID of the workflow to execute
- `input_data` (dict, optional): Input data to pass to the workflow
- `input` (dict, optional): Input data to pass to the workflow
- `timeout` (float, optional): Timeout in seconds
- `stream` (bool, optional): Enable streaming responses
- `selected_outputs` (list, optional): Block outputs to stream
@@ -359,7 +359,7 @@ def run_workflow():
# Execute the workflow
result = client.execute_workflow(
"my-workflow-id",
input_data={
input={
"message": "Process this data",
"user_id": "12345"
}
@@ -488,7 +488,7 @@ def execute_async():
# Start async execution
result = client.execute_workflow(
"workflow-id",
input_data={"data": "large dataset"},
input={"data": "large dataset"},
async_execution=True # Execute asynchronously
)
@@ -533,7 +533,7 @@ def execute_with_retry_handling():
# Automatically retries on rate limit
result = client.execute_with_retry(
"workflow-id",
input_data={"message": "Process this"},
input={"message": "Process this"},
max_retries=5,
initial_delay=1.0,
max_delay=60.0,
@@ -615,7 +615,7 @@ def execute_with_streaming():
# Enable streaming for specific block outputs
result = client.execute_workflow(
"workflow-id",
input_data={"message": "Count to five"},
input={"message": "Count to five"},
stream=True,
selected_outputs=["agent1.content"] # Use blockName.attribute format
)
@@ -758,4 +758,15 @@ Configure the client using environment variables:
## License
Apache-2.0
import { FAQ } from '@/components/ui/faq'
<FAQ items={[
{ question: "Do I need to deploy a workflow before I can execute it via the SDK?", answer: "Yes. Workflows must be deployed before they can be executed through the SDK. You can use the validate_workflow() method to check whether a workflow is deployed and ready. If it returns False, deploy the workflow from the Sim UI first and create or select an API key during deployment." },
{ question: "What is the difference between sync and async execution?", answer: "Sync execution (the default) blocks until the workflow completes and returns the full result. Async execution (async_execution=True) returns immediately with a task ID that you can poll using get_job_status(). Use async mode for long-running workflows to avoid request timeouts. Async job statuses include queued, processing, completed, failed, and cancelled." },
{ question: "How does the SDK handle rate limiting?", answer: "The SDK provides built-in rate limiting support through the execute_with_retry() method. It uses exponential backoff (1s, 2s, 4s, 8s...) with 25% jitter to avoid thundering herd problems. If the API returns a retry-after header, that value is used instead. You can configure max_retries, initial_delay, max_delay, and backoff_multiplier. Use get_rate_limit_info() to check your current rate limit status." },
{ question: "Can I use the Python SDK as a context manager?", answer: "Yes. The SimStudioClient supports Python's context manager protocol. Use it with the 'with' statement to automatically close the underlying HTTP session when you are done, which is especially useful for scripts that create and discard client instances." },
{ question: "How do I handle different types of errors from the SDK?", answer: "The SDK raises SimStudioError with a code property for API-specific errors. Common error codes are UNAUTHORIZED (invalid API key), TIMEOUT (request timed out), RATE_LIMIT_EXCEEDED (too many requests), USAGE_LIMIT_EXCEEDED (billing limit reached), and EXECUTION_ERROR (workflow failed). Use the error code to implement targeted error handling and recovery logic." },
{ question: "How do I monitor my API usage and remaining quota?", answer: "Use the get_usage_limits() method to check your current usage. It returns sync and async rate limit details (limit, remaining, reset time, whether you are currently limited), plus your current period cost, usage limit, and plan tier. This lets you monitor consumption and alert before hitting limits." },
]} />
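Putting the FAQ answers together, here is a sketch that uses the client as a context manager and branches on `SimStudioError` codes. The import path and constructor arguments are assumptions; adjust them to match the installed package.

```python
# Sketch combining the context manager and error-code handling described above.
from simstudio import SimStudioClient, SimStudioError  # assumed import path

# Constructor arguments shown here are illustrative; see the configuration docs.
with SimStudioClient(api_key="your-api-key") as client:
    try:
        result = client.execute_workflow(
            "workflow-id",
            input={"message": "Hello"},
            timeout=30.0,
        )
        print(result)
    except SimStudioError as e:
        # The code property distinguishes failure modes (see the FAQ above).
        if e.code == "RATE_LIMIT_EXCEEDED":
            # Fall back to the built-in retry helper with exponential backoff.
            result = client.execute_with_retry(
                "workflow-id",
                input={"message": "Hello"},
                max_retries=3,
            )
        elif e.code == "UNAUTHORIZED":
            raise SystemExit("Check your API key")
        else:
            raise
```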

View File

@@ -78,16 +78,15 @@ new SimStudioClient(config: SimStudioConfig)
Execute a workflow with optional input data.
```typescript
const result = await client.executeWorkflow('workflow-id', {
input: { message: 'Hello, world!' },
const result = await client.executeWorkflow('workflow-id', { message: 'Hello, world!' }, {
timeout: 30000 // 30 seconds
});
```
**Parameters:**
- `workflowId` (string): The ID of the workflow to execute
- `input` (any, optional): Input data to pass to the workflow
- `options` (ExecutionOptions, optional):
- `input` (any): Input data to pass to the workflow
- `timeout` (number): Timeout in milliseconds (default: 30000)
- `stream` (boolean): Enable streaming responses (default: false)
- `selectedOutputs` (string[]): Block outputs to stream in `blockName.attribute` format (e.g., `["agent1.content"]`)
@@ -158,8 +157,7 @@ if (status.status === 'completed') {
Execute a workflow with automatic retry on rate limit errors using exponential backoff.
```typescript
const result = await client.executeWithRetry('workflow-id', {
input: { message: 'Hello' },
const result = await client.executeWithRetry('workflow-id', { message: 'Hello' }, {
timeout: 30000
}, {
maxRetries: 3, // Maximum number of retries
@@ -171,6 +169,7 @@ const result = await client.executeWithRetry('workflow-id', {
**Parameters:**
- `workflowId` (string): The ID of the workflow to execute
- `input` (any, optional): Input data to pass to the workflow
- `options` (ExecutionOptions, optional): Same as `executeWorkflow()`
- `retryOptions` (RetryOptions, optional):
- `maxRetries` (number): Maximum number of retries (default: 3)
@@ -389,10 +388,8 @@ async function runWorkflow() {
// Execute the workflow
const result = await client.executeWorkflow('my-workflow-id', {
input: {
message: 'Process this data',
userId: '12345'
}
});
if (result.success) {
@@ -508,8 +505,7 @@ app.post('/execute-workflow', async (req, res) => {
try {
const { workflowId, input } = req.body;
const result = await client.executeWorkflow(workflowId, {
input,
const result = await client.executeWorkflow(workflowId, input, {
timeout: 60000
});
@@ -555,8 +551,7 @@ export default async function handler(
try {
const { workflowId, input } = req.body;
const result = await client.executeWorkflow(workflowId, {
input,
const result = await client.executeWorkflow(workflowId, input, {
timeout: 30000
});
@@ -586,9 +581,7 @@ const client = new SimStudioClient({
async function executeClientSideWorkflow() {
try {
const result = await client.executeWorkflow('workflow-id', {
input: {
userInput: 'Hello from browser'
}
});
console.log('Workflow result:', result);
@@ -642,10 +635,8 @@ Alternatively, you can manually provide files using the URL format:
// Include files under the field name from your API trigger's input format
const result = await client.executeWorkflow('workflow-id', {
input: {
documents: files, // Must match your workflow's "files" field name
instructions: 'Analyze these documents'
}
});
console.log('Result:', result);
@@ -669,10 +660,8 @@ Alternatively, you can manually provide files using the URL format:
// Include files under the field name from your API trigger's input format
const result = await client.executeWorkflow('workflow-id', {
input: {
documents: [file], // Must match your workflow's "files" field name
query: 'Summarize this document'
}
});
```
</Tab>
@@ -712,8 +701,7 @@ export function useWorkflow(): UseWorkflowResult {
setResult(null);
try {
const workflowResult = await client.executeWorkflow(workflowId, {
input,
const workflowResult = await client.executeWorkflow(workflowId, input, {
timeout: 30000
});
setResult(workflowResult);
@@ -774,8 +762,7 @@ const client = new SimStudioClient({
async function executeAsync() {
try {
// Start async execution
const result = await client.executeWorkflow('workflow-id', {
input: { data: 'large dataset' },
const result = await client.executeWorkflow('workflow-id', { data: 'large dataset' }, {
async: true // Execute asynchronously
});
@@ -823,9 +810,7 @@ const client = new SimStudioClient({
async function executeWithRetryHandling() {
try {
// Automatically retries on rate limit
const result = await client.executeWithRetry('workflow-id', {
input: { message: 'Process this' }
}, {
const result = await client.executeWithRetry('workflow-id', { message: 'Process this' }, {}, {
maxRetries: 5,
initialDelay: 1000,
maxDelay: 60000,
@@ -908,8 +893,7 @@ const client = new SimStudioClient({
async function executeWithStreaming() {
try {
// Enable streaming for specific block outputs
const result = await client.executeWorkflow('workflow-id', {
input: { message: 'Count to five' },
const result = await client.executeWorkflow('workflow-id', { message: 'Count to five' }, {
stream: true,
selectedOutputs: ['agent1.content'] // Use blockName.attribute format
});
@@ -1033,3 +1017,14 @@ function StreamingWorkflow() {
## License
Apache-2.0
import { FAQ } from '@/components/ui/faq'
<FAQ items={[
{ question: "Do I need to deploy a workflow before I can execute it via the SDK?", answer: "Yes. Workflows must be deployed before they can be executed through the SDK. You can use the validateWorkflow() method to check whether a workflow is deployed and ready. If it returns false, deploy the workflow from the Sim UI first and create or select an API key during deployment." },
{ question: "What is the difference between sync and async execution?", answer: "Sync execution (the default) blocks until the workflow completes and returns the full result. Async execution returns immediately with a task ID that you can poll using getJobStatus(). Use async mode for long-running workflows to avoid request timeouts. Async job statuses include queued, processing, completed, failed, and cancelled." },
{ question: "How does streaming work with the SDK?", answer: "Enable streaming by setting stream: true and specifying selectedOutputs with block names and attributes in blockName.attribute format (e.g., ['agent1.content']). The response uses Server-Sent Events (SSE) format, sending incremental chunks as the workflow executes. Each chunk includes the blockId and the text content. A final done event includes the execution metadata." },
{ question: "How does the SDK handle rate limiting?", answer: "The SDK provides built-in rate limiting support through the executeWithRetry() method. It uses exponential backoff (1s, 2s, 4s, 8s...) with 25% jitter to avoid thundering herd problems. If the API returns a retry-after header, that value is used instead. You can configure maxRetries, initialDelay, maxDelay, and backoffMultiplier. Use getRateLimitInfo() to check your current rate limit status." },
{ question: "Is it safe to use the SDK in browser-side code?", answer: "You can use the SDK in the browser, but you should not expose your API key in client-side code. In production, use a backend proxy server to handle SDK calls, or use a public API key with limited permissions. The SDK works with both Node.js and browser environments, but sensitive keys should stay server-side." },
{ question: "How do I send files to a workflow through the SDK?", answer: "File objects are automatically detected and converted to base64 format. Include them in the input object under the field name that matches your workflow's API trigger input format. In the browser, pass File objects directly from file inputs. In Node.js, create File objects from buffers. You can also provide files as URL references with type, data, name, and mime fields." },
]} />

View File

@@ -5,6 +5,7 @@ description: Deploy Sim with Docker Compose
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Callout } from 'fumadocs-ui/components/callout'
import { FAQ } from '@/components/ui/faq'
## Quick Start
@@ -148,3 +149,14 @@ docker compose -f docker-compose.prod.yml pull && docker compose -f docker-compo
# Backup database
docker compose -f docker-compose.prod.yml exec db pg_dump -U postgres simstudio > backup.sql
```
<FAQ items={[
{ question: "What containers are started by docker-compose.prod.yml?", answer: "Four services are started: simstudio (main app on port 3000, 8 GB memory limit), realtime (WebSocket server on port 3002, 1 GB memory limit), db (PostgreSQL 17 with pgvector on port 5432), and migrations (runs once to apply database schema changes, then exits)." },
{ question: "How do I configure SSL for production?", answer: "You can use either Caddy (recommended, handles certificates automatically) or Nginx with Certbot. Both need to reverse-proxy port 3000 for the main app and port 3002 for WebSocket connections at the /socket.io/ path." },
{ question: "Why can I not connect to Ollama running on my host from inside Docker?", answer: "Inside a Docker container, localhost refers to the container itself, not your host machine. On macOS and Windows, use http://host.docker.internal:11434. On Linux, use your host machine's actual IP address (e.g., http://192.168.1.100:11434)." },
{ question: "What is the difference between the GPU and CPU Ollama profiles?", answer: "The GPU profile (--profile gpu) configures NVIDIA driver capabilities and reserves GPU devices for accelerated inference. The CPU profile (--profile cpu) runs Ollama without GPU acceleration. Both use the --profile setup flag to automatically pull the gemma3:4b starter model." },
{ question: "How do I update Sim to the latest version?", answer: "Run docker compose -f docker-compose.prod.yml pull to fetch the latest images, then docker compose -f docker-compose.prod.yml up -d to restart with the new versions. The migrations container will automatically apply any new database schema changes on startup." },
{ question: "How do I back up and restore the database?", answer: "Back up with: docker compose -f docker-compose.prod.yml exec db pg_dump -U postgres simstudio > backup.sql. Restore with: docker compose -f docker-compose.prod.yml exec -T db psql -U postgres simstudio < backup.sql. The database data is persisted in a Docker volume named postgres_data." },
{ question: "Can I customize the PostgreSQL credentials?", answer: "Yes. The docker-compose.prod.yml uses environment variable defaults: POSTGRES_USER (default: postgres), POSTGRES_PASSWORD (default: postgres), POSTGRES_DB (default: simstudio), and POSTGRES_PORT (default: 5432). Set these in your .env file to override them." },
]} />

View File

@@ -5,6 +5,7 @@ description: Deploy Sim on your own infrastructure
import { Card, Cards } from 'fumadocs-ui/components/card'
import { Callout } from 'fumadocs-ui/components/callout'
import { FAQ } from '@/components/ui/faq'
Deploy Sim on your own infrastructure with Docker or Kubernetes.
@@ -63,3 +64,13 @@ Open [http://localhost:3000](http://localhost:3000)
| db | 5432 | PostgreSQL with pgvector |
| migrations | - | Database migrations (runs once) |
<FAQ items={[
{ question: "What are the minimum requirements to self-host Sim?", answer: "At minimum you need 2 CPU cores, 12 GB RAM, 20 GB SSD storage, and Docker 20.10 or later. Memory is typically the constraining factor due to workflow execution (isolated-vm sandboxing), file processing, and vector operations (pgvector)." },
{ question: "Which database does Sim use?", answer: "Sim uses PostgreSQL 17 with the pgvector extension for vector similarity search. The Docker setup uses the pgvector/pgvector:pg17 image, which comes with the vector extension pre-installed." },
{ question: "What are the required environment variables for production?", answer: "Three secrets are required: BETTER_AUTH_SECRET (authentication), ENCRYPTION_KEY (data encryption), and INTERNAL_API_SECRET (service-to-service auth). Generate each with openssl rand -hex 32. You also need to set NEXT_PUBLIC_APP_URL and BETTER_AUTH_URL to your domain." },
{ question: "What does the realtime service do?", answer: "The realtime service is a dedicated WebSocket server that runs on port 3002. It handles real-time communication for features like live workflow execution updates. It has a 1 GB memory limit and requires its own DATABASE_URL and BETTER_AUTH_SECRET configuration." },
{ question: "How does the migrations service work?", answer: "The migrations container runs once at startup (restart: no) and executes bun run db:migrate to apply database schema changes. It depends on the database being healthy before running and must complete successfully before the main application starts." },
{ question: "Can I use Sim with local AI models?", answer: "Yes. Sim supports Ollama for local model inference. Use docker-compose.ollama.yml instead of docker-compose.prod.yml. It offers both GPU (with NVIDIA support) and CPU-only profiles, and automatically pulls gemma3:4b as a starter model." },
]} />

View File

@@ -5,6 +5,7 @@ description: Deploy Sim with Helm
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Callout } from 'fumadocs-ui/components/callout'
import { FAQ } from '@/components/ui/faq'
## Prerequisites
@@ -125,3 +126,13 @@ helm upgrade sim ./helm/sim --namespace simstudio
# Uninstall
helm uninstall sim --namespace simstudio
```
<FAQ items={[
{ question: "What are the default resource limits for the Sim app pod?", answer: "The Helm chart defaults to 8 Gi memory limit / 4 Gi request and 2000m CPU limit / 1000m request per app pod. The pod runs as non-root (UID 1001) with fsGroup 1001 for security." },
{ question: "Can I use an external database instead of the bundled PostgreSQL?", answer: "Yes. Set postgresql.enabled to false and configure the externalDatabase section with your host, port, username, password, database name, and sslMode. The external database must have the pgvector extension installed." },
{ question: "How do I manage secrets securely in Kubernetes?", answer: "The Helm chart supports pre-existing Kubernetes secrets via app.secrets.existingSecret. Set enabled to true and provide the secret name. This integrates with External Secrets Operator, HashiCorp Vault, Azure Key Vault, and similar tools. Key mappings can be customized if your secret uses different key names." },
{ question: "Can I scale the app to multiple replicas?", answer: "Yes. Set app.replicaCount in your values.yaml. The chart supports standard Kubernetes scaling. Ensure your database can handle the additional connections from multiple replicas." },
{ question: "Are there cloud-specific example configurations?", answer: "Yes. The Helm chart includes example values files for AWS EKS (values-aws.yaml), Azure AKS (values-azure.yaml), GCP GKE (values-gcp.yaml), as well as files for production, development, external databases, external secrets, and whitelabeled deployments." },
{ question: "What ingress options are supported?", answer: "The chart supports ingress with configurable className (e.g., nginx), TLS termination, and separate host configuration for the app. Enable it with ingress.enabled: true and set your host under ingress.app.host." },
]} />

View File

@@ -132,3 +132,15 @@ Skills are most valuable when agents need specialized knowledge or multi-step wo
- [Agent Skills specification](https://agentskills.io) — The open format for portable agent skills
- [Example skills](https://github.com/anthropics/skills) — Browse community skill examples
- [Best practices](https://agentskills.io/what-are-skills) — Writing effective skills
import { FAQ } from '@/components/ui/faq'
<FAQ items={[
{ question: "How many skills can I attach to a single agent?", answer: "You can attach as many skills as you want, but the recommended limit is 5-10 per agent. More skills mean more decision overhead for the agent when scanning descriptions. Since only the names and descriptions are included in the system prompt (about 50-100 tokens each), many skills will not dramatically increase context usage, but they can slow down the agent's decision-making." },
{ question: "How does the agent decide when to load a skill?", answer: "The agent sees an available_skills section in its system prompt listing each skill's name and description. When the agent determines that a skill is relevant to the current task, it calls the load_skill tool with the skill name. The full skill content is then returned as a tool response. This is why writing a specific, keyword-rich description is critical -- it is the only thing the agent reads before deciding whether to activate a skill." },
{ question: "Do skills work with all LLM providers?", answer: "Yes. The load_skill mechanism uses standard tool-calling, which is supported by all LLM providers in Sim. No provider-specific configuration is needed. The skill system works the same way whether you are using Anthropic, OpenAI, Google, or any other supported provider." },
{ question: "When should I use skills vs. agent instructions?", answer: "Use skills for knowledge that applies across multiple workflows or changes frequently. Skills are reusable packages that can be attached to any agent. Use agent instructions for task-specific context that is unique to a single agent and workflow. If you find yourself copying the same instructions into multiple agents, that content should be a skill instead." },
{ question: "Can permission groups disable skills for certain users?", answer: "Yes. Organization admins can create permission groups with the disableSkills option enabled. When a user is assigned to such a permission group, the skills dropdown in agent blocks will be disabled and they will not be able to add or use skills in their workflows." },
{ question: "What is the recommended maximum length for skill content?", answer: "Keep skills focused and under 500 lines. If a skill grows too large, split it into multiple specialized skills. Shorter, focused skills are more effective because the agent can load exactly what it needs. A broad skill with too much content can overwhelm the agent and reduce the quality of its responses." },
{ question: "Where do I create and manage skills?", answer: "Go to Settings and select Skills under the Tools section. From there you can add new skills with a name (kebab-case identifier, max 64 characters), description (max 1024 characters), and content (full instructions in markdown). You can also edit or delete existing skills from this page." },
]} />

View File

@@ -34,7 +34,7 @@ Integrates Airtable into the workflow. Can list bases, list tables (with schema)
### `airtable_list_bases`
List all Airtable bases the user has access to
List all bases the authenticated user has access to
#### Input
@@ -46,7 +46,7 @@ List all Airtable bases the user has access to
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `bases` | array | List of Airtable bases |
| `bases` | array | Array of Airtable bases with id, name, and permissionLevel |
| ↳ `id` | string | Base ID \(starts with "app"\) |
| ↳ `name` | string | Base name |
| ↳ `permissionLevel` | string | Permission level \(none, read, comment, edit, create\) |
@@ -204,4 +204,21 @@ Update multiple existing records in an Airtable table
| ↳ `recordCount` | number | Number of records updated |
| ↳ `updatedRecordIds` | array | List of updated record IDs |
### `airtable_get_base_schema`
Get the schema of all tables, fields, and views in an Airtable base
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `baseId` | string | Yes | Airtable base ID \(starts with "app", e.g., "appXXXXXXXXXXXXXX"\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `tables` | json | Array of table schemas with fields and views |
| `metadata` | json | Operation metadata including total tables count |

View File

@@ -55,6 +55,9 @@ Search the web using Exa AI. Returns relevant search results with titles, URLs,
| `summary` | boolean | No | Include AI-generated summaries in results \(default: false\) |
| `livecrawl` | string | No | Live crawling mode: never \(default\), fallback, always, or preferred \(always try livecrawl, fall back to cache if fails\) |
| `apiKey` | string | Yes | Exa AI API Key |
| `pricing` | custom | No | No description |
| `metadata` | string | No | No description |
| `rateLimit` | string | No | No description |
#### Output
@@ -87,6 +90,9 @@ Retrieve the contents of webpages using Exa AI. Returns the title, text content,
| `highlights` | boolean | No | Include highlighted snippets in results \(default: false\) |
| `livecrawl` | string | No | Live crawling mode: never \(default\), fallback, always, or preferred \(always try livecrawl, fall back to cache if fails\) |
| `apiKey` | string | Yes | Exa AI API Key |
| `pricing` | custom | No | No description |
| `metadata` | string | No | No description |
| `rateLimit` | string | No | No description |
#### Output
@@ -116,6 +122,9 @@ Find webpages similar to a given URL using Exa AI. Returns a list of similar lin
| `summary` | boolean | No | Include AI-generated summaries in results \(default: false\) |
| `livecrawl` | string | No | Live crawling mode: never \(default\), fallback, always, or preferred \(always try livecrawl, fall back to cache if fails\) |
| `apiKey` | string | Yes | Exa AI API Key |
| `pricing` | custom | No | No description |
| `metadata` | string | No | No description |
| `rateLimit` | string | No | No description |
#### Output
@@ -138,6 +147,9 @@ Get an AI-generated answer to a question with citations from the web using Exa A
| `query` | string | Yes | The question to answer |
| `text` | boolean | No | Whether to include the full text of the answer |
| `apiKey` | string | Yes | Exa AI API Key |
| `pricing` | custom | No | No description |
| `metadata` | string | No | No description |
| `rateLimit` | string | No | No description |
#### Output

View File

@@ -6,6 +6,7 @@ description: Powerful tools to enhance your agentic workflows
import { Card, Cards } from "fumadocs-ui/components/card";
import { Step, Steps } from "fumadocs-ui/components/steps";
import { Video } from '@/components/ui/video';
import { FAQ } from '@/components/ui/faq';
Tools are powerful components in Sim that allow your workflows to interact with external services, process data, and perform specialized tasks. They extend the capabilities of your agents and workflows by providing access to various APIs and services.
@@ -49,10 +50,10 @@ Each tool requires specific configuration to function properly. Common configura
Sim provides a diverse collection of tools for various purposes, including:
- **AI and Language Processing**: OpenAI, ElevenLabs, Translation services
- **AI and Language Processing**: OpenAI, ElevenLabs, Google Translate
- **Search and Research**: Google Search, Tavily, Exa, Perplexity
- **Document Manipulation**: Google Docs, Google Sheets, Notion, Confluence
- **Media Processing**: Vision, Image Generator
- **Media Processing**: Vision
- **Communication**: Slack, WhatsApp, Twilio SMS, Gmail
- **Data Storage**: Pinecone, Supabase, Airtable
- **Development**: GitHub
@@ -68,3 +69,13 @@ Tools typically return structured data that can be processed by subsequent block
- Status information
Refer to each tool's specific documentation to understand its exact output format.
<FAQ items={[
{ question: "How many tool integrations does Sim provide?", answer: "Sim includes over 180 service integrations with 1,000+ individual tool actions, spanning categories like AI and language processing, search and research, document manipulation, media processing, communication, data storage, development platforms, and more." },
{ question: "What is the difference between using a tool as a standalone block vs. as an agent tool?", answer: "As a standalone block, the tool is called deterministically at a fixed point in your workflow, giving you precise control. As an agent tool, the tool is provided to an AI agent that dynamically decides whether and when to call it based on context and task requirements." },
{ question: "How does tool authentication work?", answer: "Tools support multiple authentication methods. Many use OAuth for secure token-based access to external services. Others accept API keys provided by the user. Some tools also support Sim's hosted API keys, so you can use the tool without providing your own key." },
{ question: "Can tools return files?", answer: "Yes. Tools can return file-typed outputs with associated metadata like file name, MIME type, and size. File data can be provided as a buffer, base64 string, or a URL to download from. This enables workflows that process documents, images, and other binary content." },
{ question: "What happens when a tool API call fails?", answer: "Tools support configurable retry logic with settings for maximum retries, initial delay, and maximum delay. Error extractors parse provider-specific error responses to surface clear error messages. You can also configure whether only idempotent requests should be retried." },
{ question: "Can tools dynamically adapt their parameters based on context?", answer: "Yes. Tools support schema enrichment, where parameter schemas are dynamically updated at runtime based on other parameter values. For example, the knowledge search tool enriches its tag filter options based on the selected knowledgebase's actual tag definitions." },
]} />
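For illustration, a generic retry helper with the settings named above (maximum retries, initial delay, maximum delay) might look like the following. This is a conceptual sketch, not Sim's internal retry implementation.

```python
# Generic retry-with-backoff sketch for an external API call.
import random
import time

def call_with_retry(call, max_retries: int = 3, initial_delay: float = 1.0, max_delay: float = 30.0):
    delay = initial_delay
    for attempt in range(max_retries + 1):
        try:
            return call()
        except Exception:
            if attempt == max_retries:
                raise
            # Exponential backoff capped at max_delay, with a small amount of jitter.
            time.sleep(min(delay, max_delay) * (1 + random.random() * 0.25))
            delay *= 2
```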

View File

@@ -25,7 +25,7 @@ In Sim, the Knowledge Base block enables your agents to perform intelligent sema
## Usage Instructions
Integrate Knowledge into the workflow. Can search, upload chunks, and create documents.
Integrate Knowledge into the workflow. Perform full CRUD operations on documents, chunks, and tags.
@@ -122,4 +122,281 @@ Create a new document in a knowledge base
| `message` | string | Success or error message describing the operation result |
| `documentId` | string | ID of the created document |
### `knowledge_list_tags`
List all tag definitions for a knowledge base
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `knowledgeBaseId` | string | Yes | ID of the knowledge base to list tags for |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `knowledgeBaseId` | string | ID of the knowledge base |
| `tags` | array | Array of tag definitions for the knowledge base |
| ↳ `id` | string | Tag definition ID |
| ↳ `tagSlot` | string | Internal tag slot \(e.g. tag1, number1\) |
| ↳ `displayName` | string | Human-readable tag name |
| ↳ `fieldType` | string | Tag field type \(text, number, date, boolean\) |
| ↳ `createdAt` | string | Creation timestamp |
| ↳ `updatedAt` | string | Last update timestamp |
| `totalTags` | number | Total number of tag definitions |
### `knowledge_list_documents`
List documents in a knowledge base with optional filtering, search, and pagination
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `knowledgeBaseId` | string | Yes | ID of the knowledge base to list documents from |
| `search` | string | No | Search query to filter documents by filename |
| `enabledFilter` | string | No | Filter by enabled status: "all", "enabled", or "disabled" |
| `limit` | number | No | Maximum number of documents to return \(default: 50\) |
| `offset` | number | No | Number of documents to skip for pagination \(default: 0\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `knowledgeBaseId` | string | ID of the knowledge base |
| `documents` | array | Array of documents in the knowledge base |
| ↳ `id` | string | Document ID |
| ↳ `filename` | string | Document filename |
| ↳ `fileSize` | number | File size in bytes |
| ↳ `mimeType` | string | MIME type of the document |
| ↳ `enabled` | boolean | Whether the document is enabled |
| ↳ `processingStatus` | string | Processing status \(pending, processing, completed, failed\) |
| ↳ `chunkCount` | number | Number of chunks in the document |
| ↳ `tokenCount` | number | Total token count across chunks |
| ↳ `uploadedAt` | string | Upload timestamp |
| ↳ `updatedAt` | string | Last update timestamp |
| ↳ `connectorId` | string | Connector ID if document was synced from an external source |
| ↳ `connectorType` | string | Connector type \(e.g. notion, github, confluence\) if synced |
| ↳ `sourceUrl` | string | Original URL in the source system if synced from a connector |
| `totalDocuments` | number | Total number of documents matching the filter |
| `limit` | number | Page size used |
| `offset` | number | Offset used for pagination |
### `knowledge_get_document`
Get full details of a single document including tags, connector metadata, and processing status
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `knowledgeBaseId` | string | Yes | ID of the knowledge base the document belongs to |
| `documentId` | string | Yes | ID of the document to retrieve |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `id` | string | Document ID |
| `filename` | string | Document filename |
| `fileSize` | number | File size in bytes |
| `mimeType` | string | MIME type of the document |
| `enabled` | boolean | Whether the document is enabled |
| `processingStatus` | string | Processing status \(pending, processing, completed, failed\) |
| `processingError` | string | Error message if processing failed |
| `chunkCount` | number | Number of chunks in the document |
| `tokenCount` | number | Total token count across chunks |
| `characterCount` | number | Total character count |
| `uploadedAt` | string | Upload timestamp |
| `updatedAt` | string | Last update timestamp |
| `connectorId` | string | Connector ID if document was synced from an external source |
| `sourceUrl` | string | Original URL in the source system if synced from a connector |
| `externalId` | string | External ID from the source system |
| `tags` | object | Tag values keyed by tag slot \(tag1-7, number1-5, date1-2, boolean1-3\) |
### `knowledge_delete_document`
Delete a document from a knowledge base
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `knowledgeBaseId` | string | Yes | ID of the knowledge base containing the document |
| `documentId` | string | Yes | ID of the document to delete |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `documentId` | string | ID of the deleted document |
| `message` | string | Confirmation message |
### `knowledge_list_chunks`
List chunks for a document in a knowledge base with optional filtering and pagination
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `knowledgeBaseId` | string | Yes | ID of the knowledge base |
| `documentId` | string | Yes | ID of the document to list chunks from |
| `search` | string | No | Search query to filter chunks by content |
| `enabled` | string | No | Filter by enabled status: "true", "false", or "all" \(default: "all"\) |
| `limit` | number | No | Maximum number of chunks to return \(1-100, default: 50\) |
| `offset` | number | No | Number of chunks to skip for pagination \(default: 0\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `knowledgeBaseId` | string | ID of the knowledge base |
| `documentId` | string | ID of the document |
| `chunks` | array | Array of chunks in the document |
| ↳ `id` | string | Chunk ID |
| ↳ `chunkIndex` | number | Index of the chunk within the document |
| ↳ `content` | string | Chunk text content |
| ↳ `contentLength` | number | Content length in characters |
| ↳ `tokenCount` | number | Token count for the chunk |
| ↳ `enabled` | boolean | Whether the chunk is enabled |
| ↳ `createdAt` | string | Creation timestamp |
| ↳ `updatedAt` | string | Last update timestamp |
| `totalChunks` | number | Total number of chunks matching the filter |
| `limit` | number | Page size used |
| `offset` | number | Offset used for pagination |
### `knowledge_update_chunk`
Update the content or enabled status of a chunk in a knowledge base
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `knowledgeBaseId` | string | Yes | ID of the knowledge base |
| `documentId` | string | Yes | ID of the document containing the chunk |
| `chunkId` | string | Yes | ID of the chunk to update |
| `content` | string | No | New content for the chunk |
| `enabled` | boolean | No | Whether the chunk should be enabled or disabled |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `documentId` | string | ID of the parent document |
| `id` | string | Chunk ID |
| `chunkIndex` | number | Index of the chunk within the document |
| `content` | string | Updated chunk content |
| `contentLength` | number | Content length in characters |
| `tokenCount` | number | Token count for the chunk |
| `enabled` | boolean | Whether the chunk is enabled |
| `updatedAt` | string | Last update timestamp |
### `knowledge_delete_chunk`
Delete a chunk from a document in a knowledge base
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `knowledgeBaseId` | string | Yes | ID of the knowledge base |
| `documentId` | string | Yes | ID of the document containing the chunk |
| `chunkId` | string | Yes | ID of the chunk to delete |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `chunkId` | string | ID of the deleted chunk |
| `documentId` | string | ID of the parent document |
| `message` | string | Confirmation message |
### `knowledge_list_connectors`
List all connectors for a knowledge base, showing sync status, type, and document counts
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `knowledgeBaseId` | string | Yes | ID of the knowledge base to list connectors for |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `knowledgeBaseId` | string | ID of the knowledge base |
| `connectors` | array | Array of connectors for the knowledge base |
| ↳ `id` | string | Connector ID |
| ↳ `connectorType` | string | Type of connector \(e.g. notion, github, confluence\) |
| ↳ `status` | string | Connector status \(active, paused, syncing\) |
| ↳ `syncIntervalMinutes` | number | Sync interval in minutes \(0 = manual only\) |
| ↳ `lastSyncAt` | string | Timestamp of last sync |
| ↳ `lastSyncError` | string | Error from last sync if failed |
| ↳ `lastSyncDocCount` | number | Number of documents synced in last sync |
| ↳ `nextSyncAt` | string | Timestamp of next scheduled sync |
| ↳ `consecutiveFailures` | number | Number of consecutive sync failures |
| ↳ `createdAt` | string | Creation timestamp |
| ↳ `updatedAt` | string | Last update timestamp |
| `totalConnectors` | number | Total number of connectors |
### `knowledge_get_connector`
Get detailed connector information including recent sync logs for monitoring sync health
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `knowledgeBaseId` | string | Yes | ID of the knowledge base the connector belongs to |
| `connectorId` | string | Yes | ID of the connector to retrieve |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `connector` | object | Connector details |
| ↳ `id` | string | Connector ID |
| ↳ `connectorType` | string | Type of connector |
| ↳ `status` | string | Connector status \(active, paused, syncing\) |
| ↳ `syncIntervalMinutes` | number | Sync interval in minutes |
| ↳ `lastSyncAt` | string | Timestamp of last sync |
| ↳ `lastSyncError` | string | Error from last sync if failed |
| ↳ `lastSyncDocCount` | number | Docs synced in last sync |
| ↳ `nextSyncAt` | string | Next scheduled sync timestamp |
| ↳ `consecutiveFailures` | number | Consecutive sync failures |
| ↳ `createdAt` | string | Creation timestamp |
| ↳ `updatedAt` | string | Last update timestamp |
| `syncLogs` | array | Recent sync log entries |
| ↳ `id` | string | Sync log ID |
| ↳ `status` | string | Sync status |
| ↳ `startedAt` | string | Sync start time |
| ↳ `completedAt` | string | Sync completion time |
| ↳ `docsAdded` | number | Documents added |
| ↳ `docsUpdated` | number | Documents updated |
| ↳ `docsDeleted` | number | Documents deleted |
| ↳ `docsUnchanged` | number | Documents unchanged |
| ↳ `errorMessage` | string | Error message if sync failed |
### `knowledge_trigger_sync`
Trigger a manual sync for a knowledge base connector
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `knowledgeBaseId` | string | Yes | ID of the knowledge base the connector belongs to |
| `connectorId` | string | Yes | ID of the connector to trigger sync for |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `connectorId` | string | ID of the connector that was synced |
| `message` | string | Status message from the sync trigger |

View File

@@ -5,6 +5,7 @@ description: Triggers are the core ways to start Sim workflows
import { Card, Cards } from 'fumadocs-ui/components/card'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
<div className="flex justify-center">
<Image
@@ -91,3 +92,12 @@ Invitees receive an email with a link to connect their account. Once connected,
**Using in a Workflow**
When configuring an email trigger, select your polling group from the credentials dropdown instead of an individual account. The system creates webhooks for each member and routes all emails through your workflow.
<FAQ items={[
{ question: "Can I have multiple triggers on the same workflow?", answer: "Yes, a workflow can have multiple triggers (for example, a Start block and a Webhook trigger). When you click Run in the editor, Sim executes the highest-priority trigger: Start block first, then Schedule, then external triggers like webhooks. Each trigger type can also fire independently when its event occurs." },
{ question: "How do I secure my webhook endpoint?", answer: "The Generic Webhook trigger supports authentication. Enable the Require Authentication toggle, set an auth token, and optionally specify a custom header name. Incoming requests must include the token as a Bearer token in the Authorization header (or in your custom header). Requests without a valid token are rejected." },
{ question: "What happens when I test an external trigger manually?", answer: "When you click Run on a workflow with an external trigger (webhook, Slack, Gmail, etc.), Sim generates a mock payload based on the trigger's expected data structure. This lets downstream blocks resolve their variable references correctly so you can test the full workflow without waiting for a real event." },
{ question: "Do triggers use the draft canvas or the deployed version?", answer: "All trigger-based executions (API, chat, schedule, webhook) run against the active deployment snapshot, not your draft canvas. After making changes, you need to redeploy for triggers to pick up the updated workflow version." },
{ question: "What integrations are available as triggers?", answer: "Sim supports a wide range of trigger integrations including GitHub (push, PR, issues), Slack, Gmail, Outlook, Linear, Jira, HubSpot, Stripe, Airtable, Calendly, Typeform, Telegram, WhatsApp, Microsoft Teams, RSS feeds, and more. Each integration provides event-specific triggers like issue_created or email_received." },
{ question: "How does the Schedule trigger work?", answer: "The Schedule trigger runs your workflow on a timer using cron expressions or interval-based configuration. The schedule is managed within the schedule block settings. Like all triggers, scheduled runs execute the active deployment snapshot, so make sure to redeploy after making workflow changes." },
]} />
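The run-priority behaviour described in the first FAQ item can be summarised with a small sketch; the type and function names below are illustrative, not Sim internals.

```typescript
// Sketch of the manual-run priority from the FAQ: Start block first, then Schedule,
// then external triggers such as webhooks or Slack.
type TriggerKind = 'start' | 'schedule' | 'webhook' | 'slack' | 'gmail'

const MANUAL_RUN_PRIORITY: TriggerKind[] = ['start', 'schedule', 'webhook', 'slack', 'gmail']

function pickManualRunTrigger(present: TriggerKind[]): TriggerKind | undefined {
  // Clicking Run executes the highest-priority trigger configured on the workflow.
  return MANUAL_RUN_PRIORITY.find((kind) => present.includes(kind))
}

// Example: a workflow with both a Start block and a Webhook trigger runs the Start block.
pickManualRunTrigger(['webhook', 'start']) // => 'start'
```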

View File

@@ -4,6 +4,7 @@ title: RSS Feed
import { Callout } from 'fumadocs-ui/components/callout'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
The RSS Feed block monitors RSS and Atom feeds; when new items are published, your workflow triggers automatically.
@@ -47,3 +48,13 @@ Access mapped fields directly (`<rss.title>`) or use the raw objects for any fie
<Callout>
RSS triggers only fire for items published after you save the trigger. Existing feed items are not processed.
</Callout>
<FAQ items={[
{ question: "How often is the RSS feed checked for new items?", answer: "The feed is polled every minute. On each poll, the service fetches the feed, compares items against the last checked timestamp and a list of previously seen GUIDs, and triggers your workflow only for genuinely new items." },
{ question: "How does the poller avoid processing the same item twice?", answer: "The service tracks up to 100 recent item GUIDs and the last checked timestamp. An item is considered new only if its GUID has not been seen before and its publication date is after the last checked timestamp. Additionally, an idempotency layer prevents duplicate workflow executions for the same item." },
{ question: "Is there a limit on how many new items are processed per poll?", answer: "Yes. Each polling cycle processes a maximum of 25 new items, sorted by publication date (newest first). If a feed publishes more than 25 items between polls, only the 25 most recent are processed." },
{ question: "What output fields are available from the RSS trigger?", answer: "Each triggered execution receives: title, link, and pubDate as top-level convenience fields, plus a full item object containing all fields (including guid, summary, content, contentSnippet, author, categories, enclosure, and isoDate), a feed object with the feed's title, link, and description, and a timestamp of when the event was processed." },
{ question: "What happens if the RSS feed is temporarily unreachable?", answer: "A failed fetch increments the webhook's consecutive failure counter. After 100 consecutive failures, the RSS trigger is automatically disabled. On any successful poll, the counter resets to zero." },
{ question: "Does the RSS trigger support Atom feeds?", answer: "Yes. The underlying parser (rss-parser) supports both RSS and Atom feed formats. You can use the URL of either format in the Feed URL field." },
]} />
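The polling and deduplication rules in the FAQ above amount to a small selection step. The sketch below mirrors the documented rules (unseen GUID, published after the last check, at most 25 items per poll, newest first); the `FeedItem` type and function name are hypothetical.

```typescript
// Illustrative sketch of which feed items trigger the workflow on a poll.
interface FeedItem {
  guid: string
  pubDate: string
}

function selectNewItems(
  items: FeedItem[],
  seenGuids: Set<string>, // up to 100 recent GUIDs are tracked per the FAQ
  lastCheckedAt: Date
): FeedItem[] {
  return items
    .filter((item) => !seenGuids.has(item.guid) && new Date(item.pubDate) > lastCheckedAt)
    .sort((a, b) => new Date(b.pubDate).getTime() - new Date(a.pubDate).getTime())
    .slice(0, 25) // at most 25 new items per polling cycle, newest first
}
```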

View File

@@ -5,6 +5,7 @@ title: Schedule
import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { FAQ } from '@/components/ui/faq'
The Schedule block automatically triggers workflows on a recurring schedule at specified intervals or times.
@@ -75,3 +76,13 @@ Schedules automatically disable after **100 consecutive failures** to prevent ru
<Callout>
Schedule blocks cannot receive incoming connections and serve as workflow entry points only.
</Callout>
<FAQ items={[
{ question: "Do I need to deploy my workflow for the schedule to start?", answer: "Yes. Schedules are created in the database only when you deploy the workflow. Undeploying removes the schedule, and redeploying recreates it with the current configuration." },
{ question: "What exactly happens after 100 consecutive failures?", answer: "After 100 consecutive failures, the schedule is automatically set to a disabled status to prevent runaway errors. A warning badge appears on the schedule block in the editor. You can click the badge to reactivate it. The failure counter resets to zero on any successful execution." },
{ question: "Does the schedule support timezones?", answer: "Yes. The schedule configuration includes a timezone setting. Cron expressions and simple intervals are evaluated relative to the configured timezone, which defaults to UTC if not specified." },
{ question: "What happens if my scheduled workflow is rate-limited?", answer: "If a rate limit (HTTP 429) is encountered during execution, the schedule automatically retries after a 5-minute delay rather than counting it as a failure." },
{ question: "Can I have multiple schedule blocks in one workflow?", answer: "Yes. The deployment process finds all schedule blocks in the workflow and creates a separate schedule record for each one. Each schedule operates independently with its own cron expression and failure counter." },
{ question: "What happens if the workflow is undeployed while a schedule execution is in progress?", answer: "The currently running execution will complete, but no new executions will be triggered. When the schedule next tries to fire, it checks that the workflow is still deployed and the schedule record still exists before executing." },
]} />
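The failure handling described in the FAQ above (a 429 retries after 5 minutes without counting as a failure, 100 consecutive failures disable the schedule, any success resets the counter) can be sketched as a small state transition. The names below are illustrative, not Sim's scheduler code.

```typescript
// Hedged sketch of the schedule failure rules documented above.
const MAX_CONSECUTIVE_FAILURES = 100
const RATE_LIMIT_RETRY_MS = 5 * 60 * 1000 // retry 5 minutes after an HTTP 429

interface ScheduleState {
  consecutiveFailures: number
  status: 'active' | 'disabled'
  nextRunAt: Date
}

function afterRun(
  state: ScheduleState,
  outcome: 'success' | 'failure' | 'rate-limited'
): ScheduleState {
  if (outcome === 'success') {
    return { ...state, consecutiveFailures: 0 } // success resets the counter
  }
  if (outcome === 'rate-limited') {
    // A 429 schedules a retry instead of counting as a failure.
    return { ...state, nextRunAt: new Date(Date.now() + RATE_LIMIT_RETRY_MS) }
  }
  const failures = state.consecutiveFailures + 1
  return {
    ...state,
    consecutiveFailures: failures,
    status: failures >= MAX_CONSECUTIVE_FAILURES ? 'disabled' : state.status,
  }
}
```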

View File

@@ -6,6 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { Video } from '@/components/ui/video'
import { FAQ } from '@/components/ui/faq'
Webhooks allow external services to trigger workflow execution by sending HTTP requests to your workflow. Sim supports two approaches for webhook-based triggers.
@@ -146,4 +147,14 @@ Always validate and sanitize incoming webhook data before processing it in your
### Customer Support
- Support ticket creation workflows
- Automated escalation processes
- Multi-channel communication routing
<FAQ items={[
{ question: "What HTTP methods does the Generic Webhook endpoint accept?", answer: "The webhook endpoint handles POST requests for triggering workflows. GET requests are only used for provider-specific verification challenges (such as Microsoft Graph or WhatsApp verification). Other methods return a 405 Method Not Allowed response." },
{ question: "How do I authenticate webhook requests?", answer: "Enable the Require Authentication toggle in the webhook configuration, then set an Authentication Token. Callers can send the token as a Bearer token in the Authorization header, or you can specify a custom header name (e.g., X-Secret-Key) and the token will be matched against that header instead." },
{ question: "Can I define the expected payload structure for a webhook?", answer: "Yes. The Generic Webhook block includes an Input Format field where you can define the expected JSON schema. This is optional but helps document the expected structure. You can also use type \"file[]\" for file upload fields." },
{ question: "Does the webhook have deduplication built in?", answer: "Yes. The webhook processing pipeline includes idempotency checks to prevent duplicate executions from repeated requests with the same payload." },
{ question: "What data from the webhook request is available in my workflow?", answer: "All request data including headers, body, and query parameters is parsed and made available to subsequent blocks. Common fields like event, id, and data are automatically extracted from the payload when present." },
{ question: "Do I need to deploy my workflow for the webhook URL to work?", answer: "Yes. The webhook endpoint checks that the associated workflow is deployed before triggering execution. If the workflow is not deployed, the webhook returns a not-found response." },
{ question: "Does the webhook auto-disable after repeated failures?", answer: "No. Unlike polling-based triggers (RSS, Gmail, IMAP), push-based generic webhooks do not auto-disable after consecutive failures. Each incoming request is processed independently. If your workflow consistently fails, check the execution logs for error details." },
]} />
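For callers, the authentication options from the FAQ above look like this in practice; the URL, header name, and payload are placeholders for whatever your webhook configuration uses.

```typescript
// Sketch of calling an authenticated Generic Webhook endpoint (URL is a placeholder).
const WEBHOOK_URL = 'https://example.com/api/webhooks/trigger/your-webhook-id'
const AUTH_TOKEN = process.env.SIM_WEBHOOK_TOKEN ?? ''

// Option 1: token sent as a Bearer token in the Authorization header.
await fetch(WEBHOOK_URL, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${AUTH_TOKEN}`,
  },
  body: JSON.stringify({ event: 'ticket.created', id: 'T-1001', data: { priority: 'high' } }),
})

// Option 2: token sent in a custom header name configured on the webhook (e.g. X-Secret-Key).
await fetch(WEBHOOK_URL, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json', 'X-Secret-Key': AUTH_TOKEN },
  body: JSON.stringify({ event: 'ticket.created', id: 'T-1001' }),
})
```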

View File

@@ -6,6 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Video } from '@/components/ui/video'
import { FAQ } from '@/components/ui/faq'
Variables in Sim act as a global store for data that can be accessed and modified by any block in your workflow. They provide a powerful way to share information between different parts of your workflow, maintain state, and create more dynamic applications.
@@ -119,7 +120,7 @@ Variables in Sim have global scope, meaning:
- They are accessible from any block in your workflow
- Changes to variables persist throughout workflow execution
- Variables maintain their values between runs, unless explicitly reset
- Variables start fresh from their defined values on each run. Changes during execution are visible within that run only
## Best Practices
@@ -129,3 +130,12 @@ Variables in Sim have global scope, meaning:
- **Initialize Variables Early**: Set up and initialize your variables at the beginning of your workflow to ensure they're available when needed.
- **Handle Missing Variables**: Always consider the case where a variable might not yet exist or might have an unexpected value. Add appropriate validation in your blocks.
- **Limit Variable Count**: Keep the number of variables manageable. Too many variables can make your workflow difficult to understand and maintain.
<FAQ items={[
{ question: "What is the difference between workflow variables and environment variables (secrets)?", answer: "Workflow variables are defined per workflow and store data like text, numbers, objects, or arrays that blocks can read and modify during execution. They are referenced with <variable.name> syntax. Environment variables (secrets) are workspace-level credentials referenced with {{KEY}} syntax, designed for sensitive values like API keys that should never appear in logs." },
{ question: "How are variables resolved during execution?", answer: "The execution engine uses a chain of resolvers that run in order: loop variables, parallel variables, workflow variables, environment variables, and then block references. When a block input contains a variable reference, the resolver matches it by normalized name (case-insensitive, spaces removed) or exact ID, then substitutes the resolved value before the block runs." },
{ question: "Can I use nested paths to access properties inside an object variable?", answer: "Yes. If your variable stores an object or array, you can use dot notation to access nested properties. For example, <variable.config.retryCount> will navigate into the config object and return the retryCount value." },
{ question: "Do workflow variables persist between separate workflow runs?", answer: "Variables maintain their initial values as defined in the Variables panel. Each execution starts with those configured values. If a block modifies a variable during execution, that change is visible to subsequent blocks in the same run, but does not alter the saved initial value for future runs." },
{ question: "What happens if I reference a variable that does not exist?", answer: "If the resolver cannot find a matching variable, the raw reference string is left in place without substitution. This typically causes downstream blocks to receive unexpected input, so make sure all referenced variables are defined before running the workflow." },
{ question: "Can I store JSON objects or arrays as variable values?", answer: "Yes. Variables support text, numbers, booleans, JSON objects, and arrays. When the value is parsed for execution, the engine determines the type and resolves it accordingly, so downstream blocks receive the proper JavaScript object or array rather than a raw string." },
]} />
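A rough sketch of the resolution behaviour described above: workflow-variable references are matched by normalized name, dot notation walks into object values, and unknown references are left untouched. This mirrors the documentation, not Sim's actual resolver.

```typescript
// Illustrative resolver for <variable.name> references (names are matched
// case-insensitively with spaces removed; unknown references are left in place).
function resolveWorkflowVariables(input: string, variables: Record<string, unknown>): string {
  const normalized = new Map(
    Object.entries(variables).map(([name, value]) => [
      name.toLowerCase().replace(/\s+/g, ''),
      value,
    ])
  )
  return input.replace(/<variable\.([\w.]+)>/g, (match, path: string) => {
    const [name, ...rest] = path.split('.')
    let value = normalized.get(name.toLowerCase())
    if (value === undefined) return match // unknown variable: leave the raw reference
    for (const key of rest) {
      if (value == null || typeof value !== 'object') return match
      value = (value as Record<string, unknown>)[key] // dot notation into object variables
    }
    return typeof value === 'string' ? value : JSON.stringify(value)
  })
}

// resolveWorkflowVariables('retries=<variable.config.retryCount>', { Config: { retryCount: 3 } })
// => 'retries=3'
```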

View File

@@ -183,13 +183,8 @@ while (count < items.length) {
### Limitations
<Callout type="warning">
Container blocks (Loops and Parallels) cannot be nested inside each other. This means:
- You cannot place a Loop block inside another Loop block
- You cannot place a Parallel block inside a Loop block
- You cannot place any container block inside another container block
If you need multi-dimensional iteration, consider restructuring your workflow to use sequential loops or to process data in stages.
<Callout type="info">
Container blocks (Loops and Parallels) support nesting. You can place loops inside loops, parallels inside loops, and any combination of container blocks to build complex multi-dimensional workflows.
</Callout>
<Callout type="info">

View File

@@ -117,11 +117,8 @@ Cada instancia paralela se ejecuta de forma independiente:
### Limitations
<Callout type="warning">
Container blocks (Loops and Parallels) cannot be nested inside each other. This means:
- You cannot place a Loop block inside a Parallel block
- You cannot place another Parallel block inside a Parallel block
- You cannot place any container block inside another container block
<Callout type="info">
Container blocks (Loops and Parallels) support nesting. You can place parallels inside parallels, loops inside parallels, and any combination of container blocks to build complex multi-dimensional workflows.
</Callout>
<Callout type="info">

View File

@@ -190,13 +190,8 @@ return results;
### Limitations
<Callout type="warning">
Container blocks (Loops and Parallels) cannot be nested inside each other. This means:
- You cannot place a Loop block inside another Loop block
- You cannot place a Parallel block inside a Loop block
- You cannot place any container block inside another container block
If you need multi-dimensional iteration, consider restructuring your workflow to use sequential loops or to process data in stages.
<Callout type="info">
Container blocks (Loops and Parallels) support nesting. You can place loops inside loops, parallels inside loops, and any combination of container blocks to build complex multi-dimensional workflows.
</Callout>
<Callout type="info">

View File

@@ -117,11 +117,8 @@ Chaque instance parallèle s'exécute indépendamment :
### Limitations
<Callout type="warning">
Container blocks (Loops and Parallels) cannot be nested inside each other. This means:
- You cannot place a Loop block inside a Parallel block
- You cannot place another Parallel block inside a Parallel block
- You cannot place any container block inside another container block
<Callout type="info">
Container blocks (Loops and Parallels) support nesting. You can place parallels inside parallels, loops inside parallels, and any combination of container blocks to build complex multi-dimensional workflows.
</Callout>
<Callout type="info">

View File

@@ -198,13 +198,8 @@ while (counter < items.length && !foundTarget) {
### Limitations
<Callout type="warning">
Container blocks (Loops and Parallels) cannot be nested inside each other. This means:
- You cannot place a Loop block inside another Loop block
- You cannot place a Parallel block inside a Loop block
- You cannot place any container block inside another container block
If you need multi-dimensional iteration, consider restructuring your workflow to use sequential loops or to process data in stages.
<Callout type="info">
Container blocks (Loops and Parallels) support nesting. You can place loops inside loops, parallels inside loops, and any combination of container blocks to build complex multi-dimensional workflows.
</Callout>
<Callout type="info">

View File

@@ -117,11 +117,8 @@ Parallel (["gpt-4o", "claude-3.7-sonnet", "gemini-2.5-pro"]) → Agent → Evalu
### Limitations
<Callout type="warning">
Container blocks (Loops and Parallels) cannot be nested inside each other. This means:
- You cannot place a Loop block inside a Parallel block
- You cannot place another Parallel block inside a Parallel block
- You cannot place any container block inside another container block
<Callout type="info">
Container blocks (Loops and Parallels) support nesting. You can place parallels inside parallels, loops inside parallels, and any combination of container blocks to build complex multi-dimensional workflows.
</Callout>
<Callout type="info">

View File

@@ -160,13 +160,8 @@ Variables (i=0) → Loop (While i<10) → Agent (Process) → Variables (i++)
### Limitations
<Callout type="warning">
Container blocks (Loops and Parallels) cannot be nested inside each other. This means:
- You cannot place a Loop block inside another Loop block
- You cannot place a Parallel block inside a Loop block
- You cannot place any container block inside another container block
If you need multi-dimensional iteration, consider restructuring your workflow to use sequential loops or to process data in stages.
<Callout type="info">
Container blocks (Loops and Parallels) support nesting. You can place loops inside loops, parallels inside loops, and any combination of container blocks to build complex multi-dimensional workflows.
</Callout>
<Callout type="info">

View File

@@ -121,11 +121,8 @@ const allResults = input.parallel.results;
### Limitations
<Callout type="warning">
Container blocks (Loops and Parallels) cannot be nested inside each other. This means:
- You cannot place a Loop block inside a Parallel block
- You cannot place another Parallel block inside a Parallel block
- You cannot place any container block inside another container block
<Callout type="info">
Container blocks (Loops and Parallels) support nesting. You can place parallels inside parallels, loops inside parallels, and any combination of container blocks to build complex multi-dimensional workflows.
</Callout>
<Callout type="info">

View File

@@ -62,7 +62,10 @@ function openapiPluginBadgeLeft() {
null,
createElement(
'span',
{ className: `font-mono font-medium me-1.5 text-[10px] text-nowrap ${colorClass}` },
{
className: `font-mono font-medium me-1.5 text-[10px] text-nowrap ${colorClass}`,
'data-method': method.toLowerCase(),
},
method
),
node.name

File diff suppressed because it is too large

View File

@@ -8,7 +8,11 @@
"build": "fumadocs-mdx && NODE_OPTIONS='--max-old-space-size=8192' next build",
"start": "next start",
"postinstall": "fumadocs-mdx",
"type-check": "tsc --noEmit"
"type-check": "tsc --noEmit",
"lint": "biome check --write --unsafe .",
"lint:check": "biome check .",
"format": "biome format --write .",
"format:check": "biome format ."
},
"dependencies": {
"@sim/db": "workspace:*",

View File

@@ -0,0 +1,37 @@
'use client'
import { useEffect } from 'react'
import AuthBackground from '@/app/(auth)/components/auth-background'
import Nav from '@/app/(landing)/components/nav/nav'
function isColorDark(hexColor: string): boolean {
const hex = hexColor.replace('#', '')
const r = Number.parseInt(hex.substr(0, 2), 16)
const g = Number.parseInt(hex.substr(2, 2), 16)
const b = Number.parseInt(hex.substr(4, 2), 16)
const luminance = (0.299 * r + 0.587 * g + 0.114 * b) / 255
return luminance < 0.5
}
export default function AuthLayoutClient({ children }: { children: React.ReactNode }) {
useEffect(() => {
const rootStyle = getComputedStyle(document.documentElement)
const brandBackground = rootStyle.getPropertyValue('--brand-background-hex').trim()
if (brandBackground && isColorDark(brandBackground)) {
document.body.classList.add('auth-dark-bg')
} else {
document.body.classList.remove('auth-dark-bg')
}
}, [])
return (
<AuthBackground>
<main className='relative flex min-h-screen flex-col text-foreground'>
<Nav hideAuthButtons={true} variant='auth' />
<div className='relative z-30 flex flex-1 items-center justify-center px-4 pb-24'>
<div className='w-full max-w-lg px-4'>{children}</div>
</div>
</main>
</AuthBackground>
)
}

View File

@@ -1,42 +1,10 @@
'use client'
import type { Metadata } from 'next'
import AuthLayoutClient from '@/app/(auth)/auth-layout-client'
import { useEffect } from 'react'
import AuthBackground from '@/app/(auth)/components/auth-background'
import Nav from '@/app/(landing)/components/nav/nav'
// Helper to detect if a color is dark
function isColorDark(hexColor: string): boolean {
const hex = hexColor.replace('#', '')
const r = Number.parseInt(hex.substr(0, 2), 16)
const g = Number.parseInt(hex.substr(2, 2), 16)
const b = Number.parseInt(hex.substr(4, 2), 16)
const luminance = (0.299 * r + 0.587 * g + 0.114 * b) / 255
return luminance < 0.5
export const metadata: Metadata = {
robots: { index: false, follow: false },
}
export default function AuthLayout({ children }: { children: React.ReactNode }) {
useEffect(() => {
// Check if brand background is dark and add class accordingly
const rootStyle = getComputedStyle(document.documentElement)
const brandBackground = rootStyle.getPropertyValue('--brand-background-hex').trim()
if (brandBackground && isColorDark(brandBackground)) {
document.body.classList.add('auth-dark-bg')
} else {
document.body.classList.remove('auth-dark-bg')
}
}, [])
return (
<AuthBackground>
<main className='relative flex min-h-screen flex-col text-foreground'>
{/* Header - Nav handles all conditional logic */}
<Nav hideAuthButtons={true} variant='auth' />
{/* Content */}
<div className='relative z-30 flex flex-1 items-center justify-center px-4 pb-24'>
<div className='w-full max-w-lg px-4'>{children}</div>
</div>
</main>
</AuthBackground>
)
return <AuthLayoutClient>{children}</AuthLayoutClient>
}

View File

@@ -1,6 +1,11 @@
import type { Metadata } from 'next'
import { getOAuthProviderStatus } from '@/app/(auth)/components/oauth-provider-checker'
import LoginForm from '@/app/(auth)/login/login-form'
export const metadata: Metadata = {
title: 'Log In',
}
export const dynamic = 'force-dynamic'
export default async function LoginPage() {

View File

@@ -1,117 +1,8 @@
'use client'
import type { Metadata } from 'next'
import ResetPasswordPage from '@/app/(auth)/reset-password/reset-password-content'
import { Suspense, useEffect, useState } from 'react'
import { createLogger } from '@sim/logger'
import Link from 'next/link'
import { useRouter, useSearchParams } from 'next/navigation'
import { inter } from '@/app/_styles/fonts/inter/inter'
import { soehne } from '@/app/_styles/fonts/soehne/soehne'
import { SetNewPasswordForm } from '@/app/(auth)/reset-password/reset-password-form'
const logger = createLogger('ResetPasswordPage')
function ResetPasswordContent() {
const router = useRouter()
const searchParams = useSearchParams()
const token = searchParams.get('token')
const [isSubmitting, setIsSubmitting] = useState(false)
const [statusMessage, setStatusMessage] = useState<{
type: 'success' | 'error' | null
text: string
}>({
type: null,
text: '',
})
useEffect(() => {
if (!token) {
setStatusMessage({
type: 'error',
text: 'Invalid or missing reset token. Please request a new password reset link.',
})
}
}, [token])
const handleResetPassword = async (password: string) => {
try {
setIsSubmitting(true)
setStatusMessage({ type: null, text: '' })
const response = await fetch('/api/auth/reset-password', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
token,
newPassword: password,
}),
})
if (!response.ok) {
const errorData = await response.json()
throw new Error(errorData.message || 'Failed to reset password')
}
setStatusMessage({
type: 'success',
text: 'Password reset successful! Redirecting to login...',
})
setTimeout(() => {
router.push('/login?resetSuccess=true')
}, 1500)
} catch (error) {
logger.error('Error resetting password:', { error })
setStatusMessage({
type: 'error',
text: error instanceof Error ? error.message : 'Failed to reset password',
})
} finally {
setIsSubmitting(false)
}
}
return (
<>
<div className='space-y-1 text-center'>
<h1 className={`${soehne.className} font-medium text-[32px] text-black tracking-tight`}>
Reset your password
</h1>
<p className={`${inter.className} font-[380] text-[16px] text-muted-foreground`}>
Enter a new password for your account
</p>
</div>
<div className={`${inter.className} mt-8`}>
<SetNewPasswordForm
token={token}
onSubmit={handleResetPassword}
isSubmitting={isSubmitting}
statusType={statusMessage.type}
statusMessage={statusMessage.text}
/>
</div>
<div className={`${inter.className} pt-6 text-center font-light text-[14px]`}>
<Link
href='/login'
className='font-medium text-[var(--brand-accent-hex)] underline-offset-4 transition hover:text-[var(--brand-accent-hover-hex)] hover:underline'
>
Back to login
</Link>
</div>
</>
)
export const metadata: Metadata = {
title: 'Reset Password',
}
export default function ResetPasswordPage() {
return (
<Suspense
fallback={<div className='flex h-screen items-center justify-center'>Loading...</div>}
>
<ResetPasswordContent />
</Suspense>
)
}
export default ResetPasswordPage

View File

@@ -0,0 +1,117 @@
'use client'
import { Suspense, useEffect, useState } from 'react'
import { createLogger } from '@sim/logger'
import Link from 'next/link'
import { useRouter, useSearchParams } from 'next/navigation'
import { inter } from '@/app/_styles/fonts/inter/inter'
import { soehne } from '@/app/_styles/fonts/soehne/soehne'
import { SetNewPasswordForm } from '@/app/(auth)/reset-password/reset-password-form'
const logger = createLogger('ResetPasswordPage')
function ResetPasswordContent() {
const router = useRouter()
const searchParams = useSearchParams()
const token = searchParams.get('token')
const [isSubmitting, setIsSubmitting] = useState(false)
const [statusMessage, setStatusMessage] = useState<{
type: 'success' | 'error' | null
text: string
}>({
type: null,
text: '',
})
useEffect(() => {
if (!token) {
setStatusMessage({
type: 'error',
text: 'Invalid or missing reset token. Please request a new password reset link.',
})
}
}, [token])
const handleResetPassword = async (password: string) => {
try {
setIsSubmitting(true)
setStatusMessage({ type: null, text: '' })
const response = await fetch('/api/auth/reset-password', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
token,
newPassword: password,
}),
})
if (!response.ok) {
const errorData = await response.json()
throw new Error(errorData.message || 'Failed to reset password')
}
setStatusMessage({
type: 'success',
text: 'Password reset successful! Redirecting to login...',
})
setTimeout(() => {
router.push('/login?resetSuccess=true')
}, 1500)
} catch (error) {
logger.error('Error resetting password:', { error })
setStatusMessage({
type: 'error',
text: error instanceof Error ? error.message : 'Failed to reset password',
})
} finally {
setIsSubmitting(false)
}
}
return (
<>
<div className='space-y-1 text-center'>
<h1 className={`${soehne.className} font-medium text-[32px] text-black tracking-tight`}>
Reset your password
</h1>
<p className={`${inter.className} font-[380] text-[16px] text-muted-foreground`}>
Enter a new password for your account
</p>
</div>
<div className={`${inter.className} mt-8`}>
<SetNewPasswordForm
token={token}
onSubmit={handleResetPassword}
isSubmitting={isSubmitting}
statusType={statusMessage.type}
statusMessage={statusMessage.text}
/>
</div>
<div className={`${inter.className} pt-6 text-center font-light text-[14px]`}>
<Link
href='/login'
className='font-medium text-[var(--brand-accent-hex)] underline-offset-4 transition hover:text-[var(--brand-accent-hover-hex)] hover:underline'
>
Back to login
</Link>
</div>
</>
)
}
export default function ResetPasswordPage() {
return (
<Suspense
fallback={<div className='flex h-screen items-center justify-center'>Loading...</div>}
>
<ResetPasswordContent />
</Suspense>
)
}

View File

@@ -1,7 +1,12 @@
import type { Metadata } from 'next'
import { isRegistrationDisabled } from '@/lib/core/config/feature-flags'
import { getOAuthProviderStatus } from '@/app/(auth)/components/oauth-provider-checker'
import SignupForm from '@/app/(auth)/signup/signup-form'
export const metadata: Metadata = {
title: 'Sign Up',
}
export const dynamic = 'force-dynamic'
export default async function SignupPage() {

View File

@@ -1,7 +1,12 @@
import type { Metadata } from 'next'
import { redirect } from 'next/navigation'
import { getEnv, isTruthy } from '@/lib/core/config/env'
import SSOForm from '@/ee/sso/components/sso-form'
export const metadata: Metadata = {
title: 'Single Sign-On',
}
export const dynamic = 'force-dynamic'
export default async function SSOPage() {

View File

@@ -1,7 +1,12 @@
import type { Metadata } from 'next'
import { isEmailVerificationEnabled, isProd } from '@/lib/core/config/feature-flags'
import { hasEmailService } from '@/lib/messaging/email/mailer'
import { VerifyContent } from '@/app/(auth)/verify/verify-content'
export const metadata: Metadata = {
title: 'Verify Email',
}
export const dynamic = 'force-dynamic'
export default function VerifyPage() {

Some files were not shown because too many files have changed in this diff